1 /* 2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // no precompiled headers 26 #include "classfile/classLoader.hpp" 27 #include "classfile/systemDictionary.hpp" 28 #include "classfile/vmSymbols.hpp" 29 #include "code/icBuffer.hpp" 30 #include "code/vtableStubs.hpp" 31 #include "compiler/compileBroker.hpp" 32 #include "interpreter/interpreter.hpp" 33 #include "jvm_solaris.h" 34 #include "memory/allocation.inline.hpp" 35 #include "memory/filemap.hpp" 36 #include "mutex_solaris.inline.hpp" 37 #include "oops/oop.inline.hpp" 38 #include "os_share_solaris.hpp" 39 #include "prims/jniFastGetField.hpp" 40 #include "prims/jvm.h" 41 #include "prims/jvm_misc.hpp" 42 #include "runtime/arguments.hpp" 43 #include "runtime/extendedPC.hpp" 44 #include "runtime/globals.hpp" 45 #include "runtime/interfaceSupport.hpp" 46 #include "runtime/java.hpp" 47 #include "runtime/javaCalls.hpp" 48 #include "runtime/mutexLocker.hpp" 49 #include "runtime/objectMonitor.hpp" 50 #include "runtime/osThread.hpp" 51 #include "runtime/perfMemory.hpp" 52 #include "runtime/sharedRuntime.hpp" 53 #include "runtime/statSampler.hpp" 54 #include "runtime/stubRoutines.hpp" 55 #include "runtime/threadCritical.hpp" 56 #include "runtime/timer.hpp" 57 #include "services/attachListener.hpp" 58 #include "services/runtimeService.hpp" 59 #include "thread_solaris.inline.hpp" 60 #include "utilities/decoder.hpp" 61 #include "utilities/defaultStream.hpp" 62 #include "utilities/events.hpp" 63 #include "utilities/growableArray.hpp" 64 #include "utilities/vmError.hpp" 65 #ifdef TARGET_ARCH_x86 66 # include "assembler_x86.inline.hpp" 67 # include "nativeInst_x86.hpp" 68 #endif 69 #ifdef TARGET_ARCH_sparc 70 # include "assembler_sparc.inline.hpp" 71 # include "nativeInst_sparc.hpp" 72 #endif 73 #ifdef COMPILER1 74 #include "c1/c1_Runtime1.hpp" 75 #endif 76 #ifdef COMPILER2 77 #include "opto/runtime.hpp" 78 #endif 79 80 // put OS-includes here 81 # include <dlfcn.h> 82 # include <errno.h> 83 # include <exception> 84 # include <link.h> 85 # include <poll.h> 86 # 
include <pthread.h> 87 # include <pwd.h> 88 # include <schedctl.h> 89 # include <setjmp.h> 90 # include <signal.h> 91 # include <stdio.h> 92 # include <alloca.h> 93 # include <sys/filio.h> 94 # include <sys/ipc.h> 95 # include <sys/lwp.h> 96 # include <sys/machelf.h> // for elf Sym structure used by dladdr1 97 # include <sys/mman.h> 98 # include <sys/processor.h> 99 # include <sys/procset.h> 100 # include <sys/pset.h> 101 # include <sys/resource.h> 102 # include <sys/shm.h> 103 # include <sys/socket.h> 104 # include <sys/stat.h> 105 # include <sys/systeminfo.h> 106 # include <sys/time.h> 107 # include <sys/times.h> 108 # include <sys/types.h> 109 # include <sys/wait.h> 110 # include <sys/utsname.h> 111 # include <thread.h> 112 # include <unistd.h> 113 # include <sys/priocntl.h> 114 # include <sys/rtpriocntl.h> 115 # include <sys/tspriocntl.h> 116 # include <sys/iapriocntl.h> 117 # include <sys/loadavg.h> 118 # include <string.h> 119 # include <stdio.h> 120 121 # define _STRUCTURED_PROC 1 // this gets us the new structured proc interfaces of 5.6 & later 122 # include <sys/procfs.h> // see comment in <sys/procfs.h> 123 124 #define MAX_PATH (2 * K) 125 126 // for timer info max values which include all bits 127 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF) 128 129 #ifdef _GNU_SOURCE 130 // See bug #6514594 131 extern "C" int madvise(caddr_t, size_t, int); 132 extern "C" int memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg, 133 int attr, int mask); 134 #endif //_GNU_SOURCE 135 136 /* 137 MPSS Changes Start. 138 The JVM binary needs to be built and run on pre-Solaris 9 139 systems, but the constants needed by MPSS are only in Solaris 9 140 header files. They are textually replicated here to allow 141 building on earlier systems. Once building on Solaris 8 is 142 no longer a requirement, these #defines can be replaced by ordinary 143 system .h inclusion. 144 145 In earlier versions of the JDK and Solaris, we used ISM for large pages. 
146 But ISM requires shared memory to achieve this and thus has many caveats. 147 MPSS is a fully transparent and is a cleaner way to get large pages. 148 Although we still require keeping ISM for backward compatiblitiy as well as 149 giving the opportunity to use large pages on older systems it is 150 recommended that MPSS be used for Solaris 9 and above. 151 152 */ 153 154 #ifndef MC_HAT_ADVISE 155 156 struct memcntl_mha { 157 uint_t mha_cmd; /* command(s) */ 158 uint_t mha_flags; 159 size_t mha_pagesize; 160 }; 161 #define MC_HAT_ADVISE 7 /* advise hat map size */ 162 #define MHA_MAPSIZE_VA 0x1 /* set preferred page size */ 163 #define MAP_ALIGN 0x200 /* addr specifies alignment */ 164 165 #endif 166 // MPSS Changes End. 167 168 169 // Here are some liblgrp types from sys/lgrp_user.h to be able to 170 // compile on older systems without this header file. 171 172 #ifndef MADV_ACCESS_LWP 173 # define MADV_ACCESS_LWP 7 /* next LWP to access heavily */ 174 #endif 175 #ifndef MADV_ACCESS_MANY 176 # define MADV_ACCESS_MANY 8 /* many processes to access heavily */ 177 #endif 178 179 #ifndef LGRP_RSRC_CPU 180 # define LGRP_RSRC_CPU 0 /* CPU resources */ 181 #endif 182 #ifndef LGRP_RSRC_MEM 183 # define LGRP_RSRC_MEM 1 /* memory resources */ 184 #endif 185 186 // Some more macros from sys/mman.h that are not present in Solaris 8. 187 188 #ifndef MAX_MEMINFO_CNT 189 /* 190 * info_req request type definitions for meminfo 191 * request types starting with MEMINFO_V are used for Virtual addresses 192 * and should not be mixed with MEMINFO_PLGRP which is targeted for Physical 193 * addresses 194 */ 195 # define MEMINFO_SHIFT 16 196 # define MEMINFO_MASK (0xFF << MEMINFO_SHIFT) 197 # define MEMINFO_VPHYSICAL (0x01 << MEMINFO_SHIFT) /* get physical addr */ 198 # define MEMINFO_VLGRP (0x02 << MEMINFO_SHIFT) /* get lgroup */ 199 # define MEMINFO_VPAGESIZE (0x03 << MEMINFO_SHIFT) /* size of phys page */ 200 # define MEMINFO_VREPLCNT (0x04 << MEMINFO_SHIFT) /* no. 
of replica */ 201 # define MEMINFO_VREPL (0x05 << MEMINFO_SHIFT) /* physical replica */ 202 # define MEMINFO_VREPL_LGRP (0x06 << MEMINFO_SHIFT) /* lgrp of replica */ 203 # define MEMINFO_PLGRP (0x07 << MEMINFO_SHIFT) /* lgroup for paddr */ 204 205 /* maximum number of addresses meminfo() can process at a time */ 206 # define MAX_MEMINFO_CNT 256 207 208 /* maximum number of request types */ 209 # define MAX_MEMINFO_REQ 31 210 #endif 211 212 // see thr_setprio(3T) for the basis of these numbers 213 #define MinimumPriority 0 214 #define NormalPriority 64 215 #define MaximumPriority 127 216 217 // Values for ThreadPriorityPolicy == 1 218 int prio_policy1[MaxPriority+1] = { -99999, 0, 16, 32, 48, 64, 219 80, 96, 112, 124, 127 }; 220 221 // System parameters used internally 222 static clock_t clock_tics_per_sec = 100; 223 224 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+) 225 static bool enabled_extended_FILE_stdio = false; 226 227 // For diagnostics to print a message once. 
see run_periodic_checks 228 static bool check_addr0_done = false; 229 static sigset_t check_signal_done; 230 static bool check_signals = true; 231 232 address os::Solaris::handler_start; // start pc of thr_sighndlrinfo 233 address os::Solaris::handler_end; // end pc of thr_sighndlrinfo 234 235 address os::Solaris::_main_stack_base = NULL; // 4352906 workaround 236 237 238 // "default" initializers for missing libc APIs 239 extern "C" { 240 static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; } 241 static int lwp_mutex_destroy(mutex_t *mx) { return 0; } 242 243 static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; } 244 static int lwp_cond_destroy(cond_t *cv) { return 0; } 245 } 246 247 // "default" initializers for pthread-based synchronization 248 extern "C" { 249 static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; } 250 static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; } 251 } 252 253 // Thread Local Storage 254 // This is common to all Solaris platforms so it is defined here, 255 // in this common file. 256 // The declarations are in the os_cpu threadLS*.hpp files. 
257 // 258 // Static member initialization for TLS 259 Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL}; 260 261 #ifndef PRODUCT 262 #define _PCT(n,d) ((100.0*(double)(n))/(double)(d)) 263 264 int ThreadLocalStorage::_tcacheHit = 0; 265 int ThreadLocalStorage::_tcacheMiss = 0; 266 267 void ThreadLocalStorage::print_statistics() { 268 int total = _tcacheMiss+_tcacheHit; 269 tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n", 270 _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total)); 271 } 272 #undef _PCT 273 #endif // PRODUCT 274 275 Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id, 276 int index) { 277 Thread *thread = get_thread_slow(); 278 if (thread != NULL) { 279 address sp = os::current_stack_pointer(); 280 guarantee(thread->_stack_base == NULL || 281 (sp <= thread->_stack_base && 282 sp >= thread->_stack_base - thread->_stack_size) || 283 is_error_reported(), 284 "sp must be inside of selected thread stack"); 285 286 thread->set_self_raw_id(raw_id); // mark for quick retrieval 287 _get_thread_cache[ index ] = thread; 288 } 289 return thread; 290 } 291 292 293 static const double all_zero[ sizeof(Thread) / sizeof(double) + 1 ] = {0}; 294 #define NO_CACHED_THREAD ((Thread*)all_zero) 295 296 void ThreadLocalStorage::pd_set_thread(Thread* thread) { 297 298 // Store the new value before updating the cache to prevent a race 299 // between get_thread_via_cache_slowly() and this store operation. 300 os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread); 301 302 // Update thread cache with new thread if setting on thread create, 303 // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit. 304 uintptr_t raw = pd_raw_thread_id(); 305 int ix = pd_cache_index(raw); 306 _get_thread_cache[ix] = thread == NULL ? 
NO_CACHED_THREAD : thread; 307 } 308 309 void ThreadLocalStorage::pd_init() { 310 for (int i = 0; i < _pd_cache_size; i++) { 311 _get_thread_cache[i] = NO_CACHED_THREAD; 312 } 313 } 314 315 // Invalidate all the caches (happens to be the same as pd_init). 316 void ThreadLocalStorage::pd_invalidate_all() { pd_init(); } 317 318 #undef NO_CACHED_THREAD 319 320 // END Thread Local Storage 321 322 static inline size_t adjust_stack_size(address base, size_t size) { 323 if ((ssize_t)size < 0) { 324 // 4759953: Compensate for ridiculous stack size. 325 size = max_intx; 326 } 327 if (size > (size_t)base) { 328 // 4812466: Make sure size doesn't allow the stack to wrap the address space. 329 size = (size_t)base; 330 } 331 return size; 332 } 333 334 static inline stack_t get_stack_info() { 335 stack_t st; 336 int retval = thr_stksegment(&st); 337 st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size); 338 assert(retval == 0, "incorrect return value from thr_stksegment"); 339 assert((address)&st < (address)st.ss_sp, "Invalid stack base returned"); 340 assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned"); 341 return st; 342 } 343 344 address os::current_stack_base() { 345 int r = thr_main() ; 346 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ; 347 bool is_primordial_thread = r; 348 349 // Workaround 4352906, avoid calls to thr_stksegment by 350 // thr_main after the first one (it looks like we trash 351 // some data, causing the value for ss_sp to be incorrect). 
352 if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) { 353 stack_t st = get_stack_info(); 354 if (is_primordial_thread) { 355 // cache initial value of stack base 356 os::Solaris::_main_stack_base = (address)st.ss_sp; 357 } 358 return (address)st.ss_sp; 359 } else { 360 guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base"); 361 return os::Solaris::_main_stack_base; 362 } 363 } 364 365 size_t os::current_stack_size() { 366 size_t size; 367 368 int r = thr_main() ; 369 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ; 370 if(!r) { 371 size = get_stack_info().ss_size; 372 } else { 373 struct rlimit limits; 374 getrlimit(RLIMIT_STACK, &limits); 375 size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur); 376 } 377 // base may not be page aligned 378 address base = current_stack_base(); 379 address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());; 380 return (size_t)(base - bottom); 381 } 382 383 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 384 return localtime_r(clock, res); 385 } 386 387 // interruptible infrastructure 388 389 // setup_interruptible saves the thread state before going into an 390 // interruptible system call. 391 // The saved state is used to restore the thread to 392 // its former state whether or not an interrupt is received. 
393 // Used by classloader os::read 394 // os::restartable_read calls skip this layer and stay in _thread_in_native 395 396 void os::Solaris::setup_interruptible(JavaThread* thread) { 397 398 JavaThreadState thread_state = thread->thread_state(); 399 400 assert(thread_state != _thread_blocked, "Coming from the wrong thread"); 401 assert(thread_state != _thread_in_native, "Native threads skip setup_interruptible"); 402 OSThread* osthread = thread->osthread(); 403 osthread->set_saved_interrupt_thread_state(thread_state); 404 thread->frame_anchor()->make_walkable(thread); 405 ThreadStateTransition::transition(thread, thread_state, _thread_blocked); 406 } 407 408 // Version of setup_interruptible() for threads that are already in 409 // _thread_blocked. Used by os_sleep(). 410 void os::Solaris::setup_interruptible_already_blocked(JavaThread* thread) { 411 thread->frame_anchor()->make_walkable(thread); 412 } 413 414 JavaThread* os::Solaris::setup_interruptible() { 415 JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread(); 416 setup_interruptible(thread); 417 return thread; 418 } 419 420 void os::Solaris::try_enable_extended_io() { 421 typedef int (*enable_extended_FILE_stdio_t)(int, int); 422 423 if (!UseExtendedFileIO) { 424 return; 425 } 426 427 enable_extended_FILE_stdio_t enabler = 428 (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT, 429 "enable_extended_FILE_stdio"); 430 if (enabler) { 431 enabler(-1, -1); 432 } 433 } 434 435 436 #ifdef ASSERT 437 438 JavaThread* os::Solaris::setup_interruptible_native() { 439 JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread(); 440 JavaThreadState thread_state = thread->thread_state(); 441 assert(thread_state == _thread_in_native, "Assumed thread_in_native"); 442 return thread; 443 } 444 445 void os::Solaris::cleanup_interruptible_native(JavaThread* thread) { 446 JavaThreadState thread_state = thread->thread_state(); 447 assert(thread_state == _thread_in_native, "Assumed thread_in_native"); 448 } 449 #endif 
450 451 // cleanup_interruptible reverses the effects of setup_interruptible 452 // setup_interruptible_already_blocked() does not need any cleanup. 453 454 void os::Solaris::cleanup_interruptible(JavaThread* thread) { 455 OSThread* osthread = thread->osthread(); 456 457 ThreadStateTransition::transition(thread, _thread_blocked, osthread->saved_interrupt_thread_state()); 458 } 459 460 // I/O interruption related counters called in _INTERRUPTIBLE 461 462 void os::Solaris::bump_interrupted_before_count() { 463 RuntimeService::record_interrupted_before_count(); 464 } 465 466 void os::Solaris::bump_interrupted_during_count() { 467 RuntimeService::record_interrupted_during_count(); 468 } 469 470 static int _processors_online = 0; 471 472 jint os::Solaris::_os_thread_limit = 0; 473 volatile jint os::Solaris::_os_thread_count = 0; 474 475 julong os::available_memory() { 476 return Solaris::available_memory(); 477 } 478 479 julong os::Solaris::available_memory() { 480 return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size(); 481 } 482 483 julong os::Solaris::_physical_memory = 0; 484 485 julong os::physical_memory() { 486 return Solaris::physical_memory(); 487 } 488 489 julong os::allocatable_physical_memory(julong size) { 490 #ifdef _LP64 491 return size; 492 #else 493 julong result = MIN2(size, (julong)3835*M); 494 if (!is_allocatable(result)) { 495 // Memory allocations will be aligned but the alignment 496 // is not known at this point. Alignments will 497 // be at most to LargePageSizeInBytes. Protect 498 // allocations from alignments up to illegal 499 // values. If at this point 2G is illegal. 
500 julong reasonable_size = (julong)2*G - 2 * LargePageSizeInBytes; 501 result = MIN2(size, reasonable_size); 502 } 503 return result; 504 #endif 505 } 506 507 static hrtime_t first_hrtime = 0; 508 static const hrtime_t hrtime_hz = 1000*1000*1000; 509 const int LOCK_BUSY = 1; 510 const int LOCK_FREE = 0; 511 const int LOCK_INVALID = -1; 512 static volatile hrtime_t max_hrtime = 0; 513 static volatile int max_hrtime_lock = LOCK_FREE; // Update counter with LSB as lock-in-progress 514 515 516 void os::Solaris::initialize_system_info() { 517 set_processor_count(sysconf(_SC_NPROCESSORS_CONF)); 518 _processors_online = sysconf (_SC_NPROCESSORS_ONLN); 519 _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE); 520 } 521 522 int os::active_processor_count() { 523 int online_cpus = sysconf(_SC_NPROCESSORS_ONLN); 524 pid_t pid = getpid(); 525 psetid_t pset = PS_NONE; 526 // Are we running in a processor set or is there any processor set around? 527 if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) { 528 uint_t pset_cpus; 529 // Query the number of cpus available to us. 530 if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) { 531 assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check"); 532 _processors_online = pset_cpus; 533 return pset_cpus; 534 } 535 } 536 // Otherwise return number of online cpus 537 return online_cpus; 538 } 539 540 static bool find_processors_in_pset(psetid_t pset, 541 processorid_t** id_array, 542 uint_t* id_length) { 543 bool result = false; 544 // Find the number of processors in the processor set. 545 if (pset_info(pset, NULL, id_length, NULL) == 0) { 546 // Make up an array to hold their ids. 547 *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length); 548 // Fill in the array with their processor ids. 
549 if (pset_info(pset, NULL, id_length, *id_array) == 0) { 550 result = true; 551 } 552 } 553 return result; 554 } 555 556 // Callers of find_processors_online() must tolerate imprecise results -- 557 // the system configuration can change asynchronously because of DR 558 // or explicit psradm operations. 559 // 560 // We also need to take care that the loop (below) terminates as the 561 // number of processors online can change between the _SC_NPROCESSORS_ONLN 562 // request and the loop that builds the list of processor ids. Unfortunately 563 // there's no reliable way to determine the maximum valid processor id, 564 // so we use a manifest constant, MAX_PROCESSOR_ID, instead. See p_online 565 // man pages, which claim the processor id set is "sparse, but 566 // not too sparse". MAX_PROCESSOR_ID is used to ensure that we eventually 567 // exit the loop. 568 // 569 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's 570 // not available on S8.0. 571 572 static bool find_processors_online(processorid_t** id_array, 573 uint* id_length) { 574 const processorid_t MAX_PROCESSOR_ID = 100000 ; 575 // Find the number of processors online. 576 *id_length = sysconf(_SC_NPROCESSORS_ONLN); 577 // Make up an array to hold their ids. 578 *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length); 579 // Processors need not be numbered consecutively. 580 long found = 0; 581 processorid_t next = 0; 582 while (found < *id_length && next < MAX_PROCESSOR_ID) { 583 processor_info_t info; 584 if (processor_info(next, &info) == 0) { 585 // NB, PI_NOINTR processors are effectively online ... 586 if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) { 587 (*id_array)[found] = next; 588 found += 1; 589 } 590 } 591 next += 1; 592 } 593 if (found < *id_length) { 594 // The loop above didn't identify the expected number of processors. 
595 // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN) 596 // and re-running the loop, above, but there's no guarantee of progress 597 // if the system configuration is in flux. Instead, we just return what 598 // we've got. Note that in the worst case find_processors_online() could 599 // return an empty set. (As a fall-back in the case of the empty set we 600 // could just return the ID of the current processor). 601 *id_length = found ; 602 } 603 604 return true; 605 } 606 607 static bool assign_distribution(processorid_t* id_array, 608 uint id_length, 609 uint* distribution, 610 uint distribution_length) { 611 // We assume we can assign processorid_t's to uint's. 612 assert(sizeof(processorid_t) == sizeof(uint), 613 "can't convert processorid_t to uint"); 614 // Quick check to see if we won't succeed. 615 if (id_length < distribution_length) { 616 return false; 617 } 618 // Assign processor ids to the distribution. 619 // Try to shuffle processors to distribute work across boards, 620 // assuming 4 processors per board. 621 const uint processors_per_board = ProcessDistributionStride; 622 // Find the maximum processor id. 623 processorid_t max_id = 0; 624 for (uint m = 0; m < id_length; m += 1) { 625 max_id = MAX2(max_id, id_array[m]); 626 } 627 // The next id, to limit loops. 628 const processorid_t limit_id = max_id + 1; 629 // Make up markers for available processors. 630 bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id); 631 for (uint c = 0; c < limit_id; c += 1) { 632 available_id[c] = false; 633 } 634 for (uint a = 0; a < id_length; a += 1) { 635 available_id[id_array[a]] = true; 636 } 637 // Step by "boards", then by "slot", copying to "assigned". 638 // NEEDS_CLEANUP: The assignment of processors should be stateful, 639 // remembering which processors have been assigned by 640 // previous calls, etc., so as to distribute several 641 // independent calls of this method. 
What we'd like is 642 // It would be nice to have an API that let us ask 643 // how many processes are bound to a processor, 644 // but we don't have that, either. 645 // In the short term, "board" is static so that 646 // subsequent distributions don't all start at board 0. 647 static uint board = 0; 648 uint assigned = 0; 649 // Until we've found enough processors .... 650 while (assigned < distribution_length) { 651 // ... find the next available processor in the board. 652 for (uint slot = 0; slot < processors_per_board; slot += 1) { 653 uint try_id = board * processors_per_board + slot; 654 if ((try_id < limit_id) && (available_id[try_id] == true)) { 655 distribution[assigned] = try_id; 656 available_id[try_id] = false; 657 assigned += 1; 658 break; 659 } 660 } 661 board += 1; 662 if (board * processors_per_board + 0 >= limit_id) { 663 board = 0; 664 } 665 } 666 if (available_id != NULL) { 667 FREE_C_HEAP_ARRAY(bool, available_id); 668 } 669 return true; 670 } 671 672 void os::set_native_thread_name(const char *name) { 673 // Not yet implemented. 674 return; 675 } 676 677 bool os::distribute_processes(uint length, uint* distribution) { 678 bool result = false; 679 // Find the processor id's of all the available CPUs. 680 processorid_t* id_array = NULL; 681 uint id_length = 0; 682 // There are some races between querying information and using it, 683 // since processor sets can change dynamically. 684 psetid_t pset = PS_NONE; 685 // Are we running in a processor set? 
686 if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) { 687 result = find_processors_in_pset(pset, &id_array, &id_length); 688 } else { 689 result = find_processors_online(&id_array, &id_length); 690 } 691 if (result == true) { 692 if (id_length >= length) { 693 result = assign_distribution(id_array, id_length, distribution, length); 694 } else { 695 result = false; 696 } 697 } 698 if (id_array != NULL) { 699 FREE_C_HEAP_ARRAY(processorid_t, id_array); 700 } 701 return result; 702 } 703 704 bool os::bind_to_processor(uint processor_id) { 705 // We assume that a processorid_t can be stored in a uint. 706 assert(sizeof(uint) == sizeof(processorid_t), 707 "can't convert uint to processorid_t"); 708 int bind_result = 709 processor_bind(P_LWPID, // bind LWP. 710 P_MYID, // bind current LWP. 711 (processorid_t) processor_id, // id. 712 NULL); // don't return old binding. 713 return (bind_result == 0); 714 } 715 716 bool os::getenv(const char* name, char* buffer, int len) { 717 char* val = ::getenv( name ); 718 if ( val == NULL 719 || strlen(val) + 1 > len ) { 720 if (len > 0) buffer[0] = 0; // return a null string 721 return false; 722 } 723 strcpy( buffer, val ); 724 return true; 725 } 726 727 728 // Return true if user is running as root. 729 730 bool os::have_special_privileges() { 731 static bool init = false; 732 static bool privileges = false; 733 if (!init) { 734 privileges = (getuid() != geteuid()) || (getgid() != getegid()); 735 init = true; 736 } 737 return privileges; 738 } 739 740 741 void os::init_system_properties_values() { 742 char arch[12]; 743 sysinfo(SI_ARCHITECTURE, arch, sizeof(arch)); 744 745 // The next steps are taken in the product version: 746 // 747 // Obtain the JAVA_HOME value from the location of libjvm[_g].so. 748 // This library should be located at: 749 // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm[_g].so. 
750 // 751 // If "/jre/lib/" appears at the right place in the path, then we 752 // assume libjvm[_g].so is installed in a JDK and we use this path. 753 // 754 // Otherwise exit with message: "Could not create the Java virtual machine." 755 // 756 // The following extra steps are taken in the debugging version: 757 // 758 // If "/jre/lib/" does NOT appear at the right place in the path 759 // instead of exit check for $JAVA_HOME environment variable. 760 // 761 // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>, 762 // then we append a fake suffix "hotspot/libjvm[_g].so" to this path so 763 // it looks like libjvm[_g].so is installed there 764 // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm[_g].so. 765 // 766 // Otherwise exit. 767 // 768 // Important note: if the location of libjvm.so changes this 769 // code needs to be changed accordingly. 770 771 // The next few definitions allow the code to be verbatim: 772 #define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n)) 773 #define free(p) FREE_C_HEAP_ARRAY(char, p) 774 #define getenv(n) ::getenv(n) 775 776 #define EXTENSIONS_DIR "/lib/ext" 777 #define ENDORSED_DIR "/lib/endorsed" 778 #define COMMON_DIR "/usr/jdk/packages" 779 780 { 781 /* sysclasspath, java_home, dll_dir */ 782 { 783 char *home_path; 784 char *dll_path; 785 char *pslash; 786 char buf[MAXPATHLEN]; 787 os::jvm_path(buf, sizeof(buf)); 788 789 // Found the full path to libjvm.so. 790 // Now cut the path to <java_home>/jre if we can. 
791 *(strrchr(buf, '/')) = '\0'; /* get rid of /libjvm.so */ 792 pslash = strrchr(buf, '/'); 793 if (pslash != NULL) 794 *pslash = '\0'; /* get rid of /{client|server|hotspot} */ 795 dll_path = malloc(strlen(buf) + 1); 796 if (dll_path == NULL) 797 return; 798 strcpy(dll_path, buf); 799 Arguments::set_dll_dir(dll_path); 800 801 if (pslash != NULL) { 802 pslash = strrchr(buf, '/'); 803 if (pslash != NULL) { 804 *pslash = '\0'; /* get rid of /<arch> */ 805 pslash = strrchr(buf, '/'); 806 if (pslash != NULL) 807 *pslash = '\0'; /* get rid of /lib */ 808 } 809 } 810 811 home_path = malloc(strlen(buf) + 1); 812 if (home_path == NULL) 813 return; 814 strcpy(home_path, buf); 815 Arguments::set_java_home(home_path); 816 817 if (!set_boot_path('/', ':')) 818 return; 819 } 820 821 /* 822 * Where to look for native libraries 823 */ 824 { 825 // Use dlinfo() to determine the correct java.library.path. 826 // 827 // If we're launched by the Java launcher, and the user 828 // does not set java.library.path explicitly on the commandline, 829 // the Java launcher sets LD_LIBRARY_PATH for us and unsets 830 // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64. In this case 831 // dlinfo returns LD_LIBRARY_PATH + crle settings (including 832 // /usr/lib), which is exactly what we want. 833 // 834 // If the user does set java.library.path, it completely 835 // overwrites this setting, and always has. 836 // 837 // If we're not launched by the Java launcher, we may 838 // get here with any/all of the LD_LIBRARY_PATH[_32|64] 839 // settings. Again, dlinfo does exactly what we want. 
840 841 Dl_serinfo _info, *info = &_info; 842 Dl_serpath *path; 843 char* library_path; 844 char *common_path; 845 int i; 846 847 // determine search path count and required buffer size 848 if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) { 849 vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror()); 850 } 851 852 // allocate new buffer and initialize 853 info = (Dl_serinfo*)malloc(_info.dls_size); 854 if (info == NULL) { 855 vm_exit_out_of_memory(_info.dls_size, 856 "init_system_properties_values info"); 857 } 858 info->dls_size = _info.dls_size; 859 info->dls_cnt = _info.dls_cnt; 860 861 // obtain search path information 862 if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) { 863 free(info); 864 vm_exit_during_initialization("dlinfo SERINFO request", dlerror()); 865 } 866 867 path = &info->dls_serpath[0]; 868 869 // Note: Due to a legacy implementation, most of the library path 870 // is set in the launcher. This was to accomodate linking restrictions 871 // on legacy Solaris implementations (which are no longer supported). 872 // Eventually, all the library path setting will be done here. 873 // 874 // However, to prevent the proliferation of improperly built native 875 // libraries, the new path component /usr/jdk/packages is added here. 876 877 // Determine the actual CPU architecture. 878 char cpu_arch[12]; 879 sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch)); 880 #ifdef _LP64 881 // If we are a 64-bit vm, perform the following translations: 882 // sparc -> sparcv9 883 // i386 -> amd64 884 if (strcmp(cpu_arch, "sparc") == 0) 885 strcat(cpu_arch, "v9"); 886 else if (strcmp(cpu_arch, "i386") == 0) 887 strcpy(cpu_arch, "amd64"); 888 #endif 889 890 // Construct the invariant part of ld_library_path. Note that the 891 // space for the colon and the trailing null are provided by the 892 // nulls included by the sizeof operator. 
893 size_t bufsize = sizeof(COMMON_DIR) + sizeof("/lib/") + strlen(cpu_arch); 894 common_path = malloc(bufsize); 895 if (common_path == NULL) { 896 free(info); 897 vm_exit_out_of_memory(bufsize, 898 "init_system_properties_values common_path"); 899 } 900 sprintf(common_path, COMMON_DIR "/lib/%s", cpu_arch); 901 902 // struct size is more than sufficient for the path components obtained 903 // through the dlinfo() call, so only add additional space for the path 904 // components explicitly added here. 905 bufsize = info->dls_size + strlen(common_path); 906 library_path = malloc(bufsize); 907 if (library_path == NULL) { 908 free(info); 909 free(common_path); 910 vm_exit_out_of_memory(bufsize, 911 "init_system_properties_values library_path"); 912 } 913 library_path[0] = '\0'; 914 915 // Construct the desired Java library path from the linker's library 916 // search path. 917 // 918 // For compatibility, it is optimal that we insert the additional path 919 // components specific to the Java VM after those components specified 920 // in LD_LIBRARY_PATH (if any) but before those added by the ld.so 921 // infrastructure. 
922 if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it 923 strcpy(library_path, common_path); 924 } else { 925 int inserted = 0; 926 for (i = 0; i < info->dls_cnt; i++, path++) { 927 uint_t flags = path->dls_flags & LA_SER_MASK; 928 if (((flags & LA_SER_LIBPATH) == 0) && !inserted) { 929 strcat(library_path, common_path); 930 strcat(library_path, os::path_separator()); 931 inserted = 1; 932 } 933 strcat(library_path, path->dls_name); 934 strcat(library_path, os::path_separator()); 935 } 936 // eliminate trailing path separator 937 library_path[strlen(library_path)-1] = '\0'; 938 } 939 940 // happens before argument parsing - can't use a trace flag 941 // tty->print_raw("init_system_properties_values: native lib path: "); 942 // tty->print_raw_cr(library_path); 943 944 // callee copies into its own buffer 945 Arguments::set_library_path(library_path); 946 947 free(common_path); 948 free(library_path); 949 free(info); 950 } 951 952 /* 953 * Extensions directories. 954 * 955 * Note that the space for the colon and the trailing null are provided 956 * by the nulls included by the sizeof operator (so actually one byte more 957 * than necessary is allocated). 958 */ 959 { 960 char *buf = (char *) malloc(strlen(Arguments::get_java_home()) + 961 sizeof(EXTENSIONS_DIR) + sizeof(COMMON_DIR) + 962 sizeof(EXTENSIONS_DIR)); 963 sprintf(buf, "%s" EXTENSIONS_DIR ":" COMMON_DIR EXTENSIONS_DIR, 964 Arguments::get_java_home()); 965 Arguments::set_ext_dirs(buf); 966 } 967 968 /* Endorsed standards default directory. 
 */
  {
    // Ownership of buf passes to Arguments; presumably freed (or retained
    // for the VM lifetime) by the callee -- TODO confirm against Arguments.
    char * buf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
    sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
    Arguments::set_endorsed_dirs(buf);
  }
  }

#undef malloc
#undef free
#undef getenv
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
#undef COMMON_DIR

}

// Trap into the debugger (BREAKPOINT is a platform-specific trap macro).
void os::breakpoint() {
  BREAKPOINT;
}

// Returns true for legacy launcher options that are no longer supported
// and should be ignored/rejected by argument processing.
bool os::obsolete_option(const JavaVMOption *option)
{
  if (!strncmp(option->optionString, "-Xt", 3)) {
    return true;
  } else if (!strncmp(option->optionString, "-Xtm", 4)) {
    return true;
  } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
    return true;
  } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
    return true;
  }
  return false;
}

// Returns true if sp lies within [stack_base - stack_size, stack_base),
// i.e. inside the given thread's stack (stacks grow downward on Solaris).
bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
  address stackStart = (address)thread->stack_base();
  address stackEnd = (address)(stackStart - (address)thread->stack_size());
  if (sp < stackStart && sp >= stackEnd ) return true;
  return false;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

// Returns an estimate of the current stack pointer. Result must be guaranteed to
// point into the calling threads stack, and be no lower than the current stack
// pointer.
address os::current_stack_pointer() {
  volatile int dummy;
  address sp = (address)&dummy + 8; // %%%% need to confirm if this is right
  return sp;
}

// thread id of the primordial thread; set during startup (outside this view).
static thread_t main_thread;

// Thread start routine for all new Java threads
extern "C" void* java_start(void* thread_addr) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  int prio;
  Thread* thread = (Thread*)thread_addr;
  OSThread* osthr = thread->osthread();

  osthr->set_lwp_id( _lwp_self() );  // Store lwp in case we are bound
  thread->_schedctl = (void *) schedctl_init () ;

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // If the creator called set priority before we started,
  // we need to call set priority now that we have an lwp.
  // Get the priority from libthread and set the priority
  // for the new Solaris lwp.
  if ( osthr->thread_id() != -1 ) {
    if ( UseThreadPriorities ) {
      thr_getprio(osthr->thread_id(), &prio);
      if (ThreadPriorityVerbose) {
        tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT ", setting priority: %d\n",
                      osthr->thread_id(), osthr->lwp_id(), prio );
      }
      os::set_native_priority(thread, prio);
    }
  } else if (ThreadPriorityVerbose) {
    warning("Can't set priority in _start routine, thread id hasn't been set\n");
  }

  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  // initialize signal mask for this thread
  os::Solaris::hotspot_sigmask(thread);

  thread->run();

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::dec code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec(&os::Solaris::_os_thread_count);
  }

  if (UseDetachedThreads) {
    thr_exit(NULL);
    ShouldNotReachHere();
  }
  return NULL;
}

// Builds an OSThread for an already-existing Solaris thread (attached or
// primordial); returns NULL on allocation failure.
static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) return NULL;

  // Store info on the Solaris thread into the OSThread
  osthread->set_thread_id(thread_id);
  osthread->set_lwp_id(_lwp_self());
  thread->_schedctl = (void *) schedctl_init () ;

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  if ( ThreadPriorityVerbose ) {
    tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
                  osthread->thread_id(), osthread->lwp_id() );
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  return osthread;
}

// Establishes the HotSpot signal mask for the given thread: saves the
// caller's mask, unblocks the VM-required signals, and routes BREAK_SIGNAL
// handling to the VM thread only (unless -Xrs/ReduceSignalUsage).
void os::Solaris::hotspot_sigmask(Thread* thread) {

  //Save caller's signal mask
  sigset_t sigmask;
  thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
  OSThread *osthread = thread->osthread();
  osthread->set_caller_sigmask(sigmask);

  thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
      thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// Wraps an externally-created (JNI-attached) thread in an OSThread and
// initializes its state and signal mask.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  OSThread* osthread = create_os_thread(thread, thr_self());
  if (osthread == NULL) {
    return false;
  }

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);
  thread->set_osthread(osthread);

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Solaris::hotspot_sigmask(thread);

  return true;
}

// Wraps the primordial thread in an OSThread (cached in _starting_thread so
// a repeated call reuses it).
bool os::create_main_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  if (_starting_thread == NULL) {
    _starting_thread = create_os_thread(thread, main_thread);
    if (_starting_thread == NULL) {
      return false;
    }
  }

  // The primordial thread is runnable from the start
  _starting_thread->set_state(RUNNABLE);

  thread->set_osthread(_starting_thread);

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Solaris::hotspot_sigmask(thread);

  return true;
}

// _T2_libthread is true if we believe we are running with the newer
// SunSoft lwp/libthread.so (2.8 patch, 2.9 default)
bool os::Solaris::_T2_libthread = false;

// Creates a new native thread via thr_create() for the given HotSpot thread.
// The thread is created THR_SUSPENDED and is resumed later via
// os::pd_start_thread(). Returns false on any failure (osthread cleaned up).
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  if ( ThreadPriorityVerbose ) {
    char *thrtyp;
    switch ( thr_type ) {
      case vm_thread:
        thrtyp = (char *)"vm";
        break;
      case cgc_thread:
        thrtyp = (char *)"cgc";
        break;
      case pgc_thread:
        thrtyp = (char *)"pgc";
        break;
      case java_thread:
        thrtyp = (char *)"java";
        break;
      case compiler_thread:
        thrtyp = (char *)"compiler";
        break;
      case watcher_thread:
        thrtyp = (char *)"watcher";
        break;
      default:
        thrtyp = (char *)"unknown";
        break;
    }
    tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
  }

  // Calculate stack size if it's not specified by caller.
  if (stack_size == 0) {
    // The default stack size 1M (2M for LP64).
    stack_size = (BytesPerWord >> 2) * K * K;

    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
      if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }
  stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
    // We got lots of threads. Check if we still have some address space left.
    // Need to be at least 5Mb of unreserved address space. We do check by
    // trying to reserve some.
    const size_t VirtualMemoryBangSize = 20*K*K;
    char* mem = os::reserve_memory(VirtualMemoryBangSize);
    if (mem == NULL) {
      delete osthread;
      return false;
    } else {
      // Release the memory again
      os::release_memory(mem, VirtualMemoryBangSize);
    }
  }

  // Setup osthread because the child thread may need it.
  thread->set_osthread(osthread);

  // Create the Solaris thread
  // explicit THR_BOUND for T2_libthread case in case
  // that assumption is not accurate, but our alternate signal stack
  // handling is based on it which must have bound threads
  thread_t tid = 0;
  long flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
               | ((UseBoundThreads || os::Solaris::T2_libthread() ||
                   (thr_type == vm_thread) ||
                   (thr_type == cgc_thread) ||
                   (thr_type == pgc_thread) ||
                   (thr_type == compiler_thread && BackgroundCompilation)) ?
                  THR_BOUND : 0);
  int status;

  // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
  //
  // On multiprocessors systems, libthread sometimes under-provisions our
  // process with LWPs. On a 30-way systems, for instance, we could have
  // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
  // to our process. This can result in under utilization of PEs.
  // I suspect the problem is related to libthread's LWP
  // pool management and to the kernel's SIGBLOCKING "last LWP parked"
  // upcall policy.
  //
  // The following code is palliative -- it attempts to ensure that our
  // process has sufficient LWPs to take advantage of multiple PEs.
  // Proper long-term cures include using user-level threads bound to LWPs
  // (THR_BOUND) or using LWP-based synchronization. Note that there is a
  // slight timing window with respect to sampling _os_thread_count, but
  // the race is benign. Also, we should periodically recompute
  // _processors_online as the min of SC_NPROCESSORS_ONLN and the
  // the number of PEs in our partition. You might be tempted to use
  // THR_NEW_LWP here, but I'd recommend against it as that could
  // result in undesirable growth of the libthread's LWP pool.
  // The fix below isn't sufficient; for instance, it doesn't take into count
  // LWPs parked on IO. It does, however, help certain CPU-bound benchmarks.
  //
  // Some pathologies this scheme doesn't handle:
  // * Threads can block, releasing the LWPs. The LWPs can age out.
  //   When a large number of threads become ready again there aren't
  //   enough LWPs available to service them. This can occur when the
  //   number of ready threads oscillates.
  // * LWPs/Threads park on IO, thus taking the LWP out of circulation.
  //
  // Finally, we should call thr_setconcurrency() periodically to refresh
  // the LWP pool and thwart the LWP age-out mechanism.
  // The "+3" term provides a little slop -- we want to slightly overprovision.

  if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
    if (!(flags & THR_BOUND)) {
      thr_setconcurrency (os::Solaris::_os_thread_count);       // avoid starvation
    }
  }
  // Although this doesn't hurt, we should warn of undefined behavior
  // when using unbound T1 threads with schedctl(). This should never
  // happen, as the compiler and VM threads are always created bound
  DEBUG_ONLY(
      if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
          (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
          ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
           (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
        warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
      }
  );


  // Mark that we don't have an lwp or thread id yet.
  // In case we attempt to set the priority before the thread starts.
  osthread->set_lwp_id(-1);
  osthread->set_thread_id(-1);

  status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
  if (status != 0) {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("os::create_thread");
    }
    thread->set_osthread(NULL);
    // Need to clean up stuff we've allocated so far
    delete osthread;
    return false;
  }

  Atomic::inc(&os::Solaris::_os_thread_count);

  // Store info on the Solaris thread into the OSThread
  osthread->set_thread_id(tid);

  // Remember that we created this thread so we can set priority on it
  osthread->set_vm_created();

  // Set the default thread priority otherwise use NormalPriority

  if ( UseThreadPriorities ) {
    thr_setprio(tid, (DefaultThreadPriority == -1) ?
                       java_to_os_priority[NormPriority] :
                       DefaultThreadPriority);
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  return true;
}

/* defined for >= Solaris 10.
This allows builds on earlier versions 1372 * of Solaris to take advantage of the newly reserved Solaris JVM signals 1373 * With SIGJVM1, SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2 1374 * and -XX:+UseAltSigs does nothing since these should have no conflict 1375 */ 1376 #if !defined(SIGJVM1) 1377 #define SIGJVM1 39 1378 #define SIGJVM2 40 1379 #endif 1380 1381 debug_only(static bool signal_sets_initialized = false); 1382 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs; 1383 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL; 1384 int os::Solaris::_SIGasync = ASYNC_SIGNAL; 1385 1386 bool os::Solaris::is_sig_ignored(int sig) { 1387 struct sigaction oact; 1388 sigaction(sig, (struct sigaction*)NULL, &oact); 1389 void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction) 1390 : CAST_FROM_FN_PTR(void*, oact.sa_handler); 1391 if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) 1392 return true; 1393 else 1394 return false; 1395 } 1396 1397 // Note: SIGRTMIN is a macro that calls sysconf() so it will 1398 // dynamically detect SIGRTMIN value for the system at runtime, not buildtime 1399 static bool isJVM1available() { 1400 return SIGJVM1 < SIGRTMIN; 1401 } 1402 1403 void os::Solaris::signal_sets_init() { 1404 // Should also have an assertion stating we are still single-threaded. 1405 assert(!signal_sets_initialized, "Already initialized"); 1406 // Fill in signals that are necessarily unblocked for all threads in 1407 // the VM. Currently, we unblock the following signals: 1408 // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden 1409 // by -Xrs (=ReduceSignalUsage)); 1410 // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all 1411 // other threads. The "ReduceSignalUsage" boolean tells us not to alter 1412 // the dispositions or masks wrt these signals. 
1413 // Programs embedding the VM that want to use the above signals for their 1414 // own purposes must, at this time, use the "-Xrs" option to prevent 1415 // interference with shutdown hooks and BREAK_SIGNAL thread dumping. 1416 // (See bug 4345157, and other related bugs). 1417 // In reality, though, unblocking these signals is really a nop, since 1418 // these signals are not blocked by default. 1419 sigemptyset(&unblocked_sigs); 1420 sigemptyset(&allowdebug_blocked_sigs); 1421 sigaddset(&unblocked_sigs, SIGILL); 1422 sigaddset(&unblocked_sigs, SIGSEGV); 1423 sigaddset(&unblocked_sigs, SIGBUS); 1424 sigaddset(&unblocked_sigs, SIGFPE); 1425 1426 if (isJVM1available) { 1427 os::Solaris::set_SIGinterrupt(SIGJVM1); 1428 os::Solaris::set_SIGasync(SIGJVM2); 1429 } else if (UseAltSigs) { 1430 os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL); 1431 os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL); 1432 } else { 1433 os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL); 1434 os::Solaris::set_SIGasync(ASYNC_SIGNAL); 1435 } 1436 1437 sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt()); 1438 sigaddset(&unblocked_sigs, os::Solaris::SIGasync()); 1439 1440 if (!ReduceSignalUsage) { 1441 if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) { 1442 sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL); 1443 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL); 1444 } 1445 if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) { 1446 sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL); 1447 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL); 1448 } 1449 if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) { 1450 sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL); 1451 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL); 1452 } 1453 } 1454 // Fill in signals that are blocked by all but the VM thread. 
1455 sigemptyset(&vm_sigs); 1456 if (!ReduceSignalUsage) 1457 sigaddset(&vm_sigs, BREAK_SIGNAL); 1458 debug_only(signal_sets_initialized = true); 1459 1460 // For diagnostics only used in run_periodic_checks 1461 sigemptyset(&check_signal_done); 1462 } 1463 1464 // These are signals that are unblocked while a thread is running Java. 1465 // (For some reason, they get blocked by default.) 1466 sigset_t* os::Solaris::unblocked_signals() { 1467 assert(signal_sets_initialized, "Not initialized"); 1468 return &unblocked_sigs; 1469 } 1470 1471 // These are the signals that are blocked while a (non-VM) thread is 1472 // running Java. Only the VM thread handles these signals. 1473 sigset_t* os::Solaris::vm_signals() { 1474 assert(signal_sets_initialized, "Not initialized"); 1475 return &vm_sigs; 1476 } 1477 1478 // These are signals that are blocked during cond_wait to allow debugger in 1479 sigset_t* os::Solaris::allowdebug_blocked_signals() { 1480 assert(signal_sets_initialized, "Not initialized"); 1481 return &allowdebug_blocked_sigs; 1482 } 1483 1484 1485 void _handle_uncaught_cxx_exception() { 1486 VMError err("An uncaught C++ exception"); 1487 err.report_and_die(); 1488 } 1489 1490 1491 // First crack at OS-specific initialization, from inside the new thread. 1492 void os::initialize_thread() { 1493 int r = thr_main() ; 1494 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ; 1495 if (r) { 1496 JavaThread* jt = (JavaThread *)Thread::current(); 1497 assert(jt != NULL,"Sanity check"); 1498 size_t stack_size; 1499 address base = jt->stack_base(); 1500 if (Arguments::created_by_java_launcher()) { 1501 // Use 2MB to allow for Solaris 7 64 bit mode. 1502 stack_size = JavaThread::stack_size_at_create() == 0 1503 ? 2048*K : JavaThread::stack_size_at_create(); 1504 1505 // There are rare cases when we may have already used more than 1506 // the basic stack size allotment before this method is invoked. 1507 // Attempt to allow for a normally sized java_stack. 
      size_t current_stack_offset = (size_t)(base - (address)&stack_size);
      stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
    } else {
      // 6269555: If we were not created by a Java launcher, i.e. if we are
      // running embedded in a native application, treat the primordial thread
      // as much like a native attached thread as possible.  This means using
      // the current stack size from thr_stksegment(), unless it is too large
      // to reliably setup guard pages.  A reasonable max size is 8MB.
      size_t current_size = current_stack_size();
      // This should never happen, but just in case....
      if (current_size == 0) current_size = 2 * K * K;
      stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
    }
    // Round the stack bottom up to a page boundary and recompute the usable size.
    address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());;
    stack_size = (size_t)(base - bottom);

    assert(stack_size > 0, "Stack size calculation problem");

    if (stack_size > jt->stack_size()) {
      NOT_PRODUCT(
        struct rlimit limits;
        getrlimit(RLIMIT_STACK, &limits);
        size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
        assert(size >= jt->stack_size(), "Stack size problem in main thread");
      )
      tty->print_cr(
        "Stack size of %d Kb exceeds current limit of %d Kb.\n"
        "(Stack sizes are rounded up to a multiple of the system page size.)\n"
        "See limit(1) to increase the stack size limit.",
        stack_size / K, jt->stack_size() / K);
      vm_exit(1);
    }
    assert(jt->stack_size() >= stack_size,
           "Attempt to map more stack than was allocated");
    jt->set_stack_size(stack_size);
  }

  // 5/22/01: Right now alternate signal stacks do not handle
  // throwing stack overflow exceptions, see bug 4463178
  // Until a fix is found for this, T2 will NOT imply alternate signal
  // stacks.
  // If using T2 libthread threads, install an alternate signal stack.
  // Because alternate stacks associate with LWPs on Solaris,
  // see sigaltstack(2), if using UNBOUND threads, or if UseBoundThreads
  // we prefer to explicitly stack bang.
  // If not using T2 libthread, but using UseBoundThreads any threads
  // (primordial thread, jni_attachCurrentThread) we do not create,
  // probably are not bound, therefore they can not have an alternate
  // signal stack. Since our stack banging code is generated and
  // is shared across threads, all threads must be bound to allow
  // using alternate signal stacks.  The alternative is to interpose
  // on _lwp_create to associate an alt sig stack with each LWP,
  // and this could be a problem when the JVM is embedded.
  // We would prefer to use alternate signal stacks with T2
  // Since there is currently no accurate way to detect T2
  // we do not. Assuming T2 when running T1 causes sig 11s or assertions
  // on installing alternate signal stacks


  // 05/09/03: removed alternate signal stack support for Solaris
  // The alternate signal stack mechanism is no longer needed to
  // handle stack overflow. This is now handled by allocating
  // guard pages (red zone) and stackbanging.
  // Initially the alternate signal stack mechanism was removed because
  // it did not work with T1 libthread. Alternate
  // signal stacks MUST have all threads bound to lwps. Applications
  // can create their own threads and attach them without their being
  // bound under T1. This is frequently the case for the primordial thread.
  // If we were ever to reenable this mechanism we would need to
  // use the dynamic check for T2 libthread.

  os::Solaris::init_thread_fpu_state();
  std::set_terminate(_handle_uncaught_cxx_exception);
}



// Free Solaris resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "os::free_thread but osthread not set");


  // We are told to free resources of the argument thread,
  // but we can only really operate on the current thread.
  // The main thread must take the VMThread down synchronously
  // before the main thread exits and frees up CodeHeap
  guarantee((Thread::current()->osthread() == osthread
     || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask
    sigset_t sigmask = osthread->caller_sigmask();
    thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
  }
  delete osthread;
}

// Resume a thread created suspended by os::create_thread() (THR_SUSPENDED).
void os::pd_start_thread(Thread* thread) {
  int status = thr_continue(thread->osthread()->thread_id());
  assert_status(status == 0, status, "thr_continue failed");
}


intx os::current_thread_id() {
  return (intx)thr_self();
}

// Cached pid; presumably set during startup (outside this view) -- 0 means
// fall back to getpid().
static pid_t _initial_pid = 0;

int os::current_process_id() {
  return (int)(_initial_pid ? _initial_pid : getpid());
}

// Allocates a Solaris TSD key and returns it as the TLS slot index.
// Exits the VM on failure.
int os::allocate_thread_local_storage() {
  // %%%       in Win32 this allocates a memory segment pointed to by a
  //           register.  Dan Stein can implement a similar feature in
  //           Solaris.  Alternatively, the VM can do the same thing
  //           explicitly: malloc some storage and keep the pointer in a
  //           register (which is part of the thread's context) (or keep it
  //           in TLS).
  // %%%       In current versions of Solaris, thr_self and TSD can
  //           be accessed via short sequences of displaced indirections.
  //           The value of thr_self is available as %g7(36).
  //           The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
  //           assuming that the current thread already has a value bound to k.
  //           It may be worth experimenting with such access patterns,
  //           and later having the parameters formally exported from a Solaris
  //           interface.  I think, however, that it will be faster to
  //           maintain the invariant that %g2 always contains the
  //           JavaThread in Java code, and have stubs simply
  //           treat %g2 as a caller-save register, preserving it in a %lN.
  thread_key_t tk;
  if (thr_keycreate( &tk, NULL ) )
    fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
                  "(%s)", strerror(errno)));
  return int(tk);
}

void os::free_thread_local_storage(int index) {
  // %%% don't think we need anything here
  // if ( pthread_key_delete((pthread_key_t) tk) )
  //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
}

#define SMALLINT 32   // libthread allocate for tsd_common is a version specific
                      // small number - point is NO swap space available
// Binds value to the given TSD slot; treats ENOMEM as an out-of-swap
// condition and any other failure as fatal.
void os::thread_local_storage_at_put(int index, void* value) {
  // %%% this is used only in threadLocalStorage.cpp
  if (thr_setspecific((thread_key_t)index, value)) {
    if (errno == ENOMEM) {
      vm_exit_out_of_memory(SMALLINT, "thr_setspecific: out of swap space");
    } else {
      fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
                    "(%s)", strerror(errno)));
    }
  } else {
    ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;
  }
}

// This function could be called before TLS is initialized, for example, when
// VM receives an async signal or when VM causes a fatal error during
// initialization. Return NULL if thr_getspecific() fails.
void* os::thread_local_storage_at(int index) {
  // %%% this is used only in threadLocalStorage.cpp
  void* r = NULL;
  return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
}


const int NANOSECS_PER_MILLISECS = 1000000;
// gethrtime can move backwards if read from one cpu and then a different cpu
// getTimeNanos is guaranteed to not move backward on Solaris
// local spinloop created as faster for a CAS on an int than
// a CAS on a 64bit jlong. Also Atomic::cmpxchg for jlong is not
// supported on sparc v8 or pre supports_cx8 intel boxes.
// oldgetTimeNanos for systems which do not support CAS on 64bit jlong
// i.e. sparc v8 and pre supports_cx8 (i486) intel boxes
inline hrtime_t oldgetTimeNanos() {
  int gotlock = LOCK_INVALID;
  hrtime_t newtime = gethrtime();

  for (;;) {
    // grab lock for max_hrtime
    int curlock = max_hrtime_lock;
    if (curlock & LOCK_BUSY)  continue;
    // NOTE(review): '!=' binds tighter than '=', so 'gotlock' receives the
    // *comparison result*, not the cmpxchg return value. Control flow is
    // still correct (we proceed only when cmpxchg observed LOCK_FREE, i.e.
    // we acquired the lock), and gotlock is never read afterwards -- but
    // the expression deserves parentheses if ever touched.
    if (gotlock = Atomic::cmpxchg(LOCK_BUSY, &max_hrtime_lock, LOCK_FREE) != LOCK_FREE) continue;
    if (newtime > max_hrtime) {
      max_hrtime = newtime;
    } else {
      newtime = max_hrtime;
    }
    // release lock
    max_hrtime_lock = LOCK_FREE;
    return newtime;
  }
}
// gethrtime can move backwards if read from one cpu and then a different cpu
// getTimeNanos is guaranteed to not move backward on Solaris
inline hrtime_t getTimeNanos() {
  if (VM_Version::supports_cx8()) {
    const hrtime_t now = gethrtime();
    // Use atomic long load since 32-bit x86 uses 2 registers to keep long.
    const hrtime_t prev = Atomic::load((volatile jlong*)&max_hrtime);
    if (now <= prev)  return prev;   // same or retrograde time;
    const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
    assert(obsv >= prev, "invariant");   // Monotonicity
    // If the CAS succeeded then we're done and return "now".
    // If the CAS failed and the observed value "obs" is >= now then
    // we should return "obs".  If the CAS failed and now > obs > prv then
    // some other thread raced this thread and installed a new value, in which case
    // we could either (a) retry the entire operation, (b) retry trying to install now
    // or (c) just return obs.  We use (c).   No loop is required although in some cases
    // we might discard a higher "now" value in deference to a slightly lower but freshly
    // installed obs value.   That's entirely benign -- it admits no new orderings compared
    // to (a) or (b) -- and greatly reduces coherence traffic.
    // We might also condition (c) on the magnitude of the delta between obs and now.
    // Avoiding excessive CAS operations to hot RW locations is critical.
    // See http://blogs.sun.com/dave/entry/cas_and_cache_trivia_invalidate
    return (prev == obsv) ? now : obsv ;
  } else {
    return oldgetTimeNanos();
  }
}

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
}

jlong os::elapsed_counter() {
  return (jlong)(getTimeNanos() - first_hrtime);
}

jlong os::elapsed_frequency() {
  return hrtime_hz;
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
// Return real, user, and system times (in seconds) measured from an
// arbitrary fixed point in the past.  Returns false if times() fails.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    // For consistency return the real time from getTimeNanos()
    // converted to seconds.
    *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);

    return true;
  }
}

// Solaris supports per-thread virtual (CPU) time via microstate accounting.
bool os::supports_vtime() { return true; }

// Turn on microstate accounting for this process by writing a PCSET
// command to the process control file.  Returns true on success.
bool os::enable_vtime() {
  int fd = ::open("/proc/self/ctl", O_WRONLY);
  if (fd == -1)
    return false;

  // A /proc control message is a command code followed by its operand.
  long cmd[] = { PCSET, PR_MSACCT };
  int res = ::write(fd, cmd, sizeof(long) * 2);
  ::close(fd);
  if (res != sizeof(long) * 2)
    return false;

  return true;
}

// Report whether microstate accounting (PR_MSACCT) is currently enabled,
// by inspecting the flags in /proc/self/status.
bool os::vtime_enabled() {
  int fd = ::open("/proc/self/status", O_RDONLY);
  if (fd == -1)
    return false;

  pstatus_t status;
  int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
  ::close(fd);
  if (res != sizeof(pstatus_t))
    return false;

  return status.pr_flags & PR_MSACCT;
}

// Virtual (CPU) time consumed by the current thread, in seconds.
double os::elapsedVTime() {
  return (double)gethrvtime() / (double)hrtime_hz;
}

// Used internally for comparisons only
// getTimeMillis guaranteed to not move backwards on Solaris
jlong getTimeMillis() {
  jlong nanotime = getTimeNanos();
  return (jlong)(nanotime / NANOSECS_PER_MILLISECS);
}

// Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
jlong os::javaTimeMillis() {
  timeval t;
  if (gettimeofday( &t, NULL) == -1)
    fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
  return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
}

// Monotonic nanosecond clock for JVM_NanoTime (see getTimeNanos above).
jlong os::javaTimeNanos() {
  return (jlong)getTimeNanos();
}

// Describe the properties of the nanosecond timer for JVMTI clients.
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
  info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
  info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
  info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
}

// Format the current local wall-clock time as "YYYY-MM-DD HH:MM:SS"
// into buf; returns buf.
char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
1863 void os::abort(bool dump_core) { 1864 os::shutdown(); 1865 if (dump_core) { 1866 #ifndef PRODUCT 1867 fdStream out(defaultStream::output_fd()); 1868 out.print_raw("Current thread is "); 1869 char buf[16]; 1870 jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id()); 1871 out.print_raw_cr(buf); 1872 out.print_raw_cr("Dumping core ..."); 1873 #endif 1874 ::abort(); // dump core (for debugging) 1875 } 1876 1877 ::exit(1); 1878 } 1879 1880 // Die immediately, no exit hook, no abort hook, no cleanup. 1881 void os::die() { 1882 _exit(-1); 1883 } 1884 1885 // unused 1886 void os::set_error_file(const char *logfile) {} 1887 1888 // DLL functions 1889 1890 const char* os::dll_file_extension() { return ".so"; } 1891 1892 // This must be hard coded because it's the system's temporary 1893 // directory not the java application's temp directory, ala java.io.tmpdir. 1894 const char* os::get_temp_directory() { return "/tmp"; } 1895 1896 static bool file_exists(const char* filename) { 1897 struct stat statbuf; 1898 if (filename == NULL || strlen(filename) == 0) { 1899 return false; 1900 } 1901 return os::stat(filename, &statbuf) == 0; 1902 } 1903 1904 void os::dll_build_name(char* buffer, size_t buflen, 1905 const char* pname, const char* fname) { 1906 const size_t pnamelen = pname ? strlen(pname) : 0; 1907 1908 // Quietly truncate on buffer overflow. Should be an error. 
1909 if (pnamelen + strlen(fname) + 10 > (size_t) buflen) { 1910 *buffer = '\0'; 1911 return; 1912 } 1913 1914 if (pnamelen == 0) { 1915 snprintf(buffer, buflen, "lib%s.so", fname); 1916 } else if (strchr(pname, *os::path_separator()) != NULL) { 1917 int n; 1918 char** pelements = split_path(pname, &n); 1919 for (int i = 0 ; i < n ; i++) { 1920 // really shouldn't be NULL but what the heck, check can't hurt 1921 if (pelements[i] == NULL || strlen(pelements[i]) == 0) { 1922 continue; // skip the empty path values 1923 } 1924 snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname); 1925 if (file_exists(buffer)) { 1926 break; 1927 } 1928 } 1929 // release the storage 1930 for (int i = 0 ; i < n ; i++) { 1931 if (pelements[i] != NULL) { 1932 FREE_C_HEAP_ARRAY(char, pelements[i]); 1933 } 1934 } 1935 if (pelements != NULL) { 1936 FREE_C_HEAP_ARRAY(char*, pelements); 1937 } 1938 } else { 1939 snprintf(buffer, buflen, "%s/lib%s.so", pname, fname); 1940 } 1941 } 1942 1943 const char* os::get_current_directory(char *buf, int buflen) { 1944 return getcwd(buf, buflen); 1945 } 1946 1947 // check if addr is inside libjvm[_g].so 1948 bool os::address_is_in_vm(address addr) { 1949 static address libjvm_base_addr; 1950 Dl_info dlinfo; 1951 1952 if (libjvm_base_addr == NULL) { 1953 dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo); 1954 libjvm_base_addr = (address)dlinfo.dli_fbase; 1955 assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm"); 1956 } 1957 1958 if (dladdr((void *)addr, &dlinfo)) { 1959 if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true; 1960 } 1961 1962 return false; 1963 } 1964 1965 typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int); 1966 static dladdr1_func_type dladdr1_func = NULL; 1967 1968 bool os::dll_address_to_function_name(address addr, char *buf, 1969 int buflen, int * offset) { 1970 Dl_info dlinfo; 1971 1972 // dladdr1_func was initialized in os::init() 1973 if (dladdr1_func){ 1974 // yes, we 
have dladdr1 1975 1976 // Support for dladdr1 is checked at runtime; it may be 1977 // available even if the vm is built on a machine that does 1978 // not have dladdr1 support. Make sure there is a value for 1979 // RTLD_DL_SYMENT. 1980 #ifndef RTLD_DL_SYMENT 1981 #define RTLD_DL_SYMENT 1 1982 #endif 1983 #ifdef _LP64 1984 Elf64_Sym * info; 1985 #else 1986 Elf32_Sym * info; 1987 #endif 1988 if (dladdr1_func((void *)addr, &dlinfo, (void **)&info, 1989 RTLD_DL_SYMENT)) { 1990 if ((char *)dlinfo.dli_saddr + info->st_size > (char *)addr) { 1991 if (buf != NULL) { 1992 if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) 1993 jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname); 1994 } 1995 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr; 1996 return true; 1997 } 1998 } 1999 if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) { 2000 if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase), 2001 dlinfo.dli_fname, buf, buflen, offset) == Decoder::no_error) { 2002 return true; 2003 } 2004 } 2005 if (buf != NULL) buf[0] = '\0'; 2006 if (offset != NULL) *offset = -1; 2007 return false; 2008 } else { 2009 // no, only dladdr is available 2010 if (dladdr((void *)addr, &dlinfo)) { 2011 if (buf != NULL) { 2012 if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) 2013 jio_snprintf(buf, buflen, dlinfo.dli_sname); 2014 } 2015 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr; 2016 return true; 2017 } else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) { 2018 if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase), 2019 dlinfo.dli_fname, buf, buflen, offset) == Decoder::no_error) { 2020 return true; 2021 } 2022 } 2023 if (buf != NULL) buf[0] = '\0'; 2024 if (offset != NULL) *offset = -1; 2025 return false; 2026 } 2027 } 2028 2029 bool os::dll_address_to_library_name(address addr, char* buf, 2030 int buflen, int* offset) { 2031 Dl_info dlinfo; 2032 2033 if (dladdr((void*)addr, &dlinfo)){ 2034 if (buf) jio_snprintf(buf, buflen, 
"%s", dlinfo.dli_fname); 2035 if (offset) *offset = addr - (address)dlinfo.dli_fbase; 2036 return true; 2037 } else { 2038 if (buf) buf[0] = '\0'; 2039 if (offset) *offset = -1; 2040 return false; 2041 } 2042 } 2043 2044 // Prints the names and full paths of all opened dynamic libraries 2045 // for current process 2046 void os::print_dll_info(outputStream * st) { 2047 Dl_info dli; 2048 void *handle; 2049 Link_map *map; 2050 Link_map *p; 2051 2052 st->print_cr("Dynamic libraries:"); st->flush(); 2053 2054 if (!dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli)) { 2055 st->print_cr("Error: Cannot print dynamic libraries."); 2056 return; 2057 } 2058 handle = dlopen(dli.dli_fname, RTLD_LAZY); 2059 if (handle == NULL) { 2060 st->print_cr("Error: Cannot print dynamic libraries."); 2061 return; 2062 } 2063 dlinfo(handle, RTLD_DI_LINKMAP, &map); 2064 if (map == NULL) { 2065 st->print_cr("Error: Cannot print dynamic libraries."); 2066 return; 2067 } 2068 2069 while (map->l_prev != NULL) 2070 map = map->l_prev; 2071 2072 while (map != NULL) { 2073 st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name); 2074 map = map->l_next; 2075 } 2076 2077 dlclose(handle); 2078 } 2079 2080 // Loads .dll/.so and 2081 // in case of error it checks if .dll/.so was built for the 2082 // same architecture as Hotspot is running on 2083 2084 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) 2085 { 2086 void * result= ::dlopen(filename, RTLD_LAZY); 2087 if (result != NULL) { 2088 // Successful loading 2089 return result; 2090 } 2091 2092 Elf32_Ehdr elf_head; 2093 2094 // Read system error message into ebuf 2095 // It may or may not be overwritten below 2096 ::strncpy(ebuf, ::dlerror(), ebuflen-1); 2097 ebuf[ebuflen-1]='\0'; 2098 int diag_msg_max_length=ebuflen-strlen(ebuf); 2099 char* diag_msg_buf=ebuf+strlen(ebuf); 2100 2101 if (diag_msg_max_length==0) { 2102 // No more space in ebuf for additional diagnostics message 2103 return NULL; 2104 } 2105 2106 2107 int 
file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK); 2108 2109 if (file_descriptor < 0) { 2110 // Can't open library, report dlerror() message 2111 return NULL; 2112 } 2113 2114 bool failed_to_read_elf_head= 2115 (sizeof(elf_head)!= 2116 (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ; 2117 2118 ::close(file_descriptor); 2119 if (failed_to_read_elf_head) { 2120 // file i/o error - report dlerror() msg 2121 return NULL; 2122 } 2123 2124 typedef struct { 2125 Elf32_Half code; // Actual value as defined in elf.h 2126 Elf32_Half compat_class; // Compatibility of archs at VM's sense 2127 char elf_class; // 32 or 64 bit 2128 char endianess; // MSB or LSB 2129 char* name; // String representation 2130 } arch_t; 2131 2132 static const arch_t arch_array[]={ 2133 {EM_386, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"}, 2134 {EM_486, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"}, 2135 {EM_IA_64, EM_IA_64, ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"}, 2136 {EM_X86_64, EM_X86_64, ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"}, 2137 {EM_SPARC, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"}, 2138 {EM_SPARC32PLUS, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"}, 2139 {EM_SPARCV9, EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"}, 2140 {EM_PPC, EM_PPC, ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"}, 2141 {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"}, 2142 {EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"} 2143 }; 2144 2145 #if (defined IA32) 2146 static Elf32_Half running_arch_code=EM_386; 2147 #elif (defined AMD64) 2148 static Elf32_Half running_arch_code=EM_X86_64; 2149 #elif (defined IA64) 2150 static Elf32_Half running_arch_code=EM_IA_64; 2151 #elif (defined __sparc) && (defined _LP64) 2152 static Elf32_Half running_arch_code=EM_SPARCV9; 2153 #elif (defined __sparc) && (!defined _LP64) 2154 static Elf32_Half running_arch_code=EM_SPARC; 2155 #elif (defined __powerpc64__) 2156 static Elf32_Half 
running_arch_code=EM_PPC64; 2157 #elif (defined __powerpc__) 2158 static Elf32_Half running_arch_code=EM_PPC; 2159 #elif (defined ARM) 2160 static Elf32_Half running_arch_code=EM_ARM; 2161 #else 2162 #error Method os::dll_load requires that one of following is defined:\ 2163 IA32, AMD64, IA64, __sparc, __powerpc__, ARM, ARM 2164 #endif 2165 2166 // Identify compatability class for VM's architecture and library's architecture 2167 // Obtain string descriptions for architectures 2168 2169 arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL}; 2170 int running_arch_index=-1; 2171 2172 for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) { 2173 if (running_arch_code == arch_array[i].code) { 2174 running_arch_index = i; 2175 } 2176 if (lib_arch.code == arch_array[i].code) { 2177 lib_arch.compat_class = arch_array[i].compat_class; 2178 lib_arch.name = arch_array[i].name; 2179 } 2180 } 2181 2182 assert(running_arch_index != -1, 2183 "Didn't find running architecture code (running_arch_code) in arch_array"); 2184 if (running_arch_index == -1) { 2185 // Even though running architecture detection failed 2186 // we may still continue with reporting dlerror() message 2187 return NULL; 2188 } 2189 2190 if (lib_arch.endianess != arch_array[running_arch_index].endianess) { 2191 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)"); 2192 return NULL; 2193 } 2194 2195 if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) { 2196 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)"); 2197 return NULL; 2198 } 2199 2200 if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) { 2201 if ( lib_arch.name!=NULL ) { 2202 ::snprintf(diag_msg_buf, diag_msg_max_length-1, 2203 " (Possible cause: can't load %s-bit .so on a %s-bit platform)", 2204 lib_arch.name, arch_array[running_arch_index].name); 2205 } else { 2206 
::snprintf(diag_msg_buf, diag_msg_max_length-1, 2207 " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)", 2208 lib_arch.code, 2209 arch_array[running_arch_index].name); 2210 } 2211 } 2212 2213 return NULL; 2214 } 2215 2216 void* os::dll_lookup(void* handle, const char* name) { 2217 return dlsym(handle, name); 2218 } 2219 2220 int os::stat(const char *path, struct stat *sbuf) { 2221 char pathbuf[MAX_PATH]; 2222 if (strlen(path) > MAX_PATH - 1) { 2223 errno = ENAMETOOLONG; 2224 return -1; 2225 } 2226 os::native_path(strcpy(pathbuf, path)); 2227 return ::stat(pathbuf, sbuf); 2228 } 2229 2230 static bool _print_ascii_file(const char* filename, outputStream* st) { 2231 int fd = ::open(filename, O_RDONLY); 2232 if (fd == -1) { 2233 return false; 2234 } 2235 2236 char buf[32]; 2237 int bytes; 2238 while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) { 2239 st->print_raw(buf, bytes); 2240 } 2241 2242 ::close(fd); 2243 2244 return true; 2245 } 2246 2247 void os::print_os_info(outputStream* st) { 2248 st->print("OS:"); 2249 2250 if (!_print_ascii_file("/etc/release", st)) { 2251 st->print("Solaris"); 2252 } 2253 st->cr(); 2254 2255 // kernel 2256 st->print("uname:"); 2257 struct utsname name; 2258 uname(&name); 2259 st->print(name.sysname); st->print(" "); 2260 st->print(name.release); st->print(" "); 2261 st->print(name.version); st->print(" "); 2262 st->print(name.machine); 2263 2264 // libthread 2265 if (os::Solaris::T2_libthread()) st->print(" (T2 libthread)"); 2266 else st->print(" (T1 libthread)"); 2267 st->cr(); 2268 2269 // rlimit 2270 st->print("rlimit:"); 2271 struct rlimit rlim; 2272 2273 st->print(" STACK "); 2274 getrlimit(RLIMIT_STACK, &rlim); 2275 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity"); 2276 else st->print("%uk", rlim.rlim_cur >> 10); 2277 2278 st->print(", CORE "); 2279 getrlimit(RLIMIT_CORE, &rlim); 2280 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity"); 2281 else st->print("%uk", rlim.rlim_cur >> 10); 
2282 2283 st->print(", NOFILE "); 2284 getrlimit(RLIMIT_NOFILE, &rlim); 2285 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity"); 2286 else st->print("%d", rlim.rlim_cur); 2287 2288 st->print(", AS "); 2289 getrlimit(RLIMIT_AS, &rlim); 2290 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity"); 2291 else st->print("%uk", rlim.rlim_cur >> 10); 2292 st->cr(); 2293 2294 // load average 2295 st->print("load average:"); 2296 double loadavg[3]; 2297 os::loadavg(loadavg, 3); 2298 st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]); 2299 st->cr(); 2300 } 2301 2302 2303 static bool check_addr0(outputStream* st) { 2304 jboolean status = false; 2305 int fd = ::open("/proc/self/map",O_RDONLY); 2306 if (fd >= 0) { 2307 prmap_t p; 2308 while(::read(fd, &p, sizeof(p)) > 0) { 2309 if (p.pr_vaddr == 0x0) { 2310 st->print("Warning: Address: 0x%x, Size: %dK, ",p.pr_vaddr, p.pr_size/1024, p.pr_mapname); 2311 st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname); 2312 st->print("Access:"); 2313 st->print("%s",(p.pr_mflags & MA_READ) ? "r" : "-"); 2314 st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-"); 2315 st->print("%s",(p.pr_mflags & MA_EXEC) ? "x" : "-"); 2316 st->cr(); 2317 status = true; 2318 } 2319 ::close(fd); 2320 } 2321 } 2322 return status; 2323 } 2324 2325 void os::pd_print_cpu_info(outputStream* st) { 2326 // Nothing to do for now. 2327 } 2328 2329 void os::print_memory_info(outputStream* st) { 2330 st->print("Memory:"); 2331 st->print(" %dk page", os::vm_page_size()>>10); 2332 st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10); 2333 st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10); 2334 st->cr(); 2335 (void) check_addr0(st); 2336 } 2337 2338 // Taken from /usr/include/sys/machsig.h Supposed to be architecture specific 2339 // but they're the same for all the solaris architectures that we support. 
2340 const char *ill_names[] = { "ILL0", "ILL_ILLOPC", "ILL_ILLOPN", "ILL_ILLADR", 2341 "ILL_ILLTRP", "ILL_PRVOPC", "ILL_PRVREG", 2342 "ILL_COPROC", "ILL_BADSTK" }; 2343 2344 const char *fpe_names[] = { "FPE0", "FPE_INTDIV", "FPE_INTOVF", "FPE_FLTDIV", 2345 "FPE_FLTOVF", "FPE_FLTUND", "FPE_FLTRES", 2346 "FPE_FLTINV", "FPE_FLTSUB" }; 2347 2348 const char *segv_names[] = { "SEGV0", "SEGV_MAPERR", "SEGV_ACCERR" }; 2349 2350 const char *bus_names[] = { "BUS0", "BUS_ADRALN", "BUS_ADRERR", "BUS_OBJERR" }; 2351 2352 void os::print_siginfo(outputStream* st, void* siginfo) { 2353 st->print("siginfo:"); 2354 2355 const int buflen = 100; 2356 char buf[buflen]; 2357 siginfo_t *si = (siginfo_t*)siginfo; 2358 st->print("si_signo=%s: ", os::exception_name(si->si_signo, buf, buflen)); 2359 char *err = strerror(si->si_errno); 2360 if (si->si_errno != 0 && err != NULL) { 2361 st->print("si_errno=%s", err); 2362 } else { 2363 st->print("si_errno=%d", si->si_errno); 2364 } 2365 const int c = si->si_code; 2366 assert(c > 0, "unexpected si_code"); 2367 switch (si->si_signo) { 2368 case SIGILL: 2369 st->print(", si_code=%d (%s)", c, c > 8 ? "" : ill_names[c]); 2370 st->print(", si_addr=" PTR_FORMAT, si->si_addr); 2371 break; 2372 case SIGFPE: 2373 st->print(", si_code=%d (%s)", c, c > 9 ? "" : fpe_names[c]); 2374 st->print(", si_addr=" PTR_FORMAT, si->si_addr); 2375 break; 2376 case SIGSEGV: 2377 st->print(", si_code=%d (%s)", c, c > 2 ? "" : segv_names[c]); 2378 st->print(", si_addr=" PTR_FORMAT, si->si_addr); 2379 break; 2380 case SIGBUS: 2381 st->print(", si_code=%d (%s)", c, c > 3 ? 
"" : bus_names[c]); 2382 st->print(", si_addr=" PTR_FORMAT, si->si_addr); 2383 break; 2384 default: 2385 st->print(", si_code=%d", si->si_code); 2386 // no si_addr 2387 } 2388 2389 if ((si->si_signo == SIGBUS || si->si_signo == SIGSEGV) && 2390 UseSharedSpaces) { 2391 FileMapInfo* mapinfo = FileMapInfo::current_info(); 2392 if (mapinfo->is_in_shared_space(si->si_addr)) { 2393 st->print("\n\nError accessing class data sharing archive." \ 2394 " Mapped file inaccessible during execution, " \ 2395 " possible disk/network problem."); 2396 } 2397 } 2398 st->cr(); 2399 } 2400 2401 // Moved from whole group, because we need them here for diagnostic 2402 // prints. 2403 #define OLDMAXSIGNUM 32 2404 static int Maxsignum = 0; 2405 static int *ourSigFlags = NULL; 2406 2407 extern "C" void sigINTRHandler(int, siginfo_t*, void*); 2408 2409 int os::Solaris::get_our_sigflags(int sig) { 2410 assert(ourSigFlags!=NULL, "signal data structure not initialized"); 2411 assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range"); 2412 return ourSigFlags[sig]; 2413 } 2414 2415 void os::Solaris::set_our_sigflags(int sig, int flags) { 2416 assert(ourSigFlags!=NULL, "signal data structure not initialized"); 2417 assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range"); 2418 ourSigFlags[sig] = flags; 2419 } 2420 2421 2422 static const char* get_signal_handler_name(address handler, 2423 char* buf, int buflen) { 2424 int offset; 2425 bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset); 2426 if (found) { 2427 // skip directory names 2428 const char *p1, *p2; 2429 p1 = buf; 2430 size_t len = strlen(os::file_separator()); 2431 while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len; 2432 jio_snprintf(buf, buflen, "%s+0x%x", p1, offset); 2433 } else { 2434 jio_snprintf(buf, buflen, PTR_FORMAT, handler); 2435 } 2436 return buf; 2437 } 2438 2439 static void print_signal_handler(outputStream* st, int sig, 2440 char* buf, size_t buflen) { 
2441 struct sigaction sa; 2442 2443 sigaction(sig, NULL, &sa); 2444 2445 st->print("%s: ", os::exception_name(sig, buf, buflen)); 2446 2447 address handler = (sa.sa_flags & SA_SIGINFO) 2448 ? CAST_FROM_FN_PTR(address, sa.sa_sigaction) 2449 : CAST_FROM_FN_PTR(address, sa.sa_handler); 2450 2451 if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) { 2452 st->print("SIG_DFL"); 2453 } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) { 2454 st->print("SIG_IGN"); 2455 } else { 2456 st->print("[%s]", get_signal_handler_name(handler, buf, buflen)); 2457 } 2458 2459 st->print(", sa_mask[0]=" PTR32_FORMAT, *(uint32_t*)&sa.sa_mask); 2460 2461 address rh = VMError::get_resetted_sighandler(sig); 2462 // May be, handler was resetted by VMError? 2463 if(rh != NULL) { 2464 handler = rh; 2465 sa.sa_flags = VMError::get_resetted_sigflags(sig); 2466 } 2467 2468 st->print(", sa_flags=" PTR32_FORMAT, sa.sa_flags); 2469 2470 // Check: is it our handler? 2471 if(handler == CAST_FROM_FN_PTR(address, signalHandler) || 2472 handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) { 2473 // It is our signal handler 2474 // check for flags 2475 if(sa.sa_flags != os::Solaris::get_our_sigflags(sig)) { 2476 st->print( 2477 ", flags was changed from " PTR32_FORMAT ", consider using jsig library", 2478 os::Solaris::get_our_sigflags(sig)); 2479 } 2480 } 2481 st->cr(); 2482 } 2483 2484 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) { 2485 st->print_cr("Signal Handlers:"); 2486 print_signal_handler(st, SIGSEGV, buf, buflen); 2487 print_signal_handler(st, SIGBUS , buf, buflen); 2488 print_signal_handler(st, SIGFPE , buf, buflen); 2489 print_signal_handler(st, SIGPIPE, buf, buflen); 2490 print_signal_handler(st, SIGXFSZ, buf, buflen); 2491 print_signal_handler(st, SIGILL , buf, buflen); 2492 print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen); 2493 print_signal_handler(st, ASYNC_SIGNAL, buf, buflen); 2494 print_signal_handler(st, BREAK_SIGNAL, buf, buflen); 2495 
print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen); 2496 print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen); 2497 print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen); 2498 print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen); 2499 print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen); 2500 } 2501 2502 static char saved_jvm_path[MAXPATHLEN] = { 0 }; 2503 2504 // Find the full path to the current module, libjvm.so or libjvm_g.so 2505 void os::jvm_path(char *buf, jint buflen) { 2506 // Error checking. 2507 if (buflen < MAXPATHLEN) { 2508 assert(false, "must use a large-enough buffer"); 2509 buf[0] = '\0'; 2510 return; 2511 } 2512 // Lazy resolve the path to current module. 2513 if (saved_jvm_path[0] != 0) { 2514 strcpy(buf, saved_jvm_path); 2515 return; 2516 } 2517 2518 Dl_info dlinfo; 2519 int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo); 2520 assert(ret != 0, "cannot locate libjvm"); 2521 realpath((char *)dlinfo.dli_fname, buf); 2522 2523 if (Arguments::created_by_gamma_launcher()) { 2524 // Support for the gamma launcher. Typical value for buf is 2525 // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so". If "/jre/lib/" appears at 2526 // the right place in the string, then assume we are installed in a JDK and 2527 // we're done. Otherwise, check for a JAVA_HOME environment variable and fix 2528 // up the path so it looks like libjvm.so is installed there (append a 2529 // fake suffix hotspot/libjvm.so). 2530 const char *p = buf + strlen(buf) - 1; 2531 for (int count = 0; p > buf && count < 5; ++count) { 2532 for (--p; p > buf && *p != '/'; --p) 2533 /* empty */ ; 2534 } 2535 2536 if (strncmp(p, "/jre/lib/", 9) != 0) { 2537 // Look for JAVA_HOME in the environment. 
2538 char* java_home_var = ::getenv("JAVA_HOME"); 2539 if (java_home_var != NULL && java_home_var[0] != 0) { 2540 char cpu_arch[12]; 2541 char* jrelib_p; 2542 int len; 2543 sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch)); 2544 #ifdef _LP64 2545 // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9. 2546 if (strcmp(cpu_arch, "sparc") == 0) { 2547 strcat(cpu_arch, "v9"); 2548 } else if (strcmp(cpu_arch, "i386") == 0) { 2549 strcpy(cpu_arch, "amd64"); 2550 } 2551 #endif 2552 // Check the current module name "libjvm.so" or "libjvm_g.so". 2553 p = strrchr(buf, '/'); 2554 assert(strstr(p, "/libjvm") == p, "invalid library name"); 2555 p = strstr(p, "_g") ? "_g" : ""; 2556 2557 realpath(java_home_var, buf); 2558 // determine if this is a legacy image or modules image 2559 // modules image doesn't have "jre" subdirectory 2560 len = strlen(buf); 2561 jrelib_p = buf + len; 2562 snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch); 2563 if (0 != access(buf, F_OK)) { 2564 snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch); 2565 } 2566 2567 if (0 == access(buf, F_OK)) { 2568 // Use current module name "libjvm[_g].so" instead of 2569 // "libjvm"debug_only("_g")".so" since for fastdebug version 2570 // we should have "libjvm.so" but debug_only("_g") adds "_g"! 
2571 len = strlen(buf); 2572 snprintf(buf + len, buflen-len, "/hotspot/libjvm%s.so", p); 2573 } else { 2574 // Go back to path of .so 2575 realpath((char *)dlinfo.dli_fname, buf); 2576 } 2577 } 2578 } 2579 } 2580 2581 strcpy(saved_jvm_path, buf); 2582 } 2583 2584 2585 void os::print_jni_name_prefix_on(outputStream* st, int args_size) { 2586 // no prefix required, not even "_" 2587 } 2588 2589 2590 void os::print_jni_name_suffix_on(outputStream* st, int args_size) { 2591 // no suffix required 2592 } 2593 2594 // This method is a copy of JDK's sysGetLastErrorString 2595 // from src/solaris/hpi/src/system_md.c 2596 2597 size_t os::lasterror(char *buf, size_t len) { 2598 2599 if (errno == 0) return 0; 2600 2601 const char *s = ::strerror(errno); 2602 size_t n = ::strlen(s); 2603 if (n >= len) { 2604 n = len - 1; 2605 } 2606 ::strncpy(buf, s, n); 2607 buf[n] = '\0'; 2608 return n; 2609 } 2610 2611 2612 // sun.misc.Signal 2613 2614 extern "C" { 2615 static void UserHandler(int sig, void *siginfo, void *context) { 2616 // Ctrl-C is pressed during error reporting, likely because the error 2617 // handler fails to abort. Let VM die immediately. 2618 if (sig == SIGINT && is_error_reported()) { 2619 os::die(); 2620 } 2621 2622 os::signal_notify(sig); 2623 // We do not need to reinstate the signal handler each time... 
2624 } 2625 } 2626 2627 void* os::user_handler() { 2628 return CAST_FROM_FN_PTR(void*, UserHandler); 2629 } 2630 2631 extern "C" { 2632 typedef void (*sa_handler_t)(int); 2633 typedef void (*sa_sigaction_t)(int, siginfo_t *, void *); 2634 } 2635 2636 void* os::signal(int signal_number, void* handler) { 2637 struct sigaction sigAct, oldSigAct; 2638 sigfillset(&(sigAct.sa_mask)); 2639 sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND; 2640 sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler); 2641 2642 if (sigaction(signal_number, &sigAct, &oldSigAct)) 2643 // -1 means registration failed 2644 return (void *)-1; 2645 2646 return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler); 2647 } 2648 2649 void os::signal_raise(int signal_number) { 2650 raise(signal_number); 2651 } 2652 2653 /* 2654 * The following code is moved from os.cpp for making this 2655 * code platform specific, which it is by its very nature. 2656 */ 2657 2658 // a counter for each possible signal value 2659 static int Sigexit = 0; 2660 static int Maxlibjsigsigs; 2661 static jint *pending_signals = NULL; 2662 static int *preinstalled_sigs = NULL; 2663 static struct sigaction *chainedsigactions = NULL; 2664 static sema_t sig_sem; 2665 typedef int (*version_getting_t)(); 2666 version_getting_t os::Solaris::get_libjsig_version = NULL; 2667 static int libjsigversion = NULL; 2668 2669 int os::sigexitnum_pd() { 2670 assert(Sigexit > 0, "signal memory not yet initialized"); 2671 return Sigexit; 2672 } 2673 2674 void os::Solaris::init_signal_mem() { 2675 // Initialize signal structures 2676 Maxsignum = SIGRTMAX; 2677 Sigexit = Maxsignum+1; 2678 assert(Maxsignum >0, "Unable to obtain max signal number"); 2679 2680 Maxlibjsigsigs = Maxsignum; 2681 2682 // pending_signals has one int per signal 2683 // The additional signal is for SIGEXIT - exit signal to signal_thread 2684 pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1)); 2685 memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1))); 2686 2687 if 
(UseSignalChaining) { 2688 chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction) 2689 * (Maxsignum + 1)); 2690 memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1))); 2691 preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1)); 2692 memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1))); 2693 } 2694 ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 )); 2695 memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1)); 2696 } 2697 2698 void os::signal_init_pd() { 2699 int ret; 2700 2701 ret = ::sema_init(&sig_sem, 0, NULL, NULL); 2702 assert(ret == 0, "sema_init() failed"); 2703 } 2704 2705 void os::signal_notify(int signal_number) { 2706 int ret; 2707 2708 Atomic::inc(&pending_signals[signal_number]); 2709 ret = ::sema_post(&sig_sem); 2710 assert(ret == 0, "sema_post() failed"); 2711 } 2712 2713 static int check_pending_signals(bool wait_for_signal) { 2714 int ret; 2715 while (true) { 2716 for (int i = 0; i < Sigexit + 1; i++) { 2717 jint n = pending_signals[i]; 2718 if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) { 2719 return i; 2720 } 2721 } 2722 if (!wait_for_signal) { 2723 return -1; 2724 } 2725 JavaThread *thread = JavaThread::current(); 2726 ThreadBlockInVM tbivm(thread); 2727 2728 bool threadIsSuspended; 2729 do { 2730 thread->set_suspend_equivalent(); 2731 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self() 2732 while((ret = ::sema_wait(&sig_sem)) == EINTR) 2733 ; 2734 assert(ret == 0, "sema_wait() failed"); 2735 2736 // were we externally suspended while we were waiting? 2737 threadIsSuspended = thread->handle_special_suspend_equivalent_condition(); 2738 if (threadIsSuspended) { 2739 // 2740 // The semaphore has been incremented, but while we were waiting 2741 // another thread suspended us. We don't want to continue running 2742 // while suspended because that would surprise the thread that 2743 // suspended us. 
2744 // 2745 ret = ::sema_post(&sig_sem); 2746 assert(ret == 0, "sema_post() failed"); 2747 2748 thread->java_suspend_self(); 2749 } 2750 } while (threadIsSuspended); 2751 } 2752 } 2753 2754 int os::signal_lookup() { 2755 return check_pending_signals(false); 2756 } 2757 2758 int os::signal_wait() { 2759 return check_pending_signals(true); 2760 } 2761 2762 //////////////////////////////////////////////////////////////////////////////// 2763 // Virtual Memory 2764 2765 static int page_size = -1; 2766 2767 // The mmap MAP_ALIGN flag is supported on Solaris 9 and later. init_2() will 2768 // clear this var if support is not available. 2769 static bool has_map_align = true; 2770 2771 int os::vm_page_size() { 2772 assert(page_size != -1, "must call os::init"); 2773 return page_size; 2774 } 2775 2776 // Solaris allocates memory by pages. 2777 int os::vm_allocation_granularity() { 2778 assert(page_size != -1, "must call os::init"); 2779 return page_size; 2780 } 2781 2782 bool os::commit_memory(char* addr, size_t bytes, bool exec) { 2783 int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; 2784 size_t size = bytes; 2785 char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot); 2786 if (res != NULL) { 2787 if (UseNUMAInterleaving) { 2788 numa_make_global(addr, bytes); 2789 } 2790 return true; 2791 } 2792 return false; 2793 } 2794 2795 bool os::commit_memory(char* addr, size_t bytes, size_t alignment_hint, 2796 bool exec) { 2797 if (commit_memory(addr, bytes, exec)) { 2798 if (UseMPSS && alignment_hint > (size_t)vm_page_size()) { 2799 // If the large page size has been set and the VM 2800 // is using large pages, use the large page size 2801 // if it is smaller than the alignment hint. This is 2802 // a case where the VM wants to use a larger alignment size 2803 // for its own reasons but still want to use large pages 2804 // (which is what matters to setting the mpss range. 
2805 size_t page_size = 0; 2806 if (large_page_size() < alignment_hint) { 2807 assert(UseLargePages, "Expected to be here for large page use only"); 2808 page_size = large_page_size(); 2809 } else { 2810 // If the alignment hint is less than the large page 2811 // size, the VM wants a particular alignment (thus the hint) 2812 // for internal reasons. Try to set the mpss range using 2813 // the alignment_hint. 2814 page_size = alignment_hint; 2815 } 2816 // Since this is a hint, ignore any failures. 2817 (void)Solaris::set_mpss_range(addr, bytes, page_size); 2818 } 2819 return true; 2820 } 2821 return false; 2822 } 2823 2824 // Uncommit the pages in a specified region. 2825 void os::free_memory(char* addr, size_t bytes) { 2826 if (madvise(addr, bytes, MADV_FREE) < 0) { 2827 debug_only(warning("MADV_FREE failed.")); 2828 return; 2829 } 2830 } 2831 2832 bool os::create_stack_guard_pages(char* addr, size_t size) { 2833 return os::commit_memory(addr, size); 2834 } 2835 2836 bool os::remove_stack_guard_pages(char* addr, size_t size) { 2837 return os::uncommit_memory(addr, size); 2838 } 2839 2840 // Change the page size in a given range. 2841 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { 2842 assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned."); 2843 assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned."); 2844 if (UseLargePages && UseMPSS) { 2845 Solaris::set_mpss_range(addr, bytes, alignment_hint); 2846 } 2847 } 2848 2849 // Tell the OS to make the range local to the first-touching LWP 2850 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { 2851 assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned."); 2852 if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) { 2853 debug_only(warning("MADV_ACCESS_LWP failed.")); 2854 } 2855 } 2856 2857 // Tell the OS that this range would be accessed from different LWPs. 
2858 void os::numa_make_global(char *addr, size_t bytes) { 2859 assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned."); 2860 if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) { 2861 debug_only(warning("MADV_ACCESS_MANY failed.")); 2862 } 2863 } 2864 2865 // Get the number of the locality groups. 2866 size_t os::numa_get_groups_num() { 2867 size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie()); 2868 return n != -1 ? n : 1; 2869 } 2870 2871 // Get a list of leaf locality groups. A leaf lgroup is group that 2872 // doesn't have any children. Typical leaf group is a CPU or a CPU/memory 2873 // board. An LWP is assigned to one of these groups upon creation. 2874 size_t os::numa_get_leaf_groups(int *ids, size_t size) { 2875 if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) { 2876 ids[0] = 0; 2877 return 1; 2878 } 2879 int result_size = 0, top = 1, bottom = 0, cur = 0; 2880 for (int k = 0; k < size; k++) { 2881 int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur], 2882 (Solaris::lgrp_id_t*)&ids[top], size - top); 2883 if (r == -1) { 2884 ids[0] = 0; 2885 return 1; 2886 } 2887 if (!r) { 2888 // That's a leaf node. 2889 assert (bottom <= cur, "Sanity check"); 2890 // Check if the node has memory 2891 if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur], 2892 NULL, 0, LGRP_RSRC_MEM) > 0) { 2893 ids[bottom++] = ids[cur]; 2894 } 2895 } 2896 top += r; 2897 cur++; 2898 } 2899 if (bottom == 0) { 2900 // Handle a situation, when the OS reports no memory available. 2901 // Assume UMA architecture. 2902 ids[0] = 0; 2903 return 1; 2904 } 2905 return bottom; 2906 } 2907 2908 // Detect the topology change. Typically happens during CPU plugging-unplugging. 
2909 bool os::numa_topology_changed() { 2910 int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie()); 2911 if (is_stale != -1 && is_stale) { 2912 Solaris::lgrp_fini(Solaris::lgrp_cookie()); 2913 Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER); 2914 assert(c != 0, "Failure to initialize LGRP API"); 2915 Solaris::set_lgrp_cookie(c); 2916 return true; 2917 } 2918 return false; 2919 } 2920 2921 // Get the group id of the current LWP. 2922 int os::numa_get_group_id() { 2923 int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID); 2924 if (lgrp_id == -1) { 2925 return 0; 2926 } 2927 const int size = os::numa_get_groups_num(); 2928 int *ids = (int*)alloca(size * sizeof(int)); 2929 2930 // Get the ids of all lgroups with memory; r is the count. 2931 int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id, 2932 (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM); 2933 if (r <= 0) { 2934 return 0; 2935 } 2936 return ids[os::random() % r]; 2937 } 2938 2939 // Request information about the page. 2940 bool os::get_page_info(char *start, page_info* info) { 2941 const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE }; 2942 uint64_t addr = (uintptr_t)start; 2943 uint64_t outdata[2]; 2944 uint_t validity = 0; 2945 2946 if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) { 2947 return false; 2948 } 2949 2950 info->size = 0; 2951 info->lgrp_id = -1; 2952 2953 if ((validity & 1) != 0) { 2954 if ((validity & 2) != 0) { 2955 info->lgrp_id = outdata[0]; 2956 } 2957 if ((validity & 4) != 0) { 2958 info->size = outdata[1]; 2959 } 2960 return true; 2961 } 2962 return false; 2963 } 2964 2965 // Scan the pages from start to end until a page different than 2966 // the one described in the info parameter is encountered. 
2967 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) { 2968 const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE }; 2969 const size_t types = sizeof(info_types) / sizeof(info_types[0]); 2970 uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT]; 2971 uint_t validity[MAX_MEMINFO_CNT]; 2972 2973 size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size); 2974 uint64_t p = (uint64_t)start; 2975 while (p < (uint64_t)end) { 2976 addrs[0] = p; 2977 size_t addrs_count = 1; 2978 while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] < (uint64_t)end) { 2979 addrs[addrs_count] = addrs[addrs_count - 1] + page_size; 2980 addrs_count++; 2981 } 2982 2983 if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) { 2984 return NULL; 2985 } 2986 2987 size_t i = 0; 2988 for (; i < addrs_count; i++) { 2989 if ((validity[i] & 1) != 0) { 2990 if ((validity[i] & 4) != 0) { 2991 if (outdata[types * i + 1] != page_expected->size) { 2992 break; 2993 } 2994 } else 2995 if (page_expected->size != 0) { 2996 break; 2997 } 2998 2999 if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) { 3000 if (outdata[types * i] != page_expected->lgrp_id) { 3001 break; 3002 } 3003 } 3004 } else { 3005 return NULL; 3006 } 3007 } 3008 3009 if (i != addrs_count) { 3010 if ((validity[i] & 2) != 0) { 3011 page_found->lgrp_id = outdata[types * i]; 3012 } else { 3013 page_found->lgrp_id = -1; 3014 } 3015 if ((validity[i] & 4) != 0) { 3016 page_found->size = outdata[types * i + 1]; 3017 } else { 3018 page_found->size = 0; 3019 } 3020 return (char*)addrs[i]; 3021 } 3022 3023 p = addrs[addrs_count - 1] + page_size; 3024 } 3025 return end; 3026 } 3027 3028 bool os::uncommit_memory(char* addr, size_t bytes) { 3029 size_t size = bytes; 3030 // Map uncommitted pages PROT_NONE so we fail early if we touch an 3031 // uncommitted page. 
Otherwise, the read/write might succeed if we 3032 // have enough swap space to back the physical page. 3033 return 3034 NULL != Solaris::mmap_chunk(addr, size, 3035 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, 3036 PROT_NONE); 3037 } 3038 3039 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) { 3040 char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0); 3041 3042 if (b == MAP_FAILED) { 3043 return NULL; 3044 } 3045 return b; 3046 } 3047 3048 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) { 3049 char* addr = requested_addr; 3050 int flags = MAP_PRIVATE | MAP_NORESERVE; 3051 3052 assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap"); 3053 3054 if (fixed) { 3055 flags |= MAP_FIXED; 3056 } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) { 3057 flags |= MAP_ALIGN; 3058 addr = (char*) alignment_hint; 3059 } 3060 3061 // Map uncommitted pages PROT_NONE so we fail early if we touch an 3062 // uncommitted page. Otherwise, the read/write might succeed if we 3063 // have enough swap space to back the physical page. 3064 return mmap_chunk(addr, bytes, flags, PROT_NONE); 3065 } 3066 3067 char* os::reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) { 3068 char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL)); 3069 3070 guarantee(requested_addr == NULL || requested_addr == addr, 3071 "OS failed to return requested mmap address."); 3072 return addr; 3073 } 3074 3075 // Reserve memory at an arbitrary address, only if that area is 3076 // available (and not reserved for something else). 3077 3078 char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) { 3079 const int max_tries = 10; 3080 char* base[max_tries]; 3081 size_t size[max_tries]; 3082 3083 // Solaris adds a gap between mmap'ed regions. 
The size of the gap 3084 // is dependent on the requested size and the MMU. Our initial gap 3085 // value here is just a guess and will be corrected later. 3086 bool had_top_overlap = false; 3087 bool have_adjusted_gap = false; 3088 size_t gap = 0x400000; 3089 3090 // Assert only that the size is a multiple of the page size, since 3091 // that's all that mmap requires, and since that's all we really know 3092 // about at this low abstraction level. If we need higher alignment, 3093 // we can either pass an alignment to this method or verify alignment 3094 // in one of the methods further up the call chain. See bug 5044738. 3095 assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block"); 3096 3097 // Since snv_84, Solaris attempts to honor the address hint - see 5003415. 3098 // Give it a try, if the kernel honors the hint we can return immediately. 3099 char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false); 3100 volatile int err = errno; 3101 if (addr == requested_addr) { 3102 return addr; 3103 } else if (addr != NULL) { 3104 unmap_memory(addr, bytes); 3105 } 3106 3107 if (PrintMiscellaneous && Verbose) { 3108 char buf[256]; 3109 buf[0] = '\0'; 3110 if (addr == NULL) { 3111 jio_snprintf(buf, sizeof(buf), ": %s", strerror(err)); 3112 } 3113 warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at " 3114 PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT 3115 "%s", bytes, requested_addr, addr, buf); 3116 } 3117 3118 // Address hint method didn't work. Fall back to the old method. 3119 // In theory, once SNV becomes our oldest supported platform, this 3120 // code will no longer be needed. 3121 // 3122 // Repeatedly allocate blocks until the block is allocated at the 3123 // right spot. Give up after max_tries. 3124 int i; 3125 for (i = 0; i < max_tries; ++i) { 3126 base[i] = reserve_memory(bytes); 3127 3128 if (base[i] != NULL) { 3129 // Is this the block we wanted? 
3130 if (base[i] == requested_addr) { 3131 size[i] = bytes; 3132 break; 3133 } 3134 3135 // check that the gap value is right 3136 if (had_top_overlap && !have_adjusted_gap) { 3137 size_t actual_gap = base[i-1] - base[i] - bytes; 3138 if (gap != actual_gap) { 3139 // adjust the gap value and retry the last 2 allocations 3140 assert(i > 0, "gap adjustment code problem"); 3141 have_adjusted_gap = true; // adjust the gap only once, just in case 3142 gap = actual_gap; 3143 if (PrintMiscellaneous && Verbose) { 3144 warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap); 3145 } 3146 unmap_memory(base[i], bytes); 3147 unmap_memory(base[i-1], size[i-1]); 3148 i-=2; 3149 continue; 3150 } 3151 } 3152 3153 // Does this overlap the block we wanted? Give back the overlapped 3154 // parts and try again. 3155 // 3156 // There is still a bug in this code: if top_overlap == bytes, 3157 // the overlap is offset from requested region by the value of gap. 3158 // In this case giving back the overlapped part will not work, 3159 // because we'll give back the entire block at base[i] and 3160 // therefore the subsequent allocation will not generate a new gap. 3161 // This could be fixed with a new algorithm that used larger 3162 // or variable size chunks to find the requested region - 3163 // but such a change would introduce additional complications. 3164 // It's rare enough that the planets align for this bug, 3165 // so we'll just wait for a fix for 6204603/5003415 which 3166 // will provide a mmap flag to allow us to avoid this business. 
3167 3168 size_t top_overlap = requested_addr + (bytes + gap) - base[i]; 3169 if (top_overlap >= 0 && top_overlap < bytes) { 3170 had_top_overlap = true; 3171 unmap_memory(base[i], top_overlap); 3172 base[i] += top_overlap; 3173 size[i] = bytes - top_overlap; 3174 } else { 3175 size_t bottom_overlap = base[i] + bytes - requested_addr; 3176 if (bottom_overlap >= 0 && bottom_overlap < bytes) { 3177 if (PrintMiscellaneous && Verbose && bottom_overlap == 0) { 3178 warning("attempt_reserve_memory_at: possible alignment bug"); 3179 } 3180 unmap_memory(requested_addr, bottom_overlap); 3181 size[i] = bytes - bottom_overlap; 3182 } else { 3183 size[i] = bytes; 3184 } 3185 } 3186 } 3187 } 3188 3189 // Give back the unused reserved pieces. 3190 3191 for (int j = 0; j < i; ++j) { 3192 if (base[j] != NULL) { 3193 unmap_memory(base[j], size[j]); 3194 } 3195 } 3196 3197 return (i < max_tries) ? requested_addr : NULL; 3198 } 3199 3200 bool os::release_memory(char* addr, size_t bytes) { 3201 size_t size = bytes; 3202 return munmap(addr, size) == 0; 3203 } 3204 3205 static bool solaris_mprotect(char* addr, size_t bytes, int prot) { 3206 assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()), 3207 "addr must be page aligned"); 3208 int retVal = mprotect(addr, bytes, prot); 3209 return retVal == 0; 3210 } 3211 3212 // Protect memory (Used to pass readonly pages through 3213 // JNI GetArray<type>Elements with empty arrays.) 3214 // Also, used for serialization page and for compressed oops null pointer 3215 // checking. 3216 bool os::protect_memory(char* addr, size_t bytes, ProtType prot, 3217 bool is_committed) { 3218 unsigned int p = 0; 3219 switch (prot) { 3220 case MEM_PROT_NONE: p = PROT_NONE; break; 3221 case MEM_PROT_READ: p = PROT_READ; break; 3222 case MEM_PROT_RW: p = PROT_READ|PROT_WRITE; break; 3223 case MEM_PROT_RWX: p = PROT_READ|PROT_WRITE|PROT_EXEC; break; 3224 default: 3225 ShouldNotReachHere(); 3226 } 3227 // is_committed is unused. 
3228 return solaris_mprotect(addr, bytes, p); 3229 } 3230 3231 // guard_memory and unguard_memory only happens within stack guard pages. 3232 // Since ISM pertains only to the heap, guard and unguard memory should not 3233 /// happen with an ISM region. 3234 bool os::guard_memory(char* addr, size_t bytes) { 3235 return solaris_mprotect(addr, bytes, PROT_NONE); 3236 } 3237 3238 bool os::unguard_memory(char* addr, size_t bytes) { 3239 return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE); 3240 } 3241 3242 // Large page support 3243 3244 // UseLargePages is the master flag to enable/disable large page memory. 3245 // UseMPSS and UseISM are supported for compatibility reasons. Their combined 3246 // effects can be described in the following table: 3247 // 3248 // UseLargePages UseMPSS UseISM 3249 // false * * => UseLargePages is the master switch, turning 3250 // it off will turn off both UseMPSS and 3251 // UseISM. VM will not use large page memory 3252 // regardless the settings of UseMPSS/UseISM. 3253 // true false false => Unless future Solaris provides other 3254 // mechanism to use large page memory, this 3255 // combination is equivalent to -UseLargePages, 3256 // VM will not use large page memory 3257 // true true false => JVM will use MPSS for large page memory. 3258 // This is the default behavior. 3259 // true false true => JVM will use ISM for large page memory. 3260 // true true true => JVM will use ISM if it is available. 3261 // Otherwise, JVM will fall back to MPSS. 3262 // Becaues ISM is now available on all 3263 // supported Solaris versions, this combination 3264 // is equivalent to +UseISM -UseMPSS. 3265 3266 static size_t _large_page_size = 0; 3267 3268 bool os::Solaris::ism_sanity_check(bool warn, size_t * page_size) { 3269 // x86 uses either 2M or 4M page, depending on whether PAE (Physical Address 3270 // Extensions) mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. Sparc 3271 // can support multiple page sizes. 
3272 3273 // Don't bother to probe page size because getpagesizes() comes with MPSS. 3274 // ISM is only recommended on old Solaris where there is no MPSS support. 3275 // Simply choose a conservative value as default. 3276 *page_size = LargePageSizeInBytes ? LargePageSizeInBytes : 3277 SPARC_ONLY(4 * M) IA32_ONLY(4 * M) AMD64_ONLY(2 * M) 3278 ARM_ONLY(2 * M); 3279 3280 // ISM is available on all supported Solaris versions 3281 return true; 3282 } 3283 3284 // Insertion sort for small arrays (descending order). 3285 static void insertion_sort_descending(size_t* array, int len) { 3286 for (int i = 0; i < len; i++) { 3287 size_t val = array[i]; 3288 for (size_t key = i; key > 0 && array[key - 1] < val; --key) { 3289 size_t tmp = array[key]; 3290 array[key] = array[key - 1]; 3291 array[key - 1] = tmp; 3292 } 3293 } 3294 } 3295 3296 bool os::Solaris::mpss_sanity_check(bool warn, size_t * page_size) { 3297 const unsigned int usable_count = VM_Version::page_size_count(); 3298 if (usable_count == 1) { 3299 return false; 3300 } 3301 3302 // Find the right getpagesizes interface. When solaris 11 is the minimum 3303 // build platform, getpagesizes() (without the '2') can be called directly. 3304 typedef int (*gps_t)(size_t[], int); 3305 gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2")); 3306 if (gps_func == NULL) { 3307 gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes")); 3308 if (gps_func == NULL) { 3309 if (warn) { 3310 warning("MPSS is not supported by the operating system."); 3311 } 3312 return false; 3313 } 3314 } 3315 3316 // Fill the array of page sizes. 3317 int n = (*gps_func)(_page_sizes, page_sizes_max); 3318 assert(n > 0, "Solaris bug?"); 3319 3320 if (n == page_sizes_max) { 3321 // Add a sentinel value (necessary only if the array was completely filled 3322 // since it is static (zeroed at initialization)). 
3323 _page_sizes[--n] = 0; 3324 DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");) 3325 } 3326 assert(_page_sizes[n] == 0, "missing sentinel"); 3327 trace_page_sizes("available page sizes", _page_sizes, n); 3328 3329 if (n == 1) return false; // Only one page size available. 3330 3331 // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and 3332 // select up to usable_count elements. First sort the array, find the first 3333 // acceptable value, then copy the usable sizes to the top of the array and 3334 // trim the rest. Make sure to include the default page size :-). 3335 // 3336 // A better policy could get rid of the 4M limit by taking the sizes of the 3337 // important VM memory regions (java heap and possibly the code cache) into 3338 // account. 3339 insertion_sort_descending(_page_sizes, n); 3340 const size_t size_limit = 3341 FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes; 3342 int beg; 3343 for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ; 3344 const int end = MIN2((int)usable_count, n) - 1; 3345 for (int cur = 0; cur < end; ++cur, ++beg) { 3346 _page_sizes[cur] = _page_sizes[beg]; 3347 } 3348 _page_sizes[end] = vm_page_size(); 3349 _page_sizes[end + 1] = 0; 3350 3351 if (_page_sizes[end] > _page_sizes[end - 1]) { 3352 // Default page size is not the smallest; sort again. 
3353 insertion_sort_descending(_page_sizes, end + 1); 3354 } 3355 *page_size = _page_sizes[0]; 3356 3357 trace_page_sizes("usable page sizes", _page_sizes, end + 1); 3358 return true; 3359 } 3360 3361 void os::large_page_init() { 3362 if (!UseLargePages) { 3363 UseISM = false; 3364 UseMPSS = false; 3365 return; 3366 } 3367 3368 // print a warning if any large page related flag is specified on command line 3369 bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) || 3370 !FLAG_IS_DEFAULT(UseISM) || 3371 !FLAG_IS_DEFAULT(UseMPSS) || 3372 !FLAG_IS_DEFAULT(LargePageSizeInBytes); 3373 UseISM = UseISM && 3374 Solaris::ism_sanity_check(warn_on_failure, &_large_page_size); 3375 if (UseISM) { 3376 // ISM disables MPSS to be compatible with old JDK behavior 3377 UseMPSS = false; 3378 _page_sizes[0] = _large_page_size; 3379 _page_sizes[1] = vm_page_size(); 3380 } 3381 3382 UseMPSS = UseMPSS && 3383 Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size); 3384 3385 UseLargePages = UseISM || UseMPSS; 3386 } 3387 3388 bool os::Solaris::set_mpss_range(caddr_t start, size_t bytes, size_t align) { 3389 // Signal to OS that we want large pages for addresses 3390 // from addr, addr + bytes 3391 struct memcntl_mha mpss_struct; 3392 mpss_struct.mha_cmd = MHA_MAPSIZE_VA; 3393 mpss_struct.mha_pagesize = align; 3394 mpss_struct.mha_flags = 0; 3395 if (memcntl(start, bytes, MC_HAT_ADVISE, 3396 (caddr_t) &mpss_struct, 0, 0) < 0) { 3397 debug_only(warning("Attempt to use MPSS failed.")); 3398 return false; 3399 } 3400 return true; 3401 } 3402 3403 char* os::reserve_memory_special(size_t size, char* addr, bool exec) { 3404 // "exec" is passed in but not used. Creating the shared image for 3405 // the code cache doesn't have an SHM_X executable permission to check. 
3406 assert(UseLargePages && UseISM, "only for ISM large pages"); 3407 3408 char* retAddr = NULL; 3409 int shmid; 3410 key_t ismKey; 3411 3412 bool warn_on_failure = UseISM && 3413 (!FLAG_IS_DEFAULT(UseLargePages) || 3414 !FLAG_IS_DEFAULT(UseISM) || 3415 !FLAG_IS_DEFAULT(LargePageSizeInBytes) 3416 ); 3417 char msg[128]; 3418 3419 ismKey = IPC_PRIVATE; 3420 3421 // Create a large shared memory region to attach to based on size. 3422 // Currently, size is the total size of the heap 3423 shmid = shmget(ismKey, size, SHM_R | SHM_W | IPC_CREAT); 3424 if (shmid == -1){ 3425 if (warn_on_failure) { 3426 jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno); 3427 warning(msg); 3428 } 3429 return NULL; 3430 } 3431 3432 // Attach to the region 3433 retAddr = (char *) shmat(shmid, 0, SHM_SHARE_MMU | SHM_R | SHM_W); 3434 int err = errno; 3435 3436 // Remove shmid. If shmat() is successful, the actual shared memory segment 3437 // will be deleted when it's detached by shmdt() or when the process 3438 // terminates. If shmat() is not successful this will remove the shared 3439 // segment immediately. 3440 shmctl(shmid, IPC_RMID, NULL); 3441 3442 if (retAddr == (char *) -1) { 3443 if (warn_on_failure) { 3444 jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err); 3445 warning(msg); 3446 } 3447 return NULL; 3448 } 3449 if ((retAddr != NULL) && UseNUMAInterleaving) { 3450 numa_make_global(retAddr, size); 3451 } 3452 return retAddr; 3453 } 3454 3455 bool os::release_memory_special(char* base, size_t bytes) { 3456 // detaching the SHM segment will also delete it, see reserve_memory_special() 3457 int rslt = shmdt(base); 3458 return rslt == 0; 3459 } 3460 3461 size_t os::large_page_size() { 3462 return _large_page_size; 3463 } 3464 3465 // MPSS allows application to commit large page memory on demand; with ISM 3466 // the entire memory region must be allocated as shared memory. 
3467 bool os::can_commit_large_page_memory() { 3468 return UseISM ? false : true; 3469 } 3470 3471 bool os::can_execute_large_page_memory() { 3472 return UseISM ? false : true; 3473 } 3474 3475 static int os_sleep(jlong millis, bool interruptible) { 3476 const jlong limit = INT_MAX; 3477 jlong prevtime; 3478 int res; 3479 3480 while (millis > limit) { 3481 if ((res = os_sleep(limit, interruptible)) != OS_OK) 3482 return res; 3483 millis -= limit; 3484 } 3485 3486 // Restart interrupted polls with new parameters until the proper delay 3487 // has been completed. 3488 3489 prevtime = getTimeMillis(); 3490 3491 while (millis > 0) { 3492 jlong newtime; 3493 3494 if (!interruptible) { 3495 // Following assert fails for os::yield_all: 3496 // assert(!thread->is_Java_thread(), "must not be java thread"); 3497 res = poll(NULL, 0, millis); 3498 } else { 3499 JavaThread *jt = JavaThread::current(); 3500 3501 INTERRUPTIBLE_NORESTART_VM_ALWAYS(poll(NULL, 0, millis), res, jt, 3502 os::Solaris::clear_interrupted); 3503 } 3504 3505 // INTERRUPTIBLE_NORESTART_VM_ALWAYS returns res == OS_INTRPT for 3506 // thread.Interrupt. 3507 3508 // See c/r 6751923. Poll can return 0 before time 3509 // has elapsed if time is set via clock_settime (as NTP does). 3510 // res == 0 if poll timed out (see man poll RETURN VALUES) 3511 // using the logic below checks that we really did 3512 // sleep at least "millis" if not we'll sleep again. 3513 if( ( res == 0 ) || ((res == OS_ERR) && (errno == EINTR))) { 3514 newtime = getTimeMillis(); 3515 assert(newtime >= prevtime, "time moving backwards"); 3516 /* Doing prevtime and newtime in microseconds doesn't help precision, 3517 and trying to round up to avoid lost milliseconds can result in a 3518 too-short delay. 
*/ 3519 millis -= newtime - prevtime; 3520 if(millis <= 0) 3521 return OS_OK; 3522 prevtime = newtime; 3523 } else 3524 return res; 3525 } 3526 3527 return OS_OK; 3528 } 3529 3530 // Read calls from inside the vm need to perform state transitions 3531 size_t os::read(int fd, void *buf, unsigned int nBytes) { 3532 INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted); 3533 } 3534 3535 size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) { 3536 INTERRUPTIBLE_RETURN_INT(::read(fd, buf, nBytes), os::Solaris::clear_interrupted); 3537 } 3538 3539 int os::sleep(Thread* thread, jlong millis, bool interruptible) { 3540 assert(thread == Thread::current(), "thread consistency check"); 3541 3542 // TODO-FIXME: this should be removed. 3543 // On Solaris machines (especially 2.5.1) we found that sometimes the VM gets into a live lock 3544 // situation with a JavaThread being starved out of a lwp. The kernel doesn't seem to generate 3545 // a SIGWAITING signal which would enable the threads library to create a new lwp for the starving 3546 // thread. We suspect that because the Watcher thread keeps waking up at periodic intervals the kernel 3547 // is fooled into believing that the system is making progress. In the code below we block the 3548 // the watcher thread while safepoint is in progress so that it would not appear as though the 3549 // system is making progress. 3550 if (!Solaris::T2_libthread() && 3551 thread->is_Watcher_thread() && SafepointSynchronize::is_synchronizing() && !Arguments::has_profile()) { 3552 // We now try to acquire the threads lock. Since this lock is held by the VM thread during 3553 // the entire safepoint, the watcher thread will line up here during the safepoint. 3554 Threads_lock->lock_without_safepoint_check(); 3555 Threads_lock->unlock(); 3556 } 3557 3558 if (thread->is_Java_thread()) { 3559 // This is a JavaThread so we honor the _thread_blocked protocol 3560 // even for sleeps of 0 milliseconds. 
// This was originally done
    // as a workaround for bug 4338139. However, now we also do it
    // to honor the suspend-equivalent protocol.

    JavaThread *jt = (JavaThread *) thread;
    // Transition to _thread_blocked for the duration of the sleep so
    // safepoints are not held up by a sleeping Java thread.
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    int ret_code;
    if (millis <= 0) {
      // A sleep of zero is treated as a plain yield.
      thr_yield();
      ret_code = 0;
    } else {
      // The original sleep() implementation did not create an
      // OSThreadWaitState helper for sleeps of 0 milliseconds.
      // I'm preserving that decision for now.
      OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);

      ret_code = os_sleep(millis, interruptible);
    }

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();

    return ret_code;
  }

  // non-JavaThread from this point on:
  // (no thread-state transition or suspend-equivalent bookkeeping needed)

  if (millis <= 0) {
    thr_yield();
    return 0;
  }

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);

  return os_sleep(millis, interruptible);
}

// Minimal one-millisecond sleep used by spin/backoff code paths.
int os::naked_sleep() {
  // %% make the sleep time an integer flag. for now use 1 millisec.
  return os_sleep(1, false);
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    ::sleep(100);   // ... 100 seconds at a time
  }
}

// Used to convert frequent JVM_Yield() to nops.
// Returns true (i.e. "don't actually yield") when DontYieldALot is set
// and less than DontYieldALotInterval milliseconds have elapsed since the
// last time this function let a yield through.
bool os::dont_yield() {
  if (DontYieldALot) {
    static hrtime_t last_time = 0;
    hrtime_t diff = getTimeNanos() - last_time;

    // DontYieldALotInterval is in milliseconds; diff is in nanoseconds.
    if (diff < DontYieldALotInterval * 1000000)
      return true;

    last_time += diff;

    return false;
  }
  else {
    return false;
  }
}

// Caveat: Solaris os::yield() causes a thread-state transition whereas
// the linux and win32 implementations do not. This should be checked.

void os::yield() {
  // Yields to all threads with same or greater priority
  // (implemented as a zero-millisecond interruptible-free sleep,
  // which goes through the ThreadBlockInVM transition above).
  os::sleep(Thread::current(), 0, false);
}

// Note that yield semantics are defined by the scheduling class to which
// the thread currently belongs. Typically, yield will _not yield to
// other equal or higher priority threads that reside on the dispatch queues
// of other CPUs.

os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }


// On Solaris we found that yield_all doesn't always yield to all other threads.
// There have been cases where there is a thread ready to execute but it doesn't
// get an lwp as the VM thread continues to spin with sleeps of 1 millisecond.
// The 1 millisecond wait doesn't seem long enough for the kernel to issue a
// SIGWAITING signal which will cause a new lwp to be created. So we count the
// number of times yield_all is called in the one loop and increase the sleep
// time after 8 attempts. If this fails too we increase the concurrency level
// so that the starving thread would get an lwp

void os::yield_all(int attempts) {
  // Yields to all threads, including threads with lower priorities
  if (attempts == 0) {
    os::sleep(Thread::current(), 1, false);
  } else {
    int iterations = attempts % 30;
    if (iterations == 0 && !os::Solaris::T2_libthread()) {
      // thr_setconcurrency and _getconcurrency make sense only under T1.
      // Ask libthread for one more lwp if we appear to be starving
      // (fewer lwps than live threads plus a little headroom).
      int noofLWPS = thr_getconcurrency();
      if (noofLWPS < (Threads::number_of_threads() + 2)) {
        thr_setconcurrency(thr_getconcurrency() + 1);
      }
    } else if (iterations < 25) {
      os::sleep(Thread::current(), 1, false);
    } else {
      // Back off harder after repeated attempts within the same cycle.
      os::sleep(Thread::current(), 10, false);
    }
  }
}

// Called from the tight loops to possibly influence time-sharing heuristics
void os::loop_breaker(int attempts) {
  os::yield_all(attempts);
}


// Interface for setting lwp priorities.  If we are using T2 libthread,
// which forces the use of BoundThreads or we manually set UseBoundThreads,
// all of our threads will be assigned to real lwp's.  Using the thr_setprio
// function is meaningless in this mode so we must adjust the real lwp's priority
// The routines below implement the getting and setting of lwp priorities.
//
// Note: There are three priority scales used on Solaris.  Java priotities
//       which range from 1 to 10, libthread "thr_setprio" scale which range
//       from 0 to 127, and the current scheduling class of the process we
//       are running in.  This is typically from -60 to +60.
//       The setting of the lwp priorities in done after a call to thr_setprio
//       so Java priorities are mapped to libthread priorities and we map from
//       the latter to lwp priorities.
// We don't keep priorities stored in
// Java priorities since some of our worker threads want to set priorities
// higher than all Java threads.
//
// For related information:
// (1)  man -s 2 priocntl
// (2)  man -s 4 priocntl
// (3)  man dispadmin
// =    librt.so
// =    libthread/common/rtsched.c - thrp_setlwpprio().
// =    ps -cL <pid> ... to validate priority.
// =    sched_get_priority_min and _max
//              pthread_create
//              sched_setparam
//              pthread_setschedparam
//
// Assumptions:
// +    We assume that all threads in the process belong to the same
//      scheduling class.   IE. an homogenous process.
// +    Must be root or in IA group to change change "interactive" attribute.
//      Priocntl() will fail silently.  The only indication of failure is when
//      we read-back the value and notice that it hasn't changed.
// +    Interactive threads enter the runq at the head, non-interactive at the tail.
// +    For RT, change timeslice as well.  Invariant:
//      constant "priority integral"
//      Konst == TimeSlice * (60-Priority)
//      Given a priority, compute appropriate timeslice.
// +    Higher numerical values have higher priority.

// sched class attributes
// Per-scheduling-class priority range as reported by priocntl(PC_GETCID).
typedef struct {
  int   schedPolicy;              // classID
  int   maxPrio;
  int   minPrio;
} SchedInfo;


// Priority limits for the TS (time-sharing), IA (interactive) and
// RT (real-time) scheduling classes; filled in by lwp_priocntl_init().
static SchedInfo tsLimits, iaLimits, rtLimits;

#ifdef ASSERT
// When non-zero, set_lwp_priority() reads back the priority it just set
// and compares, because priocntl can fail silently (see Assumptions above).
static int ReadBackValidate = 1;
#endif
// Scheduling class id of this process, and the priority range usable
// within it; established once by lwp_priocntl_init().
static int myClass     = 0;
static int myMin       = 0;
static int myMax       = 0;
static int myCur       = 0;
// Set to true only after lwp_priocntl_init() succeeds; guards all
// subsequent priority changes.
static bool priocntl_enable = false;


// Call the version of priocntl suitable for all supported versions
// of Solaris. We need to call through this wrapper so that we can
// build on Solaris 9 and run on Solaris 8, 9 and 10.
//
// This code should be removed if we ever stop supporting Solaris 8
// and earlier releases.
static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg);
typedef long (*priocntl_type)(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg);
// All priocntl calls go through this pointer; it initially points at the
// stub below, which resolves the real function on first use.
static priocntl_type priocntl_ptr = priocntl_stub;

// Stub to set the value of the real pointer, and then call the real
// function.

static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg) {
  // Try Solaris 8- name only.
  priocntl_type tmp = (priocntl_type)dlsym(RTLD_DEFAULT, "__priocntl");
  guarantee(tmp != NULL, "priocntl function not found.");
  // Cache the resolved function so subsequent calls bypass this stub.
  priocntl_ptr = tmp;
  return (*priocntl_ptr)(PC_VERSION, idtype, id, cmd, arg);
}


// lwp_priocntl_init
//
// Try to determine the priority scale for our process.
//
// Fills in tsLimits/iaLimits/rtLimits and myClass/myMin/myMax, and sets
// priocntl_enable on success.  Only does the priocntl queries when bound
// threads are in use; otherwise it just installs the policy-1 priority table.
//
// Return errno or 0 if OK.
//
static
int     lwp_priocntl_init ()
{
  int rslt;
  pcinfo_t ClassInfo;
  pcparms_t ParmInfo;
  int i;

  if (!UseThreadPriorities) return 0;

  // We are using Bound threads, we need to determine our priority ranges
  if (os::Solaris::T2_libthread() || UseBoundThreads) {
    // If ThreadPriorityPolicy is 1, switch tables
    if (ThreadPriorityPolicy == 1) {
      for (i = 0 ; i < MaxPriority+1; i++)
        os::java_to_os_priority[i] = prio_policy1[i];
    }
  }
  // Not using Bound Threads, set to ThreadPolicy 1
  else {
    for ( i = 0 ; i < MaxPriority+1; i++ ) {
      os::java_to_os_priority[i] = prio_policy1[i];
    }
    // lwp priorities are irrelevant for unbound threads; nothing more to do.
    return 0;
  }

  // Get IDs for a set of well-known scheduling classes.
  // TODO-FIXME: GETCLINFO returns the current # of classes in the
  // the system.  We should have a loop that iterates over the
  // classID values, which are known to be "small" integers.

  strcpy(ClassInfo.pc_clname, "TS");
  ClassInfo.pc_cid = -1;
  rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
  tsLimits.schedPolicy = ClassInfo.pc_cid;
  tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
  // TS user priorities are symmetric around zero.
  tsLimits.minPrio = -tsLimits.maxPrio;

  strcpy(ClassInfo.pc_clname, "IA");
  ClassInfo.pc_cid = -1;
  rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
  iaLimits.schedPolicy = ClassInfo.pc_cid;
  iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
  iaLimits.minPrio = -iaLimits.maxPrio;

  strcpy(ClassInfo.pc_clname, "RT");
  ClassInfo.pc_cid = -1;
  rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
  rtLimits.schedPolicy = ClassInfo.pc_cid;
  rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
  // RT priorities start at zero, unlike TS/IA.
  rtLimits.minPrio = 0;

  // Query our "current" scheduling class.
  // This will normally be IA,TS or, rarely, RT.
  memset (&ParmInfo, 0, sizeof(ParmInfo));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = (*priocntl_ptr) (PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo );
  if ( rslt < 0 ) return errno;
  myClass = ParmInfo.pc_cid;

  // We now know our scheduling classId, get specific information
  // the class.
  ClassInfo.pc_cid = myClass;
  ClassInfo.pc_clname[0] = 0;
  rslt = (*priocntl_ptr) (PC_VERSION, (idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo );
  if ( rslt < 0 ) return errno;

  if (ThreadPriorityVerbose)
    tty->print_cr ("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);

  memset(&ParmInfo, 0, sizeof(pcparms_t));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = (*priocntl_ptr)(PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
  if (rslt < 0) return errno;

  // Derive the usable [myMin..myMax] range from the class we run in,
  // clamped by any per-process user priority limit (uprilim).
  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
    myMin = rtLimits.minPrio;
    myMax = rtLimits.maxPrio;
  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
    iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
    myMin = iaLimits.minPrio;
    myMax = iaLimits.maxPrio;
    myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
    tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
    myMin = tsLimits.minPrio;
    myMax = tsLimits.maxPrio;
    myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
  } else {
    // No clue - punt
    if (ThreadPriorityVerbose)
      tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
    return EINVAL;      // no clue, punt
  }

  if (ThreadPriorityVerbose)
    tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);

  priocntl_enable = true;  // Enable changing priorities
  return 0;
}

// Accessors for the class-specific parameter area of a pcparms_t.
#define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
#define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
#define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))


// scale_to_lwp_priority
//
// Convert from the libthread "thr_setprio" scale to our current
// lwp scheduling class scale.
//
// Linearly maps x in [0..127] (the thr_setprio scale) onto [rMin..rMax].
static
int     scale_to_lwp_priority (int rMin, int rMax, int x)
{
  int v;

  if (x == 127) return rMax;            // avoid round-down
  v = (((x*(rMax-rMin)))/128)+rMin;
  return v;
}


// set_lwp_priority
//
// Set the priority of the lwp.  This call should only be made
// when using bound threads (T2 threads are bound by default).
//
// newPrio is on the libthread 0..127 scale; it is rescaled into the
// lwp's current scheduling class (RT, IA or TS) before being applied
// via priocntl(PC_SETPARMS).  Returns 0 on success or an errno value.
//
int     set_lwp_priority (int ThreadID, int lwpid, int newPrio )
{
  int rslt;
  int Actual, Expected, prv;
  pcparms_t ParmInfo;                   // for GET-SET
#ifdef ASSERT
  pcparms_t ReadBack;                   // for readback
#endif

  // Set priority via PC_GETPARMS, update, PC_SETPARMS
  // Query current values.
  // TODO: accelerate this by eliminating the PC_GETPARMS call.
  // Cache "pcparms_t" in global ParmCache.
  // TODO: elide set-to-same-value

  // If something went wrong on init, don't change priorities.
  if ( !priocntl_enable ) {
    if (ThreadPriorityVerbose)
      tty->print_cr("Trying to set priority but init failed, ignoring");
    return EINVAL;
  }

  // If lwp hasn't started yet, just return
  // the _start routine will call us again.
  if ( lwpid <= 0 ) {
    if (ThreadPriorityVerbose) {
      tty->print_cr ("deferring the set_lwp_priority of thread " INTPTR_FORMAT " to %d, lwpid not set",
                     ThreadID, newPrio);
    }
    return 0;
  }

  if (ThreadPriorityVerbose) {
    tty->print_cr ("set_lwp_priority(" INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
                   ThreadID, lwpid, newPrio);
  }

  memset(&ParmInfo, 0, sizeof(pcparms_t));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
  if (rslt < 0) return errno;

  // Rewrite only the priority field for whichever class this lwp is in;
  // all other class parameters are explicitly marked "no change".
  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
    rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
    rtInfo->rt_pri     = scale_to_lwp_priority (rtLimits.minPrio, rtLimits.maxPrio, newPrio);
    rtInfo->rt_tqsecs  = RT_NOCHANGE;
    rtInfo->rt_tqnsecs = RT_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
    }
  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
    iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
    int maxClamped     = MIN2(iaLimits.maxPrio, (int)iaInfo->ia_uprilim);
    iaInfo->ia_upri    = scale_to_lwp_priority(iaLimits.minPrio, maxClamped, newPrio);
    iaInfo->ia_uprilim = IA_NOCHANGE;
    iaInfo->ia_mode    = IA_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr ("IA: [%d...%d] %d->%d\n",
                     iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
    }
  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
    tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
    int maxClamped     = MIN2(tsLimits.maxPrio, (int)tsInfo->ts_uprilim);
    prv                = tsInfo->ts_upri;
    tsInfo->ts_upri    = scale_to_lwp_priority(tsLimits.minPrio, maxClamped, newPrio);
    // NOTE(review): IA_NOCHANGE used in the TS branch — presumably the two
    // NOCHANGE constants share a value; confirm against sys/tspriocntl.h.
    tsInfo->ts_uprilim = IA_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr ("TS: %d [%d...%d] %d->%d\n",
                     prv, tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
    }
    // Skip the PC_SETPARMS call when the TS priority is already correct.
    if (prv == tsInfo->ts_upri) return 0;
  } else {
    if ( ThreadPriorityVerbose ) {
      tty->print_cr ("Unknown scheduling class\n");
    }
    return EINVAL;    // no clue, punt
  }

  rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
  if (ThreadPriorityVerbose && rslt) {
    tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
  }
  if (rslt < 0) return errno;

#ifdef ASSERT
  // Sanity check: read back what we just attempted to set.
  // In theory it could have changed in the interim ...
  //
  // The priocntl system call is tricky.
  // Sometimes it'll validate the priority value argument and
  // return EINVAL if unhappy.  At other times it fails silently.
  // Readbacks are prudent.

  if (!ReadBackValidate) return 0;

  memset(&ReadBack, 0, sizeof(pcparms_t));
  ReadBack.pc_cid = PC_CLNULL;
  rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
  assert(rslt >= 0, "priocntl failed");
  Actual = Expected = 0xBAD;
  assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
    Actual   = RTPRI(ReadBack)->rt_pri;
    Expected = RTPRI(ParmInfo)->rt_pri;
  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
    Actual   = IAPRI(ReadBack)->ia_upri;
    Expected = IAPRI(ParmInfo)->ia_upri;
  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
    Actual   = TSPRI(ReadBack)->ts_upri;
    Expected = TSPRI(ParmInfo)->ts_upri;
  } else {
    if ( ThreadPriorityVerbose ) {
      tty->print_cr("set_lwp_priority: unexpected class in readback: %d\n", ParmInfo.pc_cid);
    }
  }

  // A mismatch here is only reported, not treated as an error, since
  // priocntl can legitimately clamp or ignore the requested value.
  if (Actual != Expected) {
    if ( ThreadPriorityVerbose ) {
      tty->print_cr ("set_lwp_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
                     lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
    }
  }
#endif

  return 0;
}



// Solaris only
// gives access to 128 real priorities at a time,
// so we expand Java's ten to fill this range.  This would be better
// if we dynamically adjusted relative priorities.
//
// The ThreadPriorityPolicy option allows us to select 2 different
// priority scales.
//
// ThreadPriorityPolicy=0
// Since the Solaris' default priority is MaximumPriority, we do not
// set a priority lower than Max unless a priority lower than
// NormPriority is requested.
//
// ThreadPriorityPolicy=1
// This mode causes the priority table to get filled with
// linear values.  NormPriority get's mapped to 50% of the
// Maximum priority an so on.  This will cause VM threads
// to get unfair treatment against other Solaris processes
// which do not explicitly alter their thread priorities.
//

// Maps Java priorities 1..10 onto the libthread 0..127 scale
// (index 0 is a sentinel and must never be used).
int os::java_to_os_priority[MaxPriority + 1] = {
  -99999,         // 0 Entry should never be used

  0,              // 1 MinPriority
  32,             // 2
  64,             // 3

  96,             // 4
  127,            // 5 NormPriority
  127,            // 6

  127,            // 7
  127,            // 8
  127,            // 9 NearMaxPriority

  127             // 10 MaxPriority
};


// Apply newpri (already on the 0..127 scale) to the thread via
// thr_setprio, and additionally to its lwp when threads are bound.
OSReturn os::set_native_priority(Thread* thread, int newpri) {
  assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
  if ( !UseThreadPriorities ) return OS_OK;
  int status = thr_setprio(thread->osthread()->thread_id(), newpri);
  // With T2 libthread (or explicit bound threads created by the VM) the
  // thr_setprio value is not enough: the underlying lwp must be adjusted too.
  if ( os::Solaris::T2_libthread() || (UseBoundThreads && thread->osthread()->is_vm_created()) )
    status |= (set_lwp_priority (thread->osthread()->thread_id(),
                    thread->osthread()->lwp_id(), newpri ));
  return (status == 0) ? OS_OK : OS_ERR;
}


// Retrieve the thread's libthread priority into *priority_ptr;
// reports NormalPriority when priorities are disabled.
OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
  int p;
  if ( !UseThreadPriorities ) {
    *priority_ptr = NormalPriority;
    return OS_OK;
  }
  int status = thr_getprio(thread->osthread()->thread_id(), &p);
  if (status != 0) {
    return OS_ERR;
  }
  *priority_ptr = p;
  return OS_OK;
}


// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {
  schedctl_start(schedctl_init());
}

// Set the thread's interrupted flag and wake it from any interruptible
// wait (os::sleep, JSR-166 park, Object.wait), then deliver SIGinterrupt
// so blocking system calls return EINTR.
void os::interrupt(Thread* thread) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  int isInterrupted = osthread->interrupted();
  if (!isInterrupted) {
    osthread->set_interrupted(true);
    // Publish the flag before the wakeups below.
    OrderAccess::fence();
    // os::sleep() is implemented with either poll (NULL,0,timeout) or
    // by parking on _SleepEvent.  If the former, thr_kill will unwedge
    // the sleeper by SIGINTR, otherwise the unpark() will wake the sleeper.
    ParkEvent * const slp = thread->_SleepEvent ;
    if (slp != NULL) slp->unpark() ;
  }

  // For JSR166:  unpark after setting status but before thr_kill -dl
  if (thread->is_Java_thread()) {
    ((JavaThread*)thread)->parker()->unpark();
  }

  // Handle interruptible wait() ...
  ParkEvent * const ev = thread->_ParkEvent ;
  if (ev != NULL) ev->unpark() ;

  // When events are used everywhere for os::sleep, then this thr_kill
  // will only be needed if UseVMInterruptibleIO is true.

  if (!isInterrupted) {
    int status = thr_kill(osthread->thread_id(), os::Solaris::SIGinterrupt());
    assert_status(status == 0, status, "thr_kill");

    // Bump thread interruption counter
    RuntimeService::record_thread_interrupt_signaled_count();
  }
}


// Read (and optionally clear) the thread's interrupted flag.
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  bool res = osthread->interrupted();

  // NOTE that since there is no "lock" around these two operations,
  // there is the possibility that the interrupted flag will be
  // "false" but that the interrupt event will be set. This is
  // intentional. The effect of this is that Object.wait() will appear
  // to have a spurious wakeup, which is not harmful, and the
  // possibility is so rare that it is not worth the added complexity
  // to add yet another lock.  It has also been recommended not to put
  // the interrupted flag into the os::Solaris::Event structure,
  // because it hides the issue.
  if (res && clear_interrupted) {
    osthread->set_interrupted(false);
  }
  return res;
}


// No per-OS statistics are collected on Solaris.
void os::print_statistics() {
}

// Print a framed title/message on the error stream and block reading
// stdin for a y/Y answer.  Used by the fatal-error handler.
int os::message_box(const char* title, const char* message) {
  int i;
  fdStream err(defaultStream::error_fd());
  for (i = 0; i < 78; i++) err.print_raw("=");
  err.cr();
  err.print_raw_cr(title);
  for (i = 0; i < 78; i++) err.print_raw("-");
  err.cr();
  err.print_raw_cr(message);
  for (i = 0; i < 78; i++) err.print_raw("=");
  err.cr();

  char buf[16];
  // Prevent process from exiting upon "read error" without consuming all CPU
  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }

  return buf[0] == 'y' || buf[0] == 'Y';
}

// A lightweight implementation that does not suspend the target thread and
// thus returns only a hint. Used for profiling only!
ExtendedPC os::get_thread_pc(Thread* thread) {
  // Make sure that it is called by the watcher and the Threads lock is owned.
  assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
  // For now, is only used to profile the VM Thread
  assert(thread->is_VM_thread(), "Can only be called for VMThread");
  ExtendedPC epc;

  // Interrupt the target and wait (bounded) for it to report its pc
  // through the callback.
  GetThreadPC_Callback  cb(ProfileVM_lock);
  OSThread *osthread = thread->osthread();
  const int time_to_wait = 400; // 400ms wait for initial response
  int status = cb.interrupt(thread, time_to_wait);

  if (cb.is_done() ) {
    epc = cb.addr();
  } else {
    DEBUG_ONLY(tty->print_cr("Failed to get pc for thread: %d got %d status",
                              osthread->thread_id(), status););
    // epc is already NULL
  }
  return epc;
}


// This does not do anything on Solaris. This is basically a hook for being
// able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
// No structured exception handling on Solaris: just invoke the Java call
// function directly.
void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
  f(value, method, args, thread);
}

// This routine may be used by user applications as a "hook" to catch signals.
// The user-defined signal handler must pass unrecognized signals to this
// routine, and if it returns true (non-zero), then the signal handler must
// return immediately.  If the flag "abort_if_unrecognized" is true, then this
// routine will never retun false (zero), but instead will execute a VM panic
// routine kill the process.
//
// If this routine returns false, it is OK to call it again.  This allows
// the user-defined signal handler to perform checks either before or after
// the VM performs its own checks.  Naturally, the user code would be making
// a serious error if it tried to handle an exception (such as a null check
// or breakpoint) that the VM was generating for its own correct operation.
//
// This routine may recognize any of the following kinds of signals:
// SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
// os::Solaris::SIGasync
// It should be consulted by handlers for any of those signals.
// It explicitly does not recognize os::Solaris::SIGinterrupt
//
// The caller of this routine must pass in the three arguments supplied
// to the function referred to in the "sa_sigaction" (not the "sa_handler")
// field of the structure passed to sigaction().  This routine assumes that
// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
//
// Note that the VM will print warnings if it detects conflicting signal
// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
//
extern "C" JNIEXPORT int
JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext,
                          int abort_if_unrecognized);


// The VM's primary handler for the hot signals installed by
// set_signal_handler(); forwards everything to JVM_handle_solaris_signal
// with abort_if_unrecognized=true.
void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
  JVM_handle_solaris_signal(sig, info, ucVoid, true);
}

/* Do not delete - if guarantee is ever removed,  a signal handler (even empty)
   is needed to provoke threads blocked on IO to return an EINTR
   Note: this explicitly does NOT call JVM_handle_solaris_signal and
   does NOT participate in signal chaining due to requirement for
   NOT setting SA_RESTART to make EINTR work. */
extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
   // Chaining the VM's interrupt signal is never allowed; if a chained
   // handler is found for it, initialization is aborted with a hint.
   if (UseSignalChaining) {
      struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
      if (actp && actp->sa_handler) {
        vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
      }
   }
}

// This boolean allows users to forward their own non-matching signals
// to JVM_handle_solaris_signal, harmlessly.
bool os::Solaris::signal_handlers_are_installed = false;

// For signal-chaining
// libjsig support: when the libjsig interposer library is loaded it
// records the application's original handlers; we query them through
// its JVM_get_signal_action entry point.
bool os::Solaris::libjsig_is_loaded = false;
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Solaris::get_signal_action = NULL;

// Return the handler that was installed before ours for 'sig':
// first ask libjsig (if loaded and the signal is within its range),
// then fall back to the handler we saved ourselves at install time.
struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
  struct sigaction *actp = NULL;

  if ((libjsig_is_loaded)  && (sig <= Maxlibjsigsigs)) {
    // Retrieve the old signal handler from libjsig
    actp = (*get_signal_action)(sig);
  }
  if (actp == NULL) {
    // Retrieve the preinstalled signal handler from jvm
    actp = get_preinstalled_handler(sig);
  }

  return actp;
}

// Invoke the application's original ("chained") handler for 'sig',
// honoring its sa_flags (SA_SIGINFO, SA_NODEFER, SA_RESETHAND) and
// signal mask as sigaction() would.  Returns true when the signal was
// handled (including SIG_IGN); false for SIG_DFL so the VM treats it
// as an unexpected exception instead.
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automaticlly block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand;
    sa_sigaction_t sa;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    thr_sigsetmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}

// Called from JVM_handle_solaris_signal for signals the VM does not
// recognize; returns true if a chained application handler consumed it.
bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
  bool chained = false;
  // signal-chaining
  if (UseSignalChaining) {
    struct sigaction *actp = get_chained_signal_action(sig);
    if (actp != NULL) {
      chained = call_chained_handler(actp, sig, siginfo, context);
    }
  }
  return chained;
}

// Return the application handler saved by save_preinstalled_handler(),
// or NULL if none was recorded for this signal.
struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  if (preinstalled_sigs[sig] != 0) {
    return &chainedsigactions[sig];
  }
  return NULL;
}

// Record the handler that was installed for 'sig' before the VM's own,
// so chained_handler() can forward to it later.
void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {

  assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  chainedsigactions[sig] = oldAct;
  preinstalled_sigs[sig] = 1;
}

// Install the VM's handler for 'sig'.  If a non-default handler is
// already present, either leave it alone (AllowUserSignalHandlers),
// save it for chaining (UseSignalChaining, when oktochain), or abort.
void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);
  void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
                                      : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      if (oktochain) {
        // save the old handler in jvm
        save_preinstalled_handler(sig, oldAct);
      } else {
        vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
      }
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on it own.
    } else {
      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
                    "%#lx for signal %d.", (long)oldhand, sig));
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_handler = SIG_DFL;

  sigAct.sa_sigaction = signalHandler;
  // Handle SIGSEGV on alternate signal stack if
  // not using stack banging
  if (!UseStackBanging && sig == SIGSEGV) {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
  // Interruptible i/o requires SA_RESTART cleared so EINTR
  // is returned instead of restarting system calls
  } else if (sig == os::Solaris::SIGinterrupt()) {
    sigemptyset(&sigAct.sa_mask);
    sigAct.sa_handler = NULL;
    sigAct.sa_flags = SA_SIGINFO;
    sigAct.sa_sigaction = sigINTRHandler;
  } else {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
  }
  // Remember the flags we asked for so check_signal_handler() can later
  // detect if someone has changed them.
  os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);

  sigaction(sig, &sigAct, &oldAct);

  void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                       : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}


// Run check_signal_handler(sig) unless this signal has already been
// flagged as tampered-with (recorded in check_signal_done).
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Solaris::check_signal_handler(sig)

// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here

void os::run_periodic_checks() {
  // A big source of grief is hijacking virt. addr 0x0 on Solaris,
  // thereby preventing a NULL checks.
  if(!check_addr0_done) check_addr0_done = check_addr0(tty);

  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check for the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  // See comments above for using JVM1/JVM2 and UseAltSigs
  DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
  DO_SIGNAL_CHECK(os::Solaris::SIGasync());

}

typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

static os_sigaction_t os_sigaction = NULL;

// Verify that the handler currently installed for 'sig' is still the one
// the VM expects; warn (once per signal) if it or its flags have been
// replaced behind our back.
void os::Solaris::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler) ;


  // Determine which handler the VM expects for this signal.
  switch(sig) {
    case SIGSEGV:
    case SIGBUS:
    case SIGFPE:
    case SIGPIPE:
    case SIGXFSZ:
    case SIGILL:
      jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
      break;

    case SHUTDOWN1_SIGNAL:
    case SHUTDOWN2_SIGNAL:
    case SHUTDOWN3_SIGNAL:
    case BREAK_SIGNAL:
      jvmHandler = (address)user_handler();
      break;

    default:
      int intrsig = os::Solaris::SIGinterrupt();
      int asynsig = os::Solaris::SIGasync();

      if (sig == intrsig) {
        jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
      } else if (sig == asynsig) {
        jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
      } else {
        return;
      }
      break;
  }


  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
    tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Print all the signal handler state
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }

}

void os::Solaris::install_signal_handlers() {
  bool libjsigdone = false;
  signal_handlers_are_installed = true;

  // signal-chaining
  typedef void (*signal_setting_t)();
  signal_setting_t begin_signal_setting = NULL;
  signal_setting_t end_signal_setting = NULL;
  begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                        dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
  if (begin_signal_setting != NULL) {
    // libjsig exports JVM_begin_signal_setting, so signal chaining is in
    // use; look up the rest of its interface.
    end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                        dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
    get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                                       dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
    get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
                                         dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
    libjsig_is_loaded = true;
    // JVM_get_libjsig_version only exists in newer libjsig versions.
    if (os::Solaris::get_libjsig_version != NULL) {
      libjsigversion = (*os::Solaris::get_libjsig_version)();
    }
    assert(UseSignalChaining, "should enable signal-chaining");
  }
  if (libjsig_is_loaded) {
    // Tell libjsig jvm is setting signal handlers
    (*begin_signal_setting)();
  }

  set_signal_handler(SIGSEGV, true, true);
  set_signal_handler(SIGPIPE, true, true);
  set_signal_handler(SIGXFSZ, true, true);
  set_signal_handler(SIGBUS, true, true);
  set_signal_handler(SIGILL, true, true);
  set_signal_handler(SIGFPE, true, true);


  if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {

    // Pre-1.4.1 Libjsig limited to signal chaining signals <= 32 so
    // can not register overridable signals which might be > 32
    if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
      // Tell libjsig jvm has finished setting signal handlers
      (*end_signal_setting)();
      libjsigdone = true;
    }
  }

  // Never ok to chain our SIGinterrupt
  set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
  set_signal_handler(os::Solaris::SIGasync(), true, true);

  if (libjsig_is_loaded && !libjsigdone) {
    // Tell libjsig jvm finishes setting signal handlers
    (*end_signal_setting)();
  }

  // We don't activate signal checker if libjsig is in place, we trust ourselves
  // and if UserSignalHandler is installed all bets are off.
  // Log that signal checking is off only if -verbose:jni is specified.
  if (CheckJNICalls) {
    if (libjsig_is_loaded) {
      if (PrintJNIResolving) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
      }
      check_signals = false;
    }
    if (AllowUserSignalHandlers) {
      if (PrintJNIResolving) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
      }
      check_signals = false;
    }
  }
}


void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);

// Printable names for low-numbered signals, indexed by signal number.
// Signals past the end of this table are rendered as "SIG<n>" by
// os::exception_name below.
const char * signames[] = {
  "SIG0",
  "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
  "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
  "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
  "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
  "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
  "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
  "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
  "SIGCANCEL", "SIGLOST"
};

// Formats a human-readable name for signal 'exception_code' into 'buf' and
// returns 'buf'; returns NULL when the code is not in (0, SIGRTMAX].
const char* os::exception_name(int exception_code, char* buf, size_t size) {
  if (0 < exception_code && exception_code <= SIGRTMAX) {
    // signal
    if (exception_code < sizeof(signames)/sizeof(const char*)) {
      jio_snprintf(buf, size, "%s", signames[exception_code]);
    } else {
      jio_snprintf(buf, size, "SIG%d", exception_code);
    }
    return buf;
  } else {
    return NULL;
  }
}

// (Static) wrappers for the new libthread API
int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate;
int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate;
int_fnP_thread_t_i os::Solaris::_thr_setmutator;
int_fnP_thread_t
os::Solaris::_thr_suspend_mutator;
int_fnP_thread_t os::Solaris::_thr_continue_mutator;

// (Static) wrapper for getisax(2) call.
os::Solaris::getisax_func_t os::Solaris::_getisax = 0;

// (Static) wrappers for the liblgrp API
os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;

// (Static) wrapper for meminfo() call.
os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;

// Looks up 'name' with dlsym, first in the global scope (RTLD_DEFAULT),
// then in the objects loaded after this one (RTLD_NEXT). Returns NULL if
// the symbol cannot be found; never aborts.
static address resolve_symbol_lazy(const char* name) {
  address addr = (address) dlsym(RTLD_DEFAULT, name);
  if(addr == NULL) {
    // RTLD_DEFAULT was not defined on some early versions of 2.5.1
    addr = (address) dlsym(RTLD_NEXT, name);
  }
  return addr;
}

// Like resolve_symbol_lazy, but the symbol is mandatory: aborts the VM with
// the dlerror() message if the lookup fails.
static address resolve_symbol(const char* name) {
  address addr = resolve_symbol_lazy(name);
  if(addr == NULL) {
    fatal(dlerror());
  }
  return addr;
}



// isT2_libthread()
//
// Routine to determine if we are currently using the new T2 libthread.
//
// We determine if we are using T2 by reading /proc/self/lstatus and
// looking for a thread with the ASLWP bit set. If we find this status
// bit set, we must assume that we are NOT using T2. The T2 team
// has approved this algorithm.
//
// We need to determine if we are running with the new T2 libthread
// since setting native thread priorities is handled differently
// when using this library. All threads created using T2 are bound
// threads. Calling thr_setprio is meaningless in this case.
//
bool isT2_libthread() {
  static prheader_t * lwpArray = NULL;
  static int lwpSize = 0;
  static int lwpFile = -1;
  lwpstatus_t * that;
  char lwpName [128];
  bool isT2 = false;

#define ADR(x) ((uintptr_t)(x))
#define LWPINDEX(ary,ix) ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1))))

  lwpFile = ::open("/proc/self/lstatus", O_RDONLY, 0);
  if (lwpFile < 0) {
    if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n");
    return false;
  }
  // Start with a 16K buffer; if the kernel reports more LWP entries than
  // fit, grow the buffer to the reported size and re-read.
  lwpSize = 16*1024;
  for (;;) {
    ::lseek64 (lwpFile, 0, SEEK_SET);
    lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize);
    if (::read(lwpFile, lwpArray, lwpSize) < 0) {
      if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
      break;
    }
    if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
      // We got a good snapshot - now iterate over the list.
      int aslwpcount = 0;
      for (int i = 0; i < lwpArray->pr_nent; i++ ) {
        that = LWPINDEX(lwpArray,i);
        if (that->pr_flags & PR_ASLWP) {
          aslwpcount++;
        }
      }
      // No ASLWP present => T2 libthread (see comment block above).
      if (aslwpcount == 0) isT2 = true;
      break;
    }
    lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
    FREE_C_HEAP_ARRAY(char, lwpArray); // retry.
  }

  FREE_C_HEAP_ARRAY(char, lwpArray);
  ::close (lwpFile);
  if (ThreadPriorityVerbose) {
    if (isT2) tty->print_cr("We are running with a T2 libthread\n");
    else tty->print_cr("We are not running with a T2 libthread\n");
  }
  return isT2;
}


// Resolves the libthread entry points the VM needs (getstate/setstate,
// mutator suspend/resume, signal-handler range) and verifies the OS is new
// enough (5.6+) to provide them.
void os::Solaris::libthread_init() {
  address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");

  // Determine if we are running with the new T2 libthread
  os::Solaris::set_T2_libthread(isT2_libthread());

  lwp_priocntl_init();

  // RTLD_DEFAULT was not defined on some early versions of 5.5.1
  if(func == NULL) {
    func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
    // Guarantee that this VM is running on an new enough OS (5.6 or
    // later) that it will have a new enough libthread.so.
    guarantee(func != NULL, "libthread.so is too old.");
  }

  // Initialize the new libthread getstate API wrappers
  func = resolve_symbol("thr_getstate");
  os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func));

  func = resolve_symbol("thr_setstate");
  os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func));

  func = resolve_symbol("thr_setmutator");
  os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func));

  func = resolve_symbol("thr_suspend_mutator");
  os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

  func = resolve_symbol("thr_continue_mutator");
  os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

  // Record the address range of libthread's signal handler trampoline so
  // other code can recognize PCs inside it.
  int size;
  void (*handler_info_func)(address *, int *);
  handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
  handler_info_func(&handler_start, &size);
  handler_end = handler_start + size;
}


int_fnP_mutex_tP
os::Solaris::_mutex_lock;
int_fnP_mutex_tP os::Solaris::_mutex_trylock;
int_fnP_mutex_tP os::Solaris::_mutex_unlock;
int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
int_fnP_mutex_tP os::Solaris::_mutex_destroy;
int os::Solaris::_mutex_scope = USYNC_THREAD;

int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
int_fnP_cond_tP os::Solaris::_cond_signal;
int_fnP_cond_tP os::Solaris::_cond_broadcast;
int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
int_fnP_cond_tP os::Solaris::_cond_destroy;
int os::Solaris::_cond_scope = USYNC_THREAD;

// Selects the mutex/condvar primitive family the VM will use, binding the
// function-pointer wrappers above to one of three implementations:
// _lwp_* (UseLWPSynchronization), pthread_* (UsePthreads), or the default
// libthread mutex_*/cond_* entry points.
void os::Solaris::synchronization_init() {
  if(UseLWPSynchronization) {
    os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
    os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
    os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
    os::Solaris::set_mutex_init(lwp_mutex_init);
    os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
    os::Solaris::set_mutex_scope(USYNC_THREAD);

    os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
    os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
    os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
    os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
    os::Solaris::set_cond_init(lwp_cond_init);
    os::Solaris::set_cond_destroy(lwp_cond_destroy);
    os::Solaris::set_cond_scope(USYNC_THREAD);
  }
  else {
    os::Solaris::set_mutex_scope(USYNC_THREAD);
    os::Solaris::set_cond_scope(USYNC_THREAD);

    if(UsePthreads) {
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
      os::Solaris::set_mutex_init(pthread_mutex_default_init);
      os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
      os::Solaris::set_cond_init(pthread_cond_default_init);
      os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
    }
    else {
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
      os::Solaris::set_mutex_init(::mutex_init);
      os::Solaris::set_mutex_destroy(::mutex_destroy);

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
      os::Solaris::set_cond_init(::cond_init);
      os::Solaris::set_cond_destroy(::cond_destroy);
    }
  }
}

// Dynamically loads liblgrp.so.1 (the locality-group / NUMA topology API)
// and binds its entry points; returns false if the library is unavailable.
bool os::Solaris::liblgrp_init() {
  void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
  if (handle != NULL) {
    os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
    os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
    os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
    os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
    os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
    os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
    os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
    os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
                                                      dlsym(handle, "lgrp_cookie_stale")));

    lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
    set_lgrp_cookie(c);
    return true;
  }
  return false;
}

// Binds optional libc entry points (getisax, meminfo) that may not exist on
// older Solaris releases; missing symbols simply leave the pointers unset.
void os::Solaris::misc_sym_init() {
  address func;

  // getisax
  func = resolve_symbol_lazy("getisax");
  if (func != NULL) {
    os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
  }

  // meminfo
  func = resolve_symbol_lazy("meminfo");
  if (func != NULL) {
    os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
  }
}

// Forwards to the dynamically-resolved getisax(2); callers must ensure
// misc_sym_init found the symbol first.
uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
  assert(_getisax != NULL, "_getisax not set");
  return _getisax(array, n);
}

// Symbol doesn't exist in Solaris 8 pset.h
#ifndef PS_MYID
#define PS_MYID -3
#endif

// int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
typedef
long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
static pset_getloadavg_type pset_getloadavg_ptr = NULL;

// Looks up pset_getloadavg(3C) dynamically; the pointer stays NULL (with an
// optional diagnostic) when the running libc does not provide it.
void init_pset_getloadavg_ptr(void) {
  pset_getloadavg_ptr =
    (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
  if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
    warning("pset_getloadavg function not found");
  }
}

int os::Solaris::_dev_zero_fd = -1;

// this is called _before_ the global arguments have been parsed
void os::init(void) {
  _initial_pid = getpid();

  max_hrtime = first_hrtime = gethrtime();

  init_random(1234567);

  page_size = sysconf(_SC_PAGESIZE);
  if (page_size == -1)
    fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
                  strerror(errno)));
  init_page_sizes((size_t) page_size);

  Solaris::initialize_system_info();

  // Initialize misc. symbols as soon as possible, so we can use them
  // if we need them.
  Solaris::misc_sym_init();

  // Keep a /dev/zero descriptor open for the lifetime of the VM (used for
  // anonymous mappings); marked close-on-exec so children don't inherit it.
  int fd = ::open("/dev/zero", O_RDWR);
  if (fd < 0) {
    fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
  } else {
    Solaris::set_dev_zero_fd(fd);

    // Close on exec, child won't inherit.
    fcntl(fd, F_SETFD, FD_CLOEXEC);
  }

  clock_tics_per_sec = CLK_TCK;

  // check if dladdr1() exists; dladdr1 can provide more information than
  // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
  // and is available on linker patches for 5.7 and 5.8.
  // libdl.so must have been loaded, this call is just an entry lookup
  void * hdl = dlopen("libdl.so", RTLD_NOW);
  if (hdl)
    dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));

  // (Solaris only) this switches to calls that actually do locking.
  ThreadCritical::initialize();

  main_thread = thr_self();

  // Constant minimum stack size allowed. It must be at least
  // the minimum of what the OS supports (thr_min_stack()), and
  // enough to allow the thread to get to user bytecode execution.
  Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
  // If the pagesize of the VM is greater than 8K determine the appropriate
  // number of initial guard pages. The user can change this with the
  // command line arguments, if needed.
  if (vm_page_size() > 8*K) {
    StackYellowPages = 1;
    StackRedPages = 1;
    StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
  }
}

// To install functions for atexit system call
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
  // try to enable extended file IO ASAP, see 6431278
  os::Solaris::try_enable_extended_io();

  // Allocate a single page and mark it as readable for safepoint polling. Also
  // use this first mmap call to check support for MAP_ALIGN.
  address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
                                                      page_size,
                                                      MAP_PRIVATE | MAP_ALIGN,
                                                      PROT_READ);
  if (polling_page == NULL) {
    // MAP_ALIGN unsupported on this release; fall back and remember that.
    has_map_align = false;
    polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
                                                PROT_READ);
  }

  os::set_polling_page(polling_page);

#ifndef PRODUCT
  if( Verbose && PrintMiscellaneous )
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif

  if (!UseMembar) {
    address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
    guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
    os::set_memory_serialize_page( mem_serialize_page );

#ifndef PRODUCT
    if(Verbose && PrintMiscellaneous)
      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
  }

  os::large_page_init();

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size. Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
                                        (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
                                        2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);

  size_t threadStackSizeInBytes = ThreadStackSize * K;
  if (threadStackSizeInBytes != 0 &&
      threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
                  os::Solaris::min_stack_allowed/K);
    return JNI_ERR;
  }

  // For 64kbps there will be a 64kb page size, which makes
  // the usable default stack size quite a bit less. Increase the
  // stack for 64kb (or any > than 8kb) pages, this increases
  // virtual memory fragmentation (since we're not creating the
  // stack on a power of 2 boundary. The real fix for this
  // should be to fix the guard page mechanism.

  if (vm_page_size() > 8*K) {
    threadStackSizeInBytes = (threadStackSizeInBytes != 0)
      ? threadStackSizeInBytes +
        ((StackYellowPages + StackRedPages) * vm_page_size())
      : 0;
    ThreadStackSize = threadStackSizeInBytes/K;
  }

  // Make the stack size a multiple of the page size so that
  // the yellow/red zones can be guarded.
  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
                                                vm_page_size()));

  Solaris::libthread_init();

  if (UseNUMA) {
    if (!Solaris::liblgrp_init()) {
      UseNUMA = false;
    } else {
      size_t lgrp_limit = os::numa_get_groups_num();
      int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit);
      size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
      FREE_C_HEAP_ARRAY(int, lgrp_ids);
      if (lgrp_num < 2) {
        // There's only one locality group, disable NUMA.
        UseNUMA = false;
      }
    }
    // ISM is not compatible with the NUMA allocator - it always allocates
    // pages round-robin across the lgroups.
    if (UseNUMA && UseLargePages && UseISM) {
      if (!FLAG_IS_DEFAULT(UseNUMA)) {
        if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseISM)) {
          UseLargePages = false;
        } else {
          warning("UseNUMA is not compatible with ISM large pages, disabling NUMA allocator");
          UseNUMA = false;
        }
      } else {
        UseNUMA = false;
      }
    }
    if (!UseNUMA && ForceNUMA) {
      UseNUMA = true;
    }
  }

  Solaris::signal_sets_init();
  Solaris::init_signal_mem();
  Solaris::install_signal_handlers();

  // Pre-1.4.1 libjsig can only chain the first OLDMAXSIGNUM signals.
  if (libjsigversion < JSIG_VERSION_1_4_1) {
    Maxlibjsigsigs = OLDMAXSIGNUM;
  }

  // initialize synchronization primitives to use either thread or
  // lwp synchronization (controlled by UseLWPSynchronization)
  Solaris::synchronization_init();

  if (MaxFDLimit) {
    // set the number of file descriptors to max. print out error
    // if getrlimit/setrlimit fails but continue regardless.
    struct rlimit nbr_files;
    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
    if (status != 0) {
      if (PrintMiscellaneous && (Verbose || WizardMode))
        perror("os::init_2 getrlimit failed");
    } else {
      nbr_files.rlim_cur = nbr_files.rlim_max;
      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
      if (status != 0) {
        if (PrintMiscellaneous && (Verbose || WizardMode))
          perror("os::init_2 setrlimit failed");
      }
    }
  }

  // Calculate theoretical max. size of Threads to guard gainst
  // artifical out-of-memory situations, where all available address-
  // space has been reserved by thread stacks. Default stack size is 1Mb.
  size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
    JavaThread::stack_size_at_create() : (1*K*K);
  assert(pre_thread_stack_size != 0, "Must have a stack");
  // Solaris has a maximum of 4Gb of user programs. Calculate the thread limit when
  // we should start doing Virtual Memory banging. Currently when the threads will
  // have used all but 200Mb of space.
  size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
  Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;

  // at-exit methods are called in the reverse order of their registration.
  // In Solaris 7 and earlier, atexit functions are called on return from
  // main or as a result of a call to exit(3C). There can be only 32 of
  // these functions registered and atexit() does not set errno. In Solaris
  // 8 and later, there is no limit to the number of functions registered
  // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
  // functions are called upon dlclose(3DL) in addition to return from main
  // and exit(3C).

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init2 atexit(perfMemory_exit_helper) failed");
    }
  }

  // Init pset_loadavg function pointer
  init_pset_getloadavg_ptr();

  return JNI_OK;
}

void os::init_3(void) {
  return;
}

// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
  if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
    fatal("Could not disable polling page");
};

// Mark the polling page as readable
void os::make_polling_page_readable(void) {
  if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 )
    fatal("Could not enable polling page");
};

// OS interface.

bool os::check_heap(bool force) { return true; }

typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
static vsnprintf_t sol_vsnprintf = NULL;

// vsnprintf replacement resolved via dlsym: prefers the internal
// __vsnprintf, falling back to vsnprintf, searching objects loaded after
// libjvm first and before it second. The lookup result is cached.
int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
  if (!sol_vsnprintf) {
    //search for the named symbol in the objects that were loaded after libjvm
    void* where = RTLD_NEXT;
    if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
      sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
    if (!sol_vsnprintf){
      //search for the named symbol in the objects that were loaded before libjvm
      where = RTLD_DEFAULT;
      if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
        sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
      assert(sol_vsnprintf != NULL, "vsnprintf not found");
    }
  }
  return (*sol_vsnprintf)(buf, count, fmt, argptr);
}


// Is a (classpath) directory empty?
bool os::dir_is_empty(const char* path) {
  DIR *dir = NULL;
  struct dirent *ptr;

  dir = opendir(path);
  // An unreadable/nonexistent directory is reported as empty.
  if (dir == NULL) return true;

  /* Scan the directory */
  bool result = true;
  char buf[sizeof(struct dirent) + MAX_PATH];
  struct dirent *dbuf = (struct dirent *) buf;
  while (result && (ptr = readdir(dir, dbuf)) != NULL) {
    // Any entry other than "." and ".." means the directory is not empty.
    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
      result = false;
    }
  }
  closedir(dir);
  return result;
}

// This code originates from JDK's sysOpen and open64_w
// from src/solaris/hpi/src/system_md.c

#ifndef O_DELETE
#define O_DELETE 0x10000
#endif

// Open a file. Unlink the file immediately after open returns
// if the specified oflag has the O_DELETE flag set.
// O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c

int os::open(const char *path, int oflag, int mode) {
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  int fd;
  // Strip O_DELETE before passing the flags to the OS; it is a JDK-private
  // flag handled at the bottom of this function.
  int o_delete = (oflag & O_DELETE);
  oflag = oflag & ~O_DELETE;

  fd = ::open64(path, oflag, mode);
  if (fd == -1) return -1;

  //If the open succeeded, the file might still be a directory
  {
    struct stat64 buf64;
    int ret = ::fstat64(fd, &buf64);
    int st_mode = buf64.st_mode;

    if (ret != -1) {
      if ((st_mode & S_IFMT) == S_IFDIR) {
        errno = EISDIR;
        ::close(fd);
        return -1;
      }
    } else {
      ::close(fd);
      return -1;
    }
  }
  /*
   * 32-bit Solaris systems suffer from:
   *
   * - an historical default soft limit of 256 per-process file
   * descriptors that is too low for many Java programs.
   *
   * - a design flaw where file descriptors created using stdio
   * fopen must be less than 256, _even_ when the first limit above
   * has been raised. This can cause calls to fopen (but not calls to
   * open, for example) to fail mysteriously, perhaps in 3rd party
   * native code (although the JDK itself uses fopen). One can hardly
   * criticize them for using this most standard of all functions.
   *
   * We attempt to make everything work anyways by:
   *
   * - raising the soft limit on per-process file descriptors beyond
   * 256
   *
   * - As of Solaris 10u4, we can request that Solaris raise the 256
   * stdio fopen limit by calling function enable_extended_FILE_stdio.
   * This is done in init_2 and recorded in enabled_extended_FILE_stdio
   *
   * - If we are stuck on an old (pre 10u4) Solaris system, we can
   * workaround the bug by remapping non-stdio file descriptors below
   * 256 to ones beyond 256, which is done below.
   *
   * See:
   * 1085341: 32-bit stdio routines should support file descriptors >255
   * 6533291: Work around 32-bit Solaris stdio limit of 256 open files
   * 6431278: Netbeans crash on 32 bit Solaris: need to call
   * enable_extended_FILE_stdio() in VM initialisation
   * Giri Mandalika's blog
   * http://technopark02.blogspot.com/2005_05_01_archive.html
   */
#ifndef _LP64
  if ((!enabled_extended_FILE_stdio) && fd < 256) {
    int newfd = ::fcntl(fd, F_DUPFD, 256);
    if (newfd != -1) {
      ::close(fd);
      fd = newfd;
    }
  }
#endif // 32-bit Solaris
  /*
   * All file descriptors that are opened in the JVM and not
   * specifically destined for a subprocess should have the
   * close-on-exec flag set. If we don't set it, then careless 3rd
   * party native code might fork and exec without closing all
   * appropriate file descriptors (e.g. as we do in closeDescriptors in
   * UNIXProcess.c), and this in turn might:
   *
   * - cause end-of-file to fail to be detected on some file
   * descriptors, resulting in mysterious hangs, or
   *
   * - might cause an fopen in the subprocess to fail on a system
   * suffering from bug 1085341.
   *
   * (Yes, the default setting of the close-on-exec flag is a Unix
   * design flaw)
   *
   * See:
   * 1085341: 32-bit stdio routines should support file descriptors >255
   * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
   * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
   */
#ifdef FD_CLOEXEC
  {
    int flags = ::fcntl(fd, F_GETFD);
    if (flags != -1)
      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
  }
#endif

  if (o_delete != 0) {
    ::unlink(path);
  }
  return fd;
}

// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = O_WRONLY | O_CREAT;
  if (!rewrite_existing) {
    oflags |= O_EXCL;
  }
  return ::open64(path, oflags, S_IREAD | S_IWRITE);
}

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
}

jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::lseek64(fd, offset, whence);
}

char * os::native_path(char *path) {
  return path;
}

int os::ftruncate(int fd, jlong length) {
  return ::ftruncate64(fd, length);
}

int os::fsync(int fd) {
  RESTARTABLE_RETURN_INT(::fsync(fd));
}

// Stores in *bytes the number of bytes available for reading on 'fd' and
// returns 1 on success, 0 on failure. Character devices, FIFOs and sockets
// are queried with FIONREAD; regular files via lseek arithmetic.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  int mode;
  struct stat64 buf64;

  if (::fstat64(fd, &buf64) >= 0) {
    mode = buf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      /*
       * XXX: is the following call interruptible? If so, this might
       * need to go through the INTERRUPT_IO() wrapper as for other
       * blocking, interruptible calls in this file.
       */
      int n,ioctl_return;

      INTERRUPTIBLE(::ioctl(fd, FIONREAD, &n),ioctl_return,os::Solaris::clear_interrupted);
      if (ioctl_return>= 0) {
        *bytes = n;
        return 1;
      }
    }
  }
  // Regular file (or FIONREAD failed): available = size - current offset,
  // restoring the original offset afterwards.
  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
    return 0;
  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
    return 0;
  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
    return 0;
  }
  *bytes = end - cur;
  return 1;
}

// Map a block of memory.
char* os::map_memory(int fd, const char* file_name, size_t file_offset,
                     char *addr, size_t bytes, bool read_only,
                     bool allow_exec) {
  int prot;
  int flags;

  if (read_only) {
    prot = PROT_READ;
    flags = MAP_SHARED;
  } else {
    prot = PROT_READ | PROT_WRITE;
    flags = MAP_PRIVATE;
  }

  if (allow_exec) {
    prot |= PROT_EXEC;
  }

  // A non-NULL addr is a hard placement request.
  if (addr != NULL) {
    flags |= MAP_FIXED;
  }

  char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
                                     fd, file_offset);
  if (mapped_address == MAP_FAILED) {
    return NULL;
  }
  return mapped_address;
}


// Remap a block of memory.
char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
                       char *addr, size_t bytes, bool read_only,
                       bool allow_exec) {
  // same as map_memory() on this OS
  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                        allow_exec);
}


// Unmap a block of memory.
5458 bool os::unmap_memory(char* addr, size_t bytes) { 5459 return munmap(addr, bytes) == 0; 5460 } 5461 5462 void os::pause() { 5463 char filename[MAX_PATH]; 5464 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 5465 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 5466 } else { 5467 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 5468 } 5469 5470 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 5471 if (fd != -1) { 5472 struct stat buf; 5473 ::close(fd); 5474 while (::stat(filename, &buf) == 0) { 5475 (void)::poll(NULL, 0, 100); 5476 } 5477 } else { 5478 jio_fprintf(stderr, 5479 "Could not open pause file '%s', continuing immediately.\n", filename); 5480 } 5481 } 5482 5483 #ifndef PRODUCT 5484 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS 5485 // Turn this on if you need to trace synch operations. 5486 // Set RECORD_SYNCH_LIMIT to a large-enough value, 5487 // and call record_synch_enable and record_synch_disable 5488 // around the computation of interest. 
void record_synch(char* name, bool returning);  // defined below

// RAII marker: records entry to a synch primitive in its constructor and
// the return from it in its destructor.
class RecordSynch {
  char* _name;
 public:
  RecordSynch(char* name) :_name(name)
                 { record_synch(_name, false); }
  ~RecordSynch() { record_synch(_name, true);  }
};

// Generate an interposing wrapper for a libc/libthread synch entry point:
// the wrapper looks up the real implementation once via dlsym(RTLD_NEXT),
// bumps a per-function call counter, records entry/exit via RecordSynch,
// runs the 'inner' sanity check, then tail-calls the real function.
#define CHECK_SYNCH_OP(ret, name, params, args, inner)          \
extern "C" ret name params {                                    \
  typedef ret name##_t params;                                  \
  static name##_t* implem = NULL;                               \
  static int callcount = 0;                                     \
  if (implem == NULL) {                                         \
    implem = (name##_t*) dlsym(RTLD_NEXT, #name);               \
    if (implem == NULL)  fatal(dlerror());                      \
  }                                                             \
  ++callcount;                                                  \
  RecordSynch _rs(#name);                                       \
  inner;                                                        \
  return implem args;                                           \
}
// in dbx, examine callcounts this way:
// for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done

// A pointer is "OK" if it does not lie inside the reserved Java heap —
// synch primitives must live in C heap so they survive GC.
#define CHECK_POINTER_OK(p) \
  (Universe::perm_gen() == NULL || !Universe::is_reserved_heap((oop)(p)))
#define CHECK_MU \
  if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
#define CHECK_CV \
  if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
#define CHECK_P(p) \
  if (!CHECK_POINTER_OK(p))  fatal(false,  "Pointer must be in C heap only.");

#define CHECK_MUTEX(mutex_op) \
CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);

CHECK_MUTEX(  mutex_lock)
CHECK_MUTEX( _mutex_lock)
CHECK_MUTEX( mutex_unlock)
CHECK_MUTEX(_mutex_unlock)
CHECK_MUTEX( mutex_trylock)
CHECK_MUTEX(_mutex_trylock)

#define CHECK_COND(cond_op) \
CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);

CHECK_COND( cond_wait);
CHECK_COND(_cond_wait);
CHECK_COND(_cond_wait_cancel);

#define CHECK_COND2(cond_op) \
CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);

CHECK_COND2( cond_timedwait);
CHECK_COND2(_cond_timedwait);
CHECK_COND2(_cond_timedwait_cancel);

// do the _lwp_* versions too
#define mutex_t lwp_mutex_t
#define cond_t lwp_cond_t
CHECK_MUTEX(  _lwp_mutex_lock)
CHECK_MUTEX(  _lwp_mutex_unlock)
CHECK_MUTEX(  _lwp_mutex_trylock)
CHECK_MUTEX( __lwp_mutex_lock)
CHECK_MUTEX( __lwp_mutex_unlock)
CHECK_MUTEX( __lwp_mutex_trylock)
CHECK_MUTEX(___lwp_mutex_lock)
CHECK_MUTEX(___lwp_mutex_unlock)

CHECK_COND(  _lwp_cond_wait);
CHECK_COND( __lwp_cond_wait);
CHECK_COND(___lwp_cond_wait);

CHECK_COND2( _lwp_cond_timedwait);
CHECK_COND2(__lwp_cond_timedwait);
#undef mutex_t
#undef cond_t

CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);


// recording machinery:
// Fixed-size parallel arrays; recording silently stops once the limit
// is hit.  NOTE(review): no synchronization on record_synch_count —
// presumably acceptable for this debug-only tracing aid.

enum { RECORD_SYNCH_LIMIT = 200 };
char* record_synch_name[RECORD_SYNCH_LIMIT];
void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
bool record_synch_returning[RECORD_SYNCH_LIMIT];
thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
int record_synch_count = 0;
bool record_synch_enabled = false;

// in dbx, examine recorded data this way:
// for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done

// Append one trace entry (function name, direction, calling thread) if
// tracing is enabled and the buffer is not full.
void record_synch(char* name, bool returning) {
  if (record_synch_enabled) {
    if (record_synch_count < RECORD_SYNCH_LIMIT) {
      record_synch_name[record_synch_count] = name;
      record_synch_returning[record_synch_count] = returning;
      record_synch_thread[record_synch_count] = thr_self();
      record_synch_arg0ptr[record_synch_count] = &name;
      record_synch_count++;
    }
    // put more checking code here:
    // ...
  }
}

void record_synch_enable() {
  // start collecting trace data, if not already doing so
  if (!record_synch_enabled)  record_synch_count = 0;
  record_synch_enabled = true;
}

void record_synch_disable() {
  // stop collecting trace data
  record_synch_enabled = false;
}

#endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
#endif // PRODUCT

// Byte offset of pr_utime within prusage_t, and the size of the
// pr_utime..pr_stime span, used for a targeted pread of /proc lwpusage.
const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
                               (intptr_t)(&((prusage_t *)(NULL))->pr_utime);


// JVMTI & JVM monitoring and management support
// The thread_cpu_time() and current_thread_cpu_time() are only
// supported if is_thread_cpu_time_supported() returns true.
// They are not supported on Solaris T1.

// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread *)
// returns the fast estimate available on the platform.

// hrtime_t gethrvtime() return value includes
// user time but does not include system time
jlong os::current_thread_cpu_time() {
  return (jlong) gethrvtime();
}

jlong os::thread_cpu_time(Thread *thread) {
  // return user level CPU time only to be consistent with
  // what current_thread_cpu_time returns.
  // thread_cpu_time_info() must be changed if this changes
  return os::thread_cpu_time(thread, false /* user time only */);
}

jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  if (user_sys_cpu_time) {
    return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
  } else {
    return os::current_thread_cpu_time();
  }
}

// Read the target LWP's CPU usage from /proc/<pid>/lwp/<lwpid>/lwpusage
// and return nanoseconds of user (+ system, if requested) time, or -1
// if the file cannot be opened or read.
jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  char proc_name[64];
  int count;
  prusage_t prusage;
  jlong lwp_time;
  int fd;

  sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
          getpid(),
          thread->osthread()->lwp_id());
  fd = ::open(proc_name, O_RDONLY);
  if ( fd == -1 ) return -1;

  // Read only the pr_utime..pr_stime slice of prusage_t; retry on EINTR.
  do {
    count = ::pread(fd,
                    (void *)&prusage.pr_utime,
                    thr_time_size,
                    thr_time_off);
  } while (count < 0 && errno == EINTR);
  ::close(fd);
  if ( count < 0 ) return -1;

  if (user_sys_cpu_time) {
    // user + system CPU time
    lwp_time = (((jlong)prusage.pr_stime.tv_sec +
                 (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
                 (jlong)prusage.pr_stime.tv_nsec +
                 (jlong)prusage.pr_utime.tv_nsec;
  } else {
    // user level CPU time only
    lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
                (jlong)prusage.pr_utime.tv_nsec;
  }

  return(lwp_time);
}

void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;    // elapsed time not wall time
  info_ptr->may_skip_forward = false;     // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
}

void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;    // elapsed time not wall time
  info_ptr->may_skip_forward = false;     // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
}

bool os::is_thread_cpu_time_supported() {
  if ( os::Solaris::T2_libthread() || UseBoundThreads ) {
    return true;
  } else {
    return false;
  }
}

// System loadavg support.  Returns -1 if load average cannot be obtained.
// Return the load average for our processor set if the primitive exists
// (Solaris 9 and later).  Otherwise just return system wide loadavg.
int os::loadavg(double loadavg[], int nelem) {
  if (pset_getloadavg_ptr != NULL) {
    return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
  } else {
    return ::getloadavg(loadavg, nelem);
  }
}

//---------------------------------------------------------------------------------

// Clamp y to the VM page containing x: returns x if both addresses share a
// page, otherwise the boundary of x's page nearest to y.
static address same_page(address x, address y) {
  intptr_t page_bits = -os::vm_page_size();
  if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
    return x;
  else if (x > y)
    return (address)(intptr_t(y) | ~page_bits) + 1;
  else
    return (address)(intptr_t(y) & page_bits);
}

// Print symbolic information for addr (symbol+offset, containing library,
// library base) via dladdr; with -verbose, also disassemble bytes around
// the address.  Returns false if dladdr cannot resolve addr.
bool os::find(address addr, outputStream* st) {
  Dl_info dlinfo;
  memset(&dlinfo, 0, sizeof(dlinfo));
  if (dladdr(addr, &dlinfo)) {
#ifdef _LP64
    st->print("0x%016lx: ", addr);
#else
    st->print("0x%08x: ", addr);
#endif
    if (dlinfo.dli_sname != NULL)
      st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
    else if (dlinfo.dli_fname)
      st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
    else
      st->print("<absolute address>");
    if (dlinfo.dli_fname) st->print(" in %s", dlinfo.dli_fname);
#ifdef _LP64
    if (dlinfo.dli_fbase) st->print(" at 0x%016lx", dlinfo.dli_fbase);
#else
    if (dlinfo.dli_fbase) st->print(" at 0x%08x", dlinfo.dli_fbase);
#endif
    st->cr();
    if (Verbose) {
      // decode some bytes around the PC, staying on addr's page and
      // inside the nearest enclosing symbols
      address begin = same_page(addr-40, addr);
      address end   = same_page(addr+40, addr);
      address lowest = (address) dlinfo.dli_sname;
      if (!lowest)  lowest = (address) dlinfo.dli_fbase;
      if (begin < lowest)  begin = lowest;
      Dl_info dlinfo2;
      if (dladdr(end, &dlinfo2) && dlinfo2.dli_saddr != dlinfo.dli_saddr
          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
        end = (address) dlinfo2.dli_saddr;
      Disassembler::decode(begin, end, st);
    }
    return true;
  }
  return false;
}

// Following function has been added to support HotSparc's libjvm.so running
// under Solaris production JDK 1.2.2 / 1.3.0.  These came from
// src/solaris/hpi/native_threads in the EVM codebase.
//
// NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
// libraries and should thus be removed. We will leave it behind for a while
// until we no longer want to able to run on top of 1.3.0 Solaris production
// JDK. See 4341971.

#define STACK_SLACK 0x800

extern "C" {
  // Distance from the current stack position to the stack limit, minus
  // a safety margin of STACK_SLACK bytes.
  intptr_t sysThreadAvailableStackWithSlack() {
    stack_t st;
    intptr_t retval, stack_top;
    retval = thr_stksegment(&st);
    assert(retval == 0, "incorrect return value from thr_stksegment");
    assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
    assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
    stack_top=(intptr_t)st.ss_sp-st.ss_size;
    return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
  }
}

// Just to get the Kernel build to link on solaris for testing.

extern "C" {
  class ASGCT_CallTrace;
  void AsyncGetCallTrace(ASGCT_CallTrace *trace, jint depth, void* ucontext)
    KERNEL_RETURN;
}


// ObjectMonitor park-unpark infrastructure ...
//
// We implement Solaris and Linux PlatformEvents with the
// obvious condvar-mutex-flag triple.
// Another alternative that works quite well is pipes:
// Each PlatformEvent consists of a pipe-pair.
// The thread associated with the PlatformEvent
// calls park(), which reads from the input end of the pipe.
// Unpark() writes into the other end of the pipe.
// The write-side of the pipe must be set NDELAY.
// Unfortunately pipes consume a large # of handles.
// Native solaris lwp_park() and lwp_unpark() work nicely, too.
// Using pipes for the 1st few threads might be workable, however.
//
// park() is permitted to return spuriously.
// Callers of park() should wrap the call to park() in
// an appropriate loop.  A litmus test for the correct
// usage of park is the following: if park() were modified
// to immediately return 0 your code should still work,
// albeit degenerating to a spin loop.
//
// An interesting optimization for park() is to use a trylock()
// to attempt to acquire the mutex.  If the trylock() fails
// then we know that a concurrent unpark() operation is in-progress.
// in that case the park() code could simply set _count to 0
// and return immediately.  The subsequent park() operation *might*
// return immediately.  That's harmless as the caller of park() is
// expected to loop.  By using trylock() we will have avoided a
// context switch caused by contention on the per-thread mutex.
//
// TODO-FIXME:
// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the
//     objectmonitor implementation.
// 2.  Collapse the JSR166 parker event, and the
//     objectmonitor ParkEvent into a single "Event" construct.
// 3.  In park() and unpark() add:
//     assert (Thread::current() == AssociatedWith).
// 4.  add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
//     1-out-of-N park() operations will return immediately.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block
//
// _Event serves as a restricted-range semaphore.
//
// Another possible encoding of _Event would be with
// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
//
// TODO-FIXME: add DTRACE probes for:
// 1.  Tx parks
// 2.  Ty unparks Tx
// 3.  Tx resumes from park
//


// value determined through experimentation
#define ROUNDINGFIX 11

// utility to compute the abstime argument to timedwait.
// TODO-FIXME: switch from compute_abstime() to unpackTime().

// Convert a relative timeout in milliseconds into the absolute timestruc_t
// that cond_timedwait expects (now + millis, clamped to the platform's
// maximum wait period).  Returns abstime for convenience.
static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
  // millis is the relative timeout time
  // abstime will be the absolute timeout time
  if (millis < 0)  millis = 0;
  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");
  jlong seconds = millis / 1000;
  jlong max_wait_period;

  if (UseLWPSynchronization) {
    // forward port of fix for 4275818 (not sleeping long enough)
    // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
    // _lwp_cond_timedwait() used a round_down algorithm rather
    // than a round_up. For millis less than our roundfactor
    // it rounded down to 0 which doesn't meet the spec.
    // For millis > roundfactor we may return a bit sooner, but
    // since we can not accurately identify the patch level and
    // this has already been fixed in Solaris 9 and 8 we will
    // leave it alone rather than always rounding down.

    if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
    // It appears that when we go directly through Solaris _lwp_cond_timedwait()
    // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
    max_wait_period = 21000000;
  } else {
    max_wait_period = 50000000;
  }
  millis %= 1000;
  if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
    seconds = max_wait_period;
  }
  abstime->tv_sec = now.tv_sec + seconds;
  long usec = now.tv_usec + millis * 1000;
  if (usec >= 1000000) {
    abstime->tv_sec += 1;
    usec -= 1000000;
  }
  abstime->tv_nsec = usec * 1000;
  return abstime;
}

// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
// Conceptually TryPark() should be equivalent to park(0).

int os::PlatformEvent::TryPark() {
  for (;;) {
    const int v = _Event ;
    guarantee ((v == 0) || (v == 1), "invariant") ;
    // CAS loop: retry until we atomically observe-and-clear _Event.
    if (Atomic::cmpxchg (0, &_Event, v) == v) return v  ;
  }
}

void os::PlatformEvent::park() {           // AKA: down()
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  int v ;
  // Atomically decrement _Event (1 -> 0 consumes the permit; 0 -> -1
  // announces that we are about to block).
  for (;;) {
    v = _Event ;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v == 0) {
    // Do this the hard way by blocking ...
    // See http://monaco.sfbay/detail.jsf?cr=5094058.
    // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
    // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
    if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif
    int status = os::Solaris::mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee (_nParked == 0, "invariant") ;
    ++ _nParked ;
    while (_Event < 0) {
      // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
      // Treat this the same as if the wait was interrupted
      // With usr/lib/lwp going to kernel, always handle ETIME
      status = os::Solaris::cond_wait(_cond, _mutex);
      if (status == ETIME) status = EINTR ;
      assert_status(status == 0 || status == EINTR, status, "cond_wait");
    }
    -- _nParked ;
    _Event = 0 ;
    status = os::Solaris::mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
  }
}

// Timed park: like park() but waits at most millis milliseconds.
// Returns OS_OK if a permit was consumed (or a wakeup arrived),
// OS_TIMEOUT if the deadline passed first.
int os::PlatformEvent::park(jlong millis) {
  guarantee (_nParked == 0, "invariant") ;
  int v ;
  for (;;) {
    v = _Event ;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v != 0) return OS_OK ;   // permit was available - no need to block

  int ret = OS_TIMEOUT;
  timestruc_t abst;
  compute_abstime (&abst, millis);

  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // For Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif
  int status = os::Solaris::mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee (_nParked == 0, "invariant") ;
  ++ _nParked ;
  while (_Event < 0) {
    int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
    assert_status(status == 0 || status == EINTR ||
                  status == ETIME || status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (!FilterSpuriousWakeups) break ;   // previous semantics
    if (status == ETIME || status == ETIMEDOUT) break ;
    // We consume and ignore EINTR and spurious wakeups.
  }
  -- _nParked ;
  if (_Event >= 0) ret = OS_OK ;   // woken by unpark() rather than timeout
  _Event = 0 ;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  return ret;
}

void os::PlatformEvent::unpark() {
  int v, AnyWaiters;

  // Increment _Event.
  // Another acceptable implementation would be to simply swap 1
  // into _Event:
  //   if (Swap (&_Event, 1) < 0) {
  //      mutex_lock (_mutex) ; AnyWaiters = nParked; mutex_unlock (_mutex) ;
  //      if (AnyWaiters) cond_signal (_cond) ;
  //   }

  for (;;) {
    v = _Event ;
    if (v > 0) {
      // The LD of _Event could have reordered or be satisfied
      // by a read-aside from this processor's write buffer.
      // To avoid problems execute a barrier and then
      // ratify the value.  A degenerate CAS() would also work.
      // Viz., CAS (v+0, &_Event, v) == v).
      OrderAccess::fence() ;
      if (_Event == v) return ;
      continue ;
    }
    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
  }

  // If the thread associated with the event was parked, wake it.
  if (v < 0) {
    int status ;
    // Wait for the thread assoc with the PlatformEvent to vacate.
    status = os::Solaris::mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    AnyWaiters = _nParked ;
    status = os::Solaris::mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
    guarantee (AnyWaiters == 0 || AnyWaiters == 1, "invariant") ;
    if (AnyWaiters != 0) {
      // We intentionally signal *after* dropping the lock
      // to avoid a common class of futile wakeups.
      status = os::Solaris::cond_signal(_cond);
      assert_status(status == 0, status, "cond_signal");
    }
  }
}

// JSR166
// -------------------------------------------------------

/*
 * The solaris and linux implementations of park/unpark are fairly
 * conservative for now, but can be improved. They currently use a
 * mutex/condvar pair, plus _counter.
 * Park decrements _counter if > 0, else does a condvar wait.  Unpark
 * sets count to 1 and signals condvar.  Only one thread ever waits
 * on the condvar. Contention seen when trying to park implies that someone
 * is unparking you, so don't wait. And spurious returns are fine, so there
 * is no need to track notifications.
 */

#define NANOSECS_PER_SEC 1000000000
#define NANOSECS_PER_MILLISEC 1000000
#define MAX_SECS 100000000

/*
 * This code is common to linux and solaris and will be moved to a
 * common place in dolphin.
 *
 * The passed in time value is either a relative time in nanoseconds
 * or an absolute time in milliseconds. Either way it has to be unpacked
 * into suitable seconds and nanoseconds components and stored in the
 * given timespec structure.
 * Given time is a 64-bit value and the time_t used in the timespec is only
 * a signed-32-bit value (except on 64-bit Linux) we have to watch for
 * overflow if times way in the future are given. Further on Solaris versions
 * prior to 10 there is a restriction (see cond_timedwait) that the specified
 * number of seconds, in abstime, is less than current_time + 100,000,000.
 * As it will be 28 years before "now + 100000000" will overflow we can
 * ignore overflow and just impose a hard-limit on seconds using the value
 * of "now + 100,000,000". This places a limit on the timeout of about 3.17
 * years from "now".
 */
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert (time > 0, "convertTime");

  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");

  time_t max_secs = now.tv_sec + MAX_SECS;

  if (isAbsolute) {
    // time is an absolute deadline in milliseconds since the epoch
    jlong secs = time / 1000;
    if (secs > max_secs) {
      absTime->tv_sec = max_secs;
    }
    else {
      absTime->tv_sec = secs;
    }
    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  }
  else {
    // time is a relative duration in nanoseconds
    jlong secs = time / NANOSECS_PER_SEC;
    if (secs >= MAX_SECS) {
      absTime->tv_sec = max_secs;
      absTime->tv_nsec = 0;
    }
    else {
      absTime->tv_sec = now.tv_sec + secs;
      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
        absTime->tv_nsec -= NANOSECS_PER_SEC;
        ++absTime->tv_sec; // note: this must be <= max_secs
      }
    }
  }
  assert(absTime->tv_sec >= 0, "tv_sec < 0");
  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
}

// JSR166 park: block the current JavaThread until unpark() grants a
// permit, the (absolute-ms or relative-ns) timeout expires, the thread
// is interrupted, or a spurious wakeup occurs.
void Parker::park(bool isAbsolute, jlong time) {

  // Optional fast-path check:
  // Return immediately if a permit is available.
  if (_counter > 0) {
    _counter = 0 ;
    OrderAccess::fence();
    return ;
  }

  // Optional fast-exit: Check interrupt before trying to wait
  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // First, demultiplex/decode time arguments
  timespec absTime;
  if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
    return;
  }
  if (time > 0) {
    // Warning: this code might be exposed to the old Solaris time
    // round-down bugs.  Grep "roundingFix" for details.
    unpackTime(&absTime, isAbsolute, time);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: _mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex.  If safepoints are pending both the
  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if cannot get lock since interference arises from
  // unblocking.  Also. check interrupt before trying wait
  if (Thread::is_interrupted(thread, false) ||
      os::Solaris::mutex_trylock(_mutex) != 0) {
    return;
  }

  int status ;

  if (_counter > 0)  { // no wait needed
    _counter = 0;
    status = os::Solaris::mutex_unlock(_mutex);
    assert (status == 0, "invariant") ;
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
  thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  // Do this the hard way by blocking ...
  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif

  if (time == 0) {
    status = os::Solaris::cond_wait (_cond, _mutex) ;
  } else {
    status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
  }
  // Note that an untimed cond_wait() can sometimes return ETIME on older
  // versions of the Solaris.
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
#endif
  _counter = 0 ;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock") ;

  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }
  OrderAccess::fence();
}

// Set the permit to 1 and, if the owner thread was waiting, signal it.
// The signal is issued after the mutex is released to avoid futile wakeups.
void Parker::unpark() {
  int s, status ;
  status = os::Solaris::mutex_lock (_mutex) ;
  assert (status == 0, "invariant") ;
  s = _counter;
  _counter = 1;
  status = os::Solaris::mutex_unlock (_mutex) ;
  assert (status == 0, "invariant") ;

  if (s < 1) {
    status = os::Solaris::cond_signal (_cond) ;
    assert (status == 0, "invariant") ;
  }
}

extern char** environ;

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't fork a new process).
// Unlike system(), this function can be called from signal handler. It
// doesn't block SIGINT et al.
int os::fork_and_exec(char* cmd) {
  char * argv[4];
  argv[0] = (char *)"sh";
  argv[1] = (char *)"-c";
  argv[2] = cmd;
  argv[3] = NULL;

  // fork is async-safe, fork1 is not so can't use in signal handler
  pid_t pid;
  Thread* t = ThreadLocalStorage::get_thread_slow();
  if (t != NULL && t->is_inside_signal_handler()) {
    pid = fork();
  } else {
    pid = fork1();
  }

  if (pid < 0) {
    // fork failed
    warning("fork failed: %s", strerror(errno));
    return -1;

  } else if (pid == 0) {
    // child process

    // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
    execve("/usr/bin/sh", argv, environ);

    // execve failed
    _exit(-1);

  } else {
    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
    // care about the actual exit code, for now.

    int status;

    // Wait for the child process to exit.  This returns immediately if
    // the child has already exited. */
    while (waitpid(pid, &status, 0) < 0) {
      switch (errno) {
        case ECHILD: return 0;
        case EINTR: break;
        default: return -1;
      }
    }

    if (WIFEXITED(status)) {
      // The child exited normally; get its exit code.
      return WEXITSTATUS(status);
    } else if (WIFSIGNALED(status)) {
      // The child exited because of a signal
      // The best value to return is 0x80 + signal number,
      // because that is what all Unix shells do, and because
      // it allows callers to distinguish between process exit and
      // process death by signal.
      return 0x80 + WTERMSIG(status);
    } else {
      // Unknown exit code; pass it through
      return status;
    }
  }
}

// is_headless_jre()
//
// Test for the existence of xawt/libmawt.so or libawt_xawt.so
// in order to report if we are running in a headless jre
//
// Since JDK8 xawt/libmawt.so was moved into the same directory
// as libawt.so, and renamed libawt_xawt.so
//
bool os::is_headless_jre() {
  struct stat statbuf;
  char buf[MAXPATHLEN];
  char libmawtpath[MAXPATHLEN];
  const char *xawtstr  = "/xawt/libmawt.so";
  const char *new_xawtstr = "/libawt_xawt.so";
  char *p;

  // Get path to libjvm.so
  os::jvm_path(buf, sizeof(buf));

  // Get rid of libjvm.so
  p = strrchr(buf, '/');
  if (p == NULL) return false;
  else *p = '\0';

  // Get rid of client or server
  p = strrchr(buf, '/');
  if (p == NULL) return false;
  else *p = '\0';

  // check xawt/libmawt.so
  strcpy(libmawtpath, buf);
  strcat(libmawtpath, xawtstr);
  if (::stat(libmawtpath, &statbuf) == 0) return false;

  // check libawt_xawt.so
  strcpy(libmawtpath, buf);
  strcat(libmawtpath, new_xawtstr);
  if (::stat(libmawtpath, &statbuf) == 0) return false;

  return true;
}

// Interruptible write: honors Java thread interruption.
size_t os::write(int fd, const void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT(::write(fd, buf, nBytes), os::Solaris::clear_interrupted);
}

// close, restarted transparently on EINTR.
int os::close(int fd) {
  RESTARTABLE_RETURN_INT(::close(fd));
}

int os::socket_close(int fd) {
  RESTARTABLE_RETURN_INT(::close(fd));
}

int os::recv(int fd, char *buf, int nBytes, int flags) {
  INTERRUPTIBLE_RETURN_INT(::recv(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
}


int os::send(int fd, char *buf, int nBytes, int flags) {
  INTERRUPTIBLE_RETURN_INT(::send(fd, buf, nBytes, flags),
                           os::Solaris::clear_interrupted);
}

// send without interruption handling (used where Java semantics don't apply).
int os::raw_send(int fd, char *buf, int nBytes, int flags) {
  RESTARTABLE_RETURN_INT(::send(fd, buf, nBytes, flags));
}

// As both poll and select can be interrupted by signals, we have to be
// prepared to restart the system call after updating the timeout, unless
// a poll() is done with timeout == -1, in which case we repeat with this
// "wait forever" value.

// Wait for fd to become readable for up to 'timeout' ms (-1 = forever).
// On EINTR the remaining timeout is recomputed and the poll restarted.
int os::timeout(int fd, long timeout) {
  int res;
  struct timeval t;
  julong prevtime, newtime;
  static const char* aNull = 0;
  struct pollfd pfd;
  pfd.fd = fd;
  pfd.events = POLLIN;

  gettimeofday(&t, &aNull);
  prevtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;

  for(;;) {
    INTERRUPTIBLE_NORESTART(::poll(&pfd, 1, timeout), res, os::Solaris::clear_interrupted);
    if(res == OS_ERR && errno == EINTR) {
      if(timeout != -1) {
        // charge the elapsed time against the remaining timeout
        gettimeofday(&t, &aNull);
        newtime = ((julong)t.tv_sec * 1000) + t.tv_usec /1000;
        timeout -= newtime - prevtime;
        if(timeout <= 0)
          return OS_OK;
        prevtime = newtime;
      }
    } else return res;
  }
}

// Interruptible connect.  A restarted connect() has different errno
// semantics (EALREADY/EISCONN), which are mapped back to the values a
// caller of a single connect() would expect.
int os::connect(int fd, struct sockaddr *him, int len) {
  int _result;
  INTERRUPTIBLE_NORESTART(::connect(fd, him, len), _result,
                          os::Solaris::clear_interrupted);

  // Depending on when thread interruption is reset, _result could be
  // one of two values when errno == EINTR

  if (((_result == OS_INTRPT) || (_result == OS_ERR))
      && (errno == EINTR)) {
    /* restarting a connect() changes its errno semantics */
    INTERRUPTIBLE(::connect(fd, him, len), _result,
                  os::Solaris::clear_interrupted);
    /* undo these changes */
    if (_result == OS_ERR) {
      if (errno == EALREADY) {
        errno = EINPROGRESS; /* fall through */
      } else if (errno == EISCONN) {
        errno = 0;
        return OS_OK;
      }
    }
  }
  return _result;
}
6437 int os::accept(int fd, struct sockaddr *him, int *len) { 6438 if (fd < 0) 6439 return OS_ERR; 6440 INTERRUPTIBLE_RETURN_INT((int)::accept(fd, him,\ 6441 (socklen_t*) len), os::Solaris::clear_interrupted); 6442 } 6443 6444 int os::recvfrom(int fd, char *buf, int nBytes, int flags, 6445 sockaddr *from, int *fromlen) { 6446 //%%note jvm_r11 6447 INTERRUPTIBLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes,\ 6448 flags, from, fromlen), os::Solaris::clear_interrupted); 6449 } 6450 6451 int os::sendto(int fd, char *buf, int len, int flags, 6452 struct sockaddr *to, int tolen) { 6453 //%%note jvm_r11 6454 INTERRUPTIBLE_RETURN_INT((int)::sendto(fd, buf, len, flags,\ 6455 to, tolen), os::Solaris::clear_interrupted); 6456 } 6457 6458 int os::socket_available(int fd, jint *pbytes) { 6459 if (fd < 0) 6460 return OS_OK; 6461 6462 int ret; 6463 6464 RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret); 6465 6466 //%% note ioctl can return 0 when successful, JVM_SocketAvailable 6467 // is expected to return 0 on failure and 1 on success to the jdk. 6468 6469 return (ret == OS_ERR) ? 0 : 1; 6470 } 6471 6472 6473 int os::bind(int fd, struct sockaddr *him, int len) { 6474 INTERRUPTIBLE_RETURN_INT_NORESTART(::bind(fd, him, len),\ 6475 os::Solaris::clear_interrupted); 6476 }