1 /* 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // no precompiled headers 26 #include "classfile/classLoader.hpp" 27 #include "classfile/systemDictionary.hpp" 28 #include "classfile/vmSymbols.hpp" 29 #include "code/icBuffer.hpp" 30 #include "code/vtableStubs.hpp" 31 #include "compiler/compileBroker.hpp" 32 #include "compiler/disassembler.hpp" 33 #include "interpreter/interpreter.hpp" 34 #include "jvm_solaris.h" 35 #include "memory/allocation.inline.hpp" 36 #include "memory/filemap.hpp" 37 #include "mutex_solaris.inline.hpp" 38 #include "oops/oop.inline.hpp" 39 #include "os_share_solaris.hpp" 40 #include "prims/jniFastGetField.hpp" 41 #include "prims/jvm.h" 42 #include "prims/jvm_misc.hpp" 43 #include "runtime/arguments.hpp" 44 #include "runtime/extendedPC.hpp" 45 #include "runtime/globals.hpp" 46 #include "runtime/interfaceSupport.hpp" 47 #include "runtime/java.hpp" 48 #include "runtime/javaCalls.hpp" 49 #include "runtime/mutexLocker.hpp" 50 #include "runtime/objectMonitor.hpp" 51 #include "runtime/osThread.hpp" 52 #include "runtime/perfMemory.hpp" 53 #include "runtime/sharedRuntime.hpp" 54 #include "runtime/statSampler.hpp" 55 #include "runtime/stubRoutines.hpp" 56 #include "runtime/thread.inline.hpp" 57 #include "runtime/threadCritical.hpp" 58 #include "runtime/timer.hpp" 59 #include "services/attachListener.hpp" 60 #include "services/memTracker.hpp" 61 #include "services/runtimeService.hpp" 62 #include "utilities/decoder.hpp" 63 #include "utilities/defaultStream.hpp" 64 #include "utilities/events.hpp" 65 #include "utilities/growableArray.hpp" 66 #include "utilities/vmError.hpp" 67 68 // put OS-includes here 69 # include <dlfcn.h> 70 # include <errno.h> 71 # include <exception> 72 # include <link.h> 73 # include <poll.h> 74 # include <pthread.h> 75 # include <pwd.h> 76 # include <schedctl.h> 77 # include <setjmp.h> 78 # include <signal.h> 79 # include <stdio.h> 80 # include <alloca.h> 81 # include <sys/filio.h> 82 # include <sys/ipc.h> 83 # include <sys/lwp.h> 84 # include <sys/machelf.h> // 
for elf Sym structure used by dladdr1 85 # include <sys/mman.h> 86 # include <sys/processor.h> 87 # include <sys/procset.h> 88 # include <sys/pset.h> 89 # include <sys/resource.h> 90 # include <sys/shm.h> 91 # include <sys/socket.h> 92 # include <sys/stat.h> 93 # include <sys/systeminfo.h> 94 # include <sys/time.h> 95 # include <sys/times.h> 96 # include <sys/types.h> 97 # include <sys/wait.h> 98 # include <sys/utsname.h> 99 # include <thread.h> 100 # include <unistd.h> 101 # include <sys/priocntl.h> 102 # include <sys/rtpriocntl.h> 103 # include <sys/tspriocntl.h> 104 # include <sys/iapriocntl.h> 105 # include <sys/fxpriocntl.h> 106 # include <sys/loadavg.h> 107 # include <string.h> 108 # include <stdio.h> 109 110 # define _STRUCTURED_PROC 1 // this gets us the new structured proc interfaces of 5.6 & later 111 # include <sys/procfs.h> // see comment in <sys/procfs.h> 112 113 #define MAX_PATH (2 * K) 114 115 // for timer info max values which include all bits 116 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF) 117 118 119 // Here are some liblgrp types from sys/lgrp_user.h to be able to 120 // compile on older systems without this header file. 
121 122 #ifndef MADV_ACCESS_LWP 123 # define MADV_ACCESS_LWP 7 /* next LWP to access heavily */ 124 #endif 125 #ifndef MADV_ACCESS_MANY 126 # define MADV_ACCESS_MANY 8 /* many processes to access heavily */ 127 #endif 128 129 #ifndef LGRP_RSRC_CPU 130 # define LGRP_RSRC_CPU 0 /* CPU resources */ 131 #endif 132 #ifndef LGRP_RSRC_MEM 133 # define LGRP_RSRC_MEM 1 /* memory resources */ 134 #endif 135 136 // see thr_setprio(3T) for the basis of these numbers 137 #define MinimumPriority 0 138 #define NormalPriority 64 139 #define MaximumPriority 127 140 141 // Values for ThreadPriorityPolicy == 1 142 int prio_policy1[CriticalPriority+1] = { 143 -99999, 0, 16, 32, 48, 64, 144 80, 96, 112, 124, 127, 127 }; 145 146 // System parameters used internally 147 static clock_t clock_tics_per_sec = 100; 148 149 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+) 150 static bool enabled_extended_FILE_stdio = false; 151 152 // For diagnostics to print a message once. see run_periodic_checks 153 static bool check_addr0_done = false; 154 static sigset_t check_signal_done; 155 static bool check_signals = true; 156 157 address os::Solaris::handler_start; // start pc of thr_sighndlrinfo 158 address os::Solaris::handler_end; // end pc of thr_sighndlrinfo 159 160 address os::Solaris::_main_stack_base = NULL; // 4352906 workaround 161 162 163 // "default" initializers for missing libc APIs 164 extern "C" { 165 static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; } 166 static int lwp_mutex_destroy(mutex_t *mx) { return 0; } 167 168 static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; } 169 static int lwp_cond_destroy(cond_t *cv) { return 0; } 170 } 171 172 // "default" initializers for pthread-based synchronization 173 extern "C" { 174 static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; } 175 static int 
pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; } 176 } 177 178 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time); 179 180 // Thread Local Storage 181 // This is common to all Solaris platforms so it is defined here, 182 // in this common file. 183 // The declarations are in the os_cpu threadLS*.hpp files. 184 // 185 // Static member initialization for TLS 186 Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL}; 187 188 #ifndef PRODUCT 189 #define _PCT(n,d) ((100.0*(double)(n))/(double)(d)) 190 191 int ThreadLocalStorage::_tcacheHit = 0; 192 int ThreadLocalStorage::_tcacheMiss = 0; 193 194 void ThreadLocalStorage::print_statistics() { 195 int total = _tcacheMiss+_tcacheHit; 196 tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n", 197 _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total)); 198 } 199 #undef _PCT 200 #endif // PRODUCT 201 202 Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id, 203 int index) { 204 Thread *thread = get_thread_slow(); 205 if (thread != NULL) { 206 address sp = os::current_stack_pointer(); 207 guarantee(thread->_stack_base == NULL || 208 (sp <= thread->_stack_base && 209 sp >= thread->_stack_base - thread->_stack_size) || 210 is_error_reported(), 211 "sp must be inside of selected thread stack"); 212 213 thread->set_self_raw_id(raw_id); // mark for quick retrieval 214 _get_thread_cache[ index ] = thread; 215 } 216 return thread; 217 } 218 219 220 static const double all_zero[ sizeof(Thread) / sizeof(double) + 1 ] = {0}; 221 #define NO_CACHED_THREAD ((Thread*)all_zero) 222 223 void ThreadLocalStorage::pd_set_thread(Thread* thread) { 224 225 // Store the new value before updating the cache to prevent a race 226 // between get_thread_via_cache_slowly() and this store operation. 
227 os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread); 228 229 // Update thread cache with new thread if setting on thread create, 230 // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit. 231 uintptr_t raw = pd_raw_thread_id(); 232 int ix = pd_cache_index(raw); 233 _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread; 234 } 235 236 void ThreadLocalStorage::pd_init() { 237 for (int i = 0; i < _pd_cache_size; i++) { 238 _get_thread_cache[i] = NO_CACHED_THREAD; 239 } 240 } 241 242 // Invalidate all the caches (happens to be the same as pd_init). 243 void ThreadLocalStorage::pd_invalidate_all() { pd_init(); } 244 245 #undef NO_CACHED_THREAD 246 247 // END Thread Local Storage 248 249 static inline size_t adjust_stack_size(address base, size_t size) { 250 if ((ssize_t)size < 0) { 251 // 4759953: Compensate for ridiculous stack size. 252 size = max_intx; 253 } 254 if (size > (size_t)base) { 255 // 4812466: Make sure size doesn't allow the stack to wrap the address space. 256 size = (size_t)base; 257 } 258 return size; 259 } 260 261 static inline stack_t get_stack_info() { 262 stack_t st; 263 int retval = thr_stksegment(&st); 264 st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size); 265 assert(retval == 0, "incorrect return value from thr_stksegment"); 266 assert((address)&st < (address)st.ss_sp, "Invalid stack base returned"); 267 assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned"); 268 return st; 269 } 270 271 address os::current_stack_base() { 272 int r = thr_main() ; 273 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ; 274 bool is_primordial_thread = r; 275 276 // Workaround 4352906, avoid calls to thr_stksegment by 277 // thr_main after the first one (it looks like we trash 278 // some data, causing the value for ss_sp to be incorrect). 
279 if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) { 280 stack_t st = get_stack_info(); 281 if (is_primordial_thread) { 282 // cache initial value of stack base 283 os::Solaris::_main_stack_base = (address)st.ss_sp; 284 } 285 return (address)st.ss_sp; 286 } else { 287 guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base"); 288 return os::Solaris::_main_stack_base; 289 } 290 } 291 292 size_t os::current_stack_size() { 293 size_t size; 294 295 int r = thr_main() ; 296 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ; 297 if(!r) { 298 size = get_stack_info().ss_size; 299 } else { 300 struct rlimit limits; 301 getrlimit(RLIMIT_STACK, &limits); 302 size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur); 303 } 304 // base may not be page aligned 305 address base = current_stack_base(); 306 address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());; 307 return (size_t)(base - bottom); 308 } 309 310 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 311 return localtime_r(clock, res); 312 } 313 314 // interruptible infrastructure 315 316 // setup_interruptible saves the thread state before going into an 317 // interruptible system call. 318 // The saved state is used to restore the thread to 319 // its former state whether or not an interrupt is received. 
320 // Used by classloader os::read 321 // os::restartable_read calls skip this layer and stay in _thread_in_native 322 323 void os::Solaris::setup_interruptible(JavaThread* thread) { 324 325 JavaThreadState thread_state = thread->thread_state(); 326 327 assert(thread_state != _thread_blocked, "Coming from the wrong thread"); 328 assert(thread_state != _thread_in_native, "Native threads skip setup_interruptible"); 329 OSThread* osthread = thread->osthread(); 330 osthread->set_saved_interrupt_thread_state(thread_state); 331 thread->frame_anchor()->make_walkable(thread); 332 ThreadStateTransition::transition(thread, thread_state, _thread_blocked); 333 } 334 335 JavaThread* os::Solaris::setup_interruptible() { 336 JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread(); 337 setup_interruptible(thread); 338 return thread; 339 } 340 341 void os::Solaris::try_enable_extended_io() { 342 typedef int (*enable_extended_FILE_stdio_t)(int, int); 343 344 if (!UseExtendedFileIO) { 345 return; 346 } 347 348 enable_extended_FILE_stdio_t enabler = 349 (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT, 350 "enable_extended_FILE_stdio"); 351 if (enabler) { 352 enabler(-1, -1); 353 } 354 } 355 356 357 #ifdef ASSERT 358 359 JavaThread* os::Solaris::setup_interruptible_native() { 360 JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread(); 361 JavaThreadState thread_state = thread->thread_state(); 362 assert(thread_state == _thread_in_native, "Assumed thread_in_native"); 363 return thread; 364 } 365 366 void os::Solaris::cleanup_interruptible_native(JavaThread* thread) { 367 JavaThreadState thread_state = thread->thread_state(); 368 assert(thread_state == _thread_in_native, "Assumed thread_in_native"); 369 } 370 #endif 371 372 // cleanup_interruptible reverses the effects of setup_interruptible 373 // setup_interruptible_already_blocked() does not need any cleanup. 
374 375 void os::Solaris::cleanup_interruptible(JavaThread* thread) { 376 OSThread* osthread = thread->osthread(); 377 378 ThreadStateTransition::transition(thread, _thread_blocked, osthread->saved_interrupt_thread_state()); 379 } 380 381 // I/O interruption related counters called in _INTERRUPTIBLE 382 383 void os::Solaris::bump_interrupted_before_count() { 384 RuntimeService::record_interrupted_before_count(); 385 } 386 387 void os::Solaris::bump_interrupted_during_count() { 388 RuntimeService::record_interrupted_during_count(); 389 } 390 391 static int _processors_online = 0; 392 393 jint os::Solaris::_os_thread_limit = 0; 394 volatile jint os::Solaris::_os_thread_count = 0; 395 396 julong os::available_memory() { 397 return Solaris::available_memory(); 398 } 399 400 julong os::Solaris::available_memory() { 401 return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size(); 402 } 403 404 julong os::Solaris::_physical_memory = 0; 405 406 julong os::physical_memory() { 407 return Solaris::physical_memory(); 408 } 409 410 static hrtime_t first_hrtime = 0; 411 static const hrtime_t hrtime_hz = 1000*1000*1000; 412 const int LOCK_BUSY = 1; 413 const int LOCK_FREE = 0; 414 const int LOCK_INVALID = -1; 415 static volatile hrtime_t max_hrtime = 0; 416 static volatile int max_hrtime_lock = LOCK_FREE; // Update counter with LSB as lock-in-progress 417 418 419 void os::Solaris::initialize_system_info() { 420 set_processor_count(sysconf(_SC_NPROCESSORS_CONF)); 421 _processors_online = sysconf (_SC_NPROCESSORS_ONLN); 422 _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE); 423 } 424 425 int os::active_processor_count() { 426 int online_cpus = sysconf(_SC_NPROCESSORS_ONLN); 427 pid_t pid = getpid(); 428 psetid_t pset = PS_NONE; 429 // Are we running in a processor set or is there any processor set around? 430 if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) { 431 uint_t pset_cpus; 432 // Query the number of cpus available to us. 
433 if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) { 434 assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check"); 435 _processors_online = pset_cpus; 436 return pset_cpus; 437 } 438 } 439 // Otherwise return number of online cpus 440 return online_cpus; 441 } 442 443 static bool find_processors_in_pset(psetid_t pset, 444 processorid_t** id_array, 445 uint_t* id_length) { 446 bool result = false; 447 // Find the number of processors in the processor set. 448 if (pset_info(pset, NULL, id_length, NULL) == 0) { 449 // Make up an array to hold their ids. 450 *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal); 451 // Fill in the array with their processor ids. 452 if (pset_info(pset, NULL, id_length, *id_array) == 0) { 453 result = true; 454 } 455 } 456 return result; 457 } 458 459 // Callers of find_processors_online() must tolerate imprecise results -- 460 // the system configuration can change asynchronously because of DR 461 // or explicit psradm operations. 462 // 463 // We also need to take care that the loop (below) terminates as the 464 // number of processors online can change between the _SC_NPROCESSORS_ONLN 465 // request and the loop that builds the list of processor ids. Unfortunately 466 // there's no reliable way to determine the maximum valid processor id, 467 // so we use a manifest constant, MAX_PROCESSOR_ID, instead. See p_online 468 // man pages, which claim the processor id set is "sparse, but 469 // not too sparse". MAX_PROCESSOR_ID is used to ensure that we eventually 470 // exit the loop. 471 // 472 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's 473 // not available on S8.0. 474 475 static bool find_processors_online(processorid_t** id_array, 476 uint* id_length) { 477 const processorid_t MAX_PROCESSOR_ID = 100000 ; 478 // Find the number of processors online. 479 *id_length = sysconf(_SC_NPROCESSORS_ONLN); 480 // Make up an array to hold their ids. 
481 *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal); 482 // Processors need not be numbered consecutively. 483 long found = 0; 484 processorid_t next = 0; 485 while (found < *id_length && next < MAX_PROCESSOR_ID) { 486 processor_info_t info; 487 if (processor_info(next, &info) == 0) { 488 // NB, PI_NOINTR processors are effectively online ... 489 if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) { 490 (*id_array)[found] = next; 491 found += 1; 492 } 493 } 494 next += 1; 495 } 496 if (found < *id_length) { 497 // The loop above didn't identify the expected number of processors. 498 // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN) 499 // and re-running the loop, above, but there's no guarantee of progress 500 // if the system configuration is in flux. Instead, we just return what 501 // we've got. Note that in the worst case find_processors_online() could 502 // return an empty set. (As a fall-back in the case of the empty set we 503 // could just return the ID of the current processor). 504 *id_length = found ; 505 } 506 507 return true; 508 } 509 510 static bool assign_distribution(processorid_t* id_array, 511 uint id_length, 512 uint* distribution, 513 uint distribution_length) { 514 // We assume we can assign processorid_t's to uint's. 515 assert(sizeof(processorid_t) == sizeof(uint), 516 "can't convert processorid_t to uint"); 517 // Quick check to see if we won't succeed. 518 if (id_length < distribution_length) { 519 return false; 520 } 521 // Assign processor ids to the distribution. 522 // Try to shuffle processors to distribute work across boards, 523 // assuming 4 processors per board. 524 const uint processors_per_board = ProcessDistributionStride; 525 // Find the maximum processor id. 526 processorid_t max_id = 0; 527 for (uint m = 0; m < id_length; m += 1) { 528 max_id = MAX2(max_id, id_array[m]); 529 } 530 // The next id, to limit loops. 
531 const processorid_t limit_id = max_id + 1; 532 // Make up markers for available processors. 533 bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal); 534 for (uint c = 0; c < limit_id; c += 1) { 535 available_id[c] = false; 536 } 537 for (uint a = 0; a < id_length; a += 1) { 538 available_id[id_array[a]] = true; 539 } 540 // Step by "boards", then by "slot", copying to "assigned". 541 // NEEDS_CLEANUP: The assignment of processors should be stateful, 542 // remembering which processors have been assigned by 543 // previous calls, etc., so as to distribute several 544 // independent calls of this method. What we'd like is 545 // It would be nice to have an API that let us ask 546 // how many processes are bound to a processor, 547 // but we don't have that, either. 548 // In the short term, "board" is static so that 549 // subsequent distributions don't all start at board 0. 550 static uint board = 0; 551 uint assigned = 0; 552 // Until we've found enough processors .... 553 while (assigned < distribution_length) { 554 // ... find the next available processor in the board. 555 for (uint slot = 0; slot < processors_per_board; slot += 1) { 556 uint try_id = board * processors_per_board + slot; 557 if ((try_id < limit_id) && (available_id[try_id] == true)) { 558 distribution[assigned] = try_id; 559 available_id[try_id] = false; 560 assigned += 1; 561 break; 562 } 563 } 564 board += 1; 565 if (board * processors_per_board + 0 >= limit_id) { 566 board = 0; 567 } 568 } 569 if (available_id != NULL) { 570 FREE_C_HEAP_ARRAY(bool, available_id, mtInternal); 571 } 572 return true; 573 } 574 575 void os::set_native_thread_name(const char *name) { 576 // Not yet implemented. 577 return; 578 } 579 580 bool os::distribute_processes(uint length, uint* distribution) { 581 bool result = false; 582 // Find the processor id's of all the available CPUs. 
583 processorid_t* id_array = NULL; 584 uint id_length = 0; 585 // There are some races between querying information and using it, 586 // since processor sets can change dynamically. 587 psetid_t pset = PS_NONE; 588 // Are we running in a processor set? 589 if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) { 590 result = find_processors_in_pset(pset, &id_array, &id_length); 591 } else { 592 result = find_processors_online(&id_array, &id_length); 593 } 594 if (result == true) { 595 if (id_length >= length) { 596 result = assign_distribution(id_array, id_length, distribution, length); 597 } else { 598 result = false; 599 } 600 } 601 if (id_array != NULL) { 602 FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal); 603 } 604 return result; 605 } 606 607 bool os::bind_to_processor(uint processor_id) { 608 // We assume that a processorid_t can be stored in a uint. 609 assert(sizeof(uint) == sizeof(processorid_t), 610 "can't convert uint to processorid_t"); 611 int bind_result = 612 processor_bind(P_LWPID, // bind LWP. 613 P_MYID, // bind current LWP. 614 (processorid_t) processor_id, // id. 615 NULL); // don't return old binding. 616 return (bind_result == 0); 617 } 618 619 bool os::getenv(const char* name, char* buffer, int len) { 620 char* val = ::getenv( name ); 621 if ( val == NULL 622 || strlen(val) + 1 > len ) { 623 if (len > 0) buffer[0] = 0; // return a null string 624 return false; 625 } 626 strcpy( buffer, val ); 627 return true; 628 } 629 630 631 // Return true if user is running as root. 
// Returns true when the process runs with elevated (setuid/setgid)
// privileges, i.e. real and effective ids differ. Result is computed once
// and cached. NOTE(review): the name suggests "running as root" (see the
// comment above), but the check is actually for differing real/effective
// ids -- a plain root login returns false here.
bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}


void os::init_system_properties_values() {
  char arch[12];
  sysinfo(SI_ARCHITECTURE, arch, sizeof(arch));

  // The next steps are taken in the product version:
  //
  // Obtain the JAVA_HOME value from the location of libjvm.so.
  // This library should be located at:
  // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
  //
  // If "/jre/lib/" appears at the right place in the path, then we
  // assume libjvm.so is installed in a JDK and we use this path.
  //
  // Otherwise exit with message: "Could not create the Java virtual machine."
  //
  // The following extra steps are taken in the debugging version:
  //
  // If "/jre/lib/" does NOT appear at the right place in the path
  // instead of exit check for $JAVA_HOME environment variable.
  //
  // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
  // then we append a fake suffix "hotspot/libjvm.so" to this path so
  // it looks like libjvm.so is installed there
  // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
  //
  // Otherwise exit.
  //
  // Important note: if the location of libjvm.so changes this
  // code needs to be changed accordingly.

  // The next few definitions allow the code to be verbatim:
  // (malloc/free are redirected to the VM's C heap; #undef'd at the end.)
#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n), mtInternal)
#define free(p) FREE_C_HEAP_ARRAY(char, p, mtInternal)
#define getenv(n) ::getenv(n)

#define EXTENSIONS_DIR "/lib/ext"
#define ENDORSED_DIR "/lib/endorsed"
#define COMMON_DIR "/usr/jdk/packages"

  {
    /* sysclasspath, java_home, dll_dir */
    {
      char *home_path;
      char *dll_path;
      char *pslash;
      char buf[MAXPATHLEN];
      os::jvm_path(buf, sizeof(buf));

      // Found the full path to libjvm.so.
      // Now cut the path to <java_home>/jre if we can.
      // Each strrchr/'\0' pair strips one trailing path component in place.
      *(strrchr(buf, '/')) = '\0'; /* get rid of /libjvm.so */
      pslash = strrchr(buf, '/');
      if (pslash != NULL)
        *pslash = '\0'; /* get rid of /{client|server|hotspot} */
      dll_path = malloc(strlen(buf) + 1);
      if (dll_path == NULL)
        return;
      strcpy(dll_path, buf);
      Arguments::set_dll_dir(dll_path);

      if (pslash != NULL) {
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0'; /* get rid of /<arch> */
          pslash = strrchr(buf, '/');
          if (pslash != NULL)
            *pslash = '\0'; /* get rid of /lib */
        }
      }

      home_path = malloc(strlen(buf) + 1);
      if (home_path == NULL)
        return;
      strcpy(home_path, buf);
      Arguments::set_java_home(home_path);

      if (!set_boot_path('/', ':'))
        return;
    }

    /*
     * Where to look for native libraries
     */
    {
      // Use dlinfo() to determine the correct java.library.path.
      //
      // If we're launched by the Java launcher, and the user
      // does not set java.library.path explicitly on the commandline,
      // the Java launcher sets LD_LIBRARY_PATH for us and unsets
      // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64. In this case
      // dlinfo returns LD_LIBRARY_PATH + crle settings (including
      // /usr/lib), which is exactly what we want.
      //
      // If the user does set java.library.path, it completely
      // overwrites this setting, and always has.
      //
      // If we're not launched by the Java launcher, we may
      // get here with any/all of the LD_LIBRARY_PATH[_32|64]
      // settings. Again, dlinfo does exactly what we want.

      Dl_serinfo _info, *info = &_info;
      Dl_serpath *path;
      char* library_path;
      char *common_path;
      int i;

      // determine search path count and required buffer size
      if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
        vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
      }

      // allocate new buffer and initialize
      info = (Dl_serinfo*)malloc(_info.dls_size);
      if (info == NULL) {
        vm_exit_out_of_memory(_info.dls_size, OOM_MALLOC_ERROR,
                              "init_system_properties_values info");
      }
      info->dls_size = _info.dls_size;
      info->dls_cnt = _info.dls_cnt;

      // obtain search path information
      if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
        free(info);
        vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
      }

      path = &info->dls_serpath[0];

      // Note: Due to a legacy implementation, most of the library path
      // is set in the launcher. This was to accomodate linking restrictions
      // on legacy Solaris implementations (which are no longer supported).
      // Eventually, all the library path setting will be done here.
      //
      // However, to prevent the proliferation of improperly built native
      // libraries, the new path component /usr/jdk/packages is added here.

      // Determine the actual CPU architecture.
      char cpu_arch[12];
      sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
#ifdef _LP64
      // If we are a 64-bit vm, perform the following translations:
      //   sparc   -> sparcv9
      //   i386    -> amd64
      if (strcmp(cpu_arch, "sparc") == 0)
        strcat(cpu_arch, "v9");
      else if (strcmp(cpu_arch, "i386") == 0)
        strcpy(cpu_arch, "amd64");
#endif

      // Construct the invariant part of ld_library_path. Note that the
      // space for the colon and the trailing null are provided by the
      // nulls included by the sizeof operator.
      size_t bufsize = sizeof(COMMON_DIR) + sizeof("/lib/") + strlen(cpu_arch);
      common_path = malloc(bufsize);
      if (common_path == NULL) {
        free(info);
        vm_exit_out_of_memory(bufsize, OOM_MALLOC_ERROR,
                              "init_system_properties_values common_path");
      }
      sprintf(common_path, COMMON_DIR "/lib/%s", cpu_arch);

      // struct size is more than sufficient for the path components obtained
      // through the dlinfo() call, so only add additional space for the path
      // components explicitly added here.
      bufsize = info->dls_size + strlen(common_path);
      library_path = malloc(bufsize);
      if (library_path == NULL) {
        free(info);
        free(common_path);
        vm_exit_out_of_memory(bufsize, OOM_MALLOC_ERROR,
                              "init_system_properties_values library_path");
      }
      library_path[0] = '\0';

      // Construct the desired Java library path from the linker's library
      // search path.
      //
      // For compatibility, it is optimal that we insert the additional path
      // components specific to the Java VM after those components specified
      // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
      // infrastructure.
      if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it
        strcpy(library_path, common_path);
      } else {
        int inserted = 0;
        for (i = 0; i < info->dls_cnt; i++, path++) {
          uint_t flags = path->dls_flags & LA_SER_MASK;
          // Insert common_path just before the first non-LD_LIBRARY_PATH entry.
          if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
            strcat(library_path, common_path);
            strcat(library_path, os::path_separator());
            inserted = 1;
          }
          strcat(library_path, path->dls_name);
          strcat(library_path, os::path_separator());
        }
        // eliminate trailing path separator
        library_path[strlen(library_path)-1] = '\0';
      }

      // happens before argument parsing - can't use a trace flag
      // tty->print_raw("init_system_properties_values: native lib path: ");
      // tty->print_raw_cr(library_path);

      // callee copies into its own buffer
      Arguments::set_library_path(library_path);

      free(common_path);
      free(library_path);
      free(info);
    }

    /*
     * Extensions directories.
     *
     * Note that the space for the colon and the trailing null are provided
     * by the nulls included by the sizeof operator (so actually one byte more
     * than necessary is allocated).
     */
    {
      char *buf = (char *) malloc(strlen(Arguments::get_java_home()) +
          sizeof(EXTENSIONS_DIR) + sizeof(COMMON_DIR) +
          sizeof(EXTENSIONS_DIR));
      sprintf(buf, "%s" EXTENSIONS_DIR ":" COMMON_DIR EXTENSIONS_DIR,
          Arguments::get_java_home());
      Arguments::set_ext_dirs(buf);
    }

    /* Endorsed standards default directory.
 */
  {
    char * buf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
    sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
    Arguments::set_endorsed_dirs(buf);
  }
  }

#undef malloc
#undef free
#undef getenv
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
#undef COMMON_DIR

}

// Trap into the debugger (expands to a platform-specific breakpoint trap).
void os::breakpoint() {
  BREAKPOINT;
}

// Returns true for launcher options that this VM no longer supports and
// should be rejected/ignored during argument processing.
// NOTE(review): the "-Xt" prefix test also matches "-Xtm", so the second
// branch is redundant but harmless.
bool os::obsolete_option(const JavaVMOption *option)
{
  if (!strncmp(option->optionString, "-Xt", 3)) {
    return true;
  } else if (!strncmp(option->optionString, "-Xtm", 4)) {
    return true;
  } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
    return true;
  } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
    return true;
  }
  return false;
}

// Returns true iff sp lies strictly inside the thread's stack range
// [stack_base - stack_size, stack_base).  Stacks grow downward on Solaris,
// so stackEnd is the low address.
bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
  address stackStart = (address)thread->stack_base();
  address stackEnd = (address)(stackStart - (address)thread->stack_size());
  if (sp < stackStart && sp >= stackEnd ) return true;
  return false;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

// Thread id of the initial (primordial) thread; set during VM startup.
static thread_t main_thread;

// Thread start routine for all new Java threads
extern "C" void* java_start(void* thread_addr) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  int prio;      // NOTE(review): shadowed by the inner 'prio' below; unused here
  Thread* thread = (Thread*)thread_addr;
  OSThread* osthr = thread->osthread();

  osthr->set_lwp_id( _lwp_self() );  // Store lwp in case we are bound
  thread->_schedctl = (void *) schedctl_init () ;

  // Record the NUMA locality group of this thread, if NUMA is enabled.
  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // If the creator called set priority before we started,
  // we need to call set_native_priority now that we have an lwp.
  // We used to get the priority from thr_getprio (we called
  // thr_setprio way back in create_thread) and pass it to
  // set_native_priority, but Solaris scales the priority
  // in java_to_os_priority, so when we read it back here,
  // we pass trash to set_native_priority instead of what's
  // in java_to_os_priority. So we save the native priority
  // in the osThread and recall it here.

  if ( osthr->thread_id() != -1 ) {
    if ( UseThreadPriorities ) {
      int prio = osthr->native_priority();
      if (ThreadPriorityVerbose) {
        tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
                      INTPTR_FORMAT ", setting priority: %d\n",
                      osthr->thread_id(), osthr->lwp_id(), prio);
      }
      os::set_native_priority(thread, prio);
    }
  } else if (ThreadPriorityVerbose) {
    warning("Can't set priority in _start routine, thread id hasn't been set\n");
  }

  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  // initialize signal mask for this thread
  os::Solaris::hotspot_sigmask(thread);

  // Run the Thread's work method; returns when the thread is done.
  thread->run();

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::dec code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec(&os::Solaris::_os_thread_count);
  }

  if (UseDetachedThreads) {
    thr_exit(NULL);
    ShouldNotReachHere();
  }
  return NULL;
}

// Allocate and populate an OSThread for a thread that already exists at the
// OS level (the attaching/primordial case): records thread id, lwp id and
// schedctl handle, and leaves the state at INITIALIZED.
static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) return NULL;

  // Store info on the Solaris thread into the OSThread
  osthread->set_thread_id(thread_id);
  osthread->set_lwp_id(_lwp_self());
  thread->_schedctl = (void *) schedctl_init () ;

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  if ( ThreadPriorityVerbose ) {
    tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
                  osthread->thread_id(), osthread->lwp_id() );
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  return osthread;
}

void
os::Solaris::hotspot_sigmask(Thread* thread) {

  // Save caller's signal mask so it can be restored in os::free_thread().
  sigset_t sigmask;
  thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
  OSThread *osthread = thread->osthread();
  osthread->set_caller_sigmask(sigmask);

  // Unblock the signals the VM must always be able to receive.
  thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
      thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// Hook up an OSThread for a thread that attached to the VM from native
// code (JNI AttachCurrentThread).  Returns false on allocation failure.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  OSThread* osthread = create_os_thread(thread, thr_self());
  if (osthread == NULL) {
    return false;
  }

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);
  thread->set_osthread(osthread);

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Solaris::hotspot_sigmask(thread);

  return true;
}

// Hook up an OSThread for the primordial thread (whose thread_t was
// captured earlier in 'main_thread').  Returns false on allocation failure.
bool os::create_main_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  if (_starting_thread == NULL) {
    _starting_thread = create_os_thread(thread, main_thread);
    if (_starting_thread == NULL) {
      return false;
    }
  }

  // The primordial thread is runnable from the start
  _starting_thread->set_state(RUNNABLE);

  thread->set_osthread(_starting_thread);

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Solaris::hotspot_sigmask(thread);

  return true;
}

// _T2_libthread is true if we believe we are running with the newer
// SunSoft lwp/libthread.so (2.8 patch, 2.9 default)
bool os::Solaris::_T2_libthread = false;

// Create a new native thread for 'thread'.  The thread is created THR_SUSPENDED
// and is started later via os::pd_start_thread().  Returns false on failure,
// in which case all partially-allocated state is cleaned up.
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  if ( ThreadPriorityVerbose ) {
    char *thrtyp;
    switch ( thr_type ) {
      case vm_thread:
        thrtyp = (char *)"vm";
        break;
      case cgc_thread:
        thrtyp = (char *)"cgc";
        break;
      case pgc_thread:
        thrtyp = (char *)"pgc";
        break;
      case java_thread:
        thrtyp = (char *)"java";
        break;
      case compiler_thread:
        thrtyp = (char *)"compiler";
        break;
      case watcher_thread:
        thrtyp = (char *)"watcher";
        break;
      default:
        thrtyp = (char *)"unknown";
        break;
    }
    tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
  }

  // Calculate stack size if it's not specified by caller.
  if (stack_size == 0) {
    // The default stack size 1M (2M for LP64).
    stack_size = (BytesPerWord >> 2) * K * K;

    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
      if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }
  stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
    // We got lots of threads. Check if we still have some address space left.
    // Need to be at least 5Mb of unreserved address space. We do check by
    // trying to reserve some.
    const size_t VirtualMemoryBangSize = 20*K*K;
    char* mem = os::reserve_memory(VirtualMemoryBangSize);
    if (mem == NULL) {
      delete osthread;
      return false;
    } else {
      // Release the memory again
      os::release_memory(mem, VirtualMemoryBangSize);
    }
  }

  // Setup osthread because the child thread may need it.
  thread->set_osthread(osthread);

  // Create the Solaris thread
  // explicit THR_BOUND for T2_libthread case in case
  // that assumption is not accurate, but our alternate signal stack
  // handling is based on it which must have bound threads
  thread_t tid = 0;
  long flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
               | ((UseBoundThreads || os::Solaris::T2_libthread() ||
                   (thr_type == vm_thread) ||
                   (thr_type == cgc_thread) ||
                   (thr_type == pgc_thread) ||
                   (thr_type == compiler_thread && BackgroundCompilation)) ?
                  THR_BOUND : 0);
  int status;

  // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
  //
  // On multiprocessors systems, libthread sometimes under-provisions our
  // process with LWPs. On a 30-way systems, for instance, we could have
  // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
  // to our process. This can result in under utilization of PEs.
  // I suspect the problem is related to libthread's LWP
  // pool management and to the kernel's SIGBLOCKING "last LWP parked"
  // upcall policy.
  //
  // The following code is palliative -- it attempts to ensure that our
  // process has sufficient LWPs to take advantage of multiple PEs.
  // Proper long-term cures include using user-level threads bound to LWPs
  // (THR_BOUND) or using LWP-based synchronization. Note that there is a
  // slight timing window with respect to sampling _os_thread_count, but
  // the race is benign. Also, we should periodically recompute
  // _processors_online as the min of SC_NPROCESSORS_ONLN and the
  // the number of PEs in our partition. You might be tempted to use
  // THR_NEW_LWP here, but I'd recommend against it as that could
  // result in undesirable growth of the libthread's LWP pool.
  // The fix below isn't sufficient; for instance, it doesn't take into count
  // LWPs parked on IO. It does, however, help certain CPU-bound benchmarks.
  //
  // Some pathologies this scheme doesn't handle:
  // * Threads can block, releasing the LWPs. The LWPs can age out.
  //   When a large number of threads become ready again there aren't
  //   enough LWPs available to service them. This can occur when the
  //   number of ready threads oscillates.
  // * LWPs/Threads park on IO, thus taking the LWP out of circulation.
  //
  // Finally, we should call thr_setconcurrency() periodically to refresh
  // the LWP pool and thwart the LWP age-out mechanism.
  // The "+3" term provides a little slop -- we want to slightly overprovision.

  if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
    if (!(flags & THR_BOUND)) {
      thr_setconcurrency (os::Solaris::_os_thread_count);       // avoid starvation
    }
  }
  // Although this doesn't hurt, we should warn of undefined behavior
  // when using unbound T1 threads with schedctl(). This should never
  // happen, as the compiler and VM threads are always created bound
  DEBUG_ONLY(
    if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
        (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
        ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
         (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
      warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
    }
  );


  // Mark that we don't have an lwp or thread id yet.
  // In case we attempt to set the priority before the thread starts.
  osthread->set_lwp_id(-1);
  osthread->set_thread_id(-1);

  // Create the thread suspended; java_start() fills in lwp id and priority.
  status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
  if (status != 0) {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("os::create_thread");
    }
    thread->set_osthread(NULL);
    // Need to clean up stuff we've allocated so far
    delete osthread;
    return false;
  }

  Atomic::inc(&os::Solaris::_os_thread_count);

  // Store info on the Solaris thread into the OSThread
  osthread->set_thread_id(tid);

  // Remember that we created this thread so we can set priority on it
  osthread->set_vm_created();

  // Set the default thread priority. If using bound threads, setting
  // lwp priority will be delayed until thread start.
  set_native_priority(thread,
                      DefaultThreadPriority == -1 ?
                        java_to_os_priority[NormPriority] :
                        DefaultThreadPriority);

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  return true;
}

/* defined for >= Solaris 10. This allows builds on earlier versions
 * of Solaris to take advantage of the newly reserved Solaris JVM signals
 * With SIGJVM1, SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2
 * and -XX:+UseAltSigs does nothing since these should have no conflict
 */
#if !defined(SIGJVM1)
#define SIGJVM1 39
#define SIGJVM2 40
#endif

// Guard so signal_sets_init() is only run once (debug builds only).
debug_only(static bool signal_sets_initialized = false);
// Signal sets computed once at startup; exposed via the accessors below.
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
int os::Solaris::_SIGasync = ASYNC_SIGNAL;

// Returns true if the current disposition of 'sig' is SIG_IGN.
bool os::Solaris::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  // Pick whichever handler field is in use (SA_SIGINFO vs. plain handler).
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
                                 : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
    return true;
  else
    return false;
}

// Note: SIGRTMIN is a macro that calls sysconf() so it will
// dynamically detect SIGRTMIN value for the system at runtime, not buildtime
static bool isJVM1available() {
  return SIGJVM1 < SIGRTMIN;
}

void os::Solaris::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
  // by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
1313 // Programs embedding the VM that want to use the above signals for their 1314 // own purposes must, at this time, use the "-Xrs" option to prevent 1315 // interference with shutdown hooks and BREAK_SIGNAL thread dumping. 1316 // (See bug 4345157, and other related bugs). 1317 // In reality, though, unblocking these signals is really a nop, since 1318 // these signals are not blocked by default. 1319 sigemptyset(&unblocked_sigs); 1320 sigemptyset(&allowdebug_blocked_sigs); 1321 sigaddset(&unblocked_sigs, SIGILL); 1322 sigaddset(&unblocked_sigs, SIGSEGV); 1323 sigaddset(&unblocked_sigs, SIGBUS); 1324 sigaddset(&unblocked_sigs, SIGFPE); 1325 1326 if (isJVM1available) { 1327 os::Solaris::set_SIGinterrupt(SIGJVM1); 1328 os::Solaris::set_SIGasync(SIGJVM2); 1329 } else if (UseAltSigs) { 1330 os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL); 1331 os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL); 1332 } else { 1333 os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL); 1334 os::Solaris::set_SIGasync(ASYNC_SIGNAL); 1335 } 1336 1337 sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt()); 1338 sigaddset(&unblocked_sigs, os::Solaris::SIGasync()); 1339 1340 if (!ReduceSignalUsage) { 1341 if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) { 1342 sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL); 1343 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL); 1344 } 1345 if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) { 1346 sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL); 1347 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL); 1348 } 1349 if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) { 1350 sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL); 1351 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL); 1352 } 1353 } 1354 // Fill in signals that are blocked by all but the VM thread. 
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);

  // For diagnostics only used in run_periodic_checks
  sigemptyset(&check_signal_done);
}

// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Solaris::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}

// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Solaris::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}

// These are signals that are blocked during cond_wait to allow debugger in
sigset_t* os::Solaris::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}


// std::terminate handler: route uncaught C++ exceptions through the VM
// error reporter instead of silently aborting.
void _handle_uncaught_cxx_exception() {
  VMError err("An uncaught C++ exception");
  err.report_and_die();
}


// First crack at OS-specific initialization, from inside the new thread.
void os::initialize_thread(Thread* thr) {
  // thr_main() == 1 means we are the primordial thread; see the guarantee.
  int r = thr_main() ;
  guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
  if (r) {
    JavaThread* jt = (JavaThread *)thr;
    assert(jt != NULL,"Sanity check");
    size_t stack_size;
    address base = jt->stack_base();
    if (Arguments::created_by_java_launcher()) {
      // Use 2MB to allow for Solaris 7 64 bit mode.
      stack_size = JavaThread::stack_size_at_create() == 0
        ? 2048*K : JavaThread::stack_size_at_create();

      // There are rare cases when we may have already used more than
      // the basic stack size allotment before this method is invoked.
      // Attempt to allow for a normally sized java_stack.
      // (&stack_size is used as an approximation of the current stack pointer.)
      size_t current_stack_offset = (size_t)(base - (address)&stack_size);
      stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
    } else {
      // 6269555: If we were not created by a Java launcher, i.e. if we are
      // running embedded in a native application, treat the primordial thread
      // as much like a native attached thread as possible.  This means using
      // the current stack size from thr_stksegment(), unless it is too large
      // to reliably setup guard pages.  A reasonable max size is 8MB.
      size_t current_size = current_stack_size();
      // This should never happen, but just in case....
      if (current_size == 0) current_size = 2 * K * K;
      stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
    }
    // Round the low boundary up to a page so guard pages can be placed.
    address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());;
    stack_size = (size_t)(base - bottom);

    assert(stack_size > 0, "Stack size calculation problem");

    if (stack_size > jt->stack_size()) {
      NOT_PRODUCT(
        struct rlimit limits;
        getrlimit(RLIMIT_STACK, &limits);
        size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
        assert(size >= jt->stack_size(), "Stack size problem in main thread");
      )
      tty->print_cr(
        "Stack size of %d Kb exceeds current limit of %d Kb.\n"
        "(Stack sizes are rounded up to a multiple of the system page size.)\n"
        "See limit(1) to increase the stack size limit.",
        stack_size / K, jt->stack_size() / K);
      vm_exit(1);
    }
    assert(jt->stack_size() >= stack_size,
           "Attempt to map more stack than was allocated");
    jt->set_stack_size(stack_size);
  }

  // 5/22/01: Right now alternate signal stacks do not handle
  // throwing stack overflow exceptions, see bug 4463178
  // Until a fix is found for this, T2 will NOT imply alternate signal
  // stacks.
  // If using T2 libthread threads, install an alternate signal stack.
  // Because alternate stacks associate with LWPs on Solaris,
  // see sigaltstack(2), if using UNBOUND threads, or if UseBoundThreads
  // we prefer to explicitly stack bang.
  // If not using T2 libthread, but using UseBoundThreads any threads
  // (primordial thread, jni_attachCurrentThread) we do not create,
  // probably are not bound, therefore they can not have an alternate
  // signal stack. Since our stack banging code is generated and
  // is shared across threads, all threads must be bound to allow
  // using alternate signal stacks. The alternative is to interpose
  // on _lwp_create to associate an alt sig stack with each LWP,
  // and this could be a problem when the JVM is embedded.
  // We would prefer to use alternate signal stacks with T2
  // Since there is currently no accurate way to detect T2
  // we do not. Assuming T2 when running T1 causes sig 11s or assertions
  // on installing alternate signal stacks


  // 05/09/03: removed alternate signal stack support for Solaris
  // The alternate signal stack mechanism is no longer needed to
  // handle stack overflow. This is now handled by allocating
  // guard pages (red zone) and stackbanging.
  // Initially the alternate signal stack mechanism was removed because
  // it did not work with T1 libthread. Alternate
  // signal stacks MUST have all threads bound to lwps. Applications
  // can create their own threads and attach them without their being
  // bound under T1. This is frequently the case for the primordial thread.
  // If we were ever to reenable this mechanism we would need to
  // use the dynamic check for T2 libthread.

  os::Solaris::init_thread_fpu_state();
  std::set_terminate(_handle_uncaught_cxx_exception);
}



// Free Solaris resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "os::free_thread but osthread not set");


  // We are told to free resources of the argument thread,
  // but we can only really operate on the current thread.
  // The main thread must take the VMThread down synchronously
  // before the main thread exits and frees up CodeHeap
  guarantee((Thread::current()->osthread() == osthread
     || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask (saved by hotspot_sigmask()).
    sigset_t sigmask = osthread->caller_sigmask();
    thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
  }
  delete osthread;
}

// Resume a thread created suspended by os::create_thread().
void os::pd_start_thread(Thread* thread) {
  int status = thr_continue(thread->osthread()->thread_id());
  assert_status(status == 0, status, "thr_continue failed");
}


intx os::current_thread_id() {
  return (intx)thr_self();
}

// Cached pid; falls back to getpid() if not yet initialized.
static pid_t _initial_pid = 0;

int os::current_process_id() {
  return (int)(_initial_pid ? _initial_pid : getpid());
}

// Allocate one thread-specific-data key and return it as the TLS index.
int os::allocate_thread_local_storage() {
  // %%% in Win32 this allocates a memory segment pointed to by a
  // register. Dan Stein can implement a similar feature in
  // Solaris. Alternatively, the VM can do the same thing
  // explicitly: malloc some storage and keep the pointer in a
  // register (which is part of the thread's context) (or keep it
  // in TLS).
  // %%% In current versions of Solaris, thr_self and TSD can
  // be accessed via short sequences of displaced indirections.
  // The value of thr_self is available as %g7(36).
  // The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
  // assuming that the current thread already has a value bound to k.
  // It may be worth experimenting with such access patterns,
  // and later having the parameters formally exported from a Solaris
  // interface.  I think, however, that it will be faster to
  // maintain the invariant that %g2 always contains the
  // JavaThread in Java code, and have stubs simply
  // treat %g2 as a caller-save register, preserving it in a %lN.
  thread_key_t tk;
  if (thr_keycreate( &tk, NULL ) )
    fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
                  "(%s)", strerror(errno)));
  return int(tk);
}

void os::free_thread_local_storage(int index) {
  // %%% don't think we need anything here
  // if ( pthread_key_delete((pthread_key_t) tk) )
  //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
}

#define SMALLINT 32   // libthread allocate for tsd_common is a version specific
                      // small number - point is NO swap space available
// Bind 'value' to the TSD key 'index' for the current thread.
void os::thread_local_storage_at_put(int index, void* value) {
  // %%% this is used only in threadLocalStorage.cpp
  if (thr_setspecific((thread_key_t)index, value)) {
    if (errno == ENOMEM) {
      // thr_setspecific fails with ENOMEM when no swap space is available.
      vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
                            "thr_setspecific: out of swap space");
    } else {
      fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
                    "(%s)", strerror(errno)));
    }
  } else {
    ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;
  }
}

// This function could be called before TLS is initialized, for example, when
// VM receives an async signal or when VM causes a fatal error during
// initialization. Return NULL if thr_getspecific() fails.
void* os::thread_local_storage_at(int index) {
  // %%% this is used only in threadLocalStorage.cpp
  void* r = NULL;
  return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
}


// gethrtime can move backwards if read from one cpu and then a different cpu
// getTimeNanos is guaranteed to not move backward on Solaris
// local spinloop created as faster for a CAS on an int than
// a CAS on a 64bit jlong. Also Atomic::cmpxchg for jlong is not
// supported on sparc v8 or pre supports_cx8 intel boxes.
// oldgetTimeNanos for systems which do not support CAS on 64bit jlong
// i.e. sparc v8 and pre supports_cx8 (i486) intel boxes
inline hrtime_t oldgetTimeNanos() {
  int gotlock = LOCK_INVALID;
  hrtime_t newtime = gethrtime();

  for (;;) {
    // grab lock for max_hrtime
    int curlock = max_hrtime_lock;
    if (curlock & LOCK_BUSY)  continue;
    // NOTE: '!=' binds tighter than '=', so gotlock receives the boolean
    // result of the comparison; gotlock is not otherwise used.
    if (gotlock = Atomic::cmpxchg(LOCK_BUSY, &max_hrtime_lock, LOCK_FREE) != LOCK_FREE) continue;
    if (newtime > max_hrtime) {
      max_hrtime = newtime;
    } else {
      newtime = max_hrtime;
    }
    // release lock
    max_hrtime_lock = LOCK_FREE;
    return newtime;
  }
}
// gethrtime can move backwards if read from one cpu and then a different cpu
// getTimeNanos is guaranteed to not move backward on Solaris
inline hrtime_t getTimeNanos() {
  if (VM_Version::supports_cx8()) {
    const hrtime_t now = gethrtime();
    // Use atomic long load since 32-bit x86 uses 2 registers to keep long.
    const hrtime_t prev = Atomic::load((volatile jlong*)&max_hrtime);
    if (now <= prev)  return prev;   // same or retrograde time;
    const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
    assert(obsv >= prev, "invariant");   // Monotonicity
    // If the CAS succeeded then we're done and return "now".
    // If the CAS failed and the observed value "obs" is >= now then
    // we should return "obs".  If the CAS failed and now > obs > prv then
    // some other thread raced this thread and installed a new value, in which case
    // we could either (a) retry the entire operation, (b) retry trying to install now
    // or (c) just return obs.  We use (c).   No loop is required although in some cases
    // we might discard a higher "now" value in deference to a slightly lower but freshly
    // installed obs value.   That's entirely benign -- it admits no new orderings compared
    // to (a) or (b) -- and greatly reduces coherence traffic.
    // We might also condition (c) on the magnitude of the delta between obs and now.
    // Avoiding excessive CAS operations to hot RW locations is critical.
    // See http://blogs.sun.com/dave/entry/cas_and_cache_trivia_invalidate
    return (prev == obsv) ? now : obsv ;
  } else {
    return oldgetTimeNanos();
  }
}

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
}

jlong os::elapsed_counter() {
  return (jlong)(getTimeNanos() - first_hrtime);
}

jlong os::elapsed_frequency() {
  return hrtime_hz;
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    // For consistency return the real time from getTimeNanos()
    // converted to seconds.
    *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);

    return true;
  }
}

bool os::supports_vtime() { return true; }

// Turn on per-thread microstate accounting (PR_MSACCT) via /proc.
bool os::enable_vtime() {
  int fd = ::open("/proc/self/ctl", O_WRONLY);
  if (fd == -1)
    return false;

  long cmd[] = { PCSET, PR_MSACCT };
  int res = ::write(fd, cmd, sizeof(long) * 2);
  ::close(fd);
  if (res != sizeof(long) * 2)
    return false;

  return true;
}

// Check whether microstate accounting is currently enabled for this process.
bool os::vtime_enabled() {
  int fd = ::open("/proc/self/status", O_RDONLY);
  if (fd == -1)
    return false;

  pstatus_t status;
  int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
  ::close(fd);
  if (res != sizeof(pstatus_t))
    return false;

  return status.pr_flags & PR_MSACCT;
}

double os::elapsedVTime() {
  return (double)gethrvtime() / (double)hrtime_hz;
}

// Used internally for comparisons only
// getTimeMillis guaranteed to not move backwards on Solaris
jlong getTimeMillis() {
  jlong nanotime = getTimeNanos();
  return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
}

// Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
jlong os::javaTimeMillis() {
  timeval t;
  if (gettimeofday( &t, NULL) == -1)
    fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
  return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
}

jlong os::javaTimeNanos() {
  return (jlong)getTimeNanos();
}

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
  info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
  info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
  info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
}

// Format the current local time as "YYYY-MM-DD HH:MM:SS" into buf.
char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
1763 void os::abort(bool dump_core) { 1764 os::shutdown(); 1765 if (dump_core) { 1766 #ifndef PRODUCT 1767 fdStream out(defaultStream::output_fd()); 1768 out.print_raw("Current thread is "); 1769 char buf[16]; 1770 jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id()); 1771 out.print_raw_cr(buf); 1772 out.print_raw_cr("Dumping core ..."); 1773 #endif 1774 ::abort(); // dump core (for debugging) 1775 } 1776 1777 ::exit(1); 1778 } 1779 1780 // Die immediately, no exit hook, no abort hook, no cleanup. 1781 void os::die() { 1782 ::abort(); // dump core (for debugging) 1783 } 1784 1785 // unused 1786 void os::set_error_file(const char *logfile) {} 1787 1788 // DLL functions 1789 1790 const char* os::dll_file_extension() { return ".so"; } 1791 1792 // This must be hard coded because it's the system's temporary 1793 // directory not the java application's temp directory, ala java.io.tmpdir. 1794 const char* os::get_temp_directory() { return "/tmp"; } 1795 1796 static bool file_exists(const char* filename) { 1797 struct stat statbuf; 1798 if (filename == NULL || strlen(filename) == 0) { 1799 return false; 1800 } 1801 return os::stat(filename, &statbuf) == 0; 1802 } 1803 1804 bool os::dll_build_name(char* buffer, size_t buflen, 1805 const char* pname, const char* fname) { 1806 bool retval = false; 1807 const size_t pnamelen = pname ? strlen(pname) : 0; 1808 1809 // Return error on buffer overflow. 
1810 if (pnamelen + strlen(fname) + 10 > (size_t) buflen) { 1811 return retval; 1812 } 1813 1814 if (pnamelen == 0) { 1815 snprintf(buffer, buflen, "lib%s.so", fname); 1816 retval = true; 1817 } else if (strchr(pname, *os::path_separator()) != NULL) { 1818 int n; 1819 char** pelements = split_path(pname, &n); 1820 if (pelements == NULL) { 1821 return false; 1822 } 1823 for (int i = 0 ; i < n ; i++) { 1824 // really shouldn't be NULL but what the heck, check can't hurt 1825 if (pelements[i] == NULL || strlen(pelements[i]) == 0) { 1826 continue; // skip the empty path values 1827 } 1828 snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname); 1829 if (file_exists(buffer)) { 1830 retval = true; 1831 break; 1832 } 1833 } 1834 // release the storage 1835 for (int i = 0 ; i < n ; i++) { 1836 if (pelements[i] != NULL) { 1837 FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal); 1838 } 1839 } 1840 if (pelements != NULL) { 1841 FREE_C_HEAP_ARRAY(char*, pelements, mtInternal); 1842 } 1843 } else { 1844 snprintf(buffer, buflen, "%s/lib%s.so", pname, fname); 1845 retval = true; 1846 } 1847 return retval; 1848 } 1849 1850 // check if addr is inside libjvm.so 1851 bool os::address_is_in_vm(address addr) { 1852 static address libjvm_base_addr; 1853 Dl_info dlinfo; 1854 1855 if (libjvm_base_addr == NULL) { 1856 if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) { 1857 libjvm_base_addr = (address)dlinfo.dli_fbase; 1858 } 1859 assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm"); 1860 } 1861 1862 if (dladdr((void *)addr, &dlinfo) != 0) { 1863 if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true; 1864 } 1865 1866 return false; 1867 } 1868 1869 typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int); 1870 static dladdr1_func_type dladdr1_func = NULL; 1871 1872 bool os::dll_address_to_function_name(address addr, char *buf, 1873 int buflen, int * offset) { 1874 // buf is not optional, but offset is optional 1875 
assert(buf != NULL, "sanity check"); 1876 1877 Dl_info dlinfo; 1878 1879 // dladdr1_func was initialized in os::init() 1880 if (dladdr1_func != NULL) { 1881 // yes, we have dladdr1 1882 1883 // Support for dladdr1 is checked at runtime; it may be 1884 // available even if the vm is built on a machine that does 1885 // not have dladdr1 support. Make sure there is a value for 1886 // RTLD_DL_SYMENT. 1887 #ifndef RTLD_DL_SYMENT 1888 #define RTLD_DL_SYMENT 1 1889 #endif 1890 #ifdef _LP64 1891 Elf64_Sym * info; 1892 #else 1893 Elf32_Sym * info; 1894 #endif 1895 if (dladdr1_func((void *)addr, &dlinfo, (void **)&info, 1896 RTLD_DL_SYMENT) != 0) { 1897 // see if we have a matching symbol that covers our address 1898 if (dlinfo.dli_saddr != NULL && 1899 (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) { 1900 if (dlinfo.dli_sname != NULL) { 1901 if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) { 1902 jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname); 1903 } 1904 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr; 1905 return true; 1906 } 1907 } 1908 // no matching symbol so try for just file info 1909 if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) { 1910 if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase), 1911 buf, buflen, offset, dlinfo.dli_fname)) { 1912 return true; 1913 } 1914 } 1915 } 1916 buf[0] = '\0'; 1917 if (offset != NULL) *offset = -1; 1918 return false; 1919 } 1920 1921 // no, only dladdr is available 1922 if (dladdr((void *)addr, &dlinfo) != 0) { 1923 // see if we have a matching symbol 1924 if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) { 1925 if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) { 1926 jio_snprintf(buf, buflen, dlinfo.dli_sname); 1927 } 1928 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr; 1929 return true; 1930 } 1931 // no matching symbol so try for just file info 1932 if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) { 1933 if 
(Decoder::decode((address)(addr - (address)dlinfo.dli_fbase), 1934 buf, buflen, offset, dlinfo.dli_fname)) { 1935 return true; 1936 } 1937 } 1938 } 1939 buf[0] = '\0'; 1940 if (offset != NULL) *offset = -1; 1941 return false; 1942 } 1943 1944 bool os::dll_address_to_library_name(address addr, char* buf, 1945 int buflen, int* offset) { 1946 // buf is not optional, but offset is optional 1947 assert(buf != NULL, "sanity check"); 1948 1949 Dl_info dlinfo; 1950 1951 if (dladdr((void*)addr, &dlinfo) != 0) { 1952 if (dlinfo.dli_fname != NULL) { 1953 jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname); 1954 } 1955 if (dlinfo.dli_fbase != NULL && offset != NULL) { 1956 *offset = addr - (address)dlinfo.dli_fbase; 1957 } 1958 return true; 1959 } 1960 1961 buf[0] = '\0'; 1962 if (offset) *offset = -1; 1963 return false; 1964 } 1965 1966 // Prints the names and full paths of all opened dynamic libraries 1967 // for current process 1968 void os::print_dll_info(outputStream * st) { 1969 Dl_info dli; 1970 void *handle; 1971 Link_map *map; 1972 Link_map *p; 1973 1974 st->print_cr("Dynamic libraries:"); st->flush(); 1975 1976 if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 || 1977 dli.dli_fname == NULL) { 1978 st->print_cr("Error: Cannot print dynamic libraries."); 1979 return; 1980 } 1981 handle = dlopen(dli.dli_fname, RTLD_LAZY); 1982 if (handle == NULL) { 1983 st->print_cr("Error: Cannot print dynamic libraries."); 1984 return; 1985 } 1986 dlinfo(handle, RTLD_DI_LINKMAP, &map); 1987 if (map == NULL) { 1988 st->print_cr("Error: Cannot print dynamic libraries."); 1989 return; 1990 } 1991 1992 while (map->l_prev != NULL) 1993 map = map->l_prev; 1994 1995 while (map != NULL) { 1996 st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name); 1997 map = map->l_next; 1998 } 1999 2000 dlclose(handle); 2001 } 2002 2003 // Loads .dll/.so and 2004 // in case of error it checks if .dll/.so was built for the 2005 // same architecture as Hotspot is running on 2006 2007 void 
* os::dll_load(const char *filename, char *ebuf, int ebuflen) 2008 { 2009 void * result= ::dlopen(filename, RTLD_LAZY); 2010 if (result != NULL) { 2011 // Successful loading 2012 return result; 2013 } 2014 2015 Elf32_Ehdr elf_head; 2016 2017 // Read system error message into ebuf 2018 // It may or may not be overwritten below 2019 ::strncpy(ebuf, ::dlerror(), ebuflen-1); 2020 ebuf[ebuflen-1]='\0'; 2021 int diag_msg_max_length=ebuflen-strlen(ebuf); 2022 char* diag_msg_buf=ebuf+strlen(ebuf); 2023 2024 if (diag_msg_max_length==0) { 2025 // No more space in ebuf for additional diagnostics message 2026 return NULL; 2027 } 2028 2029 2030 int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK); 2031 2032 if (file_descriptor < 0) { 2033 // Can't open library, report dlerror() message 2034 return NULL; 2035 } 2036 2037 bool failed_to_read_elf_head= 2038 (sizeof(elf_head)!= 2039 (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ; 2040 2041 ::close(file_descriptor); 2042 if (failed_to_read_elf_head) { 2043 // file i/o error - report dlerror() msg 2044 return NULL; 2045 } 2046 2047 typedef struct { 2048 Elf32_Half code; // Actual value as defined in elf.h 2049 Elf32_Half compat_class; // Compatibility of archs at VM's sense 2050 char elf_class; // 32 or 64 bit 2051 char endianess; // MSB or LSB 2052 char* name; // String representation 2053 } arch_t; 2054 2055 static const arch_t arch_array[]={ 2056 {EM_386, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"}, 2057 {EM_486, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"}, 2058 {EM_IA_64, EM_IA_64, ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"}, 2059 {EM_X86_64, EM_X86_64, ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"}, 2060 {EM_SPARC, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"}, 2061 {EM_SPARC32PLUS, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"}, 2062 {EM_SPARCV9, EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"}, 2063 {EM_PPC, EM_PPC, ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"}, 2064 {EM_PPC64, 
EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"}, 2065 {EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"} 2066 }; 2067 2068 #if (defined IA32) 2069 static Elf32_Half running_arch_code=EM_386; 2070 #elif (defined AMD64) 2071 static Elf32_Half running_arch_code=EM_X86_64; 2072 #elif (defined IA64) 2073 static Elf32_Half running_arch_code=EM_IA_64; 2074 #elif (defined __sparc) && (defined _LP64) 2075 static Elf32_Half running_arch_code=EM_SPARCV9; 2076 #elif (defined __sparc) && (!defined _LP64) 2077 static Elf32_Half running_arch_code=EM_SPARC; 2078 #elif (defined __powerpc64__) 2079 static Elf32_Half running_arch_code=EM_PPC64; 2080 #elif (defined __powerpc__) 2081 static Elf32_Half running_arch_code=EM_PPC; 2082 #elif (defined ARM) 2083 static Elf32_Half running_arch_code=EM_ARM; 2084 #else 2085 #error Method os::dll_load requires that one of following is defined:\ 2086 IA32, AMD64, IA64, __sparc, __powerpc__, ARM, ARM 2087 #endif 2088 2089 // Identify compatability class for VM's architecture and library's architecture 2090 // Obtain string descriptions for architectures 2091 2092 arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL}; 2093 int running_arch_index=-1; 2094 2095 for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) { 2096 if (running_arch_code == arch_array[i].code) { 2097 running_arch_index = i; 2098 } 2099 if (lib_arch.code == arch_array[i].code) { 2100 lib_arch.compat_class = arch_array[i].compat_class; 2101 lib_arch.name = arch_array[i].name; 2102 } 2103 } 2104 2105 assert(running_arch_index != -1, 2106 "Didn't find running architecture code (running_arch_code) in arch_array"); 2107 if (running_arch_index == -1) { 2108 // Even though running architecture detection failed 2109 // we may still continue with reporting dlerror() message 2110 return NULL; 2111 } 2112 2113 if (lib_arch.endianess != arch_array[running_arch_index].endianess) { 2114 ::snprintf(diag_msg_buf, 
diag_msg_max_length-1," (Possible cause: endianness mismatch)"); 2115 return NULL; 2116 } 2117 2118 if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) { 2119 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)"); 2120 return NULL; 2121 } 2122 2123 if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) { 2124 if ( lib_arch.name!=NULL ) { 2125 ::snprintf(diag_msg_buf, diag_msg_max_length-1, 2126 " (Possible cause: can't load %s-bit .so on a %s-bit platform)", 2127 lib_arch.name, arch_array[running_arch_index].name); 2128 } else { 2129 ::snprintf(diag_msg_buf, diag_msg_max_length-1, 2130 " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)", 2131 lib_arch.code, 2132 arch_array[running_arch_index].name); 2133 } 2134 } 2135 2136 return NULL; 2137 } 2138 2139 void* os::dll_lookup(void* handle, const char* name) { 2140 return dlsym(handle, name); 2141 } 2142 2143 void* os::get_default_process_handle() { 2144 return (void*)::dlopen(NULL, RTLD_LAZY); 2145 } 2146 2147 int os::stat(const char *path, struct stat *sbuf) { 2148 char pathbuf[MAX_PATH]; 2149 if (strlen(path) > MAX_PATH - 1) { 2150 errno = ENAMETOOLONG; 2151 return -1; 2152 } 2153 os::native_path(strcpy(pathbuf, path)); 2154 return ::stat(pathbuf, sbuf); 2155 } 2156 2157 static bool _print_ascii_file(const char* filename, outputStream* st) { 2158 int fd = ::open(filename, O_RDONLY); 2159 if (fd == -1) { 2160 return false; 2161 } 2162 2163 char buf[32]; 2164 int bytes; 2165 while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) { 2166 st->print_raw(buf, bytes); 2167 } 2168 2169 ::close(fd); 2170 2171 return true; 2172 } 2173 2174 void os::print_os_info_brief(outputStream* st) { 2175 os::Solaris::print_distro_info(st); 2176 2177 os::Posix::print_uname_info(st); 2178 2179 os::Solaris::print_libversion_info(st); 2180 } 2181 2182 void os::print_os_info(outputStream* st) { 2183 st->print("OS:"); 2184 2185 
os::Solaris::print_distro_info(st); 2186 2187 os::Posix::print_uname_info(st); 2188 2189 os::Solaris::print_libversion_info(st); 2190 2191 os::Posix::print_rlimit_info(st); 2192 2193 os::Posix::print_load_average(st); 2194 } 2195 2196 void os::Solaris::print_distro_info(outputStream* st) { 2197 if (!_print_ascii_file("/etc/release", st)) { 2198 st->print("Solaris"); 2199 } 2200 st->cr(); 2201 } 2202 2203 void os::Solaris::print_libversion_info(outputStream* st) { 2204 if (os::Solaris::T2_libthread()) { 2205 st->print(" (T2 libthread)"); 2206 } 2207 else { 2208 st->print(" (T1 libthread)"); 2209 } 2210 st->cr(); 2211 } 2212 2213 static bool check_addr0(outputStream* st) { 2214 jboolean status = false; 2215 int fd = ::open("/proc/self/map",O_RDONLY); 2216 if (fd >= 0) { 2217 prmap_t p; 2218 while(::read(fd, &p, sizeof(p)) > 0) { 2219 if (p.pr_vaddr == 0x0) { 2220 st->print("Warning: Address: 0x%x, Size: %dK, ",p.pr_vaddr, p.pr_size/1024, p.pr_mapname); 2221 st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname); 2222 st->print("Access:"); 2223 st->print("%s",(p.pr_mflags & MA_READ) ? "r" : "-"); 2224 st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-"); 2225 st->print("%s",(p.pr_mflags & MA_EXEC) ? "x" : "-"); 2226 st->cr(); 2227 status = true; 2228 } 2229 } 2230 ::close(fd); 2231 } 2232 return status; 2233 } 2234 2235 void os::pd_print_cpu_info(outputStream* st) { 2236 // Nothing to do for now. 2237 } 2238 2239 void os::print_memory_info(outputStream* st) { 2240 st->print("Memory:"); 2241 st->print(" %dk page", os::vm_page_size()>>10); 2242 st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10); 2243 st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10); 2244 st->cr(); 2245 (void) check_addr0(st); 2246 } 2247 2248 // Taken from /usr/include/sys/machsig.h Supposed to be architecture specific 2249 // but they're the same for all the solaris architectures that we support. 
2250 const char *ill_names[] = { "ILL0", "ILL_ILLOPC", "ILL_ILLOPN", "ILL_ILLADR", 2251 "ILL_ILLTRP", "ILL_PRVOPC", "ILL_PRVREG", 2252 "ILL_COPROC", "ILL_BADSTK" }; 2253 2254 const size_t ill_names_length = (sizeof(ill_names)/sizeof(char *)); 2255 2256 const char *fpe_names[] = { "FPE0", "FPE_INTDIV", "FPE_INTOVF", "FPE_FLTDIV", 2257 "FPE_FLTOVF", "FPE_FLTUND", "FPE_FLTRES", 2258 "FPE_FLTINV", "FPE_FLTSUB" }; 2259 const size_t fpe_names_length = (sizeof(fpe_names)/sizeof(char *)); 2260 2261 const char *segv_names[] = { "SEGV0", "SEGV_MAPERR", "SEGV_ACCERR" }; 2262 const size_t segv_names_length = (sizeof(segv_names)/sizeof(char *)); 2263 2264 const char *bus_names[] = { "BUS0", "BUS_ADRALN", "BUS_ADRERR", "BUS_OBJERR" }; 2265 const size_t bus_names_length = (sizeof(bus_names)/sizeof(char *)); 2266 2267 void os::print_siginfo(outputStream* st, void* siginfo) { 2268 st->print("siginfo:"); 2269 2270 const int buflen = 100; 2271 char buf[buflen]; 2272 siginfo_t *si = (siginfo_t*)siginfo; 2273 st->print("si_signo=%s: ", os::exception_name(si->si_signo, buf, buflen)); 2274 char *err = strerror(si->si_errno); 2275 if (si->si_errno != 0 && err != NULL) { 2276 st->print("si_errno=%s", err); 2277 } else { 2278 st->print("si_errno=%d", si->si_errno); 2279 } 2280 const int c = si->si_code; 2281 assert(c > 0, "unexpected si_code"); 2282 switch (si->si_signo) { 2283 case SIGILL: 2284 st->print(", si_code=%d (%s)", c, 2285 c >= ill_names_length ? "" : ill_names[c]); 2286 st->print(", si_addr=" PTR_FORMAT, si->si_addr); 2287 break; 2288 case SIGFPE: 2289 st->print(", si_code=%d (%s)", c, 2290 c >= fpe_names_length ? "" : fpe_names[c]); 2291 st->print(", si_addr=" PTR_FORMAT, si->si_addr); 2292 break; 2293 case SIGSEGV: 2294 st->print(", si_code=%d (%s)", c, 2295 c >= segv_names_length ? "" : segv_names[c]); 2296 st->print(", si_addr=" PTR_FORMAT, si->si_addr); 2297 break; 2298 case SIGBUS: 2299 st->print(", si_code=%d (%s)", c, 2300 c >= bus_names_length ? 
"" : bus_names[c]); 2301 st->print(", si_addr=" PTR_FORMAT, si->si_addr); 2302 break; 2303 default: 2304 st->print(", si_code=%d", si->si_code); 2305 // no si_addr 2306 } 2307 2308 if ((si->si_signo == SIGBUS || si->si_signo == SIGSEGV) && 2309 UseSharedSpaces) { 2310 FileMapInfo* mapinfo = FileMapInfo::current_info(); 2311 if (mapinfo->is_in_shared_space(si->si_addr)) { 2312 st->print("\n\nError accessing class data sharing archive." \ 2313 " Mapped file inaccessible during execution, " \ 2314 " possible disk/network problem."); 2315 } 2316 } 2317 st->cr(); 2318 } 2319 2320 // Moved from whole group, because we need them here for diagnostic 2321 // prints. 2322 #define OLDMAXSIGNUM 32 2323 static int Maxsignum = 0; 2324 static int *ourSigFlags = NULL; 2325 2326 extern "C" void sigINTRHandler(int, siginfo_t*, void*); 2327 2328 int os::Solaris::get_our_sigflags(int sig) { 2329 assert(ourSigFlags!=NULL, "signal data structure not initialized"); 2330 assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range"); 2331 return ourSigFlags[sig]; 2332 } 2333 2334 void os::Solaris::set_our_sigflags(int sig, int flags) { 2335 assert(ourSigFlags!=NULL, "signal data structure not initialized"); 2336 assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range"); 2337 ourSigFlags[sig] = flags; 2338 } 2339 2340 2341 static const char* get_signal_handler_name(address handler, 2342 char* buf, int buflen) { 2343 int offset; 2344 bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset); 2345 if (found) { 2346 // skip directory names 2347 const char *p1, *p2; 2348 p1 = buf; 2349 size_t len = strlen(os::file_separator()); 2350 while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len; 2351 jio_snprintf(buf, buflen, "%s+0x%x", p1, offset); 2352 } else { 2353 jio_snprintf(buf, buflen, PTR_FORMAT, handler); 2354 } 2355 return buf; 2356 } 2357 2358 static void print_signal_handler(outputStream* st, int sig, 2359 char* buf, size_t buflen) { 
2360 struct sigaction sa; 2361 2362 sigaction(sig, NULL, &sa); 2363 2364 st->print("%s: ", os::exception_name(sig, buf, buflen)); 2365 2366 address handler = (sa.sa_flags & SA_SIGINFO) 2367 ? CAST_FROM_FN_PTR(address, sa.sa_sigaction) 2368 : CAST_FROM_FN_PTR(address, sa.sa_handler); 2369 2370 if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) { 2371 st->print("SIG_DFL"); 2372 } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) { 2373 st->print("SIG_IGN"); 2374 } else { 2375 st->print("[%s]", get_signal_handler_name(handler, buf, buflen)); 2376 } 2377 2378 st->print(", sa_mask[0]=" PTR32_FORMAT, *(uint32_t*)&sa.sa_mask); 2379 2380 address rh = VMError::get_resetted_sighandler(sig); 2381 // May be, handler was resetted by VMError? 2382 if(rh != NULL) { 2383 handler = rh; 2384 sa.sa_flags = VMError::get_resetted_sigflags(sig); 2385 } 2386 2387 st->print(", sa_flags=" PTR32_FORMAT, sa.sa_flags); 2388 2389 // Check: is it our handler? 2390 if(handler == CAST_FROM_FN_PTR(address, signalHandler) || 2391 handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) { 2392 // It is our signal handler 2393 // check for flags 2394 if(sa.sa_flags != os::Solaris::get_our_sigflags(sig)) { 2395 st->print( 2396 ", flags was changed from " PTR32_FORMAT ", consider using jsig library", 2397 os::Solaris::get_our_sigflags(sig)); 2398 } 2399 } 2400 st->cr(); 2401 } 2402 2403 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) { 2404 st->print_cr("Signal Handlers:"); 2405 print_signal_handler(st, SIGSEGV, buf, buflen); 2406 print_signal_handler(st, SIGBUS , buf, buflen); 2407 print_signal_handler(st, SIGFPE , buf, buflen); 2408 print_signal_handler(st, SIGPIPE, buf, buflen); 2409 print_signal_handler(st, SIGXFSZ, buf, buflen); 2410 print_signal_handler(st, SIGILL , buf, buflen); 2411 print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen); 2412 print_signal_handler(st, ASYNC_SIGNAL, buf, buflen); 2413 print_signal_handler(st, BREAK_SIGNAL, buf, buflen); 2414 
print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen); 2415 print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen); 2416 print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen); 2417 print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen); 2418 print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen); 2419 } 2420 2421 static char saved_jvm_path[MAXPATHLEN] = { 0 }; 2422 2423 // Find the full path to the current module, libjvm.so 2424 void os::jvm_path(char *buf, jint buflen) { 2425 // Error checking. 2426 if (buflen < MAXPATHLEN) { 2427 assert(false, "must use a large-enough buffer"); 2428 buf[0] = '\0'; 2429 return; 2430 } 2431 // Lazy resolve the path to current module. 2432 if (saved_jvm_path[0] != 0) { 2433 strcpy(buf, saved_jvm_path); 2434 return; 2435 } 2436 2437 Dl_info dlinfo; 2438 int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo); 2439 assert(ret != 0, "cannot locate libjvm"); 2440 if (ret != 0 && dlinfo.dli_fname != NULL) { 2441 realpath((char *)dlinfo.dli_fname, buf); 2442 } else { 2443 buf[0] = '\0'; 2444 return; 2445 } 2446 2447 if (Arguments::sun_java_launcher_is_altjvm()) { 2448 // Support for the java launcher's '-XXaltjvm=<path>' option. Typical 2449 // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so". 2450 // If "/jre/lib/" appears at the right place in the string, then 2451 // assume we are installed in a JDK and we're done. Otherwise, check 2452 // for a JAVA_HOME environment variable and fix up the path so it 2453 // looks like libjvm.so is installed there (append a fake suffix 2454 // hotspot/libjvm.so). 2455 const char *p = buf + strlen(buf) - 1; 2456 for (int count = 0; p > buf && count < 5; ++count) { 2457 for (--p; p > buf && *p != '/'; --p) 2458 /* empty */ ; 2459 } 2460 2461 if (strncmp(p, "/jre/lib/", 9) != 0) { 2462 // Look for JAVA_HOME in the environment. 
2463 char* java_home_var = ::getenv("JAVA_HOME"); 2464 if (java_home_var != NULL && java_home_var[0] != 0) { 2465 char cpu_arch[12]; 2466 char* jrelib_p; 2467 int len; 2468 sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch)); 2469 #ifdef _LP64 2470 // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9. 2471 if (strcmp(cpu_arch, "sparc") == 0) { 2472 strcat(cpu_arch, "v9"); 2473 } else if (strcmp(cpu_arch, "i386") == 0) { 2474 strcpy(cpu_arch, "amd64"); 2475 } 2476 #endif 2477 // Check the current module name "libjvm.so". 2478 p = strrchr(buf, '/'); 2479 assert(strstr(p, "/libjvm") == p, "invalid library name"); 2480 2481 realpath(java_home_var, buf); 2482 // determine if this is a legacy image or modules image 2483 // modules image doesn't have "jre" subdirectory 2484 len = strlen(buf); 2485 jrelib_p = buf + len; 2486 snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch); 2487 if (0 != access(buf, F_OK)) { 2488 snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch); 2489 } 2490 2491 if (0 == access(buf, F_OK)) { 2492 // Use current module name "libjvm.so" 2493 len = strlen(buf); 2494 snprintf(buf + len, buflen-len, "/hotspot/libjvm.so"); 2495 } else { 2496 // Go back to path of .so 2497 realpath((char *)dlinfo.dli_fname, buf); 2498 } 2499 } 2500 } 2501 } 2502 2503 strcpy(saved_jvm_path, buf); 2504 } 2505 2506 2507 void os::print_jni_name_prefix_on(outputStream* st, int args_size) { 2508 // no prefix required, not even "_" 2509 } 2510 2511 2512 void os::print_jni_name_suffix_on(outputStream* st, int args_size) { 2513 // no suffix required 2514 } 2515 2516 // This method is a copy of JDK's sysGetLastErrorString 2517 // from src/solaris/hpi/src/system_md.c 2518 2519 size_t os::lasterror(char *buf, size_t len) { 2520 2521 if (errno == 0) return 0; 2522 2523 const char *s = ::strerror(errno); 2524 size_t n = ::strlen(s); 2525 if (n >= len) { 2526 n = len - 1; 2527 } 2528 ::strncpy(buf, s, n); 2529 buf[n] = '\0'; 2530 return n; 2531 } 2532 2533 2534 // 
sun.misc.Signal 2535 2536 extern "C" { 2537 static void UserHandler(int sig, void *siginfo, void *context) { 2538 // Ctrl-C is pressed during error reporting, likely because the error 2539 // handler fails to abort. Let VM die immediately. 2540 if (sig == SIGINT && is_error_reported()) { 2541 os::die(); 2542 } 2543 2544 os::signal_notify(sig); 2545 // We do not need to reinstate the signal handler each time... 2546 } 2547 } 2548 2549 void* os::user_handler() { 2550 return CAST_FROM_FN_PTR(void*, UserHandler); 2551 } 2552 2553 class Semaphore : public StackObj { 2554 public: 2555 Semaphore(); 2556 ~Semaphore(); 2557 void signal(); 2558 void wait(); 2559 bool trywait(); 2560 bool timedwait(unsigned int sec, int nsec); 2561 private: 2562 sema_t _semaphore; 2563 }; 2564 2565 2566 Semaphore::Semaphore() { 2567 sema_init(&_semaphore, 0, NULL, NULL); 2568 } 2569 2570 Semaphore::~Semaphore() { 2571 sema_destroy(&_semaphore); 2572 } 2573 2574 void Semaphore::signal() { 2575 sema_post(&_semaphore); 2576 } 2577 2578 void Semaphore::wait() { 2579 sema_wait(&_semaphore); 2580 } 2581 2582 bool Semaphore::trywait() { 2583 return sema_trywait(&_semaphore) == 0; 2584 } 2585 2586 bool Semaphore::timedwait(unsigned int sec, int nsec) { 2587 struct timespec ts; 2588 unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec); 2589 2590 while (1) { 2591 int result = sema_timedwait(&_semaphore, &ts); 2592 if (result == 0) { 2593 return true; 2594 } else if (errno == EINTR) { 2595 continue; 2596 } else if (errno == ETIME) { 2597 return false; 2598 } else { 2599 return false; 2600 } 2601 } 2602 } 2603 2604 extern "C" { 2605 typedef void (*sa_handler_t)(int); 2606 typedef void (*sa_sigaction_t)(int, siginfo_t *, void *); 2607 } 2608 2609 void* os::signal(int signal_number, void* handler) { 2610 struct sigaction sigAct, oldSigAct; 2611 sigfillset(&(sigAct.sa_mask)); 2612 sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND; 2613 sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler); 2614 2615 if 
(sigaction(signal_number, &sigAct, &oldSigAct)) 2616 // -1 means registration failed 2617 return (void *)-1; 2618 2619 return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler); 2620 } 2621 2622 void os::signal_raise(int signal_number) { 2623 raise(signal_number); 2624 } 2625 2626 /* 2627 * The following code is moved from os.cpp for making this 2628 * code platform specific, which it is by its very nature. 2629 */ 2630 2631 // a counter for each possible signal value 2632 static int Sigexit = 0; 2633 static int Maxlibjsigsigs; 2634 static jint *pending_signals = NULL; 2635 static int *preinstalled_sigs = NULL; 2636 static struct sigaction *chainedsigactions = NULL; 2637 static sema_t sig_sem; 2638 typedef int (*version_getting_t)(); 2639 version_getting_t os::Solaris::get_libjsig_version = NULL; 2640 static int libjsigversion = NULL; 2641 2642 int os::sigexitnum_pd() { 2643 assert(Sigexit > 0, "signal memory not yet initialized"); 2644 return Sigexit; 2645 } 2646 2647 void os::Solaris::init_signal_mem() { 2648 // Initialize signal structures 2649 Maxsignum = SIGRTMAX; 2650 Sigexit = Maxsignum+1; 2651 assert(Maxsignum >0, "Unable to obtain max signal number"); 2652 2653 Maxlibjsigsigs = Maxsignum; 2654 2655 // pending_signals has one int per signal 2656 // The additional signal is for SIGEXIT - exit signal to signal_thread 2657 pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal); 2658 memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1))); 2659 2660 if (UseSignalChaining) { 2661 chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction) 2662 * (Maxsignum + 1), mtInternal); 2663 memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1))); 2664 preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal); 2665 memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1))); 2666 } 2667 ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 ), mtInternal); 2668 memset(ourSigFlags, 0, sizeof(int) * 
(Maxsignum + 1)); 2669 } 2670 2671 void os::signal_init_pd() { 2672 int ret; 2673 2674 ret = ::sema_init(&sig_sem, 0, NULL, NULL); 2675 assert(ret == 0, "sema_init() failed"); 2676 } 2677 2678 void os::signal_notify(int signal_number) { 2679 int ret; 2680 2681 Atomic::inc(&pending_signals[signal_number]); 2682 ret = ::sema_post(&sig_sem); 2683 assert(ret == 0, "sema_post() failed"); 2684 } 2685 2686 static int check_pending_signals(bool wait_for_signal) { 2687 int ret; 2688 while (true) { 2689 for (int i = 0; i < Sigexit + 1; i++) { 2690 jint n = pending_signals[i]; 2691 if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) { 2692 return i; 2693 } 2694 } 2695 if (!wait_for_signal) { 2696 return -1; 2697 } 2698 JavaThread *thread = JavaThread::current(); 2699 ThreadBlockInVM tbivm(thread); 2700 2701 bool threadIsSuspended; 2702 do { 2703 thread->set_suspend_equivalent(); 2704 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self() 2705 while((ret = ::sema_wait(&sig_sem)) == EINTR) 2706 ; 2707 assert(ret == 0, "sema_wait() failed"); 2708 2709 // were we externally suspended while we were waiting? 2710 threadIsSuspended = thread->handle_special_suspend_equivalent_condition(); 2711 if (threadIsSuspended) { 2712 // 2713 // The semaphore has been incremented, but while we were waiting 2714 // another thread suspended us. We don't want to continue running 2715 // while suspended because that would surprise the thread that 2716 // suspended us. 
        //
        ret = ::sema_post(&sig_sem);
        assert(ret == 0, "sema_post() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

// Non-blocking check for a pending signal.
int os::signal_lookup() {
  return check_pending_signals(false);
}

// Block until a signal is pending.
int os::signal_wait() {
  return check_pending_signals(true);
}

////////////////////////////////////////////////////////////////////////////////
// Virtual Memory

// System page size, cached by os::init(); -1 means "not yet initialized".
static int page_size = -1;

// The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
// clear this var if support is not available.
static bool has_map_align = true;

int os::vm_page_size() {
  assert(page_size != -1, "must call os::init");
  return page_size;
}

// Solaris allocates memory by pages.
int os::vm_allocation_granularity() {
  assert(page_size != -1, "must call os::init");
  return page_size;
}

// Returns true if the mmap() errno is one the caller can recover from,
// false if the reserved mapping may have been destroyed.
static bool recoverable_mmap_error(int err) {
  // See if the error is one we can let the caller handle. This
  // list of errno values comes from the Solaris mmap(2) man page.
  switch (err) {
  case EBADF:
  case EINVAL:
  case ENOTSUP:
    // let the caller deal with these errors
    return true;

  default:
    // Any remaining errors on this OS can cause our reserved mapping
    // to be lost. That can cause confusion where different data
    // structures think they have the same memory mapped. The worst
    // scenario is if both the VM and a library think they have the
    // same memory mapped.
    return false;
  }
}

// Warn about a failed commit (overload without an alignment hint).
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
          strerror(err), err);
}

// Warn about a failed commit (overload with an alignment hint).
static void warn_fail_commit_memory(char* addr, size_t bytes,
                                    size_t alignment_hint, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
          alignment_hint, exec, strerror(err), err);
}

// Commit previously reserved pages by remapping them read/write (and
// executable if requested) over the PROT_NONE reservation.
// Returns 0 on success, otherwise the errno from mmap(); exits the VM
// for non-recoverable mmap errors.
int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  size_t size = bytes;
  char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
  if (res != NULL) {
    if (UseNUMAInterleaving) {
      numa_make_global(addr, bytes);
    }
    return 0;
  }

  int err = errno;  // save errno from mmap() call in mmap_chunk()

  if (!recoverable_mmap_error(err)) {
    warn_fail_commit_memory(addr, bytes, exec, err);
    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
  }

  return err;
}

bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
}

// Commit memory; any failure (even a recoverable one) exits the VM with
// the caller-supplied message.
void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
  if (err != 0) {
    // the caller wants all commit errors to exit with the specified mesg:
    warn_fail_commit_memory(addr, bytes, exec, err);
    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
  }
}

// As above, but additionally advises the kernel of a preferred page size
// (MPSS) derived from the alignment hint when large pages are in use.
int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
                                    size_t alignment_hint,
                                    bool exec) {
  int err = Solaris::commit_memory_impl(addr, bytes, exec);
  if (err == 0) {
    if (UseLargePages && (alignment_hint > (size_t)vm_page_size())) {
      // If the large page size has been set and the VM
      // is using large pages, use the large page size
      // if it is smaller than the alignment hint. This is
      // a case where the VM wants to use a larger alignment size
      // for its own reasons but still want to use large pages
      // (which is what matters to setting the mpss range).
      size_t page_size = 0;
      if (large_page_size() < alignment_hint) {
        assert(UseLargePages, "Expected to be here for large page use only");
        page_size = large_page_size();
      } else {
        // If the alignment hint is less than the large page
        // size, the VM wants a particular alignment (thus the hint)
        // for internal reasons. Try to set the mpss range using
        // the alignment_hint.
        page_size = alignment_hint;
      }
      // Since this is a hint, ignore any failures.
      (void)Solaris::setup_large_pages(addr, bytes, page_size);
    }
  }
  return err;
}

bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
                          bool exec) {
  return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
}

// Commit with alignment hint; any failure exits with the supplied message.
void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
  if (err != 0) {
    // the caller wants all commit errors to exit with the specified mesg:
    warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
  }
}

// Uncommit the pages in a specified region.
2873 void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) { 2874 if (madvise(addr, bytes, MADV_FREE) < 0) { 2875 debug_only(warning("MADV_FREE failed.")); 2876 return; 2877 } 2878 } 2879 2880 bool os::pd_create_stack_guard_pages(char* addr, size_t size) { 2881 return os::commit_memory(addr, size, !ExecMem); 2882 } 2883 2884 bool os::remove_stack_guard_pages(char* addr, size_t size) { 2885 return os::uncommit_memory(addr, size); 2886 } 2887 2888 // Change the page size in a given range. 2889 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { 2890 assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned."); 2891 assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned."); 2892 if (UseLargePages) { 2893 Solaris::setup_large_pages(addr, bytes, alignment_hint); 2894 } 2895 } 2896 2897 // Tell the OS to make the range local to the first-touching LWP 2898 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { 2899 assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned."); 2900 if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) { 2901 debug_only(warning("MADV_ACCESS_LWP failed.")); 2902 } 2903 } 2904 2905 // Tell the OS that this range would be accessed from different LWPs. 2906 void os::numa_make_global(char *addr, size_t bytes) { 2907 assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned."); 2908 if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) { 2909 debug_only(warning("MADV_ACCESS_MANY failed.")); 2910 } 2911 } 2912 2913 // Get the number of the locality groups. 2914 size_t os::numa_get_groups_num() { 2915 size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie()); 2916 return n != -1 ? n : 1; 2917 } 2918 2919 // Get a list of leaf locality groups. A leaf lgroup is group that 2920 // doesn't have any children. Typical leaf group is a CPU or a CPU/memory 2921 // board. An LWP is assigned to one of these groups upon creation. 
2922 size_t os::numa_get_leaf_groups(int *ids, size_t size) { 2923 if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) { 2924 ids[0] = 0; 2925 return 1; 2926 } 2927 int result_size = 0, top = 1, bottom = 0, cur = 0; 2928 for (int k = 0; k < size; k++) { 2929 int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur], 2930 (Solaris::lgrp_id_t*)&ids[top], size - top); 2931 if (r == -1) { 2932 ids[0] = 0; 2933 return 1; 2934 } 2935 if (!r) { 2936 // That's a leaf node. 2937 assert (bottom <= cur, "Sanity check"); 2938 // Check if the node has memory 2939 if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur], 2940 NULL, 0, LGRP_RSRC_MEM) > 0) { 2941 ids[bottom++] = ids[cur]; 2942 } 2943 } 2944 top += r; 2945 cur++; 2946 } 2947 if (bottom == 0) { 2948 // Handle a situation, when the OS reports no memory available. 2949 // Assume UMA architecture. 2950 ids[0] = 0; 2951 return 1; 2952 } 2953 return bottom; 2954 } 2955 2956 // Detect the topology change. Typically happens during CPU plugging-unplugging. 2957 bool os::numa_topology_changed() { 2958 int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie()); 2959 if (is_stale != -1 && is_stale) { 2960 Solaris::lgrp_fini(Solaris::lgrp_cookie()); 2961 Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER); 2962 assert(c != 0, "Failure to initialize LGRP API"); 2963 Solaris::set_lgrp_cookie(c); 2964 return true; 2965 } 2966 return false; 2967 } 2968 2969 // Get the group id of the current LWP. 2970 int os::numa_get_group_id() { 2971 int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID); 2972 if (lgrp_id == -1) { 2973 return 0; 2974 } 2975 const int size = os::numa_get_groups_num(); 2976 int *ids = (int*)alloca(size * sizeof(int)); 2977 2978 // Get the ids of all lgroups with memory; r is the count. 
2979 int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id, 2980 (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM); 2981 if (r <= 0) { 2982 return 0; 2983 } 2984 return ids[os::random() % r]; 2985 } 2986 2987 // Request information about the page. 2988 bool os::get_page_info(char *start, page_info* info) { 2989 const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE }; 2990 uint64_t addr = (uintptr_t)start; 2991 uint64_t outdata[2]; 2992 uint_t validity = 0; 2993 2994 if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) { 2995 return false; 2996 } 2997 2998 info->size = 0; 2999 info->lgrp_id = -1; 3000 3001 if ((validity & 1) != 0) { 3002 if ((validity & 2) != 0) { 3003 info->lgrp_id = outdata[0]; 3004 } 3005 if ((validity & 4) != 0) { 3006 info->size = outdata[1]; 3007 } 3008 return true; 3009 } 3010 return false; 3011 } 3012 3013 // Scan the pages from start to end until a page different than 3014 // the one described in the info parameter is encountered. 
// Scan [start, end) in batches of MAX_MEMINFO_CNT pages, querying meminfo(2)
// for each page's lgroup and size. Returns the address of the first page
// that differs from *page_expected (filling *page_found), end if the whole
// range matches, or NULL on error / invalid address.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  const size_t types = sizeof(info_types) / sizeof(info_types[0]);
  uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
  uint_t validity[MAX_MEMINFO_CNT];

  // Step by the larger of the expected page size and the base page size.
  size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
  uint64_t p = (uint64_t)start;
  while (p < (uint64_t)end) {
    addrs[0] = p;
    size_t addrs_count = 1;
    while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
      addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
      addrs_count++;
    }

    if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
      return NULL;
    }

    // Find the first page in the batch that does not match the expectation.
    size_t i = 0;
    for (; i < addrs_count; i++) {
      if ((validity[i] & 1) != 0) {
        if ((validity[i] & 4) != 0) {
          if (outdata[types * i + 1] != page_expected->size) {
            break;
          }
        } else
          if (page_expected->size != 0) {
            break;
          }

        if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
          if (outdata[types * i] != page_expected->lgrp_id) {
            break;
          }
        }
      } else {
        return NULL;
      }
    }

    if (i < addrs_count) {
      // Mismatch found: report what was actually there.
      if ((validity[i] & 2) != 0) {
        page_found->lgrp_id = outdata[types * i];
      } else {
        page_found->lgrp_id = -1;
      }
      if ((validity[i] & 4) != 0) {
        page_found->size = outdata[types * i + 1];
      } else {
        page_found->size = 0;
      }
      return (char*)addrs[i];
    }

    p = addrs[addrs_count - 1] + page_size;
  }
  return end;
}

bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  size_t size = bytes;
  // Map uncommitted pages PROT_NONE so we fail early if we touch an
  // uncommitted page. Otherwise, the read/write might succeed if we
  // have enough swap space to back the physical page.
  return
    NULL != Solaris::mmap_chunk(addr, size,
                                MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
                                PROT_NONE);
}

// Thin wrapper over mmap() of /dev/zero; returns NULL instead of MAP_FAILED.
char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
  char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);

  if (b == MAP_FAILED) {
    return NULL;
  }
  return b;
}

// Reserve (but do not commit) an anonymous mapping. 'fixed' forces the
// exact address; otherwise a non-trivial alignment_hint is passed to the
// kernel via MAP_ALIGN when available.
char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
  char* addr = requested_addr;
  int flags = MAP_PRIVATE | MAP_NORESERVE;

  assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");

  if (fixed) {
    flags |= MAP_FIXED;
  } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
    // With MAP_ALIGN the address argument carries the alignment, not a hint.
    flags |= MAP_ALIGN;
    addr = (char*) alignment_hint;
  }

  // Map uncommitted pages PROT_NONE so we fail early if we touch an
  // uncommitted page. Otherwise, the read/write might succeed if we
  // have enough swap space to back the physical page.
  return mmap_chunk(addr, bytes, flags, PROT_NONE);
}

char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
  char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));

  guarantee(requested_addr == NULL || requested_addr == addr,
            "OS failed to return requested mmap address.");
  return addr;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).

char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  const int max_tries = 10;
  char* base[max_tries];
  size_t size[max_tries];

  // Solaris adds a gap between mmap'ed regions. The size of the gap
  // is dependent on the requested size and the MMU. Our initial gap
  // value here is just a guess and will be corrected later.
  bool had_top_overlap = false;
  bool have_adjusted_gap = false;
  size_t gap = 0x400000;

  // Assert only that the size is a multiple of the page size, since
  // that's all that mmap requires, and since that's all we really know
  // about at this low abstraction level. If we need higher alignment,
  // we can either pass an alignment to this method or verify alignment
  // in one of the methods further up the call chain. See bug 5044738.
  assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");

  // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
  // Give it a try, if the kernel honors the hint we can return immediately.
  char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);

  volatile int err = errno;
  if (addr == requested_addr) {
    return addr;
  } else if (addr != NULL) {
    pd_unmap_memory(addr, bytes);
  }

  if (PrintMiscellaneous && Verbose) {
    char buf[256];
    buf[0] = '\0';
    if (addr == NULL) {
      jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
    }
    warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
            PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
            "%s", bytes, requested_addr, addr, buf);
  }

  // Address hint method didn't work.  Fall back to the old method.
  // In theory, once SNV becomes our oldest supported platform, this
  // code will no longer be needed.
  //
  // Repeatedly allocate blocks until the block is allocated at the
  // right spot. Give up after max_tries.
  int i;
  for (i = 0; i < max_tries; ++i) {
    base[i] = reserve_memory(bytes);

    if (base[i] != NULL) {
      // Is this the block we wanted?
      if (base[i] == requested_addr) {
        size[i] = bytes;
        break;
      }

      // check that the gap value is right
      if (had_top_overlap && !have_adjusted_gap) {
        size_t actual_gap = base[i-1] - base[i] - bytes;
        if (gap != actual_gap) {
          // adjust the gap value and retry the last 2 allocations
          assert(i > 0, "gap adjustment code problem");
          have_adjusted_gap = true;  // adjust the gap only once, just in case
          gap = actual_gap;
          if (PrintMiscellaneous && Verbose) {
            warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
          }
          unmap_memory(base[i], bytes);
          unmap_memory(base[i-1], size[i-1]);
          i-=2;
          continue;
        }
      }

      // Does this overlap the block we wanted? Give back the overlapped
      // parts and try again.
      //
      // There is still a bug in this code: if top_overlap == bytes,
      // the overlap is offset from requested region by the value of gap.
      // In this case giving back the overlapped part will not work,
      // because we'll give back the entire block at base[i] and
      // therefore the subsequent allocation will not generate a new gap.
      // This could be fixed with a new algorithm that used larger
      // or variable size chunks to find the requested region -
      // but such a change would introduce additional complications.
      // It's rare enough that the planets align for this bug,
      // so we'll just wait for a fix for 6204603/5003415 which
      // will provide a mmap flag to allow us to avoid this business.

      // NOTE(review): top_overlap/bottom_overlap are unsigned, so the
      // '>= 0' comparisons below are always true (dead code). Behavior is
      // still correct: a "negative" overlap wraps to a huge value and
      // fails the '< bytes' test.
      size_t top_overlap = requested_addr + (bytes + gap) - base[i];
      if (top_overlap >= 0 && top_overlap < bytes) {
        had_top_overlap = true;
        unmap_memory(base[i], top_overlap);
        base[i] += top_overlap;
        size[i] = bytes - top_overlap;
      } else {
        size_t bottom_overlap = base[i] + bytes - requested_addr;
        if (bottom_overlap >= 0 && bottom_overlap < bytes) {
          if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
            warning("attempt_reserve_memory_at: possible alignment bug");
          }
          unmap_memory(requested_addr, bottom_overlap);
          size[i] = bytes - bottom_overlap;
        } else {
          size[i] = bytes;
        }
      }
    }
  }

  // Give back the unused reserved pieces.

  for (int j = 0; j < i; ++j) {
    if (base[j] != NULL) {
      unmap_memory(base[j], size[j]);
    }
  }

  return (i < max_tries) ? requested_addr : NULL;
}

bool os::pd_release_memory(char* addr, size_t bytes) {
  size_t size = bytes;
  return munmap(addr, size) == 0;
}

// Apply 'prot' to a page-aligned range; returns true on success.
static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
  assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
         "addr must be page aligned");
  int retVal = mprotect(addr, bytes, prot);
  return retVal == 0;
}

// Protect memory (Used to pass readonly pages through
// JNI GetArray<type>Elements with empty arrays.)
// Also, used for serialization page and for compressed oops null pointer
// checking.
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PROT_NONE; break;
  case MEM_PROT_READ: p = PROT_READ; break;
  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
  default:
    ShouldNotReachHere();
  }
  // is_committed is unused.
  return solaris_mprotect(addr, bytes, p);
}

// guard_memory and unguard_memory only happens within stack guard pages.
// Since ISM pertains only to the heap, guard and unguard memory should not
// happen with an ISM region.
bool os::guard_memory(char* addr, size_t bytes) {
  return solaris_mprotect(addr, bytes, PROT_NONE);
}

bool os::unguard_memory(char* addr, size_t bytes) {
  return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
}

// Large page support
static size_t _large_page_size = 0;

// Insertion sort for small arrays (descending order).
static void insertion_sort_descending(size_t* array, int len) {
  for (int i = 0; i < len; i++) {
    size_t val = array[i];
    for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
      size_t tmp = array[key];
      array[key] = array[key - 1];
      array[key - 1] = tmp;
    }
  }
}

// Probe the OS for supported MPSS page sizes and fill os::_page_sizes
// (descending, 0-terminated, capped at usable_count entries and at 4M /
// LargePageSizeInBytes). Sets *page_size to the largest usable size.
// Returns false if MPSS is unavailable or only one page size exists.
bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
  const unsigned int usable_count = VM_Version::page_size_count();
  if (usable_count == 1) {
    return false;
  }

  // Find the right getpagesizes interface.  When solaris 11 is the minimum
  // build platform, getpagesizes() (without the '2') can be called directly.
  typedef int (*gps_t)(size_t[], int);
  gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
  if (gps_func == NULL) {
    gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
    if (gps_func == NULL) {
      if (warn) {
        warning("MPSS is not supported by the operating system.");
      }
      return false;
    }
  }

  // Fill the array of page sizes.
  int n = (*gps_func)(_page_sizes, page_sizes_max);
  assert(n > 0, "Solaris bug?");

  if (n == page_sizes_max) {
    // Add a sentinel value (necessary only if the array was completely filled
    // since it is static (zeroed at initialization)).
    _page_sizes[--n] = 0;
    DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
  }
  assert(_page_sizes[n] == 0, "missing sentinel");
  trace_page_sizes("available page sizes", _page_sizes, n);

  if (n == 1) return false;     // Only one page size available.

  // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
  // select up to usable_count elements.  First sort the array, find the first
  // acceptable value, then copy the usable sizes to the top of the array and
  // trim the rest.  Make sure to include the default page size :-).
  //
  // A better policy could get rid of the 4M limit by taking the sizes of the
  // important VM memory regions (java heap and possibly the code cache) into
  // account.
  insertion_sort_descending(_page_sizes, n);
  const size_t size_limit =
    FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
  int beg;
  for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
  const int end = MIN2((int)usable_count, n) - 1;
  for (int cur = 0; cur < end; ++cur, ++beg) {
    _page_sizes[cur] = _page_sizes[beg];
  }
  _page_sizes[end] = vm_page_size();
  _page_sizes[end + 1] = 0;

  if (_page_sizes[end] > _page_sizes[end - 1]) {
    // Default page size is not the smallest; sort again.
    insertion_sort_descending(_page_sizes, end + 1);
  }
  *page_size = _page_sizes[0];

  trace_page_sizes("usable page sizes", _page_sizes, end + 1);
  return true;
}

void os::large_page_init() {
  if (UseLargePages) {
    // print a warning if any large page related flag is specified on command line
    bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                           !FLAG_IS_DEFAULT(LargePageSizeInBytes);

    UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
  }
}

// Advise the kernel (MPSS) to use pages of size 'align' for the range.
// Returns false if the memcntl() advice call failed.
bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
  // Signal to OS that we want large pages for addresses
  // from addr, addr + bytes
  struct memcntl_mha mpss_struct;
  mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
  mpss_struct.mha_pagesize = align;
  mpss_struct.mha_flags = 0;
  // Upon successful completion, memcntl() returns 0
  if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
    debug_only(warning("Attempt to use MPSS failed."));
    return false;
  }
  return true;
}

char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
  fatal("os::reserve_memory_special should not be called on Solaris.");
  return NULL;
}

bool os::release_memory_special(char* base, size_t bytes) {
  fatal("os::release_memory_special should not be called on Solaris.");
  return false;
}

size_t os::large_page_size() {
  return _large_page_size;
}

// MPSS allows application to commit large page memory on demand; with ISM
// the entire memory region must be allocated as shared memory.
bool os::can_commit_large_page_memory() {
  return true;
}

bool os::can_execute_large_page_memory() {
  return true;
}

// Read calls from inside the vm need to perform state transitions
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
}

// As os::read(), but without the VM thread-state transition.
size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
}

// Un-interruptible sleep for less than one second.
void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptable sleep, short time use only");

  // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
  // Solaris requires -lrt for this.
  usleep((ms * 1000));

  return;
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    ::sleep(100);   // ... 100 seconds at a time
  }
}

// Used to convert frequent JVM_Yield() to nops
bool os::dont_yield() {
  if (DontYieldALot) {
    // Suppress the yield if the last one was less than
    // DontYieldALotInterval milliseconds ago.
    // NOTE(review): 'last_time' is an unsynchronized function-local static;
    // concurrent callers may race on it — confirm this is tolerated.
    static hrtime_t last_time = 0;
    hrtime_t diff = getTimeNanos() - last_time;

    if (diff < DontYieldALotInterval * 1000000)
      return true;

    last_time += diff;

    return false;
  }
  else {
    return false;
  }
}

// Caveat: Solaris os::yield() causes a thread-state transition whereas
// the linux and win32 implementations do not. This should be checked.

void os::yield() {
  // Yields to all threads with same or greater priority
  os::sleep(Thread::current(), 0, false);
}

// Note that yield semantics are defined by the scheduling class to which
// the thread currently belongs.  Typically, yield will _not yield to
// other equal or higher priority threads that reside on the dispatch queues
// of other CPUs.

os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }


// On Solaris we found that yield_all doesn't always yield to all other threads.
// There have been cases where there is a thread ready to execute but it doesn't
// get an lwp as the VM thread continues to spin with sleeps of 1 millisecond.
// The 1 millisecond wait doesn't seem long enough for the kernel to issue a
// SIGWAITING signal which will cause a new lwp to be created. So we count the
// number of times yield_all is called in the one loop and increase the sleep
// time after 8 attempts. If this fails too we increase the concurrency level
// so that the starving thread would get an lwp

void os::yield_all(int attempts) {
  // Yields to all threads, including threads with lower priorities
  if (attempts == 0) {
    os::sleep(Thread::current(), 1, false);
  } else {
    int iterations = attempts % 30;
    if (iterations == 0 && !os::Solaris::T2_libthread()) {
      // thr_setconcurrency and _getconcurrency make sense only under T1.
      int noofLWPS = thr_getconcurrency();
      if (noofLWPS < (Threads::number_of_threads() + 2)) {
        thr_setconcurrency(thr_getconcurrency() + 1);
      }
    } else if (iterations < 25) {
      os::sleep(Thread::current(), 1, false);
    } else {
      // Longer sleep to give the kernel a chance to create a new lwp.
      os::sleep(Thread::current(), 10, false);
    }
  }
}

// Called from the tight loops to possibly influence time-sharing heuristics
void os::loop_breaker(int attempts) {
  os::yield_all(attempts);
}


// Interface for setting lwp priorities.  If we are using T2 libthread,
// which forces the use of BoundThreads or we manually set UseBoundThreads,
// all of our threads will be assigned to real lwp's.  Using the thr_setprio
// function is meaningless in this mode so we must adjust the real lwp's priority.
// The routines below implement the getting and setting of lwp priorities.
//
// Note: There are three priority scales used on Solaris.  Java priorities
// which range from 1 to 10, libthread "thr_setprio" scale which range
// from 0 to 127, and the current scheduling class of the process we
// are running in.  This is typically from -60 to +60.
// The setting of the lwp priorities is done after a call to thr_setprio
// so Java priorities are mapped to libthread priorities and we map from
// the latter to lwp priorities.  We don't keep priorities stored in
// Java priorities since some of our worker threads want to set priorities
// higher than all Java threads.
//
// For related information:
// (1)  man -s 2 priocntl
// (2)  man -s 4 priocntl
// (3)  man dispadmin
// =    librt.so
// =    libthread/common/rtsched.c - thrp_setlwpprio().
// =    ps -cL <pid> ... to validate priority.
// =    sched_get_priority_min and _max
//              pthread_create
//              sched_setparam
//              pthread_setschedparam
//
// Assumptions:
// +    We assume that all threads in the process belong to the same
//              scheduling class.   IE. an homogenous process.
// +    Must be root or in IA group to change the "interactive" attribute.
//              Priocntl() will fail silently.  The only indication of failure is when
//              we read-back the value and notice that it hasn't changed.
// +    Interactive threads enter the runq at the head, non-interactive at the tail.
// +    For RT, change timeslice as well.  Invariant:
//              constant "priority integral"
//              Konst == TimeSlice * (60-Priority)
//              Given a priority, compute appropriate timeslice.
// +    Higher numerical values have higher priority.
3555 3556 // sched class attributes 3557 typedef struct { 3558 int schedPolicy; // classID 3559 int maxPrio; 3560 int minPrio; 3561 } SchedInfo; 3562 3563 3564 static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits; 3565 3566 #ifdef ASSERT 3567 static int ReadBackValidate = 1; 3568 #endif 3569 static int myClass = 0; 3570 static int myMin = 0; 3571 static int myMax = 0; 3572 static int myCur = 0; 3573 static bool priocntl_enable = false; 3574 3575 static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4 3576 static int java_MaxPriority_to_os_priority = 0; // Saved mapping 3577 3578 3579 // lwp_priocntl_init 3580 // 3581 // Try to determine the priority scale for our process. 3582 // 3583 // Return errno or 0 if OK. 3584 // 3585 static int lwp_priocntl_init () { 3586 int rslt; 3587 pcinfo_t ClassInfo; 3588 pcparms_t ParmInfo; 3589 int i; 3590 3591 if (!UseThreadPriorities) return 0; 3592 3593 // We are using Bound threads, we need to determine our priority ranges 3594 if (os::Solaris::T2_libthread() || UseBoundThreads) { 3595 // If ThreadPriorityPolicy is 1, switch tables 3596 if (ThreadPriorityPolicy == 1) { 3597 for (i = 0 ; i < CriticalPriority+1; i++) 3598 os::java_to_os_priority[i] = prio_policy1[i]; 3599 } 3600 if (UseCriticalJavaThreadPriority) { 3601 // MaxPriority always maps to the FX scheduling class and criticalPrio. 3602 // See set_native_priority() and set_lwp_class_and_priority(). 3603 // Save original MaxPriority mapping in case attempt to 3604 // use critical priority fails. 
3605 java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority]; 3606 // Set negative to distinguish from other priorities 3607 os::java_to_os_priority[MaxPriority] = -criticalPrio; 3608 } 3609 } 3610 // Not using Bound Threads, set to ThreadPolicy 1 3611 else { 3612 for ( i = 0 ; i < CriticalPriority+1; i++ ) { 3613 os::java_to_os_priority[i] = prio_policy1[i]; 3614 } 3615 return 0; 3616 } 3617 3618 // Get IDs for a set of well-known scheduling classes. 3619 // TODO-FIXME: GETCLINFO returns the current # of classes in the 3620 // the system. We should have a loop that iterates over the 3621 // classID values, which are known to be "small" integers. 3622 3623 strcpy(ClassInfo.pc_clname, "TS"); 3624 ClassInfo.pc_cid = -1; 3625 rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); 3626 if (rslt < 0) return errno; 3627 assert(ClassInfo.pc_cid != -1, "cid for TS class is -1"); 3628 tsLimits.schedPolicy = ClassInfo.pc_cid; 3629 tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri; 3630 tsLimits.minPrio = -tsLimits.maxPrio; 3631 3632 strcpy(ClassInfo.pc_clname, "IA"); 3633 ClassInfo.pc_cid = -1; 3634 rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); 3635 if (rslt < 0) return errno; 3636 assert(ClassInfo.pc_cid != -1, "cid for IA class is -1"); 3637 iaLimits.schedPolicy = ClassInfo.pc_cid; 3638 iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri; 3639 iaLimits.minPrio = -iaLimits.maxPrio; 3640 3641 strcpy(ClassInfo.pc_clname, "RT"); 3642 ClassInfo.pc_cid = -1; 3643 rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); 3644 if (rslt < 0) return errno; 3645 assert(ClassInfo.pc_cid != -1, "cid for RT class is -1"); 3646 rtLimits.schedPolicy = ClassInfo.pc_cid; 3647 rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri; 3648 rtLimits.minPrio = 0; 3649 3650 strcpy(ClassInfo.pc_clname, "FX"); 3651 ClassInfo.pc_cid = -1; 3652 rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); 3653 if (rslt < 0) return errno; 
3654 assert(ClassInfo.pc_cid != -1, "cid for FX class is -1"); 3655 fxLimits.schedPolicy = ClassInfo.pc_cid; 3656 fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri; 3657 fxLimits.minPrio = 0; 3658 3659 // Query our "current" scheduling class. 3660 // This will normally be IA, TS or, rarely, FX or RT. 3661 memset(&ParmInfo, 0, sizeof(ParmInfo)); 3662 ParmInfo.pc_cid = PC_CLNULL; 3663 rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo); 3664 if (rslt < 0) return errno; 3665 myClass = ParmInfo.pc_cid; 3666 3667 // We now know our scheduling classId, get specific information 3668 // about the class. 3669 ClassInfo.pc_cid = myClass; 3670 ClassInfo.pc_clname[0] = 0; 3671 rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo); 3672 if (rslt < 0) return errno; 3673 3674 if (ThreadPriorityVerbose) { 3675 tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname); 3676 } 3677 3678 memset(&ParmInfo, 0, sizeof(pcparms_t)); 3679 ParmInfo.pc_cid = PC_CLNULL; 3680 rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo); 3681 if (rslt < 0) return errno; 3682 3683 if (ParmInfo.pc_cid == rtLimits.schedPolicy) { 3684 myMin = rtLimits.minPrio; 3685 myMax = rtLimits.maxPrio; 3686 } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) { 3687 iaparms_t *iaInfo = (iaparms_t*)ParmInfo.pc_clparms; 3688 myMin = iaLimits.minPrio; 3689 myMax = iaLimits.maxPrio; 3690 myMax = MIN2(myMax, (int)iaInfo->ia_uprilim); // clamp - restrict 3691 } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) { 3692 tsparms_t *tsInfo = (tsparms_t*)ParmInfo.pc_clparms; 3693 myMin = tsLimits.minPrio; 3694 myMax = tsLimits.maxPrio; 3695 myMax = MIN2(myMax, (int)tsInfo->ts_uprilim); // clamp - restrict 3696 } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) { 3697 fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms; 3698 myMin = fxLimits.minPrio; 3699 myMax = fxLimits.maxPrio; 3700 myMax = MIN2(myMax, (int)fxInfo->fx_uprilim); // clamp - restrict 3701 } 
else {
  // No clue - punt
  if (ThreadPriorityVerbose)
    tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
  return EINVAL;      // no clue, punt
}

if (ThreadPriorityVerbose) {
  tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
}

priocntl_enable = true;  // Enable changing priorities
return 0;
}

// Convenience accessors for the scheduling-class-specific portion
// (pc_clparms) of a pcparms_t, cast to the appropriate per-class type.
#define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
#define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
#define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
#define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))


// scale_to_lwp_priority
//
// Convert from the libthread "thr_setprio" scale (0..127) to our current
// lwp scheduling class scale [rMin..rMax].
//
static
int     scale_to_lwp_priority (int rMin, int rMax, int x)
{
  int v;

  if (x == 127) return rMax;            // avoid round-down
    v = (((x*(rMax-rMin)))/128)+rMin;
  return v;
}


// set_lwp_class_and_priority
//
// Set the class and priority of the lwp.  This call should only
// be made when using bound threads (T2 threads are bound by default).
//
// ThreadID  - thread pointer, used only for verbose logging
// lwpid     - target lwp; <= 0 means the lwp has not started yet (deferred)
// newPrio   - priority on the thr_setprio 0..127 scale (or raw, if !scale)
// new_class - Solaris scheduling class id (RT/IA/TS/FX) to switch to
// scale     - if true, newPrio is rescaled via scale_to_lwp_priority()
//
// Returns 0 on success, or an errno / EINVAL value on failure.
int set_lwp_class_and_priority(int ThreadID, int lwpid,
                               int newPrio, int new_class, bool scale) {
  int rslt;
  // Actual/Expected are only consumed in the #ifdef ASSERT readback below.
  // NOTE(review): 'prv' appears unused in this function.
  int Actual, Expected, prv;
  pcparms_t ParmInfo;                   // for GET-SET
#ifdef ASSERT
  pcparms_t ReadBack;                   // for readback
#endif

  // Set priority via PC_GETPARMS, update, PC_SETPARMS
  // Query current values.
  // TODO: accelerate this by eliminating the PC_GETPARMS call.
  // Cache "pcparms_t" in global ParmCache.
  // TODO: elide set-to-same-value

  // If something went wrong on init, don't change priorities.
  if ( !priocntl_enable ) {
    if (ThreadPriorityVerbose)
      tty->print_cr("Trying to set priority but init failed, ignoring");
    return EINVAL;
  }

  // If lwp hasn't started yet, just return
  // the _start routine will call us again.
  if ( lwpid <= 0 ) {
    if (ThreadPriorityVerbose) {
      tty->print_cr ("deferring the set_lwp_class_and_priority of thread "
                     INTPTR_FORMAT " to %d, lwpid not set",
                     ThreadID, newPrio);
    }
    return 0;
  }

  if (ThreadPriorityVerbose) {
    tty->print_cr ("set_lwp_class_and_priority("
                   INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
                   ThreadID, lwpid, newPrio);
  }

  // Read the lwp's current scheduling parameters before modifying them.
  memset(&ParmInfo, 0, sizeof(pcparms_t));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
  if (rslt < 0) return errno;

  int cur_class = ParmInfo.pc_cid;
  ParmInfo.pc_cid = (id_t)new_class;

  // Fill in the class-specific parameter block for the requested class.
  // For IA/TS/FX the user priority limit (uprilim) is only raised when the
  // lwp is actually changing class; otherwise it is left unchanged and used
  // to clamp the new priority.
  if (new_class == rtLimits.schedPolicy) {
    rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
    rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
                                                       rtLimits.maxPrio, newPrio)
                               : newPrio;
    rtInfo->rt_tqsecs  = RT_NOCHANGE;
    rtInfo->rt_tqnsecs = RT_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
    }
  } else if (new_class == iaLimits.schedPolicy) {
    iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
    int maxClamped     = MIN2(iaLimits.maxPrio,
                              cur_class == new_class
                                ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
    iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
                                                       maxClamped, newPrio)
                               : newPrio;
    iaInfo->ia_uprilim = cur_class == new_class
                           ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
    iaInfo->ia_mode    = IA_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("IA: [%d...%d] %d->%d\n",
                    iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
    }
  } else if (new_class == tsLimits.schedPolicy) {
    tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
    int maxClamped     = MIN2(tsLimits.maxPrio,
                              cur_class == new_class
                                ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
    tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
                                                       maxClamped, newPrio)
                               : newPrio;
    tsInfo->ts_uprilim = cur_class == new_class
                           ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
    if (ThreadPriorityVerbose) {
      tty->print_cr("TS: [%d...%d] %d->%d\n",
                    tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
    }
  } else if (new_class == fxLimits.schedPolicy) {
    fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
    int maxClamped     = MIN2(fxLimits.maxPrio,
                              cur_class == new_class
                                ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
    fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
                                                       maxClamped, newPrio)
                               : newPrio;
    fxInfo->fx_uprilim = cur_class == new_class
                           ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
    fxInfo->fx_tqsecs  = FX_NOCHANGE;
    fxInfo->fx_tqnsecs = FX_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("FX: [%d...%d] %d->%d\n",
                    fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
    }
  } else {
    if (ThreadPriorityVerbose) {
      tty->print_cr("Unknown new scheduling class %d\n", new_class);
    }
    return EINVAL;    // no clue, punt
  }

  rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
  if (ThreadPriorityVerbose && rslt) {
    tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
  }
  if (rslt < 0) return errno;

#ifdef ASSERT
  // Sanity check: read back what we just attempted to set.
  // In theory it could have changed in the interim ...
  //
  // The priocntl system call is tricky.
  // Sometimes it'll validate the priority value argument and
  // return EINVAL if unhappy.  At other times it fails silently.
  // Readbacks are prudent.

  if (!ReadBackValidate) return 0;

  memset(&ReadBack, 0, sizeof(pcparms_t));
  ReadBack.pc_cid = PC_CLNULL;
  rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
  assert(rslt >= 0, "priocntl failed");
  Actual = Expected = 0xBAD;
  assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
    Actual   = RTPRI(ReadBack)->rt_pri;
    Expected = RTPRI(ParmInfo)->rt_pri;
  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
    Actual   = IAPRI(ReadBack)->ia_upri;
    Expected = IAPRI(ParmInfo)->ia_upri;
  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
    Actual   = TSPRI(ReadBack)->ts_upri;
    Expected = TSPRI(ParmInfo)->ts_upri;
  } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
    Actual   = FXPRI(ReadBack)->fx_upri;
    Expected = FXPRI(ParmInfo)->fx_upri;
  } else {
    if (ThreadPriorityVerbose) {
      tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
                    ParmInfo.pc_cid);
    }
  }

  if (Actual != Expected) {
    if (ThreadPriorityVerbose) {
      tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
                     lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
    }
  }
#endif

  return 0;
}

// Solaris only gives access to 128 real priorities at a time,
// so we expand Java's ten to fill this range.  This would be better
// if we dynamically adjusted relative priorities.
//
// The ThreadPriorityPolicy option allows us to select 2 different
// priority scales.
//
// ThreadPriorityPolicy=0
// Since the Solaris' default priority is MaximumPriority, we do not
// set a priority lower than Max unless a priority lower than
// NormPriority is requested.
//
// ThreadPriorityPolicy=1
// This mode causes the priority table to get filled with
// linear values.  NormPriority gets mapped to 50% of the
// Maximum priority and so on.  This will cause VM threads
// to get unfair treatment against other Solaris processes
// which do not explicitly alter their thread priorities.
//

// Index 0 is unused; indices 1..10 are Java priorities; index 11 is the
// internal CriticalPriority.  Entries are on the thr_setprio 0..127 scale;
// the critical entry is stored negated so it can be recognized later.
int os::java_to_os_priority[CriticalPriority + 1] = {
  -99999,         // 0 Entry should never be used

       0,         // 1 MinPriority
      32,         // 2
      64,         // 3

      96,         // 4
     127,         // 5 NormPriority
     127,         // 6

     127,         // 7
     127,         // 8
     127,         // 9 NearMaxPriority

     127,         // 10 MaxPriority

   -criticalPrio  // 11 CriticalPriority
};

// Apply 'newpri' (an os::java_to_os_priority value) to the thread via
// thr_setprio and, for bound/T2 threads, also at the lwp level.  A negated
// critical priority requests the FX class; if that fails we fall back to
// MaxPriority in the current class.
OSReturn os::set_native_priority(Thread* thread, int newpri) {
  OSThread* osthread = thread->osthread();

  // Save requested priority in case the thread hasn't been started
  osthread->set_native_priority(newpri);

  // Check for critical priority request
  bool fxcritical = false;
  if (newpri == -criticalPrio) {
    fxcritical = true;
    newpri = criticalPrio;
  }

  assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
  if (!UseThreadPriorities) return OS_OK;

  int status = 0;

  if (!fxcritical) {
    // Use thr_setprio only if we have a priority that thr_setprio understands
    status = thr_setprio(thread->osthread()->thread_id(), newpri);
  }

  if (os::Solaris::T2_libthread() ||
      (UseBoundThreads && osthread->is_vm_created())) {
    int lwp_status =
            set_lwp_class_and_priority(osthread->thread_id(),
                                       osthread->lwp_id(),
                                       newpri,
                                       fxcritical ? fxLimits.schedPolicy : myClass,
                                       !fxcritical);
    if (lwp_status != 0 && fxcritical) {
      // Try again, this time without changing the scheduling class
      newpri = java_MaxPriority_to_os_priority;
      lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
                                              osthread->lwp_id(),
                                              newpri, myClass, false);
    }
    status |= lwp_status;
  }
  return (status == 0) ? OS_OK : OS_ERR;
}


// Retrieve the thread's current libthread priority into *priority_ptr.
// Returns NormalPriority when priorities are disabled.
OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
  int p;
  if ( !UseThreadPriorities ) {
    *priority_ptr = NormalPriority;
    return OS_OK;
  }
  int status = thr_getprio(thread->osthread()->thread_id(), &p);
  if (status != 0) {
    return OS_ERR;
  }
  *priority_ptr = p;
  return OS_OK;
}


// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {
  schedctl_start(schedctl_init());
}

// Drop the ucontext recorded while the thread was suspended.
static void resume_clear_context(OSThread *osthread) {
  osthread->set_ucontext(NULL);
}

// Record the ucontext captured at suspension time so others
// (e.g. PcFetcher below) can inspect it.
static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
  osthread->set_ucontext(context);
}

// Handshake semaphore between do_suspend()/do_resume() and SR_handler().
static Semaphore sr_semaphore;

// Signal handler side of the cooperative suspend/resume protocol.
// Runs in the context of the target thread when it receives SIGasync.
// If a suspend was requested, parks the thread in sigsuspend() until the
// state machine transitions back to RUNNING.
void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  OSThread* osthread = thread->osthread();
  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");

  os::SuspendResume::State current = osthread->sr.state();
  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
    suspend_save_context(osthread, uc);

    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
    os::SuspendResume::State state = osthread->sr.suspended();
    if (state == os::SuspendResume::SR_SUSPENDED) {
      sigset_t suspend_set;  // signals for sigsuspend()

      // get current set of blocked signals and unblock resume signal
      thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
      sigdelset(&suspend_set, os::Solaris::SIGasync());

      sr_semaphore.signal();
      // wait here until we are resumed
      while (1) {
        sigsuspend(&suspend_set);

        os::SuspendResume::State result = osthread->sr.running();
        if (result == os::SuspendResume::SR_RUNNING) {
          sr_semaphore.signal();
          break;
        }
      }

    } else if (state == os::SuspendResume::SR_RUNNING) {
      // request was cancelled, continue
    } else {
      ShouldNotReachHere();
    }

    resume_clear_context(osthread);
  } else if (current == os::SuspendResume::SR_RUNNING) {
    // request was cancelled, continue
  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
    // ignore
  } else {
    // ignore
  }

  errno = old_errno;
}

void os::print_statistics() {
}

// Write a framed title/message to the error stream, then block reading
// stdin; returns true if the first byte read is 'y' or 'Y'.
int os::message_box(const char* title, const char* message) {
  int i;
  fdStream err(defaultStream::error_fd());
  for (i = 0; i < 78; i++) err.print_raw("=");
  err.cr();
  err.print_raw_cr(title);
  for (i = 0; i < 78; i++) err.print_raw("-");
  err.cr();
  err.print_raw_cr(message);
  for (i = 0; i < 78; i++) err.print_raw("=");
  err.cr();

  char buf[16];
  // Prevent process from exiting upon "read error" without consuming all CPU
  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }

  return buf[0] == 'y' || buf[0] == 'Y';
}

// Deliver the async (suspend/resume) signal to the target thread.
static int sr_notify(OSThread* osthread) {
  int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
  assert_status(status == 0, status, "thr_kill");
  return status;
}

// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume
static const int RANDOMLY_LARGE_INTEGER = 1000000;
static const int RANDOMLY_LARGE_INTEGER2 = 100;

// Initiator side of suspension: flip the state machine to SUSPEND_REQUEST,
// signal the target, and wait (with timeout/cancellation) until SR_handler
// confirms SUSPENDED via sr_semaphore.  Returns true iff suspended.
static bool do_suspend(OSThread* osthread) {
  assert(osthread->sr.is_running(), "thread should be running");
  assert(!sr_semaphore.trywait(), "semaphore has invalid state");

  // mark as suspended and send signal
  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    ShouldNotReachHere();
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
  while (true) {
    if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
      break;
    } else {
      // timeout
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        // make sure that we consume the signal on the semaphore as well
        sr_semaphore.wait();
        break;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}

// Initiator side of resumption: request WAKEUP and keep re-signalling the
// target until SR_handler reports the thread is RUNNING again.
static void do_resume(OSThread* osthread) {
  assert(osthread->sr.is_suspended(), "thread should be suspended");
  assert(!sr_semaphore.trywait(), "invalid semaphore state");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  while (true) {
    if (sr_notify(osthread) == 0) {
      if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
        if (osthread->sr.is_running()) {
          return;
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}

// Suspend the target thread, run the subclass's do_task() against its
// saved ucontext, then resume it.
void os::SuspendedThreadTask::internal_do_task() {
  if (do_suspend(_thread->osthread())) {
    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
    do_task(context);
    do_resume(_thread->osthread());
  }
}

// Task that samples a suspended thread's program counter from its ucontext.
class PcFetcher : public os::SuspendedThreadTask {
 public:
  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
  ExtendedPC result();
 protected:
  void do_task(const os::SuspendedThreadTaskContext& context);
 private:
  ExtendedPC _epc;
};

ExtendedPC PcFetcher::result() {
  guarantee(is_done(), "task is not done yet.");
  return _epc;
}

void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
  Thread* thread = context.thread();
  OSThread* osthread = thread->osthread();
  if (osthread->ucontext() != NULL) {
    _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
  } else {
    // NULL context is unexpected, double-check this is the VMThread
    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
  }
}

// A lightweight implementation that does not suspend the target thread and
// thus returns only a hint. Used for profiling only!
// NOTE(review): the comment above looks stale — PcFetcher::run() goes through
// SuspendedThreadTask::internal_do_task(), which does suspend/resume the
// target thread.  Confirm against os::SuspendedThreadTask::run().
ExtendedPC os::get_thread_pc(Thread* thread) {
  // Make sure that it is called by the watcher and the Threads lock is owned.
  assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
  // For now, is only used to profile the VM Thread
  assert(thread->is_VM_thread(), "Can only be called for VMThread");
  PcFetcher fetcher(thread);
  fetcher.run();
  return fetcher.result();
}


// This does not do anything on Solaris. This is basically a hook for being
// able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
  f(value, method, args, thread);
}

// This routine may be used by user applications as a "hook" to catch signals.
// The user-defined signal handler must pass unrecognized signals to this
// routine, and if it returns true (non-zero), then the signal handler must
// return immediately.
// If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
//
// If this routine returns false, it is OK to call it again.  This allows
// the user-defined signal handler to perform checks either before or after
// the VM performs its own checks.  Naturally, the user code would be making
// a serious error if it tried to handle an exception (such as a null check
// or breakpoint) that the VM was generating for its own correct operation.
//
// This routine may recognize any of the following kinds of signals:
// SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
// os::Solaris::SIGasync
// It should be consulted by handlers for any of those signals.
// It explicitly does not recognize os::Solaris::SIGinterrupt
//
// The caller of this routine must pass in the three arguments supplied
// to the function referred to in the "sa_sigaction" (not the "sa_handler")
// field of the structure passed to sigaction().  This routine assumes that
// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
//
// Note that the VM will print warnings if it detects conflicting signal
// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
//
extern "C" JNIEXPORT int
JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext,
                          int abort_if_unrecognized);


// The VM's SA_SIGINFO handler for the hotspot signals: forwards to
// JVM_handle_solaris_signal with abort_if_unrecognized=true, preserving errno.
void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
  int orig_errno = errno;  // Preserve errno value over signal handler.
  JVM_handle_solaris_signal(sig, info, ucVoid, true);
  errno = orig_errno;
}

/* Do not delete - if guarantee is ever removed,  a signal handler (even empty)
   is needed to provoke threads blocked on IO to return an EINTR
   Note: this explicitly does NOT call JVM_handle_solaris_signal and
   does NOT participate in signal chaining due to requirement for
   NOT setting SA_RESTART to make EINTR work. */
extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
  if (UseSignalChaining) {
    struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
    if (actp && actp->sa_handler) {
      vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
    }
  }
}

// This boolean allows users to forward their own non-matching signals
// to JVM_handle_solaris_signal, harmlessly.
bool os::Solaris::signal_handlers_are_installed = false;

// For signal-chaining
bool os::Solaris::libjsig_is_loaded = false;
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Solaris::get_signal_action = NULL;

// Return the sigaction that was installed before ours for 'sig':
// first ask libjsig (if loaded and the signal is in its range), then
// fall back to the handler the VM saved itself.  NULL if none.
struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
  struct sigaction *actp = NULL;

  if ((libjsig_is_loaded) && (sig <= Maxlibjsigsigs)) {
    // Retrieve the old signal handler from libjsig
    actp = (*get_signal_action)(sig);
  }
  if (actp == NULL) {
    // Retrieve the preinstalled signal handler from jvm
    actp = get_preinstalled_handler(sig);
  }

  return actp;
}

// Invoke the previously-installed (chained) handler described by 'actp',
// honoring its SA_SIGINFO/SA_NODEFER/SA_RESETHAND flags and signal mask.
// Returns false for SIG_DFL (let the VM treat the signal itself),
// true otherwise (the signal has been taken care of).
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand;
    sa_sigaction_t sa;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    thr_sigsetmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}

// Entry point used by the VM's handlers: dispatch 'sig' to any chained
// user handler.  Returns true iff a chained handler consumed the signal.
bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
  bool chained = false;
  // signal-chaining
  if (UseSignalChaining) {
    struct sigaction *actp = get_chained_signal_action(sig);
    if (actp != NULL) {
      chained = call_chained_handler(actp, sig, siginfo, context);
    }
  }
  return chained;
}

// Return the handler the VM saved for 'sig' before installing its own,
// or NULL if none was saved.
struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  if (preinstalled_sigs[sig] != 0) {
    return &chainedsigactions[sig];
  }
  return NULL;
}

// Remember 'oldAct' as the pre-existing handler for 'sig' so it can be
// chained to later.
void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {

  assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  chainedsigactions[sig] = oldAct;
  preinstalled_sigs[sig] = 1;
}

// Install the VM's handler for 'sig'.
// set_installed - if false and a foreign handler is present, leave it alone.
// oktochain     - whether a pre-existing handler may be saved for chaining;
//                 chaining is never allowed for the VM interrupt signal.
void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);
  void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
                                      : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      if (oktochain) {
        // save the old handler in jvm
        save_preinstalled_handler(sig, oldAct);
      } else {
        vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
      }
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
    } else {
      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
                    "%#lx for signal %d.", (long)oldhand, sig));
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_handler = SIG_DFL;

  sigAct.sa_sigaction = signalHandler;
  // Handle SIGSEGV on alternate signal stack if
  // not using stack banging
  if (!UseStackBanging && sig == SIGSEGV) {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
  // Interruptible i/o requires SA_RESTART cleared so EINTR
  // is returned instead of restarting system calls
  } else if (sig == os::Solaris::SIGinterrupt()) {
    sigemptyset(&sigAct.sa_mask);
    sigAct.sa_handler = NULL;
    sigAct.sa_flags = SA_SIGINFO;
    sigAct.sa_sigaction = sigINTRHandler;
  } else {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
  }
  // Record the flags we installed so check_signal_handler() can detect
  // later tampering.
  os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);

  sigaction(sig, &sigAct, &oldAct);

  void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                       : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}


#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Solaris::check_signal_handler(sig)

// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here

void os::run_periodic_checks() {
  // A big source of grief is hijacking virt. addr 0x0 on Solaris,
  // thereby preventing NULL checks.
  if(!check_addr0_done) check_addr0_done = check_addr0(tty);

  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check for the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  // See comments above for using JVM1/JVM2 and UseAltSigs
  DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
  DO_SIGNAL_CHECK(os::Solaris::SIGasync());

}

typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

// Cached pointer to the C library's real sigaction(), resolved via dlsym
// so interposed versions are bypassed.
static os_sigaction_t os_sigaction = NULL;

// Verify that the handler currently installed for 'sig' is still the one
// the VM expects; print a warning (and stop checking that signal) if a
// JNI application has replaced it or altered its flags.
void os::Solaris::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler) ;


  switch(sig) {
    case SIGSEGV:
    case SIGBUS:
    case SIGFPE:
    case SIGPIPE:
    case SIGXFSZ:
    case SIGILL:
      jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
      break;

    case SHUTDOWN1_SIGNAL:
    case SHUTDOWN2_SIGNAL:
    case SHUTDOWN3_SIGNAL:
    case BREAK_SIGNAL:
      jvmHandler = (address)user_handler();
      break;

    default:
      int intrsig = os::Solaris::SIGinterrupt();
      int asynsig = os::Solaris::SIGasync();

      if (sig == intrsig) {
        jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
      } else if (sig == asynsig) {
        jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
      } else {
        return;
      }
      break;
  }


  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
    tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Print all the signal handler state
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }

}

// Install all the VM's signal handlers, cooperating with libjsig (the
// signal-chaining interposition library) when it is loaded.
void os::Solaris::install_signal_handlers() {
  bool libjsigdone = false;
  signal_handlers_are_installed = true;

  // signal-chaining
  typedef void (*signal_setting_t)();
  signal_setting_t begin_signal_setting = NULL;
  signal_setting_t end_signal_setting = NULL;
  // Presence of JVM_begin_signal_setting indicates libjsig is loaded.
  begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                        dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
  if (begin_signal_setting != NULL) {
    end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                        dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
    get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                                       dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
    get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
                                         dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
    libjsig_is_loaded = true;
    if (os::Solaris::get_libjsig_version != NULL) {
      libjsigversion = (*os::Solaris::get_libjsig_version)();
    }
    assert(UseSignalChaining, "should enable signal-chaining");
  }
  if (libjsig_is_loaded) {
    // Tell libjsig jvm is setting signal handlers
    (*begin_signal_setting)();
  }

  set_signal_handler(SIGSEGV, true, true);
  set_signal_handler(SIGPIPE, true, true);
  set_signal_handler(SIGXFSZ, true, true);
  set_signal_handler(SIGBUS, true, true);
  set_signal_handler(SIGILL, true, true);
  set_signal_handler(SIGFPE, true, true);


  if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {

    // Pre-1.4.1 Libjsig limited to signal chaining signals <= 32 so
    // can not register overridable signals which might be > 32
    if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
      // Tell libjsig jvm has finished setting signal handlers
      (*end_signal_setting)();
      libjsigdone = true;
    }
  }

  // Never ok to chain our SIGinterrupt
  set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
  set_signal_handler(os::Solaris::SIGasync(), true, true);

  if (libjsig_is_loaded && !libjsigdone) {
    // Tell libjsig jvm finishes setting signal handlers
    (*end_signal_setting)();
  }

  // We don't activate signal checker if libjsig is in place, we trust ourselves
  // and if UserSignalHandler is installed all bets are off.
  // Log that signal checking is off only if -verbose:jni is specified.
  if (CheckJNICalls) {
    if (libjsig_is_loaded) {
      if (PrintJNIResolving) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
      }
      check_signals = false;
    }
    if (AllowUserSignalHandlers) {
      if (PrintJNIResolving) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
      }
      check_signals = false;
    }
  }
}


void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);

// Names for Solaris signal numbers 0..37, indexed by signal number.
const char * signames[] = {
  "SIG0",
  "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
  "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
  "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
  "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
  "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
  "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
  "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
  "SIGCANCEL", "SIGLOST"
};

// Format a signal number as its symbolic name (or "SIG<n>" for numbers
// beyond the table) into 'buf'.  Returns buf, or NULL if the code is not
// a valid signal number.
const char* os::exception_name(int exception_code, char* buf, size_t size) {
  if (0 < exception_code && exception_code <= SIGRTMAX) {
    // signal
    if (exception_code < sizeof(signames)/sizeof(const char*)) {
      jio_snprintf(buf, size, "%s", signames[exception_code]);
    } else {
      jio_snprintf(buf, size, "SIG%d", exception_code);
    }
    return buf;
  } else {
    return NULL;
  }
}

// (Static) wrappers for the new libthread API
int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate;
int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate;
int_fnP_thread_t_i os::Solaris::_thr_setmutator;
int_fnP_thread_t os::Solaris::_thr_suspend_mutator;
int_fnP_thread_t os::Solaris::_thr_continue_mutator;

// (Static) wrapper for getisax(2) call.
os::Solaris::getisax_func_t os::Solaris::_getisax = 0;

// (Static) wrappers for the liblgrp API
os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;

// (Static) wrapper for meminfo() call.
os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;

// Look up 'name' with dlsym; returns NULL (rather than aborting) if the
// symbol cannot be found.
static address resolve_symbol_lazy(const char* name) {
  address addr = (address) dlsym(RTLD_DEFAULT, name);
  if(addr == NULL) {
    // RTLD_DEFAULT was not defined on some early versions of 2.5.1
    addr = (address) dlsym(RTLD_NEXT, name);
  }
  return addr;
}

// Like resolve_symbol_lazy(), but treats a missing symbol as fatal.
static address resolve_symbol(const char* name) {
  address addr = resolve_symbol_lazy(name);
  if(addr == NULL) {
    fatal(dlerror());
  }
  return addr;
}



// isT2_libthread()
//
// Routine to determine if we are currently using the new T2 libthread.
//
// We determine if we are using T2 by reading /proc/self/lstatus and
// looking for a thread with the ASLWP bit set.  If we find this status
// bit set, we must assume that we are NOT using T2.  The T2 team
// has approved this algorithm.
//
// We need to determine if we are running with the new T2 libthread
// since setting native thread priorities is handled differently
// when using this library.  All threads created using T2 are bound
// threads.
// All threads created using T2 are bound
// threads. Calling thr_setprio is meaningless in this case.
//
bool isT2_libthread() {
  static prheader_t * lwpArray = NULL;
  static int lwpSize = 0;
  static int lwpFile = -1;
  lwpstatus_t * that;
  char lwpName [128];
  bool isT2 = false;

#define ADR(x)  ((uintptr_t)(x))
#define LWPINDEX(ary,ix)   ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1))))

  // Snapshot the process' LWP status list from /proc.
  lwpFile = ::open("/proc/self/lstatus", O_RDONLY, 0);
  if (lwpFile < 0) {
    if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n");
    return false;
  }
  // Start with a 16K buffer and grow until the whole list fits.
  lwpSize = 16*1024;
  for (;;) {
    ::lseek64 (lwpFile, 0, SEEK_SET);
    lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize, mtInternal);
    if (::read(lwpFile, lwpArray, lwpSize) < 0) {
      if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
      break;
    }
    if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
      // We got a good snapshot - now iterate over the list.
      int aslwpcount = 0;
      for (int i = 0; i < lwpArray->pr_nent; i++ ) {
        that = LWPINDEX(lwpArray,i);
        if (that->pr_flags & PR_ASLWP) {
          aslwpcount++;
        }
      }
      // No ASLWP present means we are on the new T2 libthread.
      if (aslwpcount == 0) isT2 = true;
      break;
    }
    // Buffer was too small; size it to what the kernel reported and retry.
    lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
    FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);  // retry.
  }

  FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);
  ::close (lwpFile);
  if (ThreadPriorityVerbose) {
    if (isT2) tty->print_cr("We are running with a T2 libthread\n");
    else tty->print_cr("We are not running with a T2 libthread\n");
  }
  return isT2;
}


// Resolve the libthread entry points the VM depends on and record whether
// we are running on the T2 libthread.
void os::Solaris::libthread_init() {
  address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");

  // Determine if we are running with the new T2 libthread
  os::Solaris::set_T2_libthread(isT2_libthread());

  lwp_priocntl_init();

  // RTLD_DEFAULT was not defined on some early versions of 5.5.1
  if(func == NULL) {
    func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
    // Guarantee that this VM is running on an new enough OS (5.6 or
    // later) that it will have a new enough libthread.so.
    guarantee(func != NULL, "libthread.so is too old.");
  }

  // Initialize the new libthread getstate API wrappers
  func = resolve_symbol("thr_getstate");
  os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func));

  func = resolve_symbol("thr_setstate");
  os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func));

  func = resolve_symbol("thr_setmutator");
  os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func));

  func = resolve_symbol("thr_suspend_mutator");
  os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

  func = resolve_symbol("thr_continue_mutator");
  os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

  // Record the address range of libthread's internal signal handler so the
  // VM can recognize pcs that fall inside it.
  int size;
  void (*handler_info_func)(address *, int *);
  handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
  handler_info_func(&handler_start, &size);
  handler_end = handler_start + size;
}


int_fnP_mutex_tP
os::Solaris::_mutex_lock;
int_fnP_mutex_tP os::Solaris::_mutex_trylock;
int_fnP_mutex_tP os::Solaris::_mutex_unlock;
int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
int_fnP_mutex_tP os::Solaris::_mutex_destroy;
int os::Solaris::_mutex_scope = USYNC_THREAD;

int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
int_fnP_cond_tP os::Solaris::_cond_signal;
int_fnP_cond_tP os::Solaris::_cond_broadcast;
int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
int_fnP_cond_tP os::Solaris::_cond_destroy;
int os::Solaris::_cond_scope = USYNC_THREAD;

// Bind the VM's mutex/condvar operations to one of three families:
// _lwp_* (UseLWPSynchronization), pthread_* (UsePthreads), or the default
// libthread mutex_*/cond_* entry points.
void os::Solaris::synchronization_init() {
  if(UseLWPSynchronization) {
    os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
    os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
    os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
    os::Solaris::set_mutex_init(lwp_mutex_init);
    os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
    os::Solaris::set_mutex_scope(USYNC_THREAD);

    os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
    os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
    os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
    os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
    os::Solaris::set_cond_init(lwp_cond_init);
    os::Solaris::set_cond_destroy(lwp_cond_destroy);
    os::Solaris::set_cond_scope(USYNC_THREAD);
  }
  else {
    os::Solaris::set_mutex_scope(USYNC_THREAD);
    os::Solaris::set_cond_scope(USYNC_THREAD);

    if(UsePthreads) {
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
      os::Solaris::set_mutex_init(pthread_mutex_default_init);
      os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
      os::Solaris::set_cond_init(pthread_cond_default_init);
      os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
    }
    else {
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
      os::Solaris::set_mutex_init(::mutex_init);
      os::Solaris::set_mutex_destroy(::mutex_destroy);

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
      os::Solaris::set_cond_init(::cond_init);
      os::Solaris::set_cond_destroy(::cond_destroy);
    }
  }
}

// Load liblgrp and resolve the locality-group API used for NUMA support.
// On success an lgrp cookie is recorded and true is returned.
bool os::Solaris::liblgrp_init() {
  void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
  if (handle != NULL) {
    os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
    os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
    os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
    os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
    os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
    os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
    os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
    os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
                                       dlsym(handle, "lgrp_cookie_stale")));

    lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
    set_lgrp_cookie(c);
    return true;
  }
  return false;
}

// Resolve optional symbols that may be absent on older Solaris releases;
// callers must check the corresponding pointer before use.
void os::Solaris::misc_sym_init() {
  address func;

  // getisax
  func = resolve_symbol_lazy("getisax");
  if (func != NULL) {
    os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
  }

  // meminfo
  func = resolve_symbol_lazy("meminfo");
  if (func != NULL) {
    os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
  }
}

// Thin wrapper over getisax(2); only valid after misc_sym_init() found it.
uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
  assert(_getisax != NULL, "_getisax not set");
  return _getisax(array, n);
}

// int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
static pset_getloadavg_type
pset_getloadavg_ptr = NULL;

// Resolve pset_getloadavg(3C) if this release provides it; the pointer
// stays NULL otherwise and callers must check it.
void init_pset_getloadavg_ptr(void) {
  pset_getloadavg_ptr =
    (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
  if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
    warning("pset_getloadavg function not found");
  }
}

// Cached fd for /dev/zero, opened in os::init() below.
int os::Solaris::_dev_zero_fd = -1;

// this is called _before_ the global arguments have been parsed
void os::init(void) {
  _initial_pid = getpid();

  max_hrtime = first_hrtime = gethrtime();

  init_random(1234567);

  page_size = sysconf(_SC_PAGESIZE);
  if (page_size == -1)
    fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
                  strerror(errno)));
  init_page_sizes((size_t) page_size);

  Solaris::initialize_system_info();

  // Initialize misc. symbols as soon as possible, so we can use them
  // if we need them.
  Solaris::misc_sym_init();

  int fd = ::open("/dev/zero", O_RDWR);
  if (fd < 0) {
    fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
  } else {
    Solaris::set_dev_zero_fd(fd);

    // Close on exec, child won't inherit.
    fcntl(fd, F_SETFD, FD_CLOEXEC);
  }

  clock_tics_per_sec = CLK_TCK;

  // check if dladdr1() exists; dladdr1 can provide more information than
  // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
  // and is available on linker patches for 5.7 and 5.8.
  // libdl.so must have been loaded, this call is just an entry lookup
  void * hdl = dlopen("libdl.so", RTLD_NOW);
  if (hdl)
    dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));

  // (Solaris only) this switches to calls that actually do locking.
  ThreadCritical::initialize();

  main_thread = thr_self();

  // Constant minimum stack size allowed. It must be at least
  // the minimum of what the OS supports (thr_min_stack()), and
  // enough to allow the thread to get to user bytecode execution.
  Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
  // If the pagesize of the VM is greater than 8K determine the appropriate
  // number of initial guard pages. The user can change this with the
  // command line arguments, if needed.
  if (vm_page_size() > 8*K) {
    StackYellowPages = 1;
    StackRedPages = 1;
    StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
  }
}

// To install functions for atexit system call
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
  // try to enable extended file IO ASAP, see 6431278
  os::Solaris::try_enable_extended_io();

  // Allocate a single page and mark it as readable for safepoint polling. Also
  // use this first mmap call to check support for MAP_ALIGN.
// (os::init_2 continued)
  address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
                                                      page_size,
                                                      MAP_PRIVATE | MAP_ALIGN,
                                                      PROT_READ);
  if (polling_page == NULL) {
    // MAP_ALIGN unsupported on this release; remember that and retry plain.
    has_map_align = false;
    polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
                                                PROT_READ);
  }

  os::set_polling_page(polling_page);

#ifndef PRODUCT
  if( Verbose && PrintMiscellaneous )
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif

  if (!UseMembar) {
    // Serialize-page scheme used instead of explicit membars.
    address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
    guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
    os::set_memory_serialize_page( mem_serialize_page );

#ifndef PRODUCT
    if(Verbose && PrintMiscellaneous)
      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
  }

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size. Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
                                        (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
                                        2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);

  size_t threadStackSizeInBytes = ThreadStackSize * K;
  if (threadStackSizeInBytes != 0 &&
      threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
    // NOTE(review): min_stack_allowed is a size_t printed with %d; assumes
    // the value fits in an int -- confirm, or use SIZE_FORMAT.
    tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
                  os::Solaris::min_stack_allowed/K);
    return JNI_ERR;
  }

  // For 64kbps there will be a 64kb page size, which makes
  // the usable default stack size quite a bit less. Increase the
  // stack for 64kb (or any > than 8kb) pages, this increases
  // virtual memory fragmentation (since we're not creating the
  // stack on a power of 2 boundary. The real fix for this
  // should be to fix the guard page mechanism.

  if (vm_page_size() > 8*K) {
    threadStackSizeInBytes = (threadStackSizeInBytes != 0)
       ? threadStackSizeInBytes +
         ((StackYellowPages + StackRedPages) * vm_page_size())
       : 0;
    ThreadStackSize = threadStackSizeInBytes/K;
  }

  // Make the stack size a multiple of the page size so that
  // the yellow/red zones can be guarded.
  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
                                                vm_page_size()));

  Solaris::libthread_init();

  if (UseNUMA) {
    if (!Solaris::liblgrp_init()) {
      UseNUMA = false;
    } else {
      size_t lgrp_limit = os::numa_get_groups_num();
      int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
      size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
      FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal);
      if (lgrp_num < 2) {
        // There's only one locality group, disable NUMA.
        UseNUMA = false;
      }
    }
    if (!UseNUMA && ForceNUMA) {
      UseNUMA = true;
    }
  }

  Solaris::signal_sets_init();
  Solaris::init_signal_mem();
  Solaris::install_signal_handlers();

  // Old libjsig can only chain the first OLDMAXSIGNUM signals.
  if (libjsigversion < JSIG_VERSION_1_4_1) {
    Maxlibjsigsigs = OLDMAXSIGNUM;
  }

  // initialize synchronization primitives to use either thread or
  // lwp synchronization (controlled by UseLWPSynchronization)
  Solaris::synchronization_init();

  if (MaxFDLimit) {
    // set the number of file descriptors to max. print out error
    // if getrlimit/setrlimit fails but continue regardless.
    struct rlimit nbr_files;
    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
    if (status != 0) {
      if (PrintMiscellaneous && (Verbose || WizardMode))
        perror("os::init_2 getrlimit failed");
    } else {
      nbr_files.rlim_cur = nbr_files.rlim_max;
      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
      if (status != 0) {
        if (PrintMiscellaneous && (Verbose || WizardMode))
          perror("os::init_2 setrlimit failed");
      }
    }
  }

  // Calculate theoretical max. size of Threads to guard against
  // artificial out-of-memory situations, where all available address-
  // space has been reserved by thread stacks. Default stack size is 1Mb.
  size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
    JavaThread::stack_size_at_create() : (1*K*K);
  assert(pre_thread_stack_size != 0, "Must have a stack");
  // Solaris has a maximum of 4Gb of user programs. Calculate the thread limit when
  // we should start doing Virtual Memory banging. Currently when the threads will
  // have used all but 200Mb of space.
  size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
  Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;

  // at-exit methods are called in the reverse order of their registration.
  // In Solaris 7 and earlier, atexit functions are called on return from
  // main or as a result of a call to exit(3C). There can be only 32 of
  // these functions registered and atexit() does not set errno. In Solaris
  // 8 and later, there is no limit to the number of functions registered
  // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
  // functions are called upon dlclose(3DL) in addition to return from main
  // and exit(3C).

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init2 atexit(perfMemory_exit_helper) failed");
    }
  }

  // Init pset_loadavg function pointer
  init_pset_getloadavg_ptr();

  return JNI_OK;
}

void os::init_3(void) {
  return;
}

// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
  if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
    fatal("Could not disable polling page");
};

// Mark the polling page as readable
void os::make_polling_page_readable(void) {
  if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 )
    fatal("Could not enable polling page");
};

// OS interface.
5161 5162 bool os::check_heap(bool force) { return true; } 5163 5164 typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr); 5165 static vsnprintf_t sol_vsnprintf = NULL; 5166 5167 int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) { 5168 if (!sol_vsnprintf) { 5169 //search for the named symbol in the objects that were loaded after libjvm 5170 void* where = RTLD_NEXT; 5171 if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL) 5172 sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf")); 5173 if (!sol_vsnprintf){ 5174 //search for the named symbol in the objects that were loaded before libjvm 5175 where = RTLD_DEFAULT; 5176 if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL) 5177 sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf")); 5178 assert(sol_vsnprintf != NULL, "vsnprintf not found"); 5179 } 5180 } 5181 return (*sol_vsnprintf)(buf, count, fmt, argptr); 5182 } 5183 5184 5185 // Is a (classpath) directory empty? 5186 bool os::dir_is_empty(const char* path) { 5187 DIR *dir = NULL; 5188 struct dirent *ptr; 5189 5190 dir = opendir(path); 5191 if (dir == NULL) return true; 5192 5193 /* Scan the directory */ 5194 bool result = true; 5195 char buf[sizeof(struct dirent) + MAX_PATH]; 5196 struct dirent *dbuf = (struct dirent *) buf; 5197 while (result && (ptr = readdir(dir, dbuf)) != NULL) { 5198 if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) { 5199 result = false; 5200 } 5201 } 5202 closedir(dir); 5203 return result; 5204 } 5205 5206 // This code originates from JDK's sysOpen and open64_w 5207 // from src/solaris/hpi/src/system_md.c 5208 5209 #ifndef O_DELETE 5210 #define O_DELETE 0x10000 5211 #endif 5212 5213 // Open a file. Unlink the file immediately after open returns 5214 // if the specified oflag has the O_DELETE flag set. 
// O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c

// Open 'path' with open64(2), rejecting over-long paths and directories,
// remapping low fds on 32-bit Solaris, setting close-on-exec, and honoring
// the JVM-private O_DELETE flag (unlink immediately after a successful
// open).  Returns the fd, or -1 with errno set.
int os::open(const char *path, int oflag, int mode) {
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  int fd;
  int o_delete = (oflag & O_DELETE);
  oflag = oflag & ~O_DELETE;

  fd = ::open64(path, oflag, mode);
  if (fd == -1) return -1;

  //If the open succeeded, the file might still be a directory
  {
    struct stat64 buf64;
    int ret = ::fstat64(fd, &buf64);
    int st_mode = buf64.st_mode;

    if (ret != -1) {
      if ((st_mode & S_IFMT) == S_IFDIR) {
        errno = EISDIR;
        ::close(fd);
        return -1;
      }
    } else {
      ::close(fd);
      return -1;
    }
  }
  /*
   * 32-bit Solaris systems suffer from:
   *
   * - an historical default soft limit of 256 per-process file
   *   descriptors that is too low for many Java programs.
   *
   * - a design flaw where file descriptors created using stdio
   *   fopen must be less than 256, _even_ when the first limit above
   *   has been raised. This can cause calls to fopen (but not calls to
   *   open, for example) to fail mysteriously, perhaps in 3rd party
   *   native code (although the JDK itself uses fopen). One can hardly
   *   criticize them for using this most standard of all functions.
   *
   * We attempt to make everything work anyways by:
   *
   * - raising the soft limit on per-process file descriptors beyond
   *   256
   *
   * - As of Solaris 10u4, we can request that Solaris raise the 256
   *   stdio fopen limit by calling function enable_extended_FILE_stdio.
   *   This is done in init_2 and recorded in enabled_extended_FILE_stdio
   *
   * - If we are stuck on an old (pre 10u4) Solaris system, we can
   *   workaround the bug by remapping non-stdio file descriptors below
   *   256 to ones beyond 256, which is done below.
   *
   * See:
   * 1085341: 32-bit stdio routines should support file descriptors >255
   * 6533291: Work around 32-bit Solaris stdio limit of 256 open files
   * 6431278: Netbeans crash on 32 bit Solaris: need to call
   *          enable_extended_FILE_stdio() in VM initialisation
   * Giri Mandalika's blog
   * http://technopark02.blogspot.com/2005_05_01_archive.html
   */
#ifndef _LP64
  if ((!enabled_extended_FILE_stdio) && fd < 256) {
    int newfd = ::fcntl(fd, F_DUPFD, 256);
    if (newfd != -1) {
      ::close(fd);
      fd = newfd;
    }
  }
#endif // 32-bit Solaris
  /*
   * All file descriptors that are opened in the JVM and not
   * specifically destined for a subprocess should have the
   * close-on-exec flag set. If we don't set it, then careless 3rd
   * party native code might fork and exec without closing all
   * appropriate file descriptors (e.g. as we do in closeDescriptors in
   * UNIXProcess.c), and this in turn might:
   *
   * - cause end-of-file to fail to be detected on some file
   *   descriptors, resulting in mysterious hangs, or
   *
   * - might cause an fopen in the subprocess to fail on a system
   *   suffering from bug 1085341.
   *
   * (Yes, the default setting of the close-on-exec flag is a Unix
   * design flaw)
   *
   * See:
   * 1085341: 32-bit stdio routines should support file descriptors >255
   * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
   * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
   */
#ifdef FD_CLOEXEC
  {
    int flags = ::fcntl(fd, F_GETFD);
    if (flags != -1)
      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
  }
#endif

  if (o_delete != 0) {
    ::unlink(path);
  }
  return fd;
}

// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = O_WRONLY | O_CREAT;
  if (!rewrite_existing) {
    oflags |= O_EXCL;
  }
  return ::open64(path, oflags, S_IREAD | S_IWRITE);
}

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
}

jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::lseek64(fd, offset, whence);
}

// No translation needed on Solaris; paths are used verbatim.
char * os::native_path(char *path) {
  return path;
}

int os::ftruncate(int fd, jlong length) {
  return ::ftruncate64(fd, length);
}

int os::fsync(int fd) {
  RESTARTABLE_RETURN_INT(::fsync(fd));
}

// Estimate the number of bytes available to read on fd without blocking.
// Returns 1 with *bytes set on success, 0 on failure.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  int mode;
  struct stat64 buf64;

  if (::fstat64(fd, &buf64) >= 0) {
    mode = buf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      /*
      * XXX: is the following call interruptible? If so, this might
      * need to go through the INTERRUPT_IO() wrapper as for other
      * blocking, interruptible calls in this file.
      */
      int n,ioctl_return;

      INTERRUPTIBLE(::ioctl(fd, FIONREAD, &n),ioctl_return,os::Solaris::clear_interrupted);
      if (ioctl_return>= 0) {
        *bytes = n;
        return 1;
      }
    }
  }
  // Fall back to seeking: available = (end of file) - (current position).
  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
    return 0;
  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
    return 0;
  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
    return 0;
  }
  *bytes = end - cur;
  return 1;
}

// Map a block of memory.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {
  int prot;
  int flags;

  if (read_only) {
    prot = PROT_READ;
    flags = MAP_SHARED;
  } else {
    prot = PROT_READ | PROT_WRITE;
    flags = MAP_PRIVATE;
  }

  if (allow_exec) {
    prot |= PROT_EXEC;
  }

  if (addr != NULL) {
    flags |= MAP_FIXED;
  }

  char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
                                     fd, file_offset);
  if (mapped_address == MAP_FAILED) {
    return NULL;
  }
  return mapped_address;
}


// Remap a block of memory.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // same as map_memory() on this OS
  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                        allow_exec);
}


// Unmap a block of memory.
5436 bool os::pd_unmap_memory(char* addr, size_t bytes) { 5437 return munmap(addr, bytes) == 0; 5438 } 5439 5440 void os::pause() { 5441 char filename[MAX_PATH]; 5442 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 5443 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 5444 } else { 5445 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 5446 } 5447 5448 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 5449 if (fd != -1) { 5450 struct stat buf; 5451 ::close(fd); 5452 while (::stat(filename, &buf) == 0) { 5453 (void)::poll(NULL, 0, 100); 5454 } 5455 } else { 5456 jio_fprintf(stderr, 5457 "Could not open pause file '%s', continuing immediately.\n", filename); 5458 } 5459 } 5460 5461 #ifndef PRODUCT 5462 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS 5463 // Turn this on if you need to trace synch operations. 5464 // Set RECORD_SYNCH_LIMIT to a large-enough value, 5465 // and call record_synch_enable and record_synch_disable 5466 // around the computation of interest. 
5467 5468 void record_synch(char* name, bool returning); // defined below 5469 5470 class RecordSynch { 5471 char* _name; 5472 public: 5473 RecordSynch(char* name) :_name(name) 5474 { record_synch(_name, false); } 5475 ~RecordSynch() { record_synch(_name, true); } 5476 }; 5477 5478 #define CHECK_SYNCH_OP(ret, name, params, args, inner) \ 5479 extern "C" ret name params { \ 5480 typedef ret name##_t params; \ 5481 static name##_t* implem = NULL; \ 5482 static int callcount = 0; \ 5483 if (implem == NULL) { \ 5484 implem = (name##_t*) dlsym(RTLD_NEXT, #name); \ 5485 if (implem == NULL) fatal(dlerror()); \ 5486 } \ 5487 ++callcount; \ 5488 RecordSynch _rs(#name); \ 5489 inner; \ 5490 return implem args; \ 5491 } 5492 // in dbx, examine callcounts this way: 5493 // for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done 5494 5495 #define CHECK_POINTER_OK(p) \ 5496 (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p))) 5497 #define CHECK_MU \ 5498 if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only."); 5499 #define CHECK_CV \ 5500 if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only."); 5501 #define CHECK_P(p) \ 5502 if (!CHECK_POINTER_OK(p)) fatal(false, "Pointer must be in C heap only."); 5503 5504 #define CHECK_MUTEX(mutex_op) \ 5505 CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU); 5506 5507 CHECK_MUTEX( mutex_lock) 5508 CHECK_MUTEX( _mutex_lock) 5509 CHECK_MUTEX( mutex_unlock) 5510 CHECK_MUTEX(_mutex_unlock) 5511 CHECK_MUTEX( mutex_trylock) 5512 CHECK_MUTEX(_mutex_trylock) 5513 5514 #define CHECK_COND(cond_op) \ 5515 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV); 5516 5517 CHECK_COND( cond_wait); 5518 CHECK_COND(_cond_wait); 5519 CHECK_COND(_cond_wait_cancel); 5520 5521 #define CHECK_COND2(cond_op) \ 5522 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV); 5523 5524 CHECK_COND2( cond_timedwait); 5525 
CHECK_COND2(_cond_timedwait);
CHECK_COND2(_cond_timedwait_cancel);

// do the _lwp_* versions too
// Temporarily retarget the macros at the lwp-level types; undone below.
#define mutex_t lwp_mutex_t
#define cond_t  lwp_cond_t
CHECK_MUTEX(  _lwp_mutex_lock)
CHECK_MUTEX(  _lwp_mutex_unlock)
CHECK_MUTEX(  _lwp_mutex_trylock)
CHECK_MUTEX( __lwp_mutex_lock)
CHECK_MUTEX( __lwp_mutex_unlock)
CHECK_MUTEX( __lwp_mutex_trylock)
CHECK_MUTEX(___lwp_mutex_lock)
CHECK_MUTEX(___lwp_mutex_unlock)

CHECK_COND(  _lwp_cond_wait);
CHECK_COND( __lwp_cond_wait);
CHECK_COND(___lwp_cond_wait);

CHECK_COND2( _lwp_cond_timedwait);
CHECK_COND2(__lwp_cond_timedwait);
#undef mutex_t
#undef cond_t

// Miscellaneous lwp primitives; '0' means no extra pointer check is needed.
CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);


// recording machinery:
// Fixed-size trace buffer: the first RECORD_SYNCH_LIMIT events after
// record_synch_enable() are captured; later events are silently dropped
// (record_synch_count stops advancing once the limit is reached).

enum { RECORD_SYNCH_LIMIT = 200 };
char* record_synch_name[RECORD_SYNCH_LIMIT];
void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
bool  record_synch_returning[RECORD_SYNCH_LIMIT];
thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
int record_synch_count = 0;
bool record_synch_enabled = false;

// in dbx, examine recorded data this way:
// for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done

// Append one event (entry or return of a synch op) to the trace buffer.
// 'name' is the literal function name; 'returning' distinguishes the
// destructor-side record from the entry-side record.
void record_synch(char* name, bool returning) {
  if (record_synch_enabled) {
    if (record_synch_count < RECORD_SYNCH_LIMIT) {
      record_synch_name[record_synch_count] = name;
      record_synch_returning[record_synch_count] = returning;
      record_synch_thread[record_synch_count] = thr_self();
      // NOTE(review): this stores the address of the local 'name' parameter
      // (a stack slot), not the synch object itself — presumably useful for
      // identifying the recording frame in dbx; confirm before relying on it.
      record_synch_arg0ptr[record_synch_count] = &name;
      record_synch_count++;
    }
    // put more checking code here:
    // ...
  }
}

void record_synch_enable() {
  // start collecting trace data, if not already doing so
  // (re-enabling while already enabled does NOT reset the buffer)
  if (!record_synch_enabled)  record_synch_count = 0;
  record_synch_enabled = true;
}

void record_synch_disable() {
  // stop collecting trace data
  record_synch_enabled = false;
}

#endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
#endif // PRODUCT

// Byte offset of pr_utime within prusage_t, and the length of the
// pr_utime..pr_ttime span, computed via the classic NULL-pointer offsetof
// idiom.  Used below to pread() only the timing fields out of
// /proc/<pid>/lwp/<lwpid>/lwpusage.
const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
                               (intptr_t)(&((prusage_t *)(NULL))->pr_utime);


// JVMTI & JVM monitoring and management support
// The thread_cpu_time() and current_thread_cpu_time() are only
// supported if is_thread_cpu_time_supported() returns true.
// They are not supported on Solaris T1.

// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread *)
// returns the fast estimate available on the platform.

// hrtime_t gethrvtime() return value includes
// user time but does not include system time
jlong os::current_thread_cpu_time() {
  return (jlong) gethrvtime();
}

jlong os::thread_cpu_time(Thread *thread) {
  // return user level CPU time only to be consistent with
  // what current_thread_cpu_time returns.
  // thread_cpu_time_info() must be changed if this changes
  return os::thread_cpu_time(thread, false /* user time only */);
}

jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  if (user_sys_cpu_time) {
    return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
  } else {
    // user-only time has a cheaper path via gethrvtime()
    return os::current_thread_cpu_time();
  }
}

// CPU time of an arbitrary thread, read from the per-lwp usage file in
// /proc.  Returns nanoseconds, or -1 if the file cannot be opened or read.
jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  char proc_name[64];
  int count;
  prusage_t prusage;
  jlong lwp_time;
  int fd;

  sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
          getpid(),
          thread->osthread()->lwp_id());
  fd = ::open(proc_name, O_RDONLY);
  if ( fd == -1 ) return -1;

  // Read just the pr_utime..pr_ttime window of the prusage record,
  // restarting if the read is interrupted by a signal.
  do {
    count = ::pread(fd,
                    (void *)&prusage.pr_utime,
                    thr_time_size,
                    thr_time_off);
  } while (count < 0 && errno == EINTR);
  ::close(fd);
  if ( count < 0 ) return -1;

  if (user_sys_cpu_time) {
    // user + system CPU time
    lwp_time = (((jlong)prusage.pr_stime.tv_sec +
                 (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
                 (jlong)prusage.pr_stime.tv_nsec +
                 (jlong)prusage.pr_utime.tv_nsec;
  } else {
    // user level CPU time only
    lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
                (jlong)prusage.pr_utime.tv_nsec;
  }

  return(lwp_time);
}

void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_USER_CPU;   // only user time is returned
}

void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_USER_CPU;   // only user time is returned
}

// Thread CPU time needs either the T2 libthread or bound (1:1) threads;
// see the "not supported on Solaris T1" note above.
bool os::is_thread_cpu_time_supported() {
  if ( os::Solaris::T2_libthread() || UseBoundThreads ) {
    return true;
  } else {
    return false;
  }
}

// System loadavg support.  Returns -1 if load average cannot be obtained.
// Return the load average for our processor set if the primitive exists
// (Solaris 9 and later).  Otherwise just return system wide loadavg.
int os::loadavg(double loadavg[], int nelem) {
  if (pset_getloadavg_ptr != NULL) {
    // pset_getloadavg was resolved dynamically at startup
    return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
  } else {
    return ::getloadavg(loadavg, nelem);
  }
}

//---------------------------------------------------------------------------------

// Print symbolic information for 'addr' (symbol+offset, library, base) to
// 'st' using dladdr; with -Verbose also disassembles a window around addr.
// Returns true iff dladdr could attribute the address.
bool os::find(address addr, outputStream* st) {
  Dl_info dlinfo;
  memset(&dlinfo, 0, sizeof(dlinfo));
  if (dladdr(addr, &dlinfo) != 0) {
    st->print(PTR_FORMAT ": ", addr);
    if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
      st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
    } else if (dlinfo.dli_fbase != NULL)
      st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
    else
      st->print("<absolute address>");
    if (dlinfo.dli_fname != NULL) {
      st->print(" in %s", dlinfo.dli_fname);
    }
    if (dlinfo.dli_fbase != NULL) {
      st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
    }
    st->cr();

    if (Verbose) {
      // decode some bytes around the PC, clamped to the containing page
      // and to the start of the enclosing symbol/library
      address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
      address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
      address lowest = (address) dlinfo.dli_sname;
      if (!lowest)  lowest = (address) dlinfo.dli_fbase;
      if (begin < lowest)  begin = lowest;
      Dl_info dlinfo2;
      // don't decode past the start of the *next* symbol
      if (dladdr(end, &dlinfo2) != 0
          && dlinfo2.dli_saddr != dlinfo.dli_saddr
          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
        end = (address) dlinfo2.dli_saddr;
      Disassembler::decode(begin, end, st);
    }
    return true;
  }
  return false;
}

// Following function has been added to support HotSparc's libjvm.so running
// under Solaris production JDK 1.2.2 / 1.3.0.  These came from
// src/solaris/hpi/native_threads in the EVM codebase.
//
// NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
// libraries and should thus be removed. We will leave it behind for a while
// until we no longer want to be able to run on top of 1.3.0 Solaris production
// JDK. See 4341971.

#define STACK_SLACK 0x800

extern "C" {
  // Returns the number of usable stack bytes below the current frame,
  // less a fixed safety margin (STACK_SLACK).
  intptr_t sysThreadAvailableStackWithSlack() {
    stack_t st;
    intptr_t retval, stack_top;
    retval = thr_stksegment(&st);
    assert(retval == 0, "incorrect return value from thr_stksegment");
    // ss_sp is the stack *base* (high address); the stack grows down.
    assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
    assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
    stack_top=(intptr_t)st.ss_sp-st.ss_size;
    return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
  }
}

// ObjectMonitor park-unpark infrastructure ...
//
// We implement Solaris and Linux PlatformEvents with the
// obvious condvar-mutex-flag triple.
// Another alternative that works quite well is pipes:
// Each PlatformEvent consists of a pipe-pair.
// The thread associated with the PlatformEvent
// calls park(), which reads from the input end of the pipe.
// Unpark() writes into the other end of the pipe.
// The write-side of the pipe must be set NDELAY.
// Unfortunately pipes consume a large # of handles.
// Native solaris lwp_park() and lwp_unpark() work nicely, too.
// Using pipes for the 1st few threads might be workable, however.
//
// park() is permitted to return spuriously.
// Callers of park() should wrap the call to park() in
// an appropriate loop.  A litmus test for the correct
// usage of park is the following: if park() were modified
// to immediately return 0 your code should still work,
// albeit degenerating to a spin loop.
//
// An interesting optimization for park() is to use a trylock()
// to attempt to acquire the mutex.  If the trylock() fails
// then we know that a concurrent unpark() operation is in-progress.
// in that case the park() code could simply set _count to 0
// and return immediately.  The subsequent park() operation *might*
// return immediately.  That's harmless as the caller of park() is
// expected to loop.  By using trylock() we will have avoided a
// context switch caused by contention on the per-thread mutex.
//
// TODO-FIXME:
// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the
//     objectmonitor implementation.
// 2.  Collapse the JSR166 parker event, and the
//     objectmonitor ParkEvent into a single "Event" construct.
// 3.  In park() and unpark() add:
//     assert (Thread::current() == AssociatedWith).
// 4.  add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
//     1-out-of-N park() operations will return immediately.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block
//
// _Event serves as a restricted-range semaphore.
//
// Another possible encoding of _Event would be with
// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
//
// TODO-FIXME: add DTRACE probes for:
// 1.  Tx parks
// 2.  Ty unparks Tx
// 3.  Tx resumes from park


// value determined through experimentation
#define ROUNDINGFIX 11

// utility to compute the abstime argument to timedwait.
// TODO-FIXME: switch from compute_abstime() to unpackTime().

// Convert a relative timeout in milliseconds into the absolute timestruc_t
// that cond_timedwait expects, clamping to the OS-imposed maximum wait and
// applying the Solaris rounding workaround described inline.
static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
  // millis is the relative timeout time
  // abstime will be the absolute timeout time
  if (millis < 0)  millis = 0;
  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");
  jlong seconds = millis / 1000;
  jlong max_wait_period;

  if (UseLWPSynchronization) {
    // forward port of fix for 4275818 (not sleeping long enough)
    // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
    // _lwp_cond_timedwait() used a round_down algorithm rather
    // than a round_up. For millis less than our roundfactor
    // it rounded down to 0 which doesn't meet the spec.
    // For millis > roundfactor we may return a bit sooner, but
    // since we can not accurately identify the patch level and
    // this has already been fixed in Solaris 9 and 8 we will
    // leave it alone rather than always rounding down.

    if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
    // It appears that when we go directly through Solaris _lwp_cond_timedwait()
    // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
    max_wait_period = 21000000;
  } else {
    max_wait_period = 50000000;
  }
  millis %= 1000;
  if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
    seconds = max_wait_period;
  }
  abstime->tv_sec = now.tv_sec + seconds;
  long usec = now.tv_usec + millis * 1000;
  if (usec >= 1000000) {
    // carry microsecond overflow into the seconds field
    abstime->tv_sec += 1;
    usec -= 1000000;
  }
  abstime->tv_nsec = usec * 1000;
  return abstime;
}

// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
// Conceptually TryPark() should be equivalent to park(0).

int os::PlatformEvent::TryPark() {
  for (;;) {
    const int v = _Event ;
    guarantee ((v == 0) || (v == 1), "invariant") ;
    // CAS loop: retry until we atomically observe-and-clear _Event
    if (Atomic::cmpxchg (0, &_Event, v) == v) return v ;
  }
}

void os::PlatformEvent::park() {           // AKA: down()
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // Atomically decrement _Event: 1 -> 0 means a permit was consumed and we
  // return immediately; 0 -> -1 means we must block below.
  int v ;
  for (;;) {
    v = _Event ;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v == 0) {
    // Do this the hard way by blocking ...
    // See http://monaco.sfbay/detail.jsf?cr=5094058.
    // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
    // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
    if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif
    int status = os::Solaris::mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee (_nParked == 0, "invariant") ;
    ++ _nParked ;
    while (_Event < 0) {
      // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
      // Treat this the same as if the wait was interrupted
      // With usr/lib/lwp going to kernel, always handle ETIME
      status = os::Solaris::cond_wait(_cond, _mutex);
      if (status == ETIME) status = EINTR ;
      assert_status(status == 0 || status == EINTR, status, "cond_wait");
    }
    -- _nParked ;
    _Event = 0 ;
    status = os::Solaris::mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other.
    OrderAccess::fence();
  }
}

// Timed park: like park() but bounded by 'millis'.  Returns OS_OK if a
// permit was consumed (or a wakeup arrived), OS_TIMEOUT otherwise.
int os::PlatformEvent::park(jlong millis) {
  guarantee (_nParked == 0, "invariant") ;
  int v ;
  for (;;) {
    v = _Event ;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v != 0) return OS_OK ;               // permit was available

  int ret = OS_TIMEOUT;
  timestruc_t abst;
  compute_abstime (&abst, millis);

  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // For Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif
  int status = os::Solaris::mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee (_nParked == 0, "invariant") ;
  ++ _nParked ;
  while (_Event < 0) {
    int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
    assert_status(status == 0 || status == EINTR ||
                  status == ETIME || status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (!FilterSpuriousWakeups) break ;    // previous semantics
    if (status == ETIME || status == ETIMEDOUT) break ;
    // We consume and ignore EINTR and spurious wakeups.
  }
  -- _nParked ;
  if (_Event >= 0) ret = OS_OK ;
  _Event = 0 ;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other.
  OrderAccess::fence();
  return ret;
}

void os::PlatformEvent::unpark() {
  // Transitions for _Event:
  //    0 :=> 1
  //    1 :=> 1
  //   -1 :=> either 0 or 1; must signal target thread
  //          That is, we can safely transition _Event from -1 to either
  //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
  //          unpark() calls.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  if (Atomic::xchg(1, &_Event) >= 0) return;

  // If the thread associated with the event was parked, wake it.
  // Wait for the thread assoc with the PlatformEvent to vacate.
  int status = os::Solaris::mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  int AnyWaiters = _nParked;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
  if (AnyWaiters != 0) {
    // We intentionally signal *after* dropping the lock
    // to avoid a common class of futile wakeups.
    status = os::Solaris::cond_signal(_cond);
    assert_status(status == 0, status, "cond_signal");
  }
}

// JSR166
// -------------------------------------------------------

/*
 * The solaris and linux implementations of park/unpark are fairly
 * conservative for now, but can be improved. They currently use a
 * mutex/condvar pair, plus _counter.
 * Park decrements _counter if > 0, else does a condvar wait.  Unpark
 * sets count to 1 and signals condvar.  Only one thread ever waits
 * on the condvar. Contention seen when trying to park implies that someone
 * is unparking you, so don't wait. And spurious returns are fine, so there
 * is no need to track notifications.
 */

#define MAX_SECS 100000000
/*
 * This code is common to linux and solaris and will be moved to a
 * common place in dolphin.
 *
 * The passed in time value is either a relative time in nanoseconds
 * or an absolute time in milliseconds. Either way it has to be unpacked
 * into suitable seconds and nanoseconds components and stored in the
 * given timespec structure.
 * Given time is a 64-bit value and the time_t used in the timespec is only
 * a signed-32-bit value (except on 64-bit Linux) we have to watch for
 * overflow if times way in the future are given. Further on Solaris versions
 * prior to 10 there is a restriction (see cond_timedwait) that the specified
 * number of seconds, in abstime, is less than current_time  + 100,000,000.
 * As it will be 28 years before "now + 100000000" will overflow we can
 * ignore overflow and just impose a hard-limit on seconds using the value
 * of "now + 100,000,000". This places a limit on the timeout of about 3.17
 * years from "now".
 */
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert (time > 0, "convertTime");

  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");

  // hard cap: never wait past now + MAX_SECS (see block comment above)
  time_t max_secs = now.tv_sec + MAX_SECS;

  if (isAbsolute) {
    // 'time' is an absolute deadline in milliseconds since the epoch
    jlong secs = time / 1000;
    if (secs > max_secs) {
      absTime->tv_sec = max_secs;
    }
    else {
      absTime->tv_sec = secs;
    }
    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  }
  else {
    // 'time' is a relative wait in nanoseconds
    jlong secs = time / NANOSECS_PER_SEC;
    if (secs >= MAX_SECS) {
      absTime->tv_sec = max_secs;
      absTime->tv_nsec = 0;
    }
    else {
      absTime->tv_sec = now.tv_sec + secs;
      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
        absTime->tv_nsec -= NANOSECS_PER_SEC;
        ++absTime->tv_sec; // note: this must be <= max_secs
      }
    }
  }
  assert(absTime->tv_sec >= 0, "tv_sec < 0");
  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
}

// j.u.c.LockSupport.park support: block the current JavaThread until
// unpark(), interrupt, timeout, or a (permitted) spurious wakeup.
void Parker::park(bool isAbsolute, jlong time) {
  // Ideally we'd do something useful while spinning, such
  // as calling unpackTime().

  // Optional fast-path check:
  // Return immediately if a permit is available.
  // We depend on Atomic::xchg() having full barrier semantics
  // since we are doing a lock-free update to _counter.
  if (Atomic::xchg(0, &_counter) > 0) return;

  // Optional fast-exit: Check interrupt before trying to wait
  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // First, demultiplex/decode time arguments
  timespec absTime;
  if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
    return;
  }
  if (time > 0) {
    // Warning: this code might be exposed to the old Solaris time
    // round-down bugs.  Grep "roundingFix" for details.
    unpackTime(&absTime, isAbsolute, time);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: _mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex.  If safepoints are pending both the
  // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if cannot get lock since interference arises from
  // unblocking.  Also, check interrupt before trying wait
  if (Thread::is_interrupted(thread, false) ||
      os::Solaris::mutex_trylock(_mutex) != 0) {
    return;
  }

  int status ;

  if (_counter > 0)  { // no wait needed
    _counter = 0;
    status = os::Solaris::mutex_unlock(_mutex);
    assert (status == 0, "invariant") ;
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other and Java-level accesses.
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
  thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  // Do this the hard way by blocking ...
  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif

  if (time == 0) {
    status = os::Solaris::cond_wait (_cond, _mutex) ;
  } else {
    status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
  }
  // Note that an untimed cond_wait() can sometimes return ETIME on older
  // versions of the Solaris.
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  // restore the signal mask we saved above
  thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
#endif
  _counter = 0 ;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock") ;
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other and Java-level accesses.
  OrderAccess::fence();

  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }
}

// j.u.c.LockSupport.unpark support: make a permit available and signal the
// parked thread (if any).  Setting _counter under the mutex keeps this
// correctly ordered against the waiter's re-check in park().
void Parker::unpark() {
  int s, status ;
  status = os::Solaris::mutex_lock (_mutex) ;
  assert (status == 0, "invariant") ;
  s = _counter;
  _counter = 1;
  status = os::Solaris::mutex_unlock (_mutex) ;
  assert (status == 0, "invariant") ;

  if (s < 1) {
    // a thread may be blocked in cond_wait/cond_timedwait; wake it
    status = os::Solaris::cond_signal (_cond) ;
    assert (status == 0, "invariant") ;
  }
}

extern char** environ;

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't fork a new process).
// Unlike system(), this function can be called from signal handler. It
// doesn't block SIGINT et al.
int os::fork_and_exec(char* cmd) {
  char * argv[4];
  argv[0] = (char *)"sh";
  argv[1] = (char *)"-c";
  argv[2] = cmd;
  argv[3] = NULL;

  // fork is async-safe, fork1 is not so can't use in signal handler
  pid_t pid;
  Thread* t = ThreadLocalStorage::get_thread_slow();
  if (t != NULL && t->is_inside_signal_handler()) {
    pid = fork();
  } else {
    pid = fork1();
  }

  if (pid < 0) {
    // fork failed
    warning("fork failed: %s", strerror(errno));
    return -1;

  } else if (pid == 0) {
    // child process

    // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
    execve("/usr/bin/sh", argv, environ);

    // execve failed
    _exit(-1);

  } else  {
    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
    // care about the actual exit code, for now.

    int status;

    // Wait for the child process to exit.  This returns immediately if
    // the child has already exited.
    while (waitpid(pid, &status, 0) < 0) {
        switch (errno) {
        case ECHILD: return 0;
        case EINTR: break;
        default: return -1;
        }
    }

    if (WIFEXITED(status)) {
       // The child exited normally; get its exit code.
       return WEXITSTATUS(status);
    } else if (WIFSIGNALED(status)) {
       // The child exited because of a signal
       // The best value to return is 0x80 + signal number,
       // because that is what all Unix shells do, and because
       // it allows callers to distinguish between process exit and
       // process death by signal.
       return 0x80 + WTERMSIG(status);
    } else {
       // Unknown exit code; pass it through
       return status;
    }
  }
}

// is_headless_jre()
//
// Test for the existence of xawt/libmawt.so or libawt_xawt.so
// in order to report if we are running in a headless jre
//
// Since JDK8 xawt/libmawt.so was moved into the same directory
// as libawt.so, and renamed libawt_xawt.so
//
bool os::is_headless_jre() {
  struct stat statbuf;
  char buf[MAXPATHLEN];
  char libmawtpath[MAXPATHLEN];
  const char *xawtstr  = "/xawt/libmawt.so";
  const char *new_xawtstr = "/libawt_xawt.so";
  char *p;

  // Get path to libjvm.so
  os::jvm_path(buf, sizeof(buf));

  // Get rid of libjvm.so
  p = strrchr(buf, '/');
  if (p == NULL) return false;
  else *p = '\0';

  // Get rid of client or server
  p = strrchr(buf, '/');
  if (p == NULL) return false;
  else *p = '\0';

  // check xawt/libmawt.so
  strcpy(libmawtpath, buf);
  strcat(libmawtpath, xawtstr);
  if (::stat(libmawtpath, &statbuf) == 0) return false;

  // check libawt_xawt.so
  strcpy(libmawtpath, buf);
  strcat(libmawtpath, new_xawtstr);
  if (::stat(libmawtpath, &statbuf) == 0) return false;

  return true;
}

// Interruptible write(): retries/aborts according to the thread's
// Java-level interrupt state (see INTERRUPTIBLE_RETURN_INT).
size_t os::write(int fd, const void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT(::write(fd, buf, nBytes), os::Solaris::clear_interrupted);
}

int os::close(int fd) {
  return ::close(fd);
}

int os::socket_close(int fd) {
  return ::close(fd);
}

int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  INTERRUPTIBLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
}

int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  INTERRUPTIBLE_RETURN_INT((int)::send(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
}

// Like send() but only restarts on EINTR; does not consult the Java-level
// interrupt state.
int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
}

// As both poll and select can be interrupted by signals, we have to be
// prepared to restart the system call after updating the timeout, unless
// a poll() is done with timeout == -1, in which case we repeat with this
// "wait forever" value.

// Wait for fd to become readable for up to 'timeout' ms (-1 = forever),
// shrinking the remaining timeout across EINTR restarts.
int os::timeout(int fd, long timeout) {
  int res;
  struct timeval t;
  julong prevtime, newtime;
  static const char* aNull = 0;
  struct pollfd pfd;
  pfd.fd = fd;
  pfd.events = POLLIN;

  gettimeofday(&t, &aNull);
  prevtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec / 1000;

  for(;;) {
    INTERRUPTIBLE_NORESTART(::poll(&pfd, 1, timeout), res, os::Solaris::clear_interrupted);
    if(res == OS_ERR && errno == EINTR) {
        if(timeout != -1) {
          // subtract the elapsed time from the remaining budget
          gettimeofday(&t, &aNull);
          newtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec /1000;
          timeout -= newtime - prevtime;
          if(timeout <= 0)
            return OS_OK;
          prevtime = newtime;
        }
    } else return res;
  }
}

int os::connect(int fd, struct sockaddr *him, socklen_t len) {
  int _result;
  INTERRUPTIBLE_NORESTART(::connect(fd, him, len), _result,\
                          os::Solaris::clear_interrupted);

  // Depending on when thread interruption is reset, _result could be
  // one of two values when errno == EINTR

  if (((_result == OS_INTRPT) || (_result == OS_ERR))
      && (errno == EINTR)) {
     /* restarting a connect() changes its errno semantics */
     INTERRUPTIBLE(::connect(fd, him, len), _result,\
                   os::Solaris::clear_interrupted);
     /* undo these changes */
     if (_result == OS_ERR) {
       if (errno == EALREADY) {
         errno = EINPROGRESS; /* fall through */
       } else if (errno == EISCONN) {
         // the earlier attempt actually completed; report success
         errno = 0;
         return OS_OK;
       }
     }
   }
   return _result;
 }

int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
  if (fd < 0) {
    return OS_ERR;
  }
  INTERRUPTIBLE_RETURN_INT((int)::accept(fd, him, len),\
                           os::Solaris::clear_interrupted);
}

int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
                 sockaddr* from, socklen_t* fromlen) {
  INTERRUPTIBLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen),\
                           os::Solaris::clear_interrupted);
}

int os::sendto(int fd, char* buf, size_t len, uint flags,
               struct sockaddr* to, socklen_t tolen) {
  INTERRUPTIBLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen),\
                           os::Solaris::clear_interrupted);
}

// Query the number of bytes available to read on fd via FIONREAD.
int os::socket_available(int fd, jint *pbytes) {
  if (fd < 0) {
    return OS_OK;
  }
  int ret;
  RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
  // note: ioctl can return 0 when successful, JVM_SocketAvailable
  // is expected to return 0 on failure and 1 on success to the jdk.
  return (ret == OS_ERR) ? 0 : 1;
}

int os::bind(int fd, struct sockaddr* him, socklen_t len) {
  INTERRUPTIBLE_RETURN_INT_NORESTART(::bind(fd, him, len),\
                                     os::Solaris::clear_interrupted);
}

// Get the default path to the core file
// Returns the length of the string
int os::get_core_path(char* buffer, size_t bufferSize) {
  const char* p = get_current_directory(buffer, bufferSize);

  if (p == NULL) {
    assert(p != NULL, "failed to get current directory");
    return 0;
  }

  return strlen(buffer);
}

#ifndef PRODUCT
void TestReserveMemorySpecial_test() {
  // No tests available for this platform
}
#endif