1 /* 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // no precompiled headers 26 #include "classfile/classLoader.hpp" 27 #include "classfile/systemDictionary.hpp" 28 #include "classfile/vmSymbols.hpp" 29 #include "code/icBuffer.hpp" 30 #include "code/vtableStubs.hpp" 31 #include "compiler/compileBroker.hpp" 32 #include "compiler/disassembler.hpp" 33 #include "interpreter/interpreter.hpp" 34 #include "jvm_solaris.h" 35 #include "memory/allocation.inline.hpp" 36 #include "memory/filemap.hpp" 37 #include "mutex_solaris.inline.hpp" 38 #include "oops/oop.inline.hpp" 39 #include "os_share_solaris.hpp" 40 #include "prims/jniFastGetField.hpp" 41 #include "prims/jvm.h" 42 #include "prims/jvm_misc.hpp" 43 #include "runtime/arguments.hpp" 44 #include "runtime/extendedPC.hpp" 45 #include "runtime/globals.hpp" 46 #include "runtime/interfaceSupport.hpp" 47 #include "runtime/java.hpp" 48 #include "runtime/javaCalls.hpp" 49 #include "runtime/mutexLocker.hpp" 50 #include "runtime/objectMonitor.hpp" 51 #include "runtime/osThread.hpp" 52 #include "runtime/perfMemory.hpp" 53 #include "runtime/sharedRuntime.hpp" 54 #include "runtime/statSampler.hpp" 55 #include "runtime/stubRoutines.hpp" 56 #include "runtime/thread.inline.hpp" 57 #include "runtime/threadCritical.hpp" 58 #include "runtime/timer.hpp" 59 #include "services/attachListener.hpp" 60 #include "services/memTracker.hpp" 61 #include "services/runtimeService.hpp" 62 #include "utilities/decoder.hpp" 63 #include "utilities/defaultStream.hpp" 64 #include "utilities/events.hpp" 65 #include "utilities/growableArray.hpp" 66 #include "utilities/vmError.hpp" 67 68 // put OS-includes here 69 # include <dlfcn.h> 70 # include <errno.h> 71 # include <exception> 72 # include <link.h> 73 # include <poll.h> 74 # include <pthread.h> 75 # include <pwd.h> 76 # include <schedctl.h> 77 # include <setjmp.h> 78 # include <signal.h> 79 # include <stdio.h> 80 # include <alloca.h> 81 # include <sys/filio.h> 82 # include <sys/ipc.h> 83 # include <sys/lwp.h> 84 # include <sys/machelf.h> // 
for elf Sym structure used by dladdr1 85 # include <sys/mman.h> 86 # include <sys/processor.h> 87 # include <sys/procset.h> 88 # include <sys/pset.h> 89 # include <sys/resource.h> 90 # include <sys/shm.h> 91 # include <sys/socket.h> 92 # include <sys/stat.h> 93 # include <sys/systeminfo.h> 94 # include <sys/time.h> 95 # include <sys/times.h> 96 # include <sys/types.h> 97 # include <sys/wait.h> 98 # include <sys/utsname.h> 99 # include <thread.h> 100 # include <unistd.h> 101 # include <sys/priocntl.h> 102 # include <sys/rtpriocntl.h> 103 # include <sys/tspriocntl.h> 104 # include <sys/iapriocntl.h> 105 # include <sys/fxpriocntl.h> 106 # include <sys/loadavg.h> 107 # include <string.h> 108 # include <stdio.h> 109 110 # define _STRUCTURED_PROC 1 // this gets us the new structured proc interfaces of 5.6 & later 111 # include <sys/procfs.h> // see comment in <sys/procfs.h> 112 113 #define MAX_PATH (2 * K) 114 115 // for timer info max values which include all bits 116 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF) 117 118 119 // Here are some liblgrp types from sys/lgrp_user.h to be able to 120 // compile on older systems without this header file. 
121 122 #ifndef MADV_ACCESS_LWP 123 # define MADV_ACCESS_LWP 7 /* next LWP to access heavily */ 124 #endif 125 #ifndef MADV_ACCESS_MANY 126 # define MADV_ACCESS_MANY 8 /* many processes to access heavily */ 127 #endif 128 129 #ifndef LGRP_RSRC_CPU 130 # define LGRP_RSRC_CPU 0 /* CPU resources */ 131 #endif 132 #ifndef LGRP_RSRC_MEM 133 # define LGRP_RSRC_MEM 1 /* memory resources */ 134 #endif 135 136 // see thr_setprio(3T) for the basis of these numbers 137 #define MinimumPriority 0 138 #define NormalPriority 64 139 #define MaximumPriority 127 140 141 // Values for ThreadPriorityPolicy == 1 142 int prio_policy1[CriticalPriority+1] = { 143 -99999, 0, 16, 32, 48, 64, 144 80, 96, 112, 124, 127, 127 }; 145 146 // System parameters used internally 147 static clock_t clock_tics_per_sec = 100; 148 149 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+) 150 static bool enabled_extended_FILE_stdio = false; 151 152 // For diagnostics to print a message once. see run_periodic_checks 153 static bool check_addr0_done = false; 154 static sigset_t check_signal_done; 155 static bool check_signals = true; 156 157 address os::Solaris::handler_start; // start pc of thr_sighndlrinfo 158 address os::Solaris::handler_end; // end pc of thr_sighndlrinfo 159 160 address os::Solaris::_main_stack_base = NULL; // 4352906 workaround 161 162 163 // "default" initializers for missing libc APIs 164 extern "C" { 165 static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; } 166 static int lwp_mutex_destroy(mutex_t *mx) { return 0; } 167 168 static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; } 169 static int lwp_cond_destroy(cond_t *cv) { return 0; } 170 } 171 172 // "default" initializers for pthread-based synchronization 173 extern "C" { 174 static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; } 175 static int 
pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; } 176 } 177 178 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time); 179 180 // Thread Local Storage 181 // This is common to all Solaris platforms so it is defined here, 182 // in this common file. 183 // The declarations are in the os_cpu threadLS*.hpp files. 184 // 185 // Static member initialization for TLS 186 Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL}; 187 188 #ifndef PRODUCT 189 #define _PCT(n,d) ((100.0*(double)(n))/(double)(d)) 190 191 int ThreadLocalStorage::_tcacheHit = 0; 192 int ThreadLocalStorage::_tcacheMiss = 0; 193 194 void ThreadLocalStorage::print_statistics() { 195 int total = _tcacheMiss+_tcacheHit; 196 tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n", 197 _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total)); 198 } 199 #undef _PCT 200 #endif // PRODUCT 201 202 Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id, 203 int index) { 204 Thread *thread = get_thread_slow(); 205 if (thread != NULL) { 206 address sp = os::current_stack_pointer(); 207 guarantee(thread->_stack_base == NULL || 208 (sp <= thread->_stack_base && 209 sp >= thread->_stack_base - thread->_stack_size) || 210 is_error_reported(), 211 "sp must be inside of selected thread stack"); 212 213 thread->set_self_raw_id(raw_id); // mark for quick retrieval 214 _get_thread_cache[ index ] = thread; 215 } 216 return thread; 217 } 218 219 220 static const double all_zero[ sizeof(Thread) / sizeof(double) + 1 ] = {0}; 221 #define NO_CACHED_THREAD ((Thread*)all_zero) 222 223 void ThreadLocalStorage::pd_set_thread(Thread* thread) { 224 225 // Store the new value before updating the cache to prevent a race 226 // between get_thread_via_cache_slowly() and this store operation. 
227 os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread); 228 229 // Update thread cache with new thread if setting on thread create, 230 // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit. 231 uintptr_t raw = pd_raw_thread_id(); 232 int ix = pd_cache_index(raw); 233 _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread; 234 } 235 236 void ThreadLocalStorage::pd_init() { 237 for (int i = 0; i < _pd_cache_size; i++) { 238 _get_thread_cache[i] = NO_CACHED_THREAD; 239 } 240 } 241 242 // Invalidate all the caches (happens to be the same as pd_init). 243 void ThreadLocalStorage::pd_invalidate_all() { pd_init(); } 244 245 #undef NO_CACHED_THREAD 246 247 // END Thread Local Storage 248 249 static inline size_t adjust_stack_size(address base, size_t size) { 250 if ((ssize_t)size < 0) { 251 // 4759953: Compensate for ridiculous stack size. 252 size = max_intx; 253 } 254 if (size > (size_t)base) { 255 // 4812466: Make sure size doesn't allow the stack to wrap the address space. 256 size = (size_t)base; 257 } 258 return size; 259 } 260 261 static inline stack_t get_stack_info() { 262 stack_t st; 263 int retval = thr_stksegment(&st); 264 st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size); 265 assert(retval == 0, "incorrect return value from thr_stksegment"); 266 assert((address)&st < (address)st.ss_sp, "Invalid stack base returned"); 267 assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned"); 268 return st; 269 } 270 271 address os::current_stack_base() { 272 int r = thr_main() ; 273 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ; 274 bool is_primordial_thread = r; 275 276 // Workaround 4352906, avoid calls to thr_stksegment by 277 // thr_main after the first one (it looks like we trash 278 // some data, causing the value for ss_sp to be incorrect). 
279 if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) { 280 stack_t st = get_stack_info(); 281 if (is_primordial_thread) { 282 // cache initial value of stack base 283 os::Solaris::_main_stack_base = (address)st.ss_sp; 284 } 285 return (address)st.ss_sp; 286 } else { 287 guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base"); 288 return os::Solaris::_main_stack_base; 289 } 290 } 291 292 size_t os::current_stack_size() { 293 size_t size; 294 295 int r = thr_main() ; 296 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ; 297 if(!r) { 298 size = get_stack_info().ss_size; 299 } else { 300 struct rlimit limits; 301 getrlimit(RLIMIT_STACK, &limits); 302 size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur); 303 } 304 // base may not be page aligned 305 address base = current_stack_base(); 306 address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());; 307 return (size_t)(base - bottom); 308 } 309 310 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 311 return localtime_r(clock, res); 312 } 313 314 // interruptible infrastructure 315 316 // setup_interruptible saves the thread state before going into an 317 // interruptible system call. 318 // The saved state is used to restore the thread to 319 // its former state whether or not an interrupt is received. 
// Used by classloader os::read
// os::restartable_read calls skip this layer and stay in _thread_in_native

// Save the current JavaThreadState in the OSThread and transition to
// _thread_blocked so the VM treats the thread as blocked for the duration
// of the interruptible system call. Paired with cleanup_interruptible().
void os::Solaris::setup_interruptible(JavaThread* thread) {

  JavaThreadState thread_state = thread->thread_state();

  assert(thread_state != _thread_blocked, "Coming from the wrong thread");
  assert(thread_state != _thread_in_native, "Native threads skip setup_interruptible");
  OSThread* osthread = thread->osthread();
  // Remember the pre-call state so cleanup_interruptible can restore it.
  osthread->set_saved_interrupt_thread_state(thread_state);
  // Make the top frame walkable before blocking, so stack walks during
  // the blocked period see a consistent frame.
  thread->frame_anchor()->make_walkable(thread);
  ThreadStateTransition::transition(thread, thread_state, _thread_blocked);
}

// Convenience overload: resolves the current JavaThread from TLS,
// performs setup_interruptible on it, and returns it to the caller.
JavaThread* os::Solaris::setup_interruptible() {
  JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
  setup_interruptible(thread);
  return thread;
}

// Best-effort: enable extended FILE stdio (>256 fds) on Solaris 10u4+.
// The entry point is looked up dynamically; absence is silently tolerated.
void os::Solaris::try_enable_extended_io() {
  typedef int (*enable_extended_FILE_stdio_t)(int, int);

  if (!UseExtendedFileIO) {
    return;
  }

  enable_extended_FILE_stdio_t enabler =
    (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
                                         "enable_extended_FILE_stdio");
  if (enabler) {
    // (-1, -1) presumably means "apply to all fds / use defaults" —
    // TODO confirm against the Solaris enable_extended_FILE_stdio(3C) docs.
    enabler(-1, -1);
  }
}


#ifdef ASSERT

// Debug-only: fetch the current JavaThread and assert it is already in
// _thread_in_native (native-path callers skip the blocked transition).
JavaThread* os::Solaris::setup_interruptible_native() {
  JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
  JavaThreadState thread_state = thread->thread_state();
  assert(thread_state == _thread_in_native, "Assumed thread_in_native");
  return thread;
}

// Debug-only counterpart; only verifies the state, performs no transition.
void os::Solaris::cleanup_interruptible_native(JavaThread* thread) {
  JavaThreadState thread_state = thread->thread_state();
  assert(thread_state == _thread_in_native, "Assumed thread_in_native");
}
#endif

// cleanup_interruptible reverses the effects of setup_interruptible
// setup_interruptible_already_blocked() does not need any cleanup.
// Restore the JavaThreadState saved by setup_interruptible().
void os::Solaris::cleanup_interruptible(JavaThread* thread) {
  OSThread* osthread = thread->osthread();

  ThreadStateTransition::transition(thread, _thread_blocked, osthread->saved_interrupt_thread_state());
}

// I/O interruption related counters called in _INTERRUPTIBLE

void os::Solaris::bump_interrupted_before_count() {
  RuntimeService::record_interrupted_before_count();
}

void os::Solaris::bump_interrupted_during_count() {
  RuntimeService::record_interrupted_during_count();
}

static int _processors_online = 0;

jint os::Solaris::_os_thread_limit = 0;
volatile jint os::Solaris::_os_thread_count = 0;

julong os::available_memory() {
  return Solaris::available_memory();
}

// Free physical memory = available pages * page size.
julong os::Solaris::available_memory() {
  return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
}

julong os::Solaris::_physical_memory = 0;

julong os::physical_memory() {
  return Solaris::physical_memory();
}

// State for the monotonic-time machinery (gethrtime based).
static hrtime_t first_hrtime = 0;
static const hrtime_t hrtime_hz = 1000*1000*1000;  // hrtime_t ticks per second (ns)
const int LOCK_BUSY = 1;
const int LOCK_FREE = 0;
const int LOCK_INVALID = -1;
static volatile hrtime_t max_hrtime = 0;
static volatile int max_hrtime_lock = LOCK_FREE;   // Update counter with LSB as lock-in-progress

// Cache processor counts and physical memory size from sysconf.
void os::Solaris::initialize_system_info() {
  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
  _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
  _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
}

// Number of CPUs this process may actually use: the size of its processor
// set if it is bound to one, otherwise the number of online CPUs.
int os::active_processor_count() {
  int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
  pid_t pid = getpid();
  psetid_t pset = PS_NONE;
  // Are we running in a processor set or is there any processor set around?
  if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
    uint_t pset_cpus;
    // Query the number of cpus available to us.
    if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
      assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
      _processors_online = pset_cpus;
      return pset_cpus;
    }
  }
  // Otherwise return number of online cpus
  return online_cpus;
}

// Enumerate the processor ids in 'pset'. On success *id_array is a
// C-heap array the CALLER must free with FREE_C_HEAP_ARRAY.
// NOTE(review): if the second pset_info call fails, the array allocated
// here is leaked (caller only frees on id_array != NULL after success path
// in distribute_processes, which does free it — but other callers must too).
static bool find_processors_in_pset(psetid_t pset,
                                    processorid_t** id_array,
                                    uint_t* id_length) {
  bool result = false;
  // Find the number of processors in the processor set.
  if (pset_info(pset, NULL, id_length, NULL) == 0) {
    // Make up an array to hold their ids.
    *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
    // Fill in the array with their processor ids.
    if (pset_info(pset, NULL, id_length, *id_array) == 0) {
      result = true;
    }
  }
  return result;
}

// Callers of find_processors_online() must tolerate imprecise results --
// the system configuration can change asynchronously because of DR
// or explicit psradm operations.
//
// We also need to take care that the loop (below) terminates as the
// number of processors online can change between the _SC_NPROCESSORS_ONLN
// request and the loop that builds the list of processor ids. Unfortunately
// there's no reliable way to determine the maximum valid processor id,
// so we use a manifest constant, MAX_PROCESSOR_ID, instead. See p_online
// man pages, which claim the processor id set is "sparse, but
// not too sparse". MAX_PROCESSOR_ID is used to ensure that we eventually
// exit the loop.
//
// In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
// not available on S8.0.

static bool find_processors_online(processorid_t** id_array,
                                   uint* id_length) {
  const processorid_t MAX_PROCESSOR_ID = 100000 ;
  // Find the number of processors online.
  *id_length = sysconf(_SC_NPROCESSORS_ONLN);
  // Make up an array to hold their ids.
  *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
  // Processors need not be numbered consecutively.
  long found = 0;
  processorid_t next = 0;
  while (found < *id_length && next < MAX_PROCESSOR_ID) {
    processor_info_t info;
    if (processor_info(next, &info) == 0) {
      // NB, PI_NOINTR processors are effectively online ...
      if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
        (*id_array)[found] = next;
        found += 1;
      }
    }
    next += 1;
  }
  if (found < *id_length) {
    // The loop above didn't identify the expected number of processors.
    // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
    // and re-running the loop, above, but there's no guarantee of progress
    // if the system configuration is in flux. Instead, we just return what
    // we've got. Note that in the worst case find_processors_online() could
    // return an empty set. (As a fall-back in the case of the empty set we
    // could just return the ID of the current processor).
    *id_length = found ;
  }

  return true;
}

// Pick 'distribution_length' processor ids out of 'id_array', spreading
// them across "boards" of ProcessDistributionStride processors each.
static bool assign_distribution(processorid_t* id_array,
                                uint id_length,
                                uint* distribution,
                                uint distribution_length) {
  // We assume we can assign processorid_t's to uint's.
  assert(sizeof(processorid_t) == sizeof(uint),
         "can't convert processorid_t to uint");
  // Quick check to see if we won't succeed.
  if (id_length < distribution_length) {
    return false;
  }
  // Assign processor ids to the distribution.
  // Try to shuffle processors to distribute work across boards,
  // assuming 4 processors per board.
  const uint processors_per_board = ProcessDistributionStride;
  // Find the maximum processor id.
  processorid_t max_id = 0;
  for (uint m = 0; m < id_length; m += 1) {
    max_id = MAX2(max_id, id_array[m]);
  }
  // The next id, to limit loops.
  const processorid_t limit_id = max_id + 1;
  // Make up markers for available processors.
  bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
  for (uint c = 0; c < limit_id; c += 1) {
    available_id[c] = false;
  }
  for (uint a = 0; a < id_length; a += 1) {
    available_id[id_array[a]] = true;
  }
  // Step by "boards", then by "slot", copying to "assigned".
  // NEEDS_CLEANUP: The assignment of processors should be stateful,
  //                remembering which processors have been assigned by
  //                previous calls, etc., so as to distribute several
  //                independent calls of this method.  What we'd like is
  //                It would be nice to have an API that let us ask
  //                how many processes are bound to a processor,
  //                but we don't have that, either.
  //                In the short term, "board" is static so that
  //                subsequent distributions don't all start at board 0.
  static uint board = 0;
  uint assigned = 0;
  // Until we've found enough processors ....
  while (assigned < distribution_length) {
    // ... find the next available processor in the board.
    for (uint slot = 0; slot < processors_per_board; slot += 1) {
      uint try_id = board * processors_per_board + slot;
      if ((try_id < limit_id) && (available_id[try_id] == true)) {
        distribution[assigned] = try_id;
        available_id[try_id] = false;
        assigned += 1;
        break;
      }
    }
    board += 1;
    if (board * processors_per_board + 0 >= limit_id) {
      board = 0;
    }
  }
  if (available_id != NULL) {
    FREE_C_HEAP_ARRAY(bool, available_id, mtInternal);
  }
  return true;
}

void os::set_native_thread_name(const char *name) {
  // Not yet implemented.
  return;
}

// Fill 'distribution' with 'length' processor ids for process distribution,
// drawn from the current processor set if bound to one, otherwise from all
// online processors. Results are inherently racy w.r.t. DR/psradm changes.
bool os::distribute_processes(uint length, uint* distribution) {
  bool result = false;
  // Find the processor id's of all the available CPUs.
  processorid_t* id_array = NULL;
  uint id_length = 0;
  // There are some races between querying information and using it,
  // since processor sets can change dynamically.
  psetid_t pset = PS_NONE;
  // Are we running in a processor set?
  if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
    result = find_processors_in_pset(pset, &id_array, &id_length);
  } else {
    result = find_processors_online(&id_array, &id_length);
  }
  if (result == true) {
    if (id_length >= length) {
      result = assign_distribution(id_array, id_length, distribution, length);
    } else {
      result = false;
    }
  }
  if (id_array != NULL) {
    FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal);
  }
  return result;
}

// Bind the current LWP to the given processor; returns true on success.
bool os::bind_to_processor(uint processor_id) {
  // We assume that a processorid_t can be stored in a uint.
  assert(sizeof(uint) == sizeof(processorid_t),
         "can't convert uint to processorid_t");
  int bind_result =
    processor_bind(P_LWPID,                       // bind LWP.
                   P_MYID,                        // bind current LWP.
                   (processorid_t) processor_id,  // id.
                   NULL);                         // don't return old binding.
  return (bind_result == 0);
}

// Copy the value of environment variable 'name' into 'buffer' (capacity
// 'len', including the terminator). Returns false — and stores an empty
// string when len > 0 — if the variable is unset or doesn't fit.
// NOTE(review): 'len' is compared against a size_t; a negative len would
// convert to a huge unsigned value — callers are assumed to pass len >= 0.
bool os::getenv(const char* name, char* buffer, int len) {
  char* val = ::getenv( name );
  if ( val == NULL
  ||   strlen(val) + 1  >  len ) {
    if (len > 0)  buffer[0] = 0; // return a null string
    return false;
  }
  strcpy( buffer, val );
  return true;
}


// Return true if the process has elevated privileges, i.e. the real and
// effective uid or gid differ (setuid/setgid) — not literally "is root".
// True when real and effective uid (or gid) differ — i.e. we are running
// setuid/setgid and must treat the environment as untrusted.
// Result is computed once and cached (not thread-safe, but the value is
// stable for the process lifetime).
bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}


// Derive and publish the system properties java.home, sun.boot.library.path,
// java.library.path, java.ext.dirs and java.endorsed.dirs from the location
// of libjvm.so and the runtime linker's search path.
void os::init_system_properties_values() {
  char arch[12];
  sysinfo(SI_ARCHITECTURE, arch, sizeof(arch));

  // The next steps are taken in the product version:
  //
  // Obtain the JAVA_HOME value from the location of libjvm.so.
  // This library should be located at:
  // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
  //
  // If "/jre/lib/" appears at the right place in the path, then we
  // assume libjvm.so is installed in a JDK and we use this path.
  //
  // Otherwise exit with message: "Could not create the Java virtual machine."
  //
  // The following extra steps are taken in the debugging version:
  //
  // If "/jre/lib/" does NOT appear at the right place in the path
  // instead of exit check for $JAVA_HOME environment variable.
  //
  // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
  // then we append a fake suffix "hotspot/libjvm.so" to this path so
  // it looks like libjvm.so is installed there
  // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
  //
  // Otherwise exit.
  //
  // Important note: if the location of libjvm.so changes this
  // code needs to be changed accordingly.

#define EXTENSIONS_DIR "/lib/ext"
#define ENDORSED_DIR "/lib/endorsed"
#define COMMON_DIR "/usr/jdk/packages"

  /* sysclasspath, java_home, dll_dir */
  {
    char *pslash;
    char buf[MAXPATHLEN];
    os::jvm_path(buf, sizeof(buf));

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    *(strrchr(buf, '/')) = '\0';  /* get rid of /libjvm.so */
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';             /* get rid of /{client|server|hotspot} */
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';           /* get rid of /<arch> */
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0';         /* get rid of /lib */
        }
      }
    }
    Arguments::set_java_home(buf);

    if (!set_boot_path('/', ':')) {
      return;
    }
  }

  /*
   * Where to look for native libraries
   */
  {
    // Use dlinfo() to determine the correct java.library.path.
    //
    // If we're launched by the Java launcher, and the user
    // does not set java.library.path explicitly on the commandline,
    // the Java launcher sets LD_LIBRARY_PATH for us and unsets
    // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64. In this case
    // dlinfo returns LD_LIBRARY_PATH + crle settings (including
    // /usr/lib), which is exactly what we want.
    //
    // If the user does set java.library.path, it completely
    // overwrites this setting, and always has.
    //
    // If we're not launched by the Java launcher, we may
    // get here with any/all of the LD_LIBRARY_PATH[_32|64]
    // settings. Again, dlinfo does exactly what we want.

    Dl_serinfo info_sz, *info = &info_sz;
    Dl_serpath *path;
    char* library_path;
    char *common_path;
    int i;

    // determine search path count and required buffer size
    if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
      vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
    }

    // allocate new buffer and initialize
    info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal);
    info->dls_size = info_sz.dls_size;
    info->dls_cnt = info_sz.dls_cnt;

    // obtain search path information
    if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
      FREE_C_HEAP_ARRAY(char, info, mtInternal);
      vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
    }

    path = &info->dls_serpath[0];

    // Note: Due to a legacy implementation, most of the library path
    // is set in the launcher. This was to accomodate linking restrictions
    // on legacy Solaris implementations (which are no longer supported).
    // Eventually, all the library path setting will be done here.
    //
    // However, to prevent the proliferation of improperly built native
    // libraries, the new path component /usr/jdk/packages is added here.

    // Determine the actual CPU architecture.
    char cpu_arch[12];
    sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
#ifdef _LP64
    // If we are a 64-bit vm, perform the following translations:
    //   sparc   -> sparcv9
    //   i386    -> amd64
    if (strcmp(cpu_arch, "sparc") == 0) {
      strcat(cpu_arch, "v9");
    } else if (strcmp(cpu_arch, "i386") == 0) {
      strcpy(cpu_arch, "amd64");
    }
#endif

    // Construct the invariant part of ld_library_path. Note that the
    // space for the colon and the trailing null are provided by the
    // nulls included by the sizeof operator.
    size_t bufsize = sizeof(COMMON_DIR) + sizeof("/lib/") + strlen(cpu_arch);
    common_path = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
    sprintf(common_path, COMMON_DIR "/lib/%s", cpu_arch);

    // struct size is more than sufficient for the path components obtained
    // through the dlinfo() call, so only add additional space for the path
    // components explicitly added here.
    bufsize = info->dls_size + strlen(common_path);
    library_path = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
    library_path[0] = '\0';

    // Construct the desired Java library path from the linker's library
    // search path.
    //
    // For compatibility, it is optimal that we insert the additional path
    // components specific to the Java VM after those components specified
    // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
    // infrastructure.
    if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it
      strcpy(library_path, common_path);
    } else {
      int inserted = 0;
      for (i = 0; i < info->dls_cnt; i++, path++) {
        uint_t flags = path->dls_flags & LA_SER_MASK;
        if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
          strcat(library_path, common_path);
          strcat(library_path, os::path_separator());
          inserted = 1;
        }
        strcat(library_path, path->dls_name);
        strcat(library_path, os::path_separator());
      }
      // eliminate trailing path separator
      library_path[strlen(library_path)-1] = '\0';
    }

    // happens before argument parsing - can't use a trace flag
    // tty->print_raw("init_system_properties_values: native lib path: ");
    // tty->print_raw_cr(library_path);

    // callee copies into its own buffer
    Arguments::set_library_path(library_path);

    FREE_C_HEAP_ARRAY(char, common_path, mtInternal);
    FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
    FREE_C_HEAP_ARRAY(char, info, mtInternal);
  }

  /*
   * Buffer that fits both sprintfs.
   * Note that the space for the colon and the trailing null are provided
   * by the nulls included by the sizeof operator (so actually one byte more
   * than necessary is allocated).
   */
  char buf[MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(COMMON_DIR) + sizeof(EXTENSIONS_DIR) + sizeof(ENDORSED_DIR)];

  /* Extensions directories */
  sprintf(buf, "%s" EXTENSIONS_DIR ":" COMMON_DIR EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  /* Endorsed standards default directory. */
  sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
  Arguments::set_endorsed_dirs(buf);

#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
#undef COMMON_DIR
}

void os::breakpoint() {
  BREAKPOINT;
}

// Recognize VM options that are obsolete and silently accepted.
bool os::obsolete_option(const JavaVMOption *option)
{
  if (!strncmp(option->optionString, "-Xt", 3)) {
    return true;
  } else if (!strncmp(option->optionString, "-Xtm", 4)) {
    return true;
  } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
    return true;
  } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
    return true;
  }
  return false;
}

// True when 'sp' lies strictly below the thread's recorded stack base and
// at or above its recorded stack end (Solaris stacks grow downward).
bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
  address stackStart = (address)thread->stack_base();
  address stackEnd = (address)(stackStart - (address)thread->stack_size());
  if (sp < stackStart && sp >= stackEnd ) return true;
  return false;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

static thread_t main_thread;

// Thread start routine for all new Java threads
extern "C" void* java_start(void* thread_addr) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  int prio;  // NOTE(review): unused here; shadowed by the local 'prio' below.
  Thread* thread = (Thread*)thread_addr;
  OSThread* osthr = thread->osthread();

  osthr->set_lwp_id( _lwp_self() );  // Store lwp in case we are bound
  thread->_schedctl = (void *) schedctl_init () ;

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // If the creator called set priority before we started,
  // we need to call set_native_priority now that we have an lwp.
  // We used to get the priority from thr_getprio (we called
  // thr_setprio way back in create_thread) and pass it to
  // set_native_priority, but Solaris scales the priority
  // in java_to_os_priority, so when we read it back here,
  // we pass trash to set_native_priority instead of what's
  // in java_to_os_priority. So we save the native priority
  // in the osThread and recall it here.

  if ( osthr->thread_id() != -1 ) {
    if ( UseThreadPriorities ) {
      int prio = osthr->native_priority();
      if (ThreadPriorityVerbose) {
        tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
                      INTPTR_FORMAT ", setting priority: %d\n",
                      osthr->thread_id(), osthr->lwp_id(), prio);
      }
      os::set_native_priority(thread, prio);
    }
  } else if (ThreadPriorityVerbose) {
    warning("Can't set priority in _start routine, thread id hasn't been set\n");
  }

  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  // initialize signal mask for this thread
  os::Solaris::hotspot_sigmask(thread);

  thread->run();

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::dec code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec(&os::Solaris::_os_thread_count);
  }

  if (UseDetachedThreads) {
    thr_exit(NULL);
    ShouldNotReachHere();
  }
  return NULL;
}

// Build an OSThread wrapper for an existing Solaris thread 'thread_id'.
// Returns NULL on allocation failure. State is INITIALIZED, not RUNNABLE.
static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) return NULL;

  // Store info on the Solaris thread into the OSThread
  osthread->set_thread_id(thread_id);
  osthread->set_lwp_id(_lwp_self());
  thread->_schedctl = (void *) schedctl_init () ;

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  if ( ThreadPriorityVerbose ) {
    tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
                  osthread->thread_id(), osthread->lwp_id() );
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  return osthread;
}

// Record the caller's signal mask in the OSThread, then install the
// HotSpot mask: unblock VM signals; BREAK_SIGNAL is handled only by the
// VM thread (unblocked there, blocked everywhere else).
void os::Solaris::hotspot_sigmask(Thread* thread) {

  //Save caller's signal mask
  sigset_t sigmask;
  thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
  OSThread *osthread = thread->osthread();
  osthread->set_caller_sigmask(sigmask);

  thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
      thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// Attach an externally created thread (JNI AttachCurrentThread path)
// to the VM: wrap it in an OSThread and install the HotSpot signal mask.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  OSThread* osthread = create_os_thread(thread, thr_self());
  if (osthread == NULL) {
    return false;
  }

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);
  thread->set_osthread(osthread);

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Solaris::hotspot_sigmask(thread);

  return true;
}

// Wrap the primordial thread (captured in 'main_thread') as the VM's
// starting thread. Idempotent w.r.t. _starting_thread creation.
bool os::create_main_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  if (_starting_thread == NULL) {
    _starting_thread = create_os_thread(thread, main_thread);
    if (_starting_thread == NULL) {
      return false;
    }
  }

  // The primodial thread is runnable from the start
  _starting_thread->set_state(RUNNABLE);

  thread->set_osthread(_starting_thread);

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Solaris::hotspot_sigmask(thread);

  return true;
}

// _T2_libthread is true if we believe we are running with the newer
// SunSoft lwp/libthread.so (2.8 patch, 2.9 default)
bool
os::Solaris::_T2_libthread = false;

// Create a new native thread that will run java_start(). Returns false on
// any failure (OSThread allocation, address-space check, thr_create).
// The new thread is left suspended; callers start it via os::pd_start_thread().
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  if ( ThreadPriorityVerbose ) {
    char *thrtyp;
    switch ( thr_type ) {
      case vm_thread:
        thrtyp = (char *)"vm";
        break;
      case cgc_thread:
        thrtyp = (char *)"cgc";
        break;
      case pgc_thread:
        thrtyp = (char *)"pgc";
        break;
      case java_thread:
        thrtyp = (char *)"java";
        break;
      case compiler_thread:
        thrtyp = (char *)"compiler";
        break;
      case watcher_thread:
        thrtyp = (char *)"watcher";
        break;
      default:
        thrtyp = (char *)"unknown";
        break;
    }
    tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
  }

  // Calculate stack size if it's not specified by caller.
  if (stack_size == 0) {
    // The default stack size 1M (2M for LP64).
    stack_size = (BytesPerWord >> 2) * K * K;

    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
      if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }
  stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
    // We got lots of threads. Check if we still have some address space left.
    // Need to be at least 5Mb of unreserved address space. We do check by
    // trying to reserve some.
    const size_t VirtualMemoryBangSize = 20*K*K;
    char* mem = os::reserve_memory(VirtualMemoryBangSize);
    if (mem == NULL) {
      delete osthread;
      return false;
    } else {
      // Release the memory again
      os::release_memory(mem, VirtualMemoryBangSize);
    }
  }

  // Setup osthread because the child thread may need it.
  thread->set_osthread(osthread);

  // Create the Solaris thread
  // explicit THR_BOUND for T2_libthread case in case
  // that assumption is not accurate, but our alternate signal stack
  // handling is based on it which must have bound threads
  thread_t tid = 0;
  long flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
               | ((UseBoundThreads || os::Solaris::T2_libthread() ||
                   (thr_type == vm_thread) ||
                   (thr_type == cgc_thread) ||
                   (thr_type == pgc_thread) ||
                   (thr_type == compiler_thread && BackgroundCompilation)) ?
                  THR_BOUND : 0);
  int status;

  // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
  //
  // On multiprocessors systems, libthread sometimes under-provisions our
  // process with LWPs. On a 30-way systems, for instance, we could have
  // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
  // to our process. This can result in under utilization of PEs.
  // I suspect the problem is related to libthread's LWP
  // pool management and to the kernel's SIGBLOCKING "last LWP parked"
  // upcall policy.
  //
  // The following code is palliative -- it attempts to ensure that our
  // process has sufficient LWPs to take advantage of multiple PEs.
  // Proper long-term cures include using user-level threads bound to LWPs
  // (THR_BOUND) or using LWP-based synchronization. Note that there is a
  // slight timing window with respect to sampling _os_thread_count, but
  // the race is benign. Also, we should periodically recompute
  // _processors_online as the min of SC_NPROCESSORS_ONLN and the
  // the number of PEs in our partition. You might be tempted to use
  // THR_NEW_LWP here, but I'd recommend against it as that could
  // result in undesirable growth of the libthread's LWP pool.
  // The fix below isn't sufficient; for instance, it doesn't take into account
  // LWPs parked on IO. It does, however, help certain CPU-bound benchmarks.
  //
  // Some pathologies this scheme doesn't handle:
  // * Threads can block, releasing the LWPs. The LWPs can age out.
  //   When a large number of threads become ready again there aren't
  //   enough LWPs available to service them. This can occur when the
  //   number of ready threads oscillates.
  // * LWPs/Threads park on IO, thus taking the LWP out of circulation.
  //
  // Finally, we should call thr_setconcurrency() periodically to refresh
  // the LWP pool and thwart the LWP age-out mechanism.
  // The "+3" term provides a little slop -- we want to slightly overprovision.

  if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
    if (!(flags & THR_BOUND)) {
      thr_setconcurrency (os::Solaris::_os_thread_count);       // avoid starvation
    }
  }
  // Although this doesn't hurt, we should warn of undefined behavior
  // when using unbound T1 threads with schedctl().  This should never
  // happen, as the compiler and VM threads are always created bound
  DEBUG_ONLY(
      if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
          (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
          ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
           (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
        warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
      }
  );


  // Mark that we don't have an lwp or thread id yet.
  // In case we attempt to set the priority before the thread starts.
  osthread->set_lwp_id(-1);
  osthread->set_thread_id(-1);

  status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
  if (status != 0) {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("os::create_thread");
    }
    thread->set_osthread(NULL);
    // Need to clean up stuff we've allocated so far
    delete osthread;
    return false;
  }

  Atomic::inc(&os::Solaris::_os_thread_count);

  // Store info on the Solaris thread into the OSThread
  osthread->set_thread_id(tid);

  // Remember that we created this thread so we can set priority on it
  osthread->set_vm_created();

  // Set the default thread priority. If using bound threads, setting
  // lwp priority will be delayed until thread start.
  set_native_priority(thread,
                      DefaultThreadPriority == -1 ?
                        java_to_os_priority[NormPriority] :
                        DefaultThreadPriority);

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  return true;
}

/* defined for >= Solaris 10.
This allows builds on earlier versions 1232 * of Solaris to take advantage of the newly reserved Solaris JVM signals 1233 * With SIGJVM1, SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2 1234 * and -XX:+UseAltSigs does nothing since these should have no conflict 1235 */ 1236 #if !defined(SIGJVM1) 1237 #define SIGJVM1 39 1238 #define SIGJVM2 40 1239 #endif 1240 1241 debug_only(static bool signal_sets_initialized = false); 1242 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs; 1243 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL; 1244 int os::Solaris::_SIGasync = ASYNC_SIGNAL; 1245 1246 bool os::Solaris::is_sig_ignored(int sig) { 1247 struct sigaction oact; 1248 sigaction(sig, (struct sigaction*)NULL, &oact); 1249 void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction) 1250 : CAST_FROM_FN_PTR(void*, oact.sa_handler); 1251 if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) 1252 return true; 1253 else 1254 return false; 1255 } 1256 1257 // Note: SIGRTMIN is a macro that calls sysconf() so it will 1258 // dynamically detect SIGRTMIN value for the system at runtime, not buildtime 1259 static bool isJVM1available() { 1260 return SIGJVM1 < SIGRTMIN; 1261 } 1262 1263 void os::Solaris::signal_sets_init() { 1264 // Should also have an assertion stating we are still single-threaded. 1265 assert(!signal_sets_initialized, "Already initialized"); 1266 // Fill in signals that are necessarily unblocked for all threads in 1267 // the VM. Currently, we unblock the following signals: 1268 // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden 1269 // by -Xrs (=ReduceSignalUsage)); 1270 // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all 1271 // other threads. The "ReduceSignalUsage" boolean tells us not to alter 1272 // the dispositions or masks wrt these signals. 
1273 // Programs embedding the VM that want to use the above signals for their 1274 // own purposes must, at this time, use the "-Xrs" option to prevent 1275 // interference with shutdown hooks and BREAK_SIGNAL thread dumping. 1276 // (See bug 4345157, and other related bugs). 1277 // In reality, though, unblocking these signals is really a nop, since 1278 // these signals are not blocked by default. 1279 sigemptyset(&unblocked_sigs); 1280 sigemptyset(&allowdebug_blocked_sigs); 1281 sigaddset(&unblocked_sigs, SIGILL); 1282 sigaddset(&unblocked_sigs, SIGSEGV); 1283 sigaddset(&unblocked_sigs, SIGBUS); 1284 sigaddset(&unblocked_sigs, SIGFPE); 1285 1286 if (isJVM1available) { 1287 os::Solaris::set_SIGinterrupt(SIGJVM1); 1288 os::Solaris::set_SIGasync(SIGJVM2); 1289 } else if (UseAltSigs) { 1290 os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL); 1291 os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL); 1292 } else { 1293 os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL); 1294 os::Solaris::set_SIGasync(ASYNC_SIGNAL); 1295 } 1296 1297 sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt()); 1298 sigaddset(&unblocked_sigs, os::Solaris::SIGasync()); 1299 1300 if (!ReduceSignalUsage) { 1301 if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) { 1302 sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL); 1303 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL); 1304 } 1305 if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) { 1306 sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL); 1307 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL); 1308 } 1309 if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) { 1310 sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL); 1311 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL); 1312 } 1313 } 1314 // Fill in signals that are blocked by all but the VM thread. 
1315 sigemptyset(&vm_sigs); 1316 if (!ReduceSignalUsage) 1317 sigaddset(&vm_sigs, BREAK_SIGNAL); 1318 debug_only(signal_sets_initialized = true); 1319 1320 // For diagnostics only used in run_periodic_checks 1321 sigemptyset(&check_signal_done); 1322 } 1323 1324 // These are signals that are unblocked while a thread is running Java. 1325 // (For some reason, they get blocked by default.) 1326 sigset_t* os::Solaris::unblocked_signals() { 1327 assert(signal_sets_initialized, "Not initialized"); 1328 return &unblocked_sigs; 1329 } 1330 1331 // These are the signals that are blocked while a (non-VM) thread is 1332 // running Java. Only the VM thread handles these signals. 1333 sigset_t* os::Solaris::vm_signals() { 1334 assert(signal_sets_initialized, "Not initialized"); 1335 return &vm_sigs; 1336 } 1337 1338 // These are signals that are blocked during cond_wait to allow debugger in 1339 sigset_t* os::Solaris::allowdebug_blocked_signals() { 1340 assert(signal_sets_initialized, "Not initialized"); 1341 return &allowdebug_blocked_sigs; 1342 } 1343 1344 1345 void _handle_uncaught_cxx_exception() { 1346 VMError err("An uncaught C++ exception"); 1347 err.report_and_die(); 1348 } 1349 1350 1351 // First crack at OS-specific initialization, from inside the new thread. 1352 void os::initialize_thread(Thread* thr) { 1353 int r = thr_main() ; 1354 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ; 1355 if (r) { 1356 JavaThread* jt = (JavaThread *)thr; 1357 assert(jt != NULL,"Sanity check"); 1358 size_t stack_size; 1359 address base = jt->stack_base(); 1360 if (Arguments::created_by_java_launcher()) { 1361 // Use 2MB to allow for Solaris 7 64 bit mode. 1362 stack_size = JavaThread::stack_size_at_create() == 0 1363 ? 2048*K : JavaThread::stack_size_at_create(); 1364 1365 // There are rare cases when we may have already used more than 1366 // the basic stack size allotment before this method is invoked. 1367 // Attempt to allow for a normally sized java_stack. 
1368 size_t current_stack_offset = (size_t)(base - (address)&stack_size); 1369 stack_size += ReservedSpace::page_align_size_down(current_stack_offset); 1370 } else { 1371 // 6269555: If we were not created by a Java launcher, i.e. if we are 1372 // running embedded in a native application, treat the primordial thread 1373 // as much like a native attached thread as possible. This means using 1374 // the current stack size from thr_stksegment(), unless it is too large 1375 // to reliably setup guard pages. A reasonable max size is 8MB. 1376 size_t current_size = current_stack_size(); 1377 // This should never happen, but just in case.... 1378 if (current_size == 0) current_size = 2 * K * K; 1379 stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size; 1380 } 1381 address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());; 1382 stack_size = (size_t)(base - bottom); 1383 1384 assert(stack_size > 0, "Stack size calculation problem"); 1385 1386 if (stack_size > jt->stack_size()) { 1387 NOT_PRODUCT( 1388 struct rlimit limits; 1389 getrlimit(RLIMIT_STACK, &limits); 1390 size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur); 1391 assert(size >= jt->stack_size(), "Stack size problem in main thread"); 1392 ) 1393 tty->print_cr( 1394 "Stack size of %d Kb exceeds current limit of %d Kb.\n" 1395 "(Stack sizes are rounded up to a multiple of the system page size.)\n" 1396 "See limit(1) to increase the stack size limit.", 1397 stack_size / K, jt->stack_size() / K); 1398 vm_exit(1); 1399 } 1400 assert(jt->stack_size() >= stack_size, 1401 "Attempt to map more stack than was allocated"); 1402 jt->set_stack_size(stack_size); 1403 } 1404 1405 // 5/22/01: Right now alternate signal stacks do not handle 1406 // throwing stack overflow exceptions, see bug 4463178 1407 // Until a fix is found for this, T2 will NOT imply alternate signal 1408 // stacks. 1409 // If using T2 libthread threads, install an alternate signal stack. 
1410 // Because alternate stacks associate with LWPs on Solaris, 1411 // see sigaltstack(2), if using UNBOUND threads, or if UseBoundThreads 1412 // we prefer to explicitly stack bang. 1413 // If not using T2 libthread, but using UseBoundThreads any threads 1414 // (primordial thread, jni_attachCurrentThread) we do not create, 1415 // probably are not bound, therefore they can not have an alternate 1416 // signal stack. Since our stack banging code is generated and 1417 // is shared across threads, all threads must be bound to allow 1418 // using alternate signal stacks. The alternative is to interpose 1419 // on _lwp_create to associate an alt sig stack with each LWP, 1420 // and this could be a problem when the JVM is embedded. 1421 // We would prefer to use alternate signal stacks with T2 1422 // Since there is currently no accurate way to detect T2 1423 // we do not. Assuming T2 when running T1 causes sig 11s or assertions 1424 // on installing alternate signal stacks 1425 1426 1427 // 05/09/03: removed alternate signal stack support for Solaris 1428 // The alternate signal stack mechanism is no longer needed to 1429 // handle stack overflow. This is now handled by allocating 1430 // guard pages (red zone) and stackbanging. 1431 // Initially the alternate signal stack mechanism was removed because 1432 // it did not work with T1 llibthread. Alternate 1433 // signal stacks MUST have all threads bound to lwps. Applications 1434 // can create their own threads and attach them without their being 1435 // bound under T1. This is frequently the case for the primordial thread. 1436 // If we were ever to reenable this mechanism we would need to 1437 // use the dynamic check for T2 libthread. 
1438 1439 os::Solaris::init_thread_fpu_state(); 1440 std::set_terminate(_handle_uncaught_cxx_exception); 1441 } 1442 1443 1444 1445 // Free Solaris resources related to the OSThread 1446 void os::free_thread(OSThread* osthread) { 1447 assert(osthread != NULL, "os::free_thread but osthread not set"); 1448 1449 1450 // We are told to free resources of the argument thread, 1451 // but we can only really operate on the current thread. 1452 // The main thread must take the VMThread down synchronously 1453 // before the main thread exits and frees up CodeHeap 1454 guarantee((Thread::current()->osthread() == osthread 1455 || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread"); 1456 if (Thread::current()->osthread() == osthread) { 1457 // Restore caller's signal mask 1458 sigset_t sigmask = osthread->caller_sigmask(); 1459 thr_sigsetmask(SIG_SETMASK, &sigmask, NULL); 1460 } 1461 delete osthread; 1462 } 1463 1464 void os::pd_start_thread(Thread* thread) { 1465 int status = thr_continue(thread->osthread()->thread_id()); 1466 assert_status(status == 0, status, "thr_continue failed"); 1467 } 1468 1469 1470 intx os::current_thread_id() { 1471 return (intx)thr_self(); 1472 } 1473 1474 static pid_t _initial_pid = 0; 1475 1476 int os::current_process_id() { 1477 return (int)(_initial_pid ? _initial_pid : getpid()); 1478 } 1479 1480 int os::allocate_thread_local_storage() { 1481 // %%% in Win32 this allocates a memory segment pointed to by a 1482 // register. Dan Stein can implement a similar feature in 1483 // Solaris. Alternatively, the VM can do the same thing 1484 // explicitly: malloc some storage and keep the pointer in a 1485 // register (which is part of the thread's context) (or keep it 1486 // in TLS). 1487 // %%% In current versions of Solaris, thr_self and TSD can 1488 // be accessed via short sequences of displaced indirections. 1489 // The value of thr_self is available as %g7(36). 
1490 // The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4), 1491 // assuming that the current thread already has a value bound to k. 1492 // It may be worth experimenting with such access patterns, 1493 // and later having the parameters formally exported from a Solaris 1494 // interface. I think, however, that it will be faster to 1495 // maintain the invariant that %g2 always contains the 1496 // JavaThread in Java code, and have stubs simply 1497 // treat %g2 as a caller-save register, preserving it in a %lN. 1498 thread_key_t tk; 1499 if (thr_keycreate( &tk, NULL ) ) 1500 fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed " 1501 "(%s)", strerror(errno))); 1502 return int(tk); 1503 } 1504 1505 void os::free_thread_local_storage(int index) { 1506 // %%% don't think we need anything here 1507 // if ( pthread_key_delete((pthread_key_t) tk) ) 1508 // fatal("os::free_thread_local_storage: pthread_key_delete failed"); 1509 } 1510 1511 #define SMALLINT 32 // libthread allocate for tsd_common is a version specific 1512 // small number - point is NO swap space available 1513 void os::thread_local_storage_at_put(int index, void* value) { 1514 // %%% this is used only in threadLocalStorage.cpp 1515 if (thr_setspecific((thread_key_t)index, value)) { 1516 if (errno == ENOMEM) { 1517 vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR, 1518 "thr_setspecific: out of swap space"); 1519 } else { 1520 fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed " 1521 "(%s)", strerror(errno))); 1522 } 1523 } else { 1524 ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ; 1525 } 1526 } 1527 1528 // This function could be called before TLS is initialized, for example, when 1529 // VM receives an async signal or when VM causes a fatal error during 1530 // initialization. Return NULL if thr_getspecific() fails. 
1531 void* os::thread_local_storage_at(int index) { 1532 // %%% this is used only in threadLocalStorage.cpp 1533 void* r = NULL; 1534 return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r; 1535 } 1536 1537 1538 // gethrtime can move backwards if read from one cpu and then a different cpu 1539 // getTimeNanos is guaranteed to not move backward on Solaris 1540 // local spinloop created as faster for a CAS on an int than 1541 // a CAS on a 64bit jlong. Also Atomic::cmpxchg for jlong is not 1542 // supported on sparc v8 or pre supports_cx8 intel boxes. 1543 // oldgetTimeNanos for systems which do not support CAS on 64bit jlong 1544 // i.e. sparc v8 and pre supports_cx8 (i486) intel boxes 1545 inline hrtime_t oldgetTimeNanos() { 1546 int gotlock = LOCK_INVALID; 1547 hrtime_t newtime = gethrtime(); 1548 1549 for (;;) { 1550 // grab lock for max_hrtime 1551 int curlock = max_hrtime_lock; 1552 if (curlock & LOCK_BUSY) continue; 1553 if (gotlock = Atomic::cmpxchg(LOCK_BUSY, &max_hrtime_lock, LOCK_FREE) != LOCK_FREE) continue; 1554 if (newtime > max_hrtime) { 1555 max_hrtime = newtime; 1556 } else { 1557 newtime = max_hrtime; 1558 } 1559 // release lock 1560 max_hrtime_lock = LOCK_FREE; 1561 return newtime; 1562 } 1563 } 1564 // gethrtime can move backwards if read from one cpu and then a different cpu 1565 // getTimeNanos is guaranteed to not move backward on Solaris 1566 inline hrtime_t getTimeNanos() { 1567 if (VM_Version::supports_cx8()) { 1568 const hrtime_t now = gethrtime(); 1569 // Use atomic long load since 32-bit x86 uses 2 registers to keep long. 1570 const hrtime_t prev = Atomic::load((volatile jlong*)&max_hrtime); 1571 if (now <= prev) return prev; // same or retrograde time; 1572 const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev); 1573 assert(obsv >= prev, "invariant"); // Monotonicity 1574 // If the CAS succeeded then we're done and return "now". 
1575 // If the CAS failed and the observed value "obs" is >= now then 1576 // we should return "obs". If the CAS failed and now > obs > prv then 1577 // some other thread raced this thread and installed a new value, in which case 1578 // we could either (a) retry the entire operation, (b) retry trying to install now 1579 // or (c) just return obs. We use (c). No loop is required although in some cases 1580 // we might discard a higher "now" value in deference to a slightly lower but freshly 1581 // installed obs value. That's entirely benign -- it admits no new orderings compared 1582 // to (a) or (b) -- and greatly reduces coherence traffic. 1583 // We might also condition (c) on the magnitude of the delta between obs and now. 1584 // Avoiding excessive CAS operations to hot RW locations is critical. 1585 // See http://blogs.sun.com/dave/entry/cas_and_cache_trivia_invalidate 1586 return (prev == obsv) ? now : obsv ; 1587 } else { 1588 return oldgetTimeNanos(); 1589 } 1590 } 1591 1592 // Time since start-up in seconds to a fine granularity. 1593 // Used by VMSelfDestructTimer and the MemProfiler. 1594 double os::elapsedTime() { 1595 return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz; 1596 } 1597 1598 jlong os::elapsed_counter() { 1599 return (jlong)(getTimeNanos() - first_hrtime); 1600 } 1601 1602 jlong os::elapsed_frequency() { 1603 return hrtime_hz; 1604 } 1605 1606 // Return the real, user, and system times in seconds from an 1607 // arbitrary fixed point in the past. 
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    // For consistency return the real time from getTimeNanos()
    // converted to seconds.
    *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);

    return true;
  }
}

bool os::supports_vtime() { return true; }

// Turn on per-thread micro-state accounting (PR_MSACCT) via /proc so that
// gethrvtime() returns meaningful values. Returns false if /proc/self/ctl
// cannot be opened or written.
bool os::enable_vtime() {
  int fd = ::open("/proc/self/ctl", O_WRONLY);
  if (fd == -1)
    return false;

  long cmd[] = { PCSET, PR_MSACCT };
  int res = ::write(fd, cmd, sizeof(long) * 2);
  ::close(fd);
  if (res != sizeof(long) * 2)
    return false;

  return true;
}

// Reports whether micro-state accounting is currently enabled, by reading
// the PR_MSACCT flag from /proc/self/status.
bool os::vtime_enabled() {
  int fd = ::open("/proc/self/status", O_RDONLY);
  if (fd == -1)
    return false;

  pstatus_t status;
  int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
  ::close(fd);
  if (res != sizeof(pstatus_t))
    return false;

  return status.pr_flags & PR_MSACCT;
}

double os::elapsedVTime() {
  return (double)gethrvtime() / (double)hrtime_hz;
}

// Used internally for comparisons only
// getTimeMillis guaranteed to not move backwards on Solaris
jlong getTimeMillis() {
  jlong nanotime = getTimeNanos();
  return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
}

// Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
jlong os::javaTimeMillis() {
  timeval t;
  if (gettimeofday( &t, NULL) == -1)
    fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
  return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
}

jlong os::javaTimeNanos() {
  return (jlong)getTimeNanos();
}

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
  info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
  info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
  info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
}

// Formats the current local time as "YYYY-MM-DD HH:MM:SS" into buf.
char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
void os::abort(bool dump_core) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core (for debugging)
  }

  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  ::abort(); // dump core (for debugging)
}

// unused
void os::set_error_file(const char *logfile) {}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }

// Returns true iff filename names an existing file (per os::stat).
static bool file_exists(const char* filename) {
  struct stat statbuf;
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return os::stat(filename, &statbuf) == 0;
}

// Builds "lib<fname>.so" into buffer, optionally prefixed by pname (which may
// be a path-separator-delimited search list; the first entry containing the
// library wins). Returns false on buffer overflow or if no candidate exists
// when searching a path list.
bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0 ; i < n ; i++) {
      // really shouldn't be NULL but what the heck, check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0 ; i < n ; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}

// check if addr is inside libjvm.so
bool os::address_is_in_vm(address addr) {
  static address libjvm_base_addr;
  Dl_info dlinfo;

  // Cache libjvm's base address on first use (resolved via a symbol known
  // to live in libjvm: this function itself).
  if (libjvm_base_addr == NULL) {
    if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
      libjvm_base_addr = (address)dlinfo.dli_fbase;
    }
    assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
  }

  if (dladdr((void *)addr, &dlinfo) != 0) {
    if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
  }

  return false;
}

// dladdr1 (with RTLD_DL_SYMENT) gives symbol-size info that plain dladdr
// does not; resolved dynamically because it may be absent on older Solaris.
typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int);
static dladdr1_func_type dladdr1_func = NULL;

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int * offset) {
  // buf is not optional, but offset is optional
assert(buf != NULL, "sanity check"); 1836 1837 Dl_info dlinfo; 1838 1839 // dladdr1_func was initialized in os::init() 1840 if (dladdr1_func != NULL) { 1841 // yes, we have dladdr1 1842 1843 // Support for dladdr1 is checked at runtime; it may be 1844 // available even if the vm is built on a machine that does 1845 // not have dladdr1 support. Make sure there is a value for 1846 // RTLD_DL_SYMENT. 1847 #ifndef RTLD_DL_SYMENT 1848 #define RTLD_DL_SYMENT 1 1849 #endif 1850 #ifdef _LP64 1851 Elf64_Sym * info; 1852 #else 1853 Elf32_Sym * info; 1854 #endif 1855 if (dladdr1_func((void *)addr, &dlinfo, (void **)&info, 1856 RTLD_DL_SYMENT) != 0) { 1857 // see if we have a matching symbol that covers our address 1858 if (dlinfo.dli_saddr != NULL && 1859 (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) { 1860 if (dlinfo.dli_sname != NULL) { 1861 if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) { 1862 jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname); 1863 } 1864 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr; 1865 return true; 1866 } 1867 } 1868 // no matching symbol so try for just file info 1869 if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) { 1870 if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase), 1871 buf, buflen, offset, dlinfo.dli_fname)) { 1872 return true; 1873 } 1874 } 1875 } 1876 buf[0] = '\0'; 1877 if (offset != NULL) *offset = -1; 1878 return false; 1879 } 1880 1881 // no, only dladdr is available 1882 if (dladdr((void *)addr, &dlinfo) != 0) { 1883 // see if we have a matching symbol 1884 if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) { 1885 if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) { 1886 jio_snprintf(buf, buflen, dlinfo.dli_sname); 1887 } 1888 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr; 1889 return true; 1890 } 1891 // no matching symbol so try for just file info 1892 if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) { 1893 if 
(Decoder::decode((address)(addr - (address)dlinfo.dli_fbase), 1894 buf, buflen, offset, dlinfo.dli_fname)) { 1895 return true; 1896 } 1897 } 1898 } 1899 buf[0] = '\0'; 1900 if (offset != NULL) *offset = -1; 1901 return false; 1902 } 1903 1904 bool os::dll_address_to_library_name(address addr, char* buf, 1905 int buflen, int* offset) { 1906 // buf is not optional, but offset is optional 1907 assert(buf != NULL, "sanity check"); 1908 1909 Dl_info dlinfo; 1910 1911 if (dladdr((void*)addr, &dlinfo) != 0) { 1912 if (dlinfo.dli_fname != NULL) { 1913 jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname); 1914 } 1915 if (dlinfo.dli_fbase != NULL && offset != NULL) { 1916 *offset = addr - (address)dlinfo.dli_fbase; 1917 } 1918 return true; 1919 } 1920 1921 buf[0] = '\0'; 1922 if (offset) *offset = -1; 1923 return false; 1924 } 1925 1926 // Prints the names and full paths of all opened dynamic libraries 1927 // for current process 1928 void os::print_dll_info(outputStream * st) { 1929 Dl_info dli; 1930 void *handle; 1931 Link_map *map; 1932 Link_map *p; 1933 1934 st->print_cr("Dynamic libraries:"); st->flush(); 1935 1936 if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 || 1937 dli.dli_fname == NULL) { 1938 st->print_cr("Error: Cannot print dynamic libraries."); 1939 return; 1940 } 1941 handle = dlopen(dli.dli_fname, RTLD_LAZY); 1942 if (handle == NULL) { 1943 st->print_cr("Error: Cannot print dynamic libraries."); 1944 return; 1945 } 1946 dlinfo(handle, RTLD_DI_LINKMAP, &map); 1947 if (map == NULL) { 1948 st->print_cr("Error: Cannot print dynamic libraries."); 1949 return; 1950 } 1951 1952 while (map->l_prev != NULL) 1953 map = map->l_prev; 1954 1955 while (map != NULL) { 1956 st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name); 1957 map = map->l_next; 1958 } 1959 1960 dlclose(handle); 1961 } 1962 1963 // Loads .dll/.so and 1964 // in case of error it checks if .dll/.so was built for the 1965 // same architecture as Hotspot is running on 1966 1967 void 
* os::dll_load(const char *filename, char *ebuf, int ebuflen) 1968 { 1969 void * result= ::dlopen(filename, RTLD_LAZY); 1970 if (result != NULL) { 1971 // Successful loading 1972 return result; 1973 } 1974 1975 Elf32_Ehdr elf_head; 1976 1977 // Read system error message into ebuf 1978 // It may or may not be overwritten below 1979 ::strncpy(ebuf, ::dlerror(), ebuflen-1); 1980 ebuf[ebuflen-1]='\0'; 1981 int diag_msg_max_length=ebuflen-strlen(ebuf); 1982 char* diag_msg_buf=ebuf+strlen(ebuf); 1983 1984 if (diag_msg_max_length==0) { 1985 // No more space in ebuf for additional diagnostics message 1986 return NULL; 1987 } 1988 1989 1990 int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK); 1991 1992 if (file_descriptor < 0) { 1993 // Can't open library, report dlerror() message 1994 return NULL; 1995 } 1996 1997 bool failed_to_read_elf_head= 1998 (sizeof(elf_head)!= 1999 (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ; 2000 2001 ::close(file_descriptor); 2002 if (failed_to_read_elf_head) { 2003 // file i/o error - report dlerror() msg 2004 return NULL; 2005 } 2006 2007 typedef struct { 2008 Elf32_Half code; // Actual value as defined in elf.h 2009 Elf32_Half compat_class; // Compatibility of archs at VM's sense 2010 char elf_class; // 32 or 64 bit 2011 char endianess; // MSB or LSB 2012 char* name; // String representation 2013 } arch_t; 2014 2015 static const arch_t arch_array[]={ 2016 {EM_386, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"}, 2017 {EM_486, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"}, 2018 {EM_IA_64, EM_IA_64, ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"}, 2019 {EM_X86_64, EM_X86_64, ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"}, 2020 {EM_SPARC, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"}, 2021 {EM_SPARC32PLUS, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"}, 2022 {EM_SPARCV9, EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"}, 2023 {EM_PPC, EM_PPC, ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"}, 2024 {EM_PPC64, 
EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"}, 2025 {EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"} 2026 }; 2027 2028 #if (defined IA32) 2029 static Elf32_Half running_arch_code=EM_386; 2030 #elif (defined AMD64) 2031 static Elf32_Half running_arch_code=EM_X86_64; 2032 #elif (defined IA64) 2033 static Elf32_Half running_arch_code=EM_IA_64; 2034 #elif (defined __sparc) && (defined _LP64) 2035 static Elf32_Half running_arch_code=EM_SPARCV9; 2036 #elif (defined __sparc) && (!defined _LP64) 2037 static Elf32_Half running_arch_code=EM_SPARC; 2038 #elif (defined __powerpc64__) 2039 static Elf32_Half running_arch_code=EM_PPC64; 2040 #elif (defined __powerpc__) 2041 static Elf32_Half running_arch_code=EM_PPC; 2042 #elif (defined ARM) 2043 static Elf32_Half running_arch_code=EM_ARM; 2044 #else 2045 #error Method os::dll_load requires that one of following is defined:\ 2046 IA32, AMD64, IA64, __sparc, __powerpc__, ARM, ARM 2047 #endif 2048 2049 // Identify compatability class for VM's architecture and library's architecture 2050 // Obtain string descriptions for architectures 2051 2052 arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL}; 2053 int running_arch_index=-1; 2054 2055 for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) { 2056 if (running_arch_code == arch_array[i].code) { 2057 running_arch_index = i; 2058 } 2059 if (lib_arch.code == arch_array[i].code) { 2060 lib_arch.compat_class = arch_array[i].compat_class; 2061 lib_arch.name = arch_array[i].name; 2062 } 2063 } 2064 2065 assert(running_arch_index != -1, 2066 "Didn't find running architecture code (running_arch_code) in arch_array"); 2067 if (running_arch_index == -1) { 2068 // Even though running architecture detection failed 2069 // we may still continue with reporting dlerror() message 2070 return NULL; 2071 } 2072 2073 if (lib_arch.endianess != arch_array[running_arch_index].endianess) { 2074 ::snprintf(diag_msg_buf, 
diag_msg_max_length-1," (Possible cause: endianness mismatch)"); 2075 return NULL; 2076 } 2077 2078 if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) { 2079 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)"); 2080 return NULL; 2081 } 2082 2083 if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) { 2084 if ( lib_arch.name!=NULL ) { 2085 ::snprintf(diag_msg_buf, diag_msg_max_length-1, 2086 " (Possible cause: can't load %s-bit .so on a %s-bit platform)", 2087 lib_arch.name, arch_array[running_arch_index].name); 2088 } else { 2089 ::snprintf(diag_msg_buf, diag_msg_max_length-1, 2090 " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)", 2091 lib_arch.code, 2092 arch_array[running_arch_index].name); 2093 } 2094 } 2095 2096 return NULL; 2097 } 2098 2099 void* os::dll_lookup(void* handle, const char* name) { 2100 return dlsym(handle, name); 2101 } 2102 2103 void* os::get_default_process_handle() { 2104 return (void*)::dlopen(NULL, RTLD_LAZY); 2105 } 2106 2107 int os::stat(const char *path, struct stat *sbuf) { 2108 char pathbuf[MAX_PATH]; 2109 if (strlen(path) > MAX_PATH - 1) { 2110 errno = ENAMETOOLONG; 2111 return -1; 2112 } 2113 os::native_path(strcpy(pathbuf, path)); 2114 return ::stat(pathbuf, sbuf); 2115 } 2116 2117 static bool _print_ascii_file(const char* filename, outputStream* st) { 2118 int fd = ::open(filename, O_RDONLY); 2119 if (fd == -1) { 2120 return false; 2121 } 2122 2123 char buf[32]; 2124 int bytes; 2125 while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) { 2126 st->print_raw(buf, bytes); 2127 } 2128 2129 ::close(fd); 2130 2131 return true; 2132 } 2133 2134 void os::print_os_info_brief(outputStream* st) { 2135 os::Solaris::print_distro_info(st); 2136 2137 os::Posix::print_uname_info(st); 2138 2139 os::Solaris::print_libversion_info(st); 2140 } 2141 2142 void os::print_os_info(outputStream* st) { 2143 st->print("OS:"); 2144 2145 
os::Solaris::print_distro_info(st); 2146 2147 os::Posix::print_uname_info(st); 2148 2149 os::Solaris::print_libversion_info(st); 2150 2151 os::Posix::print_rlimit_info(st); 2152 2153 os::Posix::print_load_average(st); 2154 } 2155 2156 void os::Solaris::print_distro_info(outputStream* st) { 2157 if (!_print_ascii_file("/etc/release", st)) { 2158 st->print("Solaris"); 2159 } 2160 st->cr(); 2161 } 2162 2163 void os::Solaris::print_libversion_info(outputStream* st) { 2164 if (os::Solaris::T2_libthread()) { 2165 st->print(" (T2 libthread)"); 2166 } 2167 else { 2168 st->print(" (T1 libthread)"); 2169 } 2170 st->cr(); 2171 } 2172 2173 static bool check_addr0(outputStream* st) { 2174 jboolean status = false; 2175 int fd = ::open("/proc/self/map",O_RDONLY); 2176 if (fd >= 0) { 2177 prmap_t p; 2178 while(::read(fd, &p, sizeof(p)) > 0) { 2179 if (p.pr_vaddr == 0x0) { 2180 st->print("Warning: Address: 0x%x, Size: %dK, ",p.pr_vaddr, p.pr_size/1024, p.pr_mapname); 2181 st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname); 2182 st->print("Access:"); 2183 st->print("%s",(p.pr_mflags & MA_READ) ? "r" : "-"); 2184 st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-"); 2185 st->print("%s",(p.pr_mflags & MA_EXEC) ? "x" : "-"); 2186 st->cr(); 2187 status = true; 2188 } 2189 } 2190 ::close(fd); 2191 } 2192 return status; 2193 } 2194 2195 void os::pd_print_cpu_info(outputStream* st) { 2196 // Nothing to do for now. 
2197 } 2198 2199 void os::print_memory_info(outputStream* st) { 2200 st->print("Memory:"); 2201 st->print(" %dk page", os::vm_page_size()>>10); 2202 st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10); 2203 st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10); 2204 st->cr(); 2205 (void) check_addr0(st); 2206 } 2207 2208 void os::print_siginfo(outputStream* st, void* siginfo) { 2209 const siginfo_t* si = (const siginfo_t*)siginfo; 2210 2211 os::Posix::print_siginfo_brief(st, si); 2212 2213 if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) && 2214 UseSharedSpaces) { 2215 FileMapInfo* mapinfo = FileMapInfo::current_info(); 2216 if (mapinfo->is_in_shared_space(si->si_addr)) { 2217 st->print("\n\nError accessing class data sharing archive." \ 2218 " Mapped file inaccessible during execution, " \ 2219 " possible disk/network problem."); 2220 } 2221 } 2222 st->cr(); 2223 } 2224 2225 // Moved from whole group, because we need them here for diagnostic 2226 // prints. 
2227 #define OLDMAXSIGNUM 32 2228 static int Maxsignum = 0; 2229 static int *ourSigFlags = NULL; 2230 2231 extern "C" void sigINTRHandler(int, siginfo_t*, void*); 2232 2233 int os::Solaris::get_our_sigflags(int sig) { 2234 assert(ourSigFlags!=NULL, "signal data structure not initialized"); 2235 assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range"); 2236 return ourSigFlags[sig]; 2237 } 2238 2239 void os::Solaris::set_our_sigflags(int sig, int flags) { 2240 assert(ourSigFlags!=NULL, "signal data structure not initialized"); 2241 assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range"); 2242 ourSigFlags[sig] = flags; 2243 } 2244 2245 2246 static const char* get_signal_handler_name(address handler, 2247 char* buf, int buflen) { 2248 int offset; 2249 bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset); 2250 if (found) { 2251 // skip directory names 2252 const char *p1, *p2; 2253 p1 = buf; 2254 size_t len = strlen(os::file_separator()); 2255 while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len; 2256 jio_snprintf(buf, buflen, "%s+0x%x", p1, offset); 2257 } else { 2258 jio_snprintf(buf, buflen, PTR_FORMAT, handler); 2259 } 2260 return buf; 2261 } 2262 2263 static void print_signal_handler(outputStream* st, int sig, 2264 char* buf, size_t buflen) { 2265 struct sigaction sa; 2266 2267 sigaction(sig, NULL, &sa); 2268 2269 st->print("%s: ", os::exception_name(sig, buf, buflen)); 2270 2271 address handler = (sa.sa_flags & SA_SIGINFO) 2272 ? 
CAST_FROM_FN_PTR(address, sa.sa_sigaction) 2273 : CAST_FROM_FN_PTR(address, sa.sa_handler); 2274 2275 if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) { 2276 st->print("SIG_DFL"); 2277 } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) { 2278 st->print("SIG_IGN"); 2279 } else { 2280 st->print("[%s]", get_signal_handler_name(handler, buf, buflen)); 2281 } 2282 2283 st->print(", sa_mask[0]="); 2284 os::Posix::print_signal_set_short(st, &sa.sa_mask); 2285 2286 address rh = VMError::get_resetted_sighandler(sig); 2287 // May be, handler was resetted by VMError? 2288 if(rh != NULL) { 2289 handler = rh; 2290 sa.sa_flags = VMError::get_resetted_sigflags(sig); 2291 } 2292 2293 st->print(", sa_flags="); 2294 os::Posix::print_sa_flags(st, sa.sa_flags); 2295 2296 // Check: is it our handler? 2297 if(handler == CAST_FROM_FN_PTR(address, signalHandler) || 2298 handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) { 2299 // It is our signal handler 2300 // check for flags 2301 if(sa.sa_flags != os::Solaris::get_our_sigflags(sig)) { 2302 st->print( 2303 ", flags was changed from " PTR32_FORMAT ", consider using jsig library", 2304 os::Solaris::get_our_sigflags(sig)); 2305 } 2306 } 2307 st->cr(); 2308 } 2309 2310 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) { 2311 st->print_cr("Signal Handlers:"); 2312 print_signal_handler(st, SIGSEGV, buf, buflen); 2313 print_signal_handler(st, SIGBUS , buf, buflen); 2314 print_signal_handler(st, SIGFPE , buf, buflen); 2315 print_signal_handler(st, SIGPIPE, buf, buflen); 2316 print_signal_handler(st, SIGXFSZ, buf, buflen); 2317 print_signal_handler(st, SIGILL , buf, buflen); 2318 print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen); 2319 print_signal_handler(st, ASYNC_SIGNAL, buf, buflen); 2320 print_signal_handler(st, BREAK_SIGNAL, buf, buflen); 2321 print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen); 2322 print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen); 2323 print_signal_handler(st, 
SHUTDOWN3_SIGNAL, buf, buflen); 2324 print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen); 2325 print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen); 2326 } 2327 2328 static char saved_jvm_path[MAXPATHLEN] = { 0 }; 2329 2330 // Find the full path to the current module, libjvm.so 2331 void os::jvm_path(char *buf, jint buflen) { 2332 // Error checking. 2333 if (buflen < MAXPATHLEN) { 2334 assert(false, "must use a large-enough buffer"); 2335 buf[0] = '\0'; 2336 return; 2337 } 2338 // Lazy resolve the path to current module. 2339 if (saved_jvm_path[0] != 0) { 2340 strcpy(buf, saved_jvm_path); 2341 return; 2342 } 2343 2344 Dl_info dlinfo; 2345 int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo); 2346 assert(ret != 0, "cannot locate libjvm"); 2347 if (ret != 0 && dlinfo.dli_fname != NULL) { 2348 realpath((char *)dlinfo.dli_fname, buf); 2349 } else { 2350 buf[0] = '\0'; 2351 return; 2352 } 2353 2354 if (Arguments::sun_java_launcher_is_altjvm()) { 2355 // Support for the java launcher's '-XXaltjvm=<path>' option. Typical 2356 // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so". 2357 // If "/jre/lib/" appears at the right place in the string, then 2358 // assume we are installed in a JDK and we're done. Otherwise, check 2359 // for a JAVA_HOME environment variable and fix up the path so it 2360 // looks like libjvm.so is installed there (append a fake suffix 2361 // hotspot/libjvm.so). 2362 const char *p = buf + strlen(buf) - 1; 2363 for (int count = 0; p > buf && count < 5; ++count) { 2364 for (--p; p > buf && *p != '/'; --p) 2365 /* empty */ ; 2366 } 2367 2368 if (strncmp(p, "/jre/lib/", 9) != 0) { 2369 // Look for JAVA_HOME in the environment. 
2370 char* java_home_var = ::getenv("JAVA_HOME"); 2371 if (java_home_var != NULL && java_home_var[0] != 0) { 2372 char cpu_arch[12]; 2373 char* jrelib_p; 2374 int len; 2375 sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch)); 2376 #ifdef _LP64 2377 // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9. 2378 if (strcmp(cpu_arch, "sparc") == 0) { 2379 strcat(cpu_arch, "v9"); 2380 } else if (strcmp(cpu_arch, "i386") == 0) { 2381 strcpy(cpu_arch, "amd64"); 2382 } 2383 #endif 2384 // Check the current module name "libjvm.so". 2385 p = strrchr(buf, '/'); 2386 assert(strstr(p, "/libjvm") == p, "invalid library name"); 2387 2388 realpath(java_home_var, buf); 2389 // determine if this is a legacy image or modules image 2390 // modules image doesn't have "jre" subdirectory 2391 len = strlen(buf); 2392 jrelib_p = buf + len; 2393 snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch); 2394 if (0 != access(buf, F_OK)) { 2395 snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch); 2396 } 2397 2398 if (0 == access(buf, F_OK)) { 2399 // Use current module name "libjvm.so" 2400 len = strlen(buf); 2401 snprintf(buf + len, buflen-len, "/hotspot/libjvm.so"); 2402 } else { 2403 // Go back to path of .so 2404 realpath((char *)dlinfo.dli_fname, buf); 2405 } 2406 } 2407 } 2408 } 2409 2410 strcpy(saved_jvm_path, buf); 2411 } 2412 2413 2414 void os::print_jni_name_prefix_on(outputStream* st, int args_size) { 2415 // no prefix required, not even "_" 2416 } 2417 2418 2419 void os::print_jni_name_suffix_on(outputStream* st, int args_size) { 2420 // no suffix required 2421 } 2422 2423 // This method is a copy of JDK's sysGetLastErrorString 2424 // from src/solaris/hpi/src/system_md.c 2425 2426 size_t os::lasterror(char *buf, size_t len) { 2427 2428 if (errno == 0) return 0; 2429 2430 const char *s = ::strerror(errno); 2431 size_t n = ::strlen(s); 2432 if (n >= len) { 2433 n = len - 1; 2434 } 2435 ::strncpy(buf, s, n); 2436 buf[n] = '\0'; 2437 return n; 2438 } 2439 2440 2441 // 
sun.misc.Signal 2442 2443 extern "C" { 2444 static void UserHandler(int sig, void *siginfo, void *context) { 2445 // Ctrl-C is pressed during error reporting, likely because the error 2446 // handler fails to abort. Let VM die immediately. 2447 if (sig == SIGINT && is_error_reported()) { 2448 os::die(); 2449 } 2450 2451 os::signal_notify(sig); 2452 // We do not need to reinstate the signal handler each time... 2453 } 2454 } 2455 2456 void* os::user_handler() { 2457 return CAST_FROM_FN_PTR(void*, UserHandler); 2458 } 2459 2460 class Semaphore : public StackObj { 2461 public: 2462 Semaphore(); 2463 ~Semaphore(); 2464 void signal(); 2465 void wait(); 2466 bool trywait(); 2467 bool timedwait(unsigned int sec, int nsec); 2468 private: 2469 sema_t _semaphore; 2470 }; 2471 2472 2473 Semaphore::Semaphore() { 2474 sema_init(&_semaphore, 0, NULL, NULL); 2475 } 2476 2477 Semaphore::~Semaphore() { 2478 sema_destroy(&_semaphore); 2479 } 2480 2481 void Semaphore::signal() { 2482 sema_post(&_semaphore); 2483 } 2484 2485 void Semaphore::wait() { 2486 sema_wait(&_semaphore); 2487 } 2488 2489 bool Semaphore::trywait() { 2490 return sema_trywait(&_semaphore) == 0; 2491 } 2492 2493 bool Semaphore::timedwait(unsigned int sec, int nsec) { 2494 struct timespec ts; 2495 unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec); 2496 2497 while (1) { 2498 int result = sema_timedwait(&_semaphore, &ts); 2499 if (result == 0) { 2500 return true; 2501 } else if (errno == EINTR) { 2502 continue; 2503 } else if (errno == ETIME) { 2504 return false; 2505 } else { 2506 return false; 2507 } 2508 } 2509 } 2510 2511 extern "C" { 2512 typedef void (*sa_handler_t)(int); 2513 typedef void (*sa_sigaction_t)(int, siginfo_t *, void *); 2514 } 2515 2516 void* os::signal(int signal_number, void* handler) { 2517 struct sigaction sigAct, oldSigAct; 2518 sigfillset(&(sigAct.sa_mask)); 2519 sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND; 2520 sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler); 2521 2522 if 
(sigaction(signal_number, &sigAct, &oldSigAct)) 2523 // -1 means registration failed 2524 return (void *)-1; 2525 2526 return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler); 2527 } 2528 2529 void os::signal_raise(int signal_number) { 2530 raise(signal_number); 2531 } 2532 2533 /* 2534 * The following code is moved from os.cpp for making this 2535 * code platform specific, which it is by its very nature. 2536 */ 2537 2538 // a counter for each possible signal value 2539 static int Sigexit = 0; 2540 static int Maxlibjsigsigs; 2541 static jint *pending_signals = NULL; 2542 static int *preinstalled_sigs = NULL; 2543 static struct sigaction *chainedsigactions = NULL; 2544 static sema_t sig_sem; 2545 typedef int (*version_getting_t)(); 2546 version_getting_t os::Solaris::get_libjsig_version = NULL; 2547 static int libjsigversion = NULL; 2548 2549 int os::sigexitnum_pd() { 2550 assert(Sigexit > 0, "signal memory not yet initialized"); 2551 return Sigexit; 2552 } 2553 2554 void os::Solaris::init_signal_mem() { 2555 // Initialize signal structures 2556 Maxsignum = SIGRTMAX; 2557 Sigexit = Maxsignum+1; 2558 assert(Maxsignum >0, "Unable to obtain max signal number"); 2559 2560 Maxlibjsigsigs = Maxsignum; 2561 2562 // pending_signals has one int per signal 2563 // The additional signal is for SIGEXIT - exit signal to signal_thread 2564 pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal); 2565 memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1))); 2566 2567 if (UseSignalChaining) { 2568 chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction) 2569 * (Maxsignum + 1), mtInternal); 2570 memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1))); 2571 preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal); 2572 memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1))); 2573 } 2574 ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 ), mtInternal); 2575 memset(ourSigFlags, 0, sizeof(int) * 
(Maxsignum + 1)); 2576 } 2577 2578 void os::signal_init_pd() { 2579 int ret; 2580 2581 ret = ::sema_init(&sig_sem, 0, NULL, NULL); 2582 assert(ret == 0, "sema_init() failed"); 2583 } 2584 2585 void os::signal_notify(int signal_number) { 2586 int ret; 2587 2588 Atomic::inc(&pending_signals[signal_number]); 2589 ret = ::sema_post(&sig_sem); 2590 assert(ret == 0, "sema_post() failed"); 2591 } 2592 2593 static int check_pending_signals(bool wait_for_signal) { 2594 int ret; 2595 while (true) { 2596 for (int i = 0; i < Sigexit + 1; i++) { 2597 jint n = pending_signals[i]; 2598 if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) { 2599 return i; 2600 } 2601 } 2602 if (!wait_for_signal) { 2603 return -1; 2604 } 2605 JavaThread *thread = JavaThread::current(); 2606 ThreadBlockInVM tbivm(thread); 2607 2608 bool threadIsSuspended; 2609 do { 2610 thread->set_suspend_equivalent(); 2611 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self() 2612 while((ret = ::sema_wait(&sig_sem)) == EINTR) 2613 ; 2614 assert(ret == 0, "sema_wait() failed"); 2615 2616 // were we externally suspended while we were waiting? 2617 threadIsSuspended = thread->handle_special_suspend_equivalent_condition(); 2618 if (threadIsSuspended) { 2619 // 2620 // The semaphore has been incremented, but while we were waiting 2621 // another thread suspended us. We don't want to continue running 2622 // while suspended because that would surprise the thread that 2623 // suspended us. 
        // Re-post the semaphore: we consumed a pending-signal token but were
        // externally suspended while waiting, so give the token back before
        // suspending ourselves (head of this function lies above this chunk).
        ret = ::sema_post(&sig_sem);
        assert(ret == 0, "sema_post() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

// Non-blocking probe for a pending signal; returns the signal number or -1.
int os::signal_lookup() {
  return check_pending_signals(false);
}

// Blocks until a signal is pending; returns the signal number.
int os::signal_wait() {
  return check_pending_signals(true);
}

////////////////////////////////////////////////////////////////////////////////
// Virtual Memory

// Cached system page size; -1 until os::init() has run.
static int page_size = -1;

// The mmap MAP_ALIGN flag is supported on Solaris 9 and later. init_2() will
// clear this var if support is not available.
static bool has_map_align = true;

int os::vm_page_size() {
  assert(page_size != -1, "must call os::init");
  return page_size;
}

// Solaris allocates memory by pages.
int os::vm_allocation_granularity() {
  assert(page_size != -1, "must call os::init");
  return page_size;
}

// Returns true iff errno value 'err' from mmap() is one the caller can
// handle without risk that our reserved mapping was disturbed.
static bool recoverable_mmap_error(int err) {
  // See if the error is one we can let the caller handle. This
  // list of errno values comes from the Solaris mmap(2) man page.
  switch (err) {
  case EBADF:
  case EINVAL:
  case ENOTSUP:
    // let the caller deal with these errors
    return true;

  default:
    // Any remaining errors on this OS can cause our reserved mapping
    // to be lost. That can cause confusion where different data
    // structures think they have the same memory mapped. The worst
    // scenario is if both the VM and a library think they have the
    // same memory mapped.
    return false;
  }
}

// Diagnostic for a failed commit (variant without an alignment hint).
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
          strerror(err), err);
}

// Diagnostic for a failed commit (variant with an alignment hint).
static void warn_fail_commit_memory(char* addr, size_t bytes,
                                    size_t alignment_hint, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
          alignment_hint, exec, strerror(err), err);
}

// Commits [addr, addr + bytes) over an existing reservation (MAP_FIXED).
// Returns 0 on success, else the errno from mmap(); aborts the VM when the
// error is one that may have destroyed the reservation.
int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  size_t size = bytes;
  char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
  if (res != NULL) {
    if (UseNUMAInterleaving) {
      numa_make_global(addr, bytes);
    }
    return 0;
  }

  int err = errno;  // save errno from mmap() call in mmap_chunk()

  if (!recoverable_mmap_error(err)) {
    warn_fail_commit_memory(addr, bytes, exec, err);
    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
  }

  return err;
}

bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
}

void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
  if (err != 0) {
    // the caller wants all commit errors to exit with the specified mesg:
    warn_fail_commit_memory(addr, bytes, exec, err);
    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
  }
}

// As above, but additionally advises the kernel of a preferred page size
// (MPSS) when large pages are in use and the alignment hint exceeds the
// base page size.
int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
                                    size_t alignment_hint,
                                    bool exec) {
  int err = Solaris::commit_memory_impl(addr, bytes, exec);
  if (err == 0) {
    if (UseLargePages && (alignment_hint > (size_t)vm_page_size())) {
      // If the large page size has been set and the VM
      // is using large pages, use the large page size
      // if it is smaller than the alignment hint. This is
      // a case where the VM wants to use a larger alignment size
      // for its own reasons but still want to use large pages
      // (which is what matters to setting the mpss range.
      size_t page_size = 0;
      if (large_page_size() < alignment_hint) {
        assert(UseLargePages, "Expected to be here for large page use only");
        page_size = large_page_size();
      } else {
        // If the alignment hint is less than the large page
        // size, the VM wants a particular alignment (thus the hint)
        // for internal reasons. Try to set the mpss range using
        // the alignment_hint.
        page_size = alignment_hint;
      }
      // Since this is a hint, ignore any failures.
      (void)Solaris::setup_large_pages(addr, bytes, page_size);
    }
  }
  return err;
}

bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
                          bool exec) {
  return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
}

void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
  if (err != 0) {
    // the caller wants all commit errors to exit with the specified mesg:
    warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
  }
}

// Uncommit the pages in a specified region.
2780 void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) { 2781 if (madvise(addr, bytes, MADV_FREE) < 0) { 2782 debug_only(warning("MADV_FREE failed.")); 2783 return; 2784 } 2785 } 2786 2787 bool os::pd_create_stack_guard_pages(char* addr, size_t size) { 2788 return os::commit_memory(addr, size, !ExecMem); 2789 } 2790 2791 bool os::remove_stack_guard_pages(char* addr, size_t size) { 2792 return os::uncommit_memory(addr, size); 2793 } 2794 2795 // Change the page size in a given range. 2796 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { 2797 assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned."); 2798 assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned."); 2799 if (UseLargePages) { 2800 Solaris::setup_large_pages(addr, bytes, alignment_hint); 2801 } 2802 } 2803 2804 // Tell the OS to make the range local to the first-touching LWP 2805 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { 2806 assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned."); 2807 if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) { 2808 debug_only(warning("MADV_ACCESS_LWP failed.")); 2809 } 2810 } 2811 2812 // Tell the OS that this range would be accessed from different LWPs. 2813 void os::numa_make_global(char *addr, size_t bytes) { 2814 assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned."); 2815 if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) { 2816 debug_only(warning("MADV_ACCESS_MANY failed.")); 2817 } 2818 } 2819 2820 // Get the number of the locality groups. 2821 size_t os::numa_get_groups_num() { 2822 size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie()); 2823 return n != -1 ? n : 1; 2824 } 2825 2826 // Get a list of leaf locality groups. A leaf lgroup is group that 2827 // doesn't have any children. Typical leaf group is a CPU or a CPU/memory 2828 // board. An LWP is assigned to one of these groups upon creation. 
// Writes up to 'size' leaf lgroup ids into 'ids' and returns the count
// written. Falls back to a single pseudo-group 0 on any lgrp API failure or
// when no group reports memory (UMA machine).
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
    ids[0] = 0;
    return 1;
  }
  int result_size = 0, top = 1, bottom = 0, cur = 0;
  // Breadth-first walk of the lgroup tree using 'ids' itself as storage:
  // ids[cur..top) is the unvisited frontier, and leaves that have memory
  // are compacted down into ids[0..bottom).
  for (int k = 0; k < size; k++) {
    int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
                                   (Solaris::lgrp_id_t*)&ids[top], size - top);
    if (r == -1) {
      ids[0] = 0;
      return 1;
    }
    if (!r) {
      // That's a leaf node.
      assert (bottom <= cur, "Sanity check");
      // Check if the node has memory
      if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
                                  NULL, 0, LGRP_RSRC_MEM) > 0) {
        ids[bottom++] = ids[cur];
      }
    }
    top += r;
    cur++;
  }
  if (bottom == 0) {
    // Handle a situation, when the OS reports no memory available.
    // Assume UMA architecture.
    ids[0] = 0;
    return 1;
  }
  return bottom;
}

// Detect the topology change. Typically happens during CPU plugging-unplugging.
bool os::numa_topology_changed() {
  int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
  if (is_stale != -1 && is_stale) {
    // Cookie went stale: re-initialize the lgrp snapshot before reporting
    // the change to the caller.
    Solaris::lgrp_fini(Solaris::lgrp_cookie());
    Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
    assert(c != 0, "Failure to initialize LGRP API");
    Solaris::set_lgrp_cookie(c);
    return true;
  }
  return false;
}

// Get the group id of the current LWP.
int os::numa_get_group_id() {
  int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
  if (lgrp_id == -1) {
    return 0;
  }
  const int size = os::numa_get_groups_num();
  int *ids = (int*)alloca(size * sizeof(int));

  // Get the ids of all lgroups with memory; r is the count.
  int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
                                  (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
  if (r <= 0) {
    return 0;
  }
  // Pick one of the memory-bearing lgroups at random.
  return ids[os::random() % r];
}

// Request information about the page.
bool os::get_page_info(char *start, page_info* info) {
  const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  uint64_t addr = (uintptr_t)start;
  uint64_t outdata[2];
  uint_t validity = 0;

  if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
    return false;
  }

  info->size = 0;
  info->lgrp_id = -1;

  // Per the checks below: bit 0 of 'validity' gates the address itself;
  // bits 1 and 2 gate the two requested info types (lgroup id in
  // outdata[0], page size in outdata[1]) respectively.
  if ((validity & 1) != 0) {
    if ((validity & 2) != 0) {
      info->lgrp_id = outdata[0];
    }
    if ((validity & 4) != 0) {
      info->size = outdata[1];
    }
    return true;
  }
  return false;
}

// Scan the pages from start to end until a page different than
// the one described in the info parameter is encountered.
// Walks [start, end) in page_size steps, querying meminfo() in batches of
// MAX_MEMINFO_CNT addresses. Returns the address of the first page whose
// lgroup/size deviates from *page_expected (details stored in *page_found),
// 'end' if the whole range matches, or NULL on a meminfo failure / invalid
// address.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  const size_t types = sizeof(info_types) / sizeof(info_types[0]);
  uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
  uint_t validity[MAX_MEMINFO_CNT];

  size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
  uint64_t p = (uint64_t)start;
  while (p < (uint64_t)end) {
    // Build a batch of up to MAX_MEMINFO_CNT page addresses starting at p.
    addrs[0] = p;
    size_t addrs_count = 1;
    while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
      addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
      addrs_count++;
    }

    if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
      return NULL;
    }

    // Find the first page in the batch that deviates from page_expected;
    // validity bit layout matches get_page_info() above.
    size_t i = 0;
    for (; i < addrs_count; i++) {
      if ((validity[i] & 1) != 0) {
        if ((validity[i] & 4) != 0) {
          if (outdata[types * i + 1] != page_expected->size) {
            break;
          }
        } else
          if (page_expected->size != 0) {
            break;
          }

        if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
          if (outdata[types * i] != page_expected->lgrp_id) {
            break;
          }
        }
      } else {
        return NULL;
      }
    }

    if (i < addrs_count) {
      // Mismatch found: record what was actually observed on that page.
      if ((validity[i] & 2) != 0) {
        page_found->lgrp_id = outdata[types * i];
      } else {
        page_found->lgrp_id = -1;
      }
      if ((validity[i] & 4) != 0) {
        page_found->size = outdata[types * i + 1];
      } else {
        page_found->size = 0;
      }
      return (char*)addrs[i];
    }

    p = addrs[addrs_count - 1] + page_size;
  }
  return end;
}

bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  size_t size = bytes;
  // Map uncommitted pages PROT_NONE so we fail early if we touch an
  // uncommitted page. Otherwise, the read/write might succeed if we
  // have enough swap space to back the physical page.
  return
    NULL != Solaris::mmap_chunk(addr, size,
                                MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
                                PROT_NONE);
}

// Thin wrapper over mmap() on /dev/zero; returns NULL on failure, leaving
// errno set by mmap() for the caller to inspect.
char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
  char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);

  if (b == MAP_FAILED) {
    return NULL;
  }
  return b;
}

// Anonymous (PROT_NONE) reservation: either exactly at 'requested_addr'
// (fixed == true) or anywhere, optionally aligned via MAP_ALIGN where the
// kernel supports it.
char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
  char* addr = requested_addr;
  int flags = MAP_PRIVATE | MAP_NORESERVE;

  assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");

  if (fixed) {
    flags |= MAP_FIXED;
  } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
    // With MAP_ALIGN, the 'addr' argument to mmap carries the alignment.
    flags |= MAP_ALIGN;
    addr = (char*) alignment_hint;
  }

  // Map uncommitted pages PROT_NONE so we fail early if we touch an
  // uncommitted page. Otherwise, the read/write might succeed if we
  // have enough swap space to back the physical page.
  return mmap_chunk(addr, bytes, flags, PROT_NONE);
}

char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
  char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));

  guarantee(requested_addr == NULL || requested_addr == addr,
            "OS failed to return requested mmap address.");
  return addr;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).

char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  const int max_tries = 10;
  char* base[max_tries];
  size_t size[max_tries];

  // Solaris adds a gap between mmap'ed regions. The size of the gap
  // is dependent on the requested size and the MMU. Our initial gap
  // value here is just a guess and will be corrected later.
  bool had_top_overlap = false;
  bool have_adjusted_gap = false;
  size_t gap = 0x400000;

  // Assert only that the size is a multiple of the page size, since
  // that's all that mmap requires, and since that's all we really know
  // about at this low abstraction level. If we need higher alignment,
  // we can either pass an alignment to this method or verify alignment
  // in one of the methods further up the call chain. See bug 5044738.
  assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");

  // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
  // Give it a try, if the kernel honors the hint we can return immediately.
  char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);

  volatile int err = errno;
  if (addr == requested_addr) {
    return addr;
  } else if (addr != NULL) {
    pd_unmap_memory(addr, bytes);
  }

  if (PrintMiscellaneous && Verbose) {
    char buf[256];
    buf[0] = '\0';
    if (addr == NULL) {
      jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
    }
    warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
            PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
            "%s", bytes, requested_addr, addr, buf);
  }

  // Address hint method didn't work. Fall back to the old method.
  // In theory, once SNV becomes our oldest supported platform, this
  // code will no longer be needed.
  //
  // Repeatedly allocate blocks until the block is allocated at the
  // right spot. Give up after max_tries.
  int i;
  for (i = 0; i < max_tries; ++i) {
    base[i] = reserve_memory(bytes);

    if (base[i] != NULL) {
      // Is this the block we wanted?
      if (base[i] == requested_addr) {
        size[i] = bytes;
        break;
      }

      // check that the gap value is right
      if (had_top_overlap && !have_adjusted_gap) {
        size_t actual_gap = base[i-1] - base[i] - bytes;
        if (gap != actual_gap) {
          // adjust the gap value and retry the last 2 allocations
          assert(i > 0, "gap adjustment code problem");
          have_adjusted_gap = true;  // adjust the gap only once, just in case
          gap = actual_gap;
          if (PrintMiscellaneous && Verbose) {
            warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
          }
          unmap_memory(base[i], bytes);
          unmap_memory(base[i-1], size[i-1]);
          i-=2;
          continue;
        }
      }

      // Does this overlap the block we wanted? Give back the overlapped
      // parts and try again.
      //
      // There is still a bug in this code: if top_overlap == bytes,
      // the overlap is offset from requested region by the value of gap.
      // In this case giving back the overlapped part will not work,
      // because we'll give back the entire block at base[i] and
      // therefore the subsequent allocation will not generate a new gap.
      // This could be fixed with a new algorithm that used larger
      // or variable size chunks to find the requested region -
      // but such a change would introduce additional complications.
      // It's rare enough that the planets align for this bug,
      // so we'll just wait for a fix for 6204603/5003415 which
      // will provide a mmap flag to allow us to avoid this business.

      // NOTE(review): top_overlap/bottom_overlap are size_t, so the
      // '>= 0' halves of the tests below are vacuously true; a "negative"
      // overlap wraps to a huge unsigned value and is rejected by the
      // '< bytes' half, so behavior is still correct.
      size_t top_overlap = requested_addr + (bytes + gap) - base[i];
      if (top_overlap >= 0 && top_overlap < bytes) {
        had_top_overlap = true;
        unmap_memory(base[i], top_overlap);
        base[i] += top_overlap;
        size[i] = bytes - top_overlap;
      } else {
        size_t bottom_overlap = base[i] + bytes - requested_addr;
        if (bottom_overlap >= 0 && bottom_overlap < bytes) {
          if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
            warning("attempt_reserve_memory_at: possible alignment bug");
          }
          unmap_memory(requested_addr, bottom_overlap);
          size[i] = bytes - bottom_overlap;
        } else {
          size[i] = bytes;
        }
      }
    }
  }

  // Give back the unused reserved pieces.

  for (int j = 0; j < i; ++j) {
    if (base[j] != NULL) {
      unmap_memory(base[j], size[j]);
    }
  }

  return (i < max_tries) ? requested_addr : NULL;
}

bool os::pd_release_memory(char* addr, size_t bytes) {
  size_t size = bytes;
  return munmap(addr, size) == 0;
}

// mprotect() wrapper; 'addr' must be page aligned.
static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
  assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
         "addr must be page aligned");
  int retVal = mprotect(addr, bytes, prot);
  return retVal == 0;
}

// Protect memory (Used to pass readonly pages through
// JNI GetArray<type>Elements with empty arrays.)
// Also, used for serialization page and for compressed oops null pointer
// checking.
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PROT_NONE; break;
  case MEM_PROT_READ: p = PROT_READ; break;
  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
  default:
    ShouldNotReachHere();
  }
  // is_committed is unused.
  return solaris_mprotect(addr, bytes, p);
}

// guard_memory and unguard_memory only happens within stack guard pages.
// Since ISM pertains only to the heap, guard and unguard memory should not
// happen with an ISM region.
bool os::guard_memory(char* addr, size_t bytes) {
  return solaris_mprotect(addr, bytes, PROT_NONE);
}

bool os::unguard_memory(char* addr, size_t bytes) {
  return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
}

// Large page support
static size_t _large_page_size = 0;

// Insertion sort for small arrays (descending order).
static void insertion_sort_descending(size_t* array, int len) {
  for (int i = 0; i < len; i++) {
    size_t val = array[i];
    for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
      size_t tmp = array[key];
      array[key] = array[key - 1];
      array[key - 1] = tmp;
    }
  }
}

// Probes the kernel's supported page sizes (MPSS) and fills os::_page_sizes
// with the usable subset; on success stores the preferred large page size
// in *page_size and returns true.
bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
  const unsigned int usable_count = VM_Version::page_size_count();
  if (usable_count == 1) {
    return false;
  }

  // Find the right getpagesizes interface. When solaris 11 is the minimum
  // build platform, getpagesizes() (without the '2') can be called directly.
  typedef int (*gps_t)(size_t[], int);
  gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
  if (gps_func == NULL) {
    gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
    if (gps_func == NULL) {
      if (warn) {
        warning("MPSS is not supported by the operating system.");
      }
      return false;
    }
  }

  // Fill the array of page sizes.
  int n = (*gps_func)(_page_sizes, page_sizes_max);
  assert(n > 0, "Solaris bug?");

  if (n == page_sizes_max) {
    // Add a sentinel value (necessary only if the array was completely filled
    // since it is static (zeroed at initialization)).
    _page_sizes[--n] = 0;
    DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
  }
  assert(_page_sizes[n] == 0, "missing sentinel");
  trace_page_sizes("available page sizes", _page_sizes, n);

  if (n == 1) return false;  // Only one page size available.

  // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
  // select up to usable_count elements. First sort the array, find the first
  // acceptable value, then copy the usable sizes to the top of the array and
  // trim the rest. Make sure to include the default page size :-).
  //
  // A better policy could get rid of the 4M limit by taking the sizes of the
  // important VM memory regions (java heap and possibly the code cache) into
  // account.
  insertion_sort_descending(_page_sizes, n);
  const size_t size_limit =
    FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
  int beg;
  for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
  const int end = MIN2((int)usable_count, n) - 1;
  for (int cur = 0; cur < end; ++cur, ++beg) {
    _page_sizes[cur] = _page_sizes[beg];
  }
  _page_sizes[end] = vm_page_size();
  _page_sizes[end + 1] = 0;

  if (_page_sizes[end] > _page_sizes[end - 1]) {
    // Default page size is not the smallest; sort again.
    insertion_sort_descending(_page_sizes, end + 1);
  }
  *page_size = _page_sizes[0];

  trace_page_sizes("usable page sizes", _page_sizes, end + 1);
  return true;
}

void os::large_page_init() {
  if (UseLargePages) {
    // print a warning if any large page related flag is specified on command line
    bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                           !FLAG_IS_DEFAULT(LargePageSizeInBytes);

    UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
  }
}

// Advises the kernel via memcntl(MC_HAT_ADVISE) to back [start, start+bytes)
// with pages of size 'align'; returns false if the advice was rejected.
bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
  // Signal to OS that we want large pages for addresses
  // from addr, addr + bytes
  struct memcntl_mha mpss_struct;
  mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
  mpss_struct.mha_pagesize = align;
  mpss_struct.mha_flags = 0;
  // Upon successful completion, memcntl() returns 0
  if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
    debug_only(warning("Attempt to use MPSS failed."));
    return false;
  }
  return true;
}

char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
  fatal("os::reserve_memory_special should not be called on Solaris.");
  return NULL;
}

bool os::release_memory_special(char* base, size_t bytes) {
  fatal("os::release_memory_special should not be called on Solaris.");
  return false;
}

size_t os::large_page_size() {
  return _large_page_size;
}

// MPSS allows application to commit large page memory on demand; with ISM
// the entire memory region must be allocated as shared memory.
bool os::can_commit_large_page_memory() {
  return true;
}

bool os::can_execute_large_page_memory() {
  return true;
}

// Read calls from inside the vm need to perform state transitions
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
}

size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
}

// Un-interruptible sleep for less than one second; callers must keep 'ms'
// short since nothing can break the sleep.
void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptable sleep, short time use only");

  // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
  // Solaris requires -lrt for this.
  usleep((ms * 1000));

  return;
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {  // sleep forever ...
    ::sleep(100); // ... 100 seconds at a time
  }
}

// Used to convert frequent JVM_Yield() to nops
bool os::dont_yield() {
  if (DontYieldALot) {
    // Suppress the yield when it comes within DontYieldALotInterval
    // milliseconds of the previous permitted yield.
    static hrtime_t last_time = 0;
    hrtime_t diff = getTimeNanos() - last_time;

    if (diff < DontYieldALotInterval * 1000000)
      return true;

    last_time += diff;

    return false;
  }
  else {
    return false;
  }
}

// Caveat: Solaris os::yield() causes a thread-state transition whereas
// the linux and win32 implementations do not. This should be checked.

void os::yield() {
  // Yields to all threads with same or greater priority
  os::sleep(Thread::current(), 0, false);
}

// Note that yield semantics are defined by the scheduling class to which
// the thread currently belongs. Typically, yield will _not yield to
// other equal or higher priority threads that reside on the dispatch queues
// of other CPUs.

os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }


// On Solaris we found that yield_all doesn't always yield to all other threads.
// There have been cases where there is a thread ready to execute but it doesn't
// get an lwp as the VM thread continues to spin with sleeps of 1 millisecond.
// The 1 millisecond wait doesn't seem long enough for the kernel to issue a
// SIGWAITING signal which will cause a new lwp to be created. So we count the
// number of times yield_all is called in the one loop and increase the sleep
// time after 8 attempts. If this fails too we increase the concurrency level
// so that the starving thread would get an lwp

void os::yield_all(int attempts) {
  // Yields to all threads, including threads with lower priorities
  if (attempts == 0) {
    os::sleep(Thread::current(), 1, false);
  } else {
    int iterations = attempts % 30;
    if (iterations == 0 && !os::Solaris::T2_libthread()) {
      // thr_setconcurrency and _getconcurrency make sense only under T1.
      int noofLWPS = thr_getconcurrency();
      if (noofLWPS < (Threads::number_of_threads() + 2)) {
        thr_setconcurrency(thr_getconcurrency() + 1);
      }
    } else if (iterations < 25) {
      os::sleep(Thread::current(), 1, false);
    } else {
      os::sleep(Thread::current(), 10, false);
    }
  }
}

// Called from the tight loops to possibly influence time-sharing heuristics
void os::loop_breaker(int attempts) {
  os::yield_all(attempts);
}


// Interface for setting lwp priorities. If we are using T2 libthread,
// which forces the use of BoundThreads or we manually set UseBoundThreads,
// all of our threads will be assigned to real lwp's. Using the thr_setprio
// function is meaningless in this mode so we must adjust the real lwp's priority.
// The routines below implement the getting and setting of lwp priorities.
//
// Note: There are three priority scales used on Solaris. Java priorities
// which range from 1 to 10, libthread "thr_setprio" scale which range
// from 0 to 127, and the current scheduling class of the process we
// are running in. This is typically from -60 to +60.
// The setting of the lwp priorities is done after a call to thr_setprio
// so Java priorities are mapped to libthread priorities and we map from
// the latter to lwp priorities. We don't keep priorities stored in
// Java priorities since some of our worker threads want to set priorities
// higher than all Java threads.
//
// For related information:
// (1) man -s 2 priocntl
// (2) man -s 4 priocntl
// (3) man dispadmin
// =  librt.so
// =  libthread/common/rtsched.c - thrp_setlwpprio().
// =  ps -cL <pid> ... to validate priority.
// =  sched_get_priority_min and _max
//    pthread_create
//    sched_setparam
//    pthread_setschedparam
//
// Assumptions:
// +    We assume that all threads in the process belong to the same
//      scheduling class.  IE. an homogenous process.
// +    Must be root or in IA group to change "interactive" attribute.
//      Priocntl() will fail silently. The only indication of failure is when
//      we read-back the value and notice that it hasn't changed.
// +    Interactive threads enter the runq at the head, non-interactive at the tail.
// +    For RT, change timeslice as well. Invariant:
//      constant "priority integral"
//      Konst == TimeSlice * (60-Priority)
//      Given a priority, compute appropriate timeslice.
// +    Higher numerical values have higher priority.
3462 3463 // sched class attributes 3464 typedef struct { 3465 int schedPolicy; // classID 3466 int maxPrio; 3467 int minPrio; 3468 } SchedInfo; 3469 3470 3471 static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits; 3472 3473 #ifdef ASSERT 3474 static int ReadBackValidate = 1; 3475 #endif 3476 static int myClass = 0; 3477 static int myMin = 0; 3478 static int myMax = 0; 3479 static int myCur = 0; 3480 static bool priocntl_enable = false; 3481 3482 static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4 3483 static int java_MaxPriority_to_os_priority = 0; // Saved mapping 3484 3485 3486 // lwp_priocntl_init 3487 // 3488 // Try to determine the priority scale for our process. 3489 // 3490 // Return errno or 0 if OK. 3491 // 3492 static int lwp_priocntl_init () { 3493 int rslt; 3494 pcinfo_t ClassInfo; 3495 pcparms_t ParmInfo; 3496 int i; 3497 3498 if (!UseThreadPriorities) return 0; 3499 3500 // We are using Bound threads, we need to determine our priority ranges 3501 if (os::Solaris::T2_libthread() || UseBoundThreads) { 3502 // If ThreadPriorityPolicy is 1, switch tables 3503 if (ThreadPriorityPolicy == 1) { 3504 for (i = 0 ; i < CriticalPriority+1; i++) 3505 os::java_to_os_priority[i] = prio_policy1[i]; 3506 } 3507 if (UseCriticalJavaThreadPriority) { 3508 // MaxPriority always maps to the FX scheduling class and criticalPrio. 3509 // See set_native_priority() and set_lwp_class_and_priority(). 3510 // Save original MaxPriority mapping in case attempt to 3511 // use critical priority fails. 
3512 java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority]; 3513 // Set negative to distinguish from other priorities 3514 os::java_to_os_priority[MaxPriority] = -criticalPrio; 3515 } 3516 } 3517 // Not using Bound Threads, set to ThreadPolicy 1 3518 else { 3519 for ( i = 0 ; i < CriticalPriority+1; i++ ) { 3520 os::java_to_os_priority[i] = prio_policy1[i]; 3521 } 3522 return 0; 3523 } 3524 3525 // Get IDs for a set of well-known scheduling classes. 3526 // TODO-FIXME: GETCLINFO returns the current # of classes in the 3527 // the system. We should have a loop that iterates over the 3528 // classID values, which are known to be "small" integers. 3529 3530 strcpy(ClassInfo.pc_clname, "TS"); 3531 ClassInfo.pc_cid = -1; 3532 rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); 3533 if (rslt < 0) return errno; 3534 assert(ClassInfo.pc_cid != -1, "cid for TS class is -1"); 3535 tsLimits.schedPolicy = ClassInfo.pc_cid; 3536 tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri; 3537 tsLimits.minPrio = -tsLimits.maxPrio; 3538 3539 strcpy(ClassInfo.pc_clname, "IA"); 3540 ClassInfo.pc_cid = -1; 3541 rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); 3542 if (rslt < 0) return errno; 3543 assert(ClassInfo.pc_cid != -1, "cid for IA class is -1"); 3544 iaLimits.schedPolicy = ClassInfo.pc_cid; 3545 iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri; 3546 iaLimits.minPrio = -iaLimits.maxPrio; 3547 3548 strcpy(ClassInfo.pc_clname, "RT"); 3549 ClassInfo.pc_cid = -1; 3550 rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); 3551 if (rslt < 0) return errno; 3552 assert(ClassInfo.pc_cid != -1, "cid for RT class is -1"); 3553 rtLimits.schedPolicy = ClassInfo.pc_cid; 3554 rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri; 3555 rtLimits.minPrio = 0; 3556 3557 strcpy(ClassInfo.pc_clname, "FX"); 3558 ClassInfo.pc_cid = -1; 3559 rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo); 3560 if (rslt < 0) return errno; 
3561 assert(ClassInfo.pc_cid != -1, "cid for FX class is -1"); 3562 fxLimits.schedPolicy = ClassInfo.pc_cid; 3563 fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri; 3564 fxLimits.minPrio = 0; 3565 3566 // Query our "current" scheduling class. 3567 // This will normally be IA, TS or, rarely, FX or RT. 3568 memset(&ParmInfo, 0, sizeof(ParmInfo)); 3569 ParmInfo.pc_cid = PC_CLNULL; 3570 rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo); 3571 if (rslt < 0) return errno; 3572 myClass = ParmInfo.pc_cid; 3573 3574 // We now know our scheduling classId, get specific information 3575 // about the class. 3576 ClassInfo.pc_cid = myClass; 3577 ClassInfo.pc_clname[0] = 0; 3578 rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo); 3579 if (rslt < 0) return errno; 3580 3581 if (ThreadPriorityVerbose) { 3582 tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname); 3583 } 3584 3585 memset(&ParmInfo, 0, sizeof(pcparms_t)); 3586 ParmInfo.pc_cid = PC_CLNULL; 3587 rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo); 3588 if (rslt < 0) return errno; 3589 3590 if (ParmInfo.pc_cid == rtLimits.schedPolicy) { 3591 myMin = rtLimits.minPrio; 3592 myMax = rtLimits.maxPrio; 3593 } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) { 3594 iaparms_t *iaInfo = (iaparms_t*)ParmInfo.pc_clparms; 3595 myMin = iaLimits.minPrio; 3596 myMax = iaLimits.maxPrio; 3597 myMax = MIN2(myMax, (int)iaInfo->ia_uprilim); // clamp - restrict 3598 } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) { 3599 tsparms_t *tsInfo = (tsparms_t*)ParmInfo.pc_clparms; 3600 myMin = tsLimits.minPrio; 3601 myMax = tsLimits.maxPrio; 3602 myMax = MIN2(myMax, (int)tsInfo->ts_uprilim); // clamp - restrict 3603 } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) { 3604 fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms; 3605 myMin = fxLimits.minPrio; 3606 myMax = fxLimits.maxPrio; 3607 myMax = MIN2(myMax, (int)fxInfo->fx_uprilim); // clamp - restrict 3608 } 
else {
    // Current class is not one of RT/IA/TS/FX - refuse to guess its range.
    if (ThreadPriorityVerbose)
      tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
    return EINVAL;      // no clue, punt
  }

  if (ThreadPriorityVerbose) {
    tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
  }

  priocntl_enable = true;  // Enable changing priorities
  return 0;
}

// Convenience accessors for the class-specific parameter area of a pcparms_t.
#define IAPRI(x)          ((iaparms_t *)((x).pc_clparms))
#define RTPRI(x)          ((rtparms_t *)((x).pc_clparms))
#define TSPRI(x)          ((tsparms_t *)((x).pc_clparms))
#define FXPRI(x)          ((fxparms_t *)((x).pc_clparms))


// scale_to_lwp_priority
//
// Convert from the libthread "thr_setprio" scale to our current
// lwp scheduling class scale [rMin..rMax].
static
int scale_to_lwp_priority (int rMin, int rMax, int x)
{
  int v;

  if (x == 127) return rMax;            // avoid round-down
  v = (((x*(rMax-rMin)))/128)+rMin;
  return v;
}


// set_lwp_class_and_priority
//
// Set the class and priority of the lwp.  This call should only
// be made when using bound threads (T2 threads are bound by default).
//
// Returns 0 on success, an errno-style value on failure.
int set_lwp_class_and_priority(int ThreadID, int lwpid,
                               int newPrio, int new_class, bool scale) {
  int rslt;
  // NOTE(review): 'prv' appears unused in the visible body - confirm.
  int Actual, Expected, prv;
  pcparms_t ParmInfo;                   // for GET-SET
#ifdef ASSERT
  pcparms_t ReadBack;                   // for readback
#endif

  // Set priority via PC_GETPARMS, update, PC_SETPARMS
  // Query current values.
  // TODO: accelerate this by eliminating the PC_GETPARMS call.
  //       Cache "pcparms_t" in global ParmCache.
  // TODO: elide set-to-same-value

  // If something went wrong on init, don't change priorities.
  if ( !priocntl_enable ) {
    if (ThreadPriorityVerbose)
      tty->print_cr("Trying to set priority but init failed, ignoring");
    return EINVAL;
  }

  // If lwp hasn't started yet, just return
  // the _start routine will call us again.
  if ( lwpid <= 0 ) {
    if (ThreadPriorityVerbose) {
      tty->print_cr ("deferring the set_lwp_class_and_priority of thread "
                     INTPTR_FORMAT " to %d, lwpid not set",
                     ThreadID, newPrio);
    }
    return 0;
  }

  if (ThreadPriorityVerbose) {
    tty->print_cr ("set_lwp_class_and_priority("
                   INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
                   ThreadID, lwpid, newPrio);
  }

  // Read the lwp's current scheduling parameters.
  memset(&ParmInfo, 0, sizeof(pcparms_t));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
  if (rslt < 0) return errno;

  int cur_class = ParmInfo.pc_cid;
  ParmInfo.pc_cid = (id_t)new_class;

  if (new_class == rtLimits.schedPolicy) {
    rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
    rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
                                                       rtLimits.maxPrio, newPrio)
                               : newPrio;
    rtInfo->rt_tqsecs  = RT_NOCHANGE;
    rtInfo->rt_tqnsecs = RT_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
    }
  } else if (new_class == iaLimits.schedPolicy) {
    iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
    // Only trust the lwp's existing user priority limit when it is already
    // in the target class; otherwise fall back to the class maximum.
    int maxClamped     = MIN2(iaLimits.maxPrio,
                              cur_class == new_class
                                ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
    iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
                                                       maxClamped, newPrio)
                               : newPrio;
    iaInfo->ia_uprilim = cur_class == new_class
                           ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
    iaInfo->ia_mode    = IA_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("IA: [%d...%d] %d->%d\n",
                    iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
    }
  } else if (new_class == tsLimits.schedPolicy) {
    tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
    int maxClamped     = MIN2(tsLimits.maxPrio,
                              cur_class == new_class
                                ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
    tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
                                                       maxClamped, newPrio)
                               : newPrio;
    tsInfo->ts_uprilim = cur_class == new_class
                           ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
    if (ThreadPriorityVerbose) {
      tty->print_cr("TS: [%d...%d] %d->%d\n",
                    tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
    }
  } else if (new_class == fxLimits.schedPolicy) {
    fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
    int maxClamped     = MIN2(fxLimits.maxPrio,
                              cur_class == new_class
                                ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
    fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
                                                       maxClamped, newPrio)
                               : newPrio;
    fxInfo->fx_uprilim = cur_class == new_class
                           ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
    fxInfo->fx_tqsecs  = FX_NOCHANGE;
    fxInfo->fx_tqnsecs = FX_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("FX: [%d...%d] %d->%d\n",
                    fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
    }
  } else {
    if (ThreadPriorityVerbose) {
      tty->print_cr("Unknown new scheduling class %d\n", new_class);
    }
    return EINVAL;    // no clue, punt
  }

  // Write the updated parameters back to the lwp.
  rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
  if (ThreadPriorityVerbose && rslt) {
    tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
  }
  if (rslt < 0) return errno;

#ifdef ASSERT
  // Sanity check: read back what we just attempted to set.
  // In theory it could have changed in the interim ...
  //
  // The priocntl system call is tricky.
  // Sometimes it'll validate the priority value argument and
  // return EINVAL if unhappy.  At other times it fails silently.
  // Readbacks are prudent.

  if (!ReadBackValidate) return 0;

  memset(&ReadBack, 0, sizeof(pcparms_t));
  ReadBack.pc_cid = PC_CLNULL;
  rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
  assert(rslt >= 0, "priocntl failed");
  Actual = Expected = 0xBAD;
  assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
    Actual   = RTPRI(ReadBack)->rt_pri;
    Expected = RTPRI(ParmInfo)->rt_pri;
  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
    Actual   = IAPRI(ReadBack)->ia_upri;
    Expected = IAPRI(ParmInfo)->ia_upri;
  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
    Actual   = TSPRI(ReadBack)->ts_upri;
    Expected = TSPRI(ParmInfo)->ts_upri;
  } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
    Actual   = FXPRI(ReadBack)->fx_upri;
    Expected = FXPRI(ParmInfo)->fx_upri;
  } else {
    if (ThreadPriorityVerbose) {
      tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
                    ParmInfo.pc_cid);
    }
  }

  if (Actual != Expected) {
    if (ThreadPriorityVerbose) {
      tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
                     lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
    }
  }
#endif

  return 0;
}

// Solaris only gives access to 128 real priorities at a time,
// so we expand Java's ten to fill this range.  This would be better
// if we dynamically adjusted relative priorities.
//
// The ThreadPriorityPolicy option allows us to select 2 different
// priority scales.
//
// ThreadPriorityPolicy=0
//   Since the Solaris' default priority is MaximumPriority, we do not
//   set a priority lower than Max unless a priority lower than
//   NormPriority is requested.
//
// ThreadPriorityPolicy=1
//   This mode causes the priority table to get filled with
//   linear values.  NormPriority gets mapped to 50% of the
//   Maximum priority and so on.  This will cause VM threads
//   to get unfair treatment against other Solaris processes
//   which do not explicitly alter their thread priorities.

int os::java_to_os_priority[CriticalPriority + 1] = {
  -99999,         //  0 Entry should never be used

       0,         //  1 MinPriority
      32,         //  2
      64,         //  3

      96,         //  4
     127,         //  5 NormPriority
     127,         //  6

     127,         //  7
     127,         //  8
     127,         //  9 NearMaxPriority

     127,         // 10 MaxPriority

  -criticalPrio   // 11 CriticalPriority
};

// Apply 'newpri' (an os::java_to_os_priority value; negative encodes a
// critical FX-class request) to the thread via thr_setprio and, for bound
// threads, to its lwp as well.
OSReturn os::set_native_priority(Thread* thread, int newpri) {
  OSThread* osthread = thread->osthread();

  // Save requested priority in case the thread hasn't been started
  osthread->set_native_priority(newpri);

  // Check for critical priority request
  bool fxcritical = false;
  if (newpri == -criticalPrio) {
    fxcritical = true;
    newpri = criticalPrio;
  }

  assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
  if (!UseThreadPriorities) return OS_OK;

  int status = 0;

  if (!fxcritical) {
    // Use thr_setprio only if we have a priority that thr_setprio understands
    status = thr_setprio(thread->osthread()->thread_id(), newpri);
  }

  if (os::Solaris::T2_libthread() ||
      (UseBoundThreads && osthread->is_vm_created())) {
    int lwp_status =
      set_lwp_class_and_priority(osthread->thread_id(),
                                 osthread->lwp_id(),
                                 newpri,
                                 fxcritical ? fxLimits.schedPolicy : myClass,
                                 !fxcritical);
    if (lwp_status != 0 && fxcritical) {
      // Try again, this time without changing the scheduling class
      newpri = java_MaxPriority_to_os_priority;
      lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
                                              osthread->lwp_id(),
                                              newpri, myClass, false);
    }
    status |= lwp_status;
  }
  return (status == 0) ? OS_OK : OS_ERR;
}


// Report the thread's current native priority via thr_getprio, or
// NormalPriority when priorities are disabled.
OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
  int p;
  if ( !UseThreadPriorities ) {
    *priority_ptr = NormalPriority;
    return OS_OK;
  }
  int status = thr_getprio(thread->osthread()->thread_id(), &p);
  if (status != 0) {
    return OS_ERR;
  }
  *priority_ptr = p;
  return OS_OK;
}


// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {
  schedctl_start(schedctl_init());
}

// Forget the ucontext saved when the thread was suspended.
static void resume_clear_context(OSThread *osthread) {
  osthread->set_ucontext(NULL);
}

// Remember the ucontext captured at suspension so it can be sampled.
static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
  osthread->set_ucontext(context);
}

// Rendezvous between the suspend/resume requester and the target thread.
static Semaphore sr_semaphore;

void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  OSThread* osthread = thread->osthread();
  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");

  os::SuspendResume::State current = osthread->sr.state();
  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
    suspend_save_context(osthread, uc);

    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
    os::SuspendResume::State state = osthread->sr.suspended();
    if (state == os::SuspendResume::SR_SUSPENDED) {
      sigset_t suspend_set;  // signals for sigsuspend()

      // get current set of blocked signals and unblock resume signal
      thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
      sigdelset(&suspend_set, os::Solaris::SIGasync());

      // Let the requester know we have stopped.
      sr_semaphore.signal();
      // wait here until we are resumed
      while (1) {
        sigsuspend(&suspend_set);

        os::SuspendResume::State result = osthread->sr.running();
        if (result == os::SuspendResume::SR_RUNNING) {
          // Acknowledge the wakeup to the resumer.
          sr_semaphore.signal();
          break;
        }
      }

    } else if (state == os::SuspendResume::SR_RUNNING) {
      // request was cancelled, continue
    } else {
      ShouldNotReachHere();
    }

    resume_clear_context(osthread);
  } else if (current == os::SuspendResume::SR_RUNNING) {
    // request was cancelled, continue
  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
    // ignore
  } else {
    // ignore
  }

  errno = old_errno;
}

void os::print_statistics() {
}

// Print 'title' and 'message' to stderr and block reading stdin for a
// y/Y answer; returns nonzero iff the user answered yes.
int os::message_box(const char* title, const char* message) {
  int i;
  fdStream err(defaultStream::error_fd());
  for (i = 0; i < 78; i++) err.print_raw("=");
  err.cr();
  err.print_raw_cr(title);
  for (i = 0; i < 78; i++) err.print_raw("-");
  err.cr();
  err.print_raw_cr(message);
  for (i = 0; i < 78; i++) err.print_raw("=");
  err.cr();

  char buf[16];
  // Prevent process from exiting upon "read error" without consuming all CPU
  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }

  return buf[0] == 'y' || buf[0] == 'Y';
}

// Deliver the suspend/resume signal (SIGasync) to the target thread.
static int sr_notify(OSThread* osthread) {
  int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
  assert_status(status == 0, status, "thr_kill");
  return status;
}

// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume
static const int RANDOMLY_LARGE_INTEGER = 1000000;
static const int RANDOMLY_LARGE_INTEGER2 = 100;

// Ask 'osthread' to suspend itself (SIGasync -> SR_handler) and wait for
// the acknowledgement; returns false if the request was cancelled.
static bool do_suspend(OSThread* osthread) {
  assert(osthread->sr.is_running(), "thread should be running");
  assert(!sr_semaphore.trywait(), "semaphore has invalid state");

  // mark as suspended and send signal
  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    ShouldNotReachHere();
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
  while (true) {
    if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
      break;
    } else {
      // timeout
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        // make sure that we consume the signal on the semaphore as well
        sr_semaphore.wait();
        break;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}

// Wake a thread previously stopped by do_suspend(); keeps re-notifying
// until the target reports SR_RUNNING.
static void do_resume(OSThread* osthread) {
  assert(osthread->sr.is_suspended(), "thread should be suspended");
  assert(!sr_semaphore.trywait(), "invalid semaphore state");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  while (true) {
    if (sr_notify(osthread) == 0) {
      if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
        if (osthread->sr.is_running()) {
          return;
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}

// Suspend the task's thread, run do_task() against its saved context,
// then resume it.
void os::SuspendedThreadTask::internal_do_task() {
  if (do_suspend(_thread->osthread())) {
    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
    do_task(context);
    do_resume(_thread->osthread());
  }
}

// Helper task that suspends a thread just long enough to sample its PC.
class PcFetcher : public os::SuspendedThreadTask {
 public:
  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
  ExtendedPC result();
 protected:
  void
do_task(const os::SuspendedThreadTaskContext& context);
 private:
  ExtendedPC _epc;
};

ExtendedPC PcFetcher::result() {
  guarantee(is_done(), "task is not done yet.");
  return _epc;
}

// Record the suspended thread's PC from its saved ucontext.
void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
  Thread* thread = context.thread();
  OSThread* osthread = thread->osthread();
  if (osthread->ucontext() != NULL) {
    _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
  } else {
    // NULL context is unexpected, double-check this is the VMThread
    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
  }
}

// A lightweight implementation that does not suspend the target thread and
// thus returns only a hint. Used for profiling only!
ExtendedPC os::get_thread_pc(Thread* thread) {
  // Make sure that it is called by the watcher and the Threads lock is owned.
  assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
  // For now, is only used to profile the VM Thread
  assert(thread->is_VM_thread(), "Can only be called for VMThread");
  PcFetcher fetcher(thread);
  fetcher.run();
  return fetcher.result();
}


// This does not do anything on Solaris. This is basically a hook for being
// able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
  f(value, method, args, thread);
}

// This routine may be used by user applications as a "hook" to catch signals.
// The user-defined signal handler must pass unrecognized signals to this
// routine, and if it returns true (non-zero), then the signal handler must
// return immediately.  If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
//
// If this routine returns false, it is OK to call it again.  This allows
// the user-defined signal handler to perform checks either before or after
// the VM performs its own checks.  Naturally, the user code would be making
// a serious error if it tried to handle an exception (such as a null check
// or breakpoint) that the VM was generating for its own correct operation.
//
// This routine may recognize any of the following kinds of signals:
// SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
// os::Solaris::SIGasync
// It should be consulted by handlers for any of those signals.
// It explicitly does not recognize os::Solaris::SIGinterrupt
//
// The caller of this routine must pass in the three arguments supplied
// to the function referred to in the "sa_sigaction" (not the "sa_handler")
// field of the structure passed to sigaction().  This routine assumes that
// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
//
// Note that the VM will print warnings if it detects conflicting signal
// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
//
extern "C" JNIEXPORT int
JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext,
                          int abort_if_unrecognized);


// The VM's primary signal handler; delegates to JVM_handle_solaris_signal.
void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
  int orig_errno = errno;  // Preserve errno value over signal handler.
  JVM_handle_solaris_signal(sig, info, ucVoid, true);
  errno = orig_errno;
}

/* Do not delete - if guarantee is ever removed,  a signal handler (even empty)
   is needed to provoke threads blocked on IO to return an EINTR
   Note: this explicitly does NOT call JVM_handle_solaris_signal and
   does NOT participate in signal chaining due to requirement for
   NOT setting SA_RESTART to make EINTR work. */
extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
  if (UseSignalChaining) {
    struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
    if (actp && actp->sa_handler) {
      vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
    }
  }
}

// This boolean allows users to forward their own non-matching signals
// to JVM_handle_solaris_signal, harmlessly.
bool os::Solaris::signal_handlers_are_installed = false;

// For signal-chaining
bool os::Solaris::libjsig_is_loaded = false;
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Solaris::get_signal_action = NULL;

// Find the handler that was in place for 'sig' before the VM installed its
// own: first ask libjsig (if loaded), then the VM's saved copy.
struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
  struct sigaction *actp = NULL;

  if ((libjsig_is_loaded)  && (sig <= Maxlibjsigsigs)) {
    // Retrieve the old signal handler from libjsig
    actp = (*get_signal_action)(sig);
  }
  if (actp == NULL) {
    // Retrieve the preinstalled signal handler from jvm
    actp = get_preinstalled_handler(sig);
  }

  return actp;
}

// Invoke the chained (pre-existing) handler described by 'actp', honoring
// its sa_flags and signal mask; returns true if the signal was handled.
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand;
    sa_sigaction_t sa;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    thr_sigsetmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}

// Returns true if a chained (pre-existing) handler consumed the signal.
bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
  bool chained = false;
  // signal-chaining
  if (UseSignalChaining) {
    struct sigaction *actp = get_chained_signal_action(sig);
    if (actp != NULL) {
      chained = call_chained_handler(actp, sig, siginfo, context);
    }
  }
  return chained;
}

// Return the handler saved by save_preinstalled_handler(), or NULL.
struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  if (preinstalled_sigs[sig] != 0) {
    return &chainedsigactions[sig];
  }
  return NULL;
}

// Remember a handler that was installed before the VM's own, so signal
// chaining can forward to it later.
void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {

  assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  chainedsigactions[sig] = oldAct;
  preinstalled_sigs[sig] = 1;
}

// Install the VM's handler for 'sig', respecting any pre-existing handler
// according to AllowUserSignalHandlers / UseSignalChaining / 'oktochain'.
void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);
  void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                      : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      if (oktochain) {
        // save the old handler in jvm
        save_preinstalled_handler(sig, oldAct);
      } else {
        vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
      }
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
    } else {
      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
                    "%#lx for signal %d.", (long)oldhand, sig));
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_handler = SIG_DFL;

  sigAct.sa_sigaction = signalHandler;
  // Handle SIGSEGV on alternate signal stack if
  // not using stack banging
  if (!UseStackBanging && sig == SIGSEGV) {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
  // Interruptible i/o requires SA_RESTART cleared so EINTR
  // is returned instead of restarting system calls
  } else if (sig == os::Solaris::SIGinterrupt()) {
    sigemptyset(&sigAct.sa_mask);
    sigAct.sa_handler = NULL;
    sigAct.sa_flags = SA_SIGINFO;
    sigAct.sa_sigaction = sigINTRHandler;
  } else {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
  }
  os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);

  sigaction(sig, &sigAct, &oldAct);

  void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                       : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}


// Check 'sig' once, unless a mismatch has already been reported for it.
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Solaris::check_signal_handler(sig)

// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here

void os::run_periodic_checks() {
  // A big source of grief is hijacking virt. addr 0x0 on Solaris,
  // thereby preventing NULL checks.
  if(!check_addr0_done) check_addr0_done = check_addr0(tty);

  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check for the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  // See comments above for using JVM1/JVM2 and UseAltSigs
  DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
  DO_SIGNAL_CHECK(os::Solaris::SIGasync());

}

typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

static os_sigaction_t os_sigaction = NULL;

// Warn (once per signal) if some other code has replaced or re-flagged a
// handler the VM installed, then stop checking that signal.
void os::Solaris::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler) ;


  // Determine which handler the VM expects to own this signal.
  switch(sig) {
  case SIGSEGV:
  case SIGBUS:
  case SIGFPE:
  case SIGPIPE:
  case SIGXFSZ:
  case SIGILL:
    jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
    break;

  case SHUTDOWN1_SIGNAL:
  case SHUTDOWN2_SIGNAL:
  case SHUTDOWN3_SIGNAL:
  case BREAK_SIGNAL:
    jvmHandler = (address)user_handler();
    break;

  default:
    int intrsig = os::Solaris::SIGinterrupt();
    int asynsig = os::Solaris::SIGasync();

    if (sig == intrsig) {
      jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
    } else if (sig == asynsig) {
      jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
    } else {
      return;
    }
    break;
  }


  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
    tty->print_cr(" found:" PTR32_FORMAT, act.sa_flags);
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Print all the signal handler state
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }

}

void os::Solaris::install_signal_handlers() {
  bool libjsigdone = false;
  signal_handlers_are_installed = true;

  // signal-chaining
  typedef void (*signal_setting_t)();
4455 signal_setting_t begin_signal_setting = NULL; 4456 signal_setting_t end_signal_setting = NULL; 4457 begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t, 4458 dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting")); 4459 if (begin_signal_setting != NULL) { 4460 end_signal_setting = CAST_TO_FN_PTR(signal_setting_t, 4461 dlsym(RTLD_DEFAULT, "JVM_end_signal_setting")); 4462 get_signal_action = CAST_TO_FN_PTR(get_signal_t, 4463 dlsym(RTLD_DEFAULT, "JVM_get_signal_action")); 4464 get_libjsig_version = CAST_TO_FN_PTR(version_getting_t, 4465 dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version")); 4466 libjsig_is_loaded = true; 4467 if (os::Solaris::get_libjsig_version != NULL) { 4468 libjsigversion = (*os::Solaris::get_libjsig_version)(); 4469 } 4470 assert(UseSignalChaining, "should enable signal-chaining"); 4471 } 4472 if (libjsig_is_loaded) { 4473 // Tell libjsig jvm is setting signal handlers 4474 (*begin_signal_setting)(); 4475 } 4476 4477 set_signal_handler(SIGSEGV, true, true); 4478 set_signal_handler(SIGPIPE, true, true); 4479 set_signal_handler(SIGXFSZ, true, true); 4480 set_signal_handler(SIGBUS, true, true); 4481 set_signal_handler(SIGILL, true, true); 4482 set_signal_handler(SIGFPE, true, true); 4483 4484 4485 if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) { 4486 4487 // Pre-1.4.1 Libjsig limited to signal chaining signals <= 32 so 4488 // can not register overridable signals which might be > 32 4489 if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) { 4490 // Tell libjsig jvm has finished setting signal handlers 4491 (*end_signal_setting)(); 4492 libjsigdone = true; 4493 } 4494 } 4495 4496 // Never ok to chain our SIGinterrupt 4497 set_signal_handler(os::Solaris::SIGinterrupt(), true, false); 4498 set_signal_handler(os::Solaris::SIGasync(), true, true); 4499 4500 if (libjsig_is_loaded && !libjsigdone) { 4501 // Tell libjsig jvm finishes setting signal handlers 4502 (*end_signal_setting)(); 4503 } 4504 4505 // We 
don't activate signal checker if libjsig is in place, we trust ourselves
// and if UserSignalHandler is installed all bets are off.
// Log that signal checking is off only if -verbose:jni is specified.
if (CheckJNICalls) {
  if (libjsig_is_loaded) {
    // libjsig owns signal dispatch in this configuration; our own active
    // signal checking would only produce false positives.
    if (PrintJNIResolving) {
      tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
    }
    check_signals = false;
  }
  if (AllowUserSignalHandlers) {
    // User code is explicitly allowed to install handlers, so checking
    // for "stolen" handlers is meaningless.
    if (PrintJNIResolving) {
      tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
    }
    check_signals = false;
  }
}
}

// Forward declaration; the implementation lives in the VM's error-reporting code.
void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);

// Printable names for Solaris signals; the array index is the signal number.
const char * signames[] = {
  "SIG0",
  "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
  "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
  "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
  "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
  "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
  "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
  "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
  "SIGCANCEL", "SIGLOST"
};

// Format a printable name for 'exception_code' (a signal number) into 'buf'.
// Returns 'buf' on success, or NULL when the code is outside (0, SIGRTMAX].
// Signals beyond the named table are rendered as "SIG<n>".
const char* os::exception_name(int exception_code, char* buf, size_t size) {
  if (0 < exception_code && exception_code <= SIGRTMAX) {
    // signal
    if (exception_code < sizeof(signames)/sizeof(const char*)) {
      jio_snprintf(buf, size, "%s", signames[exception_code]);
    } else {
      jio_snprintf(buf, size, "SIG%d", exception_code);
    }
    return buf;
  } else {
    return NULL;
  }
}

// (Static) wrappers for the new libthread API
int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate;
int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate;
int_fnP_thread_t_i os::Solaris::_thr_setmutator;
int_fnP_thread_t os::Solaris::_thr_suspend_mutator;
int_fnP_thread_t os::Solaris::_thr_continue_mutator;

// (Static) wrapper for getisax(2) call.
os::Solaris::getisax_func_t os::Solaris::_getisax = 0;

// (Static) wrappers for the liblgrp API
os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;

// (Static) wrapper for meminfo() call.
os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;

// Look 'name' up in the default dlsym search scope, falling back to
// RTLD_NEXT for very old runtimes.  Returns NULL if the symbol is absent.
static address resolve_symbol_lazy(const char* name) {
  address addr = (address) dlsym(RTLD_DEFAULT, name);
  if(addr == NULL) {
    // RTLD_DEFAULT was not defined on some early versions of 2.5.1
    addr = (address) dlsym(RTLD_NEXT, name);
  }
  return addr;
}

// Like resolve_symbol_lazy(), but the symbol is mandatory: aborts the VM
// with fatal() when the lookup fails.
static address resolve_symbol(const char* name) {
  address addr = resolve_symbol_lazy(name);
  if(addr == NULL) {
    fatal(dlerror());
  }
  return addr;
}



// isT2_libthread()
//
// Routine to determine if we are currently using the new T2 libthread.
//
// We determine if we are using T2 by reading /proc/self/lstatus and
// looking for a thread with the ASLWP bit set.  If we find this status
// bit set, we must assume that we are NOT using T2.  The T2 team
// has approved this algorithm.
//
// We need to determine if we are running with the new T2 libthread
// since setting native thread priorities is handled differently
// when using this library.
// All threads created using T2 are bound
// threads. Calling thr_setprio is meaningless in this case.
//
bool isT2_libthread() {
  static prheader_t * lwpArray = NULL;
  static int lwpSize = 0;
  static int lwpFile = -1;
  lwpstatus_t * that;
  char lwpName [128];
  bool isT2 = false;

#define ADR(x)  ((uintptr_t)(x))
#define LWPINDEX(ary,ix)   ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1))))

  lwpFile = ::open("/proc/self/lstatus", O_RDONLY, 0);
  if (lwpFile < 0) {
    if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n");
    return false;
  }
  // Start with a 16K buffer; the loop below grows it to the size the
  // kernel reports and retries until a consistent snapshot fits.
  lwpSize = 16*1024;
  for (;;) {
    ::lseek64 (lwpFile, 0, SEEK_SET);
    lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize, mtInternal);
    if (::read(lwpFile, lwpArray, lwpSize) < 0) {
      if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
      break;
    }
    if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
      // We got a good snapshot - now iterate over the list.
      int aslwpcount = 0;
      for (int i = 0; i < lwpArray->pr_nent; i++ ) {
        that = LWPINDEX(lwpArray,i);
        if (that->pr_flags & PR_ASLWP) {
          aslwpcount++;
        }
      }
      // No ASLWP present means the process is running the T2 libthread.
      if (aslwpcount == 0) isT2 = true;
      break;
    }
    lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
    FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);  // retry.
  }

  FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);
  ::close (lwpFile);
  if (ThreadPriorityVerbose) {
    if (isT2) tty->print_cr("We are running with a T2 libthread\n");
    else tty->print_cr("We are not running with a T2 libthread\n");
  }
  return isT2;
}


// Resolve the libthread entry points the VM uses (mutator suspend/resume,
// thread state access, signal-handler range) and detect the T2 libthread.
// Aborts via guarantee()/fatal() if a required symbol is missing.
void os::Solaris::libthread_init() {
  address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");

  // Determine if we are running with the new T2 libthread
  os::Solaris::set_T2_libthread(isT2_libthread());

  lwp_priocntl_init();

  // RTLD_DEFAULT was not defined on some early versions of 5.5.1
  if(func == NULL) {
    func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
    // Guarantee that this VM is running on an new enough OS (5.6 or
    // later) that it will have a new enough libthread.so.
    guarantee(func != NULL, "libthread.so is too old.");
  }

  // Initialize the new libthread getstate API wrappers
  func = resolve_symbol("thr_getstate");
  os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func));

  func = resolve_symbol("thr_setstate");
  os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func));

  func = resolve_symbol("thr_setmutator");
  os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func));

  func = resolve_symbol("thr_suspend_mutator");
  os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

  func = resolve_symbol("thr_continue_mutator");
  os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

  // Record the address range of libthread's signal dispatcher so the VM
  // can recognize frames belonging to it.
  int size;
  void (*handler_info_func)(address *, int *);
  handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
  handler_info_func(&handler_start, &size);
  handler_end = handler_start + size;
}


int_fnP_mutex_tP
os::Solaris::_mutex_lock;
int_fnP_mutex_tP os::Solaris::_mutex_trylock;
int_fnP_mutex_tP os::Solaris::_mutex_unlock;
int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
int_fnP_mutex_tP os::Solaris::_mutex_destroy;
int os::Solaris::_mutex_scope = USYNC_THREAD;

int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
int_fnP_cond_tP os::Solaris::_cond_signal;
int_fnP_cond_tP os::Solaris::_cond_broadcast;
int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
int_fnP_cond_tP os::Solaris::_cond_destroy;
int os::Solaris::_cond_scope = USYNC_THREAD;

// Select which synchronization primitive family the VM will route through:
// raw LWP primitives (_lwp_*), pthreads, or default Solaris threads
// (mutex_*/cond_*), controlled by UseLWPSynchronization / UsePthreads.
void os::Solaris::synchronization_init() {
  if(UseLWPSynchronization) {
    os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
    os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
    os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
    os::Solaris::set_mutex_init(lwp_mutex_init);
    os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
    os::Solaris::set_mutex_scope(USYNC_THREAD);

    os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
    os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
    os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
    os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
    os::Solaris::set_cond_init(lwp_cond_init);
    os::Solaris::set_cond_destroy(lwp_cond_destroy);
    os::Solaris::set_cond_scope(USYNC_THREAD);
  }
  else {
    os::Solaris::set_mutex_scope(USYNC_THREAD);
    os::Solaris::set_cond_scope(USYNC_THREAD);

    if(UsePthreads) {
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
      os::Solaris::set_mutex_init(pthread_mutex_default_init);
      os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
      os::Solaris::set_cond_init(pthread_cond_default_init);
      os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
    }
    else {
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
      os::Solaris::set_mutex_init(::mutex_init);
      os::Solaris::set_mutex_destroy(::mutex_destroy);

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP,
                                                     resolve_symbol("cond_broadcast")));
      os::Solaris::set_cond_init(::cond_init);
      os::Solaris::set_cond_destroy(::cond_destroy);
    }
  }
}

// Dynamically load liblgrp (locality-group / NUMA support) and resolve the
// entry points we use.  Returns true on success, false when liblgrp is absent.
bool os::Solaris::liblgrp_init() {
  void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
  if (handle != NULL) {
    os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
    os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
    os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
    os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
    os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
    os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
    os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
    os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
                                       dlsym(handle, "lgrp_cookie_stale")));

    lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
    set_lgrp_cookie(c);
    return true;
  }
  return false;
}

// Resolve optional symbols (getisax, meminfo) that may be absent on older
// Solaris releases; users of these wrappers must tolerate NULL.
void os::Solaris::misc_sym_init() {
  address func;

  // getisax
  func = resolve_symbol_lazy("getisax");
  if (func != NULL) {
    os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
  }

  // meminfo
  func = resolve_symbol_lazy("meminfo");
  if (func != NULL) {
    os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
  }
}

// Query the instruction-set extensions; only callable once misc_sym_init()
// has successfully resolved getisax (asserted).
uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
  assert(_getisax != NULL, "_getisax not set");
  return _getisax(array, n);
}

// int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
static pset_getloadavg_type
pset_getloadavg_ptr = NULL;

// Resolve pset_getloadavg(3C) if libc provides it; the pointer stays NULL
// (and callers must cope) on releases without processor-set load averages.
void init_pset_getloadavg_ptr(void) {
  pset_getloadavg_ptr =
    (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
  if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
    warning("pset_getloadavg function not found");
  }
}

int os::Solaris::_dev_zero_fd = -1;

// this is called _before_ the global arguments have been parsed
void os::init(void) {
  _initial_pid = getpid();

  max_hrtime = first_hrtime = gethrtime();

  init_random(1234567);

  page_size = sysconf(_SC_PAGESIZE);
  if (page_size == -1)
    fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
                  strerror(errno)));
  init_page_sizes((size_t) page_size);

  Solaris::initialize_system_info();

  // Initialize misc. symbols as soon as possible, so we can use them
  // if we need them.
  Solaris::misc_sym_init();

  // Keep a /dev/zero descriptor open for anonymous mappings.
  int fd = ::open("/dev/zero", O_RDWR);
  if (fd < 0) {
    fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
  } else {
    Solaris::set_dev_zero_fd(fd);

    // Close on exec, child won't inherit.
    fcntl(fd, F_SETFD, FD_CLOEXEC);
  }

  clock_tics_per_sec = CLK_TCK;

  // check if dladdr1() exists; dladdr1 can provide more information than
  // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
  // and is available on linker patches for 5.7 and 5.8.
  // libdl.so must have been loaded, this call is just an entry lookup
  void * hdl = dlopen("libdl.so", RTLD_NOW);
  if (hdl)
    dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));

  // (Solaris only) this switches to calls that actually do locking.
  ThreadCritical::initialize();

  main_thread = thr_self();

  // Constant minimum stack size allowed. It must be at least
  // the minimum of what the OS supports (thr_min_stack()), and
  // enough to allow the thread to get to user bytecode execution.
  Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
  // If the pagesize of the VM is greater than 8K determine the appropriate
  // number of initial guard pages.  The user can change this with the
  // command line arguments, if needed.
  if (vm_page_size() > 8*K) {
    StackYellowPages = 1;
    StackRedPages = 1;
    StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
  }
}

// To install functions for atexit system call
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
  // try to enable extended file IO ASAP, see 6431278
  os::Solaris::try_enable_extended_io();

  // Allocate a single page and mark it as readable for safepoint polling.  Also
  // use this first mmap call to check support for MAP_ALIGN.
  address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
                                                      page_size,
                                                      MAP_PRIVATE | MAP_ALIGN,
                                                      PROT_READ);
  if (polling_page == NULL) {
    // MAP_ALIGN unsupported on this release; remember that and retry plain.
    has_map_align = false;
    polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
                                                PROT_READ);
  }

  os::set_polling_page(polling_page);

#ifndef PRODUCT
  if( Verbose && PrintMiscellaneous )
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif

  if (!UseMembar) {
    // Serialize page used instead of explicit membar instructions.
    address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
    guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
    os::set_memory_serialize_page( mem_serialize_page );

#ifndef PRODUCT
    if(Verbose && PrintMiscellaneous)
      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
  }

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size.  Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
                    2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);

  size_t threadStackSizeInBytes = ThreadStackSize * K;
  if (threadStackSizeInBytes != 0 &&
      threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
                  os::Solaris::min_stack_allowed/K);
    return JNI_ERR;
  }

  // For 64kbps there will be a 64kb page size, which makes
  // the usable default stack size quite a bit less.  Increase the
  // stack for 64kb (or any > than 8kb) pages, this increases
  // virtual memory fragmentation (since we're not creating the
  // stack on a power of 2 boundary.  The real fix for this
  // should be to fix the guard page mechanism.

  if (vm_page_size() > 8*K) {
    threadStackSizeInBytes = (threadStackSizeInBytes != 0)
       ? threadStackSizeInBytes +
         ((StackYellowPages + StackRedPages) * vm_page_size())
       : 0;
    ThreadStackSize = threadStackSizeInBytes/K;
  }

  // Make the stack size a multiple of the page size so that
  // the yellow/red zones can be guarded.
  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
                                                vm_page_size()));

  Solaris::libthread_init();

  if (UseNUMA) {
    if (!Solaris::liblgrp_init()) {
      UseNUMA = false;
    } else {
      size_t lgrp_limit = os::numa_get_groups_num();
      int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
      size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
      FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal);
      if (lgrp_num < 2) {
        // There's only one locality group, disable NUMA.
        UseNUMA = false;
      }
    }
    if (!UseNUMA && ForceNUMA) {
      UseNUMA = true;
    }
  }

  Solaris::signal_sets_init();
  Solaris::init_signal_mem();
  Solaris::install_signal_handlers();

  // Pre-1.4.1 libjsig can only chain the first OLDMAXSIGNUM signals.
  if (libjsigversion < JSIG_VERSION_1_4_1) {
    Maxlibjsigsigs = OLDMAXSIGNUM;
  }

  // initialize synchronization primitives to use either thread or
  // lwp synchronization (controlled by UseLWPSynchronization)
  Solaris::synchronization_init();

  if (MaxFDLimit) {
    // set the number of file descriptors to max. print out error
    // if getrlimit/setrlimit fails but continue regardless.
    struct rlimit nbr_files;
    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
    if (status != 0) {
      if (PrintMiscellaneous && (Verbose || WizardMode))
        perror("os::init_2 getrlimit failed");
    } else {
      nbr_files.rlim_cur = nbr_files.rlim_max;
      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
      if (status != 0) {
        if (PrintMiscellaneous && (Verbose || WizardMode))
          perror("os::init_2 setrlimit failed");
      }
    }
  }

  // Calculate theoretical max. size of Threads to guard gainst
  // artifical out-of-memory situations, where all available address-
  // space has been reserved by thread stacks. Default stack size is 1Mb.
  size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
    JavaThread::stack_size_at_create() : (1*K*K);
  assert(pre_thread_stack_size != 0, "Must have a stack");
  // Solaris has a maximum of 4Gb of user programs. Calculate the thread limit when
  // we should start doing Virtual Memory banging. Currently when the threads will
  // have used all but 200Mb of space.
  size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
  Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;

  // at-exit methods are called in the reverse order of their registration.
  // In Solaris 7 and earlier, atexit functions are called on return from
  // main or as a result of a call to exit(3C). There can be only 32 of
  // these functions registered and atexit() does not set errno. In Solaris
  // 8 and later, there is no limit to the number of functions registered
  // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
  // functions are called upon dlclose(3DL) in addition to return from main
  // and exit(3C).

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init2 atexit(perfMemory_exit_helper) failed");
    }
  }

  // Init pset_loadavg function pointer
  init_pset_getloadavg_ptr();

  return JNI_OK;
}

// Third (and last) phase of OS initialization; nothing to do on Solaris.
void os::init_3(void) {
  return;
}

// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
  if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
    fatal("Could not disable polling page");
};

// Mark the polling page as readable
void os::make_polling_page_readable(void) {
  if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 )
    fatal("Could not enable polling page");
};

// OS interface.
5068 5069 bool os::check_heap(bool force) { return true; } 5070 5071 typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr); 5072 static vsnprintf_t sol_vsnprintf = NULL; 5073 5074 int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) { 5075 if (!sol_vsnprintf) { 5076 //search for the named symbol in the objects that were loaded after libjvm 5077 void* where = RTLD_NEXT; 5078 if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL) 5079 sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf")); 5080 if (!sol_vsnprintf){ 5081 //search for the named symbol in the objects that were loaded before libjvm 5082 where = RTLD_DEFAULT; 5083 if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL) 5084 sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf")); 5085 assert(sol_vsnprintf != NULL, "vsnprintf not found"); 5086 } 5087 } 5088 return (*sol_vsnprintf)(buf, count, fmt, argptr); 5089 } 5090 5091 5092 // Is a (classpath) directory empty? 5093 bool os::dir_is_empty(const char* path) { 5094 DIR *dir = NULL; 5095 struct dirent *ptr; 5096 5097 dir = opendir(path); 5098 if (dir == NULL) return true; 5099 5100 /* Scan the directory */ 5101 bool result = true; 5102 char buf[sizeof(struct dirent) + MAX_PATH]; 5103 struct dirent *dbuf = (struct dirent *) buf; 5104 while (result && (ptr = readdir(dir, dbuf)) != NULL) { 5105 if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) { 5106 result = false; 5107 } 5108 } 5109 closedir(dir); 5110 return result; 5111 } 5112 5113 // This code originates from JDK's sysOpen and open64_w 5114 // from src/solaris/hpi/src/system_md.c 5115 5116 #ifndef O_DELETE 5117 #define O_DELETE 0x10000 5118 #endif 5119 5120 // Open a file. Unlink the file immediately after open returns 5121 // if the specified oflag has the O_DELETE flag set. 
5122 // O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c 5123 5124 int os::open(const char *path, int oflag, int mode) { 5125 if (strlen(path) > MAX_PATH - 1) { 5126 errno = ENAMETOOLONG; 5127 return -1; 5128 } 5129 int fd; 5130 int o_delete = (oflag & O_DELETE); 5131 oflag = oflag & ~O_DELETE; 5132 5133 fd = ::open64(path, oflag, mode); 5134 if (fd == -1) return -1; 5135 5136 //If the open succeeded, the file might still be a directory 5137 { 5138 struct stat64 buf64; 5139 int ret = ::fstat64(fd, &buf64); 5140 int st_mode = buf64.st_mode; 5141 5142 if (ret != -1) { 5143 if ((st_mode & S_IFMT) == S_IFDIR) { 5144 errno = EISDIR; 5145 ::close(fd); 5146 return -1; 5147 } 5148 } else { 5149 ::close(fd); 5150 return -1; 5151 } 5152 } 5153 /* 5154 * 32-bit Solaris systems suffer from: 5155 * 5156 * - an historical default soft limit of 256 per-process file 5157 * descriptors that is too low for many Java programs. 5158 * 5159 * - a design flaw where file descriptors created using stdio 5160 * fopen must be less than 256, _even_ when the first limit above 5161 * has been raised. This can cause calls to fopen (but not calls to 5162 * open, for example) to fail mysteriously, perhaps in 3rd party 5163 * native code (although the JDK itself uses fopen). One can hardly 5164 * criticize them for using this most standard of all functions. 5165 * 5166 * We attempt to make everything work anyways by: 5167 * 5168 * - raising the soft limit on per-process file descriptors beyond 5169 * 256 5170 * 5171 * - As of Solaris 10u4, we can request that Solaris raise the 256 5172 * stdio fopen limit by calling function enable_extended_FILE_stdio. 5173 * This is done in init_2 and recorded in enabled_extended_FILE_stdio 5174 * 5175 * - If we are stuck on an old (pre 10u4) Solaris system, we can 5176 * workaround the bug by remapping non-stdio file descriptors below 5177 * 256 to ones beyond 256, which is done below. 
5178 * 5179 * See: 5180 * 1085341: 32-bit stdio routines should support file descriptors >255 5181 * 6533291: Work around 32-bit Solaris stdio limit of 256 open files 5182 * 6431278: Netbeans crash on 32 bit Solaris: need to call 5183 * enable_extended_FILE_stdio() in VM initialisation 5184 * Giri Mandalika's blog 5185 * http://technopark02.blogspot.com/2005_05_01_archive.html 5186 */ 5187 #ifndef _LP64 5188 if ((!enabled_extended_FILE_stdio) && fd < 256) { 5189 int newfd = ::fcntl(fd, F_DUPFD, 256); 5190 if (newfd != -1) { 5191 ::close(fd); 5192 fd = newfd; 5193 } 5194 } 5195 #endif // 32-bit Solaris 5196 /* 5197 * All file descriptors that are opened in the JVM and not 5198 * specifically destined for a subprocess should have the 5199 * close-on-exec flag set. If we don't set it, then careless 3rd 5200 * party native code might fork and exec without closing all 5201 * appropriate file descriptors (e.g. as we do in closeDescriptors in 5202 * UNIXProcess.c), and this in turn might: 5203 * 5204 * - cause end-of-file to fail to be detected on some file 5205 * descriptors, resulting in mysterious hangs, or 5206 * 5207 * - might cause an fopen in the subprocess to fail on a system 5208 * suffering from bug 1085341. 
5209 * 5210 * (Yes, the default setting of the close-on-exec flag is a Unix 5211 * design flaw) 5212 * 5213 * See: 5214 * 1085341: 32-bit stdio routines should support file descriptors >255 5215 * 4843136: (process) pipe file descriptor from Runtime.exec not being closed 5216 * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9 5217 */ 5218 #ifdef FD_CLOEXEC 5219 { 5220 int flags = ::fcntl(fd, F_GETFD); 5221 if (flags != -1) 5222 ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC); 5223 } 5224 #endif 5225 5226 if (o_delete != 0) { 5227 ::unlink(path); 5228 } 5229 return fd; 5230 } 5231 5232 // create binary file, rewriting existing file if required 5233 int os::create_binary_file(const char* path, bool rewrite_existing) { 5234 int oflags = O_WRONLY | O_CREAT; 5235 if (!rewrite_existing) { 5236 oflags |= O_EXCL; 5237 } 5238 return ::open64(path, oflags, S_IREAD | S_IWRITE); 5239 } 5240 5241 // return current position of file pointer 5242 jlong os::current_file_offset(int fd) { 5243 return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR); 5244 } 5245 5246 // move file pointer to the specified offset 5247 jlong os::seek_to_file_offset(int fd, jlong offset) { 5248 return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET); 5249 } 5250 5251 jlong os::lseek(int fd, jlong offset, int whence) { 5252 return (jlong) ::lseek64(fd, offset, whence); 5253 } 5254 5255 char * os::native_path(char *path) { 5256 return path; 5257 } 5258 5259 int os::ftruncate(int fd, jlong length) { 5260 return ::ftruncate64(fd, length); 5261 } 5262 5263 int os::fsync(int fd) { 5264 RESTARTABLE_RETURN_INT(::fsync(fd)); 5265 } 5266 5267 int os::available(int fd, jlong *bytes) { 5268 jlong cur, end; 5269 int mode; 5270 struct stat64 buf64; 5271 5272 if (::fstat64(fd, &buf64) >= 0) { 5273 mode = buf64.st_mode; 5274 if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) { 5275 /* 5276 * XXX: is the following call interruptible? 
If so, this might 5277 * need to go through the INTERRUPT_IO() wrapper as for other 5278 * blocking, interruptible calls in this file. 5279 */ 5280 int n,ioctl_return; 5281 5282 INTERRUPTIBLE(::ioctl(fd, FIONREAD, &n),ioctl_return,os::Solaris::clear_interrupted); 5283 if (ioctl_return>= 0) { 5284 *bytes = n; 5285 return 1; 5286 } 5287 } 5288 } 5289 if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) { 5290 return 0; 5291 } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) { 5292 return 0; 5293 } else if (::lseek64(fd, cur, SEEK_SET) == -1) { 5294 return 0; 5295 } 5296 *bytes = end - cur; 5297 return 1; 5298 } 5299 5300 // Map a block of memory. 5301 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, 5302 char *addr, size_t bytes, bool read_only, 5303 bool allow_exec) { 5304 int prot; 5305 int flags; 5306 5307 if (read_only) { 5308 prot = PROT_READ; 5309 flags = MAP_SHARED; 5310 } else { 5311 prot = PROT_READ | PROT_WRITE; 5312 flags = MAP_PRIVATE; 5313 } 5314 5315 if (allow_exec) { 5316 prot |= PROT_EXEC; 5317 } 5318 5319 if (addr != NULL) { 5320 flags |= MAP_FIXED; 5321 } 5322 5323 char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags, 5324 fd, file_offset); 5325 if (mapped_address == MAP_FAILED) { 5326 return NULL; 5327 } 5328 return mapped_address; 5329 } 5330 5331 5332 // Remap a block of memory. 5333 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, 5334 char *addr, size_t bytes, bool read_only, 5335 bool allow_exec) { 5336 // same as map_memory() on this OS 5337 return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only, 5338 allow_exec); 5339 } 5340 5341 5342 // Unmap a block of memory. 
5343 bool os::pd_unmap_memory(char* addr, size_t bytes) { 5344 return munmap(addr, bytes) == 0; 5345 } 5346 5347 void os::pause() { 5348 char filename[MAX_PATH]; 5349 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 5350 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 5351 } else { 5352 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 5353 } 5354 5355 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 5356 if (fd != -1) { 5357 struct stat buf; 5358 ::close(fd); 5359 while (::stat(filename, &buf) == 0) { 5360 (void)::poll(NULL, 0, 100); 5361 } 5362 } else { 5363 jio_fprintf(stderr, 5364 "Could not open pause file '%s', continuing immediately.\n", filename); 5365 } 5366 } 5367 5368 #ifndef PRODUCT 5369 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS 5370 // Turn this on if you need to trace synch operations. 5371 // Set RECORD_SYNCH_LIMIT to a large-enough value, 5372 // and call record_synch_enable and record_synch_disable 5373 // around the computation of interest. 
5374 5375 void record_synch(char* name, bool returning); // defined below 5376 5377 class RecordSynch { 5378 char* _name; 5379 public: 5380 RecordSynch(char* name) :_name(name) 5381 { record_synch(_name, false); } 5382 ~RecordSynch() { record_synch(_name, true); } 5383 }; 5384 5385 #define CHECK_SYNCH_OP(ret, name, params, args, inner) \ 5386 extern "C" ret name params { \ 5387 typedef ret name##_t params; \ 5388 static name##_t* implem = NULL; \ 5389 static int callcount = 0; \ 5390 if (implem == NULL) { \ 5391 implem = (name##_t*) dlsym(RTLD_NEXT, #name); \ 5392 if (implem == NULL) fatal(dlerror()); \ 5393 } \ 5394 ++callcount; \ 5395 RecordSynch _rs(#name); \ 5396 inner; \ 5397 return implem args; \ 5398 } 5399 // in dbx, examine callcounts this way: 5400 // for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done 5401 5402 #define CHECK_POINTER_OK(p) \ 5403 (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p))) 5404 #define CHECK_MU \ 5405 if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only."); 5406 #define CHECK_CV \ 5407 if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only."); 5408 #define CHECK_P(p) \ 5409 if (!CHECK_POINTER_OK(p)) fatal(false, "Pointer must be in C heap only."); 5410 5411 #define CHECK_MUTEX(mutex_op) \ 5412 CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU); 5413 5414 CHECK_MUTEX( mutex_lock) 5415 CHECK_MUTEX( _mutex_lock) 5416 CHECK_MUTEX( mutex_unlock) 5417 CHECK_MUTEX(_mutex_unlock) 5418 CHECK_MUTEX( mutex_trylock) 5419 CHECK_MUTEX(_mutex_trylock) 5420 5421 #define CHECK_COND(cond_op) \ 5422 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV); 5423 5424 CHECK_COND( cond_wait); 5425 CHECK_COND(_cond_wait); 5426 CHECK_COND(_cond_wait_cancel); 5427 5428 #define CHECK_COND2(cond_op) \ 5429 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV); 5430 5431 CHECK_COND2( cond_timedwait); 5432 
CHECK_COND2(_cond_timedwait);
CHECK_COND2(_cond_timedwait_cancel);

// do the _lwp_* versions too
// (temporarily rebind mutex_t/cond_t so the same macros expand with the
// lwp-level types; undone by the #undefs below)
#define mutex_t lwp_mutex_t
#define cond_t  lwp_cond_t
CHECK_MUTEX(  _lwp_mutex_lock)
CHECK_MUTEX(  _lwp_mutex_unlock)
CHECK_MUTEX(  _lwp_mutex_trylock)
CHECK_MUTEX( __lwp_mutex_lock)
CHECK_MUTEX( __lwp_mutex_unlock)
CHECK_MUTEX( __lwp_mutex_trylock)
CHECK_MUTEX(___lwp_mutex_lock)
CHECK_MUTEX(___lwp_mutex_unlock)

CHECK_COND(  _lwp_cond_wait);
CHECK_COND( __lwp_cond_wait);
CHECK_COND(___lwp_cond_wait);

CHECK_COND2( _lwp_cond_timedwait);
CHECK_COND2(__lwp_cond_timedwait);
#undef mutex_t
#undef cond_t

// Miscellaneous lwp primitives; the '0' inner check means "no check".
CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);


// recording machinery:
// Fixed-size in-memory ring (no wrap: recording simply stops at the limit)
// of the most recent interposed synchronization calls, for dbx inspection.

enum { RECORD_SYNCH_LIMIT = 200 };
char* record_synch_name[RECORD_SYNCH_LIMIT];       // #name of the interposed op
void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];    // stack address captured at call time
bool record_synch_returning[RECORD_SYNCH_LIMIT];   // false = entry, true = exit
thread_t record_synch_thread[RECORD_SYNCH_LIMIT];  // calling thread id
int record_synch_count = 0;
bool record_synch_enabled = false;

// in dbx, examine recorded data this way:
// for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done

// Append one entry/exit event to the trace buffer (no-op once full or when
// recording is disabled).  Not thread-safe; intended for debugger-driven use.
void record_synch(char* name, bool returning) {
  if (record_synch_enabled) {
    if (record_synch_count < RECORD_SYNCH_LIMIT) {
      record_synch_name[record_synch_count] = name;
      record_synch_returning[record_synch_count] = returning;
      record_synch_thread[record_synch_count] = thr_self();
      // NOTE(review): this stores the address of the 'name' parameter slot,
      // i.e. a stack location in the interposer frame, not the synch object
      // itself — presumably to locate the calling frame; confirm intent.
      record_synch_arg0ptr[record_synch_count] = &name;
      record_synch_count++;
    }
    // put more checking code here:
    // ...
  }
}

void record_synch_enable() {
  // start collecting trace data, if not already doing so
  if (!record_synch_enabled)  record_synch_count = 0;
  record_synch_enabled = true;
}

void record_synch_disable() {
  // stop collecting trace data
  record_synch_enabled = false;
}

#endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
#endif // PRODUCT

// Byte offset of pr_utime within prusage_t, and the size of the
// [pr_utime, pr_ttime) window, computed with the classic null-pointer
// offsetof idiom.  Used below to pread just the timing fields from
// /proc/<pid>/lwp/<lwpid>/lwpusage.
const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
                               (intptr_t)(&((prusage_t *)(NULL))->pr_utime);


// JVMTI & JVM monitoring and management support
// The thread_cpu_time() and current_thread_cpu_time() are only
// supported if is_thread_cpu_time_supported() returns true.
// They are not supported on Solaris T1.

// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread *)
// returns the fast estimate available on the platform.

// hrtime_t gethrvtime() return value includes
// user time but does not include system time
jlong os::current_thread_cpu_time() {
  return (jlong) gethrvtime();
}

jlong os::thread_cpu_time(Thread *thread) {
  // return user level CPU time only to be consistent with
  // what current_thread_cpu_time returns.
  // thread_cpu_time_info() must be changed if this changes
  return os::thread_cpu_time(thread, false /* user time only */);
}

// CPU time of the current thread; user+system requires the slow /proc path,
// user-only uses the fast gethrvtime() estimate.
jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  if (user_sys_cpu_time) {
    return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
  } else {
    return os::current_thread_cpu_time();
  }
}

// Reads the per-lwp usage record from /proc and returns the thread's CPU
// time in nanoseconds (user+system or user-only), or -1 if the lwpusage
// file cannot be opened or read.
jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  char proc_name[64];
  int count;
  prusage_t prusage;
  jlong lwp_time;
  int fd;

  sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
          getpid(),
          thread->osthread()->lwp_id());
  fd = ::open(proc_name, O_RDONLY);
  if ( fd == -1 ) return -1;

  do {
    // read only the [pr_utime, pr_ttime) window of the prusage record,
    // retrying if interrupted by a signal
    count = ::pread(fd,
                    (void *)&prusage.pr_utime,
                    thr_time_size,
                    thr_time_off);
  } while (count < 0 && errno == EINTR);
  ::close(fd);
  if ( count < 0 ) return -1;

  if (user_sys_cpu_time) {
    // user + system CPU time
    lwp_time = (((jlong)prusage.pr_stime.tv_sec +
                 (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
                 (jlong)prusage.pr_stime.tv_nsec +
                 (jlong)prusage.pr_utime.tv_nsec;
  } else {
    // user level CPU time only
    lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
                (jlong)prusage.pr_utime.tv_nsec;
  }

  return(lwp_time);
}

// Describe the timer behind current_thread_cpu_time() for JVMTI.
void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;    // elapsed time not wall time
  info_ptr->may_skip_forward = false;     // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
}

// Describe the timer behind thread_cpu_time() for JVMTI (same contract).
void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;    // elapsed time not wall time
  info_ptr->may_skip_forward = false;     // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
}

// Per-thread CPU time is supported with the T2 libthread or bound threads;
// not on T1 (see comment block above).
bool os::is_thread_cpu_time_supported() {
  if ( os::Solaris::T2_libthread() || UseBoundThreads ) {
    return true;
  } else {
    return false;
  }
}

// System loadavg support.  Returns -1 if load average cannot be obtained.
// Return the load average for our processor set if the primitive exists
// (Solaris 9 and later).  Otherwise just return system wide loadavg.
int os::loadavg(double loadavg[], int nelem) {
  if (pset_getloadavg_ptr != NULL) {
    return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
  } else {
    return ::getloadavg(loadavg, nelem);
  }
}

//---------------------------------------------------------------------------------

// Print symbolic information for 'addr' (symbol+offset, containing library,
// library base) to 'st' via dladdr; with -verbose also disassemble the bytes
// around the PC.  Returns true iff dladdr resolved the address.
bool os::find(address addr, outputStream* st) {
  Dl_info dlinfo;
  memset(&dlinfo, 0, sizeof(dlinfo));
  if (dladdr(addr, &dlinfo) != 0) {
    st->print(PTR_FORMAT ": ", addr);
    if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
      st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
    } else if (dlinfo.dli_fbase != NULL)
      st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
    else
      st->print("<absolute address>");
    if (dlinfo.dli_fname != NULL) {
      st->print(" in %s", dlinfo.dli_fname);
    }
    if (dlinfo.dli_fbase != NULL) {
      st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
    }
    st->cr();

    if (Verbose) {
      // decode some bytes around the PC
      address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
      address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
      address lowest = (address) dlinfo.dli_sname;
      if (!lowest)  lowest = (address) dlinfo.dli_fbase;
      if (begin < lowest)  begin = lowest;
      Dl_info dlinfo2;
      // don't decode past the start of the next symbol
      if (dladdr(end, &dlinfo2) != 0
          && dlinfo2.dli_saddr != dlinfo.dli_saddr
          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
        end = (address) dlinfo2.dli_saddr;
      Disassembler::decode(begin, end, st);
    }
    return true;
  }
  return false;
}

// Following function has been added to support HotSparc's libjvm.so running
// under Solaris production JDK 1.2.2 / 1.3.0.  These came from
// src/solaris/hpi/native_threads in the EVM codebase.
//
// NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
// libraries and should thus be removed. We will leave it behind for a while
// until we no longer want to able to run on top of 1.3.0 Solaris production
// JDK. See 4341971.

#define STACK_SLACK 0x800

extern "C" {
  // Estimate of remaining stack space (bytes) below the current frame,
  // less a safety margin, using thr_stksegment() for the segment bounds.
  intptr_t sysThreadAvailableStackWithSlack() {
    stack_t st;
    intptr_t retval, stack_top;
    retval = thr_stksegment(&st);
    assert(retval == 0, "incorrect return value from thr_stksegment");
    assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
    assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
    stack_top=(intptr_t)st.ss_sp-st.ss_size;
    return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
  }
}

// ObjectMonitor park-unpark infrastructure ...
//
// We implement Solaris and Linux PlatformEvents with the
// obvious condvar-mutex-flag triple.
// Another alternative that works quite well is pipes:
// Each PlatformEvent consists of a pipe-pair.
// The thread associated with the PlatformEvent
// calls park(), which reads from the input end of the pipe.
// Unpark() writes into the other end of the pipe.
// The write-side of the pipe must be set NDELAY.
// Unfortunately pipes consume a large # of handles.
// Native solaris lwp_park() and lwp_unpark() work nicely, too.
// Using pipes for the 1st few threads might be workable, however.
//
// park() is permitted to return spuriously.
// Callers of park() should wrap the call to park() in
// an appropriate loop.  A litmus test for the correct
// usage of park is the following: if park() were modified
// to immediately return 0 your code should still work,
// albeit degenerating to a spin loop.
//
// An interesting optimization for park() is to use a trylock()
// to attempt to acquire the mutex.  If the trylock() fails
// then we know that a concurrent unpark() operation is in-progress.
// in that case the park() code could simply set _count to 0
// and return immediately.  The subsequent park() operation *might*
// return immediately.  That's harmless as the caller of park() is
// expected to loop.  By using trylock() we will have
// avoided a context switch caused by contention on the per-thread mutex.
//
// TODO-FIXME:
// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the
//     objectmonitor implementation.
// 2.  Collapse the JSR166 parker event, and the
//     objectmonitor ParkEvent into a single "Event" construct.
// 3.  In park() and unpark() add:
//     assert (Thread::current() == AssociatedWith).
// 4.  add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
//     1-out-of-N park() operations will return immediately.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block
//
// _Event serves as a restricted-range semaphore.
//
// Another possible encoding of _Event would be with
// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
//
// TODO-FIXME: add DTRACE probes for:
// 1.   Tx parks
// 2.   Ty unparks Tx
// 3.   Tx resumes from park


// value determined through experimentation
#define ROUNDINGFIX 11

// utility to compute the abstime argument to timedwait.
// Converts a relative timeout in milliseconds into the absolute
// timestruc_t that cond_timedwait()/_lwp_cond_timedwait() expect,
// clamping to the platform's maximum acceptable wait period.
// TODO-FIXME: switch from compute_abstime() to unpackTime().

static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
  // millis is the relative timeout time
  // abstime will be the absolute timeout time
  if (millis < 0)  millis = 0;
  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");
  jlong seconds = millis / 1000;
  jlong max_wait_period;

  if (UseLWPSynchronization) {
    // forward port of fix for 4275818 (not sleeping long enough)
    // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
    // _lwp_cond_timedwait() used a round_down algorithm rather
    // than a round_up. For millis less than our roundfactor
    // it rounded down to 0 which doesn't meet the spec.
    // For millis > roundfactor we may return a bit sooner, but
    // since we can not accurately identify the patch level and
    // this has already been fixed in Solaris 9 and 8 we will
    // leave it alone rather than always rounding down.
    if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
    // It appears that when we go directly through Solaris _lwp_cond_timedwait()
    // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
    max_wait_period = 21000000;
  } else {
    max_wait_period = 50000000;
  }
  millis %= 1000;   // sub-second remainder; whole seconds were split off above
  if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
    seconds = max_wait_period;
  }
  abstime->tv_sec = now.tv_sec + seconds;
  long       usec = now.tv_usec + millis * 1000;
  if (usec >= 1000000) {
    // carry overflowed microseconds into the seconds field
    abstime->tv_sec += 1;
    usec -= 1000000;
  }
  abstime->tv_nsec = usec * 1000;
  return abstime;
}

// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
// Conceptually TryPark() should be equivalent to park(0).

int os::PlatformEvent::TryPark() {
  for (;;) {
    const int v = _Event ;
    guarantee ((v == 0) || (v == 1), "invariant") ;
    // CAS to 0; retry on contention with a concurrent unpark()
    if (Atomic::cmpxchg (0, &_Event, v) == v) return v  ;
  }
}

void os::PlatformEvent::park() {           // AKA: down()
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // Atomically decrement _Event: 1 -> 0 consumes a permit and returns;
  // 0 -> -1 means we must block on the condvar.
  int v ;
  for (;;) {
      v = _Event ;
      if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v == 0) {
     // Do this the hard way by blocking ...
     // See http://monaco.sfbay/detail.jsf?cr=5094058.
     // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
     // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
     if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif
     int status = os::Solaris::mutex_lock(_mutex);
     assert_status(status == 0, status,  "mutex_lock");
     guarantee (_nParked == 0, "invariant") ;
     ++ _nParked ;
     while (_Event < 0) {
        // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
        // Treat this the same as if the wait was interrupted
        // With usr/lib/lwp going to kernel, always handle ETIME
        status = os::Solaris::cond_wait(_cond, _mutex);
        if (status == ETIME) status = EINTR ;
        assert_status(status == 0 || status == EINTR, status, "cond_wait");
     }
     -- _nParked ;
     _Event = 0 ;
     status = os::Solaris::mutex_unlock(_mutex);
     assert_status(status == 0, status, "mutex_unlock");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other.
    OrderAccess::fence();
  }
}

// Timed variant: block for at most 'millis' ms.  Returns OS_OK if a permit
// was consumed (including one delivered while waiting), OS_TIMEOUT otherwise.
int os::PlatformEvent::park(jlong millis) {
  guarantee (_nParked == 0, "invariant") ;
  int v ;
  for (;;) {
      v = _Event ;
      if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v != 0) return OS_OK ;   // permit was available - no wait needed

  int ret = OS_TIMEOUT;
  timestruc_t abst;
  compute_abstime (&abst, millis);

  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // For Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif
  int status = os::Solaris::mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee (_nParked == 0, "invariant") ;
  ++ _nParked ;
  while (_Event < 0) {
     int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
     assert_status(status == 0 || status == EINTR ||
                   status == ETIME || status == ETIMEDOUT,
                   status, "cond_timedwait");
     if (!FilterSpuriousWakeups) break ;                // previous semantics
     if (status == ETIME || status == ETIMEDOUT) break ;
     // We consume and ignore EINTR and spurious wakeups.
  }
  -- _nParked ;
  if (_Event >= 0) ret = OS_OK ;   // an unpark() arrived before the timeout
  _Event = 0 ;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other.
  OrderAccess::fence();
  return ret;
}

void os::PlatformEvent::unpark() {
  // Transitions for _Event:
  //    0 :=> 1
  //    1 :=> 1
  //   -1 :=> either 0 or 1; must signal target thread
  //          That is, we can safely transition _Event from -1 to either
  //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
  //          unpark() calls.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  if (Atomic::xchg(1, &_Event) >= 0) return;   // no waiter - nothing to signal

  // If the thread associated with the event was parked, wake it.
  // Wait for the thread assoc with the PlatformEvent to vacate.
  int status = os::Solaris::mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  int AnyWaiters = _nParked;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
  if (AnyWaiters != 0) {
    // We intentional signal *after* dropping the lock
    // to avoid a common class of futile wakeups.
    status = os::Solaris::cond_signal(_cond);
    assert_status(status == 0, status, "cond_signal");
  }
}

// JSR166
// -------------------------------------------------------

/*
 * The solaris and linux implementations of park/unpark are fairly
 * conservative for now, but can be improved. They currently use a
 * mutex/condvar pair, plus _counter.
 * Park decrements _counter if > 0, else does a condvar wait.  Unpark
 * sets count to 1 and signals condvar.  Only one thread ever waits
 * on the condvar. Contention seen when trying to park implies that someone
 * is unparking you, so don't wait. And spurious returns are fine, so there
 * is no need to track notifications.
 */

#define MAX_SECS 100000000
/*
 * This code is common to linux and solaris and will be moved to a
 * common place in dolphin.
 *
 * The passed in time value is either a relative time in nanoseconds
 * or an absolute time in milliseconds. Either way it has to be unpacked
 * into suitable seconds and nanoseconds components and stored in the
 * given timespec structure.
 * Given time is a 64-bit value and the time_t used in the timespec is only
 * a signed-32-bit value (except on 64-bit Linux) we have to watch for
 * overflow if times way in the future are given. Further on Solaris versions
 * prior to 10 there is a restriction (see cond_timedwait) that the specified
 * number of seconds, in abstime, is less than current_time  + 100,000,000.
 * As it will be 28 years before "now + 100000000" will overflow we can
 * ignore overflow and just impose a hard-limit on seconds using the value
 * of "now + 100,000,000". This places a limit on the timeout of about 3.17
 * years from "now".
 */
// Unpack 'time' (absolute ms when isAbsolute, else relative ns) into an
// absolute timespec clamped to now + MAX_SECS; see the comment block above.
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert (time > 0, "convertTime");

  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");

  time_t max_secs = now.tv_sec + MAX_SECS;

  if (isAbsolute) {
    jlong secs = time / 1000;
    if (secs > max_secs) {
      absTime->tv_sec = max_secs;
    }
    else {
      absTime->tv_sec = secs;
    }
    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  }
  else {
    jlong secs = time / NANOSECS_PER_SEC;
    if (secs >= MAX_SECS) {
      absTime->tv_sec = max_secs;
      absTime->tv_nsec = 0;
    }
    else {
      absTime->tv_sec = now.tv_sec + secs;
      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
        absTime->tv_nsec -= NANOSECS_PER_SEC;
        ++absTime->tv_sec; // note: this must be <= max_secs
      }
    }
  }
  assert(absTime->tv_sec >= 0, "tv_sec < 0");
  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
}

// JSR166 LockSupport.park(): consume the permit if present, otherwise block
// (optionally until the time computed by unpackTime) inside a safepoint-safe
// region.  May return spuriously; callers loop.
void Parker::park(bool isAbsolute, jlong time) {
  // Ideally we'd do something useful while spinning, such
  // as calling unpackTime().

  // Optional fast-path check:
  // Return immediately if a permit is available.
  // We depend on Atomic::xchg() having full barrier semantics
  // since we are doing a lock-free update to _counter.
  if (Atomic::xchg(0, &_counter) > 0) return;

  // Optional fast-exit: Check interrupt before trying to wait
  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // First, demultiplex/decode time arguments
  timespec absTime;
  if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
    return;
  }
  if (time > 0) {
    // Warning: this code might be exposed to the old Solaris time
    // round-down bugs.  Grep "roundingFix" for details.
    unpackTime(&absTime, isAbsolute, time);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: _mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex.  If safepoints are pending both the
  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if cannot get lock since interference arises from
  // unblocking.  Also. check interrupt before trying wait
  if (Thread::is_interrupted(thread, false) ||
      os::Solaris::mutex_trylock(_mutex) != 0) {
    return;
  }

  int status ;

  if (_counter > 0)  { // no wait needed
    _counter = 0;
    status = os::Solaris::mutex_unlock(_mutex);
    assert (status == 0, "invariant") ;
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other and Java-level accesses.
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
  thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  // Do this the hard way by blocking ...
  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif

  if (time == 0) {
    status = os::Solaris::cond_wait (_cond, _mutex) ;
  } else {
    status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
  }
  // Note that an untimed cond_wait() can sometimes return ETIME on older
  // versions of the Solaris.
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  // restore the signal mask saved above
  thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
#endif
  _counter = 0 ;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock") ;
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other and Java-level accesses.
  OrderAccess::fence();

  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }
}

// JSR166 LockSupport.unpark(): set the permit to 1 and, if the previous
// value indicates a possible waiter, signal the condvar.
void Parker::unpark() {
  int s, status ;
  status = os::Solaris::mutex_lock (_mutex) ;
  assert (status == 0, "invariant") ;
  s = _counter;
  _counter = 1;
  status = os::Solaris::mutex_unlock (_mutex) ;
  assert (status == 0, "invariant") ;

  if (s < 1) {
    // the associated thread may be blocked in park(); wake it
    status = os::Solaris::cond_signal (_cond) ;
    assert (status == 0, "invariant") ;
  }
}

extern char** environ;

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't fork a new process).
// Unlike system(), this function can be called from signal handler. It
// doesn't block SIGINT et al.
int os::fork_and_exec(char* cmd) {
  char * argv[4];
  argv[0] = (char *)"sh";
  argv[1] = (char *)"-c";
  argv[2] = cmd;
  argv[3] = NULL;

  // fork is async-safe, fork1 is not so can't use in signal handler
  pid_t pid;
  Thread* t = ThreadLocalStorage::get_thread_slow();
  if (t != NULL && t->is_inside_signal_handler()) {
    pid = fork();
  } else {
    pid = fork1();
  }

  if (pid < 0) {
    // fork failed
    warning("fork failed: %s", strerror(errno));
    return -1;

  } else if (pid == 0) {
    // child process

    // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
    execve("/usr/bin/sh", argv, environ);

    // execve failed
    _exit(-1);

  } else  {
    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
    // care about the actual exit code, for now.

    int status;

    // Wait for the child process to exit.  This returns immediately if
    // the child has already exited.
    while (waitpid(pid, &status, 0) < 0) {
        switch (errno) {
        case ECHILD: return 0;
        case EINTR: break;
        default: return -1;
        }
    }

    if (WIFEXITED(status)) {
       // The child exited normally; get its exit code.
       return WEXITSTATUS(status);
    } else if (WIFSIGNALED(status)) {
       // The child exited because of a signal
       // The best value to return is 0x80 + signal number,
       // because that is what all Unix shells do, and because
       // it allows callers to distinguish between process exit and
       // process death by signal.
       return 0x80 + WTERMSIG(status);
    } else {
       // Unknown exit code; pass it through
       return status;
    }
  }
}

// is_headless_jre()
//
// Test for the existence of xawt/libmawt.so or libawt_xawt.so
// in order to report if we are running in a headless jre
//
// Since JDK8 xawt/libmawt.so was moved into the same directory
// as libawt.so, and renamed libawt_xawt.so
//
bool os::is_headless_jre() {
    struct stat statbuf;
    char buf[MAXPATHLEN];
    char libmawtpath[MAXPATHLEN];
    const char *xawtstr  = "/xawt/libmawt.so";
    const char *new_xawtstr = "/libawt_xawt.so";
    char *p;

    // Get path to libjvm.so
    os::jvm_path(buf, sizeof(buf));

    // Get rid of libjvm.so
    p = strrchr(buf, '/');
    if (p == NULL) return false;
    else *p = '\0';

    // Get rid of client or server
    p = strrchr(buf, '/');
    if (p == NULL) return false;
    else *p = '\0';

    // check xawt/libmawt.so
    // NOTE(review): strcpy/strcat assume the jvm path plus suffix fits in
    // MAXPATHLEN; jvm_path() is bounded by sizeof(buf) so this holds in
    // practice, but verify if the suffixes grow.
    strcpy(libmawtpath, buf);
    strcat(libmawtpath, xawtstr);
    if (::stat(libmawtpath, &statbuf) == 0) return false;

    // check libawt_xawt.so
    strcpy(libmawtpath, buf);
    strcat(libmawtpath, new_xawtstr);
    if (::stat(libmawtpath, &statbuf) == 0) return false;

    return true;
}

// Interruptible write(2) wrapper: cooperates with Thread.interrupt via the
// INTERRUPTIBLE_* machinery (see os_solaris.inline.hpp).
size_t os::write(int fd, const void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT(::write(fd, buf, nBytes), os::Solaris::clear_interrupted);
}

int os::close(int fd) {
  return ::close(fd);
}

int os::socket_close(int fd) {
  return ::close(fd);
}

// Interruptible recv(2)/send(2) wrappers.
int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  INTERRUPTIBLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
}

int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  INTERRUPTIBLE_RETURN_INT((int)::send(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
}

// Like send(), but only restarts on EINTR - no thread-interrupt handling.
int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
}

// As both poll and select can be interrupted by signals, we have to be
// prepared to restart the system call after updating the timeout, unless
// a poll() is done with timeout == -1, in which case we repeat with this
// "wait forever" value.
// Wait until 'fd' is readable or 'timeout' ms elapse (-1 = wait forever).
// Restarts poll() after EINTR, shrinking the remaining timeout each time.
// Returns the poll() result, or OS_OK once the timeout is exhausted.
int os::timeout(int fd, long timeout) {
  int res;
  struct timeval t;
  julong prevtime, newtime;
  // NOTE(review): historical hack - the address of this null char* is passed
  // as gettimeofday's (unused) timezone argument; confirm before changing.
  static const char* aNull = 0;
  struct pollfd pfd;
  pfd.fd = fd;
  pfd.events = POLLIN;

  gettimeofday(&t, &aNull);
  prevtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec / 1000;

  for(;;) {
    INTERRUPTIBLE_NORESTART(::poll(&pfd, 1, timeout), res, os::Solaris::clear_interrupted);
    if(res == OS_ERR && errno == EINTR) {
        if(timeout != -1) {
          // charge the time already waited against the remaining timeout
          gettimeofday(&t, &aNull);
          newtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec /1000;
          timeout -= newtime - prevtime;
          if(timeout <= 0)
            return OS_OK;
          prevtime = newtime;
        }
    } else return res;
  }
}

// Interruptible connect(2); a restarted connect has different errno
// semantics (EALREADY/EISCONN), which are normalized below.
int os::connect(int fd, struct sockaddr *him, socklen_t len) {
  int _result;
  INTERRUPTIBLE_NORESTART(::connect(fd, him, len), _result,\
                          os::Solaris::clear_interrupted);

  // Depending on when thread interruption is reset, _result could be
  // one of two values when errno == EINTR

  if (((_result == OS_INTRPT) || (_result == OS_ERR))
      && (errno == EINTR)) {
     /* restarting a connect() changes its errno semantics */
     INTERRUPTIBLE(::connect(fd, him, len), _result,\
                   os::Solaris::clear_interrupted);
     /* undo these changes */
     if (_result == OS_ERR) {
       if (errno == EALREADY) {
         errno = EINPROGRESS; /* fall through */
       } else if (errno == EISCONN) {
         errno = 0;
         return OS_OK;
       }
     }
   }
   return _result;
 }

// Interruptible accept(2); rejects negative fds up front.
int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
  if (fd < 0) {
    return OS_ERR;
  }
  INTERRUPTIBLE_RETURN_INT((int)::accept(fd, him, len),\
                           os::Solaris::clear_interrupted);
}

// Interruptible recvfrom(2)/sendto(2) wrappers.
int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
                 sockaddr* from, socklen_t* fromlen) {
  INTERRUPTIBLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen),\
                           os::Solaris::clear_interrupted);
}

int os::sendto(int fd, char* buf, size_t len, uint flags,
               struct sockaddr* to, socklen_t tolen) {
  INTERRUPTIBLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen),\
                           os::Solaris::clear_interrupted);
}

// Stores the number of readable bytes in *pbytes via FIONREAD.
// Returns 1 on success, 0 on failure (the convention JVM_SocketAvailable
// expects), retrying the ioctl on EINTR.
int os::socket_available(int fd, jint *pbytes) {
  if (fd < 0) {
    return OS_OK;
  }
  int ret;
  RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
  // note: ioctl can return 0 when successful, JVM_SocketAvailable
  // is expected to return 0 on failure and 1 on success to the jdk.
  return (ret == OS_ERR) ? 0 : 1;
}

// Interruptible bind(2) wrapper (no EINTR restart).
int os::bind(int fd, struct sockaddr* him, socklen_t len) {
  INTERRUPTIBLE_RETURN_INT_NORESTART(::bind(fd, him, len),\
                                     os::Solaris::clear_interrupted);
}

// Get the default path to the core file
// Returns the length of the string
int os::get_core_path(char* buffer, size_t bufferSize) {
  const char* p = get_current_directory(buffer, bufferSize);

  if (p == NULL) {
    assert(p != NULL, "failed to get current directory");
    return 0;
  }

  return strlen(buffer);
}

#ifndef PRODUCT
// Hook for the internal VM test runner; large-page reservation has no
// Solaris-specific tests.
void TestReserveMemorySpecial_test() {
  // No tests available for this platform
}
#endif