1 /* 2 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // no precompiled headers 26 #include "classfile/classLoader.hpp" 27 #include "classfile/systemDictionary.hpp" 28 #include "classfile/vmSymbols.hpp" 29 #include "code/icBuffer.hpp" 30 #include "code/vtableStubs.hpp" 31 #include "compiler/compileBroker.hpp" 32 #include "compiler/disassembler.hpp" 33 #include "interpreter/interpreter.hpp" 34 #include "jvm_solaris.h" 35 #include "memory/allocation.inline.hpp" 36 #include "memory/filemap.hpp" 37 #include "mutex_solaris.inline.hpp" 38 #include "oops/oop.inline.hpp" 39 #include "os_share_solaris.hpp" 40 #include "os_solaris.inline.hpp" 41 #include "prims/jniFastGetField.hpp" 42 #include "prims/jvm.h" 43 #include "prims/jvm_misc.hpp" 44 #include "runtime/arguments.hpp" 45 #include "runtime/atomic.inline.hpp" 46 #include "runtime/extendedPC.hpp" 47 #include "runtime/globals.hpp" 48 #include "runtime/interfaceSupport.hpp" 49 #include "runtime/java.hpp" 50 #include "runtime/javaCalls.hpp" 51 #include "runtime/mutexLocker.hpp" 52 #include "runtime/objectMonitor.hpp" 53 #include "runtime/orderAccess.inline.hpp" 54 #include "runtime/osThread.hpp" 55 #include "runtime/perfMemory.hpp" 56 #include "runtime/sharedRuntime.hpp" 57 #include "runtime/statSampler.hpp" 58 #include "runtime/stubRoutines.hpp" 59 #include "runtime/thread.inline.hpp" 60 #include "runtime/threadCritical.hpp" 61 #include "runtime/timer.hpp" 62 #include "runtime/vm_version.hpp" 63 #include "semaphore_posix.hpp" 64 #include "services/attachListener.hpp" 65 #include "services/memTracker.hpp" 66 #include "services/runtimeService.hpp" 67 #include "utilities/decoder.hpp" 68 #include "utilities/defaultStream.hpp" 69 #include "utilities/events.hpp" 70 #include "utilities/growableArray.hpp" 71 #include "utilities/vmError.hpp" 72 73 // put OS-includes here 74 # include <dlfcn.h> 75 # include <errno.h> 76 # include <exception> 77 # include <link.h> 78 # include <poll.h> 79 # include <pthread.h> 80 # include <pwd.h> 81 # include <schedctl.h> 82 # include 
<setjmp.h> 83 # include <signal.h> 84 # include <stdio.h> 85 # include <alloca.h> 86 # include <sys/filio.h> 87 # include <sys/ipc.h> 88 # include <sys/lwp.h> 89 # include <sys/machelf.h> // for elf Sym structure used by dladdr1 90 # include <sys/mman.h> 91 # include <sys/processor.h> 92 # include <sys/procset.h> 93 # include <sys/pset.h> 94 # include <sys/resource.h> 95 # include <sys/shm.h> 96 # include <sys/socket.h> 97 # include <sys/stat.h> 98 # include <sys/systeminfo.h> 99 # include <sys/time.h> 100 # include <sys/times.h> 101 # include <sys/types.h> 102 # include <sys/wait.h> 103 # include <sys/utsname.h> 104 # include <thread.h> 105 # include <unistd.h> 106 # include <sys/priocntl.h> 107 # include <sys/rtpriocntl.h> 108 # include <sys/tspriocntl.h> 109 # include <sys/iapriocntl.h> 110 # include <sys/fxpriocntl.h> 111 # include <sys/loadavg.h> 112 # include <string.h> 113 # include <stdio.h> 114 115 # define _STRUCTURED_PROC 1 // this gets us the new structured proc interfaces of 5.6 & later 116 # include <sys/procfs.h> // see comment in <sys/procfs.h> 117 118 #define MAX_PATH (2 * K) 119 120 // for timer info max values which include all bits 121 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF) 122 123 124 // Here are some liblgrp types from sys/lgrp_user.h to be able to 125 // compile on older systems without this header file. 
126 127 #ifndef MADV_ACCESS_LWP 128 #define MADV_ACCESS_LWP 7 /* next LWP to access heavily */ 129 #endif 130 #ifndef MADV_ACCESS_MANY 131 #define MADV_ACCESS_MANY 8 /* many processes to access heavily */ 132 #endif 133 134 #ifndef LGRP_RSRC_CPU 135 #define LGRP_RSRC_CPU 0 /* CPU resources */ 136 #endif 137 #ifndef LGRP_RSRC_MEM 138 #define LGRP_RSRC_MEM 1 /* memory resources */ 139 #endif 140 141 // see thr_setprio(3T) for the basis of these numbers 142 #define MinimumPriority 0 143 #define NormalPriority 64 144 #define MaximumPriority 127 145 146 // Values for ThreadPriorityPolicy == 1 147 int prio_policy1[CriticalPriority+1] = { 148 -99999, 0, 16, 32, 48, 64, 149 80, 96, 112, 124, 127, 127 }; 150 151 // System parameters used internally 152 static clock_t clock_tics_per_sec = 100; 153 154 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+) 155 static bool enabled_extended_FILE_stdio = false; 156 157 // For diagnostics to print a message once. see run_periodic_checks 158 static bool check_addr0_done = false; 159 static sigset_t check_signal_done; 160 static bool check_signals = true; 161 162 address os::Solaris::handler_start; // start pc of thr_sighndlrinfo 163 address os::Solaris::handler_end; // end pc of thr_sighndlrinfo 164 165 address os::Solaris::_main_stack_base = NULL; // 4352906 workaround 166 167 168 // "default" initializers for missing libc APIs 169 extern "C" { 170 static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; } 171 static int lwp_mutex_destroy(mutex_t *mx) { return 0; } 172 173 static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; } 174 static int lwp_cond_destroy(cond_t *cv) { return 0; } 175 } 176 177 // "default" initializers for pthread-based synchronization 178 extern "C" { 179 static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; } 180 static int 
pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; } 181 } 182 183 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time); 184 185 static inline size_t adjust_stack_size(address base, size_t size) { 186 if ((ssize_t)size < 0) { 187 // 4759953: Compensate for ridiculous stack size. 188 size = max_intx; 189 } 190 if (size > (size_t)base) { 191 // 4812466: Make sure size doesn't allow the stack to wrap the address space. 192 size = (size_t)base; 193 } 194 return size; 195 } 196 197 static inline stack_t get_stack_info() { 198 stack_t st; 199 int retval = thr_stksegment(&st); 200 st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size); 201 assert(retval == 0, "incorrect return value from thr_stksegment"); 202 assert((address)&st < (address)st.ss_sp, "Invalid stack base returned"); 203 assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned"); 204 return st; 205 } 206 207 address os::current_stack_base() { 208 int r = thr_main(); 209 guarantee(r == 0 || r == 1, "CR6501650 or CR6493689"); 210 bool is_primordial_thread = r; 211 212 // Workaround 4352906, avoid calls to thr_stksegment by 213 // thr_main after the first one (it looks like we trash 214 // some data, causing the value for ss_sp to be incorrect). 
215 if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) { 216 stack_t st = get_stack_info(); 217 if (is_primordial_thread) { 218 // cache initial value of stack base 219 os::Solaris::_main_stack_base = (address)st.ss_sp; 220 } 221 return (address)st.ss_sp; 222 } else { 223 guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base"); 224 return os::Solaris::_main_stack_base; 225 } 226 } 227 228 size_t os::current_stack_size() { 229 size_t size; 230 231 int r = thr_main(); 232 guarantee(r == 0 || r == 1, "CR6501650 or CR6493689"); 233 if (!r) { 234 size = get_stack_info().ss_size; 235 } else { 236 struct rlimit limits; 237 getrlimit(RLIMIT_STACK, &limits); 238 size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur); 239 } 240 // base may not be page aligned 241 address base = current_stack_base(); 242 address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());; 243 return (size_t)(base - bottom); 244 } 245 246 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 247 return localtime_r(clock, res); 248 } 249 250 void os::Solaris::try_enable_extended_io() { 251 typedef int (*enable_extended_FILE_stdio_t)(int, int); 252 253 if (!UseExtendedFileIO) { 254 return; 255 } 256 257 enable_extended_FILE_stdio_t enabler = 258 (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT, 259 "enable_extended_FILE_stdio"); 260 if (enabler) { 261 enabler(-1, -1); 262 } 263 } 264 265 static int _processors_online = 0; 266 267 jint os::Solaris::_os_thread_limit = 0; 268 volatile jint os::Solaris::_os_thread_count = 0; 269 270 julong os::available_memory() { 271 return Solaris::available_memory(); 272 } 273 274 julong os::Solaris::available_memory() { 275 return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size(); 276 } 277 278 julong os::Solaris::_physical_memory = 0; 279 280 julong os::physical_memory() { 281 return Solaris::physical_memory(); 282 } 283 284 static hrtime_t 
first_hrtime = 0; 285 static const hrtime_t hrtime_hz = 1000*1000*1000; 286 static volatile hrtime_t max_hrtime = 0; 287 288 289 void os::Solaris::initialize_system_info() { 290 set_processor_count(sysconf(_SC_NPROCESSORS_CONF)); 291 _processors_online = sysconf(_SC_NPROCESSORS_ONLN); 292 _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * 293 (julong)sysconf(_SC_PAGESIZE); 294 } 295 296 int os::active_processor_count() { 297 int online_cpus = sysconf(_SC_NPROCESSORS_ONLN); 298 pid_t pid = getpid(); 299 psetid_t pset = PS_NONE; 300 // Are we running in a processor set or is there any processor set around? 301 if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) { 302 uint_t pset_cpus; 303 // Query the number of cpus available to us. 304 if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) { 305 assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check"); 306 _processors_online = pset_cpus; 307 return pset_cpus; 308 } 309 } 310 // Otherwise return number of online cpus 311 return online_cpus; 312 } 313 314 static bool find_processors_in_pset(psetid_t pset, 315 processorid_t** id_array, 316 uint_t* id_length) { 317 bool result = false; 318 // Find the number of processors in the processor set. 319 if (pset_info(pset, NULL, id_length, NULL) == 0) { 320 // Make up an array to hold their ids. 321 *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal); 322 // Fill in the array with their processor ids. 323 if (pset_info(pset, NULL, id_length, *id_array) == 0) { 324 result = true; 325 } 326 } 327 return result; 328 } 329 330 // Callers of find_processors_online() must tolerate imprecise results -- 331 // the system configuration can change asynchronously because of DR 332 // or explicit psradm operations. 333 // 334 // We also need to take care that the loop (below) terminates as the 335 // number of processors online can change between the _SC_NPROCESSORS_ONLN 336 // request and the loop that builds the list of processor ids. 
Unfortunately 337 // there's no reliable way to determine the maximum valid processor id, 338 // so we use a manifest constant, MAX_PROCESSOR_ID, instead. See p_online 339 // man pages, which claim the processor id set is "sparse, but 340 // not too sparse". MAX_PROCESSOR_ID is used to ensure that we eventually 341 // exit the loop. 342 // 343 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's 344 // not available on S8.0. 345 346 static bool find_processors_online(processorid_t** id_array, 347 uint* id_length) { 348 const processorid_t MAX_PROCESSOR_ID = 100000; 349 // Find the number of processors online. 350 *id_length = sysconf(_SC_NPROCESSORS_ONLN); 351 // Make up an array to hold their ids. 352 *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal); 353 // Processors need not be numbered consecutively. 354 long found = 0; 355 processorid_t next = 0; 356 while (found < *id_length && next < MAX_PROCESSOR_ID) { 357 processor_info_t info; 358 if (processor_info(next, &info) == 0) { 359 // NB, PI_NOINTR processors are effectively online ... 360 if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) { 361 (*id_array)[found] = next; 362 found += 1; 363 } 364 } 365 next += 1; 366 } 367 if (found < *id_length) { 368 // The loop above didn't identify the expected number of processors. 369 // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN) 370 // and re-running the loop, above, but there's no guarantee of progress 371 // if the system configuration is in flux. Instead, we just return what 372 // we've got. Note that in the worst case find_processors_online() could 373 // return an empty set. (As a fall-back in the case of the empty set we 374 // could just return the ID of the current processor). 
375 *id_length = found; 376 } 377 378 return true; 379 } 380 381 static bool assign_distribution(processorid_t* id_array, 382 uint id_length, 383 uint* distribution, 384 uint distribution_length) { 385 // We assume we can assign processorid_t's to uint's. 386 assert(sizeof(processorid_t) == sizeof(uint), 387 "can't convert processorid_t to uint"); 388 // Quick check to see if we won't succeed. 389 if (id_length < distribution_length) { 390 return false; 391 } 392 // Assign processor ids to the distribution. 393 // Try to shuffle processors to distribute work across boards, 394 // assuming 4 processors per board. 395 const uint processors_per_board = ProcessDistributionStride; 396 // Find the maximum processor id. 397 processorid_t max_id = 0; 398 for (uint m = 0; m < id_length; m += 1) { 399 max_id = MAX2(max_id, id_array[m]); 400 } 401 // The next id, to limit loops. 402 const processorid_t limit_id = max_id + 1; 403 // Make up markers for available processors. 404 bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal); 405 for (uint c = 0; c < limit_id; c += 1) { 406 available_id[c] = false; 407 } 408 for (uint a = 0; a < id_length; a += 1) { 409 available_id[id_array[a]] = true; 410 } 411 // Step by "boards", then by "slot", copying to "assigned". 412 // NEEDS_CLEANUP: The assignment of processors should be stateful, 413 // remembering which processors have been assigned by 414 // previous calls, etc., so as to distribute several 415 // independent calls of this method. What we'd like is 416 // It would be nice to have an API that let us ask 417 // how many processes are bound to a processor, 418 // but we don't have that, either. 419 // In the short term, "board" is static so that 420 // subsequent distributions don't all start at board 0. 421 static uint board = 0; 422 uint assigned = 0; 423 // Until we've found enough processors .... 424 while (assigned < distribution_length) { 425 // ... find the next available processor in the board. 
426 for (uint slot = 0; slot < processors_per_board; slot += 1) { 427 uint try_id = board * processors_per_board + slot; 428 if ((try_id < limit_id) && (available_id[try_id] == true)) { 429 distribution[assigned] = try_id; 430 available_id[try_id] = false; 431 assigned += 1; 432 break; 433 } 434 } 435 board += 1; 436 if (board * processors_per_board + 0 >= limit_id) { 437 board = 0; 438 } 439 } 440 if (available_id != NULL) { 441 FREE_C_HEAP_ARRAY(bool, available_id); 442 } 443 return true; 444 } 445 446 void os::set_native_thread_name(const char *name) { 447 // Not yet implemented. 448 return; 449 } 450 451 bool os::distribute_processes(uint length, uint* distribution) { 452 bool result = false; 453 // Find the processor id's of all the available CPUs. 454 processorid_t* id_array = NULL; 455 uint id_length = 0; 456 // There are some races between querying information and using it, 457 // since processor sets can change dynamically. 458 psetid_t pset = PS_NONE; 459 // Are we running in a processor set? 460 if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) { 461 result = find_processors_in_pset(pset, &id_array, &id_length); 462 } else { 463 result = find_processors_online(&id_array, &id_length); 464 } 465 if (result == true) { 466 if (id_length >= length) { 467 result = assign_distribution(id_array, id_length, distribution, length); 468 } else { 469 result = false; 470 } 471 } 472 if (id_array != NULL) { 473 FREE_C_HEAP_ARRAY(processorid_t, id_array); 474 } 475 return result; 476 } 477 478 bool os::bind_to_processor(uint processor_id) { 479 // We assume that a processorid_t can be stored in a uint. 480 assert(sizeof(uint) == sizeof(processorid_t), 481 "can't convert uint to processorid_t"); 482 int bind_result = 483 processor_bind(P_LWPID, // bind LWP. 484 P_MYID, // bind current LWP. 485 (processorid_t) processor_id, // id. 486 NULL); // don't return old binding. 
487 return (bind_result == 0); 488 } 489 490 // Return true if user is running as root. 491 492 bool os::have_special_privileges() { 493 static bool init = false; 494 static bool privileges = false; 495 if (!init) { 496 privileges = (getuid() != geteuid()) || (getgid() != getegid()); 497 init = true; 498 } 499 return privileges; 500 } 501 502 503 void os::init_system_properties_values() { 504 // The next steps are taken in the product version: 505 // 506 // Obtain the JAVA_HOME value from the location of libjvm.so. 507 // This library should be located at: 508 // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so. 509 // 510 // If "/jre/lib/" appears at the right place in the path, then we 511 // assume libjvm.so is installed in a JDK and we use this path. 512 // 513 // Otherwise exit with message: "Could not create the Java virtual machine." 514 // 515 // The following extra steps are taken in the debugging version: 516 // 517 // If "/jre/lib/" does NOT appear at the right place in the path 518 // instead of exit check for $JAVA_HOME environment variable. 519 // 520 // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>, 521 // then we append a fake suffix "hotspot/libjvm.so" to this path so 522 // it looks like libjvm.so is installed there 523 // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so. 524 // 525 // Otherwise exit. 526 // 527 // Important note: if the location of libjvm.so changes this 528 // code needs to be changed accordingly. 529 530 // Base path of extensions installed on the system. 531 #define SYS_EXT_DIR "/usr/jdk/packages" 532 #define EXTENSIONS_DIR "/lib/ext" 533 534 char cpu_arch[12]; 535 // Buffer that fits several sprintfs. 536 // Note that the space for the colon and the trailing null are provided 537 // by the nulls included by the sizeof operator. 538 const size_t bufsize = 539 MAX3((size_t)MAXPATHLEN, // For dll_dir & friends. 
540 sizeof(SYS_EXT_DIR) + sizeof("/lib/") + strlen(cpu_arch), // invariant ld_library_path 541 (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR)); // extensions dir 542 char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal); 543 544 // sysclasspath, java_home, dll_dir 545 { 546 char *pslash; 547 os::jvm_path(buf, bufsize); 548 549 // Found the full path to libjvm.so. 550 // Now cut the path to <java_home>/jre if we can. 551 *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so. 552 pslash = strrchr(buf, '/'); 553 if (pslash != NULL) { 554 *pslash = '\0'; // Get rid of /{client|server|hotspot}. 555 } 556 Arguments::set_dll_dir(buf); 557 558 if (pslash != NULL) { 559 pslash = strrchr(buf, '/'); 560 if (pslash != NULL) { 561 *pslash = '\0'; // Get rid of /<arch>. 562 pslash = strrchr(buf, '/'); 563 if (pslash != NULL) { 564 *pslash = '\0'; // Get rid of /lib. 565 } 566 } 567 } 568 Arguments::set_java_home(buf); 569 set_boot_path('/', ':'); 570 } 571 572 // Where to look for native libraries. 573 { 574 // Use dlinfo() to determine the correct java.library.path. 575 // 576 // If we're launched by the Java launcher, and the user 577 // does not set java.library.path explicitly on the commandline, 578 // the Java launcher sets LD_LIBRARY_PATH for us and unsets 579 // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64. In this case 580 // dlinfo returns LD_LIBRARY_PATH + crle settings (including 581 // /usr/lib), which is exactly what we want. 582 // 583 // If the user does set java.library.path, it completely 584 // overwrites this setting, and always has. 585 // 586 // If we're not launched by the Java launcher, we may 587 // get here with any/all of the LD_LIBRARY_PATH[_32|64] 588 // settings. Again, dlinfo does exactly what we want. 589 590 Dl_serinfo info_sz, *info = &info_sz; 591 Dl_serpath *path; 592 char *library_path; 593 char *common_path = buf; 594 595 // Determine search path count and required buffer size. 
596 if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) { 597 FREE_C_HEAP_ARRAY(char, buf); 598 vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror()); 599 } 600 601 // Allocate new buffer and initialize. 602 info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal); 603 info->dls_size = info_sz.dls_size; 604 info->dls_cnt = info_sz.dls_cnt; 605 606 // Obtain search path information. 607 if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) { 608 FREE_C_HEAP_ARRAY(char, buf); 609 FREE_C_HEAP_ARRAY(char, info); 610 vm_exit_during_initialization("dlinfo SERINFO request", dlerror()); 611 } 612 613 path = &info->dls_serpath[0]; 614 615 // Note: Due to a legacy implementation, most of the library path 616 // is set in the launcher. This was to accomodate linking restrictions 617 // on legacy Solaris implementations (which are no longer supported). 618 // Eventually, all the library path setting will be done here. 619 // 620 // However, to prevent the proliferation of improperly built native 621 // libraries, the new path component /usr/jdk/packages is added here. 622 623 // Determine the actual CPU architecture. 624 sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch)); 625 #ifdef _LP64 626 // If we are a 64-bit vm, perform the following translations: 627 // sparc -> sparcv9 628 // i386 -> amd64 629 if (strcmp(cpu_arch, "sparc") == 0) { 630 strcat(cpu_arch, "v9"); 631 } else if (strcmp(cpu_arch, "i386") == 0) { 632 strcpy(cpu_arch, "amd64"); 633 } 634 #endif 635 636 // Construct the invariant part of ld_library_path. 637 sprintf(common_path, SYS_EXT_DIR "/lib/%s", cpu_arch); 638 639 // Struct size is more than sufficient for the path components obtained 640 // through the dlinfo() call, so only add additional space for the path 641 // components explicitly added here. 
642 size_t library_path_size = info->dls_size + strlen(common_path); 643 library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal); 644 library_path[0] = '\0'; 645 646 // Construct the desired Java library path from the linker's library 647 // search path. 648 // 649 // For compatibility, it is optimal that we insert the additional path 650 // components specific to the Java VM after those components specified 651 // in LD_LIBRARY_PATH (if any) but before those added by the ld.so 652 // infrastructure. 653 if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it. 654 strcpy(library_path, common_path); 655 } else { 656 int inserted = 0; 657 int i; 658 for (i = 0; i < info->dls_cnt; i++, path++) { 659 uint_t flags = path->dls_flags & LA_SER_MASK; 660 if (((flags & LA_SER_LIBPATH) == 0) && !inserted) { 661 strcat(library_path, common_path); 662 strcat(library_path, os::path_separator()); 663 inserted = 1; 664 } 665 strcat(library_path, path->dls_name); 666 strcat(library_path, os::path_separator()); 667 } 668 // Eliminate trailing path separator. 669 library_path[strlen(library_path)-1] = '\0'; 670 } 671 672 // happens before argument parsing - can't use a trace flag 673 // tty->print_raw("init_system_properties_values: native lib path: "); 674 // tty->print_raw_cr(library_path); 675 676 // Callee copies into its own buffer. 677 Arguments::set_library_path(library_path); 678 679 FREE_C_HEAP_ARRAY(char, library_path); 680 FREE_C_HEAP_ARRAY(char, info); 681 } 682 683 // Extensions directories. 
684 sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home()); 685 Arguments::set_ext_dirs(buf); 686 687 FREE_C_HEAP_ARRAY(char, buf); 688 689 #undef SYS_EXT_DIR 690 #undef EXTENSIONS_DIR 691 } 692 693 void os::breakpoint() { 694 BREAKPOINT; 695 } 696 697 bool os::obsolete_option(const JavaVMOption *option) { 698 if (!strncmp(option->optionString, "-Xt", 3)) { 699 return true; 700 } else if (!strncmp(option->optionString, "-Xtm", 4)) { 701 return true; 702 } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) { 703 return true; 704 } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) { 705 return true; 706 } 707 return false; 708 } 709 710 bool os::Solaris::valid_stack_address(Thread* thread, address sp) { 711 address stackStart = (address)thread->stack_base(); 712 address stackEnd = (address)(stackStart - (address)thread->stack_size()); 713 if (sp < stackStart && sp >= stackEnd) return true; 714 return false; 715 } 716 717 extern "C" void breakpoint() { 718 // use debugger to set breakpoint here 719 } 720 721 static thread_t main_thread; 722 723 // Thread start routine for all new Java threads 724 extern "C" void* java_start(void* thread_addr) { 725 // Try to randomize the cache line index of hot stack frames. 726 // This helps when threads of the same stack traces evict each other's 727 // cache lines. The threads can be either from the same JVM instance, or 728 // from different JVM instances. The benefit is especially true for 729 // processors with hyperthreading technology. 
730 static int counter = 0; 731 int pid = os::current_process_id(); 732 alloca(((pid ^ counter++) & 7) * 128); 733 734 int prio; 735 Thread* thread = (Thread*)thread_addr; 736 OSThread* osthr = thread->osthread(); 737 738 osthr->set_lwp_id(_lwp_self()); // Store lwp in case we are bound 739 thread->_schedctl = (void *) schedctl_init(); 740 741 if (UseNUMA) { 742 int lgrp_id = os::numa_get_group_id(); 743 if (lgrp_id != -1) { 744 thread->set_lgrp_id(lgrp_id); 745 } 746 } 747 748 // If the creator called set priority before we started, 749 // we need to call set_native_priority now that we have an lwp. 750 // We used to get the priority from thr_getprio (we called 751 // thr_setprio way back in create_thread) and pass it to 752 // set_native_priority, but Solaris scales the priority 753 // in java_to_os_priority, so when we read it back here, 754 // we pass trash to set_native_priority instead of what's 755 // in java_to_os_priority. So we save the native priority 756 // in the osThread and recall it here. 
757 758 if (osthr->thread_id() != -1) { 759 if (UseThreadPriorities) { 760 int prio = osthr->native_priority(); 761 if (ThreadPriorityVerbose) { 762 tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is " 763 INTPTR_FORMAT ", setting priority: %d\n", 764 osthr->thread_id(), osthr->lwp_id(), prio); 765 } 766 os::set_native_priority(thread, prio); 767 } 768 } else if (ThreadPriorityVerbose) { 769 warning("Can't set priority in _start routine, thread id hasn't been set\n"); 770 } 771 772 assert(osthr->get_state() == RUNNABLE, "invalid os thread state"); 773 774 // initialize signal mask for this thread 775 os::Solaris::hotspot_sigmask(thread); 776 777 thread->run(); 778 779 // One less thread is executing 780 // When the VMThread gets here, the main thread may have already exited 781 // which frees the CodeHeap containing the Atomic::dec code 782 if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) { 783 Atomic::dec(&os::Solaris::_os_thread_count); 784 } 785 786 if (UseDetachedThreads) { 787 thr_exit(NULL); 788 ShouldNotReachHere(); 789 } 790 return NULL; 791 } 792 793 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) { 794 // Allocate the OSThread object 795 OSThread* osthread = new OSThread(NULL, NULL); 796 if (osthread == NULL) return NULL; 797 798 // Store info on the Solaris thread into the OSThread 799 osthread->set_thread_id(thread_id); 800 osthread->set_lwp_id(_lwp_self()); 801 thread->_schedctl = (void *) schedctl_init(); 802 803 if (UseNUMA) { 804 int lgrp_id = os::numa_get_group_id(); 805 if (lgrp_id != -1) { 806 thread->set_lgrp_id(lgrp_id); 807 } 808 } 809 810 if (ThreadPriorityVerbose) { 811 tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n", 812 osthread->thread_id(), osthread->lwp_id()); 813 } 814 815 // Initial thread state is INITIALIZED, not SUSPENDED 816 osthread->set_state(INITIALIZED); 817 818 return osthread; 819 } 820 821 void os::Solaris::hotspot_sigmask(Thread* 
thread) { 822 //Save caller's signal mask 823 sigset_t sigmask; 824 thr_sigsetmask(SIG_SETMASK, NULL, &sigmask); 825 OSThread *osthread = thread->osthread(); 826 osthread->set_caller_sigmask(sigmask); 827 828 thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL); 829 if (!ReduceSignalUsage) { 830 if (thread->is_VM_thread()) { 831 // Only the VM thread handles BREAK_SIGNAL ... 832 thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL); 833 } else { 834 // ... all other threads block BREAK_SIGNAL 835 assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked"); 836 thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL); 837 } 838 } 839 } 840 841 bool os::create_attached_thread(JavaThread* thread) { 842 #ifdef ASSERT 843 thread->verify_not_published(); 844 #endif 845 OSThread* osthread = create_os_thread(thread, thr_self()); 846 if (osthread == NULL) { 847 return false; 848 } 849 850 // Initial thread state is RUNNABLE 851 osthread->set_state(RUNNABLE); 852 thread->set_osthread(osthread); 853 854 // initialize signal mask for this thread 855 // and save the caller's signal mask 856 os::Solaris::hotspot_sigmask(thread); 857 858 return true; 859 } 860 861 bool os::create_main_thread(JavaThread* thread) { 862 #ifdef ASSERT 863 thread->verify_not_published(); 864 #endif 865 if (_starting_thread == NULL) { 866 _starting_thread = create_os_thread(thread, main_thread); 867 if (_starting_thread == NULL) { 868 return false; 869 } 870 } 871 872 // The primodial thread is runnable from the start 873 _starting_thread->set_state(RUNNABLE); 874 875 thread->set_osthread(_starting_thread); 876 877 // initialize signal mask for this thread 878 // and save the caller's signal mask 879 os::Solaris::hotspot_sigmask(thread); 880 881 return true; 882 } 883 884 885 bool os::create_thread(Thread* thread, ThreadType thr_type, 886 size_t stack_size) { 887 // Allocate the OSThread object 888 OSThread* osthread = new OSThread(NULL, NULL); 889 if (osthread == NULL) { 890 return false; 
891 } 892 893 if (ThreadPriorityVerbose) { 894 char *thrtyp; 895 switch (thr_type) { 896 case vm_thread: 897 thrtyp = (char *)"vm"; 898 break; 899 case cgc_thread: 900 thrtyp = (char *)"cgc"; 901 break; 902 case pgc_thread: 903 thrtyp = (char *)"pgc"; 904 break; 905 case java_thread: 906 thrtyp = (char *)"java"; 907 break; 908 case compiler_thread: 909 thrtyp = (char *)"compiler"; 910 break; 911 case watcher_thread: 912 thrtyp = (char *)"watcher"; 913 break; 914 default: 915 thrtyp = (char *)"unknown"; 916 break; 917 } 918 tty->print_cr("In create_thread, creating a %s thread\n", thrtyp); 919 } 920 921 // Calculate stack size if it's not specified by caller. 922 if (stack_size == 0) { 923 // The default stack size 1M (2M for LP64). 924 stack_size = (BytesPerWord >> 2) * K * K; 925 926 switch (thr_type) { 927 case os::java_thread: 928 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss 929 if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create(); 930 break; 931 case os::compiler_thread: 932 if (CompilerThreadStackSize > 0) { 933 stack_size = (size_t)(CompilerThreadStackSize * K); 934 break; 935 } // else fall through: 936 // use VMThreadStackSize if CompilerThreadStackSize is not defined 937 case os::vm_thread: 938 case os::pgc_thread: 939 case os::cgc_thread: 940 case os::watcher_thread: 941 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); 942 break; 943 } 944 } 945 stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed); 946 947 // Initial state is ALLOCATED but not INITIALIZED 948 osthread->set_state(ALLOCATED); 949 950 if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) { 951 // We got lots of threads. Check if we still have some address space left. 952 // Need to be at least 5Mb of unreserved address space. We do check by 953 // trying to reserve some. 
    // NOTE(review): the comment above says "at least 5Mb" but the probe
    // reserves 20MB — presumably deliberately conservative; confirm.
    const size_t VirtualMemoryBangSize = 20*K*K;
    char* mem = os::reserve_memory(VirtualMemoryBangSize);
    if (mem == NULL) {
      delete osthread;
      return false;
    } else {
      // Release the memory again
      os::release_memory(mem, VirtualMemoryBangSize);
    }
  }

  // Setup osthread because the child thread may need it.
  thread->set_osthread(osthread);

  // Create the Solaris thread
  thread_t tid = 0;
  long flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED;
  int status;

  // Mark that we don't have an lwp or thread id yet.
  // In case we attempt to set the priority before the thread starts.
  osthread->set_lwp_id(-1);
  osthread->set_thread_id(-1);

  status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
  if (status != 0) {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("os::create_thread");
    }
    thread->set_osthread(NULL);
    // Need to clean up stuff we've allocated so far
    delete osthread;
    return false;
  }

  Atomic::inc(&os::Solaris::_os_thread_count);

  // Store info on the Solaris thread into the OSThread
  osthread->set_thread_id(tid);

  // Remember that we created this thread so we can set priority on it
  osthread->set_vm_created();

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  return true;
}

// defined for >= Solaris 10.
// This allows builds on earlier versions
// of Solaris to take advantage of the newly reserved Solaris JVM signals
// With SIGJVM1, SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2
// and -XX:+UseAltSigs does nothing since these should have no conflict
//
#if !defined(SIGJVM1)
#define SIGJVM1 39
#define SIGJVM2 40
#endif

// Guard (debug builds only) against double initialization of the sets below.
debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
int os::Solaris::_SIGasync = ASYNC_SIGNAL;

// Returns true iff the current disposition of sig is SIG_IGN.
bool os::Solaris::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  // Prefer sa_sigaction when set; otherwise fall back to sa_handler.
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
                                 : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
    return true;
  } else {
    return false;
  }
}

// Note: SIGRTMIN is a macro that calls sysconf() so it will
// dynamically detect SIGRTMIN value for the system at runtime, not buildtime
static bool isJVM1available() {
  return SIGJVM1 < SIGRTMIN;
}

// Populate the static signal sets used by hotspot_sigmask() and friends.
// Must run while the VM is still single-threaded.
void os::Solaris::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
  // by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
1047 // Programs embedding the VM that want to use the above signals for their 1048 // own purposes must, at this time, use the "-Xrs" option to prevent 1049 // interference with shutdown hooks and BREAK_SIGNAL thread dumping. 1050 // (See bug 4345157, and other related bugs). 1051 // In reality, though, unblocking these signals is really a nop, since 1052 // these signals are not blocked by default. 1053 sigemptyset(&unblocked_sigs); 1054 sigemptyset(&allowdebug_blocked_sigs); 1055 sigaddset(&unblocked_sigs, SIGILL); 1056 sigaddset(&unblocked_sigs, SIGSEGV); 1057 sigaddset(&unblocked_sigs, SIGBUS); 1058 sigaddset(&unblocked_sigs, SIGFPE); 1059 1060 if (isJVM1available) { 1061 os::Solaris::set_SIGinterrupt(SIGJVM1); 1062 os::Solaris::set_SIGasync(SIGJVM2); 1063 } else if (UseAltSigs) { 1064 os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL); 1065 os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL); 1066 } else { 1067 os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL); 1068 os::Solaris::set_SIGasync(ASYNC_SIGNAL); 1069 } 1070 1071 sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt()); 1072 sigaddset(&unblocked_sigs, os::Solaris::SIGasync()); 1073 1074 if (!ReduceSignalUsage) { 1075 if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) { 1076 sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL); 1077 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL); 1078 } 1079 if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) { 1080 sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL); 1081 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL); 1082 } 1083 if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) { 1084 sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL); 1085 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL); 1086 } 1087 } 1088 // Fill in signals that are blocked by all but the VM thread. 
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage) {
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  }
  debug_only(signal_sets_initialized = true);

  // For diagnostics only used in run_periodic_checks
  sigemptyset(&check_signal_done);
}

// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Solaris::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}

// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Solaris::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}

// These are signals that are blocked during cond_wait to allow debugger in
sigset_t* os::Solaris::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}


// std::terminate replacement: route uncaught C++ exceptions through the
// VM error reporter (hs_err file) instead of a silent process abort.
void _handle_uncaught_cxx_exception() {
  VMError err("An uncaught C++ exception");
  err.report_and_die();
}


// First crack at OS-specific initialization, from inside the new thread.
void os::initialize_thread(Thread* thr) {
  // thr_main() == 1 means we are the primordial thread; for it we must
  // (re)compute a usable stack size below.
  int r = thr_main();
  guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
  if (r) {
    JavaThread* jt = (JavaThread *)thr;
    assert(jt != NULL, "Sanity check");
    size_t stack_size;
    address base = jt->stack_base();
    if (Arguments::created_by_java_launcher()) {
      // Use 2MB to allow for Solaris 7 64 bit mode.
      stack_size = JavaThread::stack_size_at_create() == 0
        ? 2048*K : JavaThread::stack_size_at_create();

      // There are rare cases when we may have already used more than
      // the basic stack size allotment before this method is invoked.
      // Attempt to allow for a normally sized java_stack.
      // Distance already consumed between stack base and a fresh local.
      size_t current_stack_offset = (size_t)(base - (address)&stack_size);
      stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
    } else {
      // 6269555: If we were not created by a Java launcher, i.e. if we are
      // running embedded in a native application, treat the primordial thread
      // as much like a native attached thread as possible. This means using
      // the current stack size from thr_stksegment(), unless it is too large
      // to reliably setup guard pages. A reasonable max size is 8MB.
      size_t current_size = current_stack_size();
      // This should never happen, but just in case....
      if (current_size == 0) current_size = 2 * K * K;
      stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
    }
    // Round the bottom up to a page boundary and recompute the usable size.
    address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());;
    stack_size = (size_t)(base - bottom);

    assert(stack_size > 0, "Stack size calculation problem");

    if (stack_size > jt->stack_size()) {
#ifndef PRODUCT
      struct rlimit limits;
      getrlimit(RLIMIT_STACK, &limits);
      size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
      assert(size >= jt->stack_size(), "Stack size problem in main thread");
#endif
      // NOTE(review): %d with a size_t argument — relies on K-divided values
      // fitting in an int; confirm before changing format.
      tty->print_cr("Stack size of %d Kb exceeds current limit of %d Kb.\n"
                    "(Stack sizes are rounded up to a multiple of the system page size.)\n"
                    "See limit(1) to increase the stack size limit.",
                    stack_size / K, jt->stack_size() / K);
      vm_exit(1);
    }
    assert(jt->stack_size() >= stack_size,
           "Attempt to map more stack than was allocated");
    jt->set_stack_size(stack_size);
  }

  // With the T2 libthread (T1 is no longer supported) threads are always bound
  // and we use stackbanging in all cases.

  os::Solaris::init_thread_fpu_state();
  // Catch uncaught C++ exceptions in the VM error reporter.
  std::set_terminate(_handle_uncaught_cxx_exception);
}



// Free Solaris resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "os::free_thread but osthread not set");


  // We are told to free resources of the argument thread,
  // but we can only really operate on the current thread.
  // The main thread must take the VMThread down synchronously
  // before the main thread exits and frees up CodeHeap
  guarantee((Thread::current()->osthread() == osthread
             || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask
    sigset_t sigmask = osthread->caller_sigmask();
    thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
  }
  delete osthread;
}

// Resume a thread created THR_SUSPENDED by os::create_thread.
void os::pd_start_thread(Thread* thread) {
  int status = thr_continue(thread->osthread()->thread_id());
  assert_status(status == 0, status, "thr_continue failed");
}


intx os::current_thread_id() {
  return (intx)thr_self();
}

static pid_t _initial_pid = 0;

// Returns the cached initial pid if set, otherwise the live getpid().
int os::current_process_id() {
  return (int)(_initial_pid ? _initial_pid : getpid());
}

// gethrtime() should be monotonic according to the documentation,
// but some virtualized platforms are known to break this guarantee.
// getTimeNanos() must be guaranteed not to move backwards, so we
// are forced to add a check here.
// Monotonic nanosecond clock: returns gethrtime() clamped so it never
// moves backwards, using a CAS on the global high-water mark max_hrtime.
inline hrtime_t getTimeNanos() {
  const hrtime_t now = gethrtime();
  const hrtime_t prev = max_hrtime;
  if (now <= prev) {
    return prev;   // same or retrograde time;
  }
  const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
  assert(obsv >= prev, "invariant");   // Monotonicity
  // If the CAS succeeded then we're done and return "now".
  // If the CAS failed and the observed value "obsv" is >= now then
  // we should return "obsv". If the CAS failed and now > obsv > prv then
  // some other thread raced this thread and installed a new value, in which case
  // we could either (a) retry the entire operation, (b) retry trying to install now
  // or (c) just return obsv. We use (c). No loop is required although in some cases
  // we might discard a higher "now" value in deference to a slightly lower but freshly
  // installed obsv value. That's entirely benign -- it admits no new orderings compared
  // to (a) or (b) -- and greatly reduces coherence traffic.
  // We might also condition (c) on the magnitude of the delta between obsv and now.
  // Avoiding excessive CAS operations to hot RW locations is critical.
  // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
  return (prev == obsv) ? now : obsv;
}

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
}

// Elapsed counter in the same units as elapsed_frequency().
jlong os::elapsed_counter() {
  return (jlong)(getTimeNanos() - first_hrtime);
}

jlong os::elapsed_frequency() {
  return hrtime_hz;
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    // For consistency return the real time from getTimeNanos()
    // converted to seconds.
    *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);

    return true;
  }
}

bool os::supports_vtime() { return true; }

// Enable micro-state accounting (PR_MSACCT) for this process via /proc.
bool os::enable_vtime() {
  int fd = ::open("/proc/self/ctl", O_WRONLY);
  if (fd == -1) {
    return false;
  }

  long cmd[] = { PCSET, PR_MSACCT };
  int res = ::write(fd, cmd, sizeof(long) * 2);
  ::close(fd);
  if (res != sizeof(long) * 2) {
    return false;
  }
  return true;
}

// Returns true iff micro-state accounting is currently enabled.
bool os::vtime_enabled() {
  int fd = ::open("/proc/self/status", O_RDONLY);
  if (fd == -1) {
    return false;
  }

  pstatus_t status;
  int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
  ::close(fd);
  if (res != sizeof(pstatus_t)) {
    return false;
  }
  return status.pr_flags & PR_MSACCT;
}

double os::elapsedVTime() {
  return (double)gethrvtime() / (double)hrtime_hz;
}

// Used internally for comparisons only
// getTimeMillis guaranteed to not move backwards on Solaris
jlong getTimeMillis() {
  jlong nanotime = getTimeNanos();
  return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
}

// Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
jlong os::javaTimeMillis() {
  timeval t;
  if (gettimeofday(&t, NULL) == -1) {
    fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)",
strerror(errno)));
  }
  return jlong(t.tv_sec) * 1000 + jlong(t.tv_usec) / 1000;
}

// Split wall-clock time into whole seconds and the nanosecond remainder.
void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
  timeval t;
  if (gettimeofday(&t, NULL) == -1) {
    fatal(err_msg("os::javaTimeSystemUTC: gettimeofday (%s)", strerror(errno)));
  }
  seconds = jlong(t.tv_sec);
  nanos = jlong(t.tv_usec) * 1000;
}


jlong os::javaTimeNanos() {
  return (jlong)getTimeNanos();
}

// Describe the characteristics of the nanosecond timer for JVMTI.
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
  info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
  info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
  info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
}

// Format the current local time as "YYYY-MM-DD HH:MM:SS" into buf.
char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler.
// Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
void os::abort(bool dump_core, void* siginfo, void* context) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core (for debugging)
  }

  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  ::abort(); // dump core (for debugging)
}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }

// True iff filename is non-empty and stat-able.
static bool file_exists(const char* filename) {
  struct stat statbuf;
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return os::stat(filename, &statbuf) == 0;
}

// Build "lib<fname>.so" (optionally prefixed by a directory from pname,
// which may be a path-separator list) into buffer. Returns false if the
// result would not fit or no candidate file exists on a search path.
bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  // +10 covers the "lib", ".so", '/' and NUL added by the formats below.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // pname is a search path: probe each element for an existing library.
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0; i < n; i++) {
      // really shouldn't be NULL but what the heck, check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}

// check if addr is inside libjvm.so
bool os::address_is_in_vm(address addr) {
  // Base address of libjvm is resolved lazily on first call and cached.
  static address libjvm_base_addr;
  Dl_info dlinfo;

  if (libjvm_base_addr == NULL) {
    if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
      libjvm_base_addr = (address)dlinfo.dli_fbase;
    }
    assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
  }

  if (dladdr((void *)addr, &dlinfo) != 0) {
    if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
  }

  return false;
}

// dladdr1 (Solaris extension) is resolved dynamically in os::init().
typedef int (*dladdr1_func_type)(void *, Dl_info *, void **, int);
static dladdr1_func_type dladdr1_func = NULL;

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int * offset,
                                      bool demangle) {
  // buf is not optional, but offset is optional
  assert(buf !=
NULL, "sanity check"); 1505 1506 Dl_info dlinfo; 1507 1508 // dladdr1_func was initialized in os::init() 1509 if (dladdr1_func != NULL) { 1510 // yes, we have dladdr1 1511 1512 // Support for dladdr1 is checked at runtime; it may be 1513 // available even if the vm is built on a machine that does 1514 // not have dladdr1 support. Make sure there is a value for 1515 // RTLD_DL_SYMENT. 1516 #ifndef RTLD_DL_SYMENT 1517 #define RTLD_DL_SYMENT 1 1518 #endif 1519 #ifdef _LP64 1520 Elf64_Sym * info; 1521 #else 1522 Elf32_Sym * info; 1523 #endif 1524 if (dladdr1_func((void *)addr, &dlinfo, (void **)&info, 1525 RTLD_DL_SYMENT) != 0) { 1526 // see if we have a matching symbol that covers our address 1527 if (dlinfo.dli_saddr != NULL && 1528 (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) { 1529 if (dlinfo.dli_sname != NULL) { 1530 if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) { 1531 jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname); 1532 } 1533 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr; 1534 return true; 1535 } 1536 } 1537 // no matching symbol so try for just file info 1538 if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) { 1539 if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase), 1540 buf, buflen, offset, dlinfo.dli_fname, demangle)) { 1541 return true; 1542 } 1543 } 1544 } 1545 buf[0] = '\0'; 1546 if (offset != NULL) *offset = -1; 1547 return false; 1548 } 1549 1550 // no, only dladdr is available 1551 if (dladdr((void *)addr, &dlinfo) != 0) { 1552 // see if we have a matching symbol 1553 if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) { 1554 if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) { 1555 jio_snprintf(buf, buflen, dlinfo.dli_sname); 1556 } 1557 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr; 1558 return true; 1559 } 1560 // no matching symbol so try for just file info 1561 if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) { 1562 if 
(Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
                       buf, buflen, offset, dlinfo.dli_fname, demangle)) {
        return true;
      }
    }
  }
  // Nothing found: clear the buffer and report failure.
  buf[0] = '\0';
  if (offset != NULL) *offset = -1;
  return false;
}

// Map an address to the pathname of the containing library and the offset
// from that library's base. Returns false (and clears buf) if dladdr fails.
bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  Dl_info dlinfo;

  if (dladdr((void*)addr, &dlinfo) != 0) {
    if (dlinfo.dli_fname != NULL) {
      jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
    }
    if (dlinfo.dli_fbase != NULL && offset != NULL) {
      *offset = addr - (address)dlinfo.dli_fbase;
    }
    return true;
  }

  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}

// Walk the link map of the current process and invoke callback for each
// loaded module. Returns non-zero on failure or if the callback stops
// the iteration.
int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
  Dl_info dli;
  // Sanity check?
  if (dladdr(CAST_FROM_FN_PTR(void *, os::get_loaded_modules_info), &dli) == 0 ||
      dli.dli_fname == NULL) {
    return 1;
  }

  void * handle = dlopen(dli.dli_fname, RTLD_LAZY);
  if (handle == NULL) {
    return 1;
  }

  Link_map *map;
  dlinfo(handle, RTLD_DI_LINKMAP, &map);
  if (map == NULL) {
    dlclose(handle);
    return 1;
  }

  // Rewind to the first entry of the link map.
  while (map->l_prev != NULL) {
    map = map->l_prev;
  }

  while (map != NULL) {
    // Iterate through all map entries and call callback with fields of interest
    if(callback(map->l_name, (address)map->l_addr, (address)0, param)) {
      dlclose(handle);
      return 1;
    }
    map = map->l_next;
  }

  dlclose(handle);
  return 0;
}

// Callback for print_dll_info: print one module's base address and name.
int _print_dll_info_cb(const char * name, address base_address, address top_address, void * param) {
  outputStream * out = (outputStream *) param;
  out->print_cr(PTR_FORMAT " \t%s", base_address, name);
  return 0;
}

void
os::print_dll_info(outputStream * st) {
  st->print_cr("Dynamic libraries:"); st->flush();
  if (get_loaded_modules_info(_print_dll_info_cb, (void *)st)) {
    st->print_cr("Error: Cannot print dynamic libraries.");
  }
}

// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on

void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
  void * result= ::dlopen(filename, RTLD_LAZY);
  if (result != NULL) {
    // Successful loading
    return result;
  }

  // dlopen failed: inspect the ELF header ourselves to produce a better
  // diagnostic (appended to ebuf after the dlerror() text).
  Elf32_Ehdr elf_head;

  // Read system error message into ebuf
  // It may or may not be overwritten below
  ::strncpy(ebuf, ::dlerror(), ebuflen-1);
  ebuf[ebuflen-1]='\0';
  int diag_msg_max_length=ebuflen-strlen(ebuf);
  char* diag_msg_buf=ebuf+strlen(ebuf);

  if (diag_msg_max_length==0) {
    // No more space in ebuf for additional diagnostics message
    return NULL;
  }


  int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);

  if (file_descriptor < 0) {
    // Can't open library, report dlerror() message
    return NULL;
  }

  bool failed_to_read_elf_head=
    (sizeof(elf_head)!=
     (::read(file_descriptor, &elf_head,sizeof(elf_head))));

  ::close(file_descriptor);
  if (failed_to_read_elf_head) {
    // file i/o error - report dlerror() msg
    return NULL;
  }

  typedef struct {
    Elf32_Half code;          // Actual value as defined in elf.h
    Elf32_Half compat_class;  // Compatibility of archs at VM's sense
    char elf_class;           // 32 or 64 bit
    char endianess;           // MSB or LSB
    char* name;               // String representation
  } arch_t;

  static const arch_t arch_array[]={
    {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
    {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
    {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
    {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
    {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
    {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
    {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
    {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
    {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
    {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
  };

#if (defined IA32)
  static Elf32_Half running_arch_code=EM_386;
#elif (defined AMD64)
  static Elf32_Half running_arch_code=EM_X86_64;
#elif (defined IA64)
  static Elf32_Half running_arch_code=EM_IA_64;
#elif (defined __sparc) && (defined _LP64)
  static Elf32_Half running_arch_code=EM_SPARCV9;
#elif (defined __sparc) && (!defined _LP64)
  static Elf32_Half running_arch_code=EM_SPARC;
#elif (defined __powerpc64__)
  static Elf32_Half running_arch_code=EM_PPC64;
#elif (defined __powerpc__)
  static Elf32_Half running_arch_code=EM_PPC;
#elif (defined ARM)
  static Elf32_Half running_arch_code=EM_ARM;
#else
  #error Method os::dll_load requires that one of following is defined:\
       IA32, AMD64, IA64, __sparc, __powerpc__, ARM, ARM
#endif

  // Identify compatability class for VM's architecture and library's architecture
  // Obtain string descriptions for architectures

  arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
  int running_arch_index=-1;

  for (unsigned int i=0; i < ARRAY_SIZE(arch_array); i++) {
    if (running_arch_code == arch_array[i].code) {
      running_arch_index = i;
    }
    if (lib_arch.code == arch_array[i].code) {
      lib_arch.compat_class = arch_array[i].compat_class;
      lib_arch.name = arch_array[i].name;
    }
  }

  assert(running_arch_index != -1,
"Didn't find running architecture code (running_arch_code) in arch_array"); 1748 if (running_arch_index == -1) { 1749 // Even though running architecture detection failed 1750 // we may still continue with reporting dlerror() message 1751 return NULL; 1752 } 1753 1754 if (lib_arch.endianess != arch_array[running_arch_index].endianess) { 1755 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)"); 1756 return NULL; 1757 } 1758 1759 if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) { 1760 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)"); 1761 return NULL; 1762 } 1763 1764 if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) { 1765 if (lib_arch.name!=NULL) { 1766 ::snprintf(diag_msg_buf, diag_msg_max_length-1, 1767 " (Possible cause: can't load %s-bit .so on a %s-bit platform)", 1768 lib_arch.name, arch_array[running_arch_index].name); 1769 } else { 1770 ::snprintf(diag_msg_buf, diag_msg_max_length-1, 1771 " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)", 1772 lib_arch.code, 1773 arch_array[running_arch_index].name); 1774 } 1775 } 1776 1777 return NULL; 1778 } 1779 1780 void* os::dll_lookup(void* handle, const char* name) { 1781 return dlsym(handle, name); 1782 } 1783 1784 void* os::get_default_process_handle() { 1785 return (void*)::dlopen(NULL, RTLD_LAZY); 1786 } 1787 1788 int os::stat(const char *path, struct stat *sbuf) { 1789 char pathbuf[MAX_PATH]; 1790 if (strlen(path) > MAX_PATH - 1) { 1791 errno = ENAMETOOLONG; 1792 return -1; 1793 } 1794 os::native_path(strcpy(pathbuf, path)); 1795 return ::stat(pathbuf, sbuf); 1796 } 1797 1798 static bool _print_ascii_file(const char* filename, outputStream* st) { 1799 int fd = ::open(filename, O_RDONLY); 1800 if (fd == -1) { 1801 return false; 1802 } 1803 1804 char buf[32]; 1805 int bytes; 1806 while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) { 1807 
st->print_raw(buf, bytes); 1808 } 1809 1810 ::close(fd); 1811 1812 return true; 1813 } 1814 1815 void os::print_os_info_brief(outputStream* st) { 1816 os::Solaris::print_distro_info(st); 1817 1818 os::Posix::print_uname_info(st); 1819 1820 os::Solaris::print_libversion_info(st); 1821 } 1822 1823 void os::print_os_info(outputStream* st) { 1824 st->print("OS:"); 1825 1826 os::Solaris::print_distro_info(st); 1827 1828 os::Posix::print_uname_info(st); 1829 1830 os::Solaris::print_libversion_info(st); 1831 1832 os::Posix::print_rlimit_info(st); 1833 1834 os::Posix::print_load_average(st); 1835 } 1836 1837 void os::Solaris::print_distro_info(outputStream* st) { 1838 if (!_print_ascii_file("/etc/release", st)) { 1839 st->print("Solaris"); 1840 } 1841 st->cr(); 1842 } 1843 1844 void os::Solaris::print_libversion_info(outputStream* st) { 1845 st->print(" (T2 libthread)"); 1846 st->cr(); 1847 } 1848 1849 static bool check_addr0(outputStream* st) { 1850 jboolean status = false; 1851 int fd = ::open("/proc/self/map",O_RDONLY); 1852 if (fd >= 0) { 1853 prmap_t p; 1854 while (::read(fd, &p, sizeof(p)) > 0) { 1855 if (p.pr_vaddr == 0x0) { 1856 st->print("Warning: Address: 0x%x, Size: %dK, ",p.pr_vaddr, p.pr_size/1024, p.pr_mapname); 1857 st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname); 1858 st->print("Access:"); 1859 st->print("%s",(p.pr_mflags & MA_READ) ? "r" : "-"); 1860 st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-"); 1861 st->print("%s",(p.pr_mflags & MA_EXEC) ? "x" : "-"); 1862 st->cr(); 1863 status = true; 1864 } 1865 } 1866 ::close(fd); 1867 } 1868 return status; 1869 } 1870 1871 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) { 1872 // Nothing to do for now. 
}

// Print page size, physical and free memory, and the address-0 mapping check.
void os::print_memory_info(outputStream* st) {
  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);
  st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
  st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
  st->cr();
  (void) check_addr0(st);
}

// Print siginfo details; for SIGBUS/SIGSEGV inside the CDS archive mapping,
// add a hint that the shared archive file became inaccessible.
void os::print_siginfo(outputStream* st, void* siginfo) {
  const siginfo_t* si = (const siginfo_t*)siginfo;

  os::Posix::print_siginfo_brief(st, si);

  if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
      UseSharedSpaces) {
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (mapinfo->is_in_shared_space(si->si_addr)) {
      st->print("\n\nError accessing class data sharing archive." \
                " Mapped file inaccessible during execution, " \
                " possible disk/network problem.");
    }
  }
  st->cr();
}

// Moved from whole group, because we need them here for diagnostic
// prints.
#define OLDMAXSIGNUM 32
// Highest valid signal number (set to SIGRTMAX in init_signal_mem()).
static int Maxsignum = 0;
// Per-signal record of the sa_flags the VM installed, indexed by signal.
static int *ourSigFlags = NULL;

extern "C" void sigINTRHandler(int, siginfo_t*, void*);

// Returns the sa_flags the VM installed for 'sig'.
int os::Solaris::get_our_sigflags(int sig) {
  assert(ourSigFlags!=NULL, "signal data structure not initialized");
  assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
  return ourSigFlags[sig];
}

// Records the sa_flags the VM installed for 'sig'.
void os::Solaris::set_our_sigflags(int sig, int flags) {
  assert(ourSigFlags!=NULL, "signal data structure not initialized");
  assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
  ourSigFlags[sig] = flags;
}


// Formats a human-readable name for a handler address into 'buf':
// "library+0xoffset" when dll_address_to_library_name() can resolve it,
// otherwise the raw address. Returns 'buf'.
static const char* get_signal_handler_name(address handler,
                                           char* buf, int buflen) {
  int offset;
  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
  if (found) {
    // skip directory names
    const char *p1, *p2;
    p1 = buf;
    size_t len = strlen(os::file_separator());
    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
  } else {
    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
  }
  return buf;
}

// Prints a one-line description of the currently installed handler for
// 'sig', and warns when a handler the VM installed has had its flags
// changed (a symptom of an application overriding VM signals).
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;

  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  // SA_SIGINFO selects which union member of sigaction holds the handler.
  address handler = (sa.sa_flags & SA_SIGINFO)
                    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
                    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  st->print(", sa_mask[0]=");
  os::Posix::print_signal_set_short(st, &sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // The handler may have been reset by VMError during error reporting;
  // if so, report the original handler and flags instead.
  if (rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

  st->print(", sa_flags=");
  os::Posix::print_sa_flags(st, sa.sa_flags);

  // Check: is it our handler?
  if (handler == CAST_FROM_FN_PTR(address, signalHandler) ||
      handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
    // It is our signal handler
    // check for flags
    if (sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
      st->print(
                ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
                os::Solaris::get_our_sigflags(sig));
    }
  }
  st->cr();
}

void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  st->print_cr("Signal Handlers:");
  print_signal_handler(st, SIGSEGV, buf, buflen);
  print_signal_handler(st, SIGBUS , buf, buflen);
  print_signal_handler(st, SIGFPE , buf, buflen);
  print_signal_handler(st, SIGPIPE, buf, buflen);
  print_signal_handler(st, SIGXFSZ, buf, buflen);
  print_signal_handler(st, SIGILL , buf, buflen);
  print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
  print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
  print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
  print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
}

// Cached result of os::jvm_path(); computed once and reused thereafter.
static char saved_jvm_path[MAXPATHLEN] = { 0 };

// Find the full path to the current module, libjvm.so
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  // Locate this very function's containing shared object.
  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  if (ret != 0 && dlinfo.dli_fname != NULL) {
    realpath((char *)dlinfo.dli_fname, buf);
  } else {
    buf[0] = '\0';
    return;
  }

  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
    // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".
    // If "/jre/lib/" appears at the right place in the string, then
    // assume we are installed in a JDK and we're done. Otherwise, check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like libjvm.so is installed there (append a fake suffix
    // hotspot/libjvm.so).
    // Walk back over the last five '/'-separated path components.
    const char *p = buf + strlen(buf) - 1;
    for (int count = 0; p > buf && count < 5; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
        char cpu_arch[12];
        char* jrelib_p;
        int len;
        sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
#ifdef _LP64
        // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
        if (strcmp(cpu_arch, "sparc") == 0) {
          strcat(cpu_arch, "v9");
        } else if (strcmp(cpu_arch, "i386") == 0) {
          strcpy(cpu_arch, "amd64");
        }
#endif
        // Check the current module name "libjvm.so".
        p = strrchr(buf, '/');
        assert(strstr(p, "/libjvm") == p, "invalid library name");

        realpath(java_home_var, buf);
        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
        assert(len < buflen, "Ran out of buffer space");
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
        }

        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm.so"
          len = strlen(buf);
          snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
        } else {
          // Go back to path of .so
          realpath((char *)dlinfo.dli_fname, buf);
        }
      }
    }
  }

  strncpy(saved_jvm_path, buf, MAXPATHLEN);
  saved_jvm_path[MAXPATHLEN - 1] = '\0';
}


void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}


void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

// Copies the strerror() text for the current errno into 'buf' (truncated
// to len-1 characters, always NUL-terminated) and returns the number of
// characters stored; returns 0 when errno is 0.
size_t os::lasterror(char *buf, size_t len) {
  if (errno == 0) return 0;

  const char *s = ::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}


// sun.misc.Signal

extern "C" {
  // Handler installed for signals claimed through sun.misc.Signal; it
  // forwards the signal number to the VM's signal dispatcher thread.
  static void UserHandler(int sig, void *siginfo, void *context) {
    // Ctrl-C is pressed during error reporting, likely because the error
    // handler fails to abort. Let VM die immediately.
    if (sig == SIGINT && is_error_reported()) {
      os::die();
    }

    os::signal_notify(sig);
    // We do not need to reinstate the signal handler each time...
  }
}

void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}

struct timespec PosixSemaphore::create_timespec(unsigned int sec, int nsec) {
  struct timespec ts;
  // unpackTime() normalizes the nanosecond total into a timespec;
  // NOTE(review): the 'false' flag presumably selects a relative (not
  // absolute) time — confirm against unpackTime's declaration.
  unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);

  return ts;
}

extern "C" {
  typedef void (*sa_handler_t)(int);
  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}

// Installs 'handler' for 'signal_number' and returns the previously
// installed handler, or (void*)-1 when sigaction() fails.
void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;
  sigfillset(&(sigAct.sa_mask));
  // SA_RESTART with SA_RESETHAND explicitly cleared, so the handler is
  // not reset to SIG_DFL after the first delivery (the & ~ is a no-op
  // arithmetically but documents the intent).
  sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
    // -1 means registration failed
    return (void *)-1;
  }

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}

void os::signal_raise(int signal_number) {
  raise(signal_number);
}

// The following code is moved from os.cpp for making this
// code platform specific, which it is by its very nature.
2169 2170 // a counter for each possible signal value 2171 static int Sigexit = 0; 2172 static int Maxlibjsigsigs; 2173 static jint *pending_signals = NULL; 2174 static int *preinstalled_sigs = NULL; 2175 static struct sigaction *chainedsigactions = NULL; 2176 static sema_t sig_sem; 2177 typedef int (*version_getting_t)(); 2178 version_getting_t os::Solaris::get_libjsig_version = NULL; 2179 static int libjsigversion = NULL; 2180 2181 int os::sigexitnum_pd() { 2182 assert(Sigexit > 0, "signal memory not yet initialized"); 2183 return Sigexit; 2184 } 2185 2186 void os::Solaris::init_signal_mem() { 2187 // Initialize signal structures 2188 Maxsignum = SIGRTMAX; 2189 Sigexit = Maxsignum+1; 2190 assert(Maxsignum >0, "Unable to obtain max signal number"); 2191 2192 Maxlibjsigsigs = Maxsignum; 2193 2194 // pending_signals has one int per signal 2195 // The additional signal is for SIGEXIT - exit signal to signal_thread 2196 pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal); 2197 memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1))); 2198 2199 if (UseSignalChaining) { 2200 chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction) 2201 * (Maxsignum + 1), mtInternal); 2202 memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1))); 2203 preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal); 2204 memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1))); 2205 } 2206 ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1), mtInternal); 2207 memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1)); 2208 } 2209 2210 void os::signal_init_pd() { 2211 int ret; 2212 2213 ret = ::sema_init(&sig_sem, 0, NULL, NULL); 2214 assert(ret == 0, "sema_init() failed"); 2215 } 2216 2217 void os::signal_notify(int signal_number) { 2218 int ret; 2219 2220 Atomic::inc(&pending_signals[signal_number]); 2221 ret = ::sema_post(&sig_sem); 2222 assert(ret == 0, "sema_post() failed"); 2223 } 2224 2225 static int 
check_pending_signals(bool wait_for_signal) { 2226 int ret; 2227 while (true) { 2228 for (int i = 0; i < Sigexit + 1; i++) { 2229 jint n = pending_signals[i]; 2230 if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) { 2231 return i; 2232 } 2233 } 2234 if (!wait_for_signal) { 2235 return -1; 2236 } 2237 JavaThread *thread = JavaThread::current(); 2238 ThreadBlockInVM tbivm(thread); 2239 2240 bool threadIsSuspended; 2241 do { 2242 thread->set_suspend_equivalent(); 2243 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self() 2244 while ((ret = ::sema_wait(&sig_sem)) == EINTR) 2245 ; 2246 assert(ret == 0, "sema_wait() failed"); 2247 2248 // were we externally suspended while we were waiting? 2249 threadIsSuspended = thread->handle_special_suspend_equivalent_condition(); 2250 if (threadIsSuspended) { 2251 // The semaphore has been incremented, but while we were waiting 2252 // another thread suspended us. We don't want to continue running 2253 // while suspended because that would surprise the thread that 2254 // suspended us. 2255 ret = ::sema_post(&sig_sem); 2256 assert(ret == 0, "sema_post() failed"); 2257 2258 thread->java_suspend_self(); 2259 } 2260 } while (threadIsSuspended); 2261 } 2262 } 2263 2264 int os::signal_lookup() { 2265 return check_pending_signals(false); 2266 } 2267 2268 int os::signal_wait() { 2269 return check_pending_signals(true); 2270 } 2271 2272 //////////////////////////////////////////////////////////////////////////////// 2273 // Virtual Memory 2274 2275 static int page_size = -1; 2276 2277 // The mmap MAP_ALIGN flag is supported on Solaris 9 and later. init_2() will 2278 // clear this var if support is not available. 2279 static bool has_map_align = true; 2280 2281 int os::vm_page_size() { 2282 assert(page_size != -1, "must call os::init"); 2283 return page_size; 2284 } 2285 2286 // Solaris allocates memory by pages. 
int os::vm_allocation_granularity() {
  assert(page_size != -1, "must call os::init");
  return page_size;
}

// Returns true if 'err' (an errno from mmap) is one the caller can safely
// handle; any other error may mean our reserved mapping was lost.
static bool recoverable_mmap_error(int err) {
  // See if the error is one we can let the caller handle. This
  // list of errno values comes from the Solaris mmap(2) man page.
  switch (err) {
  case EBADF:
  case EINVAL:
  case ENOTSUP:
    // let the caller deal with these errors
    return true;

  default:
    // Any remaining errors on this OS can cause our reserved mapping
    // to be lost. That can cause confusion where different data
    // structures think they have the same memory mapped. The worst
    // scenario is if both the VM and a library think they have the
    // same memory mapped.
    return false;
  }
}

// Diagnostic message for a failed 3-argument commit.
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
          strerror(err), err);
}

// Diagnostic message for a failed 4-argument (alignment-hinted) commit.
static void warn_fail_commit_memory(char* addr, size_t bytes,
                                    size_t alignment_hint, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
          alignment_hint, exec, strerror(err), err);
}

// Commits [addr, addr+bytes) by remapping it MAP_FIXED with read/write
// (plus exec if requested). Returns 0 on success or a recoverable errno;
// aborts the VM on unrecoverable mmap errors.
int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  size_t size = bytes;
  char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
  if (res != NULL) {
    if (UseNUMAInterleaving) {
      numa_make_global(addr, bytes);
    }
    return 0;
  }

  int err = errno;  // save errno from mmap() call in mmap_chunk()

  if (!recoverable_mmap_error(err)) {
    warn_fail_commit_memory(addr, bytes, exec, err);
    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
  }

  return err;
}

bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
}

void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
  if (err != 0) {
    // the caller wants all commit errors to exit with the specified mesg:
    warn_fail_commit_memory(addr, bytes, exec, err);
    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
  }
}

// Returns the largest supported page size that divides 'alignment'
// evenly, falling back to the default page size.
size_t os::Solaris::page_size_for_alignment(size_t alignment) {
  assert(is_size_aligned(alignment, (size_t) vm_page_size()),
         err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT,
                 alignment, (size_t) vm_page_size()));

  // _page_sizes is sorted descending with a 0 sentinel (see
  // mpss_sanity_check), so the first hit is the largest usable size.
  for (int i = 0; _page_sizes[i] != 0; i++) {
    if (is_size_aligned(alignment, _page_sizes[i])) {
      return _page_sizes[i];
    }
  }

  return (size_t) vm_page_size();
}

// As the 3-argument commit, but additionally advises the kernel to use
// large pages when the alignment hint admits one.
int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
                                    size_t alignment_hint, bool exec) {
  int err = Solaris::commit_memory_impl(addr, bytes, exec);
  if (err == 0 && UseLargePages && alignment_hint > 0) {
    assert(is_size_aligned(bytes, alignment_hint),
           err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, alignment_hint));

    // The syscall memcntl requires an exact page size (see man memcntl for details).
    size_t page_size = page_size_for_alignment(alignment_hint);
    if (page_size > (size_t) vm_page_size()) {
      (void)Solaris::setup_large_pages(addr, bytes, page_size);
    }
  }
  return err;
}

bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
                          bool exec) {
  return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
}

void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
  if (err != 0) {
    // the caller wants all commit errors to exit with the specified mesg:
    warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
  }
}

// Uncommit the pages in a specified region.
void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
  if (madvise(addr, bytes, MADV_FREE) < 0) {
    debug_only(warning("MADV_FREE failed."));
    return;
  }
}

bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size, !ExecMem);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}

// Change the page size in a given range.
2427 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { 2428 assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned."); 2429 assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned."); 2430 if (UseLargePages) { 2431 size_t page_size = Solaris::page_size_for_alignment(alignment_hint); 2432 if (page_size > (size_t) vm_page_size()) { 2433 Solaris::setup_large_pages(addr, bytes, page_size); 2434 } 2435 } 2436 } 2437 2438 // Tell the OS to make the range local to the first-touching LWP 2439 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { 2440 assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned."); 2441 if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) { 2442 debug_only(warning("MADV_ACCESS_LWP failed.")); 2443 } 2444 } 2445 2446 // Tell the OS that this range would be accessed from different LWPs. 2447 void os::numa_make_global(char *addr, size_t bytes) { 2448 assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned."); 2449 if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) { 2450 debug_only(warning("MADV_ACCESS_MANY failed.")); 2451 } 2452 } 2453 2454 // Get the number of the locality groups. 2455 size_t os::numa_get_groups_num() { 2456 size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie()); 2457 return n != -1 ? n : 1; 2458 } 2459 2460 // Get a list of leaf locality groups. A leaf lgroup is group that 2461 // doesn't have any children. Typical leaf group is a CPU or a CPU/memory 2462 // board. An LWP is assigned to one of these groups upon creation. 
2463 size_t os::numa_get_leaf_groups(int *ids, size_t size) { 2464 if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) { 2465 ids[0] = 0; 2466 return 1; 2467 } 2468 int result_size = 0, top = 1, bottom = 0, cur = 0; 2469 for (int k = 0; k < size; k++) { 2470 int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur], 2471 (Solaris::lgrp_id_t*)&ids[top], size - top); 2472 if (r == -1) { 2473 ids[0] = 0; 2474 return 1; 2475 } 2476 if (!r) { 2477 // That's a leaf node. 2478 assert(bottom <= cur, "Sanity check"); 2479 // Check if the node has memory 2480 if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur], 2481 NULL, 0, LGRP_RSRC_MEM) > 0) { 2482 ids[bottom++] = ids[cur]; 2483 } 2484 } 2485 top += r; 2486 cur++; 2487 } 2488 if (bottom == 0) { 2489 // Handle a situation, when the OS reports no memory available. 2490 // Assume UMA architecture. 2491 ids[0] = 0; 2492 return 1; 2493 } 2494 return bottom; 2495 } 2496 2497 // Detect the topology change. Typically happens during CPU plugging-unplugging. 2498 bool os::numa_topology_changed() { 2499 int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie()); 2500 if (is_stale != -1 && is_stale) { 2501 Solaris::lgrp_fini(Solaris::lgrp_cookie()); 2502 Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER); 2503 assert(c != 0, "Failure to initialize LGRP API"); 2504 Solaris::set_lgrp_cookie(c); 2505 return true; 2506 } 2507 return false; 2508 } 2509 2510 // Get the group id of the current LWP. 2511 int os::numa_get_group_id() { 2512 int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID); 2513 if (lgrp_id == -1) { 2514 return 0; 2515 } 2516 const int size = os::numa_get_groups_num(); 2517 int *ids = (int*)alloca(size * sizeof(int)); 2518 2519 // Get the ids of all lgroups with memory; r is the count. 
2520 int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id, 2521 (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM); 2522 if (r <= 0) { 2523 return 0; 2524 } 2525 return ids[os::random() % r]; 2526 } 2527 2528 // Request information about the page. 2529 bool os::get_page_info(char *start, page_info* info) { 2530 const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE }; 2531 uint64_t addr = (uintptr_t)start; 2532 uint64_t outdata[2]; 2533 uint_t validity = 0; 2534 2535 if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) { 2536 return false; 2537 } 2538 2539 info->size = 0; 2540 info->lgrp_id = -1; 2541 2542 if ((validity & 1) != 0) { 2543 if ((validity & 2) != 0) { 2544 info->lgrp_id = outdata[0]; 2545 } 2546 if ((validity & 4) != 0) { 2547 info->size = outdata[1]; 2548 } 2549 return true; 2550 } 2551 return false; 2552 } 2553 2554 // Scan the pages from start to end until a page different than 2555 // the one described in the info parameter is encountered. 
2556 char *os::scan_pages(char *start, char* end, page_info* page_expected, 2557 page_info* page_found) { 2558 const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE }; 2559 const size_t types = sizeof(info_types) / sizeof(info_types[0]); 2560 uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1]; 2561 uint_t validity[MAX_MEMINFO_CNT]; 2562 2563 size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size); 2564 uint64_t p = (uint64_t)start; 2565 while (p < (uint64_t)end) { 2566 addrs[0] = p; 2567 size_t addrs_count = 1; 2568 while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) { 2569 addrs[addrs_count] = addrs[addrs_count - 1] + page_size; 2570 addrs_count++; 2571 } 2572 2573 if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) { 2574 return NULL; 2575 } 2576 2577 size_t i = 0; 2578 for (; i < addrs_count; i++) { 2579 if ((validity[i] & 1) != 0) { 2580 if ((validity[i] & 4) != 0) { 2581 if (outdata[types * i + 1] != page_expected->size) { 2582 break; 2583 } 2584 } else if (page_expected->size != 0) { 2585 break; 2586 } 2587 2588 if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) { 2589 if (outdata[types * i] != page_expected->lgrp_id) { 2590 break; 2591 } 2592 } 2593 } else { 2594 return NULL; 2595 } 2596 } 2597 2598 if (i < addrs_count) { 2599 if ((validity[i] & 2) != 0) { 2600 page_found->lgrp_id = outdata[types * i]; 2601 } else { 2602 page_found->lgrp_id = -1; 2603 } 2604 if ((validity[i] & 4) != 0) { 2605 page_found->size = outdata[types * i + 1]; 2606 } else { 2607 page_found->size = 0; 2608 } 2609 return (char*)addrs[i]; 2610 } 2611 2612 p = addrs[addrs_count - 1] + page_size; 2613 } 2614 return end; 2615 } 2616 2617 bool os::pd_uncommit_memory(char* addr, size_t bytes) { 2618 size_t size = bytes; 2619 // Map uncommitted pages PROT_NONE so we fail early if we touch an 2620 // uncommitted page. 
Otherwise, the read/write might succeed if we 2621 // have enough swap space to back the physical page. 2622 return 2623 NULL != Solaris::mmap_chunk(addr, size, 2624 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, 2625 PROT_NONE); 2626 } 2627 2628 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) { 2629 char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0); 2630 2631 if (b == MAP_FAILED) { 2632 return NULL; 2633 } 2634 return b; 2635 } 2636 2637 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, 2638 size_t alignment_hint, bool fixed) { 2639 char* addr = requested_addr; 2640 int flags = MAP_PRIVATE | MAP_NORESERVE; 2641 2642 assert(!(fixed && (alignment_hint > 0)), 2643 "alignment hint meaningless with fixed mmap"); 2644 2645 if (fixed) { 2646 flags |= MAP_FIXED; 2647 } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) { 2648 flags |= MAP_ALIGN; 2649 addr = (char*) alignment_hint; 2650 } 2651 2652 // Map uncommitted pages PROT_NONE so we fail early if we touch an 2653 // uncommitted page. Otherwise, the read/write might succeed if we 2654 // have enough swap space to back the physical page. 2655 return mmap_chunk(addr, bytes, flags, PROT_NONE); 2656 } 2657 2658 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, 2659 size_t alignment_hint) { 2660 char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, 2661 (requested_addr != NULL)); 2662 2663 guarantee(requested_addr == NULL || requested_addr == addr, 2664 "OS failed to return requested mmap address."); 2665 return addr; 2666 } 2667 2668 // Reserve memory at an arbitrary address, only if that area is 2669 // available (and not reserved for something else). 2670 2671 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) { 2672 const int max_tries = 10; 2673 char* base[max_tries]; 2674 size_t size[max_tries]; 2675 2676 // Solaris adds a gap between mmap'ed regions. 
The size of the gap 2677 // is dependent on the requested size and the MMU. Our initial gap 2678 // value here is just a guess and will be corrected later. 2679 bool had_top_overlap = false; 2680 bool have_adjusted_gap = false; 2681 size_t gap = 0x400000; 2682 2683 // Assert only that the size is a multiple of the page size, since 2684 // that's all that mmap requires, and since that's all we really know 2685 // about at this low abstraction level. If we need higher alignment, 2686 // we can either pass an alignment to this method or verify alignment 2687 // in one of the methods further up the call chain. See bug 5044738. 2688 assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block"); 2689 2690 // Since snv_84, Solaris attempts to honor the address hint - see 5003415. 2691 // Give it a try, if the kernel honors the hint we can return immediately. 2692 char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false); 2693 2694 volatile int err = errno; 2695 if (addr == requested_addr) { 2696 return addr; 2697 } else if (addr != NULL) { 2698 pd_unmap_memory(addr, bytes); 2699 } 2700 2701 if (PrintMiscellaneous && Verbose) { 2702 char buf[256]; 2703 buf[0] = '\0'; 2704 if (addr == NULL) { 2705 jio_snprintf(buf, sizeof(buf), ": %s", strerror(err)); 2706 } 2707 warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at " 2708 PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT 2709 "%s", bytes, requested_addr, addr, buf); 2710 } 2711 2712 // Address hint method didn't work. Fall back to the old method. 2713 // In theory, once SNV becomes our oldest supported platform, this 2714 // code will no longer be needed. 2715 // 2716 // Repeatedly allocate blocks until the block is allocated at the 2717 // right spot. Give up after max_tries. 2718 int i; 2719 for (i = 0; i < max_tries; ++i) { 2720 base[i] = reserve_memory(bytes); 2721 2722 if (base[i] != NULL) { 2723 // Is this the block we wanted? 
2724 if (base[i] == requested_addr) { 2725 size[i] = bytes; 2726 break; 2727 } 2728 2729 // check that the gap value is right 2730 if (had_top_overlap && !have_adjusted_gap) { 2731 size_t actual_gap = base[i-1] - base[i] - bytes; 2732 if (gap != actual_gap) { 2733 // adjust the gap value and retry the last 2 allocations 2734 assert(i > 0, "gap adjustment code problem"); 2735 have_adjusted_gap = true; // adjust the gap only once, just in case 2736 gap = actual_gap; 2737 if (PrintMiscellaneous && Verbose) { 2738 warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap); 2739 } 2740 unmap_memory(base[i], bytes); 2741 unmap_memory(base[i-1], size[i-1]); 2742 i-=2; 2743 continue; 2744 } 2745 } 2746 2747 // Does this overlap the block we wanted? Give back the overlapped 2748 // parts and try again. 2749 // 2750 // There is still a bug in this code: if top_overlap == bytes, 2751 // the overlap is offset from requested region by the value of gap. 2752 // In this case giving back the overlapped part will not work, 2753 // because we'll give back the entire block at base[i] and 2754 // therefore the subsequent allocation will not generate a new gap. 2755 // This could be fixed with a new algorithm that used larger 2756 // or variable size chunks to find the requested region - 2757 // but such a change would introduce additional complications. 2758 // It's rare enough that the planets align for this bug, 2759 // so we'll just wait for a fix for 6204603/5003415 which 2760 // will provide a mmap flag to allow us to avoid this business. 
2761 2762 size_t top_overlap = requested_addr + (bytes + gap) - base[i]; 2763 if (top_overlap >= 0 && top_overlap < bytes) { 2764 had_top_overlap = true; 2765 unmap_memory(base[i], top_overlap); 2766 base[i] += top_overlap; 2767 size[i] = bytes - top_overlap; 2768 } else { 2769 size_t bottom_overlap = base[i] + bytes - requested_addr; 2770 if (bottom_overlap >= 0 && bottom_overlap < bytes) { 2771 if (PrintMiscellaneous && Verbose && bottom_overlap == 0) { 2772 warning("attempt_reserve_memory_at: possible alignment bug"); 2773 } 2774 unmap_memory(requested_addr, bottom_overlap); 2775 size[i] = bytes - bottom_overlap; 2776 } else { 2777 size[i] = bytes; 2778 } 2779 } 2780 } 2781 } 2782 2783 // Give back the unused reserved pieces. 2784 2785 for (int j = 0; j < i; ++j) { 2786 if (base[j] != NULL) { 2787 unmap_memory(base[j], size[j]); 2788 } 2789 } 2790 2791 return (i < max_tries) ? requested_addr : NULL; 2792 } 2793 2794 bool os::pd_release_memory(char* addr, size_t bytes) { 2795 size_t size = bytes; 2796 return munmap(addr, size) == 0; 2797 } 2798 2799 static bool solaris_mprotect(char* addr, size_t bytes, int prot) { 2800 assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()), 2801 "addr must be page aligned"); 2802 int retVal = mprotect(addr, bytes, prot); 2803 return retVal == 0; 2804 } 2805 2806 // Protect memory (Used to pass readonly pages through 2807 // JNI GetArray<type>Elements with empty arrays.) 2808 // Also, used for serialization page and for compressed oops null pointer 2809 // checking. 2810 bool os::protect_memory(char* addr, size_t bytes, ProtType prot, 2811 bool is_committed) { 2812 unsigned int p = 0; 2813 switch (prot) { 2814 case MEM_PROT_NONE: p = PROT_NONE; break; 2815 case MEM_PROT_READ: p = PROT_READ; break; 2816 case MEM_PROT_RW: p = PROT_READ|PROT_WRITE; break; 2817 case MEM_PROT_RWX: p = PROT_READ|PROT_WRITE|PROT_EXEC; break; 2818 default: 2819 ShouldNotReachHere(); 2820 } 2821 // is_committed is unused. 
  return solaris_mprotect(addr, bytes, p);
}

// guard_memory and unguard_memory only happens within stack guard pages.
// Since ISM pertains only to the heap, guard and unguard memory should not
// happen with an ISM region.
bool os::guard_memory(char* addr, size_t bytes) {
  return solaris_mprotect(addr, bytes, PROT_NONE);
}

bool os::unguard_memory(char* addr, size_t bytes) {
  return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
}

// Large page support
static size_t _large_page_size = 0;

// Insertion sort for small arrays (descending order).
static void insertion_sort_descending(size_t* array, int len) {
  for (int i = 0; i < len; i++) {
    size_t val = array[i];
    // Shift smaller elements right until val's slot is found.
    for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
      size_t tmp = array[key];
      array[key] = array[key - 1];
      array[key - 1] = tmp;
    }
  }
}

// Probe the OS for the set of supported page sizes (MPSS) and select the
// usable ones into _page_sizes.  On success, *page_size receives the largest
// usable size.  Returns false when MPSS is unavailable or when only one page
// size exists.  'warn' controls whether failures emit a warning.
bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
  const unsigned int usable_count = VM_Version::page_size_count();
  if (usable_count == 1) {
    return false;
  }

  // Find the right getpagesizes interface.  When solaris 11 is the minimum
  // build platform, getpagesizes() (without the '2') can be called directly.
  typedef int (*gps_t)(size_t[], int);
  gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
  if (gps_func == NULL) {
    gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
    if (gps_func == NULL) {
      if (warn) {
        warning("MPSS is not supported by the operating system.");
      }
      return false;
    }
  }

  // Fill the array of page sizes.
  int n = (*gps_func)(_page_sizes, page_sizes_max);
  assert(n > 0, "Solaris bug?");

  if (n == page_sizes_max) {
    // Add a sentinel value (necessary only if the array was completely filled
    // since it is static (zeroed at initialization)).
    _page_sizes[--n] = 0;
    DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
  }
  assert(_page_sizes[n] == 0, "missing sentinel");
  trace_page_sizes("available page sizes", _page_sizes, n);

  if (n == 1) return false;     // Only one page size available.

  // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
  // select up to usable_count elements.  First sort the array, find the first
  // acceptable value, then copy the usable sizes to the top of the array and
  // trim the rest.  Make sure to include the default page size :-).
  //
  // A better policy could get rid of the 4M limit by taking the sizes of the
  // important VM memory regions (java heap and possibly the code cache) into
  // account.
  insertion_sort_descending(_page_sizes, n);
  const size_t size_limit =
    FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
  int beg;
  for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */;
  const int end = MIN2((int)usable_count, n) - 1;
  for (int cur = 0; cur < end; ++cur, ++beg) {
    _page_sizes[cur] = _page_sizes[beg];
  }
  // The default page size always occupies the last usable slot.
  _page_sizes[end] = vm_page_size();
  _page_sizes[end + 1] = 0;     // new sentinel after trimming

  if (_page_sizes[end] > _page_sizes[end - 1]) {
    // Default page size is not the smallest; sort again.
    insertion_sort_descending(_page_sizes, end + 1);
  }
  *page_size = _page_sizes[0];

  trace_page_sizes("usable page sizes", _page_sizes, end + 1);
  return true;
}

void os::large_page_init() {
  if (UseLargePages) {
    // print a warning if any large page related flag is specified on command line
    bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                           !FLAG_IS_DEFAULT(LargePageSizeInBytes);

    UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
  }
}

// Linear scan of the zero-terminated _page_sizes array.
bool os::Solaris::is_valid_page_size(size_t bytes) {
  for (int i = 0; _page_sizes[i] != 0; i++) {
    if (_page_sizes[i] == bytes) {
      return true;
    }
  }
  return false;
}

// Advise the kernel (memcntl/MC_HAT_ADVISE) to back [start, start + bytes)
// with pages of size 'align'.  Returns false if the advice was rejected.
bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
  assert(is_valid_page_size(align), err_msg(SIZE_FORMAT " is not a valid page size", align));
  assert(is_ptr_aligned((void*) start, align),
         err_msg(PTR_FORMAT " is not aligned to " SIZE_FORMAT, p2i((void*) start), align));
  assert(is_size_aligned(bytes, align),
         err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, align));

  // Signal to OS that we want large pages for addresses
  // from addr, addr + bytes
  struct memcntl_mha mpss_struct;
  mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
  mpss_struct.mha_pagesize = align;
  mpss_struct.mha_flags = 0;
  // Upon successful completion, memcntl() returns 0
  if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
    debug_only(warning("Attempt to use MPSS failed."));
    return false;
  }
  return true;
}

char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
  fatal("os::reserve_memory_special should not be called on Solaris.");
  return NULL;
}

bool os::release_memory_special(char* base, size_t bytes) {
  fatal("os::release_memory_special should not be called on Solaris.");
  return false;
}

size_t os::large_page_size() {
  return _large_page_size;
}

// MPSS allows application to commit large page memory on demand; with ISM
// the entire memory region must be allocated as shared memory.
bool os::can_commit_large_page_memory() {
  return true;
}

bool os::can_execute_large_page_memory() {
  return true;
}

// Read calls from inside the vm need to perform state transitions
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  size_t res;
  JavaThread* thread = (JavaThread*)Thread::current();
  assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
  // Transition to blocked so a safepoint is not held up by this read.
  ThreadBlockInVM tbiv(thread);
  RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
  return res;
}

// Positioned read (pread); same state transition as os::read above.
size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  size_t res;
  JavaThread* thread = (JavaThread*)Thread::current();
  assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
  ThreadBlockInVM tbiv(thread);
  RESTARTABLE(::pread(fd, buf, (size_t) nBytes, offset), res);
  return res;
}

// Read that retries on EINTR; caller must already be _thread_in_native,
// so no state transition is performed here.
size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
  size_t res;
  assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
         "Assumed _thread_in_native");
  RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
  return res;
}

void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptable sleep, short time use only");

  // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
  // Solaris requires -lrt for this.
  usleep((ms * 1000));

  return;
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    ::sleep(100);   // ... 100 seconds at a time
  }
}

// Used to convert frequent JVM_Yield() to nops
bool os::dont_yield() {
  if (DontYieldALot) {
    static hrtime_t last_time = 0;
    hrtime_t diff = getTimeNanos() - last_time;

    // Suppress the yield if the previous one happened less than
    // DontYieldALotInterval milliseconds ago.
    if (diff < DontYieldALotInterval * 1000000) {
      return true;
    }

    last_time += diff;

    return false;
  } else {
    return false;
  }
}

// Note that yield semantics are defined by the scheduling class to which
// the thread currently belongs.  Typically, yield will _not yield to
// other equal or higher priority threads that reside on the dispatch queues
// of other CPUs.

void os::naked_yield() {
  thr_yield();
}

// Interface for setting lwp priorities.  If we are using T2 libthread,
// which forces the use of BoundThreads or we manually set UseBoundThreads,
// all of our threads will be assigned to real lwp's.  Using the thr_setprio
// function is meaningless in this mode so we must adjust the real lwp's priority.
// The routines below implement the getting and setting of lwp priorities.
//
// Note: T2 is now the only supported libthread.  UseBoundThreads flag is
// being deprecated and all threads are now BoundThreads.
//
// Note: There are three priority scales used on Solaris.  Java priorities
// which range from 1 to 10, the libthread "thr_setprio" scale which ranges
// from 0 to 127, and the current scheduling class of the process we
// are running in.  This is typically from -60 to +60.
// The setting of the lwp priorities is done after a call to thr_setprio
// so Java priorities are mapped to libthread priorities and we map from
// the latter to lwp priorities.  We don't keep priorities stored in
// Java priorities since some of our worker threads want to set priorities
// higher than all Java threads.
//
// For related information:
// (1) man -s 2 priocntl
// (2) man -s 4 priocntl
// (3) man dispadmin
// =   librt.so
// =   libthread/common/rtsched.c - thrp_setlwpprio().
// =   ps -cL <pid> ... to validate priority.
// =   sched_get_priority_min and _max
//              pthread_create
//              sched_setparam
//              pthread_setschedparam
//
// Assumptions:
// +    We assume that all threads in the process belong to the same
//              scheduling class.   IE. an homogenous process.
// +    Must be root or in IA group to change "interactive" attribute.
//              Priocntl() will fail silently.  The only indication of failure is when
//              we read-back the value and notice that it hasn't changed.
// +    Interactive threads enter the runq at the head, non-interactive at the tail.
// +    For RT, change timeslice as well.  Invariant:
//              constant "priority integral"
//              Konst == TimeSlice * (60-Priority)
//              Given a priority, compute appropriate timeslice.
// +    Higher numerical values have higher priority.

// sched class attributes
typedef struct {
  int   schedPolicy;              // classID
  int   maxPrio;
  int   minPrio;
} SchedInfo;


// Per-class priority limits, filled in by lwp_priocntl_init().
static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;

#ifdef ASSERT
static int ReadBackValidate = 1;
#endif
// Scheduling class and priority range of this process, queried at init time.
static int myClass = 0;
static int myMin   = 0;
static int myMax   = 0;
static int myCur   = 0;
static bool priocntl_enable = false;

static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
static int java_MaxPriority_to_os_priority = 0; // Saved mapping


// lwp_priocntl_init
//
// Try to determine the priority scale for our process.
//
// Return errno or 0 if OK.
//
static int lwp_priocntl_init() {
  int rslt;
  pcinfo_t ClassInfo;
  pcparms_t ParmInfo;
  int i;

  if (!UseThreadPriorities) return 0;

  // If ThreadPriorityPolicy is 1, switch tables
  if (ThreadPriorityPolicy == 1) {
    for (i = 0; i < CriticalPriority+1; i++)
      os::java_to_os_priority[i] = prio_policy1[i];
  }
  if (UseCriticalJavaThreadPriority) {
    // MaxPriority always maps to the FX scheduling class and criticalPrio.
    // See set_native_priority() and set_lwp_class_and_priority().
    // Save original MaxPriority mapping in case attempt to
    // use critical priority fails.
    java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
    // Set negative to distinguish from other priorities
    os::java_to_os_priority[MaxPriority] = -criticalPrio;
  }

  // Get IDs for a set of well-known scheduling classes.
  // TODO-FIXME: GETCLINFO returns the current # of classes in the
  // the system.  We should have a loop that iterates over the
  // classID values, which are known to be "small" integers.

  strcpy(ClassInfo.pc_clname, "TS");
  ClassInfo.pc_cid = -1;
  rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
  tsLimits.schedPolicy = ClassInfo.pc_cid;
  tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
  tsLimits.minPrio = -tsLimits.maxPrio;

  strcpy(ClassInfo.pc_clname, "IA");
  ClassInfo.pc_cid = -1;
  rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
  iaLimits.schedPolicy = ClassInfo.pc_cid;
  iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
  iaLimits.minPrio = -iaLimits.maxPrio;

  strcpy(ClassInfo.pc_clname, "RT");
  ClassInfo.pc_cid = -1;
  rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
  rtLimits.schedPolicy = ClassInfo.pc_cid;
  rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
  rtLimits.minPrio = 0;

  strcpy(ClassInfo.pc_clname, "FX");
  ClassInfo.pc_cid = -1;
  rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
  fxLimits.schedPolicy = ClassInfo.pc_cid;
  fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
  fxLimits.minPrio = 0;

  // Query our "current" scheduling class.
  // This will normally be IA, TS or, rarely, FX or RT.
  memset(&ParmInfo, 0, sizeof(ParmInfo));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
  if (rslt < 0) return errno;
  myClass = ParmInfo.pc_cid;

  // We now know our scheduling classId, get specific information
  // about the class.
  ClassInfo.pc_cid = myClass;
  ClassInfo.pc_clname[0] = 0;
  rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;

  if (ThreadPriorityVerbose) {
    tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
  }

  memset(&ParmInfo, 0, sizeof(pcparms_t));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
  if (rslt < 0) return errno;

  // Derive [myMin..myMax] for our class, clamping to the class's user
  // priority limit (uprilim) where one is defined.
  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
    myMin = rtLimits.minPrio;
    myMax = rtLimits.maxPrio;
  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
    iaparms_t *iaInfo = (iaparms_t*)ParmInfo.pc_clparms;
    myMin = iaLimits.minPrio;
    myMax = iaLimits.maxPrio;
    myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
    tsparms_t *tsInfo = (tsparms_t*)ParmInfo.pc_clparms;
    myMin = tsLimits.minPrio;
    myMax = tsLimits.maxPrio;
    myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
  } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
    fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
    myMin = fxLimits.minPrio;
    myMax = fxLimits.maxPrio;
    myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
  } else {
    // No clue - punt
    if (ThreadPriorityVerbose) {
      tty->print_cr("Unknown scheduling class: %s ... \n",
                    ClassInfo.pc_clname);
    }
    return EINVAL;      // no clue, punt
  }

  if (ThreadPriorityVerbose) {
    tty->print_cr("Thread priority Range: [%d..%d]\n", myMin, myMax);
  }

  priocntl_enable = true;  // Enable changing priorities
  return 0;
}

// Accessors for the class-specific parameter block inside a pcparms_t.
#define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
#define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
#define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
#define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))


// scale_to_lwp_priority
//
// Convert from the libthread "thr_setprio" scale to our current
// lwp scheduling class scale.
//
static int scale_to_lwp_priority(int rMin, int rMax, int x) {
  int v;

  if (x == 127) return rMax;          // avoid round-down
  v = (((x*(rMax-rMin)))/128)+rMin;
  return v;
}


// set_lwp_class_and_priority
//
// Move lwp 'lwpid' to scheduling class 'new_class' at priority 'newPrio'
// (optionally rescaled from the thr_setprio scale).  Returns errno or 0 if OK.
int set_lwp_class_and_priority(int ThreadID, int lwpid,
                               int newPrio, int new_class, bool scale) {
  int rslt;
  int Actual, Expected, prv;
  pcparms_t ParmInfo;                   // for GET-SET
#ifdef ASSERT
  pcparms_t ReadBack;                   // for readback
#endif

  // Set priority via PC_GETPARMS, update, PC_SETPARMS
  // Query current values.
  // TODO: accelerate this by eliminating the PC_GETPARMS call.
  // Cache "pcparms_t" in global ParmCache.
  // TODO: elide set-to-same-value

  // If something went wrong on init, don't change priorities.
  if (!priocntl_enable) {
    if (ThreadPriorityVerbose) {
      tty->print_cr("Trying to set priority but init failed, ignoring");
    }
    return EINVAL;
  }

  // If lwp hasn't started yet, just return
  // the _start routine will call us again.
  if (lwpid <= 0) {
    if (ThreadPriorityVerbose) {
      tty->print_cr("deferring the set_lwp_class_and_priority of thread "
                    INTPTR_FORMAT " to %d, lwpid not set",
                    ThreadID, newPrio);
    }
    return 0;
  }

  if (ThreadPriorityVerbose) {
    tty->print_cr ("set_lwp_class_and_priority("
                   INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
                   ThreadID, lwpid, newPrio);
  }

  // Fetch the lwp's current scheduling parameters before modifying them.
  memset(&ParmInfo, 0, sizeof(pcparms_t));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
  if (rslt < 0) return errno;

  int cur_class = ParmInfo.pc_cid;
  ParmInfo.pc_cid = (id_t)new_class;

  if (new_class == rtLimits.schedPolicy) {
    rtparms_t *rtInfo = (rtparms_t*)ParmInfo.pc_clparms;
    rtInfo->rt_pri = scale ? scale_to_lwp_priority(rtLimits.minPrio,
                                                   rtLimits.maxPrio, newPrio)
                           : newPrio;
    rtInfo->rt_tqsecs = RT_NOCHANGE;
    rtInfo->rt_tqnsecs = RT_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
    }
  } else if (new_class == iaLimits.schedPolicy) {
    iaparms_t* iaInfo = (iaparms_t*)ParmInfo.pc_clparms;
    // When staying in the same class, respect the existing user priority
    // limit; when switching classes, use the class maximum instead.
    int maxClamped = MIN2(iaLimits.maxPrio,
                          cur_class == new_class
                          ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
    iaInfo->ia_upri = scale ? scale_to_lwp_priority(iaLimits.minPrio,
                                                    maxClamped, newPrio)
                            : newPrio;
    iaInfo->ia_uprilim = cur_class == new_class
                         ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
    iaInfo->ia_mode = IA_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("IA: [%d...%d] %d->%d\n",
                    iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
    }
  } else if (new_class == tsLimits.schedPolicy) {
    tsparms_t* tsInfo = (tsparms_t*)ParmInfo.pc_clparms;
    int maxClamped = MIN2(tsLimits.maxPrio,
                          cur_class == new_class
                          ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
    tsInfo->ts_upri = scale ? scale_to_lwp_priority(tsLimits.minPrio,
                                                    maxClamped, newPrio)
                            : newPrio;
    tsInfo->ts_uprilim = cur_class == new_class
                         ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
    if (ThreadPriorityVerbose) {
      tty->print_cr("TS: [%d...%d] %d->%d\n",
                    tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
    }
  } else if (new_class == fxLimits.schedPolicy) {
    fxparms_t* fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
    int maxClamped = MIN2(fxLimits.maxPrio,
                          cur_class == new_class
                          ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
    fxInfo->fx_upri = scale ? scale_to_lwp_priority(fxLimits.minPrio,
                                                    maxClamped, newPrio)
                            : newPrio;
    fxInfo->fx_uprilim = cur_class == new_class
                         ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
    fxInfo->fx_tqsecs = FX_NOCHANGE;
    fxInfo->fx_tqnsecs = FX_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("FX: [%d...%d] %d->%d\n",
                    fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
    }
  } else {
    if (ThreadPriorityVerbose) {
      tty->print_cr("Unknown new scheduling class %d\n", new_class);
    }
    return EINVAL;    // no clue, punt
  }

  rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
  if (ThreadPriorityVerbose && rslt) {
    tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
  }
  if (rslt < 0) return errno;

#ifdef ASSERT
  // Sanity check: read back what we just attempted to set.
  // In theory it could have changed in the interim ...
  //
  // The priocntl system call is tricky.
  // Sometimes it'll validate the priority value argument and
  // return EINVAL if unhappy.  At other times it fails silently.
  // Readbacks are prudent.

  if (!ReadBackValidate) return 0;

  memset(&ReadBack, 0, sizeof(pcparms_t));
  ReadBack.pc_cid = PC_CLNULL;
  rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
  assert(rslt >= 0, "priocntl failed");
  Actual = Expected = 0xBAD;
  assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
    Actual   = RTPRI(ReadBack)->rt_pri;
    Expected = RTPRI(ParmInfo)->rt_pri;
  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
    Actual   = IAPRI(ReadBack)->ia_upri;
    Expected = IAPRI(ParmInfo)->ia_upri;
  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
    Actual   = TSPRI(ReadBack)->ts_upri;
    Expected = TSPRI(ParmInfo)->ts_upri;
  } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
    Actual   = FXPRI(ReadBack)->fx_upri;
    Expected = FXPRI(ParmInfo)->fx_upri;
  } else {
    if (ThreadPriorityVerbose) {
      tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
                    ParmInfo.pc_cid);
    }
  }

  if (Actual != Expected) {
    if (ThreadPriorityVerbose) {
      tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
                     lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
    }
  }
#endif

  return 0;
}

// Solaris only gives access to 128 real priorities at a time,
// so we expand Java's ten to fill this range.  This would be better
// if we dynamically adjusted relative priorities.
//
// The ThreadPriorityPolicy option allows us to select 2 different
// priority scales.
//
// ThreadPriorityPolicy=0
// Since the Solaris' default priority is MaximumPriority, we do not
// set a priority lower than Max unless a priority lower than
// NormPriority is requested.
//
// ThreadPriorityPolicy=1
// This mode causes the priority table to get filled with
// linear values.  NormPriority gets mapped to 50% of the
// Maximum priority and so on.  This will cause VM threads
// to get unfair treatment against other Solaris processes
// which do not explicitly alter their thread priorities.

int os::java_to_os_priority[CriticalPriority + 1] = {
  -99999,         // 0 Entry should never be used

  0,              // 1 MinPriority
  32,             // 2
  64,             // 3

  96,             // 4
  127,            // 5 NormPriority
  127,            // 6

  127,            // 7
  127,            // 8
  127,            // 9 NearMaxPriority

  127,            // 10 MaxPriority

  -criticalPrio   // 11 CriticalPriority
};

OSReturn os::set_native_priority(Thread* thread, int newpri) {
  OSThread* osthread = thread->osthread();

  // Save requested priority in case the thread hasn't been started
  osthread->set_native_priority(newpri);

  // Check for critical priority request
  bool fxcritical = false;
  if (newpri == -criticalPrio) {
    fxcritical = true;
    newpri = criticalPrio;
  }

  assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
  if (!UseThreadPriorities) return OS_OK;

  int status = 0;

  if (!fxcritical) {
    // Use thr_setprio only if we have a priority that thr_setprio understands
    status = thr_setprio(thread->osthread()->thread_id(), newpri);
  }

  int lwp_status =
          set_lwp_class_and_priority(osthread->thread_id(),
                                     osthread->lwp_id(),
                                     newpri,
                                     fxcritical ? fxLimits.schedPolicy : myClass,
                                     !fxcritical);
  if (lwp_status != 0 && fxcritical) {
    // Try again, this time without changing the scheduling class
    newpri = java_MaxPriority_to_os_priority;
    lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
                                            osthread->lwp_id(),
                                            newpri, myClass, false);
  }
  status |= lwp_status;
  return (status == 0) ?
                        OS_OK : OS_ERR;
}


OSReturn os::get_native_priority(const Thread* const thread,
                                 int *priority_ptr) {
  int p;
  if (!UseThreadPriorities) {
    *priority_ptr = NormalPriority;
    return OS_OK;
  }
  int status = thr_getprio(thread->osthread()->thread_id(), &p);
  if (status != 0) {
    return OS_ERR;
  }
  *priority_ptr = p;
  return OS_OK;
}


// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {
  schedctl_start(schedctl_init());
}

// Forget the saved suspend-time context once the thread resumes.
static void resume_clear_context(OSThread *osthread) {
  osthread->set_ucontext(NULL);
}

// Remember the ucontext captured at suspension for later inspection.
static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
  osthread->set_ucontext(context);
}

// Semaphore used for the suspend/resume handshake between the requesting
// thread (do_suspend/do_resume below) and the SR_handler signal handler.
static PosixSemaphore sr_semaphore;

// Signal handler side of the suspend/resume protocol: drives the
// os::SuspendResume state machine for the target thread.
void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  OSThread* osthread = thread->osthread();
  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");

  os::SuspendResume::State current = osthread->sr.state();
  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
    suspend_save_context(osthread, uc);

    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
    os::SuspendResume::State state = osthread->sr.suspended();
    if (state == os::SuspendResume::SR_SUSPENDED) {
      sigset_t suspend_set;  // signals for sigsuspend()

      // get current set of blocked signals and unblock resume signal
      thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
      sigdelset(&suspend_set, os::Solaris::SIGasync());

      sr_semaphore.signal();
      // wait here until we are resumed
      while (1) {
        sigsuspend(&suspend_set);

        os::SuspendResume::State result = osthread->sr.running();
        if (result == os::SuspendResume::SR_RUNNING) {
          sr_semaphore.signal();
          break;
        }
      }

    } else if (state == os::SuspendResume::SR_RUNNING) {
      // request was cancelled, continue
    } else {
      ShouldNotReachHere();
    }

    resume_clear_context(osthread);
  } else if (current == os::SuspendResume::SR_RUNNING) {
    // request was cancelled, continue
  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
    // ignore
  } else {
    // ignore
  }

  errno = old_errno;
}

void os::print_statistics() {
}

// Print title/message to stderr framed by '='/'-' rules, then block until a
// byte can be read from stdin.  Returns nonzero iff the reply starts with
// 'y' or 'Y'.
int os::message_box(const char* title, const char* message) {
  int i;
  fdStream err(defaultStream::error_fd());
  for (i = 0; i < 78; i++) err.print_raw("=");
  err.cr();
  err.print_raw_cr(title);
  for (i = 0; i < 78; i++) err.print_raw("-");
  err.cr();
  err.print_raw_cr(message);
  for (i = 0; i < 78; i++) err.print_raw("=");
  err.cr();

  char buf[16];
  // Prevent process from exiting upon "read error" without consuming all CPU
  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }

  return buf[0] == 'y' || buf[0] == 'Y';
}

// Deliver SIGasync to the target thread to drive the suspend/resume protocol.
static int sr_notify(OSThread* osthread) {
  int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
  assert_status(status == 0, status, "thr_kill");
  return status;
}

// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume
static const int RANDOMLY_LARGE_INTEGER = 1000000;
static const int RANDOMLY_LARGE_INTEGER2 = 100;

// Requester side of suspension: flip the state machine to SUSPEND_REQUEST,
// signal the target, and wait (with timeout/cancellation) until it reports
// SUSPENDED via sr_semaphore.  Returns true iff the thread is now suspended.
static bool do_suspend(OSThread* osthread) {
  assert(osthread->sr.is_running(), "thread should be running");
  assert(!sr_semaphore.trywait(), "semaphore has invalid state");

  // mark as suspended and send signal
  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    ShouldNotReachHere();
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
  while (true) {
    if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
      break;
    } else {
      // timeout
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        // make sure that we consume the signal on the semaphore as well
        sr_semaphore.wait();
        break;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}

// Requester side of resumption: flip the state machine to WAKEUP_REQUEST and
// keep signalling the target until it confirms it is running again.
static void do_resume(OSThread* osthread) {
  assert(osthread->sr.is_suspended(), "thread should be suspended");
  assert(!sr_semaphore.trywait(), "invalid semaphore state");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  while (true) {
    if (sr_notify(osthread) == 0) {
      if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
        if (osthread->sr.is_running()) {
          return;
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}

// Suspend the target thread, run do_task() on its captured context, resume it.
void os::SuspendedThreadTask::internal_do_task() {
  if (do_suspend(_thread->osthread())) {
    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
    do_task(context);
    do_resume(_thread->osthread());
  }
}

// Task that records the suspended thread's program counter.
class PcFetcher : public os::SuspendedThreadTask {
 public:
  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
  ExtendedPC result();
 protected:
  void do_task(const os::SuspendedThreadTaskContext& context);
 private:
  ExtendedPC _epc;
};

ExtendedPC PcFetcher::result() {
  guarantee(is_done(), "task is not done yet.");
  return _epc;
}

void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
  Thread* thread = context.thread();
  OSThread* osthread = thread->osthread();
  if (osthread->ucontext() != NULL) {
    _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
  } else {
    // NULL context is unexpected, double-check this is the VMThread
    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
  }
}

// A lightweight implementation that does not suspend the target thread and
// thus returns only a hint.  Used for profiling only!
ExtendedPC os::get_thread_pc(Thread* thread) {
  // Make sure that it is called by the watcher and the Threads lock is owned.
  assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
  // For now, is only used to profile the VM Thread
  assert(thread->is_VM_thread(), "Can only be called for VMThread");
  PcFetcher fetcher(thread);
  fetcher.run();
  return fetcher.result();
}


// This does not do anything on Solaris. This is basically a hook for being
// able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
void os::os_exception_wrapper(java_call_t f, JavaValue* value,
                              methodHandle* method, JavaCallArguments* args,
                              Thread* thread) {
  f(value, method, args, thread);
}

// This routine may be used by user applications as a "hook" to catch signals.
// The user-defined signal handler must pass unrecognized signals to this
// routine, and if it returns true (non-zero), then the signal handler must
// return immediately.  If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
//
// If this routine returns false, it is OK to call it again.  This allows
// the user-defined signal handler to perform checks either before or after
// the VM performs its own checks.  Naturally, the user code would be making
// a serious error if it tried to handle an exception (such as a null check
// or breakpoint) that the VM was generating for its own correct operation.
//
// This routine may recognize any of the following kinds of signals:
// SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
// os::Solaris::SIGasync
// It should be consulted by handlers for any of those signals.
// It explicitly does not recognize os::Solaris::SIGinterrupt
//
// The caller of this routine must pass in the three arguments supplied
// to the function referred to in the "sa_sigaction" (not the "sa_handler")
// field of the structure passed to sigaction().  This routine assumes that
// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
//
// Note that the VM will print warnings if it detects conflicting signal
// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
//
extern "C" JNIEXPORT int JVM_handle_solaris_signal(int signo,
                                                   siginfo_t* siginfo,
                                                   void* ucontext,
                                                   int abort_if_unrecognized);


void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
  int orig_errno = errno;  // Preserve errno value over signal handler.
3782 JVM_handle_solaris_signal(sig, info, ucVoid, true); 3783 errno = orig_errno; 3784 } 3785 3786 // Do not delete - if guarantee is ever removed, a signal handler (even empty) 3787 // is needed to provoke threads blocked on IO to return an EINTR 3788 // Note: this explicitly does NOT call JVM_handle_solaris_signal and 3789 // does NOT participate in signal chaining due to requirement for 3790 // NOT setting SA_RESTART to make EINTR work. 3791 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) { 3792 if (UseSignalChaining) { 3793 struct sigaction *actp = os::Solaris::get_chained_signal_action(sig); 3794 if (actp && actp->sa_handler) { 3795 vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs"); 3796 } 3797 } 3798 } 3799 3800 // This boolean allows users to forward their own non-matching signals 3801 // to JVM_handle_solaris_signal, harmlessly. 3802 bool os::Solaris::signal_handlers_are_installed = false; 3803 3804 // For signal-chaining 3805 bool os::Solaris::libjsig_is_loaded = false; 3806 typedef struct sigaction *(*get_signal_t)(int); 3807 get_signal_t os::Solaris::get_signal_action = NULL; 3808 3809 struct sigaction* os::Solaris::get_chained_signal_action(int sig) { 3810 struct sigaction *actp = NULL; 3811 3812 if ((libjsig_is_loaded) && (sig <= Maxlibjsigsigs)) { 3813 // Retrieve the old signal handler from libjsig 3814 actp = (*get_signal_action)(sig); 3815 } 3816 if (actp == NULL) { 3817 // Retrieve the preinstalled signal handler from jvm 3818 actp = get_preinstalled_handler(sig); 3819 } 3820 3821 return actp; 3822 } 3823 3824 static bool call_chained_handler(struct sigaction *actp, int sig, 3825 siginfo_t *siginfo, void *context) { 3826 // Call the old signal handler 3827 if (actp->sa_handler == SIG_DFL) { 3828 // It's more reasonable to let jvm treat it as an unexpected exception 3829 // instead of taking the default action. 
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand;
    sa_sigaction_t sa;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      // SA_RESETHAND semantics: the handler is restored to default
      // after one delivery.
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    thr_sigsetmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}

// Entry point used by the VM's handlers: if signal chaining is enabled and
// a pre-existing application handler was saved for sig, invoke it.
// Returns true if the chained handler took care of the signal.
bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
  bool chained = false;
  // signal-chaining
  if (UseSignalChaining) {
    struct sigaction *actp = get_chained_signal_action(sig);
    if (actp != NULL) {
      chained = call_chained_handler(actp, sig, siginfo, context);
    }
  }
  return chained;
}

// Returns the sigaction the application had installed for sig before the
// VM took over, or NULL if none was recorded.
struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
  assert((chainedsigactions != (struct sigaction *)NULL) &&
         (preinstalled_sigs != (int *)NULL), "signals not yet initialized");
  if (preinstalled_sigs[sig] != 0) {
    return &chainedsigactions[sig];
  }
  return NULL;
}

// Record the application's pre-existing sigaction for sig so it can be
// chained to later (see chained_handler above).
void os::Solaris::save_preinstalled_handler(int sig,
                                            struct sigaction& oldAct) {
  assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
  assert((chainedsigactions != (struct sigaction *)NULL) &&
         (preinstalled_sigs != (int *)NULL), "signals not yet initialized");
  chainedsigactions[sig] = oldAct;
  preinstalled_sigs[sig] = 1;
}

// Install the VM's handler for sig. set_installed selects the VM handler
// (vs. leaving the user's); oktochain says whether a pre-existing handler
// may be saved for chaining (false for the VM interrupt signal).
void os::Solaris::set_signal_handler(int sig, bool set_installed,
                                     bool oktochain) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);
  void* oldhand =
      oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                          : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      if (oktochain) {
        // save the old handler in jvm
        save_preinstalled_handler(sig, oldAct);
      } else {
        vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
      }
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
    } else {
      // Unexpected pre-existing handler and chaining is off: refuse to run.
      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
                    "%#lx for signal %d.", (long)oldhand, sig));
    }
  }

  // Build the VM's sigaction: block everything while the handler runs.
  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_handler = SIG_DFL;

  sigAct.sa_sigaction = signalHandler;
  // Handle SIGSEGV on alternate signal stack if
  // not using stack banging
  if (!UseStackBanging && sig == SIGSEGV) {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
  } else if (sig == os::Solaris::SIGinterrupt()) {
    // Interruptible i/o requires SA_RESTART cleared so EINTR
    // is returned instead of restarting system calls
    sigemptyset(&sigAct.sa_mask);
    sigAct.sa_handler = NULL;
    sigAct.sa_flags = SA_SIGINFO;
    sigAct.sa_sigaction = sigINTRHandler;
  } else {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
  }
  // Remember the flags we installed so check_signal_handler() can later
  // detect third-party tampering.
  os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);

  sigaction(sig, &sigAct, &oldAct);

  void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                       : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}


// Check a signal once and remember the result in check_signal_done so the
// (possibly expensive) verification is not repeated every period.
#define DO_SIGNAL_CHECK(sig)                      \
  do {                                            \
    if (!sigismember(&check_signal_done, sig)) {  \
      os::Solaris::check_signal_handler(sig);     \
    }                                             \
  } while (0)

// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here

void os::run_periodic_checks() {
  // A big source of grief is hijacking virt. addr 0x0 on Solaris,
  // thereby preventing NULL checks.
  if (!check_addr0_done) check_addr0_done = check_addr0(tty);

  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check for the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  // See comments above for using JVM1/JVM2 and UseAltSigs
  DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
  DO_SIGNAL_CHECK(os::Solaris::SIGasync());

}

typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

// Cached pointer to the real (non-interposed) sigaction, resolved lazily.
static os_sigaction_t os_sigaction = NULL;

// Verify that the handler currently installed for sig is still the one the
// VM installed, and that its flags were not changed; warn via tty otherwise.
void os::Solaris::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
      ?
      CAST_FROM_FN_PTR(address, act.sa_sigaction)
      : CAST_FROM_FN_PTR(address, act.sa_handler);


  // Determine which handler the VM expects to be installed for this signal.
  switch (sig) {
  case SIGSEGV:
  case SIGBUS:
  case SIGFPE:
  case SIGPIPE:
  case SIGXFSZ:
  case SIGILL:
    jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
    break;

  case SHUTDOWN1_SIGNAL:
  case SHUTDOWN2_SIGNAL:
  case SHUTDOWN3_SIGNAL:
  case BREAK_SIGNAL:
    jvmHandler = (address)user_handler();
    break;

  default:
    int intrsig = os::Solaris::SIGinterrupt();
    int asynsig = os::Solaris::SIGasync();

    if (sig == intrsig) {
      jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
    } else if (sig == asynsig) {
      jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
    } else {
      // Not a signal the VM owns; nothing to verify.
      return;
    }
    break;
  }


  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
    // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
                    exception_name(sig, buf, O_BUFLEN));
    }
  } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
    tty->print_cr(" found:" PTR32_FORMAT, act.sa_flags);
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Print all the signal handler state
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }

}

// Install all of the VM's signal handlers, coordinating with libjsig (the
// signal-chaining interposition library) when it is loaded.
void os::Solaris::install_signal_handlers() {
  bool libjsigdone = false;
  signal_handlers_are_installed = true;

  // signal-chaining
  typedef void (*signal_setting_t)();
  signal_setting_t begin_signal_setting = NULL;
  signal_setting_t end_signal_setting = NULL;
  // Presence of JVM_begin_signal_setting is how we detect that libjsig
  // has been preloaded into this process.
  begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                        dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
  if (begin_signal_setting != NULL) {
    end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                        dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
    get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                                       dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
    get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
                                         dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
    libjsig_is_loaded = true;
    if (os::Solaris::get_libjsig_version != NULL) {
      libjsigversion = (*os::Solaris::get_libjsig_version)();
    }
    assert(UseSignalChaining, "should enable signal-chaining");
  }
  if (libjsig_is_loaded) {
    // Tell libjsig jvm is setting signal handlers
    (*begin_signal_setting)();
  }

  set_signal_handler(SIGSEGV, true, true);
  set_signal_handler(SIGPIPE, true, true);
  set_signal_handler(SIGXFSZ, true, true);
  set_signal_handler(SIGBUS, true, true);
  set_signal_handler(SIGILL, true, true);
  set_signal_handler(SIGFPE, true, true);


  if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {

    // Pre-1.4.1 Libjsig limited to signal chaining signals <= 32 so
    // can not register overridable signals which might be > 32
    if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
      // Tell libjsig jvm has finished setting signal handlers
      (*end_signal_setting)();
      libjsigdone = true;
    }
  }

  // Never ok to chain our SIGinterrupt
  set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
  set_signal_handler(os::Solaris::SIGasync(), true, true);

  if (libjsig_is_loaded && !libjsigdone) {
    // Tell libjsig jvm finishes setting signal handlers
    (*end_signal_setting)();
  }

  // We don't activate signal checker if libjsig is in place, we trust ourselves
  // and if UserSignalHandler is installed all bets are off.
  // Log that signal checking is off only if -verbose:jni is specified.
  if (CheckJNICalls) {
    if (libjsig_is_loaded) {
      if (PrintJNIResolving) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
      }
      check_signals = false;
    }
    if (AllowUserSignalHandlers) {
      if (PrintJNIResolving) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
      }
      check_signals = false;
    }
  }
}


void report_error(const char* file_name, int line_no, const char* title,
                  const char* format, ...);

// Index-by-signal-number table of Solaris signal names; index 0 is unused
// ("SIG0") so that signames[sig] matches the numbering of <signal.h>.
const char * signames[] = {
  "SIG0",
  "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
  "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
  "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
  "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
  "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
  "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
  "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
  "SIGCANCEL", "SIGLOST"
};

// Format a human-readable name for a signal number into buf; signals beyond
// the name table are rendered as "SIG<n>". Returns buf, or NULL if
// exception_code is not a valid signal number.
const char* os::exception_name(int exception_code, char* buf, size_t size) {
  if (0 < exception_code && exception_code <= SIGRTMAX) {
    // signal
    if (exception_code < sizeof(signames)/sizeof(const char*)) {
      jio_snprintf(buf, size, "%s", signames[exception_code]);
    } else {
      jio_snprintf(buf, size, "SIG%d", exception_code);
    }
    return buf;
  } else {
    return
      NULL;
  }
}

// (Static) wrapper for getisax(2) call.
os::Solaris::getisax_func_t os::Solaris::_getisax = 0;

// (Static) wrappers for the liblgrp API
os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;

// (Static) wrapper for meminfo() call.
os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;

// Look up a symbol in the process's global scope; returns NULL when it is
// not present (caller decides whether that is fatal).
static address resolve_symbol_lazy(const char* name) {
  address addr = (address) dlsym(RTLD_DEFAULT, name);
  if (addr == NULL) {
    // RTLD_DEFAULT was not defined on some early versions of 2.5.1
    addr = (address) dlsym(RTLD_NEXT, name);
  }
  return addr;
}

// Like resolve_symbol_lazy, but aborts the VM if the symbol is missing.
static address resolve_symbol(const char* name) {
  address addr = resolve_symbol_lazy(name);
  if (addr == NULL) {
    fatal(dlerror());
  }
  return addr;
}

// Verify the libthread version and record the address range of its internal
// signal-handling trampoline (via thr_sighndlrinfo).
void os::Solaris::libthread_init() {
  address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");

  lwp_priocntl_init();

  // RTLD_DEFAULT was not defined on some early versions of 5.5.1
  if (func == NULL) {
    func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
    // Guarantee that this VM is running on an new enough OS (5.6 or
    // later) that it will have a new enough libthread.so.
    guarantee(func != NULL, "libthread.so is too old.");
  }

  int size;
  void (*handler_info_func)(address *, int *);
  handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
  handler_info_func(&handler_start, &size);
  handler_end = handler_start + size;
}


// Function pointers for the selected mutex implementation (LWP, pthread,
// or thr), filled in by synchronization_init() below.
int_fnP_mutex_tP os::Solaris::_mutex_lock;
int_fnP_mutex_tP os::Solaris::_mutex_trylock;
int_fnP_mutex_tP os::Solaris::_mutex_unlock;
int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
int_fnP_mutex_tP os::Solaris::_mutex_destroy;
int os::Solaris::_mutex_scope = USYNC_THREAD;

// Function pointers for the matching condition-variable implementation.
int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
int_fnP_cond_tP os::Solaris::_cond_signal;
int_fnP_cond_tP os::Solaris::_cond_broadcast;
int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
int_fnP_cond_tP os::Solaris::_cond_destroy;
int os::Solaris::_cond_scope = USYNC_THREAD;

// Select the mutex/condvar primitive family the VM will use, controlled by
// the UseLWPSynchronization and UsePthreads flags.
void os::Solaris::synchronization_init() {
  if (UseLWPSynchronization) {
    os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
    os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
    os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
    os::Solaris::set_mutex_init(lwp_mutex_init);
    os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
    os::Solaris::set_mutex_scope(USYNC_THREAD);

    os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
    os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
    os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
    os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
    os::Solaris::set_cond_init(lwp_cond_init);
    os::Solaris::set_cond_destroy(lwp_cond_destroy);
    os::Solaris::set_cond_scope(USYNC_THREAD);
  } else {
    os::Solaris::set_mutex_scope(USYNC_THREAD);
    os::Solaris::set_cond_scope(USYNC_THREAD);

    if (UsePthreads) {
      // POSIX threads flavor of the primitives.
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
      os::Solaris::set_mutex_init(pthread_mutex_default_init);
      os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
      os::Solaris::set_cond_init(pthread_cond_default_init);
      os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
    } else {
      // Default: Solaris threads (libthread) flavor of the primitives.
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
      os::Solaris::set_mutex_init(::mutex_init);
      os::Solaris::set_mutex_destroy(::mutex_destroy);

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
      os::Solaris::set_cond_init(::cond_init);
      os::Solaris::set_cond_destroy(::cond_destroy);
    }
  }
}

// Dynamically load liblgrp (locality groups, used for NUMA support) and
// resolve its entry points. Returns false if the library is unavailable,
// in which case NUMA support is disabled by the caller.
bool os::Solaris::liblgrp_init() {
  void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
  if (handle != NULL) {
    os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
    os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
    os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
    os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
    os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
    os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
    os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
    os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
                                                      dlsym(handle, "lgrp_cookie_stale")));

    lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
    set_lgrp_cookie(c);
    return true;
  }
  return false;
}

// Resolve optional OS entry points (getisax, meminfo) that may not exist on
// older Solaris releases; absence is tolerated.
void os::Solaris::misc_sym_init() {
  address func;

  // getisax
  func = resolve_symbol_lazy("getisax");
  if (func != NULL) {
    os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
  }

  // meminfo
  func = resolve_symbol_lazy("meminfo");
  if (func != NULL) {
    os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
  }
}

// Wrapper around the getisax(2) instruction-set-extension query; only valid
// after misc_sym_init() resolved the symbol.
uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
  assert(_getisax != NULL, "_getisax not set");
  return _getisax(array, n);
}

// int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
static pset_getloadavg_type pset_getloadavg_ptr = NULL;

// Lazily resolve pset_getloadavg; a missing symbol is only reported when
// verbose diagnostics are enabled.
void init_pset_getloadavg_ptr(void) {
  pset_getloadavg_ptr =
    (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
  if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
    warning("pset_getloadavg function not found");
  }
}

int os::Solaris::_dev_zero_fd = -1;

// this is called _before_ the global arguments have been parsed
void os::init(void) {
  _initial_pid = getpid();

  max_hrtime = first_hrtime = gethrtime();

  init_random(1234567);

  page_size = sysconf(_SC_PAGESIZE);
  if (page_size == -1) {
    fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
                  strerror(errno)));
  }
  init_page_sizes((size_t) page_size);

  Solaris::initialize_system_info();

  // Initialize misc. symbols as soon as possible, so we can use them
  // if we need them.
  Solaris::misc_sym_init();

  // Keep a /dev/zero descriptor open for the lifetime of the VM (used for
  // anonymous mappings).
  int fd = ::open("/dev/zero", O_RDWR);
  if (fd < 0) {
    fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
  } else {
    Solaris::set_dev_zero_fd(fd);

    // Close on exec, child won't inherit.
    fcntl(fd, F_SETFD, FD_CLOEXEC);
  }

  clock_tics_per_sec = CLK_TCK;

  // check if dladdr1() exists; dladdr1 can provide more information than
  // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
  // and is available on linker patches for 5.7 and 5.8.
  // libdl.so must have been loaded, this call is just an entry lookup
  void * hdl = dlopen("libdl.so", RTLD_NOW);
  if (hdl) {
    dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
  }

  // (Solaris only) this switches to calls that actually do locking.
  ThreadCritical::initialize();

  main_thread = thr_self();

  // Constant minimum stack size allowed. It must be at least
  // the minimum of what the OS supports (thr_min_stack()), and
  // enough to allow the thread to get to user bytecode execution.
  Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
  // If the pagesize of the VM is greater than 8K determine the appropriate
  // number of initial guard pages. The user can change this with the
  // command line arguments, if needed.
  if (vm_page_size() > 8*K) {
    StackYellowPages = 1;
    StackRedPages = 1;
    StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
  }
}

// To install functions for atexit system call
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
  // try to enable extended file IO ASAP, see 6431278
  os::Solaris::try_enable_extended_io();

  // Allocate a single page and mark it as readable for safepoint polling. Also
  // use this first mmap call to check support for MAP_ALIGN.
  address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
                                                      page_size,
                                                      MAP_PRIVATE | MAP_ALIGN,
                                                      PROT_READ);
  if (polling_page == NULL) {
    // MAP_ALIGN not supported on this release; retry without it.
    has_map_align = false;
    polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
                                                PROT_READ);
  }

  os::set_polling_page(polling_page);

#ifndef PRODUCT
  if (Verbose && PrintMiscellaneous) {
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n",
               (intptr_t)polling_page);
  }
#endif

  if (!UseMembar) {
    // Page used to serialize memory between threads in lieu of membar.
    address mem_serialize_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE);
    guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
    os::set_memory_serialize_page(mem_serialize_page);

#ifndef PRODUCT
    if (Verbose && PrintMiscellaneous) {
      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n",
                 (intptr_t)mem_serialize_page);
    }
#endif
  }

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size. Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
                                        (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
                                        2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);

  size_t threadStackSizeInBytes = ThreadStackSize * K;
  if (threadStackSizeInBytes != 0 &&
      threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
                  os::Solaris::min_stack_allowed/K);
    return JNI_ERR;
  }

  // For 64kbps there will be a 64kb page size, which makes
  // the usable default stack size quite a bit less. Increase the
  // stack for 64kb (or any > than 8kb) pages, this increases
  // virtual memory fragmentation (since we're not creating the
  // stack on a power of 2 boundary. The real fix for this
  // should be to fix the guard page mechanism.

  if (vm_page_size() > 8*K) {
    threadStackSizeInBytes = (threadStackSizeInBytes != 0)
       ? threadStackSizeInBytes +
         ((StackYellowPages + StackRedPages) * vm_page_size())
       : 0;
    ThreadStackSize = threadStackSizeInBytes/K;
  }

  // Make the stack size a multiple of the page size so that
  // the yellow/red zones can be guarded.
  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
                                                vm_page_size()));

  Solaris::libthread_init();

  if (UseNUMA) {
    if (!Solaris::liblgrp_init()) {
      UseNUMA = false;
    } else {
      size_t lgrp_limit = os::numa_get_groups_num();
      int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
      size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
      FREE_C_HEAP_ARRAY(int, lgrp_ids);
      if (lgrp_num < 2) {
        // There's only one locality group, disable NUMA.
        UseNUMA = false;
      }
    }
    if (!UseNUMA && ForceNUMA) {
      UseNUMA = true;
    }
  }

  // Order matters: signal sets and memory must exist before the handlers
  // are installed.
  Solaris::signal_sets_init();
  Solaris::init_signal_mem();
  Solaris::install_signal_handlers();

  if (libjsigversion < JSIG_VERSION_1_4_1) {
    Maxlibjsigsigs = OLDMAXSIGNUM;
  }

  // initialize synchronization primitives to use either thread or
  // lwp synchronization (controlled by UseLWPSynchronization)
  Solaris::synchronization_init();

  if (MaxFDLimit) {
    // set the number of file descriptors to max. print out error
    // if getrlimit/setrlimit fails but continue regardless.
    struct rlimit nbr_files;
    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
    if (status != 0) {
      if (PrintMiscellaneous && (Verbose || WizardMode)) {
        perror("os::init_2 getrlimit failed");
      }
    } else {
      nbr_files.rlim_cur = nbr_files.rlim_max;
      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
      if (status != 0) {
        if (PrintMiscellaneous && (Verbose || WizardMode)) {
          perror("os::init_2 setrlimit failed");
        }
      }
    }
  }

  // Calculate theoretical max. size of Threads to guard against
  // artificial out-of-memory situations, where all available address-
  // space has been reserved by thread stacks. Default stack size is 1Mb.
  size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
    JavaThread::stack_size_at_create() : (1*K*K);
  assert(pre_thread_stack_size != 0, "Must have a stack");
  // Solaris has a maximum of 4Gb of user programs. Calculate the thread limit when
  // we should start doing Virtual Memory banging. Currently when the threads will
  // have used all but 200Mb of space.
  size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
  Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;

  // at-exit methods are called in the reverse order of their registration.
  // In Solaris 7 and earlier, atexit functions are called on return from
  // main or as a result of a call to exit(3C). There can be only 32 of
  // these functions registered and atexit() does not set errno. In Solaris
  // 8 and later, there is no limit to the number of functions registered
  // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
  // functions are called upon dlclose(3DL) in addition to return from main
  // and exit(3C).

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init2 atexit(perfMemory_exit_helper) failed");
    }
  }

  // Init pset_loadavg function pointer
  init_pset_getloadavg_ptr();

  return JNI_OK;
}

// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
  if (mprotect((char *)_polling_page, page_size, PROT_NONE) != 0) {
    fatal("Could not disable polling page");
  }
}

// Mark the polling page as readable
void os::make_polling_page_readable(void) {
  if (mprotect((char *)_polling_page, page_size, PROT_READ) != 0) {
    fatal("Could not enable polling page");
  }
}

// OS interface.

// No OS-level heap verification on Solaris; always reports success.
bool os::check_heap(bool force) { return true; }

// Is a (classpath) directory empty?
// Is a (classpath) directory empty?  Returns true when the directory cannot
// be opened at all, or when it contains no entries besides "." and "..".
bool os::dir_is_empty(const char* path) {
  DIR *dir = NULL;
  struct dirent *ptr;

  dir = opendir(path);
  if (dir == NULL) return true;

  // Scan the directory
  bool result = true;
  // Solaris' two-argument readdir() fills a caller-supplied buffer; size it
  // for the dirent header plus a maximum-length entry name.
  char buf[sizeof(struct dirent) + MAX_PATH];
  struct dirent *dbuf = (struct dirent *) buf;
  while (result && (ptr = readdir(dir, dbuf)) != NULL) {
    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
      result = false;
    }
  }
  closedir(dir);
  return result;
}

// This code originates from JDK's sysOpen and open64_w
// from src/solaris/hpi/src/system_md.c

// Open a file with large-file support, rejecting directories and setting
// FD_CLOEXEC.  Returns the file descriptor, or -1 with errno set.
int os::open(const char *path, int oflag, int mode) {
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  int fd;

  fd = ::open64(path, oflag, mode);
  if (fd == -1) return -1;

  // If the open succeeded, the file might still be a directory
  {
    struct stat64 buf64;
    int ret = ::fstat64(fd, &buf64);
    int st_mode = buf64.st_mode;

    if (ret != -1) {
      if ((st_mode & S_IFMT) == S_IFDIR) {
        errno = EISDIR;
        ::close(fd);
        return -1;
      }
    } else {
      // fstat64 itself failed; don't leak the descriptor.
      ::close(fd);
      return -1;
    }
  }

  // 32-bit Solaris systems suffer from:
  //
  // - an historical default soft limit of 256 per-process file
  //   descriptors that is too low for many Java programs.
  //
  // - a design flaw where file descriptors created using stdio
  //   fopen must be less than 256, _even_ when the first limit above
  //   has been raised.  This can cause calls to fopen (but not calls to
  //   open, for example) to fail mysteriously, perhaps in 3rd party
  //   native code (although the JDK itself uses fopen).  One can hardly
  //   criticize them for using this most standard of all functions.
  //
  // We attempt to make everything work anyways by:
  //
  // - raising the soft limit on per-process file descriptors beyond
  //   256
  //
  // - As of Solaris 10u4, we can request that Solaris raise the 256
  //   stdio fopen limit by calling function enable_extended_FILE_stdio.
  //   This is done in init_2 and recorded in enabled_extended_FILE_stdio
  //
  // - If we are stuck on an old (pre 10u4) Solaris system, we can
  //   workaround the bug by remapping non-stdio file descriptors below
  //   256 to ones beyond 256, which is done below.
  //
  // See:
  // 1085341: 32-bit stdio routines should support file descriptors >255
  // 6533291: Work around 32-bit Solaris stdio limit of 256 open files
  // 6431278: Netbeans crash on 32 bit Solaris: need to call
  //          enable_extended_FILE_stdio() in VM initialisation
  // Giri Mandalika's blog
  // http://technopark02.blogspot.com/2005_05_01_archive.html
  //
#ifndef _LP64
  // Remap a low descriptor above 256 so stdio stays usable (pre-10u4 only).
  if ((!enabled_extended_FILE_stdio) && fd < 256) {
    int newfd = ::fcntl(fd, F_DUPFD, 256);
    if (newfd != -1) {
      ::close(fd);
      fd = newfd;
    }
  }
#endif // 32-bit Solaris

  // All file descriptors that are opened in the JVM and not
  // specifically destined for a subprocess should have the
  // close-on-exec flag set.  If we don't set it, then careless 3rd
  // party native code might fork and exec without closing all
  // appropriate file descriptors (e.g. as we do in closeDescriptors in
  // UNIXProcess.c), and this in turn might:
  //
  // - cause end-of-file to fail to be detected on some file
  //   descriptors, resulting in mysterious hangs, or
  //
  // - might cause an fopen in the subprocess to fail on a system
  //   suffering from bug 1085341.
  //
  // (Yes, the default setting of the close-on-exec flag is a Unix
  // design flaw)
  //
  // See:
  // 1085341: 32-bit stdio routines should support file descriptors >255
  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
  //
#ifdef FD_CLOEXEC
  {
    int flags = ::fcntl(fd, F_GETFD);
    if (flags != -1) {
      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
    }
  }
#endif

  return fd;
}

// create binary file, rewriting existing file if required
// (O_EXCL makes the create fail if the file already exists and
// rewrite_existing is false).
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = O_WRONLY | O_CREAT;
  if (!rewrite_existing) {
    oflags |= O_EXCL;
  }
  return ::open64(path, oflags, S_IREAD | S_IWRITE);
}

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
}

// 64-bit-clean lseek wrapper.
jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::lseek64(fd, offset, whence);
}

// No path translation needed on Solaris; identity function.
char * os::native_path(char *path) {
  return path;
}

int os::ftruncate(int fd, jlong length) {
  return ::ftruncate64(fd, length);
}

// fsync, restarted on EINTR.
int os::fsync(int fd) {
  RESTARTABLE_RETURN_INT(::fsync(fd));
}

// Number of bytes available to read without blocking.  Uses FIONREAD for
// character/pipe/socket descriptors and seek arithmetic for regular files.
// Returns 1 on success (with *bytes set), 0 on failure.
int os::available(int fd, jlong *bytes) {
  assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
         "Assumed _thread_in_native");
  jlong cur, end;
  int mode;
  struct stat64 buf64;

  if (::fstat64(fd, &buf64) >= 0) {
    mode = buf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      int n,ioctl_return;

      RESTARTABLE(::ioctl(fd, FIONREAD, &n), ioctl_return);
      if (ioctl_return>= 0) {
        *bytes = n;
        return 1;
      }
    }
  }
  // Fall back: remember position, seek to end, restore position.
  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
    return 0;
  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
    return 0;
  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
    return 0;
  }
  *bytes = end - cur;
  return 1;
}

// Map a block of memory.
// read_only files are mapped MAP_SHARED, writable ones MAP_PRIVATE
// (copy-on-write); a non-NULL addr requests MAP_FIXED placement.
// Returns the mapped address or NULL on failure.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {
  int prot;
  int flags;

  if (read_only) {
    prot = PROT_READ;
    flags = MAP_SHARED;
  } else {
    prot = PROT_READ | PROT_WRITE;
    flags = MAP_PRIVATE;
  }

  if (allow_exec) {
    prot |= PROT_EXEC;
  }

  if (addr != NULL) {
    flags |= MAP_FIXED;
  }

  char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
                                     fd, file_offset);
  if (mapped_address == MAP_FAILED) {
    return NULL;
  }
  return mapped_address;
}


// Remap a block of memory.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // same as map_memory() on this OS
  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                        allow_exec);
}


// Unmap a block of memory.
4851 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4852 return munmap(addr, bytes) == 0; 4853 } 4854 4855 void os::pause() { 4856 char filename[MAX_PATH]; 4857 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4858 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4859 } else { 4860 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4861 } 4862 4863 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4864 if (fd != -1) { 4865 struct stat buf; 4866 ::close(fd); 4867 while (::stat(filename, &buf) == 0) { 4868 (void)::poll(NULL, 0, 100); 4869 } 4870 } else { 4871 jio_fprintf(stderr, 4872 "Could not open pause file '%s', continuing immediately.\n", filename); 4873 } 4874 } 4875 4876 #ifndef PRODUCT 4877 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS 4878 // Turn this on if you need to trace synch operations. 4879 // Set RECORD_SYNCH_LIMIT to a large-enough value, 4880 // and call record_synch_enable and record_synch_disable 4881 // around the computation of interest. 
// Forward declaration; the recording sink is defined below.
void record_synch(char* name, bool returning); // defined below

// RAII tracer: records entry to a synch primitive in its constructor and
// the return from it in its destructor.
class RecordSynch {
  char* _name;
 public:
  RecordSynch(char* name) :_name(name) { record_synch(_name, false); }
  ~RecordSynch()                       { record_synch(_name, true); }
};

// Interpose on a libc/libthread synch entry point: resolve the real
// implementation via dlsym(RTLD_NEXT), run the supplied checking code
// ('inner'), trace the call, then forward to the real function.
#define CHECK_SYNCH_OP(ret, name, params, args, inner) \
extern "C" ret name params { \
  typedef ret name##_t params; \
  static name##_t* implem = NULL; \
  static int callcount = 0; \
  if (implem == NULL) { \
    implem = (name##_t*) dlsym(RTLD_NEXT, #name); \
    if (implem == NULL) fatal(dlerror()); \
  } \
  ++callcount; \
  RecordSynch _rs(#name); \
  inner; \
  return implem args; \
}
// in dbx, examine callcounts this way:
// for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done

// Sanity checks applied by the interposers: the synch objects must live in
// the C heap, never inside the reserved Java heap.
#define CHECK_POINTER_OK(p) \
  (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
#define CHECK_MU \
  if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
#define CHECK_CV \
  if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
#define CHECK_P(p) \
  if (!CHECK_POINTER_OK(p)) fatal(false, "Pointer must be in C heap only.");

#define CHECK_MUTEX(mutex_op) \
  CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);

CHECK_MUTEX(  mutex_lock)
CHECK_MUTEX( _mutex_lock)
CHECK_MUTEX(  mutex_unlock)
CHECK_MUTEX(_mutex_unlock)
CHECK_MUTEX(  mutex_trylock)
CHECK_MUTEX(_mutex_trylock)

#define CHECK_COND(cond_op) \
  CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU; CHECK_CV);

CHECK_COND( cond_wait);
CHECK_COND(_cond_wait);
CHECK_COND(_cond_wait_cancel);

#define CHECK_COND2(cond_op) \
  CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU; CHECK_CV);

CHECK_COND2( cond_timedwait);
CHECK_COND2(_cond_timedwait);
CHECK_COND2(_cond_timedwait_cancel);

// do the _lwp_* versions too
#define mutex_t lwp_mutex_t
#define cond_t lwp_cond_t
CHECK_MUTEX(  _lwp_mutex_lock)
CHECK_MUTEX(  _lwp_mutex_unlock)
CHECK_MUTEX(  _lwp_mutex_trylock)
CHECK_MUTEX( __lwp_mutex_lock)
CHECK_MUTEX( __lwp_mutex_unlock)
CHECK_MUTEX( __lwp_mutex_trylock)
CHECK_MUTEX(___lwp_mutex_lock)
CHECK_MUTEX(___lwp_mutex_unlock)

CHECK_COND(  _lwp_cond_wait);
CHECK_COND( __lwp_cond_wait);
CHECK_COND(___lwp_cond_wait);

CHECK_COND2( _lwp_cond_timedwait);
CHECK_COND2(__lwp_cond_timedwait);
#undef mutex_t
#undef cond_t

CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);


// recording machinery:
// Fixed-size in-memory trace examined from a debugger (see dbx hint below).
// Intentionally unsynchronized — this is a non-product diagnostic aid only.

enum { RECORD_SYNCH_LIMIT = 200 };
char* record_synch_name[RECORD_SYNCH_LIMIT];
void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
bool record_synch_returning[RECORD_SYNCH_LIMIT];
thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
int record_synch_count = 0;
bool record_synch_enabled = false;

// in dbx, examine recorded data this way:
// for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done

// Append one trace entry (operation name, direction, calling thread) if
// tracing is enabled and the buffer is not yet full.
void record_synch(char* name, bool returning) {
  if (record_synch_enabled) {
    if (record_synch_count < RECORD_SYNCH_LIMIT) {
      record_synch_name[record_synch_count] = name;
      record_synch_returning[record_synch_count] = returning;
      record_synch_thread[record_synch_count] = thr_self();
      record_synch_arg0ptr[record_synch_count] = &name;
      record_synch_count++;
    }
    // put more checking code here:
    // ...
  }
}

void record_synch_enable() {
  // start collecting trace data, if not already doing so
  if (!record_synch_enabled) record_synch_count = 0;
  record_synch_enabled = true;
}

void record_synch_disable() {
  // stop collecting trace data
  record_synch_enabled = false;
}

#endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
#endif // PRODUCT

// Byte offset and length of the pr_utime..pr_ttime slice within a
// prusage_t; used below to pread() only that slice from /proc.
const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
                               (intptr_t)(&((prusage_t *)(NULL))->pr_utime);


// JVMTI & JVM monitoring and management support
// The thread_cpu_time() and current_thread_cpu_time() are only
// supported if is_thread_cpu_time_supported() returns true.
// They are not supported on Solaris T1.

// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread *)
// returns the fast estimate available on the platform.

// hrtime_t gethrvtime() return value includes
// user time but does not include system time
jlong os::current_thread_cpu_time() {
  return (jlong) gethrvtime();
}

jlong os::thread_cpu_time(Thread *thread) {
  // return user level CPU time only to be consistent with
  // what current_thread_cpu_time returns.
  // thread_cpu_time_info() must be changed if this changes
  return os::thread_cpu_time(thread, false /* user time only */);
}

jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  if (user_sys_cpu_time) {
    return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
  } else {
    return os::current_thread_cpu_time();
  }
}

// Read the target thread's CPU usage from its /proc lwpusage file.
// Returns user+system or user-only time in nanoseconds, or -1 on failure.
jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  char proc_name[64];
  int count;
  prusage_t prusage;
  jlong lwp_time;
  int fd;

  sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
          getpid(),
          thread->osthread()->lwp_id());
  fd = ::open(proc_name, O_RDONLY);
  if (fd == -1) return -1;

  // Read only the pr_utime..pr_ttime slice of the prusage_t; retry on EINTR.
  do {
    count = ::pread(fd,
                    (void *)&prusage.pr_utime,
                    thr_time_size,
                    thr_time_off);
  } while (count < 0 && errno == EINTR);
  ::close(fd);
  if (count < 0) return -1;

  if (user_sys_cpu_time) {
    // user + system CPU time
    lwp_time = (((jlong)prusage.pr_stime.tv_sec +
                 (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
               (jlong)prusage.pr_stime.tv_nsec +
               (jlong)prusage.pr_utime.tv_nsec;
  } else {
    // user level CPU time only
    lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
               (jlong)prusage.pr_utime.tv_nsec;
  }

  return (lwp_time);
}

void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;    // elapsed time not wall time
  info_ptr->may_skip_forward = false;     // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
}

void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;    // elapsed time not wall time
  info_ptr->may_skip_forward = false;     // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
}

bool os::is_thread_cpu_time_supported() {
  return true;
}

// System loadavg support.  Returns -1 if load average cannot be obtained.
// Return the load average for our processor set if the primitive exists
// (Solaris 9 and later).  Otherwise just return system wide loadavg.
int os::loadavg(double loadavg[], int nelem) {
  if (pset_getloadavg_ptr != NULL) {
    return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
  } else {
    return ::getloadavg(loadavg, nelem);
  }
}

//---------------------------------------------------------------------------------

// Print symbolic information for a code address: symbol+offset and the
// containing library (via dladdr), plus a short disassembly when Verbose.
// Returns false when dladdr cannot resolve the address at all.
bool os::find(address addr, outputStream* st) {
  Dl_info dlinfo;
  memset(&dlinfo, 0, sizeof(dlinfo));
  if (dladdr(addr, &dlinfo) != 0) {
    st->print(PTR_FORMAT ": ", addr);
    if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
      st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
    } else if (dlinfo.dli_fbase != NULL) {
      st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
    } else {
      st->print("<absolute address>");
    }
    if (dlinfo.dli_fname != NULL) {
      st->print(" in %s", dlinfo.dli_fname);
    }
    if (dlinfo.dli_fbase != NULL) {
      st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
    }
    st->cr();

    if (Verbose) {
      // decode some bytes around the PC
      address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
      address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
      address lowest = (address) dlinfo.dli_sname;
      if (!lowest) lowest = (address) dlinfo.dli_fbase;
      if (begin < lowest) begin = lowest;
      Dl_info dlinfo2;
      // If the next symbol starts inside our window, stop decoding there so
      // we do not disassemble into an unrelated function.
      if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) {
        end = (address) dlinfo2.dli_saddr;
      }
      Disassembler::decode(begin, end, st);
    }
    return true;
  }
  return false;
}

// Following function has been added to support HotSparc's libjvm.so running
// under Solaris production JDK 1.2.2 / 1.3.0.  These came from
// src/solaris/hpi/native_threads in the EVM codebase.
//
// NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
// libraries and should thus be removed. We will leave it behind for a while
// until we no longer want to able to run on top of 1.3.0 Solaris production
// JDK. See 4341971.

#define STACK_SLACK 0x800

extern "C" {
  // Remaining usable stack for the calling thread, minus a slack margin,
  // computed from thr_stksegment()'s base/size and the current SP proxy
  // (the address of a local).
  intptr_t sysThreadAvailableStackWithSlack() {
    stack_t st;
    intptr_t retval, stack_top;
    retval = thr_stksegment(&st);
    assert(retval == 0, "incorrect return value from thr_stksegment");
    assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
    assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
    stack_top=(intptr_t)st.ss_sp-st.ss_size;
    return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
  }
}

// ObjectMonitor park-unpark infrastructure ...
//
// We implement Solaris and Linux PlatformEvents with the
// obvious condvar-mutex-flag triple.
// Another alternative that works quite well is pipes:
// Each PlatformEvent consists of a pipe-pair.
// The thread associated with the PlatformEvent
// calls park(), which reads from the input end of the pipe.
// Unpark() writes into the other end of the pipe.
// The write-side of the pipe must be set NDELAY.
// Unfortunately pipes consume a large # of handles.
// Native solaris lwp_park() and lwp_unpark() work nicely, too.
// Using pipes for the 1st few threads might be workable, however.
//
// park() is permitted to return spuriously.
// Callers of park() should wrap the call to park() in
// an appropriate loop.  A litmus test for the correct
// usage of park is the following: if park() were modified
// to immediately return 0 your code should still work,
// albeit degenerating to a spin loop.
//
// In a sense, park()-unpark() just provides more polite spinning
// and polling with the key difference over naive spinning being
// that a parked thread needs to be explicitly unparked() in order
// to wake up and to poll the underlying condition.
//
// Assumption:
//    Only one parker can exist on an event, which is why we allocate
//    them per-thread. Multiple unparkers can coexist.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block; then set _Event to 0 before returning
//
// _Event transitions in unpark()
//    0 => 1 : just return
//    1 => 1 : just return
//   -1 => either 0 or 1; must signal target thread
//         That is, we can safely transition _Event from -1 to either
//         0 or 1.
//
// _Event serves as a restricted-range semaphore.
//   -1 : thread is blocked, i.e. there is a waiter
//    0 : neutral: thread is running or ready,
//        could have been signaled after a wait started
//    1 : signaled - thread is running or ready
//
// Another possible encoding of _Event would be with
// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
//
// TODO-FIXME: add DTRACE probes for:
//   1.   Tx parks
//   2.   Ty unparks Tx
//   3.   Tx resumes from park


// value determined through experimentation
#define ROUNDINGFIX 11

// utility to compute the abstime argument to timedwait.
// TODO-FIXME: switch from compute_abstime() to unpackTime().

// Convert a relative timeout in milliseconds into the absolute timestruc_t
// expected by cond_timedwait(); also clamps the wait period and applies the
// old Solaris rounding workaround.  Returns its 'abstime' argument.
static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
  // millis is the relative timeout time
  // abstime will be the absolute timeout time
  if (millis < 0) millis = 0;
  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");
  jlong seconds = millis / 1000;
  jlong max_wait_period;

  if (UseLWPSynchronization) {
    // forward port of fix for 4275818 (not sleeping long enough)
    // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
    // _lwp_cond_timedwait() used a round_down algorithm rather
    // than a round_up. For millis less than our roundfactor
    // it rounded down to 0 which doesn't meet the spec.
    // For millis > roundfactor we may return a bit sooner, but
    // since we can not accurately identify the patch level and
    // this has already been fixed in Solaris 9 and 8 we will
    // leave it alone rather than always rounding down.

    if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
    // It appears that when we go directly through Solaris _lwp_cond_timedwait()
    // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
    max_wait_period = 21000000;
  } else {
    max_wait_period = 50000000;
  }
  millis %= 1000;
  if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
    seconds = max_wait_period;
  }
  abstime->tv_sec = now.tv_sec + seconds;
  long usec = now.tv_usec + millis * 1000;
  if (usec >= 1000000) {
    // carry microsecond overflow into the seconds field
    abstime->tv_sec += 1;
    usec -= 1000000;
  }
  abstime->tv_nsec = usec * 1000;
  return abstime;
}

void os::PlatformEvent::park() {           // AKA: down()
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  assert(_nParked == 0, "invariant");

  // Lock-free decrement of _Event: 1 -> 0 consumes the permit, 0 -> -1
  // announces that we are about to block.
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee(v >= 0, "invariant");
  if (v == 0) {
    // Do this the hard way by blocking ...
    // See http://monaco.sfbay/detail.jsf?cr=5094058.
    // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
    // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
    if (ClearFPUAtPark) { _mark_fpu_nosave(); }
#endif
    int status = os::Solaris::mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee(_nParked == 0, "invariant");
    ++_nParked;
    while (_Event < 0) {
      // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
      // Treat this the same as if the wait was interrupted
      // With usr/lib/lwp going to kernel, always handle ETIME
      status = os::Solaris::cond_wait(_cond, _mutex);
      if (status == ETIME) status = EINTR;
      assert_status(status == 0 || status == EINTR, status, "cond_wait");
    }
    --_nParked;
    _Event = 0;
    status = os::Solaris::mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other.
    OrderAccess::fence();
  }
}

// Timed park; returns OS_OK if signaled, OS_TIMEOUT on timeout.
int os::PlatformEvent::park(jlong millis) {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_nParked == 0, "invariant");
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee(v >= 0, "invariant");
  if (v != 0) return OS_OK;  // permit was available: consume and return

  int ret = OS_TIMEOUT;
  timestruc_t abst;
  compute_abstime(&abst, millis);

  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // For Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave(); }
#endif
  int status = os::Solaris::mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee(_nParked == 0, "invariant");
  ++_nParked;
  while (_Event < 0) {
    int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
    assert_status(status == 0 || status == EINTR ||
                  status == ETIME || status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (!FilterSpuriousWakeups) break;    // previous semantics
    if (status == ETIME || status == ETIMEDOUT) break;
    // We consume and ignore EINTR and spurious wakeups.
  }
  --_nParked;
  if (_Event >= 0) ret = OS_OK;
  _Event = 0;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other.
  OrderAccess::fence();
  return ret;
}

void os::PlatformEvent::unpark() {
  // Transitions for _Event:
  //    0 => 1 : just return
  //    1 => 1 : just return
  //   -1 => either 0 or 1; must signal target thread
  //         That is, we can safely transition _Event from -1 to either
  //         0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  if (Atomic::xchg(1, &_Event) >= 0) return;

  // If the thread associated with the event was parked, wake it.
  // Wait for the thread assoc with the PlatformEvent to vacate.
  int status = os::Solaris::mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  int AnyWaiters = _nParked;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
  if (AnyWaiters != 0) {
    // Note that we signal() *after* dropping the lock for "immortal" Events.
    // This is safe and avoids a common class of futile wakeups.  In rare
    // circumstances this can cause a thread to return prematurely from
    // cond_{timed}wait() but the spurious wakeup is benign and the victim
    // will simply re-test the condition and re-park itself.
    // This provides particular benefit if the underlying platform does not
    // provide wait morphing.
    status = os::Solaris::cond_signal(_cond);
    assert_status(status == 0, status, "cond_signal");
  }
}

// JSR166
// -------------------------------------------------------

// The solaris and linux implementations of park/unpark are fairly
// conservative for now, but can be improved. They currently use a
// mutex/condvar pair, plus _counter.
// Park decrements _counter if > 0, else does a condvar wait.  Unpark
// sets count to 1 and signals condvar.  Only one thread ever waits
// on the condvar. Contention seen when trying to park implies that someone
// is unparking you, so don't wait. And spurious returns are fine, so there
// is no need to track notifications.

#define MAX_SECS 100000000

// This code is common to linux and solaris and will be moved to a
// common place in dolphin.
//
// The passed in time value is either a relative time in nanoseconds
// or an absolute time in milliseconds. Either way it has to be unpacked
// into suitable seconds and nanoseconds components and stored in the
// given timespec structure.
// Given time is a 64-bit value and the time_t used in the timespec is only
// a signed-32-bit value (except on 64-bit Linux) we have to watch for
// overflow if times way in the future are given. Further on Solaris versions
// prior to 10 there is a restriction (see cond_timedwait) that the specified
// number of seconds, in abstime, is less than current_time  + 100,000,000.
// As it will be 28 years before "now + 100000000" will overflow we can
// ignore overflow and just impose a hard-limit on seconds using the value
// of "now + 100,000,000". This places a limit on the timeout of about 3.17
// years from "now".
//
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert(time > 0, "convertTime");

  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");

  time_t max_secs = now.tv_sec + MAX_SECS;

  if (isAbsolute) {
    // 'time' is an absolute deadline in milliseconds since the epoch.
    jlong secs = time / 1000;
    if (secs > max_secs) {
      absTime->tv_sec = max_secs;
    } else {
      absTime->tv_sec = secs;
    }
    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  } else {
    // 'time' is a relative timeout in nanoseconds.
    jlong secs = time / NANOSECS_PER_SEC;
    if (secs >= MAX_SECS) {
      absTime->tv_sec = max_secs;
      absTime->tv_nsec = 0;
    } else {
      absTime->tv_sec = now.tv_sec + secs;
      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
        absTime->tv_nsec -= NANOSECS_PER_SEC;
        ++absTime->tv_sec; // note: this must be <= max_secs
      }
    }
  }
  assert(absTime->tv_sec >= 0, "tv_sec < 0");
  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
}

// JSR166 Parker::park: wait for a permit, an unpark(), an interrupt, or
// (when time != 0) the given absolute/relative timeout.
void Parker::park(bool isAbsolute, jlong time) {
  // Ideally we'd do something useful while spinning, such
  // as calling unpackTime().

  // Optional fast-path check:
  // Return immediately if a permit is available.
  // We depend on Atomic::xchg() having full barrier semantics
  // since we are doing a lock-free update to _counter.
  if (Atomic::xchg(0, &_counter) > 0) return;

  // Optional fast-exit: Check interrupt before trying to wait
  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // First, demultiplex/decode time arguments
  timespec absTime;
  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
    return;
  }
  if (time > 0) {
    // Warning: this code might be exposed to the old Solaris time
    // round-down bugs.  Grep "roundingFix" for details.
    unpackTime(&absTime, isAbsolute, time);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: _mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex.  If safepoints are pending both the
  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if cannot get lock since interference arises from
  // unblocking.  Also check interrupt before trying wait
  if (Thread::is_interrupted(thread, false) ||
      os::Solaris::mutex_trylock(_mutex) != 0) {
    return;
  }

  int status;

  if (_counter > 0) { // no wait needed
    _counter = 0;
    status = os::Solaris::mutex_unlock(_mutex);
    assert(status == 0, "invariant");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other and Java-level accesses.
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
  thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  // Do this the hard way by blocking ...
  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave(); }
#endif

  if (time == 0) {
    status = os::Solaris::cond_wait(_cond, _mutex);
  } else {
    status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
  }
  // Note that an untimed cond_wait() can sometimes return ETIME on older
  // versions of the Solaris.
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
#endif
  _counter = 0;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other and Java-level accesses.
  OrderAccess::fence();

  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }
}

// Set the permit to 1 and, if the owner was waiting (_counter was < 1),
// signal the condvar to wake it.
void Parker::unpark() {
  int status = os::Solaris::mutex_lock(_mutex);
  assert(status == 0, "invariant");
  const int s = _counter;
  _counter = 1;
  status = os::Solaris::mutex_unlock(_mutex);
  assert(status == 0, "invariant");

  if (s < 1) {
    status = os::Solaris::cond_signal(_cond);
    assert(status == 0, "invariant");
  }
}

extern char** environ;

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't fork a new process).
// Unlike system(), this function can be called from signal handler. It
// doesn't block SIGINT et al.
int os::fork_and_exec(char* cmd) {
  // Build an argv equivalent to: sh -c "<cmd>"
  char * argv[4];
  argv[0] = (char *)"sh";
  argv[1] = (char *)"-c";
  argv[2] = cmd;
  argv[3] = NULL;

  // fork is async-safe, fork1 is not so can't use in signal handler
  pid_t pid;
  Thread* t = ThreadLocalStorage::get_thread_slow();
  if (t != NULL && t->is_inside_signal_handler()) {
    pid = fork();
  } else {
    pid = fork1();
  }

  if (pid < 0) {
    // fork failed
    warning("fork failed: %s", strerror(errno));
    return -1;

  } else if (pid == 0) {
    // child process

    // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
    execve("/usr/bin/sh", argv, environ);

    // execve failed
    _exit(-1);

  } else {
    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
    // care about the actual exit code, for now.

    int status;

    // Wait for the child process to exit.  This returns immediately if
    // the child has already exited. */
    while (waitpid(pid, &status, 0) < 0) {
      switch (errno) {
      case ECHILD: return 0;       // child already reaped elsewhere
      case EINTR: break;           // interrupted by a signal; retry
      default: return -1;
      }
    }

    if (WIFEXITED(status)) {
      // The child exited normally; get its exit code.
      return WEXITSTATUS(status);
    } else if (WIFSIGNALED(status)) {
      // The child exited because of a signal
      // The best value to return is 0x80 + signal number,
      // because that is what all Unix shells do, and because
      // it allows callers to distinguish between process exit and
      // process death by signal.
      return 0x80 + WTERMSIG(status);
    } else {
      // Unknown exit code; pass it through
      return status;
    }
  }
}

// is_headless_jre()
//
// Test for the existence of xawt/libmawt.so or libawt_xawt.so
// in order to report if we are running in a headless jre
//
// Since JDK8 xawt/libmawt.so was moved into the same directory
// as libawt.so, and renamed libawt_xawt.so
//
bool os::is_headless_jre() {
  struct stat statbuf;
  char buf[MAXPATHLEN];
  char libmawtpath[MAXPATHLEN];
  const char *xawtstr  = "/xawt/libmawt.so";
  const char *new_xawtstr = "/libawt_xawt.so";
  char *p;

  // Get path to libjvm.so
  os::jvm_path(buf, sizeof(buf));

  // Get rid of libjvm.so
  p = strrchr(buf, '/');
  if (p == NULL) {
    return false;
  } else {
    *p = '\0';
  }

  // Get rid of client or server
  p = strrchr(buf, '/');
  if (p == NULL) {
    return false;
  } else {
    *p = '\0';
  }

  // NOTE(review): the strcpy/strcat below assume the truncated jvm path
  // plus the longest suffix fits in MAXPATHLEN — holds in practice for
  // libjvm install layouts, but is not checked here.

  // check xawt/libmawt.so
  strcpy(libmawtpath, buf);
  strcat(libmawtpath, xawtstr);
  if (::stat(libmawtpath, &statbuf) == 0) return false;

  // check libawt_xawt.so
  strcpy(libmawtpath, buf);
  strcat(libmawtpath, new_xawtstr);
  if (::stat(libmawtpath, &statbuf) == 0) return false;

  return true;
}

// write() wrapper that restarts the call if it is interrupted by a signal
// (EINTR).  Must be called with the thread in _thread_in_native state.
size_t os::write(int fd, const void *buf, unsigned int nBytes) {
  size_t res;
  assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
         "Assumed _thread_in_native");
  RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
  return res;
}

// Thin wrapper over ::close(2).
int os::close(int fd) {
  return ::close(fd);
}

// On Solaris sockets are plain file descriptors, so closing one is
// identical to os::close().
int os::socket_close(int fd) {
  return ::close(fd);
}

// recv() wrapper, restarted on EINTR.  Must be _thread_in_native.
int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
         "Assumed _thread_in_native");
  RESTARTABLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags));
}

// send() wrapper, restarted on EINTR.  Must be _thread_in_native.
int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
         "Assumed _thread_in_native");
  RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
}

// Like os::send() but without the thread-state assertion, for callers
// outside the normal native transition (e.g. error handling paths).
int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
}

// As both poll and select can be interrupted by signals, we have to be
// prepared to restart the system call after updating the timeout, unless
// a poll() is done with timeout == -1, in which case we repeat with this
// "wait forever" value.

int os::connect(int fd, struct sockaddr *him, socklen_t len) {
  int _result;
  _result = ::connect(fd, him, len);

  // On Solaris, when a connect() call is interrupted, the connection
  // can be established asynchronously (see 6343810). Subsequent calls
  // to connect() must check the errno value which has the semantic
  // described below (copied from the connect() man page). Handling
  // of asynchronously established connections is required for both
  // blocking and non-blocking sockets.
  //     EINTR            The  connection  attempt  was   interrupted
  //                      before  any data arrived by the delivery of
  //                      a signal. The connection, however,  will be
  //                      established asynchronously.
  //
  //     EINPROGRESS      The socket is non-blocking, and the connec-
  //                      tion  cannot  be completed immediately.
  //
  //     EALREADY         The socket is non-blocking,  and a previous
  //                      connection  attempt  has  not yet been com-
  //                      pleted.
  //
  //     EISCONN          The socket is already connected.
  if (_result == OS_ERR && errno == EINTR) {
    // restarting a connect() changes its errno semantics
    RESTARTABLE(::connect(fd, him, len), _result);
    // undo these changes
    if (_result == OS_ERR) {
      if (errno == EALREADY) {
        errno = EINPROGRESS; // fall through
      } else if (errno == EISCONN) {
        // The asynchronous connect completed: report success.
        errno = 0;
        return OS_OK;
      }
    }
  }
  return _result;
}

// Get the default path to the core file
// Returns the length of the string
int os::get_core_path(char* buffer, size_t bufferSize) {
  const char* p = get_current_directory(buffer, bufferSize);

  if (p == NULL) {
    // Fires in debug builds; release builds silently report length 0.
    assert(p != NULL, "failed to get current directory");
    return 0;
  }

  jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
               p, current_process_id());

  return strlen(buffer);
}

#ifndef PRODUCT
// Hook for the shared ReserveMemorySpecial test harness.
void TestReserveMemorySpecial_test() {
  // No tests available for this platform
}
#endif