Print this page
Split |
Close |
Expand all |
Collapse all |
--- old/src/os/solaris/vm/os_solaris.cpp
+++ new/src/os/solaris/vm/os_solaris.cpp
1 1 /*
2 2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 // no precompiled headers
26 26 #include "classfile/classLoader.hpp"
27 27 #include "classfile/systemDictionary.hpp"
28 28 #include "classfile/vmSymbols.hpp"
29 29 #include "code/icBuffer.hpp"
30 30 #include "code/vtableStubs.hpp"
31 31 #include "compiler/compileBroker.hpp"
32 32 #include "interpreter/interpreter.hpp"
33 33 #include "jvm_solaris.h"
34 34 #include "memory/allocation.inline.hpp"
35 35 #include "memory/filemap.hpp"
36 36 #include "mutex_solaris.inline.hpp"
37 37 #include "oops/oop.inline.hpp"
38 38 #include "os_share_solaris.hpp"
39 39 #include "prims/jniFastGetField.hpp"
40 40 #include "prims/jvm.h"
41 41 #include "prims/jvm_misc.hpp"
42 42 #include "runtime/arguments.hpp"
43 43 #include "runtime/extendedPC.hpp"
44 44 #include "runtime/globals.hpp"
45 45 #include "runtime/interfaceSupport.hpp"
46 46 #include "runtime/java.hpp"
47 47 #include "runtime/javaCalls.hpp"
48 48 #include "runtime/mutexLocker.hpp"
49 49 #include "runtime/objectMonitor.hpp"
50 50 #include "runtime/osThread.hpp"
51 51 #include "runtime/perfMemory.hpp"
52 52 #include "runtime/sharedRuntime.hpp"
53 53 #include "runtime/statSampler.hpp"
54 54 #include "runtime/stubRoutines.hpp"
55 55 #include "runtime/threadCritical.hpp"
56 56 #include "runtime/timer.hpp"
57 57 #include "services/attachListener.hpp"
58 58 #include "services/runtimeService.hpp"
59 59 #include "thread_solaris.inline.hpp"
60 60 #include "utilities/decoder.hpp"
61 61 #include "utilities/defaultStream.hpp"
62 62 #include "utilities/events.hpp"
63 63 #include "utilities/growableArray.hpp"
64 64 #include "utilities/vmError.hpp"
65 65 #ifdef TARGET_ARCH_x86
66 66 # include "assembler_x86.inline.hpp"
67 67 # include "nativeInst_x86.hpp"
68 68 #endif
69 69 #ifdef TARGET_ARCH_sparc
70 70 # include "assembler_sparc.inline.hpp"
71 71 # include "nativeInst_sparc.hpp"
72 72 #endif
73 73 #ifdef COMPILER1
74 74 #include "c1/c1_Runtime1.hpp"
75 75 #endif
76 76 #ifdef COMPILER2
77 77 #include "opto/runtime.hpp"
78 78 #endif
79 79
80 80 // put OS-includes here
81 81 # include <dlfcn.h>
82 82 # include <errno.h>
83 83 # include <exception>
84 84 # include <link.h>
85 85 # include <poll.h>
86 86 # include <pthread.h>
87 87 # include <pwd.h>
88 88 # include <schedctl.h>
89 89 # include <setjmp.h>
90 90 # include <signal.h>
91 91 # include <stdio.h>
92 92 # include <alloca.h>
93 93 # include <sys/filio.h>
94 94 # include <sys/ipc.h>
95 95 # include <sys/lwp.h>
96 96 # include <sys/machelf.h> // for elf Sym structure used by dladdr1
97 97 # include <sys/mman.h>
98 98 # include <sys/processor.h>
99 99 # include <sys/procset.h>
100 100 # include <sys/pset.h>
101 101 # include <sys/resource.h>
102 102 # include <sys/shm.h>
103 103 # include <sys/socket.h>
104 104 # include <sys/stat.h>
105 105 # include <sys/systeminfo.h>
106 106 # include <sys/time.h>
107 107 # include <sys/times.h>
108 108 # include <sys/types.h>
109 109 # include <sys/wait.h>
110 110 # include <sys/utsname.h>
111 111 # include <thread.h>
112 112 # include <unistd.h>
113 113 # include <sys/priocntl.h>
114 114 # include <sys/rtpriocntl.h>
115 115 # include <sys/tspriocntl.h>
116 116 # include <sys/iapriocntl.h>
117 117 # include <sys/loadavg.h>
118 118 # include <string.h>
119 119 # include <stdio.h>
120 120
121 121 # define _STRUCTURED_PROC 1 // this gets us the new structured proc interfaces of 5.6 & later
122 122 # include <sys/procfs.h> // see comment in <sys/procfs.h>
123 123
124 124 #define MAX_PATH (2 * K)
125 125
126 126 // for timer info max values which include all bits
127 127 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
128 128
129 129 #ifdef _GNU_SOURCE
130 130 // See bug #6514594
131 131 extern "C" int madvise(caddr_t, size_t, int);
132 132 extern "C" int memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg,
133 133 int attr, int mask);
134 134 #endif //_GNU_SOURCE
135 135
136 136 /*
137 137 MPSS Changes Start.
138 138 The JVM binary needs to be built and run on pre-Solaris 9
139 139 systems, but the constants needed by MPSS are only in Solaris 9
140 140 header files. They are textually replicated here to allow
141 141 building on earlier systems. Once building on Solaris 8 is
142 142 no longer a requirement, these #defines can be replaced by ordinary
143 143 system .h inclusion.
144 144
145 145 In earlier versions of the JDK and Solaris, we used ISM for large pages.
146 146 But ISM requires shared memory to achieve this and thus has many caveats.
147 147   MPSS is fully transparent and is a cleaner way to get large pages.
148 148   Although we still require keeping ISM for backward compatibility as well as
149 149 giving the opportunity to use large pages on older systems it is
150 150 recommended that MPSS be used for Solaris 9 and above.
151 151
152 152 */
153 153
154 154 #ifndef MC_HAT_ADVISE
155 155
156 156 struct memcntl_mha {
157 157 uint_t mha_cmd; /* command(s) */
158 158 uint_t mha_flags;
159 159 size_t mha_pagesize;
160 160 };
161 161 #define MC_HAT_ADVISE 7 /* advise hat map size */
162 162 #define MHA_MAPSIZE_VA 0x1 /* set preferred page size */
163 163 #define MAP_ALIGN 0x200 /* addr specifies alignment */
164 164
165 165 #endif
166 166 // MPSS Changes End.
167 167
168 168
169 169 // Here are some liblgrp types from sys/lgrp_user.h to be able to
170 170 // compile on older systems without this header file.
171 171
172 172 #ifndef MADV_ACCESS_LWP
173 173 # define MADV_ACCESS_LWP 7 /* next LWP to access heavily */
174 174 #endif
175 175 #ifndef MADV_ACCESS_MANY
176 176 # define MADV_ACCESS_MANY 8 /* many processes to access heavily */
177 177 #endif
178 178
179 179 #ifndef LGRP_RSRC_CPU
180 180 # define LGRP_RSRC_CPU 0 /* CPU resources */
181 181 #endif
182 182 #ifndef LGRP_RSRC_MEM
183 183 # define LGRP_RSRC_MEM 1 /* memory resources */
184 184 #endif
185 185
186 186 // Some more macros from sys/mman.h that are not present in Solaris 8.
187 187
188 188 #ifndef MAX_MEMINFO_CNT
189 189 /*
190 190 * info_req request type definitions for meminfo
191 191 * request types starting with MEMINFO_V are used for Virtual addresses
192 192 * and should not be mixed with MEMINFO_PLGRP which is targeted for Physical
193 193 * addresses
194 194 */
195 195 # define MEMINFO_SHIFT 16
196 196 # define MEMINFO_MASK (0xFF << MEMINFO_SHIFT)
197 197 # define MEMINFO_VPHYSICAL (0x01 << MEMINFO_SHIFT) /* get physical addr */
198 198 # define MEMINFO_VLGRP (0x02 << MEMINFO_SHIFT) /* get lgroup */
199 199 # define MEMINFO_VPAGESIZE (0x03 << MEMINFO_SHIFT) /* size of phys page */
200 200 # define MEMINFO_VREPLCNT (0x04 << MEMINFO_SHIFT) /* no. of replica */
201 201 # define MEMINFO_VREPL (0x05 << MEMINFO_SHIFT) /* physical replica */
202 202 # define MEMINFO_VREPL_LGRP (0x06 << MEMINFO_SHIFT) /* lgrp of replica */
203 203 # define MEMINFO_PLGRP (0x07 << MEMINFO_SHIFT) /* lgroup for paddr */
204 204
205 205 /* maximum number of addresses meminfo() can process at a time */
206 206 # define MAX_MEMINFO_CNT 256
207 207
208 208 /* maximum number of request types */
209 209 # define MAX_MEMINFO_REQ 31
210 210 #endif
211 211
212 212 // see thr_setprio(3T) for the basis of these numbers
213 213 #define MinimumPriority 0
214 214 #define NormalPriority 64
215 215 #define MaximumPriority 127
216 216
217 217 // Values for ThreadPriorityPolicy == 1
218 218 int prio_policy1[MaxPriority+1] = { -99999, 0, 16, 32, 48, 64,
219 219 80, 96, 112, 124, 127 };
220 220
221 221 // System parameters used internally
222 222 static clock_t clock_tics_per_sec = 100;
223 223
224 224 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
225 225 static bool enabled_extended_FILE_stdio = false;
226 226
227 227 // For diagnostics to print a message once. see run_periodic_checks
228 228 static bool check_addr0_done = false;
229 229 static sigset_t check_signal_done;
230 230 static bool check_signals = true;
231 231
232 232 address os::Solaris::handler_start; // start pc of thr_sighndlrinfo
233 233 address os::Solaris::handler_end; // end pc of thr_sighndlrinfo
234 234
235 235 address os::Solaris::_main_stack_base = NULL; // 4352906 workaround
236 236
237 237
238 238 // "default" initializers for missing libc APIs
239 239 extern "C" {
240 240 static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
241 241 static int lwp_mutex_destroy(mutex_t *mx) { return 0; }
242 242
243 243 static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
244 244 static int lwp_cond_destroy(cond_t *cv) { return 0; }
245 245 }
246 246
247 247 // "default" initializers for pthread-based synchronization
248 248 extern "C" {
249 249 static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
250 250 static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
251 251 }
252 252
253 253 // Thread Local Storage
254 254 // This is common to all Solaris platforms so it is defined here,
255 255 // in this common file.
256 256 // The declarations are in the os_cpu threadLS*.hpp files.
257 257 //
258 258 // Static member initialization for TLS
259 259 Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};
260 260
#ifndef PRODUCT
// Percentage helper used only by the statistics printout below.
#define _PCT(n,d) ((100.0*(double)(n))/(double)(d))

// TLS thread-cache hit/miss counters (debug builds only).
int ThreadLocalStorage::_tcacheHit = 0;
int ThreadLocalStorage::_tcacheMiss = 0;

// Print hit/miss statistics for the thread-cache fast path.
void ThreadLocalStorage::print_statistics() {
  int total = _tcacheMiss+_tcacheHit;
  tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
                _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
}
#undef _PCT
#endif // PRODUCT
274 274
// Slow-path TLS lookup: resolve the current Thread* and install it in
// the cache slot for this raw thread id so later probes hit the cache.
// Returns NULL when no Thread is associated with the calling thread.
Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
                                                        int index) {
  Thread *thread = get_thread_slow();
  if (thread != NULL) {
    address sp = os::current_stack_pointer();
    // Sanity check: the current sp must lie within the stack bounds
    // recorded for this thread, unless bounds are not yet set or we
    // are already in error reporting.
    guarantee(thread->_stack_base == NULL ||
              (sp <= thread->_stack_base &&
                 sp >= thread->_stack_base - thread->_stack_size) ||
               is_error_reported(),
              "sp must be inside of selected thread stack");

    thread->set_self_raw_id(raw_id);  // mark for quick retrieval
    _get_thread_cache[ index ] = thread;
  }
  return thread;
}
291 291
292 292
293 293 static const double all_zero[ sizeof(Thread) / sizeof(double) + 1 ] = {0};
294 294 #define NO_CACHED_THREAD ((Thread*)all_zero)
295 295
// Install 'thread' as the current thread's TLS value and mirror it into
// the thread cache.  A NULL argument (thread exit) installs the zeroed
// NO_CACHED_THREAD sentinel rather than NULL.
void ThreadLocalStorage::pd_set_thread(Thread* thread) {

  // Store the new value before updating the cache to prevent a race
  // between get_thread_via_cache_slowly() and this store operation.
  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);

  // Update thread cache with new thread if setting on thread create,
  // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
  uintptr_t raw = pd_raw_thread_id();
  int ix = pd_cache_index(raw);
  _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
}
308 308
309 309 void ThreadLocalStorage::pd_init() {
310 310 for (int i = 0; i < _pd_cache_size; i++) {
311 311 _get_thread_cache[i] = NO_CACHED_THREAD;
312 312 }
313 313 }
314 314
315 315 // Invalidate all the caches (happens to be the same as pd_init).
316 316 void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }
317 317
318 318 #undef NO_CACHED_THREAD
319 319
320 320 // END Thread Local Storage
321 321
322 322 static inline size_t adjust_stack_size(address base, size_t size) {
323 323 if ((ssize_t)size < 0) {
324 324 // 4759953: Compensate for ridiculous stack size.
325 325 size = max_intx;
326 326 }
327 327 if (size > (size_t)base) {
328 328 // 4812466: Make sure size doesn't allow the stack to wrap the address space.
329 329 size = (size_t)base;
330 330 }
331 331 return size;
332 332 }
333 333
334 334 static inline stack_t get_stack_info() {
335 335 stack_t st;
336 336 int retval = thr_stksegment(&st);
337 337 st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
338 338 assert(retval == 0, "incorrect return value from thr_stksegment");
339 339 assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
340 340 assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
341 341 return st;
342 342 }
343 343
// Return the base (highest address) of the current thread's stack.
// For the primordial thread the value is computed once and cached.
address os::current_stack_base() {
  int r = thr_main() ;
  guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
  bool is_primordial_thread = r;

  // Workaround 4352906, avoid calls to thr_stksegment by
  // thr_main after the first one (it looks like we trash
  // some data, causing the value for ss_sp to be incorrect).
  if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
    stack_t st = get_stack_info();
    if (is_primordial_thread) {
      // cache initial value of stack base
      os::Solaris::_main_stack_base = (address)st.ss_sp;
    }
    return (address)st.ss_sp;
  } else {
    // Primordial thread after the first call: use the cached value.
    guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
    return os::Solaris::_main_stack_base;
  }
}
364 364
365 365 size_t os::current_stack_size() {
366 366 size_t size;
367 367
368 368 int r = thr_main() ;
369 369 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
370 370 if(!r) {
371 371 size = get_stack_info().ss_size;
372 372 } else {
373 373 struct rlimit limits;
374 374 getrlimit(RLIMIT_STACK, &limits);
375 375 size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
376 376 }
377 377 // base may not be page aligned
378 378 address base = current_stack_base();
379 379 address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());;
380 380 return (size_t)(base - bottom);
381 381 }
382 382
383 383 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
384 384 return localtime_r(clock, res);
385 385 }
386 386
387 387 // interruptible infrastructure
388 388
389 389 // setup_interruptible saves the thread state before going into an
390 390 // interruptible system call.
391 391 // The saved state is used to restore the thread to
392 392 // its former state whether or not an interrupt is received.
393 393 // Used by classloader os::read
394 394 // os::restartable_read calls skip this layer and stay in _thread_in_native
395 395
// Save the thread's current state and transition it to _thread_blocked
// before entering an interruptible system call.  cleanup_interruptible()
// later restores the saved state.
void os::Solaris::setup_interruptible(JavaThread* thread) {

  JavaThreadState thread_state = thread->thread_state();

  // Callers must not already be blocked, and native-state threads
  // bypass this layer entirely (see os::restartable_read).
  assert(thread_state != _thread_blocked, "Coming from the wrong thread");
  assert(thread_state != _thread_in_native, "Native threads skip setup_interruptible");
  OSThread* osthread = thread->osthread();
  // Remember the pre-call state so cleanup_interruptible() can restore it.
  osthread->set_saved_interrupt_thread_state(thread_state);
  // Make the stack walkable for any safepoint that occurs while blocked.
  thread->frame_anchor()->make_walkable(thread);
  ThreadStateTransition::transition(thread, thread_state, _thread_blocked);
}
407 407
// Version of setup_interruptible() for threads that are already in
// _thread_blocked. Used by os_sleep().
// No state transition or save is needed; only make the stack walkable.
void os::Solaris::setup_interruptible_already_blocked(JavaThread* thread) {
  thread->frame_anchor()->make_walkable(thread);
}
413 413
// Convenience overload: resolve the current JavaThread from TLS, set it
// up for an interruptible call, and return it to the caller.
JavaThread* os::Solaris::setup_interruptible() {
  JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
  setup_interruptible(thread);
  return thread;
}
419 419
420 420 void os::Solaris::try_enable_extended_io() {
421 421 typedef int (*enable_extended_FILE_stdio_t)(int, int);
422 422
423 423 if (!UseExtendedFileIO) {
424 424 return;
425 425 }
426 426
427 427 enable_extended_FILE_stdio_t enabler =
428 428 (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
429 429 "enable_extended_FILE_stdio");
430 430 if (enabler) {
431 431 enabler(-1, -1);
432 432 }
433 433 }
434 434
435 435
#ifdef ASSERT

// Debug-only check used around interruptible native I/O: the calling
// thread must already be in _thread_in_native.  Returns that thread.
JavaThread* os::Solaris::setup_interruptible_native() {
  JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
  JavaThreadState thread_state = thread->thread_state();
  assert(thread_state == _thread_in_native, "Assumed thread_in_native");
  return thread;
}

// Debug-only counterpart: verify the thread is still in _thread_in_native.
void os::Solaris::cleanup_interruptible_native(JavaThread* thread) {
  JavaThreadState thread_state = thread->thread_state();
  assert(thread_state == _thread_in_native, "Assumed thread_in_native");
}
#endif
450 450
// cleanup_interruptible reverses the effects of setup_interruptible
// setup_interruptible_already_blocked() does not need any cleanup.

// Transition the thread from _thread_blocked back to the state that
// setup_interruptible() recorded on the OSThread.
void os::Solaris::cleanup_interruptible(JavaThread* thread) {
  OSThread* osthread = thread->osthread();

  ThreadStateTransition::transition(thread, _thread_blocked, osthread->saved_interrupt_thread_state());
}
459 459
// I/O interruption related counters called in _INTERRUPTIBLE

// Count an interrupt observed before the I/O call was issued.
void os::Solaris::bump_interrupted_before_count() {
  RuntimeService::record_interrupted_before_count();
}

// Count an interrupt that arrived while the I/O call was in progress.
void os::Solaris::bump_interrupted_during_count() {
  RuntimeService::record_interrupted_during_count();
}
469 469
470 470 static int _processors_online = 0;
471 471
472 472 jint os::Solaris::_os_thread_limit = 0;
473 473 volatile jint os::Solaris::_os_thread_count = 0;
474 474
julong os::available_memory() {
  return Solaris::available_memory();
}

// Currently free physical memory: available pages times the page size.
julong os::Solaris::available_memory() {
  return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
}

// Total physical memory, cached by initialize_system_info().
julong os::Solaris::_physical_memory = 0;

julong os::physical_memory() {
  return Solaris::physical_memory();
}
488 488
// Clamp a requested allocation size to what the process can address.
// On 64-bit there is no practical cap; on 32-bit stay below the 4GB
// address space, backing off further when the first clamp is still not
// allocatable after worst-case (LargePageSizeInBytes) alignment.
julong os::allocatable_physical_memory(julong size) {
#ifdef _LP64
  return size;
#else
  julong result = MIN2(size, (julong)3835*M);
  if (!is_allocatable(result)) {
    // Memory allocations will be aligned but the alignment
    // is not known at this point. Alignments will
    // be at most to LargePageSizeInBytes. Protect
    // allocations from alignments up to illegal
    // values. If at this point 2G is illegal.
    julong reasonable_size = (julong)2*G - 2 * LargePageSizeInBytes;
    result = MIN2(size, reasonable_size);
  }
  return result;
#endif
}
506 506
507 507 static hrtime_t first_hrtime = 0;
508 508 static const hrtime_t hrtime_hz = 1000*1000*1000;
509 509 const int LOCK_BUSY = 1;
510 510 const int LOCK_FREE = 0;
511 511 const int LOCK_INVALID = -1;
512 512 static volatile hrtime_t max_hrtime = 0;
513 513 static volatile int max_hrtime_lock = LOCK_FREE; // Update counter with LSB as lock-in-progress
514 514
515 515
// Query processor counts and physical memory size from sysconf() once
// at startup and cache them for later use.
void os::Solaris::initialize_system_info() {
  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
  _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
  _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
}
521 521
// Number of processors this process may actually use: the size of its
// processor set when bound to one, otherwise the online cpu count.
// Also refreshes the cached _processors_online when a pset is found.
int os::active_processor_count() {
  int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
  pid_t pid = getpid();
  psetid_t pset = PS_NONE;
  // Are we running in a processor set or is there any processor set around?
  if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
    uint_t pset_cpus;
    // Query the number of cpus available to us.
    if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
      assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
      _processors_online = pset_cpus;
      return pset_cpus;
    }
  }
  // Otherwise return number of online cpus
  return online_cpus;
}
539 539
540 540 static bool find_processors_in_pset(psetid_t pset,
541 541 processorid_t** id_array,
542 542 uint_t* id_length) {
543 543 bool result = false;
544 544 // Find the number of processors in the processor set.
545 545 if (pset_info(pset, NULL, id_length, NULL) == 0) {
546 546 // Make up an array to hold their ids.
547 547 *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length);
548 548 // Fill in the array with their processor ids.
549 549 if (pset_info(pset, NULL, id_length, *id_array) == 0) {
550 550 result = true;
551 551 }
552 552 }
553 553 return result;
554 554 }
555 555
// Callers of find_processors_online() must tolerate imprecise results --
// the system configuration can change asynchronously because of DR
// or explicit psradm operations.
//
// We also need to take care that the loop (below) terminates as the
// number of processors online can change between the _SC_NPROCESSORS_ONLN
// request and the loop that builds the list of processor ids. Unfortunately
// there's no reliable way to determine the maximum valid processor id,
// so we use a manifest constant, MAX_PROCESSOR_ID, instead. See p_online
// man pages, which claim the processor id set is "sparse, but
// not too sparse". MAX_PROCESSOR_ID is used to ensure that we eventually
// exit the loop.
//
// In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
// not available on S8.0.

// Fill *id_array (C-heap allocated; caller frees) with the ids of the
// processors currently online and *id_length with how many were found.
// Always returns true; *id_length may come back smaller than first
// reported -- in the worst case even zero (see comment below).
static bool find_processors_online(processorid_t** id_array,
                                   uint* id_length) {
  const processorid_t MAX_PROCESSOR_ID = 100000 ;
  // Find the number of processors online.
  *id_length = sysconf(_SC_NPROCESSORS_ONLN);
  // Make up an array to hold their ids.
  *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length);
  // Processors need not be numbered consecutively.
  long found = 0;
  processorid_t next = 0;
  while (found < *id_length && next < MAX_PROCESSOR_ID) {
    processor_info_t info;
    if (processor_info(next, &info) == 0) {
      // NB, PI_NOINTR processors are effectively online ...
      if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
        (*id_array)[found] = next;
        found += 1;
      }
    }
    next += 1;
  }
  if (found < *id_length) {
    // The loop above didn't identify the expected number of processors.
    // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
    // and re-running the loop, above, but there's no guarantee of progress
    // if the system configuration is in flux. Instead, we just return what
    // we've got. Note that in the worst case find_processors_online() could
    // return an empty set. (As a fall-back in the case of the empty set we
    // could just return the ID of the current processor).
    *id_length = found ;
  }

  return true;
}
606 606
// Choose 'distribution_length' distinct processor ids from 'id_array'
// (length 'id_length') and write them to 'distribution'.  Assignment
// steps by "boards" of ProcessDistributionStride cpus so work spreads
// across boards.  Returns false only when too few processors exist;
// otherwise the while loop below is guaranteed to terminate because
// enough available ids remain.
static bool assign_distribution(processorid_t* id_array,
                                uint id_length,
                                uint* distribution,
                                uint distribution_length) {
  // We assume we can assign processorid_t's to uint's.
  assert(sizeof(processorid_t) == sizeof(uint),
         "can't convert processorid_t to uint");
  // Quick check to see if we won't succeed.
  if (id_length < distribution_length) {
    return false;
  }
  // Assign processor ids to the distribution.
  // Try to shuffle processors to distribute work across boards,
  // assuming 4 processors per board.
  const uint processors_per_board = ProcessDistributionStride;
  // Find the maximum processor id.
  processorid_t max_id = 0;
  for (uint m = 0; m < id_length; m += 1) {
    max_id = MAX2(max_id, id_array[m]);
  }
  // The next id, to limit loops.
  const processorid_t limit_id = max_id + 1;
  // Make up markers for available processors.
  // available_id[i] is true iff processor id i is in id_array.
  bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id);
  for (uint c = 0; c < limit_id; c += 1) {
    available_id[c] = false;
  }
  for (uint a = 0; a < id_length; a += 1) {
    available_id[id_array[a]] = true;
  }
  // Step by "boards", then by "slot", copying to "assigned".
  // NEEDS_CLEANUP: The assignment of processors should be stateful,
  //                remembering which processors have been assigned by
  //                previous calls, etc., so as to distribute several
  //                independent calls of this method.  What we'd like is
  //                It would be nice to have an API that let us ask
  //                how many processes are bound to a processor,
  //                but we don't have that, either.
  //                In the short term, "board" is static so that
  //                subsequent distributions don't all start at board 0.
  static uint board = 0;
  uint assigned = 0;
  // Until we've found enough processors ....
  while (assigned < distribution_length) {
    // ... find the next available processor in the board.
    for (uint slot = 0; slot < processors_per_board; slot += 1) {
      uint try_id = board * processors_per_board + slot;
      if ((try_id < limit_id) && (available_id[try_id] == true)) {
        distribution[assigned] = try_id;
        available_id[try_id] = false;  // each id is handed out only once
        assigned += 1;
        break;
      }
    }
    board += 1;
    // Wrap back to board 0 once we step past the highest known id.
    if (board * processors_per_board + 0 >= limit_id) {
      board = 0;
    }
  }
  if (available_id != NULL) {
    FREE_C_HEAP_ARRAY(bool, available_id);
  }
  return true;
}
671 671
672 672 void os::set_native_thread_name(const char *name) {
673 673 // Not yet implemented.
674 674 return;
675 675 }
676 676
// Fill 'distribution' (length 'length') with distinct processor ids
// chosen from the cpus available to this process.  Returns true on
// success; false when fewer than 'length' processors are available or
// the underlying queries fail.
bool os::distribute_processes(uint length, uint* distribution) {
  bool result = false;
  // Find the processor id's of all the available CPUs.
  processorid_t* id_array = NULL;
  uint id_length = 0;
  // There are some races between querying information and using it,
  // since processor sets can change dynamically.
  psetid_t pset = PS_NONE;
  // Are we running in a processor set?
  if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
    result = find_processors_in_pset(pset, &id_array, &id_length);
  } else {
    result = find_processors_online(&id_array, &id_length);
  }
  if (result == true) {
    if (id_length >= length) {
      result = assign_distribution(id_array, id_length, distribution, length);
    } else {
      result = false;
    }
  }
  // The id array is C-heap allocated by the find_* helpers; free it here.
  if (id_array != NULL) {
    FREE_C_HEAP_ARRAY(processorid_t, id_array);
  }
  return result;
}
703 703
704 704 bool os::bind_to_processor(uint processor_id) {
705 705 // We assume that a processorid_t can be stored in a uint.
706 706 assert(sizeof(uint) == sizeof(processorid_t),
707 707 "can't convert uint to processorid_t");
708 708 int bind_result =
709 709 processor_bind(P_LWPID, // bind LWP.
710 710 P_MYID, // bind current LWP.
711 711 (processorid_t) processor_id, // id.
712 712 NULL); // don't return old binding.
713 713 return (bind_result == 0);
714 714 }
715 715
716 716 bool os::getenv(const char* name, char* buffer, int len) {
717 717 char* val = ::getenv( name );
718 718 if ( val == NULL
719 719 || strlen(val) + 1 > len ) {
720 720 if (len > 0) buffer[0] = 0; // return a null string
721 721 return false;
722 722 }
723 723 strcpy( buffer, val );
724 724 return true;
725 725 }
726 726
727 727
728 728 // Return true if the process runs with special (setuid/setgid) privileges.
729 729
// Report whether the process is running with special privileges.
// Note: this detects setuid/setgid execution (real vs. effective
// user/group id mismatch), not whether the user is literally root.
// The result is computed once and cached.
bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}
739 739
740 740
741 741 void os::init_system_properties_values() {
742 742 char arch[12];
743 743 sysinfo(SI_ARCHITECTURE, arch, sizeof(arch));
744 744
745 745 // The next steps are taken in the product version:
746 746 //
747 747 // Obtain the JAVA_HOME value from the location of libjvm[_g].so.
748 748 // This library should be located at:
749 749 // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm[_g].so.
750 750 //
751 751 // If "/jre/lib/" appears at the right place in the path, then we
752 752 // assume libjvm[_g].so is installed in a JDK and we use this path.
753 753 //
754 754 // Otherwise exit with message: "Could not create the Java virtual machine."
755 755 //
756 756 // The following extra steps are taken in the debugging version:
757 757 //
758 758 // If "/jre/lib/" does NOT appear at the right place in the path
759 759 // instead of exit check for $JAVA_HOME environment variable.
760 760 //
761 761 // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
762 762 // then we append a fake suffix "hotspot/libjvm[_g].so" to this path so
763 763 // it looks like libjvm[_g].so is installed there
764 764 // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm[_g].so.
765 765 //
766 766 // Otherwise exit.
767 767 //
768 768 // Important note: if the location of libjvm.so changes this
769 769 // code needs to be changed accordingly.
770 770
771 771 // The next few definitions allow the code to be verbatim:
772 772 #define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n))
773 773 #define free(p) FREE_C_HEAP_ARRAY(char, p)
774 774 #define getenv(n) ::getenv(n)
775 775
776 776 #define EXTENSIONS_DIR "/lib/ext"
777 777 #define ENDORSED_DIR "/lib/endorsed"
778 778 #define COMMON_DIR "/usr/jdk/packages"
779 779
780 780 {
781 781 /* sysclasspath, java_home, dll_dir */
782 782 {
783 783 char *home_path;
784 784 char *dll_path;
785 785 char *pslash;
786 786 char buf[MAXPATHLEN];
787 787 os::jvm_path(buf, sizeof(buf));
788 788
789 789 // Found the full path to libjvm.so.
790 790 // Now cut the path to <java_home>/jre if we can.
791 791 *(strrchr(buf, '/')) = '\0'; /* get rid of /libjvm.so */
792 792 pslash = strrchr(buf, '/');
793 793 if (pslash != NULL)
794 794 *pslash = '\0'; /* get rid of /{client|server|hotspot} */
795 795 dll_path = malloc(strlen(buf) + 1);
796 796 if (dll_path == NULL)
797 797 return;
798 798 strcpy(dll_path, buf);
799 799 Arguments::set_dll_dir(dll_path);
800 800
801 801 if (pslash != NULL) {
802 802 pslash = strrchr(buf, '/');
803 803 if (pslash != NULL) {
804 804 *pslash = '\0'; /* get rid of /<arch> */
805 805 pslash = strrchr(buf, '/');
806 806 if (pslash != NULL)
807 807 *pslash = '\0'; /* get rid of /lib */
808 808 }
809 809 }
810 810
811 811 home_path = malloc(strlen(buf) + 1);
812 812 if (home_path == NULL)
813 813 return;
814 814 strcpy(home_path, buf);
815 815 Arguments::set_java_home(home_path);
816 816
817 817 if (!set_boot_path('/', ':'))
818 818 return;
819 819 }
820 820
821 821 /*
822 822 * Where to look for native libraries
823 823 */
824 824 {
825 825 // Use dlinfo() to determine the correct java.library.path.
826 826 //
827 827 // If we're launched by the Java launcher, and the user
828 828 // does not set java.library.path explicitly on the commandline,
829 829 // the Java launcher sets LD_LIBRARY_PATH for us and unsets
830 830 // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64. In this case
831 831 // dlinfo returns LD_LIBRARY_PATH + crle settings (including
832 832 // /usr/lib), which is exactly what we want.
833 833 //
834 834 // If the user does set java.library.path, it completely
835 835 // overwrites this setting, and always has.
836 836 //
837 837 // If we're not launched by the Java launcher, we may
838 838 // get here with any/all of the LD_LIBRARY_PATH[_32|64]
839 839 // settings. Again, dlinfo does exactly what we want.
840 840
841 841 Dl_serinfo _info, *info = &_info;
842 842 Dl_serpath *path;
843 843 char* library_path;
844 844 char *common_path;
845 845 int i;
846 846
847 847 // determine search path count and required buffer size
848 848 if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
849 849 vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
850 850 }
851 851
852 852 // allocate new buffer and initialize
853 853 info = (Dl_serinfo*)malloc(_info.dls_size);
854 854 if (info == NULL) {
855 855 vm_exit_out_of_memory(_info.dls_size,
856 856 "init_system_properties_values info");
857 857 }
858 858 info->dls_size = _info.dls_size;
859 859 info->dls_cnt = _info.dls_cnt;
860 860
861 861 // obtain search path information
862 862 if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
863 863 free(info);
864 864 vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
865 865 }
866 866
867 867 path = &info->dls_serpath[0];
868 868
869 869 // Note: Due to a legacy implementation, most of the library path
870 870 // is set in the launcher. This was to accomodate linking restrictions
871 871 // on legacy Solaris implementations (which are no longer supported).
872 872 // Eventually, all the library path setting will be done here.
873 873 //
874 874 // However, to prevent the proliferation of improperly built native
875 875 // libraries, the new path component /usr/jdk/packages is added here.
876 876
877 877 // Determine the actual CPU architecture.
878 878 char cpu_arch[12];
879 879 sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
880 880 #ifdef _LP64
881 881 // If we are a 64-bit vm, perform the following translations:
882 882 // sparc -> sparcv9
883 883 // i386 -> amd64
884 884 if (strcmp(cpu_arch, "sparc") == 0)
885 885 strcat(cpu_arch, "v9");
886 886 else if (strcmp(cpu_arch, "i386") == 0)
887 887 strcpy(cpu_arch, "amd64");
888 888 #endif
889 889
890 890 // Construct the invariant part of ld_library_path. Note that the
891 891 // space for the colon and the trailing null are provided by the
892 892 // nulls included by the sizeof operator.
893 893 size_t bufsize = sizeof(COMMON_DIR) + sizeof("/lib/") + strlen(cpu_arch);
894 894 common_path = malloc(bufsize);
895 895 if (common_path == NULL) {
896 896 free(info);
897 897 vm_exit_out_of_memory(bufsize,
898 898 "init_system_properties_values common_path");
899 899 }
900 900 sprintf(common_path, COMMON_DIR "/lib/%s", cpu_arch);
901 901
902 902 // struct size is more than sufficient for the path components obtained
903 903 // through the dlinfo() call, so only add additional space for the path
904 904 // components explicitly added here.
905 905 bufsize = info->dls_size + strlen(common_path);
906 906 library_path = malloc(bufsize);
907 907 if (library_path == NULL) {
908 908 free(info);
909 909 free(common_path);
910 910 vm_exit_out_of_memory(bufsize,
911 911 "init_system_properties_values library_path");
912 912 }
913 913 library_path[0] = '\0';
914 914
915 915 // Construct the desired Java library path from the linker's library
916 916 // search path.
917 917 //
918 918 // For compatibility, it is optimal that we insert the additional path
919 919 // components specific to the Java VM after those components specified
920 920 // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
921 921 // infrastructure.
922 922 if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it
923 923 strcpy(library_path, common_path);
924 924 } else {
925 925 int inserted = 0;
926 926 for (i = 0; i < info->dls_cnt; i++, path++) {
927 927 uint_t flags = path->dls_flags & LA_SER_MASK;
928 928 if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
929 929 strcat(library_path, common_path);
930 930 strcat(library_path, os::path_separator());
931 931 inserted = 1;
932 932 }
933 933 strcat(library_path, path->dls_name);
934 934 strcat(library_path, os::path_separator());
935 935 }
936 936 // eliminate trailing path separator
937 937 library_path[strlen(library_path)-1] = '\0';
938 938 }
939 939
940 940 // happens before argument parsing - can't use a trace flag
941 941 // tty->print_raw("init_system_properties_values: native lib path: ");
942 942 // tty->print_raw_cr(library_path);
943 943
944 944 // callee copies into its own buffer
945 945 Arguments::set_library_path(library_path);
946 946
947 947 free(common_path);
948 948 free(library_path);
949 949 free(info);
950 950 }
951 951
952 952 /*
953 953 * Extensions directories.
954 954 *
955 955 * Note that the space for the colon and the trailing null are provided
956 956 * by the nulls included by the sizeof operator (so actually one byte more
957 957 * than necessary is allocated).
958 958 */
959 959 {
960 960 char *buf = (char *) malloc(strlen(Arguments::get_java_home()) +
961 961 sizeof(EXTENSIONS_DIR) + sizeof(COMMON_DIR) +
962 962 sizeof(EXTENSIONS_DIR));
963 963 sprintf(buf, "%s" EXTENSIONS_DIR ":" COMMON_DIR EXTENSIONS_DIR,
964 964 Arguments::get_java_home());
965 965 Arguments::set_ext_dirs(buf);
966 966 }
967 967
968 968 /* Endorsed standards default directory. */
969 969 {
970 970 char * buf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
971 971 sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
972 972 Arguments::set_endorsed_dirs(buf);
973 973 }
974 974 }
975 975
976 976 #undef malloc
977 977 #undef free
978 978 #undef getenv
979 979 #undef EXTENSIONS_DIR
980 980 #undef ENDORSED_DIR
981 981 #undef COMMON_DIR
982 982
983 983 }
984 984
// Programmatic breakpoint: expands the platform BREAKPOINT macro so an
// attached native debugger stops the VM at this point.
void os::breakpoint() {
  BREAKPOINT;
}
988 988
989 989 bool os::obsolete_option(const JavaVMOption *option)
990 990 {
991 991 if (!strncmp(option->optionString, "-Xt", 3)) {
992 992 return true;
993 993 } else if (!strncmp(option->optionString, "-Xtm", 4)) {
994 994 return true;
995 995 } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
996 996 return true;
997 997 } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
998 998 return true;
999 999 }
1000 1000 return false;
1001 1001 }
1002 1002
1003 1003 bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
1004 1004 address stackStart = (address)thread->stack_base();
1005 1005 address stackEnd = (address)(stackStart - (address)thread->stack_size());
1006 1006 if (sp < stackStart && sp >= stackEnd ) return true;
1007 1007 return false;
1008 1008 }
1009 1009
// Empty C-linkage function with a stable, unmangled name: set a native
// debugger breakpoint on "breakpoint" to stop here.
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
1013 1013
1014 1014 // Returns an estimate of the current stack pointer. Result must be guaranteed to
1015 1015 // point into the calling threads stack, and be no lower than the current stack
1016 1016 // pointer.
1017 1017 address os::current_stack_pointer() {
1018 1018 volatile int dummy;
1019 1019 address sp = (address)&dummy + 8; // %%%% need to confirm if this is right
1020 1020 return sp;
1021 1021 }
1022 1022
// Solaris thread id of the primordial thread; passed to create_os_thread()
// by os::create_main_thread() below.
static thread_t main_thread;
1024 1024
// Thread start routine for all new Java threads.
// 'thread_addr' is the Thread* handed to thr_create() by os::create_thread().
// Records OS-level ids on the OSThread, applies any priority that was set
// before the thread actually started, runs the thread's payload, and exits.
extern "C" void* java_start(void* thread_addr) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  int prio;
  Thread* thread = (Thread*)thread_addr;
  OSThread* osthr = thread->osthread();

  osthr->set_lwp_id( _lwp_self() ); // Store lwp in case we are bound
  thread->_schedctl = (void *) schedctl_init () ;

  // Record the NUMA locality group the kernel placed this lwp in.
  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // If the creator called set priority before we started,
  // we need to call set priority now that we have an lwp.
  // Get the priority from libthread and set the priority
  // for the new Solaris lwp.
  if ( osthr->thread_id() != -1 ) {
    if ( UseThreadPriorities ) {
      thr_getprio(osthr->thread_id(), &prio);
      if (ThreadPriorityVerbose) {
        tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT ", setting priority: %d\n",
                      osthr->thread_id(), osthr->lwp_id(), prio );
      }
      os::set_native_priority(thread, prio);
    }
  } else if (ThreadPriorityVerbose) {
    // thread_id is still -1: creation-time priority cannot be applied yet.
    warning("Can't set priority in _start routine, thread id hasn't been set\n");
  }

  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  // initialize signal mask for this thread
  os::Solaris::hotspot_sigmask(thread);

  // Execute the thread's payload; returns when the thread's work is done.
  thread->run();

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::dec code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec(&os::Solaris::_os_thread_count);
  }

  // Detached threads terminate the LWP here; thr_exit does not return.
  if (UseDetachedThreads) {
    thr_exit(NULL);
    ShouldNotReachHere();
  }
  return NULL;
}
1087 1087
1088 1088 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
1089 1089 // Allocate the OSThread object
1090 1090 OSThread* osthread = new OSThread(NULL, NULL);
1091 1091 if (osthread == NULL) return NULL;
1092 1092
1093 1093 // Store info on the Solaris thread into the OSThread
1094 1094 osthread->set_thread_id(thread_id);
1095 1095 osthread->set_lwp_id(_lwp_self());
1096 1096 thread->_schedctl = (void *) schedctl_init () ;
1097 1097
1098 1098 if (UseNUMA) {
1099 1099 int lgrp_id = os::numa_get_group_id();
1100 1100 if (lgrp_id != -1) {
1101 1101 thread->set_lgrp_id(lgrp_id);
1102 1102 }
1103 1103 }
1104 1104
1105 1105 if ( ThreadPriorityVerbose ) {
1106 1106 tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
1107 1107 osthread->thread_id(), osthread->lwp_id() );
1108 1108 }
1109 1109
1110 1110 // Initial thread state is INITIALIZED, not SUSPENDED
1111 1111 osthread->set_state(INITIALIZED);
1112 1112
1113 1113 return osthread;
1114 1114 }
1115 1115
1116 1116 void os::Solaris::hotspot_sigmask(Thread* thread) {
1117 1117
1118 1118 //Save caller's signal mask
1119 1119 sigset_t sigmask;
1120 1120 thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
1121 1121 OSThread *osthread = thread->osthread();
1122 1122 osthread->set_caller_sigmask(sigmask);
1123 1123
1124 1124 thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
1125 1125 if (!ReduceSignalUsage) {
1126 1126 if (thread->is_VM_thread()) {
1127 1127 // Only the VM thread handles BREAK_SIGNAL ...
1128 1128 thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
1129 1129 } else {
1130 1130 // ... all other threads block BREAK_SIGNAL
1131 1131 assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
1132 1132 thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
1133 1133 }
1134 1134 }
1135 1135 }
1136 1136
1137 1137 bool os::create_attached_thread(JavaThread* thread) {
1138 1138 #ifdef ASSERT
1139 1139 thread->verify_not_published();
1140 1140 #endif
1141 1141 OSThread* osthread = create_os_thread(thread, thr_self());
1142 1142 if (osthread == NULL) {
1143 1143 return false;
1144 1144 }
1145 1145
1146 1146 // Initial thread state is RUNNABLE
1147 1147 osthread->set_state(RUNNABLE);
1148 1148 thread->set_osthread(osthread);
1149 1149
1150 1150 // initialize signal mask for this thread
1151 1151 // and save the caller's signal mask
1152 1152 os::Solaris::hotspot_sigmask(thread);
1153 1153
1154 1154 return true;
1155 1155 }
1156 1156
1157 1157 bool os::create_main_thread(JavaThread* thread) {
1158 1158 #ifdef ASSERT
1159 1159 thread->verify_not_published();
1160 1160 #endif
1161 1161 if (_starting_thread == NULL) {
1162 1162 _starting_thread = create_os_thread(thread, main_thread);
1163 1163 if (_starting_thread == NULL) {
1164 1164 return false;
1165 1165 }
1166 1166 }
1167 1167
1168 1168 // The primodial thread is runnable from the start
1169 1169 _starting_thread->set_state(RUNNABLE);
1170 1170
1171 1171 thread->set_osthread(_starting_thread);
1172 1172
1173 1173 // initialize signal mask for this thread
1174 1174 // and save the caller's signal mask
1175 1175 os::Solaris::hotspot_sigmask(thread);
1176 1176
1177 1177 return true;
1178 1178 }
1179 1179
// _T2_libthread is true if we believe we are running with the newer
// SunSoft lwp/libthread.so (2.8 patch, 2.9 default)
// Queried via os::Solaris::T2_libthread(); influences THR_BOUND selection
// in os::create_thread() below.
bool os::Solaris::_T2_libthread = false;
1183 1183
// Create a new Solaris thread of the given type and (optional) stack size
// and associate it with 'thread'.  The native thread is created suspended,
// in state INITIALIZED; it is started later, higher up in the call chain.
// Returns false if the OSThread or the native thread could not be created.
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // Diagnostic trace of what kind of thread is being created.
  if ( ThreadPriorityVerbose ) {
    char *thrtyp;
    switch ( thr_type ) {
      case vm_thread:
        thrtyp = (char *)"vm";
        break;
      case cgc_thread:
        thrtyp = (char *)"cgc";
        break;
      case pgc_thread:
        thrtyp = (char *)"pgc";
        break;
      case java_thread:
        thrtyp = (char *)"java";
        break;
      case compiler_thread:
        thrtyp = (char *)"compiler";
        break;
      case watcher_thread:
        thrtyp = (char *)"watcher";
        break;
      default:
        thrtyp = (char *)"unknown";
        break;
    }
    tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
  }

  // Calculate stack size if it's not specified by caller.
  if (stack_size == 0) {
    // The default stack size 1M (2M for LP64).
    stack_size = (BytesPerWord >> 2) * K * K;

    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
      if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }
  // Never go below the platform-required minimum stack size.
  stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
    // We got lots of threads. Check if we still have some address space left.
    // Need to be at least 5Mb of unreserved address space. We do check by
    // trying to reserve some.
    const size_t VirtualMemoryBangSize = 20*K*K;
    char* mem = os::reserve_memory(VirtualMemoryBangSize);
    if (mem == NULL) {
      delete osthread;
      return false;
    } else {
      // Release the memory again
      os::release_memory(mem, VirtualMemoryBangSize);
    }
  }

  // Setup osthread because the child thread may need it.
  thread->set_osthread(osthread);

  // Create the Solaris thread
  // explicit THR_BOUND for T2_libthread case in case
  // that assumption is not accurate, but our alternate signal stack
  // handling is based on it which must have bound threads
  thread_t tid = 0;
  long flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
               | ((UseBoundThreads || os::Solaris::T2_libthread() ||
                   (thr_type == vm_thread) ||
                   (thr_type == cgc_thread) ||
                   (thr_type == pgc_thread) ||
                   (thr_type == compiler_thread && BackgroundCompilation)) ?
                  THR_BOUND : 0);
  int status;

  // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
  //
  // On multiprocessors systems, libthread sometimes under-provisions our
  // process with LWPs. On a 30-way systems, for instance, we could have
  // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
  // to our process. This can result in under utilization of PEs.
  // I suspect the problem is related to libthread's LWP
  // pool management and to the kernel's SIGBLOCKING "last LWP parked"
  // upcall policy.
  //
  // The following code is palliative -- it attempts to ensure that our
  // process has sufficient LWPs to take advantage of multiple PEs.
  // Proper long-term cures include using user-level threads bound to LWPs
  // (THR_BOUND) or using LWP-based synchronization. Note that there is a
  // slight timing window with respect to sampling _os_thread_count, but
  // the race is benign. Also, we should periodically recompute
  // _processors_online as the min of SC_NPROCESSORS_ONLN and the
  // the number of PEs in our partition. You might be tempted to use
  // THR_NEW_LWP here, but I'd recommend against it as that could
  // result in undesirable growth of the libthread's LWP pool.
  // The fix below isn't sufficient; for instance, it doesn't take into count
  // LWPs parked on IO. It does, however, help certain CPU-bound benchmarks.
  //
  // Some pathologies this scheme doesn't handle:
  // * Threads can block, releasing the LWPs. The LWPs can age out.
  //   When a large number of threads become ready again there aren't
  //   enough LWPs available to service them. This can occur when the
  //   number of ready threads oscillates.
  // * LWPs/Threads park on IO, thus taking the LWP out of circulation.
  //
  // Finally, we should call thr_setconcurrency() periodically to refresh
  // the LWP pool and thwart the LWP age-out mechanism.
  // The "+3" term provides a little slop -- we want to slightly overprovision.

  if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
    if (!(flags & THR_BOUND)) {
      thr_setconcurrency (os::Solaris::_os_thread_count); // avoid starvation
    }
  }
  // Although this doesn't hurt, we should warn of undefined behavior
  // when using unbound T1 threads with schedctl(). This should never
  // happen, as the compiler and VM threads are always created bound
  DEBUG_ONLY(
      if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
          (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
          ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
           (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
        warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
      }
  );


  // Mark that we don't have an lwp or thread id yet.
  // In case we attempt to set the priority before the thread starts.
  osthread->set_lwp_id(-1);
  osthread->set_thread_id(-1);

  // The new thread begins execution in java_start() above.
  status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
  if (status != 0) {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("os::create_thread");
    }
    thread->set_osthread(NULL);
    // Need to clean up stuff we've allocated so far
    delete osthread;
    return false;
  }

  Atomic::inc(&os::Solaris::_os_thread_count);

  // Store info on the Solaris thread into the OSThread
  osthread->set_thread_id(tid);

  // Remember that we created this thread so we can set priority on it
  osthread->set_vm_created();

  // Set the default thread priority otherwise use NormalPriority

  if ( UseThreadPriorities ) {
     thr_setprio(tid, (DefaultThreadPriority == -1) ?
                        java_to_os_priority[NormPriority] :
                        DefaultThreadPriority);
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  return true;
}
1370 1370
1371 1371 /* defined for >= Solaris 10. This allows builds on earlier versions
1372 1372 * of Solaris to take advantage of the newly reserved Solaris JVM signals
1373 1373 * With SIGJVM1, SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2
1374 1374 * and -XX:+UseAltSigs does nothing since these should have no conflict
1375 1375 */
1376 1376 #if !defined(SIGJVM1)
1377 1377 #define SIGJVM1 39
1378 1378 #define SIGJVM2 40
1379 1379 #endif
1380 1380
// Set to true once signal_sets_init() has completed (debug builds only);
// checked by the accessor functions below.
debug_only(static bool signal_sets_initialized = false);
// Signal sets populated by signal_sets_init() and exposed through
// unblocked_signals(), vm_signals() and allowdebug_blocked_signals().
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
// Actual signal numbers used for interrupt/async notification.  The
// defaults may be replaced in signal_sets_init() with SIGJVM1/SIGJVM2 or
// the ALT_* signals depending on platform support and UseAltSigs.
int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
int os::Solaris::_SIGasync = ASYNC_SIGNAL;
1385 1385
1386 1386 bool os::Solaris::is_sig_ignored(int sig) {
1387 1387 struct sigaction oact;
1388 1388 sigaction(sig, (struct sigaction*)NULL, &oact);
1389 1389 void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
1390 1390 : CAST_FROM_FN_PTR(void*, oact.sa_handler);
1391 1391 if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
1392 1392 return true;
1393 1393 else
1394 1394 return false;
1395 1395 }
1396 1396
// Note: SIGRTMIN is a macro that calls sysconf() so it will
// dynamically detect SIGRTMIN value for the system at runtime, not buildtime
// Returns true when the reserved SIGJVM1/SIGJVM2 numbers lie below the
// realtime-signal range and are therefore safe for the JVM to use.
static bool isJVM1available() {
  return SIGJVM1 < SIGRTMIN;
}
1402 1402
1403 1403 void os::Solaris::signal_sets_init() {
1404 1404 // Should also have an assertion stating we are still single-threaded.
1405 1405 assert(!signal_sets_initialized, "Already initialized");
1406 1406 // Fill in signals that are necessarily unblocked for all threads in
1407 1407 // the VM. Currently, we unblock the following signals:
1408 1408 // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
1409 1409 // by -Xrs (=ReduceSignalUsage));
1410 1410 // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
1411 1411 // other threads. The "ReduceSignalUsage" boolean tells us not to alter
1412 1412 // the dispositions or masks wrt these signals.
1413 1413 // Programs embedding the VM that want to use the above signals for their
1414 1414 // own purposes must, at this time, use the "-Xrs" option to prevent
1415 1415 // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
1416 1416 // (See bug 4345157, and other related bugs).
1417 1417 // In reality, though, unblocking these signals is really a nop, since
1418 1418 // these signals are not blocked by default.
1419 1419 sigemptyset(&unblocked_sigs);
1420 1420 sigemptyset(&allowdebug_blocked_sigs);
1421 1421 sigaddset(&unblocked_sigs, SIGILL);
1422 1422 sigaddset(&unblocked_sigs, SIGSEGV);
1423 1423 sigaddset(&unblocked_sigs, SIGBUS);
1424 1424 sigaddset(&unblocked_sigs, SIGFPE);
1425 1425
1426 1426 if (isJVM1available) {
1427 1427 os::Solaris::set_SIGinterrupt(SIGJVM1);
1428 1428 os::Solaris::set_SIGasync(SIGJVM2);
1429 1429 } else if (UseAltSigs) {
1430 1430 os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
1431 1431 os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
1432 1432 } else {
1433 1433 os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
1434 1434 os::Solaris::set_SIGasync(ASYNC_SIGNAL);
1435 1435 }
1436 1436
1437 1437 sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
1438 1438 sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
1439 1439
1440 1440 if (!ReduceSignalUsage) {
1441 1441 if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
1442 1442 sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
1443 1443 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
1444 1444 }
1445 1445 if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
1446 1446 sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
1447 1447 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
1448 1448 }
1449 1449 if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
1450 1450 sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
1451 1451 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
1452 1452 }
1453 1453 }
1454 1454 // Fill in signals that are blocked by all but the VM thread.
1455 1455 sigemptyset(&vm_sigs);
1456 1456 if (!ReduceSignalUsage)
1457 1457 sigaddset(&vm_sigs, BREAK_SIGNAL);
1458 1458 debug_only(signal_sets_initialized = true);
1459 1459
1460 1460 // For diagnostics only used in run_periodic_checks
1461 1461 sigemptyset(&check_signal_done);
1462 1462 }
1463 1463
// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
// Valid only after signal_sets_init() has run.
sigset_t* os::Solaris::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}
1470 1470
// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
// Valid only after signal_sets_init() has run.
sigset_t* os::Solaris::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}
1477 1477
// These are signals that are blocked during cond_wait to allow debugger in
// Valid only after signal_sets_init() has run.
sigset_t* os::Solaris::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}
1483 1483
1484 1484
// std::terminate handler (installed via std::set_terminate in
// os::initialize_thread); turns an uncaught C++ exception into a fatal
// VM error report instead of a silent abort.
void _handle_uncaught_cxx_exception() {
  VMError err("An uncaught C++ exception");
  err.report_and_die();
}
1489 1489
1490 1490
// First crack at OS-specific initialization, from inside the new thread.
// For the primordial thread (thr_main() == 1) this recomputes and records
// a usable stack size on the JavaThread; for all threads it initializes
// FPU state and installs the uncaught-C++-exception terminate handler.
void os::initialize_thread() {
  // r == 1 indicates the primordial/main thread (see thr_main(3C)).
  int r = thr_main() ;
  guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
  if (r) {
    JavaThread* jt = (JavaThread *)Thread::current();
    assert(jt != NULL,"Sanity check");
    size_t stack_size;
    address base = jt->stack_base();
    if (Arguments::created_by_java_launcher()) {
      // Use 2MB to allow for Solaris 7 64 bit mode.
      stack_size = JavaThread::stack_size_at_create() == 0
        ? 2048*K : JavaThread::stack_size_at_create();

      // There are rare cases when we may have already used more than
      // the basic stack size allotment before this method is invoked.
      // Attempt to allow for a normally sized java_stack.
      size_t current_stack_offset = (size_t)(base - (address)&stack_size);
      stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
    } else {
      // 6269555: If we were not created by a Java launcher, i.e. if we are
      // running embedded in a native application, treat the primordial thread
      // as much like a native attached thread as possible. This means using
      // the current stack size from thr_stksegment(), unless it is too large
      // to reliably setup guard pages. A reasonable max size is 8MB.
      size_t current_size = current_stack_size();
      // This should never happen, but just in case....
      if (current_size == 0) current_size = 2 * K * K;
      stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
    }
    // Round the bottom up to a page boundary and recompute the usable size.
    address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());;
    stack_size = (size_t)(base - bottom);

    assert(stack_size > 0, "Stack size calculation problem");

    if (stack_size > jt->stack_size()) {
      NOT_PRODUCT(
        struct rlimit limits;
        getrlimit(RLIMIT_STACK, &limits);
        size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
        assert(size >= jt->stack_size(), "Stack size problem in main thread");
      )
      // NOTE(review): stack_size/K is size_t but printed with %d -- possible
      // truncation/misprint on LP64; confirm whether SIZE_FORMAT is wanted.
      tty->print_cr(
        "Stack size of %d Kb exceeds current limit of %d Kb.\n"
        "(Stack sizes are rounded up to a multiple of the system page size.)\n"
        "See limit(1) to increase the stack size limit.",
        stack_size / K, jt->stack_size() / K);
      vm_exit(1);
    }
    assert(jt->stack_size() >= stack_size,
          "Attempt to map more stack than was allocated");
    jt->set_stack_size(stack_size);
  }

  // 5/22/01: Right now alternate signal stacks do not handle
  // throwing stack overflow exceptions, see bug 4463178
  // Until a fix is found for this, T2 will NOT imply alternate signal
  // stacks.
  // If using T2 libthread threads, install an alternate signal stack.
  // Because alternate stacks associate with LWPs on Solaris,
  // see sigaltstack(2), if using UNBOUND threads, or if UseBoundThreads
  // we prefer to explicitly stack bang.
  // If not using T2 libthread, but using UseBoundThreads any threads
  // (primordial thread, jni_attachCurrentThread) we do not create,
  // probably are not bound, therefore they can not have an alternate
  // signal stack. Since our stack banging code is generated and
  // is shared across threads, all threads must be bound to allow
  // using alternate signal stacks. The alternative is to interpose
  // on _lwp_create to associate an alt sig stack with each LWP,
  // and this could be a problem when the JVM is embedded.
  // We would prefer to use alternate signal stacks with T2
  // Since there is currently no accurate way to detect T2
  // we do not. Assuming T2 when running T1 causes sig 11s or assertions
  // on installing alternate signal stacks


  // 05/09/03: removed alternate signal stack support for Solaris
  // The alternate signal stack mechanism is no longer needed to
  // handle stack overflow. This is now handled by allocating
  // guard pages (red zone) and stackbanging.
  // Initially the alternate signal stack mechanism was removed because
  // it did not work with T1 llibthread. Alternate
  // signal stacks MUST have all threads bound to lwps. Applications
  // can create their own threads and attach them without their being
  // bound under T1. This is frequently the case for the primordial thread.
  // If we were ever to reenable this mechanism we would need to
  // use the dynamic check for T2 libthread.

  os::Solaris::init_thread_fpu_state();
  std::set_terminate(_handle_uncaught_cxx_exception);
}
1582 1582
1583 1583
1584 1584
1585 1585 // Free Solaris resources related to the OSThread
1586 1586 void os::free_thread(OSThread* osthread) {
1587 1587 assert(osthread != NULL, "os::free_thread but osthread not set");
1588 1588
1589 1589
1590 1590 // We are told to free resources of the argument thread,
1591 1591 // but we can only really operate on the current thread.
1592 1592 // The main thread must take the VMThread down synchronously
1593 1593 // before the main thread exits and frees up CodeHeap
1594 1594 guarantee((Thread::current()->osthread() == osthread
1595 1595 || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
1596 1596 if (Thread::current()->osthread() == osthread) {
1597 1597 // Restore caller's signal mask
1598 1598 sigset_t sigmask = osthread->caller_sigmask();
1599 1599 thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
1600 1600 }
1601 1601 delete osthread;
1602 1602 }
1603 1603
// Release a thread that was created suspended (see os::create_thread):
// thr_continue() lets the new LWP begin executing its start routine.
void os::pd_start_thread(Thread* thread) {
  int status = thr_continue(thread->osthread()->thread_id());
  assert_status(status == 0, status, "thr_continue failed");
}
1608 1608
1609 1609
// The libthread id of the calling thread, used as the VM-visible thread id.
intx os::current_thread_id() {
  return (intx)thr_self();
}

// Cached process id; 0 until assigned.  NOTE(review): the assignment is not
// visible in this chunk -- presumably recorded during os::init().
static pid_t _initial_pid = 0;

// Returns the cached initial pid when set, otherwise the live getpid().
int os::current_process_id() {
  return (int)(_initial_pid ? _initial_pid : getpid());
}
1619 1619
// Allocate one thread-specific-data key via thr_keycreate() and return it
// as an int index for use with os::thread_local_storage_at{,_put}.
// Exits the VM via fatal() if key creation fails.
int os::allocate_thread_local_storage() {
  // %%%       in Win32 this allocates a memory segment pointed to by a
  //           register.  Dan Stein can implement a similar feature in
  //           Solaris.  Alternatively, the VM can do the same thing
  //           explicitly: malloc some storage and keep the pointer in a
  //           register (which is part of the thread's context) (or keep it
  //           in TLS).
  // %%%       In current versions of Solaris, thr_self and TSD can
  //           be accessed via short sequences of displaced indirections.
  //           The value of thr_self is available as %g7(36).
  //           The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
  //           assuming that the current thread already has a value bound to k.
  //           It may be worth experimenting with such access patterns,
  //           and later having the parameters formally exported from a Solaris
  //           interface.  I think, however, that it will be faster to
  //           maintain the invariant that %g2 always contains the
  //           JavaThread in Java code, and have stubs simply
  //           treat %g2 as a caller-save register, preserving it in a %lN.
  thread_key_t tk;
  if (thr_keycreate( &tk, NULL ) )  // no destructor: slots are cleaned up manually
    fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
                  "(%s)", strerror(errno)));
  return int(tk);
}
1644 1644
// Intentionally a no-op on Solaris: TSD keys are never reclaimed.
void os::free_thread_local_storage(int index) {
  // %%% don't think we need anything here
  // if ( pthread_key_delete((pthread_key_t) tk) )
  //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
}
1650 1650
#define SMALLINT 32   // libthread allocate for tsd_common is a version specific
                      // small number - point is NO swap space available
// Bind 'value' (a Thread*) to TSD key 'index' for the calling thread.
// thr_setspecific() may allocate; ENOMEM here means the system is out of
// swap, which is reported as a VM out-of-memory rather than a plain fatal.
void os::thread_local_storage_at_put(int index, void* value) {
  // %%% this is used only in threadLocalStorage.cpp
  if (thr_setspecific((thread_key_t)index, value)) {
    if (errno == ENOMEM) {
      vm_exit_out_of_memory(SMALLINT, "thr_setspecific: out of swap space");
    } else {
      fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
                    "(%s)", strerror(errno)));
    }
  } else {
    // Mirror the binding in the VM's own fast-access slot.
    ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;
  }
}
1666 1666
1667 1667 // This function could be called before TLS is initialized, for example, when
1668 1668 // VM receives an async signal or when VM causes a fatal error during
1669 1669 // initialization. Return NULL if thr_getspecific() fails.
1670 1670 void* os::thread_local_storage_at(int index) {
1671 1671 // %%% this is used only in threadLocalStorage.cpp
1672 1672 void* r = NULL;
1673 1673 return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
1674 1674 }
1675 1675
1676 1676
1677 1677 // gethrtime can move backwards if read from one cpu and then a different cpu
1678 1678 // getTimeNanos is guaranteed to not move backward on Solaris
1679 1679 // local spinloop created as faster for a CAS on an int than
1680 1680 // a CAS on a 64bit jlong. Also Atomic::cmpxchg for jlong is not
1681 1681 // supported on sparc v8 or pre supports_cx8 intel boxes.
1682 1682 // oldgetTimeNanos for systems which do not support CAS on 64bit jlong
1683 1683 // i.e. sparc v8 and pre supports_cx8 (i486) intel boxes
1684 1684 inline hrtime_t oldgetTimeNanos() {
1685 1685 int gotlock = LOCK_INVALID;
1686 1686 hrtime_t newtime = gethrtime();
1687 1687
1688 1688 for (;;) {
1689 1689 // grab lock for max_hrtime
1690 1690 int curlock = max_hrtime_lock;
1691 1691 if (curlock & LOCK_BUSY) continue;
1692 1692 if (gotlock = Atomic::cmpxchg(LOCK_BUSY, &max_hrtime_lock, LOCK_FREE) != LOCK_FREE) continue;
1693 1693 if (newtime > max_hrtime) {
1694 1694 max_hrtime = newtime;
1695 1695 } else {
1696 1696 newtime = max_hrtime;
1697 1697 }
1698 1698 // release lock
1699 1699 max_hrtime_lock = LOCK_FREE;
1700 1700 return newtime;
1701 1701 }
1702 1702 }
// gethrtime can move backwards if read from one cpu and then a different cpu
// getTimeNanos is guaranteed to not move backward on Solaris
// Uses a single 64-bit CAS against the published maximum when the hardware
// supports it (cx8); otherwise defers to the spin-locked oldgetTimeNanos().
inline hrtime_t getTimeNanos() {
  if (VM_Version::supports_cx8()) {
    const hrtime_t now = gethrtime();
    // Use atomic long load since 32-bit x86 uses 2 registers to keep long.
    const hrtime_t prev = Atomic::load((volatile jlong*)&max_hrtime);
    if (now <= prev)  return prev;   // same or retrograde time;
    const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
    assert(obsv >= prev, "invariant");   // Monotonicity
    // If the CAS succeeded then we're done and return "now".
    // If the CAS failed and the observed value "obs" is >= now then
    // we should return "obs".  If the CAS failed and now > obs > prv then
    // some other thread raced this thread and installed a new value, in which case
    // we could either (a) retry the entire operation, (b) retry trying to install now
    // or (c) just return obs.  We use (c).   No loop is required although in some cases
    // we might discard a higher "now" value in deference to a slightly lower but freshly
    // installed obs value.   That's entirely benign -- it admits no new orderings compared
    // to (a) or (b) -- and greatly reduces coherence traffic.
    // We might also condition (c) on the magnitude of the delta between obs and now.
    // Avoiding excessive CAS operations to hot RW locations is critical.
    // See http://blogs.sun.com/dave/entry/cas_and_cache_trivia_invalidate
    return (prev == obsv) ? now : obsv ;
  } else {
    return oldgetTimeNanos();
  }
}
1730 1730
// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  // first_hrtime is the gethrtime() value captured at VM start; hrtime_hz
  // is the tick rate used to convert to seconds.
  return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
}

// Raw elapsed ticks (nanoseconds) since VM start.
jlong os::elapsed_counter() {
  return (jlong)(getTimeNanos() - first_hrtime);
}

// Ticks per second of the elapsed counter.
jlong os::elapsed_frequency() {
  return hrtime_hz;
}
1744 1744
1745 1745 // Return the real, user, and system times in seconds from an
1746 1746 // arbitrary fixed point in the past.
1747 1747 bool os::getTimesSecs(double* process_real_time,
1748 1748 double* process_user_time,
1749 1749 double* process_system_time) {
1750 1750 struct tms ticks;
1751 1751 clock_t real_ticks = times(&ticks);
1752 1752
1753 1753 if (real_ticks == (clock_t) (-1)) {
1754 1754 return false;
1755 1755 } else {
1756 1756 double ticks_per_second = (double) clock_tics_per_sec;
1757 1757 *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1758 1758 *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1759 1759 // For consistency return the real time from getTimeNanos()
1760 1760 // converted to seconds.
1761 1761 *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
1762 1762
1763 1763 return true;
1764 1764 }
1765 1765 }
1766 1766
// Solaris can report per-thread virtual (CPU) time -- see gethrvtime().
bool os::supports_vtime() { return true; }
1768 1768
1769 1769 bool os::enable_vtime() {
1770 1770 int fd = ::open("/proc/self/ctl", O_WRONLY);
1771 1771 if (fd == -1)
1772 1772 return false;
1773 1773
1774 1774 long cmd[] = { PCSET, PR_MSACCT };
1775 1775 int res = ::write(fd, cmd, sizeof(long) * 2);
1776 1776 ::close(fd);
1777 1777 if (res != sizeof(long) * 2)
1778 1778 return false;
1779 1779
1780 1780 return true;
1781 1781 }
1782 1782
1783 1783 bool os::vtime_enabled() {
1784 1784 int fd = ::open("/proc/self/status", O_RDONLY);
1785 1785 if (fd == -1)
1786 1786 return false;
1787 1787
1788 1788 pstatus_t status;
1789 1789 int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
1790 1790 ::close(fd);
1791 1791 if (res != sizeof(pstatus_t))
1792 1792 return false;
1793 1793
1794 1794 return status.pr_flags & PR_MSACCT;
1795 1795 }
1796 1796
// Per-thread virtual (CPU) time in seconds; accurate only when microstate
// accounting is enabled (see os::enable_vtime()).
double os::elapsedVTime() {
  return (double)gethrvtime() / (double)hrtime_hz;
}

// Used internally for comparisons only
// getTimeMillis guaranteed to not move backwards on Solaris
jlong getTimeMillis() {
  jlong nanotime = getTimeNanos();
  return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
}
1807 1807
// Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
// Note: wall-clock based, so unlike getTimeMillis() this CAN move backwards
// if the system clock is reset.
jlong os::javaTimeMillis() {
  timeval t;
  if (gettimeofday( &t, NULL) == -1)
    fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
  return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
}

// Monotonic nanosecond time for System.nanoTime().
jlong os::javaTimeNanos() {
  return (jlong)getTimeNanos();
}

// JVMTI description of the javaTimeNanos() clock.
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
  info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
  info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
  info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
}
1826 1826
1827 1827 char * os::local_time_string(char *buf, size_t buflen) {
1828 1828 struct tm t;
1829 1829 time_t long_time;
1830 1830 time(&long_time);
1831 1831 localtime_r(&long_time, &t);
1832 1832 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1833 1833 t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1834 1834 t.tm_hour, t.tm_min, t.tm_sec);
1835 1835 return buf;
1836 1836 }
1837 1837
// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
// Order matters: persistent resources first, then the attach listener's
// filesystem artifacts, then log flushing, and the user abort hook last.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}
1858 1858
// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
// When dump_core is true the process aborts (producing a core file where
// enabled); otherwise it exits with status 1 after os::shutdown() cleanup.
void os::abort(bool dump_core) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core (for debugging)
  }

  ::exit(1);
}
1878 1878
// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  _exit(-1);
}

// unused
void os::set_error_file(const char *logfile) {}

// DLL functions

// Shared-object suffix on Solaris.
const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }
1894 1894
1895 1895 static bool file_exists(const char* filename) {
1896 1896 struct stat statbuf;
1897 1897 if (filename == NULL || strlen(filename) == 0) {
1898 1898 return false;
1899 1899 }
1900 1900 return os::stat(filename, &statbuf) == 0;
1901 1901 }
1902 1902
// Build a candidate shared-library path "lib<fname>.so" into 'buffer'.
// - empty pname: bare "lib<fname>.so"
// - pname containing the path separator: try each element in order and keep
//   the first candidate that exists (buffer holds the last one tried if none do)
// - otherwise: "<pname>/lib<fname>.so"
// On potential overflow the buffer is silently emptied.
void os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Quietly truncate on buffer overflow.  Should be an error.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // pname is a search path: probe each element until a file exists.
    int n;
    char** pelements = split_path(pname, &n);
    for (int i = 0 ; i < n ; i++) {
      // really shouldn't be NULL but what the heck, check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        break;
      }
    }
    // release the storage allocated by split_path()
    for (int i = 0 ; i < n ; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
  }
}
1941 1941
// Thin wrapper over getcwd(3C): fills buf and returns it, or NULL on error.
const char* os::get_current_directory(char *buf, int buflen) {
  return getcwd(buf, buflen);
}
1945 1945
1946 1946 // check if addr is inside libjvm[_g].so
1947 1947 bool os::address_is_in_vm(address addr) {
1948 1948 static address libjvm_base_addr;
1949 1949 Dl_info dlinfo;
1950 1950
1951 1951 if (libjvm_base_addr == NULL) {
1952 1952 dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo);
1953 1953 libjvm_base_addr = (address)dlinfo.dli_fbase;
1954 1954 assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
1955 1955 }
1956 1956
1957 1957 if (dladdr((void *)addr, &dlinfo)) {
1958 1958 if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
1959 1959 }
1960 1960
1961 1961 return false;
1962 1962 }
1963 1963
1964 1964 typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int);
1965 1965 static dladdr1_func_type dladdr1_func = NULL;
1966 1966
1967 1967 bool os::dll_address_to_function_name(address addr, char *buf,
1968 1968 int buflen, int * offset) {
1969 1969 Dl_info dlinfo;
1970 1970
1971 1971 // dladdr1_func was initialized in os::init()
1972 1972 if (dladdr1_func){
1973 1973 // yes, we have dladdr1
1974 1974
1975 1975 // Support for dladdr1 is checked at runtime; it may be
1976 1976 // available even if the vm is built on a machine that does
1977 1977 // not have dladdr1 support. Make sure there is a value for
1978 1978 // RTLD_DL_SYMENT.
1979 1979 #ifndef RTLD_DL_SYMENT
1980 1980 #define RTLD_DL_SYMENT 1
1981 1981 #endif
1982 1982 #ifdef _LP64
1983 1983 Elf64_Sym * info;
1984 1984 #else
1985 1985 Elf32_Sym * info;
1986 1986 #endif
1987 1987 if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
1988 1988 RTLD_DL_SYMENT)) {
1989 1989 if ((char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
1990 1990 if (buf != NULL) {
1991 1991 if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen))
1992 1992 jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1993 1993 }
1994 1994 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1995 1995 return true;
1996 1996 }
1997 1997 }
1998 1998 if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
1999 1999 if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
2000 2000 dlinfo.dli_fname, buf, buflen, offset) == Decoder::no_error) {
2001 2001 return true;
2002 2002 }
2003 2003 }
2004 2004 if (buf != NULL) buf[0] = '\0';
2005 2005 if (offset != NULL) *offset = -1;
2006 2006 return false;
2007 2007 } else {
2008 2008 // no, only dladdr is available
2009 2009 if (dladdr((void *)addr, &dlinfo)) {
2010 2010 if (buf != NULL) {
2011 2011 if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen))
2012 2012 jio_snprintf(buf, buflen, dlinfo.dli_sname);
2013 2013 }
2014 2014 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
2015 2015 return true;
2016 2016 } else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
2017 2017 if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
2018 2018 dlinfo.dli_fname, buf, buflen, offset) == Decoder::no_error) {
2019 2019 return true;
2020 2020 }
2021 2021 }
2022 2022 if (buf != NULL) buf[0] = '\0';
2023 2023 if (offset != NULL) *offset = -1;
2024 2024 return false;
2025 2025 }
2026 2026 }
2027 2027
2028 2028 bool os::dll_address_to_library_name(address addr, char* buf,
2029 2029 int buflen, int* offset) {
2030 2030 Dl_info dlinfo;
2031 2031
2032 2032 if (dladdr((void*)addr, &dlinfo)){
2033 2033 if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
2034 2034 if (offset) *offset = addr - (address)dlinfo.dli_fbase;
2035 2035 return true;
2036 2036 } else {
2037 2037 if (buf) buf[0] = '\0';
2038 2038 if (offset) *offset = -1;
2039 2039 return false;
2040 2040 }
2041 2041 }
2042 2042
2043 2043 // Prints the names and full paths of all opened dynamic libraries
2044 2044 // for current process
2045 2045 void os::print_dll_info(outputStream * st) {
2046 2046 Dl_info dli;
2047 2047 void *handle;
2048 2048 Link_map *map;
2049 2049 Link_map *p;
2050 2050
2051 2051 st->print_cr("Dynamic libraries:"); st->flush();
2052 2052
2053 2053 if (!dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli)) {
2054 2054 st->print_cr("Error: Cannot print dynamic libraries.");
2055 2055 return;
2056 2056 }
2057 2057 handle = dlopen(dli.dli_fname, RTLD_LAZY);
2058 2058 if (handle == NULL) {
2059 2059 st->print_cr("Error: Cannot print dynamic libraries.");
2060 2060 return;
2061 2061 }
2062 2062 dlinfo(handle, RTLD_DI_LINKMAP, &map);
2063 2063 if (map == NULL) {
2064 2064 st->print_cr("Error: Cannot print dynamic libraries.");
2065 2065 return;
2066 2066 }
2067 2067
2068 2068 while (map->l_prev != NULL)
2069 2069 map = map->l_prev;
2070 2070
2071 2071 while (map != NULL) {
2072 2072 st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
2073 2073 map = map->l_next;
2074 2074 }
2075 2075
2076 2076 dlclose(handle);
2077 2077 }
2078 2078
// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
// Returns the dlopen() handle on success.  On failure returns NULL with the
// dlerror() text copied into ebuf, possibly followed by an architecture
// mismatch diagnosis derived from the library's ELF header.

void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
{
  void * result= ::dlopen(filename, RTLD_LAZY);
  if (result != NULL) {
    // Successful loading
    return result;
  }

  Elf32_Ehdr elf_head;

  // Read system error message into ebuf
  // It may or may not be overwritten below
  // NOTE(review): dlerror() can in principle return NULL; strncpy would
  // then fault -- confirm this path is unreachable after a failed dlopen.
  ::strncpy(ebuf, ::dlerror(), ebuflen-1);
  ebuf[ebuflen-1]='\0';
  int diag_msg_max_length=ebuflen-strlen(ebuf);
  char* diag_msg_buf=ebuf+strlen(ebuf);

  if (diag_msg_max_length==0) {
    // No more space in ebuf for additional diagnostics message
    return NULL;
  }


  int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);

  if (file_descriptor < 0) {
    // Can't open library, report dlerror() message
    return NULL;
  }

  // The ELF ident/machine fields live at the start of the file and are
  // identical in layout for 32- and 64-bit objects, so reading an
  // Elf32_Ehdr is sufficient for the checks below.
  bool failed_to_read_elf_head=
    (sizeof(elf_head)!=
        (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ;

  ::close(file_descriptor);
  if (failed_to_read_elf_head) {
    // file i/o error - report dlerror() msg
    return NULL;
  }

  // Table describing every architecture we know how to diagnose.
  typedef struct {
    Elf32_Half  code;         // Actual value as defined in elf.h
    Elf32_Half  compat_class; // Compatibility of archs at VM's sense
    char        elf_class;    // 32 or 64 bit
    char        endianess;    // MSB or LSB
    char*       name;         // String representation
  } arch_t;

  static const arch_t arch_array[]={
    {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
    {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
    {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
    {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
    {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
    {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
    {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
    {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
    {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
    {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
  };

  // Machine code of the architecture this VM was compiled for.
  #if  (defined IA32)
    static  Elf32_Half running_arch_code=EM_386;
  #elif   (defined AMD64)
    static  Elf32_Half running_arch_code=EM_X86_64;
  #elif  (defined IA64)
    static  Elf32_Half running_arch_code=EM_IA_64;
  #elif  (defined __sparc) && (defined _LP64)
    static  Elf32_Half running_arch_code=EM_SPARCV9;
  #elif  (defined __sparc) && (!defined _LP64)
    static  Elf32_Half running_arch_code=EM_SPARC;
  #elif  (defined __powerpc64__)
    static  Elf32_Half running_arch_code=EM_PPC64;
  #elif  (defined __powerpc__)
    static  Elf32_Half running_arch_code=EM_PPC;
  #elif (defined ARM)
    static  Elf32_Half running_arch_code=EM_ARM;
  #else
    #error Method os::dll_load requires that one of following is defined:\
         IA32, AMD64, IA64, __sparc, __powerpc__, ARM, ARM
  #endif

  // Identify compatability class for VM's architecture and library's architecture
  // Obtain string descriptions for architectures

  arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
  int running_arch_index=-1;

  for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
    if (running_arch_code == arch_array[i].code) {
      running_arch_index    = i;
    }
    if (lib_arch.code == arch_array[i].code) {
      lib_arch.compat_class = arch_array[i].compat_class;
      lib_arch.name         = arch_array[i].name;
    }
  }

  assert(running_arch_index != -1,
    "Didn't find running architecture code (running_arch_code) in arch_array");
  if (running_arch_index == -1) {
    // Even though running architecture detection failed
    // we may still continue with reporting dlerror() message
    return NULL;
  }

  // Append the most specific mismatch diagnosis we can determine;
  // each check leaves the plain dlerror() text in place if it fires.
  if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
    ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
    return NULL;
  }

  if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
    ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
    return NULL;
  }

  if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
    if ( lib_arch.name!=NULL ) {
      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
        " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
        lib_arch.name, arch_array[running_arch_index].name);
    } else {
      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
      " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
        lib_arch.code,
        arch_array[running_arch_index].name);
    }
  }

  return NULL;
}
2214 2214
// Resolve symbol 'name' in the shared object 'handle' (from os::dll_load).
void* os::dll_lookup(void* handle, const char* name) {
  return dlsym(handle, name);
}
2218 2218
2219 2219 int os::stat(const char *path, struct stat *sbuf) {
2220 2220 char pathbuf[MAX_PATH];
2221 2221 if (strlen(path) > MAX_PATH - 1) {
2222 2222 errno = ENAMETOOLONG;
2223 2223 return -1;
2224 2224 }
2225 2225 os::native_path(strcpy(pathbuf, path));
2226 2226 return ::stat(pathbuf, sbuf);
2227 2227 }
2228 2228
2229 2229 static bool _print_ascii_file(const char* filename, outputStream* st) {
2230 2230 int fd = ::open(filename, O_RDONLY);
2231 2231 if (fd == -1) {
2232 2232 return false;
2233 2233 }
2234 2234
2235 2235 char buf[32];
2236 2236 int bytes;
2237 2237 while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
2238 2238 st->print_raw(buf, bytes);
2239 2239 }
2240 2240
2241 2241 ::close(fd);
2242 2242
2243 2243 return true;
2244 2244 }
2245 2245
2246 2246 void os::print_os_info(outputStream* st) {
2247 2247 st->print("OS:");
2248 2248
2249 2249 if (!_print_ascii_file("/etc/release", st)) {
2250 2250 st->print("Solaris");
2251 2251 }
2252 2252 st->cr();
2253 2253
2254 2254 // kernel
2255 2255 st->print("uname:");
2256 2256 struct utsname name;
2257 2257 uname(&name);
2258 2258 st->print(name.sysname); st->print(" ");
2259 2259 st->print(name.release); st->print(" ");
2260 2260 st->print(name.version); st->print(" ");
2261 2261 st->print(name.machine);
2262 2262
2263 2263 // libthread
2264 2264 if (os::Solaris::T2_libthread()) st->print(" (T2 libthread)");
2265 2265 else st->print(" (T1 libthread)");
2266 2266 st->cr();
2267 2267
2268 2268 // rlimit
2269 2269 st->print("rlimit:");
2270 2270 struct rlimit rlim;
2271 2271
2272 2272 st->print(" STACK ");
2273 2273 getrlimit(RLIMIT_STACK, &rlim);
2274 2274 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
2275 2275 else st->print("%uk", rlim.rlim_cur >> 10);
2276 2276
2277 2277 st->print(", CORE ");
2278 2278 getrlimit(RLIMIT_CORE, &rlim);
2279 2279 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
2280 2280 else st->print("%uk", rlim.rlim_cur >> 10);
2281 2281
2282 2282 st->print(", NOFILE ");
2283 2283 getrlimit(RLIMIT_NOFILE, &rlim);
2284 2284 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
2285 2285 else st->print("%d", rlim.rlim_cur);
2286 2286
2287 2287 st->print(", AS ");
2288 2288 getrlimit(RLIMIT_AS, &rlim);
2289 2289 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
2290 2290 else st->print("%uk", rlim.rlim_cur >> 10);
2291 2291 st->cr();
2292 2292
2293 2293 // load average
2294 2294 st->print("load average:");
2295 2295 double loadavg[3];
2296 2296 os::loadavg(loadavg, 3);
2297 2297 st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
2298 2298 st->cr();
2299 2299 }
2300 2300
2301 2301
2302 2302 static bool check_addr0(outputStream* st) {
2303 2303 jboolean status = false;
2304 2304 int fd = ::open("/proc/self/map",O_RDONLY);
2305 2305 if (fd >= 0) {
2306 2306 prmap_t p;
2307 2307 while(::read(fd, &p, sizeof(p)) > 0) {
2308 2308 if (p.pr_vaddr == 0x0) {
2309 2309 st->print("Warning: Address: 0x%x, Size: %dK, ",p.pr_vaddr, p.pr_size/1024, p.pr_mapname);
2310 2310 st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
2311 2311 st->print("Access:");
2312 2312 st->print("%s",(p.pr_mflags & MA_READ) ? "r" : "-");
2313 2313 st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
2314 2314 st->print("%s",(p.pr_mflags & MA_EXEC) ? "x" : "-");
2315 2315 st->cr();
2316 2316 status = true;
2317 2317 }
2318 2318 ::close(fd);
2319 2319 }
2320 2320 }
2321 2321 return status;
2322 2322 }
2323 2323
// No Solaris-specific CPU details to add beyond the shared reporting.
void os::pd_print_cpu_info(outputStream* st) {
  // Nothing to do for now.
}

// Print page size, physical and free memory, and warn about any page-zero
// mapping (see check_addr0).
void os::print_memory_info(outputStream* st) {
  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);
  st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
  st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
  st->cr();
  (void) check_addr0(st);
}
2336 2336
// Taken from /usr/include/sys/machsig.h  Supposed to be architecture specific
// but they're the same for all the solaris architectures that we support.
// Tables mapping siginfo si_code values to names; index 0 is a placeholder
// since valid si_code values start at 1.  Entry counts: ill_names 9,
// fpe_names 9, segv_names 3, bus_names 4 -- callers must bounds-check
// before indexing.
const char *ill_names[] = { "ILL0", "ILL_ILLOPC", "ILL_ILLOPN", "ILL_ILLADR",
                          "ILL_ILLTRP", "ILL_PRVOPC", "ILL_PRVREG",
                          "ILL_COPROC", "ILL_BADSTK" };

const char *fpe_names[] = { "FPE0", "FPE_INTDIV", "FPE_INTOVF", "FPE_FLTDIV",
                          "FPE_FLTOVF", "FPE_FLTUND", "FPE_FLTRES",
                          "FPE_FLTINV", "FPE_FLTSUB" };

const char *segv_names[] = { "SEGV0", "SEGV_MAPERR", "SEGV_ACCERR" };

const char *bus_names[] = { "BUS0", "BUS_ADRALN", "BUS_ADRERR", "BUS_OBJERR" };
2350 2350
2351 2351 void os::print_siginfo(outputStream* st, void* siginfo) {
2352 2352 st->print("siginfo:");
2353 2353
2354 2354 const int buflen = 100;
2355 2355 char buf[buflen];
2356 2356 siginfo_t *si = (siginfo_t*)siginfo;
2357 2357 st->print("si_signo=%s: ", os::exception_name(si->si_signo, buf, buflen));
2358 2358 char *err = strerror(si->si_errno);
2359 2359 if (si->si_errno != 0 && err != NULL) {
2360 2360 st->print("si_errno=%s", err);
2361 2361 } else {
2362 2362 st->print("si_errno=%d", si->si_errno);
2363 2363 }
2364 2364 const int c = si->si_code;
2365 2365 assert(c > 0, "unexpected si_code");
2366 2366 switch (si->si_signo) {
2367 2367 case SIGILL:
2368 2368 st->print(", si_code=%d (%s)", c, c > 8 ? "" : ill_names[c]);
2369 2369 st->print(", si_addr=" PTR_FORMAT, si->si_addr);
2370 2370 break;
2371 2371 case SIGFPE:
2372 2372 st->print(", si_code=%d (%s)", c, c > 9 ? "" : fpe_names[c]);
2373 2373 st->print(", si_addr=" PTR_FORMAT, si->si_addr);
2374 2374 break;
2375 2375 case SIGSEGV:
2376 2376 st->print(", si_code=%d (%s)", c, c > 2 ? "" : segv_names[c]);
2377 2377 st->print(", si_addr=" PTR_FORMAT, si->si_addr);
2378 2378 break;
2379 2379 case SIGBUS:
2380 2380 st->print(", si_code=%d (%s)", c, c > 3 ? "" : bus_names[c]);
2381 2381 st->print(", si_addr=" PTR_FORMAT, si->si_addr);
2382 2382 break;
2383 2383 default:
2384 2384 st->print(", si_code=%d", si->si_code);
2385 2385 // no si_addr
2386 2386 }
2387 2387
2388 2388 if ((si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
2389 2389 UseSharedSpaces) {
2390 2390 FileMapInfo* mapinfo = FileMapInfo::current_info();
2391 2391 if (mapinfo->is_in_shared_space(si->si_addr)) {
2392 2392 st->print("\n\nError accessing class data sharing archive." \
2393 2393 " Mapped file inaccessible during execution, " \
2394 2394 " possible disk/network problem.");
2395 2395 }
2396 2396 }
2397 2397 st->cr();
2398 2398 }
2399 2399
// Moved from whole group, because we need them here for diagnostic
// prints.
#define OLDMAXSIGNUM 32
// Highest signal number on this system; set from SIGRTMAX in
// os::Solaris::init_signal_mem(), 0 until then.
static int Maxsignum = 0;
// Per-signal sa_flags the VM installed, indexed by signal number;
// allocated and zeroed in init_signal_mem().
static int *ourSigFlags = NULL;

// Defined elsewhere in this file; compared against in print_signal_handler().
extern "C" void sigINTRHandler(int, siginfo_t*, void*);
2407 2407
// Return the sa_flags the VM recorded when it installed its handler for
// 'sig'; requires init_signal_mem() to have run.
int os::Solaris::get_our_sigflags(int sig) {
  assert(ourSigFlags!=NULL, "signal data structure not initialized");
  assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
  return ourSigFlags[sig];
}
2413 2413
// Record the sa_flags used when installing the VM's handler for 'sig' so
// print_signal_handler() can later detect if a third party changed them.
void os::Solaris::set_our_sigflags(int sig, int flags) {
  assert(ourSigFlags!=NULL, "signal data structure not initialized");
  assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
  ourSigFlags[sig] = flags;
}
2419 2419
2420 2420
2421 2421 static const char* get_signal_handler_name(address handler,
2422 2422 char* buf, int buflen) {
2423 2423 int offset;
2424 2424 bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
2425 2425 if (found) {
2426 2426 // skip directory names
2427 2427 const char *p1, *p2;
2428 2428 p1 = buf;
2429 2429 size_t len = strlen(os::file_separator());
2430 2430 while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
2431 2431 jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
2432 2432 } else {
2433 2433 jio_snprintf(buf, buflen, PTR_FORMAT, handler);
2434 2434 }
2435 2435 return buf;
2436 2436 }
2437 2437
// Print the current disposition of 'sig': the handler (symbolically if
// possible), the first word of the blocked mask, and sa_flags; warn when
// the flags on the VM's own handler differ from what the VM installed.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;

  // Query only: a NULL new-action pointer just reads the current action.
  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  // SA_SIGINFO selects which member of the handler union is active.
  address handler = (sa.sa_flags & SA_SIGINFO)
                  ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
                  : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  // Only the first 32 bits of the (larger) sigset_t are printed.
  st->print(", sa_mask[0]=" PTR32_FORMAT, *(uint32_t*)&sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // May be, handler was resetted by VMError?
  if(rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

  st->print(", sa_flags=" PTR32_FORMAT, sa.sa_flags);

  // Check: is it our handler?
  if(handler == CAST_FROM_FN_PTR(address, signalHandler) ||
     handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
    // It is our signal handler
    // check for flags
    if(sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
      st->print(
        ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
        os::Solaris::get_our_sigflags(sig));
    }
  }
  st->cr();
}
2482 2482
2483 2483 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
2484 2484 st->print_cr("Signal Handlers:");
2485 2485 print_signal_handler(st, SIGSEGV, buf, buflen);
2486 2486 print_signal_handler(st, SIGBUS , buf, buflen);
2487 2487 print_signal_handler(st, SIGFPE , buf, buflen);
2488 2488 print_signal_handler(st, SIGPIPE, buf, buflen);
2489 2489 print_signal_handler(st, SIGXFSZ, buf, buflen);
2490 2490 print_signal_handler(st, SIGILL , buf, buflen);
2491 2491 print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
2492 2492 print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
2493 2493 print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
2494 2494 print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
2495 2495 print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
2496 2496 print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
2497 2497 print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
2498 2498 print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
2499 2499 }
2500 2500
// Cached result of os::jvm_path(); computed once, reused on later calls.
static char saved_jvm_path[MAXPATHLEN] = { 0 };

// Find the full path to the current module, libjvm.so or libjvm_g.so
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  // Locate this code's own shared object and canonicalize its path.
  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  // NOTE(review): realpath's return value is unchecked here; on failure
  // buf contents are unspecified — confirm callers tolerate that.
  realpath((char *)dlinfo.dli_fname, buf);

  if (Arguments::created_by_gamma_launcher()) {
    // Support for the gamma launcher. Typical value for buf is
    // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so". If "/jre/lib/" appears at
    // the right place in the string, then assume we are installed in a JDK and
    // we're done. Otherwise, check for a JAVA_HOME environment variable and fix
    // up the path so it looks like libjvm.so is installed there (append a
    // fake suffix hotspot/libjvm.so).

    // Walk back over the last five '/'-separated path components.
    const char *p = buf + strlen(buf) - 1;
    for (int count = 0; p > buf && count < 5; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
        char cpu_arch[12];
        char* jrelib_p;
        int len;
        sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
#ifdef _LP64
        // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
        if (strcmp(cpu_arch, "sparc") == 0) {
          strcat(cpu_arch, "v9");
        } else if (strcmp(cpu_arch, "i386") == 0) {
          strcpy(cpu_arch, "amd64");
        }
#endif
        // Check the current module name "libjvm.so" or "libjvm_g.so".
        p = strrchr(buf, '/');
        assert(strstr(p, "/libjvm") == p, "invalid library name");
        // p becomes "_g" for the debug build, "" otherwise.
        p = strstr(p, "_g") ? "_g" : "";

        realpath(java_home_var, buf);
        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
        }

        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm[_g].so" instead of
          // "libjvm"debug_only("_g")".so" since for fastdebug version
          // we should have "libjvm.so" but debug_only("_g") adds "_g"!
          len = strlen(buf);
          snprintf(buf + len, buflen-len, "/hotspot/libjvm%s.so", p);
        } else {
          // Go back to path of .so
          realpath((char *)dlinfo.dli_fname, buf);
        }
      }
    }
  }

  strcpy(saved_jvm_path, buf);
}
2582 2582
2583 2583
// JNI symbol lookup: Solaris native symbols carry no prefix.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}
2587 2587
2588 2588
// JNI symbol lookup: Solaris native symbols carry no suffix either.
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}
2592 2592
2593 2593 // This method is a copy of JDK's sysGetLastErrorString
2594 2594 // from src/solaris/hpi/src/system_md.c
2595 2595
2596 2596 size_t os::lasterror(char *buf, size_t len) {
2597 2597
2598 2598 if (errno == 0) return 0;
2599 2599
2600 2600 const char *s = ::strerror(errno);
2601 2601 size_t n = ::strlen(s);
2602 2602 if (n >= len) {
2603 2603 n = len - 1;
2604 2604 }
2605 2605 ::strncpy(buf, s, n);
2606 2606 buf[n] = '\0';
2607 2607 return n;
2608 2608 }
2609 2609
2610 2610
// sun.misc.Signal

// Handler installed for signals forwarded to Java-level sun.misc.Signal
// handlers: it only records the signal and wakes the dispatcher thread.
extern "C" {
  static void UserHandler(int sig, void *siginfo, void *context) {
    // Ctrl-C is pressed during error reporting, likely because the error
    // handler fails to abort. Let VM die immediately.
    if (sig == SIGINT && is_error_reported()) {
      os::die();
    }

    // Bump the pending count and post sig_sem for os::signal_wait().
    os::signal_notify(sig);
    // We do not need to reinstate the signal handler each time...
  }
}
2625 2625
// Return the address of UserHandler for installation via os::signal().
void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}
2629 2629
// C-linkage typedefs matching the two struct sigaction handler flavors
// (plain sa_handler vs. SA_SIGINFO-style sa_sigaction).
extern "C" {
  typedef void (*sa_handler_t)(int);
  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}
2634 2634
2635 2635 void* os::signal(int signal_number, void* handler) {
2636 2636 struct sigaction sigAct, oldSigAct;
2637 2637 sigfillset(&(sigAct.sa_mask));
2638 2638 sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
2639 2639 sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
2640 2640
2641 2641 if (sigaction(signal_number, &sigAct, &oldSigAct))
2642 2642 // -1 means registration failed
2643 2643 return (void *)-1;
2644 2644
2645 2645 return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
2646 2646 }
2647 2647
// Deliver 'signal_number' to the current process.
void os::signal_raise(int signal_number) {
  raise(signal_number);
}
2651 2651
2652 2652 /*
2653 2653 * The following code is moved from os.cpp for making this
2654 2654 * code platform specific, which it is by its very nature.
2655 2655 */
2656 2656
2657 2657 // a counter for each possible signal value
2658 2658 static int Sigexit = 0;
2659 2659 static int Maxlibjsigsigs;
2660 2660 static jint *pending_signals = NULL;
2661 2661 static int *preinstalled_sigs = NULL;
2662 2662 static struct sigaction *chainedsigactions = NULL;
2663 2663 static sema_t sig_sem;
2664 2664 typedef int (*version_getting_t)();
2665 2665 version_getting_t os::Solaris::get_libjsig_version = NULL;
2666 2666 static int libjsigversion = NULL;
2667 2667
// Pseudo-signal number used to ask the signal dispatcher thread to exit;
// valid only after os::Solaris::init_signal_mem() has run.
int os::sigexitnum_pd() {
  assert(Sigexit > 0, "signal memory not yet initialized");
  return Sigexit;
}
2672 2672
2673 2673 void os::Solaris::init_signal_mem() {
2674 2674 // Initialize signal structures
2675 2675 Maxsignum = SIGRTMAX;
2676 2676 Sigexit = Maxsignum+1;
2677 2677 assert(Maxsignum >0, "Unable to obtain max signal number");
2678 2678
2679 2679 Maxlibjsigsigs = Maxsignum;
2680 2680
2681 2681 // pending_signals has one int per signal
2682 2682 // The additional signal is for SIGEXIT - exit signal to signal_thread
2683 2683 pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1));
2684 2684 memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
2685 2685
2686 2686 if (UseSignalChaining) {
2687 2687 chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction)
2688 2688 * (Maxsignum + 1));
2689 2689 memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
2690 2690 preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1));
2691 2691 memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
2692 2692 }
2693 2693 ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 ));
2694 2694 memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
2695 2695 }
2696 2696
2697 2697 void os::signal_init_pd() {
2698 2698 int ret;
2699 2699
2700 2700 ret = ::sema_init(&sig_sem, 0, NULL, NULL);
2701 2701 assert(ret == 0, "sema_init() failed");
2702 2702 }
2703 2703
2704 2704 void os::signal_notify(int signal_number) {
2705 2705 int ret;
2706 2706
2707 2707 Atomic::inc(&pending_signals[signal_number]);
2708 2708 ret = ::sema_post(&sig_sem);
2709 2709 assert(ret == 0, "sema_post() failed");
2710 2710 }
2711 2711
// Consume one pending signal and return its number. With
// wait_for_signal == false this is a non-blocking poll that returns -1
// when nothing is pending; with true it blocks on sig_sem, cooperating
// with the external thread-suspension protocol, until a signal arrives.
static int check_pending_signals(bool wait_for_signal) {
  int ret;
  while (true) {
    // Atomically claim one occurrence of any pending signal.
    for (int i = 0; i < Sigexit + 1; i++) {
      jint n = pending_signals[i];
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      while((ret = ::sema_wait(&sig_sem)) == EINTR)
        ;
      assert(ret == 0, "sema_wait() failed");

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        // Re-post so the wakeup is not lost, then self-suspend.
        ret = ::sema_post(&sig_sem);
        assert(ret == 0, "sema_post() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
2752 2752
// Non-blocking poll for a pending signal; returns the signal or -1.
int os::signal_lookup() {
  return check_pending_signals(false);
}
2756 2756
// Block until a signal is pending and return its number.
int os::signal_wait() {
  return check_pending_signals(true);
}
2760 2760
2761 2761 ////////////////////////////////////////////////////////////////////////////////
2762 2762 // Virtual Memory
2763 2763
// Cached system page size; -1 until os::init() fills it in.
static int page_size = -1;

// The mmap MAP_ALIGN flag is supported on Solaris 9 and later. init_2() will
// clear this var if support is not available.
static bool has_map_align = true;
2769 2769
// Return the cached system page size; os::init() must have run first.
int os::vm_page_size() {
  assert(page_size != -1, "must call os::init");
  return page_size;
}
2774 2774
// Solaris allocates memory by pages.
// Hence the allocation granularity equals the page size.
int os::vm_allocation_granularity() {
  assert(page_size != -1, "must call os::init");
  return page_size;
}
2780 2780
2781 2781 bool os::commit_memory(char* addr, size_t bytes, bool exec) {
2782 2782 int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2783 2783 size_t size = bytes;
2784 2784 char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
2785 2785 if (res != NULL) {
2786 2786 if (UseNUMAInterleaving) {
2787 2787 numa_make_global(addr, bytes);
2788 2788 }
2789 2789 return true;
2790 2790 }
2791 2791 return false;
2792 2792 }
2793 2793
2794 2794 bool os::commit_memory(char* addr, size_t bytes, size_t alignment_hint,
2795 2795 bool exec) {
2796 2796 if (commit_memory(addr, bytes, exec)) {
2797 2797 if (UseMPSS && alignment_hint > (size_t)vm_page_size()) {
2798 2798 // If the large page size has been set and the VM
2799 2799 // is using large pages, use the large page size
2800 2800 // if it is smaller than the alignment hint. This is
2801 2801 // a case where the VM wants to use a larger alignment size
2802 2802 // for its own reasons but still want to use large pages
2803 2803 // (which is what matters to setting the mpss range.
2804 2804 size_t page_size = 0;
2805 2805 if (large_page_size() < alignment_hint) {
2806 2806 assert(UseLargePages, "Expected to be here for large page use only");
2807 2807 page_size = large_page_size();
2808 2808 } else {
2809 2809 // If the alignment hint is less than the large page
2810 2810 // size, the VM wants a particular alignment (thus the hint)
2811 2811 // for internal reasons. Try to set the mpss range using
2812 2812 // the alignment_hint.
2813 2813 page_size = alignment_hint;
↓ open down ↓ |
2813 lines elided |
↑ open up ↑ |
2814 2814 }
2815 2815 // Since this is a hint, ignore any failures.
2816 2816 (void)Solaris::set_mpss_range(addr, bytes, page_size);
2817 2817 }
2818 2818 return true;
2819 2819 }
2820 2820 return false;
2821 2821 }
2822 2822
2823 2823 // Uncommit the pages in a specified region.
2824 -void os::free_memory(char* addr, size_t bytes) {
2824 +void os::free_memory(char* addr, size_t bytes, size_t alignment_hint) {
2825 2825 if (madvise(addr, bytes, MADV_FREE) < 0) {
2826 2826 debug_only(warning("MADV_FREE failed."));
2827 2827 return;
2828 2828 }
2829 2829 }
2830 2830
// Stack guard pages need no special backing on Solaris: committing the
// range is sufficient.
bool os::create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size);
}
2834 2834
// Release stack guard pages by uncommitting the range.
bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}
2838 2838
// Change the page size in a given range.
// Advisory only: asks MPSS for alignment_hint-sized pages; a no-op unless
// both UseLargePages and UseMPSS are enabled.
void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
  assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
  assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
  if (UseLargePages && UseMPSS) {
    Solaris::set_mpss_range(addr, bytes, alignment_hint);
  }
}
2847 2847
// Tell the OS to make the range local to the first-touching LWP
// (lgrp_hint is accepted for interface compatibility but unused here).
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
  assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
  if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
    debug_only(warning("MADV_ACCESS_LWP failed."));
  }
}
2855 2855
// Tell the OS that this range would be accessed from different LWPs.
// Advisory only: failures are logged in debug builds and ignored.
void os::numa_make_global(char *addr, size_t bytes) {
  assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
  if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
    debug_only(warning("MADV_ACCESS_MANY failed."));
  }
}
2863 2863
// Get the number of the locality groups.
// Falls back to 1 (UMA) when the lgrp API reports an error.
size_t os::numa_get_groups_num() {
  size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
  return n != -1 ? n : 1;
}
2869 2869
// Get a list of leaf locality groups. A leaf lgroup is group that
// doesn't have any children. Typical leaf group is a CPU or a CPU/memory
// board. An LWP is assigned to one of these groups upon creation.
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  // Seed the traversal with the root lgroup; on failure report one UMA group.
  if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
    ids[0] = 0;
    return 1;
  }
  // Breadth-first walk of the lgroup tree inside ids[]: ids[cur..top)
  // holds nodes yet to be expanded; leaves that own memory are compacted
  // into ids[0..bottom).
  int result_size = 0, top = 1, bottom = 0, cur = 0;
  for (int k = 0; k < size; k++) {
    int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
                                   (Solaris::lgrp_id_t*)&ids[top], size - top);
    if (r == -1) {
      ids[0] = 0;
      return 1;
    }
    if (!r) {
      // That's a leaf node.
      assert (bottom <= cur, "Sanity check");
      // Check if the node has memory
      if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
                                  NULL, 0, LGRP_RSRC_MEM) > 0) {
        ids[bottom++] = ids[cur];
      }
    }
    top += r;
    cur++;
  }
  if (bottom == 0) {
    // Handle a situation, when the OS reports no memory available.
    // Assume UMA architecture.
    ids[0] = 0;
    return 1;
  }
  return bottom;
}
2906 2906
// Detect the topology change. Typically happens during CPU plugging-unplugging.
// When the cached lgrp cookie is stale, re-initialize the lgrp API and
// return true so callers can refresh their view.
bool os::numa_topology_changed() {
  int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
  if (is_stale != -1 && is_stale) {
    Solaris::lgrp_fini(Solaris::lgrp_cookie());
    Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
    assert(c != 0, "Failure to initialize LGRP API");
    Solaris::set_lgrp_cookie(c);
    return true;
  }
  return false;
}
2919 2919
// Get the group id of the current LWP.
// Since the home lgroup may not itself own memory, pick (pseudo-randomly)
// one of the memory-owning lgroups in its resource set; 0 on any failure.
int os::numa_get_group_id() {
  int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
  if (lgrp_id == -1) {
    return 0;
  }
  const int size = os::numa_get_groups_num();
  int *ids = (int*)alloca(size * sizeof(int));

  // Get the ids of all lgroups with memory; r is the count.
  int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
                                  (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
  if (r <= 0) {
    return 0;
  }
  return ids[os::random() % r];
}
2937 2937
// Request information about the page.
// Queries meminfo() for the home lgroup and page size of the page at
// 'start'. Returns false when the address itself could not be resolved.
// validity bits: 1 = address resolved, 2 = outdata[0] (lgroup) valid,
// 4 = outdata[1] (page size) valid.
bool os::get_page_info(char *start, page_info* info) {
  const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  uint64_t addr = (uintptr_t)start;
  uint64_t outdata[2];
  uint_t validity = 0;

  if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
    return false;
  }

  // Defaults for fields the kernel did not report.
  info->size = 0;
  info->lgrp_id = -1;

  if ((validity & 1) != 0) {
    if ((validity & 2) != 0) {
      info->lgrp_id = outdata[0];
    }
    if ((validity & 4) != 0) {
      info->size = outdata[1];
    }
    return true;
  }
  return false;
}
2963 2963
// Scan the pages from start to end until a page different than
// the one described in the info parameter is encountered.
// Returns the address of the first mismatching page (filling *page_found),
// 'end' when the whole range matches, or NULL on meminfo failure or an
// unresolvable address. validity bits as in os::get_page_info().
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  const size_t types = sizeof(info_types) / sizeof(info_types[0]);
  uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT];
  uint_t validity[MAX_MEMINFO_CNT];

  // Stride by the expected page size (at least the system page size).
  size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
  uint64_t p = (uint64_t)start;
  while (p < (uint64_t)end) {
    // Build a batch of up to MAX_MEMINFO_CNT page addresses per syscall.
    addrs[0] = p;
    size_t addrs_count = 1;
    while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] < (uint64_t)end) {
      addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
      addrs_count++;
    }

    if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
      return NULL;
    }

    // Find the first page in the batch that differs from page_expected.
    size_t i = 0;
    for (; i < addrs_count; i++) {
      if ((validity[i] & 1) != 0) {
        // Mismatch if the page size differs, or is unreported while a
        // specific size was expected.
        if ((validity[i] & 4) != 0) {
          if (outdata[types * i + 1] != page_expected->size) {
            break;
          }
        } else
          if (page_expected->size != 0) {
            break;
          }

        // Mismatch if the home lgroup differs (only checked when expected).
        if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
          if (outdata[types * i] != page_expected->lgrp_id) {
            break;
          }
        }
      } else {
        return NULL;
      }
    }

    if (i != addrs_count) {
      // Report the properties of the mismatching page.
      if ((validity[i] & 2) != 0) {
        page_found->lgrp_id = outdata[types * i];
      } else {
        page_found->lgrp_id = -1;
      }
      if ((validity[i] & 4) != 0) {
        page_found->size = outdata[types * i + 1];
      } else {
        page_found->size = 0;
      }
      return (char*)addrs[i];
    }

    p = addrs[addrs_count - 1] + page_size;
  }
  return end;
}
3026 3026
3027 3027 bool os::uncommit_memory(char* addr, size_t bytes) {
3028 3028 size_t size = bytes;
3029 3029 // Map uncommitted pages PROT_NONE so we fail early if we touch an
3030 3030 // uncommitted page. Otherwise, the read/write might succeed if we
3031 3031 // have enough swap space to back the physical page.
3032 3032 return
3033 3033 NULL != Solaris::mmap_chunk(addr, size,
3034 3034 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
3035 3035 PROT_NONE);
3036 3036 }
3037 3037
3038 3038 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
3039 3039 char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
3040 3040
3041 3041 if (b == MAP_FAILED) {
3042 3042 return NULL;
3043 3043 }
3044 3044 return b;
3045 3045 }
3046 3046
// Reserve (but do not commit) 'bytes' of address space. With 'fixed' the
// mapping is forced at requested_addr; otherwise a MAP_ALIGN request is
// issued when the kernel supports it and a large alignment is hinted.
char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
  char* addr = requested_addr;
  int flags = MAP_PRIVATE | MAP_NORESERVE;

  assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");

  if (fixed) {
    flags |= MAP_FIXED;
  } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
    // With MAP_ALIGN the 'addr' argument carries the desired alignment.
    flags |= MAP_ALIGN;
    addr = (char*) alignment_hint;
  }

  // Map uncommitted pages PROT_NONE so we fail early if we touch an
  // uncommitted page. Otherwise, the read/write might succeed if we
  // have enough swap space to back the physical page.
  return mmap_chunk(addr, bytes, flags, PROT_NONE);
}
3065 3065
// Reserve address space, pinned at requested_addr when one is given
// (guaranteed: a non-NULL request is honored or the VM aborts).
char* os::reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
  char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));

  guarantee(requested_addr == NULL || requested_addr == addr,
            "OS failed to return requested mmap address.");
  return addr;
}
3073 3073
3074 3074 // Reserve memory at an arbitrary address, only if that area is
3075 3075 // available (and not reserved for something else).
3076 3076
3077 3077 char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3078 3078 const int max_tries = 10;
3079 3079 char* base[max_tries];
3080 3080 size_t size[max_tries];
3081 3081
3082 3082 // Solaris adds a gap between mmap'ed regions. The size of the gap
3083 3083 // is dependent on the requested size and the MMU. Our initial gap
3084 3084 // value here is just a guess and will be corrected later.
3085 3085 bool had_top_overlap = false;
3086 3086 bool have_adjusted_gap = false;
3087 3087 size_t gap = 0x400000;
3088 3088
3089 3089 // Assert only that the size is a multiple of the page size, since
3090 3090 // that's all that mmap requires, and since that's all we really know
3091 3091 // about at this low abstraction level. If we need higher alignment,
3092 3092 // we can either pass an alignment to this method or verify alignment
3093 3093 // in one of the methods further up the call chain. See bug 5044738.
3094 3094 assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
3095 3095
3096 3096 // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
3097 3097 // Give it a try, if the kernel honors the hint we can return immediately.
3098 3098 char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
3099 3099 volatile int err = errno;
3100 3100 if (addr == requested_addr) {
3101 3101 return addr;
3102 3102 } else if (addr != NULL) {
3103 3103 unmap_memory(addr, bytes);
3104 3104 }
3105 3105
3106 3106 if (PrintMiscellaneous && Verbose) {
3107 3107 char buf[256];
3108 3108 buf[0] = '\0';
3109 3109 if (addr == NULL) {
3110 3110 jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
3111 3111 }
3112 3112 warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
3113 3113 PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
3114 3114 "%s", bytes, requested_addr, addr, buf);
3115 3115 }
3116 3116
3117 3117 // Address hint method didn't work. Fall back to the old method.
3118 3118 // In theory, once SNV becomes our oldest supported platform, this
3119 3119 // code will no longer be needed.
3120 3120 //
3121 3121 // Repeatedly allocate blocks until the block is allocated at the
3122 3122 // right spot. Give up after max_tries.
3123 3123 int i;
3124 3124 for (i = 0; i < max_tries; ++i) {
3125 3125 base[i] = reserve_memory(bytes);
3126 3126
3127 3127 if (base[i] != NULL) {
3128 3128 // Is this the block we wanted?
3129 3129 if (base[i] == requested_addr) {
3130 3130 size[i] = bytes;
3131 3131 break;
3132 3132 }
3133 3133
3134 3134 // check that the gap value is right
3135 3135 if (had_top_overlap && !have_adjusted_gap) {
3136 3136 size_t actual_gap = base[i-1] - base[i] - bytes;
3137 3137 if (gap != actual_gap) {
3138 3138 // adjust the gap value and retry the last 2 allocations
3139 3139 assert(i > 0, "gap adjustment code problem");
3140 3140 have_adjusted_gap = true; // adjust the gap only once, just in case
3141 3141 gap = actual_gap;
3142 3142 if (PrintMiscellaneous && Verbose) {
3143 3143 warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
3144 3144 }
3145 3145 unmap_memory(base[i], bytes);
3146 3146 unmap_memory(base[i-1], size[i-1]);
3147 3147 i-=2;
3148 3148 continue;
3149 3149 }
3150 3150 }
3151 3151
3152 3152 // Does this overlap the block we wanted? Give back the overlapped
3153 3153 // parts and try again.
3154 3154 //
3155 3155 // There is still a bug in this code: if top_overlap == bytes,
3156 3156 // the overlap is offset from requested region by the value of gap.
3157 3157 // In this case giving back the overlapped part will not work,
3158 3158 // because we'll give back the entire block at base[i] and
3159 3159 // therefore the subsequent allocation will not generate a new gap.
3160 3160 // This could be fixed with a new algorithm that used larger
3161 3161 // or variable size chunks to find the requested region -
3162 3162 // but such a change would introduce additional complications.
3163 3163 // It's rare enough that the planets align for this bug,
3164 3164 // so we'll just wait for a fix for 6204603/5003415 which
3165 3165 // will provide a mmap flag to allow us to avoid this business.
3166 3166
3167 3167 size_t top_overlap = requested_addr + (bytes + gap) - base[i];
3168 3168 if (top_overlap >= 0 && top_overlap < bytes) {
3169 3169 had_top_overlap = true;
3170 3170 unmap_memory(base[i], top_overlap);
3171 3171 base[i] += top_overlap;
3172 3172 size[i] = bytes - top_overlap;
3173 3173 } else {
3174 3174 size_t bottom_overlap = base[i] + bytes - requested_addr;
3175 3175 if (bottom_overlap >= 0 && bottom_overlap < bytes) {
3176 3176 if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
3177 3177 warning("attempt_reserve_memory_at: possible alignment bug");
3178 3178 }
3179 3179 unmap_memory(requested_addr, bottom_overlap);
3180 3180 size[i] = bytes - bottom_overlap;
3181 3181 } else {
3182 3182 size[i] = bytes;
3183 3183 }
3184 3184 }
3185 3185 }
3186 3186 }
3187 3187
3188 3188 // Give back the unused reserved pieces.
3189 3189
3190 3190 for (int j = 0; j < i; ++j) {
3191 3191 if (base[j] != NULL) {
3192 3192 unmap_memory(base[j], size[j]);
3193 3193 }
3194 3194 }
3195 3195
3196 3196 return (i < max_tries) ? requested_addr : NULL;
3197 3197 }
3198 3198
3199 3199 bool os::release_memory(char* addr, size_t bytes) {
3200 3200 size_t size = bytes;
3201 3201 return munmap(addr, size) == 0;
3202 3202 }
3203 3203
3204 3204 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
3205 3205 assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
3206 3206 "addr must be page aligned");
3207 3207 int retVal = mprotect(addr, bytes, prot);
3208 3208 return retVal == 0;
3209 3209 }
3210 3210
3211 3211 // Protect memory (Used to pass readonly pages through
3212 3212 // JNI GetArray<type>Elements with empty arrays.)
3213 3213 // Also, used for serialization page and for compressed oops null pointer
3214 3214 // checking.
3215 3215 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3216 3216 bool is_committed) {
3217 3217 unsigned int p = 0;
3218 3218 switch (prot) {
3219 3219 case MEM_PROT_NONE: p = PROT_NONE; break;
3220 3220 case MEM_PROT_READ: p = PROT_READ; break;
3221 3221 case MEM_PROT_RW: p = PROT_READ|PROT_WRITE; break;
3222 3222 case MEM_PROT_RWX: p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
3223 3223 default:
3224 3224 ShouldNotReachHere();
3225 3225 }
3226 3226 // is_committed is unused.
3227 3227 return solaris_mprotect(addr, bytes, p);
3228 3228 }
3229 3229
3230 3230 // guard_memory and unguard_memory only happens within stack guard pages.
3231 3231 // Since ISM pertains only to the heap, guard and unguard memory should not
3232 3232 // happen with an ISM region.
3233 3233 bool os::guard_memory(char* addr, size_t bytes) {
3234 3234 return solaris_mprotect(addr, bytes, PROT_NONE);
3235 3235 }
3236 3236
3237 3237 bool os::unguard_memory(char* addr, size_t bytes) {
3238 3238 return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
3239 3239 }
3240 3240
3241 3241 // Large page support
3242 3242
3243 3243 // UseLargePages is the master flag to enable/disable large page memory.
3244 3244 // UseMPSS and UseISM are supported for compatibility reasons. Their combined
3245 3245 // effects can be described in the following table:
3246 3246 //
3247 3247 // UseLargePages UseMPSS UseISM
3248 3248 // false * * => UseLargePages is the master switch, turning
3249 3249 // it off will turn off both UseMPSS and
3250 3250 // UseISM. VM will not use large page memory
3251 3251 // regardless the settings of UseMPSS/UseISM.
3252 3252 // true false false => Unless future Solaris provides other
3253 3253 // mechanism to use large page memory, this
3254 3254 // combination is equivalent to -UseLargePages,
3255 3255 // VM will not use large page memory
3256 3256 // true true false => JVM will use MPSS for large page memory.
3257 3257 // This is the default behavior.
3258 3258 // true false true => JVM will use ISM for large page memory.
3259 3259 // true true true => JVM will use ISM if it is available.
3260 3260 // Otherwise, JVM will fall back to MPSS.
3261 3261 // Because ISM is now available on all
3262 3262 // supported Solaris versions, this combination
3263 3263 // is equivalent to +UseISM -UseMPSS.
3264 3264
3265 3265 static size_t _large_page_size = 0;
3266 3266
// Choose the large page size to use with ISM and report whether ISM is
// usable.  *page_size receives LargePageSizeInBytes if the user set it,
// otherwise a conservative per-platform default.  Always returns true;
// 'warn' is unused here because this probe cannot fail.
bool os::Solaris::ism_sanity_check(bool warn, size_t * page_size) {
  // x86 uses either 2M or 4M page, depending on whether PAE (Physical Address
  // Extensions) mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. Sparc
  // can support multiple page sizes.

  // Don't bother to probe page size because getpagesizes() comes with MPSS.
  // ISM is only recommended on old Solaris where there is no MPSS support.
  // Simply choose a conservative value as default.
  *page_size = LargePageSizeInBytes ? LargePageSizeInBytes :
       SPARC_ONLY(4 * M) IA32_ONLY(4 * M) AMD64_ONLY(2 * M)
       ARM_ONLY(2 * M);

  // ISM is available on all supported Solaris versions
  return true;
}
3282 3282
// Insertion sort for small arrays (descending order).
static void insertion_sort_descending(size_t* array, int len) {
  // Shift-and-place variant: entries already >= the incoming value stay put,
  // strictly smaller ones slide one slot to the right to open a gap.
  for (int i = 1; i < len; i++) {
    const size_t cur = array[i];
    int slot = i;
    while (slot > 0 && array[slot - 1] < cur) {
      array[slot] = array[slot - 1];
      --slot;
    }
    array[slot] = cur;
  }
}
3294 3294
// Probe the kernel for MPSS (Multiple Page Size Support) and select the set
// of usable page sizes.  On success, _page_sizes[] holds the usable sizes in
// descending order terminated by a 0 sentinel, and *page_size receives the
// largest usable size.  Returns false when MPSS is unavailable or only one
// page size exists; warnings are emitted only if 'warn' is set.
bool os::Solaris::mpss_sanity_check(bool warn, size_t * page_size) {
  const unsigned int usable_count = VM_Version::page_size_count();
  if (usable_count == 1) {
    return false;
  }

  // Find the right getpagesizes interface. When solaris 11 is the minimum
  // build platform, getpagesizes() (without the '2') can be called directly.
  typedef int (*gps_t)(size_t[], int);
  gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
  if (gps_func == NULL) {
    gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
    if (gps_func == NULL) {
      if (warn) {
        warning("MPSS is not supported by the operating system.");
      }
      return false;
    }
  }

  // Fill the array of page sizes.
  int n = (*gps_func)(_page_sizes, page_sizes_max);
  assert(n > 0, "Solaris bug?");

  if (n == page_sizes_max) {
    // Add a sentinel value (necessary only if the array was completely filled
    // since it is static (zeroed at initialization)).
    // NOTE(review): this overwrites the last reported size to make room for
    // the sentinel; enlarge os::_page_sizes if that ever matters.
    _page_sizes[--n] = 0;
    DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
  }
  assert(_page_sizes[n] == 0, "missing sentinel");
  trace_page_sizes("available page sizes", _page_sizes, n);

  if (n == 1) return false; // Only one page size available.

  // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
  // select up to usable_count elements. First sort the array, find the first
  // acceptable value, then copy the usable sizes to the top of the array and
  // trim the rest. Make sure to include the default page size :-).
  //
  // A better policy could get rid of the 4M limit by taking the sizes of the
  // important VM memory regions (java heap and possibly the code cache) into
  // account.
  insertion_sort_descending(_page_sizes, n);
  const size_t size_limit =
    FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
  int beg;
  // 'beg' becomes the index of the first size not exceeding the limit.
  for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
  const int end = MIN2((int)usable_count, n) - 1;
  // Compact the acceptable sizes to the front; slot 'end' is reserved for the
  // default (small) page size.
  for (int cur = 0; cur < end; ++cur, ++beg) {
    _page_sizes[cur] = _page_sizes[beg];
  }
  _page_sizes[end] = vm_page_size();
  _page_sizes[end + 1] = 0;

  if (_page_sizes[end] > _page_sizes[end - 1]) {
    // Default page size is not the smallest; sort again.
    insertion_sort_descending(_page_sizes, end + 1);
  }
  *page_size = _page_sizes[0];

  trace_page_sizes("usable page sizes", _page_sizes, end + 1);
  return true;
}
3359 3359
3360 3360 void os::large_page_init() {
3361 3361 if (!UseLargePages) {
3362 3362 UseISM = false;
3363 3363 UseMPSS = false;
3364 3364 return;
3365 3365 }
3366 3366
3367 3367 // print a warning if any large page related flag is specified on command line
3368 3368 bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3369 3369 !FLAG_IS_DEFAULT(UseISM) ||
3370 3370 !FLAG_IS_DEFAULT(UseMPSS) ||
3371 3371 !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3372 3372 UseISM = UseISM &&
3373 3373 Solaris::ism_sanity_check(warn_on_failure, &_large_page_size);
3374 3374 if (UseISM) {
3375 3375 // ISM disables MPSS to be compatible with old JDK behavior
3376 3376 UseMPSS = false;
3377 3377 _page_sizes[0] = _large_page_size;
3378 3378 _page_sizes[1] = vm_page_size();
3379 3379 }
3380 3380
3381 3381 UseMPSS = UseMPSS &&
3382 3382 Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
3383 3383
3384 3384 UseLargePages = UseISM || UseMPSS;
3385 3385 }
3386 3386
3387 3387 bool os::Solaris::set_mpss_range(caddr_t start, size_t bytes, size_t align) {
3388 3388 // Signal to OS that we want large pages for addresses
3389 3389 // from addr, addr + bytes
3390 3390 struct memcntl_mha mpss_struct;
3391 3391 mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
3392 3392 mpss_struct.mha_pagesize = align;
3393 3393 mpss_struct.mha_flags = 0;
3394 3394 if (memcntl(start, bytes, MC_HAT_ADVISE,
3395 3395 (caddr_t) &mpss_struct, 0, 0) < 0) {
3396 3396 debug_only(warning("Attempt to use MPSS failed."));
3397 3397 return false;
3398 3398 }
3399 3399 return true;
3400 3400 }
3401 3401
3402 3402 char* os::reserve_memory_special(size_t size, char* addr, bool exec) {
3403 3403 // "exec" is passed in but not used. Creating the shared image for
3404 3404 // the code cache doesn't have an SHM_X executable permission to check.
3405 3405 assert(UseLargePages && UseISM, "only for ISM large pages");
3406 3406
3407 3407 char* retAddr = NULL;
3408 3408 int shmid;
3409 3409 key_t ismKey;
3410 3410
3411 3411 bool warn_on_failure = UseISM &&
3412 3412 (!FLAG_IS_DEFAULT(UseLargePages) ||
3413 3413 !FLAG_IS_DEFAULT(UseISM) ||
3414 3414 !FLAG_IS_DEFAULT(LargePageSizeInBytes)
3415 3415 );
3416 3416 char msg[128];
3417 3417
3418 3418 ismKey = IPC_PRIVATE;
3419 3419
3420 3420 // Create a large shared memory region to attach to based on size.
3421 3421 // Currently, size is the total size of the heap
3422 3422 shmid = shmget(ismKey, size, SHM_R | SHM_W | IPC_CREAT);
3423 3423 if (shmid == -1){
3424 3424 if (warn_on_failure) {
3425 3425 jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno);
3426 3426 warning(msg);
3427 3427 }
3428 3428 return NULL;
3429 3429 }
3430 3430
3431 3431 // Attach to the region
3432 3432 retAddr = (char *) shmat(shmid, 0, SHM_SHARE_MMU | SHM_R | SHM_W);
3433 3433 int err = errno;
3434 3434
3435 3435 // Remove shmid. If shmat() is successful, the actual shared memory segment
3436 3436 // will be deleted when it's detached by shmdt() or when the process
3437 3437 // terminates. If shmat() is not successful this will remove the shared
3438 3438 // segment immediately.
3439 3439 shmctl(shmid, IPC_RMID, NULL);
3440 3440
3441 3441 if (retAddr == (char *) -1) {
3442 3442 if (warn_on_failure) {
3443 3443 jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
3444 3444 warning(msg);
3445 3445 }
3446 3446 return NULL;
3447 3447 }
3448 3448 if ((retAddr != NULL) && UseNUMAInterleaving) {
3449 3449 numa_make_global(retAddr, size);
3450 3450 }
3451 3451 return retAddr;
3452 3452 }
3453 3453
3454 3454 bool os::release_memory_special(char* base, size_t bytes) {
3455 3455 // detaching the SHM segment will also delete it, see reserve_memory_special()
3456 3456 int rslt = shmdt(base);
3457 3457 return rslt == 0;
3458 3458 }
3459 3459
// Returns the large page size selected by os::large_page_init() (0 if large
// page support was never initialized or is disabled).
size_t os::large_page_size() {
  return _large_page_size;
}
3463 3463
3464 3464 // MPSS allows application to commit large page memory on demand; with ISM
3465 3465 // the entire memory region must be allocated as shared memory.
3466 3466 bool os::can_commit_large_page_memory() {
3467 3467 return UseISM ? false : true;
3468 3468 }
3469 3469
3470 3470 bool os::can_execute_large_page_memory() {
3471 3471 return UseISM ? false : true;
3472 3472 }
3473 3473
// Sleep for 'millis' milliseconds using poll(), chunking requests larger
// than INT_MAX.  Returns OS_OK on a completed sleep, or the first non-OK
// result from poll()/the interrupt machinery (e.g. OS_INTRPT when
// 'interruptible' and the thread was interrupted).
static int os_sleep(jlong millis, bool interruptible) {
  const jlong limit = INT_MAX;
  jlong prevtime;
  int res;

  // poll() takes an int timeout, so chop oversized requests into
  // INT_MAX-sized slices handled recursively.
  while (millis > limit) {
    if ((res = os_sleep(limit, interruptible)) != OS_OK)
      return res;
    millis -= limit;
  }

  // Restart interrupted polls with new parameters until the proper delay
  // has been completed.

  prevtime = getTimeMillis();

  while (millis > 0) {
    jlong newtime;

    if (!interruptible) {
      // Following assert fails for os::yield_all:
      // assert(!thread->is_Java_thread(), "must not be java thread");
      res = poll(NULL, 0, millis);
    } else {
      JavaThread *jt = JavaThread::current();

      INTERRUPTIBLE_NORESTART_VM_ALWAYS(poll(NULL, 0, millis), res, jt,
        os::Solaris::clear_interrupted);
    }

    // INTERRUPTIBLE_NORESTART_VM_ALWAYS returns res == OS_INTRPT for
    // thread.Interrupt.

    // See c/r 6751923. Poll can return 0 before time
    // has elapsed if time is set via clock_settime (as NTP does).
    // res == 0 if poll timed out (see man poll RETURN VALUES)
    // using the logic below checks that we really did
    // sleep at least "millis" if not we'll sleep again.
    if( ( res == 0 ) || ((res == OS_ERR) && (errno == EINTR))) {
      newtime = getTimeMillis();
      assert(newtime >= prevtime, "time moving backwards");
    /* Doing prevtime and newtime in microseconds doesn't help precision,
       and trying to round up to avoid lost milliseconds can result in a
       too-short delay. */
      millis -= newtime - prevtime;
      if(millis <= 0)
        return OS_OK;
      prevtime = newtime;
    } else
      return res;
  }

  return OS_OK;
}
3528 3528
// Read calls from inside the vm need to perform state transitions
// Wraps ::read() in the INTERRUPTIBLE machinery so the thread transitions
// through the VM's blocked state and honors pending interrupts.
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
}
3533 3533
// Like os::read(), but uses the non-VM variant of the interruptible macro --
// NOTE(review): presumably no thread-state transition; confirm against the
// INTERRUPTIBLE_* macro definitions.
size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
}
3537 3537
// Sleep 'thread' (which must be the current thread) for 'millis' ms; a
// non-positive 'millis' degenerates to thr_yield().  JavaThreads honor the
// _thread_blocked and suspend-equivalent protocols.  Returns the os_sleep()
// result (OS_OK, OS_INTRPT, ...) or 0 for the yield case.
int os::sleep(Thread* thread, jlong millis, bool interruptible) {
  assert(thread == Thread::current(), "thread consistency check");

  // TODO-FIXME: this should be removed.
  // On Solaris machines (especially 2.5.1) we found that sometimes the VM gets into a live lock
  // situation with a JavaThread being starved out of a lwp. The kernel doesn't seem to generate
  // a SIGWAITING signal which would enable the threads library to create a new lwp for the starving
  // thread. We suspect that because the Watcher thread keeps waking up at periodic intervals the kernel
  // is fooled into believing that the system is making progress. In the code below we block the
  // the watcher thread while safepoint is in progress so that it would not appear as though the
  // system is making progress.
  if (!Solaris::T2_libthread() &&
      thread->is_Watcher_thread() && SafepointSynchronize::is_synchronizing() && !Arguments::has_profile()) {
    // We now try to acquire the threads lock. Since this lock is held by the VM thread during
    // the entire safepoint, the watcher thread will line up here during the safepoint.
    Threads_lock->lock_without_safepoint_check();
    Threads_lock->unlock();
  }

  if (thread->is_Java_thread()) {
    // This is a JavaThread so we honor the _thread_blocked protocol
    // even for sleeps of 0 milliseconds. This was originally done
    // as a workaround for bug 4338139. However, now we also do it
    // to honor the suspend-equivalent protocol.

    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    int ret_code;
    if (millis <= 0) {
      thr_yield();
      ret_code = 0;
    } else {
      // The original sleep() implementation did not create an
      // OSThreadWaitState helper for sleeps of 0 milliseconds.
      // I'm preserving that decision for now.
      OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);

      ret_code = os_sleep(millis, interruptible);
    }

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();

    return ret_code;
  }

  // non-JavaThread from this point on:

  if (millis <= 0) {
    thr_yield();
    return 0;
  }

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);

  return os_sleep(millis, interruptible);
}
3600 3600
// Minimal 1 ms non-interruptible sleep with no thread-state bookkeeping.
int os::naked_sleep() {
  // %% make the sleep time an integer flag. for now use 1 millisec.
  return os_sleep(1, false);
}
3605 3605
3606 3606 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3607 3607 void os::infinite_sleep() {
3608 3608 while (true) { // sleep forever ...
3609 3609 ::sleep(100); // ... 100 seconds at a time
3610 3610 }
3611 3611 }
3612 3612
3613 3613 // Used to convert frequent JVM_Yield() to nops
3614 3614 bool os::dont_yield() {
3615 3615 if (DontYieldALot) {
3616 3616 static hrtime_t last_time = 0;
3617 3617 hrtime_t diff = getTimeNanos() - last_time;
3618 3618
3619 3619 if (diff < DontYieldALotInterval * 1000000)
3620 3620 return true;
3621 3621
3622 3622 last_time += diff;
3623 3623
3624 3624 return false;
3625 3625 }
3626 3626 else {
3627 3627 return false;
3628 3628 }
3629 3629 }
3630 3630
3631 3631 // Caveat: Solaris os::yield() causes a thread-state transition whereas
3632 3632 // the linux and win32 implementations do not. This should be checked.
3633 3633
void os::yield() {
  // Yields to all threads with same or greater priority
  // Implemented as a 0 ms non-interruptible sleep; see the caveat comment
  // above about the thread-state transition this implies on Solaris.
  os::sleep(Thread::current(), 0, false);
}
3638 3638
3639 3639 // Note that yield semantics are defined by the scheduling class to which
3640 3640 // the thread currently belongs. Typically, yield will _not yield to
3641 3641 // other equal or higher priority threads that reside on the dispatch queues
3642 3642 // of other CPUs.
3643 3643
3644 3644 os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }
3645 3645
3646 3646
3647 3647 // On Solaris we found that yield_all doesn't always yield to all other threads.
3648 3648 // There have been cases where there is a thread ready to execute but it doesn't
3649 3649 // get an lwp as the VM thread continues to spin with sleeps of 1 millisecond.
3650 3650 // The 1 millisecond wait doesn't seem long enough for the kernel to issue a
3651 3651 // SIGWAITING signal which will cause a new lwp to be created. So we count the
3652 3652 // number of times yield_all is called in the one loop and increase the sleep
3653 3653 // time after 8 attempts. If this fails too we increase the concurrency level
3654 3654 // so that the starving thread would get an lwp
3655 3655
3656 3656 void os::yield_all(int attempts) {
3657 3657 // Yields to all threads, including threads with lower priorities
3658 3658 if (attempts == 0) {
3659 3659 os::sleep(Thread::current(), 1, false);
3660 3660 } else {
3661 3661 int iterations = attempts % 30;
3662 3662 if (iterations == 0 && !os::Solaris::T2_libthread()) {
3663 3663 // thr_setconcurrency and _getconcurrency make sense only under T1.
3664 3664 int noofLWPS = thr_getconcurrency();
3665 3665 if (noofLWPS < (Threads::number_of_threads() + 2)) {
3666 3666 thr_setconcurrency(thr_getconcurrency() + 1);
3667 3667 }
3668 3668 } else if (iterations < 25) {
3669 3669 os::sleep(Thread::current(), 1, false);
3670 3670 } else {
3671 3671 os::sleep(Thread::current(), 10, false);
3672 3672 }
3673 3673 }
3674 3674 }
3675 3675
3676 3676 // Called from the tight loops to possibly influence time-sharing heuristics
void os::loop_breaker(int attempts) {
  // Delegate to yield_all(), which escalates sleep time and lwp concurrency
  // with 'attempts' so starved threads eventually get to run.
  os::yield_all(attempts);
}
3680 3680
3681 3681
3682 3682 // Interface for setting lwp priorities. If we are using T2 libthread,
3683 3683 // which forces the use of BoundThreads or we manually set UseBoundThreads,
3684 3684 // all of our threads will be assigned to real lwp's. Using the thr_setprio
3685 3685 // function is meaningless in this mode so we must adjust the real lwp's priority
3686 3686 // The routines below implement the getting and setting of lwp priorities.
3687 3687 //
3688 3688 // Note: There are three priority scales used on Solaris. Java priorities
3689 3689 // which range from 1 to 10, libthread "thr_setprio" scale which range
3690 3690 // from 0 to 127, and the current scheduling class of the process we
3691 3691 // are running in. This is typically from -60 to +60.
3692 3692 // The setting of the lwp priorities in done after a call to thr_setprio
3693 3693 // so Java priorities are mapped to libthread priorities and we map from
3694 3694 // the latter to lwp priorities. We don't keep priorities stored in
3695 3695 // Java priorities since some of our worker threads want to set priorities
3696 3696 // higher than all Java threads.
3697 3697 //
3698 3698 // For related information:
3699 3699 // (1) man -s 2 priocntl
3700 3700 // (2) man -s 4 priocntl
3701 3701 // (3) man dispadmin
3702 3702 // = librt.so
3703 3703 // = libthread/common/rtsched.c - thrp_setlwpprio().
3704 3704 // = ps -cL <pid> ... to validate priority.
3705 3705 // = sched_get_priority_min and _max
3706 3706 // pthread_create
3707 3707 // sched_setparam
3708 3708 // pthread_setschedparam
3709 3709 //
3710 3710 // Assumptions:
3711 3711 // + We assume that all threads in the process belong to the same
3712 3712 // scheduling class. IE. an homogenous process.
3713 3713 // + Must be root or in IA group to change "interactive" attribute.
3714 3714 // Priocntl() will fail silently. The only indication of failure is when
3715 3715 // we read-back the value and notice that it hasn't changed.
3716 3716 // + Interactive threads enter the runq at the head, non-interactive at the tail.
3717 3717 // + For RT, change timeslice as well. Invariant:
3718 3718 // constant "priority integral"
3719 3719 // Konst == TimeSlice * (60-Priority)
3720 3720 // Given a priority, compute appropriate timeslice.
3721 3721 // + Higher numerical values have higher priority.
3722 3722
// sched class attributes
typedef struct {
  int schedPolicy; // classID
  int maxPrio;     // highest priority available in this scheduling class
  int minPrio;     // lowest priority available in this scheduling class
} SchedInfo;
3729 3729
3730 3730
// Priority limits for the TS, IA and RT scheduling classes, filled in by
// lwp_priocntl_init().
static SchedInfo tsLimits, iaLimits, rtLimits;

#ifdef ASSERT
static int ReadBackValidate = 1;      // debug-only: re-read priorities after setting them
#endif
static int myClass = 0;               // scheduling class id of this process
static int myMin = 0;                 // lowest priority in our class
static int myMax = 0;                 // highest priority in our class (possibly clamped)
static int myCur = 0;                 // presumably the current priority -- unused in this view
static bool priocntl_enable = false;  // true once lwp_priocntl_init() succeeded
3741 3741
3742 3742
3743 3743 // Call the version of priocntl suitable for all supported versions
3744 3744 // of Solaris. We need to call through this wrapper so that we can
3745 3745 // build on Solaris 9 and run on Solaris 8, 9 and 10.
3746 3746 //
3747 3747 // This code should be removed if we ever stop supporting Solaris 8
3748 3748 // and earlier releases.
3749 3749
// priocntl_ptr starts out pointing at priocntl_stub; on first use the stub
// resolves the real priocntl entry point via dlsym and replaces itself.
static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg);
typedef long (*priocntl_type)(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg);
static priocntl_type priocntl_ptr = priocntl_stub;
3753 3753
3754 3754 // Stub to set the value of the real pointer, and then call the real
3755 3755 // function.
3756 3756
3757 3757 static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg) {
3758 3758 // Try Solaris 8- name only.
3759 3759 priocntl_type tmp = (priocntl_type)dlsym(RTLD_DEFAULT, "__priocntl");
3760 3760 guarantee(tmp != NULL, "priocntl function not found.");
3761 3761 priocntl_ptr = tmp;
3762 3762 return (*priocntl_ptr)(PC_VERSION, idtype, id, cmd, arg);
3763 3763 }
3764 3764
3765 3765
3766 3766 // lwp_priocntl_init
3767 3767 //
3768 3768 // Try to determine the priority scale for our process.
3769 3769 //
3770 3770 // Return errno or 0 if OK.
3771 3771 //
static
int lwp_priocntl_init ()
{
  int rslt;
  pcinfo_t ClassInfo;   // per-class info from PC_GETCID / PC_GETCLINFO
  pcparms_t ParmInfo;   // per-process parameters from PC_GETPARMS
  int i;

  if (!UseThreadPriorities) return 0;

  // We are using Bound threads, we need to determine our priority ranges
  if (os::Solaris::T2_libthread() || UseBoundThreads) {
    // If ThreadPriorityPolicy is 1, switch tables
    if (ThreadPriorityPolicy == 1) {
      for (i = 0 ; i < MaxPriority+1; i++)
        os::java_to_os_priority[i] = prio_policy1[i];
    }
  }
  // Not using Bound Threads, set to ThreadPolicy 1
  else {
    for ( i = 0 ; i < MaxPriority+1; i++ ) {
      os::java_to_os_priority[i] = prio_policy1[i];
    }
    return 0;
  }


  // Get IDs for a set of well-known scheduling classes.
  // TODO-FIXME: GETCLINFO returns the current # of classes in
  // the system.  We should have a loop that iterates over the
  // classID values, which are known to be "small" integers.

  // Time-sharing (TS) class: user priorities are symmetric around 0.
  strcpy(ClassInfo.pc_clname, "TS");
  ClassInfo.pc_cid = -1;
  rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
  tsLimits.schedPolicy = ClassInfo.pc_cid;
  tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
  tsLimits.minPrio = -tsLimits.maxPrio;

  // Interactive (IA) class: same symmetric user-priority convention.
  strcpy(ClassInfo.pc_clname, "IA");
  ClassInfo.pc_cid = -1;
  rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
  iaLimits.schedPolicy = ClassInfo.pc_cid;
  iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
  iaLimits.minPrio = -iaLimits.maxPrio;

  // Real-time (RT) class: priorities run from 0 upward.
  strcpy(ClassInfo.pc_clname, "RT");
  ClassInfo.pc_cid = -1;
  rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
  rtLimits.schedPolicy = ClassInfo.pc_cid;
  rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
  rtLimits.minPrio = 0;


  // Query our "current" scheduling class.
  // This will normally be IA, TS or, rarely, RT.
  memset (&ParmInfo, 0, sizeof(ParmInfo));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = (*priocntl_ptr) (PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo );
  if ( rslt < 0 ) return errno;
  myClass = ParmInfo.pc_cid;

  // We now know our scheduling classId, get specific information
  // about the class.
  ClassInfo.pc_cid = myClass;
  ClassInfo.pc_clname[0] = 0;
  rslt = (*priocntl_ptr) (PC_VERSION, (idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo );
  if ( rslt < 0 ) return errno;

  if (ThreadPriorityVerbose)
    tty->print_cr ("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);

  memset(&ParmInfo, 0, sizeof(pcparms_t));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = (*priocntl_ptr)(PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
  if (rslt < 0) return errno;

  // Derive [myMin..myMax] from the class limits, clamped by any per-process
  // user priority limit (uprilim) for the IA and TS classes.
  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
    myMin = rtLimits.minPrio;
    myMax = rtLimits.maxPrio;
  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
    iaparms_t *iaInfo = (iaparms_t*)ParmInfo.pc_clparms;
    myMin = iaLimits.minPrio;
    myMax = iaLimits.maxPrio;
    myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
    tsparms_t *tsInfo = (tsparms_t*)ParmInfo.pc_clparms;
    myMin = tsLimits.minPrio;
    myMax = tsLimits.maxPrio;
    myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
  } else {
    // No clue - punt
    if (ThreadPriorityVerbose)
      tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
    return EINVAL;      // no clue, punt
  }

  if (ThreadPriorityVerbose)
    tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);

  priocntl_enable = true;  // Enable changing priorities
  return 0;
}
3881 3881
// Convenience accessors for the class-specific parameter area (pc_clparms)
// of a pcparms_t, viewed as the given class's parameter struct.
#define IAPRI(x) ((iaparms_t *)((x).pc_clparms))
#define RTPRI(x) ((rtparms_t *)((x).pc_clparms))
#define TSPRI(x) ((tsparms_t *)((x).pc_clparms))
3885 3885
3886 3886
3887 3887 // scale_to_lwp_priority
3888 3888 //
3889 3889 // Convert from the libthread "thr_setprio" scale to our current
3890 3890 // lwp scheduling class scale.
3891 3891 //
// Linearly map a libthread "thr_setprio" priority (0..127) onto the current
// lwp scheduling class range [rMin..rMax].
static
int scale_to_lwp_priority (int rMin, int rMax, int x)
{
  if (x == 127) return rMax;   // map the top of the scale exactly; avoid round-down
  return rMin + (x * (rMax - rMin)) / 128;
}
3901 3901
3902 3902
// set_lwp_priority
//
// Set the priority of the lwp. This call should only be made
// when using bound threads (T2 threads are bound by default).
//
// Returns 0 on success, or an errno-style code (e.g. EINVAL) on failure.
// The sequence is: PC_GETPARMS to learn the lwp's scheduling class, patch
// the class-specific priority field, then PC_SETPARMS to write it back.
int set_lwp_priority (int ThreadID, int lwpid, int newPrio )
{
  int rslt;
  int Actual, Expected, prv;
  pcparms_t ParmInfo; // for GET-SET
#ifdef ASSERT
  pcparms_t ReadBack; // for readback
#endif

  // Set priority via PC_GETPARMS, update, PC_SETPARMS
  // Query current values.
  // TODO: accelerate this by eliminating the PC_GETPARMS call.
  // Cache "pcparms_t" in global ParmCache.
  // TODO: elide set-to-same-value

  // If something went wrong on init, don't change priorities.
  if ( !priocntl_enable ) {
    if (ThreadPriorityVerbose)
      tty->print_cr("Trying to set priority but init failed, ignoring");
    return EINVAL;
  }


  // If lwp hasn't started yet, just return
  // the _start routine will call us again.
  if ( lwpid <= 0 ) {
    if (ThreadPriorityVerbose) {
      tty->print_cr ("deferring the set_lwp_priority of thread " INTPTR_FORMAT " to %d, lwpid not set",
                     ThreadID, newPrio);
    }
    return 0;
  }

  if (ThreadPriorityVerbose) {
    tty->print_cr ("set_lwp_priority(" INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
                   ThreadID, lwpid, newPrio);
  }

  // Query the lwp's current scheduling class and parameters.
  memset(&ParmInfo, 0, sizeof(pcparms_t));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
  if (rslt < 0) return errno;

  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
    // Real-time class: set rt_pri, leave the time quantum untouched.
    rtparms_t *rtInfo = (rtparms_t*)ParmInfo.pc_clparms;
    rtInfo->rt_pri = scale_to_lwp_priority (rtLimits.minPrio, rtLimits.maxPrio, newPrio);
    rtInfo->rt_tqsecs = RT_NOCHANGE;
    rtInfo->rt_tqnsecs = RT_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
    }
  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
    // Interactive class: the user priority is clamped by ia_uprilim.
    iaparms_t *iaInfo = (iaparms_t*)ParmInfo.pc_clparms;
    int maxClamped = MIN2(iaLimits.maxPrio, (int)iaInfo->ia_uprilim);
    iaInfo->ia_upri = scale_to_lwp_priority(iaLimits.minPrio, maxClamped, newPrio);
    iaInfo->ia_uprilim = IA_NOCHANGE;
    iaInfo->ia_mode = IA_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr ("IA: [%d...%d] %d->%d\n",
               iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
    }
  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
    // Time-sharing class: likewise clamped by ts_uprilim.
    tsparms_t *tsInfo = (tsparms_t*)ParmInfo.pc_clparms;
    int maxClamped = MIN2(tsLimits.maxPrio, (int)tsInfo->ts_uprilim);
    prv = tsInfo->ts_upri;
    tsInfo->ts_upri = scale_to_lwp_priority(tsLimits.minPrio, maxClamped, newPrio);
    // NOTE(review): TS_NOCHANGE looks like the intended constant here;
    // confirm IA_NOCHANGE and TS_NOCHANGE share a value before changing.
    tsInfo->ts_uprilim = IA_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr ("TS: %d [%d...%d] %d->%d\n",
               prv, tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
    }
    // Priority is unchanged -- skip the PC_SETPARMS call entirely.
    if (prv == tsInfo->ts_upri) return 0;
  } else {
    if ( ThreadPriorityVerbose ) {
      tty->print_cr ("Unknown scheduling class\n");
    }
    return EINVAL; // no clue, punt
  }

  rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
  if (ThreadPriorityVerbose && rslt) {
    tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
  }
  if (rslt < 0) return errno;

#ifdef ASSERT
  // Sanity check: read back what we just attempted to set.
  // In theory it could have changed in the interim ...
  //
  // The priocntl system call is tricky.
  // Sometimes it'll validate the priority value argument and
  // return EINVAL if unhappy. At other times it fails silently.
  // Readbacks are prudent.

  if (!ReadBackValidate) return 0;

  memset(&ReadBack, 0, sizeof(pcparms_t));
  ReadBack.pc_cid = PC_CLNULL;
  rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
  assert(rslt >= 0, "priocntl failed");
  // 0xBAD is a sentinel: stays in place if the class is unrecognized below.
  Actual = Expected = 0xBAD;
  assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
    Actual = RTPRI(ReadBack)->rt_pri;
    Expected = RTPRI(ParmInfo)->rt_pri;
  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
    Actual = IAPRI(ReadBack)->ia_upri;
    Expected = IAPRI(ParmInfo)->ia_upri;
  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
    Actual = TSPRI(ReadBack)->ts_upri;
    Expected = TSPRI(ParmInfo)->ts_upri;
  } else {
    if ( ThreadPriorityVerbose ) {
      tty->print_cr("set_lwp_priority: unexpected class in readback: %d\n", ParmInfo.pc_cid);
    }
  }

  // Mismatch is only reported, never treated as an error (see note above
  // about priocntl failing silently).
  if (Actual != Expected) {
    if ( ThreadPriorityVerbose ) {
      tty->print_cr ("set_lwp_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
             lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
    }
  }
#endif

  return 0;
}
4035 4035
4036 4036
4037 4037
// Solaris only gives access to 128 real priorities at a time,
// so we expand Java's ten to fill this range. This would be better
// if we dynamically adjusted relative priorities.
//
// The ThreadPriorityPolicy option allows us to select 2 different
// priority scales.
//
// ThreadPriorityPolicy=0
// Since the Solaris' default priority is MaximumPriority, we do not
// set a priority lower than Max unless a priority lower than
// NormPriority is requested.
//
// ThreadPriorityPolicy=1
// This mode causes the priority table to get filled with
// linear values. NormPriority gets mapped to 50% of the
// Maximum priority and so on. This will cause VM threads
// to get unfair treatment against other Solaris processes
// which do not explicitly alter their thread priorities.
//

// Mapping from Java priority (1..10, index 0 unused) to the native
// thr_setprio scale (0..127). This is the ThreadPriorityPolicy=0 table:
// everything at NormPriority and above maps to the native maximum.
int os::java_to_os_priority[MaxPriority + 1] = {
  -99999, // 0 Entry should never be used

  0, // 1 MinPriority
  32, // 2
  64, // 3

  96, // 4
  127, // 5 NormPriority
  127, // 6

  127, // 7
  127, // 8
  127, // 9 NearMaxPriority

  127 // 10 MaxPriority
};
4076 4076
4077 4077
4078 4078 OSReturn os::set_native_priority(Thread* thread, int newpri) {
4079 4079 assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
4080 4080 if ( !UseThreadPriorities ) return OS_OK;
4081 4081 int status = thr_setprio(thread->osthread()->thread_id(), newpri);
4082 4082 if ( os::Solaris::T2_libthread() || (UseBoundThreads && thread->osthread()->is_vm_created()) )
4083 4083 status |= (set_lwp_priority (thread->osthread()->thread_id(),
4084 4084 thread->osthread()->lwp_id(), newpri ));
4085 4085 return (status == 0) ? OS_OK : OS_ERR;
4086 4086 }
4087 4087
4088 4088
4089 4089 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
4090 4090 int p;
4091 4091 if ( !UseThreadPriorities ) {
4092 4092 *priority_ptr = NormalPriority;
4093 4093 return OS_OK;
4094 4094 }
4095 4095 int status = thr_getprio(thread->osthread()->thread_id(), &p);
4096 4096 if (status != 0) {
4097 4097 return OS_ERR;
4098 4098 }
4099 4099 *priority_ptr = p;
4100 4100 return OS_OK;
4101 4101 }
4102 4102
4103 4103
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {
  // schedctl_start() on the page returned by schedctl_init() asks the
  // kernel to briefly hold off preempting this lwp (best-effort only).
  schedctl_start(schedctl_init());
}
4109 4109
// Deliver a Java-level interrupt to 'thread': publish the interrupted flag,
// then wake the thread from every place it might be blocked (os::sleep,
// JSR-166 park, Object.wait, interruptible I/O).  Caller must be the thread
// itself or hold Threads_lock (see assert).
void os::interrupt(Thread* thread) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  int isInterrupted = osthread->interrupted();
  if (!isInterrupted) {
    osthread->set_interrupted(true);
    // Publish the flag before any unpark/thr_kill so a woken thread is
    // guaranteed to observe interrupted() == true.
    OrderAccess::fence();
    // os::sleep() is implemented with either poll (NULL,0,timeout) or
    // by parking on _SleepEvent. If the former, thr_kill will unwedge
    // the sleeper by SIGINTR, otherwise the unpark() will wake the sleeper.
    ParkEvent * const slp = thread->_SleepEvent ;
    if (slp != NULL) slp->unpark() ;
  }

  // For JSR166: unpark after setting status but before thr_kill -dl
  if (thread->is_Java_thread()) {
    ((JavaThread*)thread)->parker()->unpark();
  }

  // Handle interruptible wait() ...
  ParkEvent * const ev = thread->_ParkEvent ;
  if (ev != NULL) ev->unpark() ;

  // When events are used everywhere for os::sleep, then this thr_kill
  // will only be needed if UseVMInterruptibleIO is true.

  if (!isInterrupted) {
    // Signal the thread so blocking system calls return EINTR.
    int status = thr_kill(osthread->thread_id(), os::Solaris::SIGinterrupt());
    assert_status(status == 0, status, "thr_kill");

    // Bump thread interruption counter
    RuntimeService::record_thread_interrupt_signaled_count();
  }
}
4146 4146
4147 4147
4148 4148 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
4149 4149 assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");
4150 4150
4151 4151 OSThread* osthread = thread->osthread();
4152 4152
4153 4153 bool res = osthread->interrupted();
4154 4154
4155 4155 // NOTE that since there is no "lock" around these two operations,
4156 4156 // there is the possibility that the interrupted flag will be
4157 4157 // "false" but that the interrupt event will be set. This is
4158 4158 // intentional. The effect of this is that Object.wait() will appear
4159 4159 // to have a spurious wakeup, which is not harmful, and the
4160 4160 // possibility is so rare that it is not worth the added complexity
4161 4161 // to add yet another lock. It has also been recommended not to put
4162 4162 // the interrupted flag into the os::Solaris::Event structure,
4163 4163 // because it hides the issue.
4164 4164 if (res && clear_interrupted) {
4165 4165 osthread->set_interrupted(false);
4166 4166 }
4167 4167 return res;
4168 4168 }
4169 4169
4170 4170
// Intentionally empty: there are no Solaris-specific OS statistics to print.
void os::print_statistics() {
}
4173 4173
4174 4174 int os::message_box(const char* title, const char* message) {
4175 4175 int i;
4176 4176 fdStream err(defaultStream::error_fd());
4177 4177 for (i = 0; i < 78; i++) err.print_raw("=");
4178 4178 err.cr();
4179 4179 err.print_raw_cr(title);
4180 4180 for (i = 0; i < 78; i++) err.print_raw("-");
4181 4181 err.cr();
4182 4182 err.print_raw_cr(message);
4183 4183 for (i = 0; i < 78; i++) err.print_raw("=");
4184 4184 err.cr();
4185 4185
4186 4186 char buf[16];
4187 4187 // Prevent process from exiting upon "read error" without consuming all CPU
4188 4188 while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
4189 4189
4190 4190 return buf[0] == 'y' || buf[0] == 'Y';
4191 4191 }
4192 4192
// A lightweight implementation that does not suspend the target thread and
// thus returns only a hint. Used for profiling only!
ExtendedPC os::get_thread_pc(Thread* thread) {
  // Make sure that it is called by the watcher and the Threads lock is owned.
  assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
  // For now, is only used to profile the VM Thread
  assert(thread->is_VM_thread(), "Can only be called for VMThread");
  // Default-constructed; stays NULL if the callback never completes.
  ExtendedPC epc;

  // Interrupt the target via the callback mechanism and wait (bounded)
  // for it to report its pc.
  GetThreadPC_Callback cb(ProfileVM_lock);
  OSThread *osthread = thread->osthread();
  const int time_to_wait = 400; // 400ms wait for initial response
  int status = cb.interrupt(thread, time_to_wait);

  if (cb.is_done() ) {
    epc = cb.addr();
  } else {
    DEBUG_ONLY(tty->print_cr("Failed to get pc for thread: %d got %d status",
                              osthread->thread_id(), status););
    // epc is already NULL
  }
  return epc;
}
4216 4216
4217 4217
// This does not do anything on Solaris. This is basically a hook for being
// able to use structured exception handling (thread-local exception filters)
// on, e.g., Win32: here it simply invokes the Java call directly.
void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
  f(value, method, args, thread);
}
4223 4223
4224 4224 // This routine may be used by user applications as a "hook" to catch signals.
4225 4225 // The user-defined signal handler must pass unrecognized signals to this
4226 4226 // routine, and if it returns true (non-zero), then the signal handler must
4227 4227 // return immediately. If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
4230 4230 //
4231 4231 // If this routine returns false, it is OK to call it again. This allows
4232 4232 // the user-defined signal handler to perform checks either before or after
4233 4233 // the VM performs its own checks. Naturally, the user code would be making
4234 4234 // a serious error if it tried to handle an exception (such as a null check
4235 4235 // or breakpoint) that the VM was generating for its own correct operation.
4236 4236 //
4237 4237 // This routine may recognize any of the following kinds of signals:
4238 4238 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
4239 4239 // os::Solaris::SIGasync
4240 4240 // It should be consulted by handlers for any of those signals.
4241 4241 // It explicitly does not recognize os::Solaris::SIGinterrupt
4242 4242 //
4243 4243 // The caller of this routine must pass in the three arguments supplied
4244 4244 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
4245 4245 // field of the structure passed to sigaction(). This routine assumes that
4246 4246 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
4247 4247 //
4248 4248 // Note that the VM will print warnings if it detects conflicting signal
4249 4249 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
4250 4250 //
4251 4251 extern "C" JNIEXPORT int
4252 4252 JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext,
4253 4253 int abort_if_unrecognized);
4254 4254
4255 4255
// Common entry point for all VM-installed "interesting" signal handlers;
// delegates to the shared dispatcher with abort_if_unrecognized = true
// (see the contract documented above JVM_handle_solaris_signal).
void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
  JVM_handle_solaris_signal(sig, info, ucVoid, true);
}
4259 4259
4260 4260 /* Do not delete - if guarantee is ever removed, a signal handler (even empty)
4261 4261 is needed to provoke threads blocked on IO to return an EINTR
4262 4262 Note: this explicitly does NOT call JVM_handle_solaris_signal and
4263 4263 does NOT participate in signal chaining due to requirement for
4264 4264 NOT setting SA_RESTART to make EINTR work. */
4265 4265 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
4266 4266 if (UseSignalChaining) {
4267 4267 struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
4268 4268 if (actp && actp->sa_handler) {
4269 4269 vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
4270 4270 }
4271 4271 }
4272 4272 }
4273 4273
// This boolean allows users to forward their own non-matching signals
// to JVM_handle_solaris_signal, harmlessly.
bool os::Solaris::signal_handlers_are_installed = false;

// For signal-chaining
bool os::Solaris::libjsig_is_loaded = false;
typedef struct sigaction *(*get_signal_t)(int);
// Resolved in install_signal_handlers() to libjsig's JVM_get_signal_action
// when libjsig is present; NULL otherwise.
get_signal_t os::Solaris::get_signal_action = NULL;
4282 4282
4283 4283 struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
4284 4284 struct sigaction *actp = NULL;
4285 4285
4286 4286 if ((libjsig_is_loaded) && (sig <= Maxlibjsigsigs)) {
4287 4287 // Retrieve the old signal handler from libjsig
4288 4288 actp = (*get_signal_action)(sig);
4289 4289 }
4290 4290 if (actp == NULL) {
4291 4291 // Retrieve the preinstalled signal handler from jvm
4292 4292 actp = get_preinstalled_handler(sig);
4293 4293 }
4294 4294
4295 4295 return actp;
4296 4296 }
4297 4297
// Invoke the user's saved (chained) handler for 'sig'. Returns true when
// the signal should be considered handled (handler invoked, or SIG_IGN);
// returns false for SIG_DFL so the VM can treat the signal as an
// unexpected exception instead of taking the default action.
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand;
    sa_sigaction_t sa;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      // One-shot handler: emulate the kernel's reset-to-default behavior.
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    thr_sigsetmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}
4342 4342
4343 4343 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
4344 4344 bool chained = false;
4345 4345 // signal-chaining
4346 4346 if (UseSignalChaining) {
4347 4347 struct sigaction *actp = get_chained_signal_action(sig);
4348 4348 if (actp != NULL) {
4349 4349 chained = call_chained_handler(actp, sig, siginfo, context);
4350 4350 }
4351 4351 }
4352 4352 return chained;
4353 4353 }
4354 4354
4355 4355 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
4356 4356 assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4357 4357 if (preinstalled_sigs[sig] != 0) {
4358 4358 return &chainedsigactions[sig];
4359 4359 }
4360 4360 return NULL;
4361 4361 }
4362 4362
// Record the pre-existing user handler for 'sig' so chained_handler()
// can forward to it later via get_preinstalled_handler().
void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {

  assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  chainedsigactions[sig] = oldAct;
  preinstalled_sigs[sig] = 1;
}
4370 4370
// Install the VM's handler for 'sig'. A pre-existing non-default user
// handler is either left in place (AllowUserSignalHandlers / !set_installed),
// saved for chaining (UseSignalChaining && oktochain), rejected fatally
// (chaining requested but !oktochain), or reported as a fatal error.
void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);
  void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                      : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      if (oktochain) {
        // save the old handler in jvm
        save_preinstalled_handler(sig, oldAct);
      } else {
        vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
      }
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
    } else {
      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
                    "%#lx for signal %d.", (long)oldhand, sig));
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_handler = SIG_DFL;

  sigAct.sa_sigaction = signalHandler;
  // Handle SIGSEGV on alternate signal stack if
  // not using stack banging
  if (!UseStackBanging && sig == SIGSEGV) {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
    // Interruptible i/o requires SA_RESTART cleared so EINTR
    // is returned instead of restarting system calls
  } else if (sig == os::Solaris::SIGinterrupt()) {
    sigemptyset(&sigAct.sa_mask);
    sigAct.sa_handler = NULL;
    sigAct.sa_flags = SA_SIGINFO;
    sigAct.sa_sigaction = sigINTRHandler;
  } else {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
  }
  // Remember the flags we chose so check_signal_handler() can audit them.
  os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);

  sigaction(sig, &sigAct, &oldAct);

  // Re-derive the old handler from the install call and verify nothing
  // raced us between the initial query and the installation.
  void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                       : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}
4425 4425
4426 4426
// Audit sig's installed handler once; after a mismatch has been reported
// the signal is added to check_signal_done and skipped on later passes.
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Solaris::check_signal_handler(sig)
4430 4430
// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here

void os::run_periodic_checks() {
  // A big source of grief is hijacking virt. addr 0x0 on Solaris,
  // thereby preventing NULL checks.
  if(!check_addr0_done) check_addr0_done = check_addr0(tty);

  // check_signals is cleared when libjsig or user handlers are in play.
  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check for the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  // See comments above for using JVM1/JVM2 and UseAltSigs
  DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
  DO_SIGNAL_CHECK(os::Solaris::SIGasync());

}
4466 4466
// Signature of the C library's sigaction(2); resolved via dlsym so the
// audit below bypasses any interposed (e.g. libjsig) sigaction.
typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

// Lazily resolved on first use in check_signal_handler().
static os_sigaction_t os_sigaction = NULL;
4470 4470
// Verify that the handler currently installed for 'sig' is still the one
// the VM expects (and with the expected sa_flags). Prints a warning and
// stops re-checking the signal on the first detected mismatch.
void os::Solaris::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  // Current handler address, honoring SA_SIGINFO.
  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler) ;

  // Determine which handler the VM should have installed for this signal.
  switch(sig) {
  case SIGSEGV:
  case SIGBUS:
  case SIGFPE:
  case SIGPIPE:
  case SIGXFSZ:
  case SIGILL:
    jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
    break;

  case SHUTDOWN1_SIGNAL:
  case SHUTDOWN2_SIGNAL:
  case SHUTDOWN3_SIGNAL:
  case BREAK_SIGNAL:
    jvmHandler = (address)user_handler();
    break;

  default:
    int intrsig = os::Solaris::SIGinterrupt();
    int asynsig = os::Solaris::SIGasync();

    if (sig == intrsig) {
      jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
    } else if (sig == asynsig) {
      jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
    } else {
      // Not a signal the VM audits.
      return;
    }
    break;
  }

  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
    tty->print_cr(" found:" PTR32_FORMAT, act.sa_flags);
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Print all the signal handler state
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }

}
4541 4541
// Install all VM signal handlers, coordinating with libjsig (signal
// chaining) when it is preloaded: libjsig is told when handler setup
// begins and ends so it can distinguish VM installations from user ones.
void os::Solaris::install_signal_handlers() {
  bool libjsigdone = false;
  signal_handlers_are_installed = true;

  // signal-chaining
  typedef void (*signal_setting_t)();
  signal_setting_t begin_signal_setting = NULL;
  signal_setting_t end_signal_setting = NULL;
  // Presence of JVM_begin_signal_setting is how we detect libjsig.
  begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                        dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
  if (begin_signal_setting != NULL) {
    end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                        dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
    get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                                       dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
    get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
                                         dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
    libjsig_is_loaded = true;
    // JVM_get_libjsig_version is only present in 1.4.1+ libjsig.
    if (os::Solaris::get_libjsig_version != NULL) {
      libjsigversion = (*os::Solaris::get_libjsig_version)();
    }
    assert(UseSignalChaining, "should enable signal-chaining");
  }
  if (libjsig_is_loaded) {
    // Tell libjsig jvm is setting signal handlers
    (*begin_signal_setting)();
  }

  set_signal_handler(SIGSEGV, true, true);
  set_signal_handler(SIGPIPE, true, true);
  set_signal_handler(SIGXFSZ, true, true);
  set_signal_handler(SIGBUS, true, true);
  set_signal_handler(SIGILL, true, true);
  set_signal_handler(SIGFPE, true, true);


  if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {

    // Pre-1.4.1 Libjsig limited to signal chaining signals <= 32 so
    // can not register overridable signals which might be > 32
    if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
      // Tell libjsig jvm has finished setting signal handlers
      (*end_signal_setting)();
      libjsigdone = true;
    }
  }

  // Never ok to chain our SIGinterrupt
  set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
  set_signal_handler(os::Solaris::SIGasync(), true, true);

  if (libjsig_is_loaded && !libjsigdone) {
    // Tell libjsig jvm finishes setting signal handlers
    (*end_signal_setting)();
  }

  // We don't activate signal checker if libjsig is in place, we trust ourselves
  // and if UserSignalHandler is installed all bets are off.
  // Log that signal checking is off only if -verbose:jni is specified.
  if (CheckJNICalls) {
    if (libjsig_is_loaded) {
      if (PrintJNIResolving) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
      }
      check_signals = false;
    }
    if (AllowUserSignalHandlers) {
      if (PrintJNIResolving) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
      }
      check_signals = false;
    }
  }
}
4616 4616
4617 4617
4618 4618 void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);
4619 4619
// Textual names of Solaris signals, indexed by signal number.
// Entry 0 ("SIG0") is a placeholder since signal numbers start at 1;
// numbers beyond this table are rendered as "SIG<n>" by os::exception_name.
const char * signames[] = {
  "SIG0",
  "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
  "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
  "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
  "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
  "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
  "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
  "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
  "SIGCANCEL", "SIGLOST"
};
4631 4631
4632 4632 const char* os::exception_name(int exception_code, char* buf, size_t size) {
4633 4633 if (0 < exception_code && exception_code <= SIGRTMAX) {
4634 4634 // signal
4635 4635 if (exception_code < sizeof(signames)/sizeof(const char*)) {
4636 4636 jio_snprintf(buf, size, "%s", signames[exception_code]);
4637 4637 } else {
4638 4638 jio_snprintf(buf, size, "SIG%d", exception_code);
4639 4639 }
4640 4640 return buf;
4641 4641 } else {
4642 4642 return NULL;
4643 4643 }
4644 4644 }
4645 4645
// The following function pointers are filled in at startup (presumably via
// the resolve_symbol* helpers below -- they are looked up with dlsym so the
// VM can run on systems where the APIs are absent); verify against init code.

// (Static) wrappers for the new libthread API
int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate;
int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate;
int_fnP_thread_t_i os::Solaris::_thr_setmutator;
int_fnP_thread_t os::Solaris::_thr_suspend_mutator;
int_fnP_thread_t os::Solaris::_thr_continue_mutator;

// (Static) wrapper for getisax(2) call.
os::Solaris::getisax_func_t os::Solaris::_getisax = 0;

// (Static) wrappers for the liblgrp API
os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;

// (Static) wrapper for meminfo() call.
os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
4669 4669
4670 4670 static address resolve_symbol_lazy(const char* name) {
4671 4671 address addr = (address) dlsym(RTLD_DEFAULT, name);
4672 4672 if(addr == NULL) {
4673 4673 // RTLD_DEFAULT was not defined on some early versions of 2.5.1
4674 4674 addr = (address) dlsym(RTLD_NEXT, name);
4675 4675 }
4676 4676 return addr;
4677 4677 }
4678 4678
// Like resolve_symbol_lazy(), but the symbol is mandatory: aborts the VM
// with the dlerror() message when the lookup fails.
static address resolve_symbol(const char* name) {
  address addr = resolve_symbol_lazy(name);
  if(addr == NULL) {
    fatal(dlerror());
  }
  return addr;
}
4686 4686
4687 4687
4688 4688
// isT2_libthread()
//
// Routine to determine if we are currently using the new T2 libthread.
//
// We determine if we are using T2 by reading /proc/self/lstatus and
// looking for a thread with the ASLWP bit set. If we find this status
// bit set, we must assume that we are NOT using T2. The T2 team
// has approved this algorithm.
//
// We need to determine if we are running with the new T2 libthread
// since setting native thread priorities is handled differently
// when using this library. All threads created using T2 are bound
// threads. Calling thr_setprio is meaningless in this case.
//
bool isT2_libthread() {
  static prheader_t * lwpArray = NULL;
  static int lwpSize = 0;
  static int lwpFile = -1;
  lwpstatus_t * that;
  char lwpName [128];   // NOTE(review): appears unused in this function
  bool isT2 = false;

// Address-arithmetic helpers for walking the variable-size lwpstatus_t
// records that follow the prheader_t in the lstatus snapshot.
#define ADR(x) ((uintptr_t)(x))
#define LWPINDEX(ary,ix) ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1))))

  lwpFile = ::open("/proc/self/lstatus", O_RDONLY, 0);
  if (lwpFile < 0) {
    // Cannot inspect the LWP list; conservatively report "not T2".
    if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n");
    return false;
  }
  lwpSize = 16*1024;
  for (;;) {
    // Re-read the whole snapshot each attempt, growing the buffer until it
    // holds all pr_nent records (pr_nent * pr_entsize bytes are required).
    ::lseek64 (lwpFile, 0, SEEK_SET);
    lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize);
    if (::read(lwpFile, lwpArray, lwpSize) < 0) {
      if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
      break;
    }
    if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
      // We got a good snapshot - now iterate over the list.
      int aslwpcount = 0;
      for (int i = 0; i < lwpArray->pr_nent; i++ ) {
        that = LWPINDEX(lwpArray,i);
        if (that->pr_flags & PR_ASLWP) {
          aslwpcount++;
        }
      }
      // No LWP with the ASLWP bit set => the T2 libthread (see header
      // comment for the rationale).
      if (aslwpcount == 0) isT2 = true;
      break;
    }
    lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
    FREE_C_HEAP_ARRAY(char, lwpArray); // retry.
  }

  FREE_C_HEAP_ARRAY(char, lwpArray);
  ::close (lwpFile);
  if (ThreadPriorityVerbose) {
    if (isT2) tty->print_cr("We are running with a T2 libthread\n");
    else tty->print_cr("We are not running with a T2 libthread\n");
  }
  return isT2;
}
4751 4751
4752 4752
// Resolve and install the libthread entry points the VM needs (thread
// state inspection and mutator suspend/resume), detect the T2 libthread,
// and record the address range of libthread's signal handler dispatcher.
// Called once from os::init_2(); a libthread that lacks any required
// symbol is a fatal error (via resolve_symbol / guarantee).
void os::Solaris::libthread_init() {
  address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");

  // Determine if we are running with the new T2 libthread
  os::Solaris::set_T2_libthread(isT2_libthread());

  lwp_priocntl_init();

  // RTLD_DEFAULT was not defined on some early versions of 5.5.1
  if(func == NULL) {
    func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
    // Guarantee that this VM is running on an new enough OS (5.6 or
    // later) that it will have a new enough libthread.so.
    guarantee(func != NULL, "libthread.so is too old.");
  }

  // Initialize the new libthread getstate API wrappers
  func = resolve_symbol("thr_getstate");
  os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func));

  func = resolve_symbol("thr_setstate");
  os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func));

  func = resolve_symbol("thr_setmutator");
  os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func));

  func = resolve_symbol("thr_suspend_mutator");
  os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

  func = resolve_symbol("thr_continue_mutator");
  os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

  // Ask libthread (via thr_sighndlrinfo) for the start address and size of
  // its internal signal handler, so the VM can recognize frames in it.
  int size;
  void (*handler_info_func)(address *, int *);
  handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
  handler_info_func(&handler_start, &size);
  handler_end = handler_start + size;
}
4791 4791
4792 4792
// Function pointers for the mutex/condition-variable implementation the VM
// uses internally.  They are selected and installed by
// os::Solaris::synchronization_init() below (LWP, pthread, or the default
// libthread primitives, depending on VM flags).
int_fnP_mutex_tP os::Solaris::_mutex_lock;
int_fnP_mutex_tP os::Solaris::_mutex_trylock;
int_fnP_mutex_tP os::Solaris::_mutex_unlock;
int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
int_fnP_mutex_tP os::Solaris::_mutex_destroy;
// Process-private scope; synchronization_init() keeps this value.
int os::Solaris::_mutex_scope = USYNC_THREAD;

int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
int_fnP_cond_tP os::Solaris::_cond_signal;
int_fnP_cond_tP os::Solaris::_cond_broadcast;
int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
int_fnP_cond_tP os::Solaris::_cond_destroy;
// Process-private scope; synchronization_init() keeps this value.
int os::Solaris::_cond_scope = USYNC_THREAD;
4807 4807
4808 4808 void os::Solaris::synchronization_init() {
4809 4809 if(UseLWPSynchronization) {
4810 4810 os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
4811 4811 os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
4812 4812 os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
4813 4813 os::Solaris::set_mutex_init(lwp_mutex_init);
4814 4814 os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
4815 4815 os::Solaris::set_mutex_scope(USYNC_THREAD);
4816 4816
4817 4817 os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
4818 4818 os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
4819 4819 os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
4820 4820 os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
4821 4821 os::Solaris::set_cond_init(lwp_cond_init);
4822 4822 os::Solaris::set_cond_destroy(lwp_cond_destroy);
4823 4823 os::Solaris::set_cond_scope(USYNC_THREAD);
4824 4824 }
4825 4825 else {
4826 4826 os::Solaris::set_mutex_scope(USYNC_THREAD);
4827 4827 os::Solaris::set_cond_scope(USYNC_THREAD);
4828 4828
4829 4829 if(UsePthreads) {
4830 4830 os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
4831 4831 os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
4832 4832 os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
4833 4833 os::Solaris::set_mutex_init(pthread_mutex_default_init);
4834 4834 os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));
4835 4835
4836 4836 os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
4837 4837 os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
4838 4838 os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
4839 4839 os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
4840 4840 os::Solaris::set_cond_init(pthread_cond_default_init);
4841 4841 os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
4842 4842 }
4843 4843 else {
4844 4844 os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
4845 4845 os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
4846 4846 os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
4847 4847 os::Solaris::set_mutex_init(::mutex_init);
4848 4848 os::Solaris::set_mutex_destroy(::mutex_destroy);
4849 4849
4850 4850 os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
4851 4851 os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
4852 4852 os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
4853 4853 os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
4854 4854 os::Solaris::set_cond_init(::cond_init);
4855 4855 os::Solaris::set_cond_destroy(::cond_destroy);
4856 4856 }
4857 4857 }
4858 4858 }
4859 4859
4860 4860 bool os::Solaris::liblgrp_init() {
4861 4861 void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
4862 4862 if (handle != NULL) {
4863 4863 os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
4864 4864 os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
4865 4865 os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
4866 4866 os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
4867 4867 os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
4868 4868 os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
4869 4869 os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
4870 4870 os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
4871 4871 dlsym(handle, "lgrp_cookie_stale")));
4872 4872
4873 4873 lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
4874 4874 set_lgrp_cookie(c);
4875 4875 return true;
4876 4876 }
4877 4877 return false;
4878 4878 }
4879 4879
4880 4880 void os::Solaris::misc_sym_init() {
4881 4881 address func;
4882 4882
4883 4883 // getisax
4884 4884 func = resolve_symbol_lazy("getisax");
4885 4885 if (func != NULL) {
4886 4886 os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
4887 4887 }
4888 4888
4889 4889 // meminfo
4890 4890 func = resolve_symbol_lazy("meminfo");
4891 4891 if (func != NULL) {
4892 4892 os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
4893 4893 }
4894 4894 }
4895 4895
// Thin wrapper around the dynamically resolved getisax(2).  Callers must
// first check os::Solaris::supports_getisax(); invoking this when
// misc_sym_init() did not find the symbol trips the assert below.
uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
  assert(_getisax != NULL, "_getisax not set");
  return _getisax(array, n);
}
4900 4900
// Symbol doesn't exist in Solaris 8 pset.h
#ifndef PS_MYID
#define PS_MYID -3
#endif

// int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
// Resolved at runtime by init_pset_getloadavg_ptr() below, since the
// symbol is not present on all supported Solaris releases; stays NULL
// when unavailable.
typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
static pset_getloadavg_type pset_getloadavg_ptr = NULL;
4909 4909
4910 4910 void init_pset_getloadavg_ptr(void) {
4911 4911 pset_getloadavg_ptr =
4912 4912 (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
4913 4913 if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
4914 4914 warning("pset_getloadavg function not found");
4915 4915 }
4916 4916 }
4917 4917
// File descriptor for /dev/zero; opened in os::init() (set via
// Solaris::set_dev_zero_fd), -1 until then.
int os::Solaris::_dev_zero_fd = -1;
4919 4919
// this is called _before_ the global arguments have been parsed
//
// First-stage OS initialization: record the initial pid and hrtime, query
// the page size, resolve optional symbols, open /dev/zero, and establish
// minimum stack / guard-page parameters.  Anything that depends on parsed
// VM flags belongs in os::init_2() instead.
void os::init(void) {
  _initial_pid = getpid();

  max_hrtime = first_hrtime = gethrtime();

  init_random(1234567);

  page_size = sysconf(_SC_PAGESIZE);
  if (page_size == -1)
    fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
                  strerror(errno)));
  init_page_sizes((size_t) page_size);

  Solaris::initialize_system_info();

  // Initialize misc. symbols as soon as possible, so we can use them
  // if we need them.
  Solaris::misc_sym_init();

  // Keep a /dev/zero descriptor around for the lifetime of the VM.
  int fd = ::open("/dev/zero", O_RDWR);
  if (fd < 0) {
    fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
  } else {
    Solaris::set_dev_zero_fd(fd);

    // Close on exec, child won't inherit.
    fcntl(fd, F_SETFD, FD_CLOEXEC);
  }

  clock_tics_per_sec = CLK_TCK;

  // check if dladdr1() exists; dladdr1 can provide more information than
  // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
  // and is available on linker patches for 5.7 and 5.8.
  // libdl.so must have been loaded, this call is just an entry lookup
  void * hdl = dlopen("libdl.so", RTLD_NOW);
  if (hdl)
    dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));

  // (Solaris only) this switches to calls that actually do locking.
  ThreadCritical::initialize();

  main_thread = thr_self();

  // Constant minimum stack size allowed. It must be at least
  // the minimum of what the OS supports (thr_min_stack()), and
  // enough to allow the thread to get to user bytecode execution.
  Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
  // If the pagesize of the VM is greater than 8K determine the appropriate
  // number of initial guard pages. The user can change this with the
  // command line arguments, if needed.
  if (vm_page_size() > 8*K) {
    StackYellowPages = 1;
    StackRedPages = 1;
    StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
  }
}
4978 4978
// To install functions for atexit system call
extern "C" {
  // Registered with atexit() in os::init_2() (only when
  // PerfAllowAtExitRegistration is set) to release perf memory at exit.
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
4985 4985
// this is called _after_ the global arguments have been parsed
//
// Second-stage OS initialization: everything here may consult parsed VM
// flags.  Sets up the safepoint polling and memory-serialize pages, stack
// size limits, libthread/liblgrp, signal handling, the synchronization
// primitives, and the file-descriptor limit.  Returns JNI_OK, or JNI_ERR
// when a user-specified stack size is below the allowed minimum.
jint os::init_2(void) {
  // try to enable extended file IO ASAP, see 6431278
  os::Solaris::try_enable_extended_io();

  // Allocate a single page and mark it as readable for safepoint polling. Also
  // use this first mmap call to check support for MAP_ALIGN.
  address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
                                                      page_size,
                                                      MAP_PRIVATE | MAP_ALIGN,
                                                      PROT_READ);
  if (polling_page == NULL) {
    // MAP_ALIGN is unsupported here - remember that and retry unaligned.
    has_map_align = false;
    polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
                                                PROT_READ);
  }

  os::set_polling_page(polling_page);

#ifndef PRODUCT
  if( Verbose && PrintMiscellaneous )
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif

  if (!UseMembar) {
    // Serialization page used in lieu of explicit memory barriers.
    address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
    guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
    os::set_memory_serialize_page( mem_serialize_page );

#ifndef PRODUCT
    if(Verbose && PrintMiscellaneous)
      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
  }

  os::large_page_init();

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size. Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
                    2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);

  size_t threadStackSizeInBytes = ThreadStackSize * K;
  if (threadStackSizeInBytes != 0 &&
    threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
                  os::Solaris::min_stack_allowed/K);
    return JNI_ERR;
  }

  // For 64kbps there will be a 64kb page size, which makes
  // the usable default stack size quite a bit less. Increase the
  // stack for 64kb (or any > than 8kb) pages, this increases
  // virtual memory fragmentation (since we're not creating the
  // stack on a power of 2 boundary. The real fix for this
  // should be to fix the guard page mechanism.

  if (vm_page_size() > 8*K) {
    threadStackSizeInBytes = (threadStackSizeInBytes != 0)
       ? threadStackSizeInBytes +
         ((StackYellowPages + StackRedPages) * vm_page_size())
       : 0;
    ThreadStackSize = threadStackSizeInBytes/K;
  }

  // Make the stack size a multiple of the page size so that
  // the yellow/red zones can be guarded.
  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
        vm_page_size()));

  Solaris::libthread_init();

  if (UseNUMA) {
    if (!Solaris::liblgrp_init()) {
      UseNUMA = false;
    } else {
      size_t lgrp_limit = os::numa_get_groups_num();
      int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit);
      size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
      FREE_C_HEAP_ARRAY(int, lgrp_ids);
      if (lgrp_num < 2) {
        // There's only one locality group, disable NUMA.
        UseNUMA = false;
      }
    }
    // ISM is not compatible with the NUMA allocator - it always allocates
    // pages round-robin across the lgroups.
    if (UseNUMA && UseLargePages && UseISM) {
      if (!FLAG_IS_DEFAULT(UseNUMA)) {
        // NUMA was explicitly requested: drop large pages if both related
        // flags are still at their defaults, otherwise keep the user's
        // large-page choice and disable NUMA with a warning.
        if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseISM)) {
          UseLargePages = false;
        } else {
          warning("UseNUMA is not compatible with ISM large pages, disabling NUMA allocator");
          UseNUMA = false;
        }
      } else {
        UseNUMA = false;
      }
    }
    if (!UseNUMA && ForceNUMA) {
      UseNUMA = true;
    }
  }

  Solaris::signal_sets_init();
  Solaris::init_signal_mem();
  Solaris::install_signal_handlers();

  // Older libjsig interposers support fewer signals.
  if (libjsigversion < JSIG_VERSION_1_4_1) {
    Maxlibjsigsigs = OLDMAXSIGNUM;
  }

  // initialize synchronization primitives to use either thread or
  // lwp synchronization (controlled by UseLWPSynchronization)
  Solaris::synchronization_init();

  if (MaxFDLimit) {
    // set the number of file descriptors to max. print out error
    // if getrlimit/setrlimit fails but continue regardless.
    struct rlimit nbr_files;
    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
    if (status != 0) {
      if (PrintMiscellaneous && (Verbose || WizardMode))
        perror("os::init_2 getrlimit failed");
    } else {
      nbr_files.rlim_cur = nbr_files.rlim_max;
      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
      if (status != 0) {
        if (PrintMiscellaneous && (Verbose || WizardMode))
          perror("os::init_2 setrlimit failed");
      }
    }
  }

  // Calculate theoretical max. size of Threads to guard against
  // artificial out-of-memory situations, where all available address-
  // space has been reserved by thread stacks. Default stack size is 1Mb.
  size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
    JavaThread::stack_size_at_create() : (1*K*K);
  assert(pre_thread_stack_size != 0, "Must have a stack");
  // Solaris has a maximum of 4Gb of user programs. Calculate the thread limit when
  // we should start doing Virtual Memory banging. Currently when the threads will
  // have used all but 200Mb of space.
  size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
  Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;

  // at-exit methods are called in the reverse order of their registration.
  // In Solaris 7 and earlier, atexit functions are called on return from
  // main or as a result of a call to exit(3C). There can be only 32 of
  // these functions registered and atexit() does not set errno. In Solaris
  // 8 and later, there is no limit to the number of functions registered
  // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
  // functions are called upon dlclose(3DL) in addition to return from main
  // and exit(3C).

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init2 atexit(perfMemory_exit_helper) failed");
    }
  }

  // Init pset_loadavg function pointer
  init_pset_getloadavg_ptr();

  return JNI_OK;
}
5164 5164
5165 5165 void os::init_3(void) {
5166 5166 return;
5167 5167 }
5168 5168
5169 5169 // Mark the polling page as unreadable
5170 5170 void os::make_polling_page_unreadable(void) {
5171 5171 if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
5172 5172 fatal("Could not disable polling page");
5173 5173 };
5174 5174
5175 5175 // Mark the polling page as readable
5176 5176 void os::make_polling_page_readable(void) {
5177 5177 if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 )
5178 5178 fatal("Could not enable polling page");
5179 5179 };
5180 5180
// OS interface.

// C-heap verification hook; no OS-level heap checking is implemented on
// Solaris, so this always reports success.
bool os::check_heap(bool force) { return true; }
5184 5184
5185 5185 typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
5186 5186 static vsnprintf_t sol_vsnprintf = NULL;
5187 5187
5188 5188 int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
5189 5189 if (!sol_vsnprintf) {
5190 5190 //search for the named symbol in the objects that were loaded after libjvm
5191 5191 void* where = RTLD_NEXT;
5192 5192 if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
5193 5193 sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
5194 5194 if (!sol_vsnprintf){
5195 5195 //search for the named symbol in the objects that were loaded before libjvm
5196 5196 where = RTLD_DEFAULT;
5197 5197 if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
5198 5198 sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
5199 5199 assert(sol_vsnprintf != NULL, "vsnprintf not found");
5200 5200 }
5201 5201 }
5202 5202 return (*sol_vsnprintf)(buf, count, fmt, argptr);
5203 5203 }
5204 5204
5205 5205
// Is a (classpath) directory empty?
// Returns true when `path` contains no entries other than "." and "..";
// a directory that cannot be opened is also reported as empty.
bool os::dir_is_empty(const char* path) {
  DIR *dir = NULL;
  struct dirent *ptr;

  dir = opendir(path);
  if (dir == NULL) return true;

  /* Scan the directory */
  bool result = true;
  char buf[sizeof(struct dirent) + MAX_PATH];
  struct dirent *dbuf = (struct dirent *) buf;
  // NOTE(review): the two-argument readdir here looks like the reentrant
  // HotSpot wrapper that takes a caller-supplied buffer, not readdir(3C)
  // itself - confirm against os_solaris.inline.hpp.
  while (result && (ptr = readdir(dir, dbuf)) != NULL) {
    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
      // Any real entry means the directory is not empty; the loop
      // condition then terminates the scan.
      result = false;
    }
  }
  closedir(dir);
  return result;
}
5226 5226
// This code originates from JDK's sysOpen and open64_w
// from src/solaris/hpi/src/system_md.c

// JVM-private open flag: request that the file be unlinked immediately
// after a successful open (see os::open below).  It is stripped from the
// flags before they reach the OS.
#ifndef O_DELETE
#define O_DELETE 0x10000
#endif
5233 5233
5234 5234 // Open a file. Unlink the file immediately after open returns
5235 5235 // if the specified oflag has the O_DELETE flag set.
5236 5236 // O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
5237 5237
5238 5238 int os::open(const char *path, int oflag, int mode) {
5239 5239 if (strlen(path) > MAX_PATH - 1) {
5240 5240 errno = ENAMETOOLONG;
5241 5241 return -1;
5242 5242 }
5243 5243 int fd;
5244 5244 int o_delete = (oflag & O_DELETE);
5245 5245 oflag = oflag & ~O_DELETE;
5246 5246
5247 5247 fd = ::open64(path, oflag, mode);
5248 5248 if (fd == -1) return -1;
5249 5249
5250 5250 //If the open succeeded, the file might still be a directory
5251 5251 {
5252 5252 struct stat64 buf64;
5253 5253 int ret = ::fstat64(fd, &buf64);
5254 5254 int st_mode = buf64.st_mode;
5255 5255
5256 5256 if (ret != -1) {
5257 5257 if ((st_mode & S_IFMT) == S_IFDIR) {
5258 5258 errno = EISDIR;
5259 5259 ::close(fd);
5260 5260 return -1;
5261 5261 }
5262 5262 } else {
5263 5263 ::close(fd);
5264 5264 return -1;
5265 5265 }
5266 5266 }
5267 5267 /*
5268 5268 * 32-bit Solaris systems suffer from:
5269 5269 *
5270 5270 * - an historical default soft limit of 256 per-process file
5271 5271 * descriptors that is too low for many Java programs.
5272 5272 *
5273 5273 * - a design flaw where file descriptors created using stdio
5274 5274 * fopen must be less than 256, _even_ when the first limit above
5275 5275 * has been raised. This can cause calls to fopen (but not calls to
5276 5276 * open, for example) to fail mysteriously, perhaps in 3rd party
5277 5277 * native code (although the JDK itself uses fopen). One can hardly
5278 5278 * criticize them for using this most standard of all functions.
5279 5279 *
5280 5280 * We attempt to make everything work anyways by:
5281 5281 *
5282 5282 * - raising the soft limit on per-process file descriptors beyond
5283 5283 * 256
5284 5284 *
5285 5285 * - As of Solaris 10u4, we can request that Solaris raise the 256
5286 5286 * stdio fopen limit by calling function enable_extended_FILE_stdio.
5287 5287 * This is done in init_2 and recorded in enabled_extended_FILE_stdio
5288 5288 *
5289 5289 * - If we are stuck on an old (pre 10u4) Solaris system, we can
5290 5290 * workaround the bug by remapping non-stdio file descriptors below
5291 5291 * 256 to ones beyond 256, which is done below.
5292 5292 *
5293 5293 * See:
5294 5294 * 1085341: 32-bit stdio routines should support file descriptors >255
5295 5295 * 6533291: Work around 32-bit Solaris stdio limit of 256 open files
5296 5296 * 6431278: Netbeans crash on 32 bit Solaris: need to call
5297 5297 * enable_extended_FILE_stdio() in VM initialisation
5298 5298 * Giri Mandalika's blog
5299 5299 * http://technopark02.blogspot.com/2005_05_01_archive.html
5300 5300 */
5301 5301 #ifndef _LP64
5302 5302 if ((!enabled_extended_FILE_stdio) && fd < 256) {
5303 5303 int newfd = ::fcntl(fd, F_DUPFD, 256);
5304 5304 if (newfd != -1) {
5305 5305 ::close(fd);
5306 5306 fd = newfd;
5307 5307 }
5308 5308 }
5309 5309 #endif // 32-bit Solaris
5310 5310 /*
5311 5311 * All file descriptors that are opened in the JVM and not
5312 5312 * specifically destined for a subprocess should have the
5313 5313 * close-on-exec flag set. If we don't set it, then careless 3rd
5314 5314 * party native code might fork and exec without closing all
5315 5315 * appropriate file descriptors (e.g. as we do in closeDescriptors in
5316 5316 * UNIXProcess.c), and this in turn might:
5317 5317 *
5318 5318 * - cause end-of-file to fail to be detected on some file
5319 5319 * descriptors, resulting in mysterious hangs, or
5320 5320 *
5321 5321 * - might cause an fopen in the subprocess to fail on a system
5322 5322 * suffering from bug 1085341.
5323 5323 *
5324 5324 * (Yes, the default setting of the close-on-exec flag is a Unix
5325 5325 * design flaw)
5326 5326 *
5327 5327 * See:
5328 5328 * 1085341: 32-bit stdio routines should support file descriptors >255
5329 5329 * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
5330 5330 * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
5331 5331 */
5332 5332 #ifdef FD_CLOEXEC
5333 5333 {
5334 5334 int flags = ::fcntl(fd, F_GETFD);
5335 5335 if (flags != -1)
5336 5336 ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
5337 5337 }
5338 5338 #endif
5339 5339
5340 5340 if (o_delete != 0) {
5341 5341 ::unlink(path);
5342 5342 }
5343 5343 return fd;
5344 5344 }
5345 5345
5346 5346 // create binary file, rewriting existing file if required
5347 5347 int os::create_binary_file(const char* path, bool rewrite_existing) {
5348 5348 int oflags = O_WRONLY | O_CREAT;
5349 5349 if (!rewrite_existing) {
5350 5350 oflags |= O_EXCL;
5351 5351 }
5352 5352 return ::open64(path, oflags, S_IREAD | S_IWRITE);
5353 5353 }
5354 5354
// return current position of file pointer
// (propagates lseek64's -1 on error, widened to jlong)
jlong os::current_file_offset(int fd) {
  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
}
5359 5359
// move file pointer to the specified offset
// Returns the new absolute position, or -1 on error.
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
}
5364 5364
// Large-file-aware lseek wrapper; `whence` is SEEK_SET/SEEK_CUR/SEEK_END.
jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::lseek64(fd, offset, whence);
}
5368 5368
// Convert a pathname to the platform's native form.
// Solaris paths need no translation, so this is the identity function
// (the path is returned unmodified, not copied).
char * os::native_path(char *path) {
  return path;
}
5372 5372
// Large-file-aware ftruncate wrapper; returns 0 on success, -1 on error.
int os::ftruncate(int fd, jlong length) {
  return ::ftruncate64(fd, length);
}
5376 5376
// fsync wrapper that retries the call when it is interrupted by a signal
// (EINTR), via the RESTARTABLE_RETURN_INT macro.
int os::fsync(int fd) {
  RESTARTABLE_RETURN_INT(::fsync(fd));
}
5380 5380
// Report how many bytes can be read from fd without blocking (the
// java.io available() contract).  Returns 1 on success with *bytes set,
// 0 on failure.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  int mode;
  struct stat64 buf64;

  if (::fstat64(fd, &buf64) >= 0) {
    mode = buf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      // Seeking is meaningless for character devices, pipes and sockets;
      // ask the driver directly via FIONREAD.
      /*
      * XXX: is the following call interruptible? If so, this might
      * need to go through the INTERRUPT_IO() wrapper as for other
      * blocking, interruptible calls in this file.
      */
      int n,ioctl_return;

      INTERRUPTIBLE(::ioctl(fd, FIONREAD, &n),ioctl_return,os::Solaris::clear_interrupted);
      if (ioctl_return>= 0) {
        *bytes = n;
        return 1;
      }
    }
  }
  // Regular-file path: available = size - current position, computed with
  // three lseeks (save position, seek to end, restore position).
  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
    return 0;
  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
    return 0;
  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
    return 0;
  }
  *bytes = end - cur;
  return 1;
}
5413 5413
5414 5414 // Map a block of memory.
5415 5415 char* os::map_memory(int fd, const char* file_name, size_t file_offset,
5416 5416 char *addr, size_t bytes, bool read_only,
5417 5417 bool allow_exec) {
5418 5418 int prot;
5419 5419 int flags;
5420 5420
5421 5421 if (read_only) {
5422 5422 prot = PROT_READ;
5423 5423 flags = MAP_SHARED;
5424 5424 } else {
5425 5425 prot = PROT_READ | PROT_WRITE;
5426 5426 flags = MAP_PRIVATE;
5427 5427 }
5428 5428
5429 5429 if (allow_exec) {
5430 5430 prot |= PROT_EXEC;
5431 5431 }
5432 5432
5433 5433 if (addr != NULL) {
5434 5434 flags |= MAP_FIXED;
5435 5435 }
5436 5436
5437 5437 char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
5438 5438 fd, file_offset);
5439 5439 if (mapped_address == MAP_FAILED) {
5440 5440 return NULL;
5441 5441 }
5442 5442 return mapped_address;
5443 5443 }
5444 5444
5445 5445
5446 5446 // Remap a block of memory.
5447 5447 char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
5448 5448 char *addr, size_t bytes, bool read_only,
5449 5449 bool allow_exec) {
5450 5450 // same as map_memory() on this OS
5451 5451 return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
5452 5452 allow_exec);
5453 5453 }
5454 5454
5455 5455
5456 5456 // Unmap a block of memory.
5457 5457 bool os::unmap_memory(char* addr, size_t bytes) {
5458 5458 return munmap(addr, bytes) == 0;
5459 5459 }
5460 5460
5461 5461 void os::pause() {
5462 5462 char filename[MAX_PATH];
5463 5463 if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5464 5464 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
5465 5465 } else {
5466 5466 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5467 5467 }
5468 5468
5469 5469 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5470 5470 if (fd != -1) {
5471 5471 struct stat buf;
5472 5472 ::close(fd);
5473 5473 while (::stat(filename, &buf) == 0) {
5474 5474 (void)::poll(NULL, 0, 100);
5475 5475 }
5476 5476 } else {
5477 5477 jio_fprintf(stderr,
5478 5478 "Could not open pause file '%s', continuing immediately.\n", filename);
5479 5479 }
5480 5480 }
5481 5481
#ifndef PRODUCT
#ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
// Turn this on if you need to trace synch operations.
// Set RECORD_SYNCH_LIMIT to a large-enough value,
// and call record_synch_enable and record_synch_disable
// around the computation of interest.

void record_synch(char* name, bool returning);  // defined below

// RAII tracer: the constructor records entry into a synch operation and
// the destructor records the matching return, so every interposed call
// produces a balanced enter/exit pair in the trace buffer.
class RecordSynch {
  char* _name;
 public:
  RecordSynch(char* name) :_name(name)
                 { record_synch(_name, false); }
  ~RecordSynch() { record_synch(_name, true);  }
};
5498 5498
// CHECK_SYNCH_OP generates an interposing wrapper for a libc/libthread
// synch primitive: it lazily resolves the real implementation with
// dlsym(RTLD_NEXT), counts calls, runs the 'inner' sanity check, records
// the call via RecordSynch, then tail-calls the real function.
// (Comments cannot go inside the macro body: a '//' line would swallow
// its continuation backslash.)
#define CHECK_SYNCH_OP(ret, name, params, args, inner) \
extern "C" ret name params { \
  typedef ret name##_t params; \
  static name##_t* implem = NULL; \
  static int callcount = 0; \
  if (implem == NULL) { \
    implem = (name##_t*) dlsym(RTLD_NEXT, #name); \
    if (implem == NULL) fatal(dlerror()); \
  } \
  ++callcount; \
  RecordSynch _rs(#name); \
  inner; \
  return implem args; \
}
// in dbx, examine callcounts this way:
// for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done

// Sanity predicates: mutexes, condvars, and semaphores handed to libthread
// must live in the C heap, never inside the reserved Java heap.
#define CHECK_POINTER_OK(p) \
  (Universe::perm_gen() == NULL || !Universe::is_reserved_heap((oop)(p)))
#define CHECK_MU \
  if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
#define CHECK_CV \
  if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
#define CHECK_P(p) \
  if (!CHECK_POINTER_OK(p)) fatal(false, "Pointer must be in C heap only.");
5524 5524
// Instantiate interposers for every mutex entry point (public and
// leading-underscore variants).
#define CHECK_MUTEX(mutex_op) \
  CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);

CHECK_MUTEX( mutex_lock)
CHECK_MUTEX( _mutex_lock)
CHECK_MUTEX( mutex_unlock)
CHECK_MUTEX(_mutex_unlock)
CHECK_MUTEX( mutex_trylock)
CHECK_MUTEX(_mutex_trylock)

// Untimed condvar waits: validate both the mutex and the condvar.
#define CHECK_COND(cond_op) \
  CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);

CHECK_COND( cond_wait);
CHECK_COND(_cond_wait);
CHECK_COND(_cond_wait_cancel);

// Timed condvar waits (extra timestruc_t parameter).
#define CHECK_COND2(cond_op) \
  CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);

CHECK_COND2( cond_timedwait);
CHECK_COND2(_cond_timedwait);
CHECK_COND2(_cond_timedwait_cancel);

// do the _lwp_* versions too
// (temporarily rebind mutex_t/cond_t so the same macros expand with the
// lwp_* structure types)
#define mutex_t lwp_mutex_t
#define cond_t lwp_cond_t
CHECK_MUTEX( _lwp_mutex_lock)
CHECK_MUTEX( _lwp_mutex_unlock)
CHECK_MUTEX( _lwp_mutex_trylock)
CHECK_MUTEX( __lwp_mutex_lock)
CHECK_MUTEX( __lwp_mutex_unlock)
CHECK_MUTEX( __lwp_mutex_trylock)
CHECK_MUTEX(___lwp_mutex_lock)
CHECK_MUTEX(___lwp_mutex_unlock)

CHECK_COND( _lwp_cond_wait);
CHECK_COND( __lwp_cond_wait);
CHECK_COND(___lwp_cond_wait);

CHECK_COND2( _lwp_cond_timedwait);
CHECK_COND2( __lwp_cond_timedwait);
#undef mutex_t
#undef cond_t

// Miscellaneous LWP primitives; '0' means no extra pointer check.
CHECK_SYNCH_OP(int, _lwp_suspend2, (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_suspend2, (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_kill, (int lwp, int n), (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_kill, (int lwp, int n), (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_sema_wait, (lwp_sema_t* p), (p), CHECK_P(p));
CHECK_SYNCH_OP(int,__lwp_sema_wait, (lwp_sema_t* p), (p), CHECK_P(p));
CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv), (cv), CHECK_CV);
CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv), (cv), CHECK_CV);
5579 5579
// recording machinery:
// Fixed-size parallel arrays forming a bounded trace of synch events;
// recording silently stops once RECORD_SYNCH_LIMIT entries are filled.

enum { RECORD_SYNCH_LIMIT = 200 };
char* record_synch_name[RECORD_SYNCH_LIMIT];
void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
bool record_synch_returning[RECORD_SYNCH_LIMIT];
thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
int record_synch_count = 0;
bool record_synch_enabled = false;

// in dbx, examine recorded data this way:
// for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done

// Append one trace entry (operation name, enter/return flag, calling
// thread, and the stack address of the name argument as a rough stack
// fingerprint).  No-op unless recording is enabled.
// NOTE(review): not synchronized; concurrent recorders may race on
// record_synch_count -- acceptable for this debug-only tracing.
void record_synch(char* name, bool returning) {
  if (record_synch_enabled) {
    if (record_synch_count < RECORD_SYNCH_LIMIT) {
      record_synch_name[record_synch_count] = name;
      record_synch_returning[record_synch_count] = returning;
      record_synch_thread[record_synch_count] = thr_self();
      record_synch_arg0ptr[record_synch_count] = &name;
      record_synch_count++;
    }
    // put more checking code here:
    // ...
  }
}
5606 5606
// Reset the buffer (on a disabled->enabled transition) and start tracing.
void record_synch_enable() {
  // start collecting trace data, if not already doing so
  if (!record_synch_enabled) record_synch_count = 0;
  record_synch_enabled = true;
}

// Stop tracing; recorded entries remain available for inspection.
void record_synch_disable() {
  // stop collecting trace data
  record_synch_enabled = false;
}

#endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
#endif // PRODUCT
5620 5620
// Byte offset of pr_utime within prusage_t, and the size of the window
// from pr_utime up to pr_ttime, computed with the offsetof-via-null-
// pointer idiom.  thread_cpu_time() below uses these to pread() only the
// interesting slice of /proc/<pid>/lwp/<lwpid>/lwpusage.
const intptr_t thr_time_off = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
                               (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5624 5624
5625 5625
5626 5626 // JVMTI & JVM monitoring and management support
5627 5627 // The thread_cpu_time() and current_thread_cpu_time() are only
5628 5628 // supported if is_thread_cpu_time_supported() returns true.
5629 5629 // They are not supported on Solaris T1.
5630 5630
5631 5631 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5632 5632 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
5633 5633 // of a thread.
5634 5634 //
5635 5635 // current_thread_cpu_time() and thread_cpu_time(Thread *)
5636 5636 // returns the fast estimate available on the platform.
5637 5637
// hrtime_t gethrvtime() return value includes
// user time but does not include system time
// Fast path: per-LWP virtual time in nanoseconds for the calling thread.
jlong os::current_thread_cpu_time() {
  return (jlong) gethrvtime();
}
5643 5643
// CPU time for an arbitrary thread; delegates to the two-argument form.
jlong os::thread_cpu_time(Thread *thread) {
  // return user level CPU time only to be consistent with
  // what current_thread_cpu_time returns.
  // thread_cpu_time_info() must be changed if this changes
  return os::thread_cpu_time(thread, false /* user time only */);
}
5650 5650
5651 5651 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
5652 5652 if (user_sys_cpu_time) {
5653 5653 return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
5654 5654 } else {
5655 5655 return os::current_thread_cpu_time();
5656 5656 }
5657 5657 }
5658 5658
5659 5659 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5660 5660 char proc_name[64];
5661 5661 int count;
5662 5662 prusage_t prusage;
5663 5663 jlong lwp_time;
5664 5664 int fd;
5665 5665
5666 5666 sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
5667 5667 getpid(),
5668 5668 thread->osthread()->lwp_id());
5669 5669 fd = ::open(proc_name, O_RDONLY);
5670 5670 if ( fd == -1 ) return -1;
5671 5671
5672 5672 do {
5673 5673 count = ::pread(fd,
5674 5674 (void *)&prusage.pr_utime,
5675 5675 thr_time_size,
5676 5676 thr_time_off);
5677 5677 } while (count < 0 && errno == EINTR);
5678 5678 ::close(fd);
5679 5679 if ( count < 0 ) return -1;
5680 5680
5681 5681 if (user_sys_cpu_time) {
5682 5682 // user + system CPU time
5683 5683 lwp_time = (((jlong)prusage.pr_stime.tv_sec +
5684 5684 (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
5685 5685 (jlong)prusage.pr_stime.tv_nsec +
5686 5686 (jlong)prusage.pr_utime.tv_nsec;
5687 5687 } else {
5688 5688 // user level CPU time only
5689 5689 lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
5690 5690 (jlong)prusage.pr_utime.tv_nsec;
5691 5691 }
5692 5692
5693 5693 return(lwp_time);
5694 5694 }
5695 5695
// Describe (for JVMTI) the timer behind current_thread_cpu_time().
void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;    // elapsed time not wall time
  info_ptr->may_skip_forward = false;     // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
}
5702 5702
// Describe (for JVMTI) the timer behind thread_cpu_time(Thread*).
void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;    // elapsed time not wall time
  info_ptr->may_skip_forward = false;     // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
}
5709 5709
5710 5710 bool os::is_thread_cpu_time_supported() {
5711 5711 if ( os::Solaris::T2_libthread() || UseBoundThreads ) {
5712 5712 return true;
5713 5713 } else {
5714 5714 return false;
5715 5715 }
5716 5716 }
5717 5717
5718 5718 // System loadavg support. Returns -1 if load average cannot be obtained.
5719 5719 // Return the load average for our processor set if the primitive exists
5720 5720 // (Solaris 9 and later). Otherwise just return system wide loadavg.
5721 5721 int os::loadavg(double loadavg[], int nelem) {
5722 5722 if (pset_getloadavg_ptr != NULL) {
5723 5723 return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
5724 5724 } else {
5725 5725 return ::getloadavg(loadavg, nelem);
5726 5726 }
5727 5727 }
5728 5728
5729 5729 //---------------------------------------------------------------------------------
5730 5730
5731 5731 static address same_page(address x, address y) {
5732 5732 intptr_t page_bits = -os::vm_page_size();
5733 5733 if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
5734 5734 return x;
5735 5735 else if (x > y)
5736 5736 return (address)(intptr_t(y) | ~page_bits) + 1;
5737 5737 else
5738 5738 return (address)(intptr_t(y) & page_bits);
5739 5739 }
5740 5740
// Print symbolic information about 'addr' to 'st' using dladdr():
// "address: symbol+offset in library at base".  With -verbose, also
// disassemble the code surrounding addr.  Returns true iff addr could be
// attributed to a loaded object.
bool os::find(address addr, outputStream* st) {
  Dl_info dlinfo;
  memset(&dlinfo, 0, sizeof(dlinfo));
  if (dladdr(addr, &dlinfo)) {
#ifdef _LP64
    st->print("0x%016lx: ", addr);
#else
    st->print("0x%08x: ", addr);
#endif
    // Prefer symbol+offset; fall back to offset within the object, then
    // to a bare "absolute address" marker.
    if (dlinfo.dli_sname != NULL)
      st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
    else if (dlinfo.dli_fname)
      st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
    else
      st->print("<absolute address>");
    if (dlinfo.dli_fname) st->print(" in %s", dlinfo.dli_fname);
#ifdef _LP64
    if (dlinfo.dli_fbase) st->print(" at 0x%016lx", dlinfo.dli_fbase);
#else
    if (dlinfo.dli_fbase) st->print(" at 0x%08x", dlinfo.dli_fbase);
#endif
    st->cr();

    if (Verbose) {
      // decode some bytes around the PC, clipped to addr's page and to
      // the containing symbol's start
      address begin = same_page(addr-40, addr);
      address end = same_page(addr+40, addr);
      address lowest = (address) dlinfo.dli_sname;
      if (!lowest) lowest = (address) dlinfo.dli_fbase;
      if (begin < lowest) begin = lowest;
      Dl_info dlinfo2;
      // don't disassemble into the following symbol
      if (dladdr(end, &dlinfo2) && dlinfo2.dli_saddr != dlinfo.dli_saddr
          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
        end = (address) dlinfo2.dli_saddr;
      Disassembler::decode(begin, end, st);
    }
    return true;
  }
  return false;
}
5781 5781
// Following function has been added to support HotSparc's libjvm.so running
// under Solaris production JDK 1.2.2 / 1.3.0. These came from
// src/solaris/hpi/native_threads in the EVM codebase.
//
// NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
// libraries and should thus be removed. We will leave it behind for a while
// until we no longer want to be able to run on top of 1.3.0 Solaris production
// JDK. See 4341971.

// Safety margin subtracted from the computed available stack, in bytes.
#define STACK_SLACK 0x800

extern "C" {
  // Approximate number of stack bytes still usable by the calling thread:
  // distance from a local variable's address down to the stack segment's
  // low end, minus STACK_SLACK.
  intptr_t sysThreadAvailableStackWithSlack() {
    stack_t st;
    intptr_t retval, stack_top;
    retval = thr_stksegment(&st);
    assert(retval == 0, "incorrect return value from thr_stksegment");
    // ss_sp is the HIGH end of the segment; the usable range is
    // [ss_sp - ss_size, ss_sp), and &st must lie within it.
    assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
    assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
    stack_top=(intptr_t)st.ss_sp-st.ss_size;
    return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
  }
}
5805 5805
// Just to get the Kernel build to link on solaris for testing.

extern "C" {
  class ASGCT_CallTrace;
  // NOTE(review): KERNEL_RETURN presumably expands to an empty stub body
  // in Kernel builds and to ';' otherwise -- confirm against the macro's
  // definition before relying on this.
  void AsyncGetCallTrace(ASGCT_CallTrace *trace, jint depth, void* ucontext)
  KERNEL_RETURN;
}
5813 5813
5814 5814
5815 5815 // ObjectMonitor park-unpark infrastructure ...
5816 5816 //
5817 5817 // We implement Solaris and Linux PlatformEvents with the
5818 5818 // obvious condvar-mutex-flag triple.
5819 5819 // Another alternative that works quite well is pipes:
5820 5820 // Each PlatformEvent consists of a pipe-pair.
5821 5821 // The thread associated with the PlatformEvent
5822 5822 // calls park(), which reads from the input end of the pipe.
5823 5823 // Unpark() writes into the other end of the pipe.
5824 5824 // The write-side of the pipe must be set NDELAY.
5825 5825 // Unfortunately pipes consume a large # of handles.
5826 5826 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
5827 5827 // Using pipes for the 1st few threads might be workable, however.
5828 5828 //
5829 5829 // park() is permitted to return spuriously.
5830 5830 // Callers of park() should wrap the call to park() in
5831 5831 // an appropriate loop. A litmus test for the correct
5832 5832 // usage of park is the following: if park() were modified
5833 5833 // to immediately return 0 your code should still work,
5834 5834 // albeit degenerating to a spin loop.
5835 5835 //
5836 5836 // An interesting optimization for park() is to use a trylock()
5837 5837 // to attempt to acquire the mutex. If the trylock() fails
5838 5838 // then we know that a concurrent unpark() operation is in-progress.
5839 5839 // in that case the park() code could simply set _count to 0
5840 5840 // and return immediately. The subsequent park() operation *might*
5841 5841 // return immediately. That's harmless as the caller of park() is
// expected to loop.  By using trylock() we will have
// avoided a context switch caused by contention on the per-thread mutex.
5844 5844 //
5845 5845 // TODO-FIXME:
5846 5846 // 1. Reconcile Doug's JSR166 j.u.c park-unpark with the
5847 5847 // objectmonitor implementation.
5848 5848 // 2. Collapse the JSR166 parker event, and the
5849 5849 // objectmonitor ParkEvent into a single "Event" construct.
5850 5850 // 3. In park() and unpark() add:
5851 5851 // assert (Thread::current() == AssociatedWith).
5852 5852 // 4. add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
5853 5853 // 1-out-of-N park() operations will return immediately.
5854 5854 //
5855 5855 // _Event transitions in park()
5856 5856 // -1 => -1 : illegal
5857 5857 // 1 => 0 : pass - return immediately
5858 5858 // 0 => -1 : block
5859 5859 //
5860 5860 // _Event serves as a restricted-range semaphore.
5861 5861 //
5862 5862 // Another possible encoding of _Event would be with
5863 5863 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5864 5864 //
5865 5865 // TODO-FIXME: add DTRACE probes for:
5866 5866 // 1. Tx parks
5867 5867 // 2. Ty unparks Tx
5868 5868 // 3. Tx resumes from park
5869 5869
5870 5870
// value determined through experimentation
#define ROUNDINGFIX 11

// utility to compute the abstime argument to timedwait.
// TODO-FIXME: switch from compute_abstime() to unpackTime().

// Convert a relative timeout of 'millis' milliseconds into the absolute
// timestruc_t that cond_timedwait()/_lwp_cond_timedwait() expect.
// Negative timeouts are treated as 0; the seconds component is clamped
// per cond_timedwait(3T).  Returns 'abstime' for call-site convenience.
static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
  // millis is the relative timeout time
  // abstime will be the absolute timeout time
  if (millis < 0) millis = 0;
  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");
  jlong seconds = millis / 1000;
  jlong max_wait_period;

  if (UseLWPSynchronization) {
    // forward port of fix for 4275818 (not sleeping long enough)
    // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
    // _lwp_cond_timedwait() used a round_down algorithm rather
    // than a round_up. For millis less than our roundfactor
    // it rounded down to 0 which doesn't meet the spec.
    // For millis > roundfactor we may return a bit sooner, but
    // since we can not accurately identify the patch level and
    // this has already been fixed in Solaris 9 and 8 we will
    // leave it alone rather than always rounding down.

    if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
    // It appears that when we go directly through Solaris _lwp_cond_timedwait()
    // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
    max_wait_period = 21000000;
  } else {
    max_wait_period = 50000000;
  }
  millis %= 1000;
  if (seconds > max_wait_period) { // see man cond_timedwait(3T)
    seconds = max_wait_period;
  }
  // Assemble absolute time = now + (seconds, millis), normalizing the
  // microsecond carry before converting to nanoseconds.
  abstime->tv_sec = now.tv_sec + seconds;
  long usec = now.tv_usec + millis * 1000;
  if (usec >= 1000000) {
    abstime->tv_sec += 1;
    usec -= 1000000;
  }
  abstime->tv_nsec = usec * 1000;
  return abstime;
}
5918 5918
// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
// Conceptually TryPark() should be equivalent to park(0).

int os::PlatformEvent::TryPark() {
  // CAS loop: swap 0 into _Event and report the value that was consumed
  // (1 = a pending unpark was claimed, 0 = nothing to consume).
  for (;;) {
    const int v = _Event ;
    guarantee ((v == 0) || (v == 1), "invariant") ;
    if (Atomic::cmpxchg (0, &_Event, v) == v) return v ;
  }
}
5929 5929
void os::PlatformEvent::park() {           // AKA: down()
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  int v ;
  // Atomically decrement _Event: 1 -> 0 consumes a prior unpark() and we
  // return immediately; 0 -> -1 marks us as parked and we must block.
  for (;;) {
      v = _Event ;
      if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v == 0) {
     // Do this the hard way by blocking ...
     // See http://monaco.sfbay/detail.jsf?cr=5094058.
     // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
     // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
     if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif
     int status = os::Solaris::mutex_lock(_mutex);
     assert_status(status == 0, status, "mutex_lock");
     guarantee (_nParked == 0, "invariant") ;
     ++ _nParked ;
     // Loop to filter spurious wakeups; only an unpark() making
     // _Event >= 0 releases us.
     while (_Event < 0) {
        // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
        // Treat this the same as if the wait was interrupted
        // With usr/lib/lwp going to kernel, always handle ETIME
        status = os::Solaris::cond_wait(_cond, _mutex);
        if (status == ETIME) status = EINTR ;
        assert_status(status == 0 || status == EINTR, status, "cond_wait");
     }
     -- _nParked ;
     _Event = 0 ;
     status = os::Solaris::mutex_unlock(_mutex);
     assert_status(status == 0, status, "mutex_unlock");
  }
}
5965 5965
// Timed park: block for at most 'millis' ms.  Returns OS_OK if the event
// was signaled (before or during the wait), OS_TIMEOUT otherwise.
int os::PlatformEvent::park(jlong millis) {
  guarantee (_nParked == 0, "invariant") ;
  int v ;
  for (;;) {
      v = _Event ;
      if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v != 0) return OS_OK ;   // consumed a pending unpark - no wait needed

  int ret = OS_TIMEOUT;
  timestruc_t abst;
  compute_abstime (&abst, millis);

  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // For Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif
  int status = os::Solaris::mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee (_nParked == 0, "invariant") ;
  ++ _nParked ;
  while (_Event < 0) {
     // NOTE(review): this inner 'status' intentionally-or-not shadows the
     // outer one declared above.
     int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
     assert_status(status == 0 || status == EINTR ||
                   status == ETIME || status == ETIMEDOUT,
                   status, "cond_timedwait");
     if (!FilterSpuriousWakeups) break ; // previous semantics
     if (status == ETIME || status == ETIMEDOUT) break ;
     // We consume and ignore EINTR and spurious wakeups.
  }
  -- _nParked ;
  if (_Event >= 0) ret = OS_OK ;   // signaled before the timeout expired
  _Event = 0 ;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  return ret;
}
6006 6006
// Signal the event: bump _Event toward 1 and, if the associated thread
// was parked (_Event was negative), wake it.
void os::PlatformEvent::unpark() {
  int v, AnyWaiters;

  // Increment _Event.
  // Another acceptable implementation would be to simply swap 1
  // into _Event:
  //   if (Swap (&_Event, 1) < 0) {
  //      mutex_lock (_mutex) ; AnyWaiters = nParked; mutex_unlock (_mutex) ;
  //      if (AnyWaiters) cond_signal (_cond) ;
  //   }

  for (;;) {
    v = _Event ;
    if (v > 0) {
       // The LD of _Event could have reordered or be satisfied
       // by a read-aside from this processor's write buffer.
       // To avoid problems execute a barrier and then
       // ratify the value.  A degenerate CAS() would also work.
       // Viz., CAS (v+0, &_Event, v) == v).
       OrderAccess::fence() ;
       if (_Event == v) return ;
       continue ;
    }
    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
  }

  // If the thread associated with the event was parked, wake it.
  if (v < 0) {
     int status ;
     // Wait for the thread assoc with the PlatformEvent to vacate.
     status = os::Solaris::mutex_lock(_mutex);
     assert_status(status == 0, status, "mutex_lock");
     AnyWaiters = _nParked ;
     status = os::Solaris::mutex_unlock(_mutex);
     assert_status(status == 0, status, "mutex_unlock");
     guarantee (AnyWaiters == 0 || AnyWaiters == 1, "invariant") ;
     if (AnyWaiters != 0) {
       // We intentionally signal *after* dropping the lock
       // to avoid a common class of futile wakeups.
       status = os::Solaris::cond_signal(_cond);
       assert_status(status == 0, status, "cond_signal");
     }
  }
}
6051 6051
6052 6052 // JSR166
6053 6053 // -------------------------------------------------------
6054 6054
6055 6055 /*
6056 6056 * The solaris and linux implementations of park/unpark are fairly
6057 6057 * conservative for now, but can be improved. They currently use a
6058 6058 * mutex/condvar pair, plus _counter.
6059 6059 * Park decrements _counter if > 0, else does a condvar wait. Unpark
6060 6060 * sets count to 1 and signals condvar. Only one thread ever waits
6061 6061 * on the condvar. Contention seen when trying to park implies that someone
6062 6062 * is unparking you, so don't wait. And spurious returns are fine, so there
6063 6063 * is no need to track notifications.
6064 6064 */
6065 6065
#define MAX_SECS 100000000
/*
 * This code is common to linux and solaris and will be moved to a
 * common place in dolphin.
 *
 * The passed in time value is either a relative time in nanoseconds
 * or an absolute time in milliseconds. Either way it has to be unpacked
 * into suitable seconds and nanoseconds components and stored in the
 * given timespec structure.
 * Given time is a 64-bit value and the time_t used in the timespec is only
 * a signed-32-bit value (except on 64-bit Linux) we have to watch for
 * overflow if times way in the future are given. Further on Solaris versions
 * prior to 10 there is a restriction (see cond_timedwait) that the specified
 * number of seconds, in abstime, is less than current_time + 100,000,000.
 * As it will be 28 years before "now + 100000000" will overflow we can
 * ignore overflow and just impose a hard-limit on seconds using the value
 * of "now + 100,000,000". This places a limit on the timeout of about 3.17
 * years from "now".
 */
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert (time > 0, "convertTime");

  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");

  // Hard cap on the resulting deadline (see the comment block above).
  time_t max_secs = now.tv_sec + MAX_SECS;

  if (isAbsolute) {
    // 'time' is absolute milliseconds since the epoch.
    jlong secs = time / 1000;
    if (secs > max_secs) {
      absTime->tv_sec = max_secs;
    }
    else {
      absTime->tv_sec = secs;
    }
    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  }
  else {
    // 'time' is a relative timeout in nanoseconds; add it to 'now',
    // normalizing the nanosecond carry.
    jlong secs = time / NANOSECS_PER_SEC;
    if (secs >= MAX_SECS) {
      absTime->tv_sec = max_secs;
      absTime->tv_nsec = 0;
    }
    else {
      absTime->tv_sec = now.tv_sec + secs;
      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
        absTime->tv_nsec -= NANOSECS_PER_SEC;
        ++absTime->tv_sec; // note: this must be <= max_secs
      }
    }
  }
  assert(absTime->tv_sec >= 0, "tv_sec < 0");
  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
}
6124 6124
// JSR166 LockSupport.park: block the current thread until a permit is
// available (unpark), the thread is interrupted, or (time != 0) the
// relative/absolute deadline passes.  Spurious returns are permitted.
void Parker::park(bool isAbsolute, jlong time) {

  // Optional fast-path check:
  // Return immediately if a permit is available.
  if (_counter > 0) {
      _counter = 0 ;
      OrderAccess::fence();
      return ;
  }

  // Optional fast-exit: Check interrupt before trying to wait
  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // First, demultiplex/decode time arguments
  timespec absTime;
  if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
    return;
  }
  if (time > 0) {
    // Warning: this code might be exposed to the old Solaris time
    // round-down bugs.  Grep "roundingFix" for details.
    unpackTime(&absTime, isAbsolute, time);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: _mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex.  If safepoints are pending both the
  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if cannot get lock since interference arises from
  // unblocking.  Also, check interrupt before trying wait
  if (Thread::is_interrupted(thread, false) ||
      os::Solaris::mutex_trylock(_mutex) != 0) {
    return;
  }

  int status ;

  if (_counter > 0)  { // no wait needed
    _counter = 0;
    status = os::Solaris::mutex_unlock(_mutex);
    assert (status == 0, "invariant") ;
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
  thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  // Do this the hard way by blocking ...
  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif

  // time == 0 means "park indefinitely"; otherwise wait until absTime.
  if (time == 0) {
    status = os::Solaris::cond_wait (_cond, _mutex) ;
  } else {
    status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
  }
  // Note that an untimed cond_wait() can sometimes return ETIME on older
  // versions of the Solaris.
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
#endif
  _counter = 0 ;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock") ;

  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }
  OrderAccess::fence();
}
6223 6223
6224 6224 void Parker::unpark() {
6225 6225 int s, status ;
6226 6226 status = os::Solaris::mutex_lock (_mutex) ;
6227 6227 assert (status == 0, "invariant") ;
6228 6228 s = _counter;
6229 6229 _counter = 1;
6230 6230 status = os::Solaris::mutex_unlock (_mutex) ;
6231 6231 assert (status == 0, "invariant") ;
6232 6232
6233 6233 if (s < 1) {
6234 6234 status = os::Solaris::cond_signal (_cond) ;
6235 6235 assert (status == 0, "invariant") ;
6236 6236 }
6237 6237 }
6238 6238
6239 6239 extern char** environ;
6240 6240
6241 6241 // Run the specified command in a separate process. Return its exit value,
6242 6242 // or -1 on failure (e.g. can't fork a new process).
6243 6243 // Unlike system(), this function can be called from signal handler. It
6244 6244 // doesn't block SIGINT et al.
6245 6245 int os::fork_and_exec(char* cmd) {
6246 6246 char * argv[4];
6247 6247 argv[0] = (char *)"sh";
6248 6248 argv[1] = (char *)"-c";
6249 6249 argv[2] = cmd;
6250 6250 argv[3] = NULL;
6251 6251
6252 6252 // fork is async-safe, fork1 is not so can't use in signal handler
6253 6253 pid_t pid;
6254 6254 Thread* t = ThreadLocalStorage::get_thread_slow();
6255 6255 if (t != NULL && t->is_inside_signal_handler()) {
6256 6256 pid = fork();
6257 6257 } else {
6258 6258 pid = fork1();
6259 6259 }
6260 6260
6261 6261 if (pid < 0) {
6262 6262 // fork failed
6263 6263 warning("fork failed: %s", strerror(errno));
6264 6264 return -1;
6265 6265
6266 6266 } else if (pid == 0) {
6267 6267 // child process
6268 6268
6269 6269 // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
6270 6270 execve("/usr/bin/sh", argv, environ);
6271 6271
6272 6272 // execve failed
6273 6273 _exit(-1);
6274 6274
6275 6275 } else {
6276 6276 // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
6277 6277 // care about the actual exit code, for now.
6278 6278
6279 6279 int status;
6280 6280
6281 6281 // Wait for the child process to exit. This returns immediately if
6282 6282 // the child has already exited. */
6283 6283 while (waitpid(pid, &status, 0) < 0) {
6284 6284 switch (errno) {
6285 6285 case ECHILD: return 0;
6286 6286 case EINTR: break;
6287 6287 default: return -1;
6288 6288 }
6289 6289 }
6290 6290
6291 6291 if (WIFEXITED(status)) {
6292 6292 // The child exited normally; get its exit code.
6293 6293 return WEXITSTATUS(status);
6294 6294 } else if (WIFSIGNALED(status)) {
6295 6295 // The child exited because of a signal
6296 6296 // The best value to return is 0x80 + signal number,
6297 6297 // because that is what all Unix shells do, and because
6298 6298 // it allows callers to distinguish between process exit and
6299 6299 // process death by signal.
6300 6300 return 0x80 + WTERMSIG(status);
6301 6301 } else {
6302 6302 // Unknown exit code; pass it through
6303 6303 return status;
6304 6304 }
6305 6305 }
6306 6306 }
6307 6307
6308 6308 // is_headless_jre()
6309 6309 //
6310 6310 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
6311 6311 // in order to report if we are running in a headless jre
6312 6312 //
6313 6313 // Since JDK8 xawt/libmawt.so was moved into the same directory
6314 6314 // as libawt.so, and renamed libawt_xawt.so
6315 6315 //
6316 6316 bool os::is_headless_jre() {
6317 6317 struct stat statbuf;
6318 6318 char buf[MAXPATHLEN];
6319 6319 char libmawtpath[MAXPATHLEN];
6320 6320 const char *xawtstr = "/xawt/libmawt.so";
6321 6321 const char *new_xawtstr = "/libawt_xawt.so";
6322 6322 char *p;
6323 6323
6324 6324 // Get path to libjvm.so
6325 6325 os::jvm_path(buf, sizeof(buf));
6326 6326
6327 6327 // Get rid of libjvm.so
6328 6328 p = strrchr(buf, '/');
6329 6329 if (p == NULL) return false;
6330 6330 else *p = '\0';
6331 6331
6332 6332 // Get rid of client or server
6333 6333 p = strrchr(buf, '/');
6334 6334 if (p == NULL) return false;
6335 6335 else *p = '\0';
6336 6336
6337 6337 // check xawt/libmawt.so
6338 6338 strcpy(libmawtpath, buf);
6339 6339 strcat(libmawtpath, xawtstr);
6340 6340 if (::stat(libmawtpath, &statbuf) == 0) return false;
6341 6341
6342 6342 // check libawt_xawt.so
6343 6343 strcpy(libmawtpath, buf);
6344 6344 strcat(libmawtpath, new_xawtstr);
6345 6345 if (::stat(libmawtpath, &statbuf) == 0) return false;
6346 6346
6347 6347 return true;
6348 6348 }
6349 6349
// Interruptible wrapper over write(2).  The INTERRUPTIBLE_RETURN_INT macro
// (defined elsewhere in this port) ties the raw ::write call into the VM's
// thread-interrupt handling, clearing the interrupted state via
// os::Solaris::clear_interrupted.
size_t os::write(int fd, const void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT(::write(fd, buf, nBytes), os::Solaris::clear_interrupted);
}
6353 6353
// close(2) wrapper.  RESTARTABLE_RETURN_INT presumably retries the call
// while it fails with EINTR (per the macro's name) — confirm against the
// macro definition.
int os::close(int fd) {
  RESTARTABLE_RETURN_INT(::close(fd));
}
6357 6357
// Socket close: identical to os::close() above — sockets are plain file
// descriptors on Solaris, so the same restartable ::close is used.
int os::socket_close(int fd) {
  RESTARTABLE_RETURN_INT(::close(fd));
}
6361 6361
// Interruptible wrapper over recv(2); participates in the VM's
// thread-interrupt protocol via os::Solaris::clear_interrupted.
int os::recv(int fd, char *buf, int nBytes, int flags) {
  INTERRUPTIBLE_RETURN_INT(::recv(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
}
6365 6365
6366 6366
// Interruptible wrapper over send(2); mirrors os::recv() above.
int os::send(int fd, char *buf, int nBytes, int flags) {
  INTERRUPTIBLE_RETURN_INT(::send(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
}
6370 6370
// "Raw" send: restarted on EINTR (per RESTARTABLE_RETURN_INT) but does NOT
// take part in the thread-interrupt protocol, unlike os::send() above.
int os::raw_send(int fd, char *buf, int nBytes, int flags) {
  RESTARTABLE_RETURN_INT(::send(fd, buf, nBytes, flags));
}
6374 6374
// As both poll and select can be interrupted by signals, we have to be
// prepared to restart the system call after updating the timeout, unless
// a poll() is done with timeout == -1, in which case we repeat with this
// "wait forever" value.

// Wait until fd becomes readable (POLLIN) or the timeout, in milliseconds,
// expires.  timeout == -1 means wait forever.  Returns the poll() result
// from INTERRUPTIBLE_NORESTART, or OS_OK once the deadline has been
// consumed by repeated EINTR restarts.
int os::timeout(int fd, long timeout) {
  int res;
  struct timeval t;
  julong prevtime, newtime;
  // NOTE(review): &aNull (a non-NULL pointer) is passed as gettimeofday's
  // second argument below; Solaris presumably ignores tzp — confirm.
  static const char* aNull = 0;
  struct pollfd pfd;
  pfd.fd = fd;
  pfd.events = POLLIN;  // readability only

  // Record start time in milliseconds so the remaining timeout can be
  // recomputed after each interrupted poll().
  gettimeofday(&t, &aNull);
  prevtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;

  for(;;) {
    INTERRUPTIBLE_NORESTART(::poll(&pfd, 1, timeout), res, os::Solaris::clear_interrupted);
    if(res == OS_ERR && errno == EINTR) {
      // Interrupted: shrink the remaining timeout by elapsed time and retry
      // (a -1 "forever" timeout is simply reused as-is).
      if(timeout != -1) {
        gettimeofday(&t, &aNull);
        newtime = ((julong)t.tv_sec * 1000) + t.tv_usec /1000;
        timeout -= newtime - prevtime;
        if(timeout <= 0)
          return OS_OK;  // deadline fully elapsed during restarts
        prevtime = newtime;
      }
    } else return res;  // success, timeout, or non-EINTR error
  }
}
6406 6406
// Interruptible wrapper over connect(2).  A connect interrupted by a signal
// needs special errno repair: restarting it changes its errno semantics
// (EALREADY/EISCONN instead of the first-attempt values), which this code
// maps back to what callers expect.
int os::connect(int fd, struct sockaddr *him, int len) {
  int _result;
  INTERRUPTIBLE_NORESTART(::connect(fd, him, len), _result,
                          os::Solaris::clear_interrupted);

  // Depending on when thread interruption is reset, _result could be
  // one of two values when errno == EINTR

  if (((_result == OS_INTRPT) || (_result == OS_ERR))
      && (errno == EINTR)) {
    /* restarting a connect() changes its errno semantics */
    INTERRUPTIBLE(::connect(fd, him, len), _result,
                  os::Solaris::clear_interrupted);
    /* undo these changes */
    if (_result == OS_ERR) {
      if (errno == EALREADY) {
        // Restarted connect still in progress: report as EINPROGRESS,
        // the errno a single connect() would have produced.
        errno = EINPROGRESS; /* fall through */
      } else if (errno == EISCONN) {
        // The original (interrupted) connect actually succeeded.
        errno = 0;
        return OS_OK;
      }
    }
  }
  return _result;
}
6432 6432
// Interruptible wrapper over accept(2).  Rejects a negative fd up front
// instead of handing it to the kernel.
int os::accept(int fd, struct sockaddr *him, int *len) {
  if (fd < 0)
    return OS_ERR;
  INTERRUPTIBLE_RETURN_INT((int)::accept(fd, him,\
    (socklen_t*) len), os::Solaris::clear_interrupted);
}
6439 6439
// Interruptible wrapper over recvfrom(2); datagram counterpart of os::recv().
int os::recvfrom(int fd, char *buf, int nBytes, int flags,
                 sockaddr *from, int *fromlen) {
  //%%note jvm_r11
  INTERRUPTIBLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes,\
    flags, from, fromlen), os::Solaris::clear_interrupted);
}
6446 6446
// Interruptible wrapper over sendto(2); datagram counterpart of os::send().
int os::sendto(int fd, char *buf, int len, int flags,
               struct sockaddr *to, int tolen) {
  //%%note jvm_r11
  INTERRUPTIBLE_RETURN_INT((int)::sendto(fd, buf, len, flags,\
    to, tolen), os::Solaris::clear_interrupted);
}
6453 6453
// Query the number of bytes available to read on a socket via
// ioctl(FIONREAD), storing the count in *pbytes.
int os::socket_available(int fd, jint *pbytes) {
  // Negative fd: bail out early.  Returns OS_OK here — given the 0-failure /
  // 1-success contract below, presumably OS_OK == 0, i.e. "failure"; confirm.
  if (fd < 0)
    return OS_OK;

  int ret;

  RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);

  //%% note ioctl can return 0 when successful, JVM_SocketAvailable
  // is expected to return 0 on failure and 1 on success to the jdk.

  return (ret == OS_ERR) ? 0 : 1;
}
6467 6467
6468 6468
// Interruptible, non-restarting wrapper over bind(2): per the macro's name
// the call is not retried after EINTR — confirm against the macro definition.
int os::bind(int fd, struct sockaddr *him, int len) {
  INTERRUPTIBLE_RETURN_INT_NORESTART(::bind(fd, him, len),\
    os::Solaris::clear_interrupted);
}
↓ open down ↓ |
3638 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX