rev 2870 : 7117303: VM uses non-monotonic time source and complains that it is non-monotonic
Summary: Replaces calls to os::javaTimeMillis(), which does not guarantee monotonicity, in GC code with calls to os::javaTimeNanos() and a suitable conversion factor. os::javaTimeNanos() is guaranteed to be monotonic if the underlying platform provides a monotonic time source. The changes in the OS files make use of the newly defined constants in globalDefinitions.hpp.
Reviewed-by: dholmes
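For context, the conversion the GC code moves to looks roughly like the sketch below. This is a minimal illustration only, not part of the patch, and it assumes NANOSECS_PER_MILLISEC is one of the newly defined constants in globalDefinitions.hpp:

    // Sketch: derive a monotonic millisecond reading from os::javaTimeNanos().
    // os::javaTimeNanos() is monotonic wherever the platform provides a
    // monotonic time source, unlike os::javaTimeMillis(), which can jump when
    // the wall clock is adjusted.
    jlong monotonic_time_millis() {
      return os::javaTimeNanos() / NANOSECS_PER_MILLISEC;  // assumed constant name
    }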
--- old/src/os/solaris/vm/os_solaris.cpp
+++ new/src/os/solaris/vm/os_solaris.cpp
1 1 /*
2 2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 // no precompiled headers
26 26 #include "classfile/classLoader.hpp"
27 27 #include "classfile/systemDictionary.hpp"
28 28 #include "classfile/vmSymbols.hpp"
29 29 #include "code/icBuffer.hpp"
30 30 #include "code/vtableStubs.hpp"
31 31 #include "compiler/compileBroker.hpp"
32 32 #include "interpreter/interpreter.hpp"
33 33 #include "jvm_solaris.h"
34 34 #include "memory/allocation.inline.hpp"
35 35 #include "memory/filemap.hpp"
36 36 #include "mutex_solaris.inline.hpp"
37 37 #include "oops/oop.inline.hpp"
38 38 #include "os_share_solaris.hpp"
39 39 #include "prims/jniFastGetField.hpp"
40 40 #include "prims/jvm.h"
41 41 #include "prims/jvm_misc.hpp"
42 42 #include "runtime/arguments.hpp"
43 43 #include "runtime/extendedPC.hpp"
44 44 #include "runtime/globals.hpp"
45 45 #include "runtime/interfaceSupport.hpp"
46 46 #include "runtime/java.hpp"
47 47 #include "runtime/javaCalls.hpp"
48 48 #include "runtime/mutexLocker.hpp"
49 49 #include "runtime/objectMonitor.hpp"
50 50 #include "runtime/osThread.hpp"
51 51 #include "runtime/perfMemory.hpp"
52 52 #include "runtime/sharedRuntime.hpp"
53 53 #include "runtime/statSampler.hpp"
54 54 #include "runtime/stubRoutines.hpp"
55 55 #include "runtime/threadCritical.hpp"
56 56 #include "runtime/timer.hpp"
57 57 #include "services/attachListener.hpp"
58 58 #include "services/runtimeService.hpp"
59 59 #include "thread_solaris.inline.hpp"
60 60 #include "utilities/decoder.hpp"
61 61 #include "utilities/defaultStream.hpp"
62 62 #include "utilities/events.hpp"
63 63 #include "utilities/growableArray.hpp"
64 64 #include "utilities/vmError.hpp"
65 65 #ifdef TARGET_ARCH_x86
66 66 # include "assembler_x86.inline.hpp"
67 67 # include "nativeInst_x86.hpp"
68 68 #endif
69 69 #ifdef TARGET_ARCH_sparc
70 70 # include "assembler_sparc.inline.hpp"
71 71 # include "nativeInst_sparc.hpp"
72 72 #endif
73 73 #ifdef COMPILER1
74 74 #include "c1/c1_Runtime1.hpp"
75 75 #endif
76 76 #ifdef COMPILER2
77 77 #include "opto/runtime.hpp"
78 78 #endif
79 79
80 80 // put OS-includes here
81 81 # include <dlfcn.h>
82 82 # include <errno.h>
83 83 # include <exception>
84 84 # include <link.h>
85 85 # include <poll.h>
86 86 # include <pthread.h>
87 87 # include <pwd.h>
88 88 # include <schedctl.h>
89 89 # include <setjmp.h>
90 90 # include <signal.h>
91 91 # include <stdio.h>
92 92 # include <alloca.h>
93 93 # include <sys/filio.h>
94 94 # include <sys/ipc.h>
95 95 # include <sys/lwp.h>
96 96 # include <sys/machelf.h> // for elf Sym structure used by dladdr1
97 97 # include <sys/mman.h>
98 98 # include <sys/processor.h>
99 99 # include <sys/procset.h>
100 100 # include <sys/pset.h>
101 101 # include <sys/resource.h>
102 102 # include <sys/shm.h>
103 103 # include <sys/socket.h>
104 104 # include <sys/stat.h>
105 105 # include <sys/systeminfo.h>
106 106 # include <sys/time.h>
107 107 # include <sys/times.h>
108 108 # include <sys/types.h>
109 109 # include <sys/wait.h>
110 110 # include <sys/utsname.h>
111 111 # include <thread.h>
112 112 # include <unistd.h>
113 113 # include <sys/priocntl.h>
114 114 # include <sys/rtpriocntl.h>
115 115 # include <sys/tspriocntl.h>
116 116 # include <sys/iapriocntl.h>
117 117 # include <sys/loadavg.h>
118 118 # include <string.h>
119 119 # include <stdio.h>
120 120
121 121 # define _STRUCTURED_PROC 1 // this gets us the new structured proc interfaces of 5.6 & later
122 122 # include <sys/procfs.h> // see comment in <sys/procfs.h>
123 123
124 124 #define MAX_PATH (2 * K)
125 125
126 126 // for timer info max values which include all bits
127 127 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
128 128
129 129 #ifdef _GNU_SOURCE
130 130 // See bug #6514594
131 131 extern "C" int madvise(caddr_t, size_t, int);
132 132 extern "C" int memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg,
133 133 int attr, int mask);
134 134 #endif //_GNU_SOURCE
135 135
136 136 /*
137 137 MPSS Changes Start.
138 138 The JVM binary needs to be built and run on pre-Solaris 9
139 139 systems, but the constants needed by MPSS are only in Solaris 9
140 140 header files. They are textually replicated here to allow
141 141 building on earlier systems. Once building on Solaris 8 is
142 142 no longer a requirement, these #defines can be replaced by ordinary
143 143 system .h inclusion.
144 144
145 145 In earlier versions of the JDK and Solaris, we used ISM for large pages.
146 146 But ISM requires shared memory to achieve this and thus has many caveats.
147 147 MPSS is fully transparent and is a cleaner way to get large pages.
148 148 Although we still require keeping ISM for backward compatibility, as well as
149 149 giving the opportunity to use large pages on older systems, it is
150 150 recommended that MPSS be used for Solaris 9 and above.
151 151
152 152 */
153 153
154 154 #ifndef MC_HAT_ADVISE
155 155
156 156 struct memcntl_mha {
157 157 uint_t mha_cmd; /* command(s) */
158 158 uint_t mha_flags;
159 159 size_t mha_pagesize;
160 160 };
161 161 #define MC_HAT_ADVISE 7 /* advise hat map size */
162 162 #define MHA_MAPSIZE_VA 0x1 /* set preferred page size */
163 163 #define MAP_ALIGN 0x200 /* addr specifies alignment */
164 164
165 165 #endif
166 166 // MPSS Changes End.
167 167
168 168
169 169 // Here are some liblgrp types from sys/lgrp_user.h to be able to
170 170 // compile on older systems without this header file.
171 171
172 172 #ifndef MADV_ACCESS_LWP
173 173 # define MADV_ACCESS_LWP 7 /* next LWP to access heavily */
174 174 #endif
175 175 #ifndef MADV_ACCESS_MANY
176 176 # define MADV_ACCESS_MANY 8 /* many processes to access heavily */
177 177 #endif
178 178
179 179 #ifndef LGRP_RSRC_CPU
180 180 # define LGRP_RSRC_CPU 0 /* CPU resources */
181 181 #endif
182 182 #ifndef LGRP_RSRC_MEM
183 183 # define LGRP_RSRC_MEM 1 /* memory resources */
184 184 #endif
185 185
186 186 // Some more macros from sys/mman.h that are not present in Solaris 8.
187 187
188 188 #ifndef MAX_MEMINFO_CNT
189 189 /*
190 190 * info_req request type definitions for meminfo
191 191 * request types starting with MEMINFO_V are used for Virtual addresses
192 192 * and should not be mixed with MEMINFO_PLGRP which is targeted for Physical
193 193 * addresses
194 194 */
195 195 # define MEMINFO_SHIFT 16
196 196 # define MEMINFO_MASK (0xFF << MEMINFO_SHIFT)
197 197 # define MEMINFO_VPHYSICAL (0x01 << MEMINFO_SHIFT) /* get physical addr */
198 198 # define MEMINFO_VLGRP (0x02 << MEMINFO_SHIFT) /* get lgroup */
199 199 # define MEMINFO_VPAGESIZE (0x03 << MEMINFO_SHIFT) /* size of phys page */
200 200 # define MEMINFO_VREPLCNT (0x04 << MEMINFO_SHIFT) /* no. of replica */
201 201 # define MEMINFO_VREPL (0x05 << MEMINFO_SHIFT) /* physical replica */
202 202 # define MEMINFO_VREPL_LGRP (0x06 << MEMINFO_SHIFT) /* lgrp of replica */
203 203 # define MEMINFO_PLGRP (0x07 << MEMINFO_SHIFT) /* lgroup for paddr */
204 204
205 205 /* maximum number of addresses meminfo() can process at a time */
206 206 # define MAX_MEMINFO_CNT 256
207 207
208 208 /* maximum number of request types */
209 209 # define MAX_MEMINFO_REQ 31
210 210 #endif
211 211
212 212 // see thr_setprio(3T) for the basis of these numbers
213 213 #define MinimumPriority 0
214 214 #define NormalPriority 64
215 215 #define MaximumPriority 127
216 216
217 217 // Values for ThreadPriorityPolicy == 1
218 218 int prio_policy1[MaxPriority+1] = { -99999, 0, 16, 32, 48, 64,
219 219 80, 96, 112, 124, 127 };
220 220
221 221 // System parameters used internally
222 222 static clock_t clock_tics_per_sec = 100;
223 223
224 224 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
225 225 static bool enabled_extended_FILE_stdio = false;
226 226
227 227 // For diagnostics to print a message once. see run_periodic_checks
228 228 static bool check_addr0_done = false;
229 229 static sigset_t check_signal_done;
230 230 static bool check_signals = true;
231 231
232 232 address os::Solaris::handler_start; // start pc of thr_sighndlrinfo
233 233 address os::Solaris::handler_end; // end pc of thr_sighndlrinfo
234 234
235 235 address os::Solaris::_main_stack_base = NULL; // 4352906 workaround
236 236
237 237
238 238 // "default" initializers for missing libc APIs
239 239 extern "C" {
240 240 static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
241 241 static int lwp_mutex_destroy(mutex_t *mx) { return 0; }
242 242
243 243 static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
244 244 static int lwp_cond_destroy(cond_t *cv) { return 0; }
245 245 }
246 246
247 247 // "default" initializers for pthread-based synchronization
248 248 extern "C" {
249 249 static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
250 250 static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
251 251 }
252 252
253 253 // Thread Local Storage
254 254 // This is common to all Solaris platforms so it is defined here,
255 255 // in this common file.
256 256 // The declarations are in the os_cpu threadLS*.hpp files.
257 257 //
258 258 // Static member initialization for TLS
259 259 Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};
260 260
261 261 #ifndef PRODUCT
262 262 #define _PCT(n,d) ((100.0*(double)(n))/(double)(d))
263 263
264 264 int ThreadLocalStorage::_tcacheHit = 0;
265 265 int ThreadLocalStorage::_tcacheMiss = 0;
266 266
267 267 void ThreadLocalStorage::print_statistics() {
268 268 int total = _tcacheMiss+_tcacheHit;
269 269 tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
270 270 _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
271 271 }
272 272 #undef _PCT
273 273 #endif // PRODUCT
274 274
275 275 Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
276 276 int index) {
277 277 Thread *thread = get_thread_slow();
278 278 if (thread != NULL) {
279 279 address sp = os::current_stack_pointer();
280 280 guarantee(thread->_stack_base == NULL ||
281 281 (sp <= thread->_stack_base &&
282 282 sp >= thread->_stack_base - thread->_stack_size) ||
283 283 is_error_reported(),
284 284 "sp must be inside of selected thread stack");
285 285
286 286 thread->set_self_raw_id(raw_id); // mark for quick retrieval
287 287 _get_thread_cache[ index ] = thread;
288 288 }
289 289 return thread;
290 290 }
291 291
292 292
293 293 static const double all_zero[ sizeof(Thread) / sizeof(double) + 1 ] = {0};
294 294 #define NO_CACHED_THREAD ((Thread*)all_zero)
295 295
296 296 void ThreadLocalStorage::pd_set_thread(Thread* thread) {
297 297
298 298 // Store the new value before updating the cache to prevent a race
299 299 // between get_thread_via_cache_slowly() and this store operation.
300 300 os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
301 301
302 302 // Update thread cache with new thread if setting on thread create,
303 303 // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
304 304 uintptr_t raw = pd_raw_thread_id();
305 305 int ix = pd_cache_index(raw);
306 306 _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
307 307 }
308 308
309 309 void ThreadLocalStorage::pd_init() {
310 310 for (int i = 0; i < _pd_cache_size; i++) {
311 311 _get_thread_cache[i] = NO_CACHED_THREAD;
312 312 }
313 313 }
314 314
315 315 // Invalidate all the caches (happens to be the same as pd_init).
316 316 void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }
317 317
318 318 #undef NO_CACHED_THREAD
319 319
320 320 // END Thread Local Storage
321 321
322 322 static inline size_t adjust_stack_size(address base, size_t size) {
323 323 if ((ssize_t)size < 0) {
324 324 // 4759953: Compensate for ridiculous stack size.
325 325 size = max_intx;
326 326 }
327 327 if (size > (size_t)base) {
328 328 // 4812466: Make sure size doesn't allow the stack to wrap the address space.
329 329 size = (size_t)base;
330 330 }
331 331 return size;
332 332 }
333 333
334 334 static inline stack_t get_stack_info() {
335 335 stack_t st;
336 336 int retval = thr_stksegment(&st);
337 337 st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
338 338 assert(retval == 0, "incorrect return value from thr_stksegment");
339 339 assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
340 340 assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
341 341 return st;
342 342 }
343 343
344 344 address os::current_stack_base() {
345 345 int r = thr_main() ;
346 346 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
347 347 bool is_primordial_thread = r;
348 348
349 349 // Workaround 4352906, avoid calls to thr_stksegment by
350 350 // thr_main after the first one (it looks like we trash
351 351 // some data, causing the value for ss_sp to be incorrect).
352 352 if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
353 353 stack_t st = get_stack_info();
354 354 if (is_primordial_thread) {
355 355 // cache initial value of stack base
356 356 os::Solaris::_main_stack_base = (address)st.ss_sp;
357 357 }
358 358 return (address)st.ss_sp;
359 359 } else {
360 360 guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
361 361 return os::Solaris::_main_stack_base;
362 362 }
363 363 }
364 364
365 365 size_t os::current_stack_size() {
366 366 size_t size;
367 367
368 368 int r = thr_main() ;
369 369 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
370 370 if(!r) {
371 371 size = get_stack_info().ss_size;
372 372 } else {
373 373 struct rlimit limits;
374 374 getrlimit(RLIMIT_STACK, &limits);
375 375 size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
376 376 }
377 377 // base may not be page aligned
378 378 address base = current_stack_base();
379 379 address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());
380 380 return (size_t)(base - bottom);
381 381 }
382 382
383 383 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
384 384 return localtime_r(clock, res);
385 385 }
386 386
387 387 // interruptible infrastructure
388 388
389 389 // setup_interruptible saves the thread state before going into an
390 390 // interruptible system call.
391 391 // The saved state is used to restore the thread to
392 392 // its former state whether or not an interrupt is received.
393 393 // Used by classloader os::read
394 394 // os::restartable_read calls skip this layer and stay in _thread_in_native
395 395
396 396 void os::Solaris::setup_interruptible(JavaThread* thread) {
397 397
398 398 JavaThreadState thread_state = thread->thread_state();
399 399
400 400 assert(thread_state != _thread_blocked, "Coming from the wrong thread");
401 401 assert(thread_state != _thread_in_native, "Native threads skip setup_interruptible");
402 402 OSThread* osthread = thread->osthread();
403 403 osthread->set_saved_interrupt_thread_state(thread_state);
404 404 thread->frame_anchor()->make_walkable(thread);
405 405 ThreadStateTransition::transition(thread, thread_state, _thread_blocked);
406 406 }
407 407
408 408 // Version of setup_interruptible() for threads that are already in
409 409 // _thread_blocked. Used by os_sleep().
410 410 void os::Solaris::setup_interruptible_already_blocked(JavaThread* thread) {
411 411 thread->frame_anchor()->make_walkable(thread);
412 412 }
413 413
414 414 JavaThread* os::Solaris::setup_interruptible() {
415 415 JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
416 416 setup_interruptible(thread);
417 417 return thread;
418 418 }
419 419
420 420 void os::Solaris::try_enable_extended_io() {
421 421 typedef int (*enable_extended_FILE_stdio_t)(int, int);
422 422
423 423 if (!UseExtendedFileIO) {
424 424 return;
425 425 }
426 426
427 427 enable_extended_FILE_stdio_t enabler =
428 428 (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
429 429 "enable_extended_FILE_stdio");
430 430 if (enabler) {
431 431 enabler(-1, -1);
432 432 }
433 433 }
434 434
435 435
436 436 #ifdef ASSERT
437 437
438 438 JavaThread* os::Solaris::setup_interruptible_native() {
439 439 JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
440 440 JavaThreadState thread_state = thread->thread_state();
441 441 assert(thread_state == _thread_in_native, "Assumed thread_in_native");
442 442 return thread;
443 443 }
444 444
445 445 void os::Solaris::cleanup_interruptible_native(JavaThread* thread) {
446 446 JavaThreadState thread_state = thread->thread_state();
447 447 assert(thread_state == _thread_in_native, "Assumed thread_in_native");
448 448 }
449 449 #endif
450 450
451 451 // cleanup_interruptible reverses the effects of setup_interruptible
452 452 // setup_interruptible_already_blocked() does not need any cleanup.
453 453
454 454 void os::Solaris::cleanup_interruptible(JavaThread* thread) {
455 455 OSThread* osthread = thread->osthread();
456 456
457 457 ThreadStateTransition::transition(thread, _thread_blocked, osthread->saved_interrupt_thread_state());
458 458 }
459 459
460 460 // I/O interruption related counters called in _INTERRUPTIBLE
461 461
462 462 void os::Solaris::bump_interrupted_before_count() {
463 463 RuntimeService::record_interrupted_before_count();
464 464 }
465 465
466 466 void os::Solaris::bump_interrupted_during_count() {
467 467 RuntimeService::record_interrupted_during_count();
468 468 }
469 469
470 470 static int _processors_online = 0;
471 471
472 472 jint os::Solaris::_os_thread_limit = 0;
473 473 volatile jint os::Solaris::_os_thread_count = 0;
474 474
475 475 julong os::available_memory() {
476 476 return Solaris::available_memory();
477 477 }
478 478
479 479 julong os::Solaris::available_memory() {
480 480 return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
481 481 }
482 482
483 483 julong os::Solaris::_physical_memory = 0;
484 484
485 485 julong os::physical_memory() {
486 486 return Solaris::physical_memory();
487 487 }
488 488
489 489 julong os::allocatable_physical_memory(julong size) {
490 490 #ifdef _LP64
491 491 return size;
492 492 #else
493 493 julong result = MIN2(size, (julong)3835*M);
494 494 if (!is_allocatable(result)) {
495 495 // Memory allocations will be aligned but the alignment
496 496 // is not known at this point. Alignments will
497 497 // be at most to LargePageSizeInBytes. Protect
498 498 // allocations from alignments up to illegal
499 499 // values. If at this point 2G is illegal.
500 500 julong reasonable_size = (julong)2*G - 2 * LargePageSizeInBytes;
501 501 result = MIN2(size, reasonable_size);
502 502 }
503 503 return result;
504 504 #endif
505 505 }
506 506
507 507 static hrtime_t first_hrtime = 0;
508 508 static const hrtime_t hrtime_hz = 1000*1000*1000;
509 509 const int LOCK_BUSY = 1;
510 510 const int LOCK_FREE = 0;
511 511 const int LOCK_INVALID = -1;
512 512 static volatile hrtime_t max_hrtime = 0;
513 513 static volatile int max_hrtime_lock = LOCK_FREE; // Update counter with LSB as lock-in-progress
514 514
515 515
516 516 void os::Solaris::initialize_system_info() {
517 517 set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
518 518 _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
519 519 _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
520 520 }
521 521
522 522 int os::active_processor_count() {
523 523 int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
524 524 pid_t pid = getpid();
525 525 psetid_t pset = PS_NONE;
526 526 // Are we running in a processor set or is there any processor set around?
527 527 if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
528 528 uint_t pset_cpus;
529 529 // Query the number of cpus available to us.
530 530 if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
531 531 assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
532 532 _processors_online = pset_cpus;
533 533 return pset_cpus;
534 534 }
535 535 }
536 536 // Otherwise return number of online cpus
537 537 return online_cpus;
538 538 }
539 539
540 540 static bool find_processors_in_pset(psetid_t pset,
541 541 processorid_t** id_array,
542 542 uint_t* id_length) {
543 543 bool result = false;
544 544 // Find the number of processors in the processor set.
545 545 if (pset_info(pset, NULL, id_length, NULL) == 0) {
546 546 // Make up an array to hold their ids.
547 547 *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length);
548 548 // Fill in the array with their processor ids.
549 549 if (pset_info(pset, NULL, id_length, *id_array) == 0) {
550 550 result = true;
551 551 }
552 552 }
553 553 return result;
554 554 }
555 555
556 556 // Callers of find_processors_online() must tolerate imprecise results --
557 557 // the system configuration can change asynchronously because of DR
558 558 // or explicit psradm operations.
559 559 //
560 560 // We also need to take care that the loop (below) terminates as the
561 561 // number of processors online can change between the _SC_NPROCESSORS_ONLN
562 562 // request and the loop that builds the list of processor ids. Unfortunately
563 563 // there's no reliable way to determine the maximum valid processor id,
564 564 // so we use a manifest constant, MAX_PROCESSOR_ID, instead. See p_online
565 565 // man pages, which claim the processor id set is "sparse, but
566 566 // not too sparse". MAX_PROCESSOR_ID is used to ensure that we eventually
567 567 // exit the loop.
568 568 //
569 569 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
570 570 // not available on S8.0.
571 571
572 572 static bool find_processors_online(processorid_t** id_array,
573 573 uint* id_length) {
574 574 const processorid_t MAX_PROCESSOR_ID = 100000 ;
575 575 // Find the number of processors online.
576 576 *id_length = sysconf(_SC_NPROCESSORS_ONLN);
577 577 // Make up an array to hold their ids.
578 578 *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length);
579 579 // Processors need not be numbered consecutively.
580 580 long found = 0;
581 581 processorid_t next = 0;
582 582 while (found < *id_length && next < MAX_PROCESSOR_ID) {
583 583 processor_info_t info;
584 584 if (processor_info(next, &info) == 0) {
585 585 // NB, PI_NOINTR processors are effectively online ...
586 586 if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
587 587 (*id_array)[found] = next;
588 588 found += 1;
589 589 }
590 590 }
591 591 next += 1;
592 592 }
593 593 if (found < *id_length) {
594 594 // The loop above didn't identify the expected number of processors.
595 595 // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
596 596 // and re-running the loop, above, but there's no guarantee of progress
597 597 // if the system configuration is in flux. Instead, we just return what
598 598 // we've got. Note that in the worst case find_processors_online() could
599 599 // return an empty set. (As a fall-back in the case of the empty set we
600 600 // could just return the ID of the current processor).
601 601 *id_length = found ;
602 602 }
603 603
604 604 return true;
605 605 }
606 606
607 607 static bool assign_distribution(processorid_t* id_array,
608 608 uint id_length,
609 609 uint* distribution,
610 610 uint distribution_length) {
611 611 // We assume we can assign processorid_t's to uint's.
612 612 assert(sizeof(processorid_t) == sizeof(uint),
613 613 "can't convert processorid_t to uint");
614 614 // Quick check to see if we won't succeed.
615 615 if (id_length < distribution_length) {
616 616 return false;
617 617 }
618 618 // Assign processor ids to the distribution.
619 619 // Try to shuffle processors to distribute work across boards,
620 620 // assuming 4 processors per board.
621 621 const uint processors_per_board = ProcessDistributionStride;
622 622 // Find the maximum processor id.
623 623 processorid_t max_id = 0;
624 624 for (uint m = 0; m < id_length; m += 1) {
625 625 max_id = MAX2(max_id, id_array[m]);
626 626 }
627 627 // The next id, to limit loops.
628 628 const processorid_t limit_id = max_id + 1;
629 629 // Make up markers for available processors.
630 630 bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id);
631 631 for (uint c = 0; c < limit_id; c += 1) {
632 632 available_id[c] = false;
633 633 }
634 634 for (uint a = 0; a < id_length; a += 1) {
635 635 available_id[id_array[a]] = true;
636 636 }
637 637 // Step by "boards", then by "slot", copying to "assigned".
638 638 // NEEDS_CLEANUP: The assignment of processors should be stateful,
639 639 // remembering which processors have been assigned by
640 640 // previous calls, etc., so as to distribute several
641 641 // independent calls of this method.
642 642 // It would be nice to have an API that lets us ask
643 643 // how many processes are bound to a processor,
644 644 // but we don't have that, either.
645 645 // In the short term, "board" is static so that
646 646 // subsequent distributions don't all start at board 0.
647 647 static uint board = 0;
648 648 uint assigned = 0;
649 649 // Until we've found enough processors ....
650 650 while (assigned < distribution_length) {
651 651 // ... find the next available processor in the board.
652 652 for (uint slot = 0; slot < processors_per_board; slot += 1) {
653 653 uint try_id = board * processors_per_board + slot;
654 654 if ((try_id < limit_id) && (available_id[try_id] == true)) {
655 655 distribution[assigned] = try_id;
656 656 available_id[try_id] = false;
657 657 assigned += 1;
658 658 break;
659 659 }
660 660 }
661 661 board += 1;
662 662 if (board * processors_per_board + 0 >= limit_id) {
663 663 board = 0;
664 664 }
665 665 }
666 666 if (available_id != NULL) {
667 667 FREE_C_HEAP_ARRAY(bool, available_id);
668 668 }
669 669 return true;
670 670 }
671 671
672 672 void os::set_native_thread_name(const char *name) {
673 673 // Not yet implemented.
674 674 return;
675 675 }
676 676
677 677 bool os::distribute_processes(uint length, uint* distribution) {
678 678 bool result = false;
679 679 // Find the processor id's of all the available CPUs.
680 680 processorid_t* id_array = NULL;
681 681 uint id_length = 0;
682 682 // There are some races between querying information and using it,
683 683 // since processor sets can change dynamically.
684 684 psetid_t pset = PS_NONE;
685 685 // Are we running in a processor set?
686 686 if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
687 687 result = find_processors_in_pset(pset, &id_array, &id_length);
688 688 } else {
689 689 result = find_processors_online(&id_array, &id_length);
690 690 }
691 691 if (result == true) {
692 692 if (id_length >= length) {
693 693 result = assign_distribution(id_array, id_length, distribution, length);
694 694 } else {
695 695 result = false;
696 696 }
697 697 }
698 698 if (id_array != NULL) {
699 699 FREE_C_HEAP_ARRAY(processorid_t, id_array);
700 700 }
701 701 return result;
702 702 }
703 703
704 704 bool os::bind_to_processor(uint processor_id) {
705 705 // We assume that a processorid_t can be stored in a uint.
706 706 assert(sizeof(uint) == sizeof(processorid_t),
707 707 "can't convert uint to processorid_t");
708 708 int bind_result =
709 709 processor_bind(P_LWPID, // bind LWP.
710 710 P_MYID, // bind current LWP.
711 711 (processorid_t) processor_id, // id.
712 712 NULL); // don't return old binding.
713 713 return (bind_result == 0);
714 714 }
715 715
716 716 bool os::getenv(const char* name, char* buffer, int len) {
717 717 char* val = ::getenv( name );
718 718 if ( val == NULL
719 719 || strlen(val) + 1 > len ) {
720 720 if (len > 0) buffer[0] = 0; // return a null string
721 721 return false;
722 722 }
723 723 strcpy( buffer, val );
724 724 return true;
725 725 }
726 726
727 727
728 728 // Return true if the process has special privileges (real and effective ids differ).
729 729
730 730 bool os::have_special_privileges() {
731 731 static bool init = false;
732 732 static bool privileges = false;
733 733 if (!init) {
734 734 privileges = (getuid() != geteuid()) || (getgid() != getegid());
735 735 init = true;
736 736 }
737 737 return privileges;
738 738 }
739 739
740 740
741 741 void os::init_system_properties_values() {
742 742 char arch[12];
743 743 sysinfo(SI_ARCHITECTURE, arch, sizeof(arch));
744 744
745 745 // The next steps are taken in the product version:
746 746 //
747 747 // Obtain the JAVA_HOME value from the location of libjvm[_g].so.
748 748 // This library should be located at:
749 749 // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm[_g].so.
750 750 //
751 751 // If "/jre/lib/" appears at the right place in the path, then we
752 752 // assume libjvm[_g].so is installed in a JDK and we use this path.
753 753 //
754 754 // Otherwise exit with message: "Could not create the Java virtual machine."
755 755 //
756 756 // The following extra steps are taken in the debugging version:
757 757 //
758 758 // If "/jre/lib/" does NOT appear at the right place in the path
759 759 // instead of exit check for $JAVA_HOME environment variable.
760 760 //
761 761 // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
762 762 // then we append a fake suffix "hotspot/libjvm[_g].so" to this path so
763 763 // it looks like libjvm[_g].so is installed there
764 764 // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm[_g].so.
765 765 //
766 766 // Otherwise exit.
767 767 //
768 768 // Important note: if the location of libjvm.so changes this
769 769 // code needs to be changed accordingly.
770 770
771 771 // The next few definitions allow the code to be verbatim:
772 772 #define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n))
773 773 #define free(p) FREE_C_HEAP_ARRAY(char, p)
774 774 #define getenv(n) ::getenv(n)
775 775
776 776 #define EXTENSIONS_DIR "/lib/ext"
777 777 #define ENDORSED_DIR "/lib/endorsed"
778 778 #define COMMON_DIR "/usr/jdk/packages"
779 779
780 780 {
781 781 /* sysclasspath, java_home, dll_dir */
782 782 {
783 783 char *home_path;
784 784 char *dll_path;
785 785 char *pslash;
786 786 char buf[MAXPATHLEN];
787 787 os::jvm_path(buf, sizeof(buf));
788 788
789 789 // Found the full path to libjvm.so.
790 790 // Now cut the path to <java_home>/jre if we can.
791 791 *(strrchr(buf, '/')) = '\0'; /* get rid of /libjvm.so */
792 792 pslash = strrchr(buf, '/');
793 793 if (pslash != NULL)
794 794 *pslash = '\0'; /* get rid of /{client|server|hotspot} */
795 795 dll_path = malloc(strlen(buf) + 1);
796 796 if (dll_path == NULL)
797 797 return;
798 798 strcpy(dll_path, buf);
799 799 Arguments::set_dll_dir(dll_path);
800 800
801 801 if (pslash != NULL) {
802 802 pslash = strrchr(buf, '/');
803 803 if (pslash != NULL) {
804 804 *pslash = '\0'; /* get rid of /<arch> */
805 805 pslash = strrchr(buf, '/');
806 806 if (pslash != NULL)
807 807 *pslash = '\0'; /* get rid of /lib */
808 808 }
809 809 }
810 810
811 811 home_path = malloc(strlen(buf) + 1);
812 812 if (home_path == NULL)
813 813 return;
814 814 strcpy(home_path, buf);
815 815 Arguments::set_java_home(home_path);
816 816
817 817 if (!set_boot_path('/', ':'))
818 818 return;
819 819 }
820 820
821 821 /*
822 822 * Where to look for native libraries
823 823 */
824 824 {
825 825 // Use dlinfo() to determine the correct java.library.path.
826 826 //
827 827 // If we're launched by the Java launcher, and the user
828 828 // does not set java.library.path explicitly on the commandline,
829 829 // the Java launcher sets LD_LIBRARY_PATH for us and unsets
830 830 // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64. In this case
831 831 // dlinfo returns LD_LIBRARY_PATH + crle settings (including
832 832 // /usr/lib), which is exactly what we want.
833 833 //
834 834 // If the user does set java.library.path, it completely
835 835 // overwrites this setting, and always has.
836 836 //
837 837 // If we're not launched by the Java launcher, we may
838 838 // get here with any/all of the LD_LIBRARY_PATH[_32|64]
839 839 // settings. Again, dlinfo does exactly what we want.
840 840
841 841 Dl_serinfo _info, *info = &_info;
842 842 Dl_serpath *path;
843 843 char* library_path;
844 844 char *common_path;
845 845 int i;
846 846
847 847 // determine search path count and required buffer size
848 848 if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
849 849 vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
850 850 }
851 851
852 852 // allocate new buffer and initialize
853 853 info = (Dl_serinfo*)malloc(_info.dls_size);
854 854 if (info == NULL) {
855 855 vm_exit_out_of_memory(_info.dls_size,
856 856 "init_system_properties_values info");
857 857 }
858 858 info->dls_size = _info.dls_size;
859 859 info->dls_cnt = _info.dls_cnt;
860 860
861 861 // obtain search path information
862 862 if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
863 863 free(info);
864 864 vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
865 865 }
866 866
867 867 path = &info->dls_serpath[0];
868 868
869 869 // Note: Due to a legacy implementation, most of the library path
870 870 // is set in the launcher. This was to accommodate linking restrictions
871 871 // on legacy Solaris implementations (which are no longer supported).
872 872 // Eventually, all the library path setting will be done here.
873 873 //
874 874 // However, to prevent the proliferation of improperly built native
875 875 // libraries, the new path component /usr/jdk/packages is added here.
876 876
877 877 // Determine the actual CPU architecture.
878 878 char cpu_arch[12];
879 879 sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
880 880 #ifdef _LP64
881 881 // If we are a 64-bit vm, perform the following translations:
882 882 // sparc -> sparcv9
883 883 // i386 -> amd64
884 884 if (strcmp(cpu_arch, "sparc") == 0)
885 885 strcat(cpu_arch, "v9");
886 886 else if (strcmp(cpu_arch, "i386") == 0)
887 887 strcpy(cpu_arch, "amd64");
888 888 #endif
889 889
890 890 // Construct the invariant part of ld_library_path. Note that the
891 891 // space for the colon and the trailing null are provided by the
892 892 // nulls included by the sizeof operator.
893 893 size_t bufsize = sizeof(COMMON_DIR) + sizeof("/lib/") + strlen(cpu_arch);
894 894 common_path = malloc(bufsize);
895 895 if (common_path == NULL) {
896 896 free(info);
897 897 vm_exit_out_of_memory(bufsize,
898 898 "init_system_properties_values common_path");
899 899 }
900 900 sprintf(common_path, COMMON_DIR "/lib/%s", cpu_arch);
901 901
902 902 // struct size is more than sufficient for the path components obtained
903 903 // through the dlinfo() call, so only add additional space for the path
904 904 // components explicitly added here.
905 905 bufsize = info->dls_size + strlen(common_path);
906 906 library_path = malloc(bufsize);
907 907 if (library_path == NULL) {
908 908 free(info);
909 909 free(common_path);
910 910 vm_exit_out_of_memory(bufsize,
911 911 "init_system_properties_values library_path");
912 912 }
913 913 library_path[0] = '\0';
914 914
915 915 // Construct the desired Java library path from the linker's library
916 916 // search path.
917 917 //
918 918 // For compatibility, it is optimal that we insert the additional path
919 919 // components specific to the Java VM after those components specified
920 920 // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
921 921 // infrastructure.
922 922 if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it
923 923 strcpy(library_path, common_path);
924 924 } else {
925 925 int inserted = 0;
926 926 for (i = 0; i < info->dls_cnt; i++, path++) {
927 927 uint_t flags = path->dls_flags & LA_SER_MASK;
928 928 if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
929 929 strcat(library_path, common_path);
930 930 strcat(library_path, os::path_separator());
931 931 inserted = 1;
932 932 }
933 933 strcat(library_path, path->dls_name);
934 934 strcat(library_path, os::path_separator());
935 935 }
936 936 // eliminate trailing path separator
937 937 library_path[strlen(library_path)-1] = '\0';
938 938 }
939 939
940 940 // happens before argument parsing - can't use a trace flag
941 941 // tty->print_raw("init_system_properties_values: native lib path: ");
942 942 // tty->print_raw_cr(library_path);
943 943
944 944 // callee copies into its own buffer
945 945 Arguments::set_library_path(library_path);
946 946
947 947 free(common_path);
948 948 free(library_path);
949 949 free(info);
950 950 }
951 951
952 952 /*
953 953 * Extensions directories.
954 954 *
955 955 * Note that the space for the colon and the trailing null are provided
956 956 * by the nulls included by the sizeof operator (so actually one byte more
957 957 * than necessary is allocated).
958 958 */
959 959 {
960 960 char *buf = (char *) malloc(strlen(Arguments::get_java_home()) +
961 961 sizeof(EXTENSIONS_DIR) + sizeof(COMMON_DIR) +
962 962 sizeof(EXTENSIONS_DIR));
963 963 sprintf(buf, "%s" EXTENSIONS_DIR ":" COMMON_DIR EXTENSIONS_DIR,
964 964 Arguments::get_java_home());
965 965 Arguments::set_ext_dirs(buf);
966 966 }
967 967
968 968 /* Endorsed standards default directory. */
969 969 {
970 970 char * buf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
971 971 sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
972 972 Arguments::set_endorsed_dirs(buf);
973 973 }
974 974 }
975 975
976 976 #undef malloc
977 977 #undef free
978 978 #undef getenv
979 979 #undef EXTENSIONS_DIR
980 980 #undef ENDORSED_DIR
981 981 #undef COMMON_DIR
982 982
983 983 }
984 984
985 985 void os::breakpoint() {
986 986 BREAKPOINT;
987 987 }
988 988
989 989 bool os::obsolete_option(const JavaVMOption *option)
990 990 {
991 991 if (!strncmp(option->optionString, "-Xt", 3)) {
992 992 return true;
993 993 } else if (!strncmp(option->optionString, "-Xtm", 4)) {
994 994 return true;
995 995 } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
996 996 return true;
997 997 } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
998 998 return true;
999 999 }
1000 1000 return false;
1001 1001 }
1002 1002
1003 1003 bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
1004 1004 address stackStart = (address)thread->stack_base();
1005 1005 address stackEnd = (address)(stackStart - (address)thread->stack_size());
1006 1006 if (sp < stackStart && sp >= stackEnd ) return true;
1007 1007 return false;
1008 1008 }
1009 1009
1010 1010 extern "C" void breakpoint() {
1011 1011 // use debugger to set breakpoint here
1012 1012 }
1013 1013
1014 1014 // Returns an estimate of the current stack pointer. Result must be guaranteed to
1015 1015 // point into the calling threads stack, and be no lower than the current stack
1016 1016 // pointer.
1017 1017 address os::current_stack_pointer() {
1018 1018 volatile int dummy;
1019 1019 address sp = (address)&dummy + 8; // %%%% need to confirm if this is right
1020 1020 return sp;
1021 1021 }
1022 1022
1023 1023 static thread_t main_thread;
1024 1024
1025 1025 // Thread start routine for all new Java threads
1026 1026 extern "C" void* java_start(void* thread_addr) {
1027 1027 // Try to randomize the cache line index of hot stack frames.
1028 1028 // This helps when threads of the same stack traces evict each other's
1029 1029 // cache lines. The threads can be either from the same JVM instance, or
1030 1030 // from different JVM instances. The benefit is especially true for
1031 1031 // processors with hyperthreading technology.
1032 1032 static int counter = 0;
1033 1033 int pid = os::current_process_id();
1034 1034 alloca(((pid ^ counter++) & 7) * 128);
1035 1035
1036 1036 int prio;
1037 1037 Thread* thread = (Thread*)thread_addr;
1038 1038 OSThread* osthr = thread->osthread();
1039 1039
1040 1040 osthr->set_lwp_id( _lwp_self() ); // Store lwp in case we are bound
1041 1041 thread->_schedctl = (void *) schedctl_init () ;
1042 1042
1043 1043 if (UseNUMA) {
1044 1044 int lgrp_id = os::numa_get_group_id();
1045 1045 if (lgrp_id != -1) {
1046 1046 thread->set_lgrp_id(lgrp_id);
1047 1047 }
1048 1048 }
1049 1049
1050 1050 // If the creator called set priority before we started,
1051 1051 // we need to call set priority now that we have an lwp.
1052 1052 // Get the priority from libthread and set the priority
1053 1053 // for the new Solaris lwp.
1054 1054 if ( osthr->thread_id() != -1 ) {
1055 1055 if ( UseThreadPriorities ) {
1056 1056 thr_getprio(osthr->thread_id(), &prio);
1057 1057 if (ThreadPriorityVerbose) {
1058 1058 tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT ", setting priority: %d\n",
1059 1059 osthr->thread_id(), osthr->lwp_id(), prio );
1060 1060 }
1061 1061 os::set_native_priority(thread, prio);
1062 1062 }
1063 1063 } else if (ThreadPriorityVerbose) {
1064 1064 warning("Can't set priority in _start routine, thread id hasn't been set\n");
1065 1065 }
1066 1066
1067 1067 assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
1068 1068
1069 1069 // initialize signal mask for this thread
1070 1070 os::Solaris::hotspot_sigmask(thread);
1071 1071
1072 1072 thread->run();
1073 1073
1074 1074 // One less thread is executing
1075 1075 // When the VMThread gets here, the main thread may have already exited
1076 1076 // which frees the CodeHeap containing the Atomic::dec code
1077 1077 if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
1078 1078 Atomic::dec(&os::Solaris::_os_thread_count);
1079 1079 }
1080 1080
1081 1081 if (UseDetachedThreads) {
1082 1082 thr_exit(NULL);
1083 1083 ShouldNotReachHere();
1084 1084 }
1085 1085 return NULL;
1086 1086 }
1087 1087
1088 1088 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
1089 1089 // Allocate the OSThread object
1090 1090 OSThread* osthread = new OSThread(NULL, NULL);
1091 1091 if (osthread == NULL) return NULL;
1092 1092
1093 1093 // Store info on the Solaris thread into the OSThread
1094 1094 osthread->set_thread_id(thread_id);
1095 1095 osthread->set_lwp_id(_lwp_self());
1096 1096 thread->_schedctl = (void *) schedctl_init () ;
1097 1097
1098 1098 if (UseNUMA) {
1099 1099 int lgrp_id = os::numa_get_group_id();
1100 1100 if (lgrp_id != -1) {
1101 1101 thread->set_lgrp_id(lgrp_id);
1102 1102 }
1103 1103 }
1104 1104
1105 1105 if ( ThreadPriorityVerbose ) {
1106 1106 tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
1107 1107 osthread->thread_id(), osthread->lwp_id() );
1108 1108 }
1109 1109
1110 1110 // Initial thread state is INITIALIZED, not SUSPENDED
1111 1111 osthread->set_state(INITIALIZED);
1112 1112
1113 1113 return osthread;
1114 1114 }
1115 1115
1116 1116 void os::Solaris::hotspot_sigmask(Thread* thread) {
1117 1117
1118 1118 //Save caller's signal mask
1119 1119 sigset_t sigmask;
1120 1120 thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
1121 1121 OSThread *osthread = thread->osthread();
1122 1122 osthread->set_caller_sigmask(sigmask);
1123 1123
1124 1124 thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
1125 1125 if (!ReduceSignalUsage) {
1126 1126 if (thread->is_VM_thread()) {
1127 1127 // Only the VM thread handles BREAK_SIGNAL ...
1128 1128 thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
1129 1129 } else {
1130 1130 // ... all other threads block BREAK_SIGNAL
1131 1131 assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
1132 1132 thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
1133 1133 }
1134 1134 }
1135 1135 }
1136 1136
1137 1137 bool os::create_attached_thread(JavaThread* thread) {
1138 1138 #ifdef ASSERT
1139 1139 thread->verify_not_published();
1140 1140 #endif
1141 1141 OSThread* osthread = create_os_thread(thread, thr_self());
1142 1142 if (osthread == NULL) {
1143 1143 return false;
1144 1144 }
1145 1145
1146 1146 // Initial thread state is RUNNABLE
1147 1147 osthread->set_state(RUNNABLE);
1148 1148 thread->set_osthread(osthread);
1149 1149
1150 1150 // initialize signal mask for this thread
1151 1151 // and save the caller's signal mask
1152 1152 os::Solaris::hotspot_sigmask(thread);
1153 1153
1154 1154 return true;
1155 1155 }
1156 1156
1157 1157 bool os::create_main_thread(JavaThread* thread) {
1158 1158 #ifdef ASSERT
1159 1159 thread->verify_not_published();
1160 1160 #endif
1161 1161 if (_starting_thread == NULL) {
1162 1162 _starting_thread = create_os_thread(thread, main_thread);
1163 1163 if (_starting_thread == NULL) {
1164 1164 return false;
1165 1165 }
1166 1166 }
1167 1167
1168 1168 // The primordial thread is runnable from the start
1169 1169 _starting_thread->set_state(RUNNABLE);
1170 1170
1171 1171 thread->set_osthread(_starting_thread);
1172 1172
1173 1173 // initialize signal mask for this thread
1174 1174 // and save the caller's signal mask
1175 1175 os::Solaris::hotspot_sigmask(thread);
1176 1176
1177 1177 return true;
1178 1178 }
1179 1179
1180 1180 // _T2_libthread is true if we believe we are running with the newer
1181 1181 // SunSoft lwp/libthread.so (2.8 patch, 2.9 default)
1182 1182 bool os::Solaris::_T2_libthread = false;
1183 1183
1184 1184 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
1185 1185 // Allocate the OSThread object
1186 1186 OSThread* osthread = new OSThread(NULL, NULL);
1187 1187 if (osthread == NULL) {
1188 1188 return false;
1189 1189 }
1190 1190
1191 1191 if ( ThreadPriorityVerbose ) {
1192 1192 char *thrtyp;
1193 1193 switch ( thr_type ) {
1194 1194 case vm_thread:
1195 1195 thrtyp = (char *)"vm";
1196 1196 break;
1197 1197 case cgc_thread:
1198 1198 thrtyp = (char *)"cgc";
1199 1199 break;
1200 1200 case pgc_thread:
1201 1201 thrtyp = (char *)"pgc";
1202 1202 break;
1203 1203 case java_thread:
1204 1204 thrtyp = (char *)"java";
1205 1205 break;
1206 1206 case compiler_thread:
1207 1207 thrtyp = (char *)"compiler";
1208 1208 break;
1209 1209 case watcher_thread:
1210 1210 thrtyp = (char *)"watcher";
1211 1211 break;
1212 1212 default:
1213 1213 thrtyp = (char *)"unknown";
1214 1214 break;
1215 1215 }
1216 1216 tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
1217 1217 }
1218 1218
1219 1219 // Calculate stack size if it's not specified by caller.
1220 1220 if (stack_size == 0) {
1221 1221 // The default stack size is 1M (2M for LP64).
1222 1222 stack_size = (BytesPerWord >> 2) * K * K;
1223 1223
1224 1224 switch (thr_type) {
1225 1225 case os::java_thread:
1226 1226 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
1227 1227 if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
1228 1228 break;
1229 1229 case os::compiler_thread:
1230 1230 if (CompilerThreadStackSize > 0) {
1231 1231 stack_size = (size_t)(CompilerThreadStackSize * K);
1232 1232 break;
1233 1233 } // else fall through:
1234 1234 // use VMThreadStackSize if CompilerThreadStackSize is not defined
1235 1235 case os::vm_thread:
1236 1236 case os::pgc_thread:
1237 1237 case os::cgc_thread:
1238 1238 case os::watcher_thread:
1239 1239 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
1240 1240 break;
1241 1241 }
1242 1242 }
1243 1243 stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);
1244 1244
1245 1245 // Initial state is ALLOCATED but not INITIALIZED
1246 1246 osthread->set_state(ALLOCATED);
1247 1247
1248 1248 if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
1249 1249 // We got lots of threads. Check if we still have some address space left.
1250 1250 // Need to be at least 5Mb of unreserved address space. We do check by
1251 1251 // trying to reserve some.
1252 1252 const size_t VirtualMemoryBangSize = 20*K*K;
1253 1253 char* mem = os::reserve_memory(VirtualMemoryBangSize);
1254 1254 if (mem == NULL) {
1255 1255 delete osthread;
1256 1256 return false;
1257 1257 } else {
1258 1258 // Release the memory again
1259 1259 os::release_memory(mem, VirtualMemoryBangSize);
1260 1260 }
1261 1261 }
1262 1262
1263 1263 // Setup osthread because the child thread may need it.
1264 1264 thread->set_osthread(osthread);
1265 1265
1266 1266 // Create the Solaris thread
1267 1267 // explicit THR_BOUND for T2_libthread case in case
1268 1268 // that assumption is not accurate, but our alternate signal stack
1269 1269 // handling is based on it which must have bound threads
1270 1270 thread_t tid = 0;
1271 1271 long flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
1272 1272 | ((UseBoundThreads || os::Solaris::T2_libthread() ||
1273 1273 (thr_type == vm_thread) ||
1274 1274 (thr_type == cgc_thread) ||
1275 1275 (thr_type == pgc_thread) ||
1276 1276 (thr_type == compiler_thread && BackgroundCompilation)) ?
1277 1277 THR_BOUND : 0);
1278 1278 int status;
1279 1279
1280 1280 // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
1281 1281 //
1282 1282 // On multiprocessor systems, libthread sometimes under-provisions our
1283 1283 // process with LWPs. On a 30-way system, for instance, we could have
1284 1284 // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
1285 1285 // to our process. This can result in underutilization of PEs.
1286 1286 // I suspect the problem is related to libthread's LWP
1287 1287 // pool management and to the kernel's SIGBLOCKING "last LWP parked"
1288 1288 // upcall policy.
1289 1289 //
1290 1290 // The following code is palliative -- it attempts to ensure that our
1291 1291 // process has sufficient LWPs to take advantage of multiple PEs.
1292 1292 // Proper long-term cures include using user-level threads bound to LWPs
1293 1293 // (THR_BOUND) or using LWP-based synchronization. Note that there is a
1294 1294 // slight timing window with respect to sampling _os_thread_count, but
1295 1295 // the race is benign. Also, we should periodically recompute
1296 1296 // _processors_online as the min of SC_NPROCESSORS_ONLN and
1297 1297 // the number of PEs in our partition. You might be tempted to use
1298 1298 // THR_NEW_LWP here, but I'd recommend against it as that could
1299 1299 // result in undesirable growth of the libthread's LWP pool.
1300 1300 // The fix below isn't sufficient; for instance, it doesn't take into account
1301 1301 // LWPs parked on IO. It does, however, help certain CPU-bound benchmarks.
1302 1302 //
1303 1303 // Some pathologies this scheme doesn't handle:
1304 1304 // * Threads can block, releasing the LWPs. The LWPs can age out.
1305 1305 // When a large number of threads become ready again there aren't
1306 1306 // enough LWPs available to service them. This can occur when the
1307 1307 // number of ready threads oscillates.
1308 1308 // * LWPs/Threads park on IO, thus taking the LWP out of circulation.
1309 1309 //
1310 1310 // Finally, we should call thr_setconcurrency() periodically to refresh
1311 1311 // the LWP pool and thwart the LWP age-out mechanism.
1312 1312 // The "+3" term provides a little slop -- we want to slightly overprovision.
1313 1313
1314 1314 if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
1315 1315 if (!(flags & THR_BOUND)) {
1316 1316 thr_setconcurrency (os::Solaris::_os_thread_count); // avoid starvation
1317 1317 }
1318 1318 }
1319 1319 // Although this doesn't hurt, we should warn of undefined behavior
1320 1320 // when using unbound T1 threads with schedctl(). This should never
1321 1321 // happen, as the compiler and VM threads are always created bound
1322 1322 DEBUG_ONLY(
1323 1323 if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
1324 1324 (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
1325 1325 ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
1326 1326 (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
1327 1327 warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
1328 1328 }
1329 1329 );
1330 1330
1331 1331
1332 1332 // Mark that we don't have an lwp or thread id yet.
1333 1333 // In case we attempt to set the priority before the thread starts.
1334 1334 osthread->set_lwp_id(-1);
1335 1335 osthread->set_thread_id(-1);
1336 1336
1337 1337 status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
1338 1338 if (status != 0) {
1339 1339 if (PrintMiscellaneous && (Verbose || WizardMode)) {
1340 1340 perror("os::create_thread");
1341 1341 }
1342 1342 thread->set_osthread(NULL);
1343 1343 // Need to clean up stuff we've allocated so far
1344 1344 delete osthread;
1345 1345 return false;
1346 1346 }
1347 1347
1348 1348 Atomic::inc(&os::Solaris::_os_thread_count);
1349 1349
1350 1350 // Store info on the Solaris thread into the OSThread
1351 1351 osthread->set_thread_id(tid);
1352 1352
1353 1353 // Remember that we created this thread so we can set priority on it
1354 1354 osthread->set_vm_created();
1355 1355
1356 1356 // Set the default thread priority otherwise use NormalPriority
1357 1357
1358 1358 if ( UseThreadPriorities ) {
1359 1359 thr_setprio(tid, (DefaultThreadPriority == -1) ?
1360 1360 java_to_os_priority[NormPriority] :
1361 1361 DefaultThreadPriority);
1362 1362 }
1363 1363
1364 1364 // Initial thread state is INITIALIZED, not SUSPENDED
1365 1365 osthread->set_state(INITIALIZED);
1366 1366
1367 1367 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
1368 1368 return true;
1369 1369 }
1370 1370
1371 1371 /* Defined for >= Solaris 10. This allows builds on earlier versions
1372 1372 * of Solaris to take advantage of the newly reserved Solaris JVM signals.
1373 1373 * With SIGJVM1 and SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2,
1374 1374 * and -XX:+UseAltSigs does nothing since these should have no conflict.
1375 1375 */
1376 1376 #if !defined(SIGJVM1)
1377 1377 #define SIGJVM1 39
1378 1378 #define SIGJVM2 40
1379 1379 #endif
1380 1380
1381 1381 debug_only(static bool signal_sets_initialized = false);
1382 1382 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
1383 1383 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
1384 1384 int os::Solaris::_SIGasync = ASYNC_SIGNAL;
1385 1385
1386 1386 bool os::Solaris::is_sig_ignored(int sig) {
1387 1387 struct sigaction oact;
1388 1388 sigaction(sig, (struct sigaction*)NULL, &oact);
1389 1389 void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
1390 1390 : CAST_FROM_FN_PTR(void*, oact.sa_handler);
1391 1391 if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
1392 1392 return true;
1393 1393 else
1394 1394 return false;
1395 1395 }
1396 1396
1397 1397 // Note: SIGRTMIN is a macro that calls sysconf() so it will
1398 1398 // dynamically detect the SIGRTMIN value for the system at runtime, not at build time
1399 1399 static bool isJVM1available() {
1400 1400 return SIGJVM1 < SIGRTMIN;
1401 1401 }
1402 1402
1403 1403 void os::Solaris::signal_sets_init() {
1404 1404 // Should also have an assertion stating we are still single-threaded.
1405 1405 assert(!signal_sets_initialized, "Already initialized");
1406 1406 // Fill in signals that are necessarily unblocked for all threads in
1407 1407 // the VM. Currently, we unblock the following signals:
1408 1408 // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
1409 1409 // by -Xrs (=ReduceSignalUsage));
1410 1410 // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
1411 1411 // other threads. The "ReduceSignalUsage" boolean tells us not to alter
1412 1412 // the dispositions or masks wrt these signals.
1413 1413 // Programs embedding the VM that want to use the above signals for their
1414 1414 // own purposes must, at this time, use the "-Xrs" option to prevent
1415 1415 // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
1416 1416 // (See bug 4345157, and other related bugs).
1417 1417 // In reality, though, unblocking these signals is really a nop, since
1418 1418 // these signals are not blocked by default.
1419 1419 sigemptyset(&unblocked_sigs);
1420 1420 sigemptyset(&allowdebug_blocked_sigs);
1421 1421 sigaddset(&unblocked_sigs, SIGILL);
1422 1422 sigaddset(&unblocked_sigs, SIGSEGV);
1423 1423 sigaddset(&unblocked_sigs, SIGBUS);
1424 1424 sigaddset(&unblocked_sigs, SIGFPE);
1425 1425
1426 1426   if (isJVM1available()) {
1427 1427 os::Solaris::set_SIGinterrupt(SIGJVM1);
1428 1428 os::Solaris::set_SIGasync(SIGJVM2);
1429 1429 } else if (UseAltSigs) {
1430 1430 os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
1431 1431 os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
1432 1432 } else {
1433 1433 os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
1434 1434 os::Solaris::set_SIGasync(ASYNC_SIGNAL);
1435 1435 }
1436 1436
1437 1437 sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
1438 1438 sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
1439 1439
1440 1440 if (!ReduceSignalUsage) {
1441 1441 if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
1442 1442 sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
1443 1443 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
1444 1444 }
1445 1445 if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
1446 1446 sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
1447 1447 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
1448 1448 }
1449 1449 if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
1450 1450 sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
1451 1451 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
1452 1452 }
1453 1453 }
1454 1454 // Fill in signals that are blocked by all but the VM thread.
1455 1455 sigemptyset(&vm_sigs);
1456 1456 if (!ReduceSignalUsage)
1457 1457 sigaddset(&vm_sigs, BREAK_SIGNAL);
1458 1458 debug_only(signal_sets_initialized = true);
1459 1459
1460 1460 // For diagnostics only used in run_periodic_checks
1461 1461 sigemptyset(&check_signal_done);
1462 1462 }
1463 1463
1464 1464 // These are signals that are unblocked while a thread is running Java.
1465 1465 // (For some reason, they get blocked by default.)
1466 1466 sigset_t* os::Solaris::unblocked_signals() {
1467 1467 assert(signal_sets_initialized, "Not initialized");
1468 1468 return &unblocked_sigs;
1469 1469 }
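// Illustrative sketch (not part of this webrev): a thread would typically apply
// the set returned above with thr_sigsetmask, e.g.
//
//   thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
//
// The actual call sites live elsewhere in the VM's thread start-up code; this is
// shown only to make the purpose of unblocked_sigs concrete.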
1470 1470
1471 1471 // These are the signals that are blocked while a (non-VM) thread is
1472 1472 // running Java. Only the VM thread handles these signals.
1473 1473 sigset_t* os::Solaris::vm_signals() {
1474 1474 assert(signal_sets_initialized, "Not initialized");
1475 1475 return &vm_sigs;
1476 1476 }
1477 1477
1478 1478 // These are signals that are blocked during cond_wait to allow debugger in
1479 1479 sigset_t* os::Solaris::allowdebug_blocked_signals() {
1480 1480 assert(signal_sets_initialized, "Not initialized");
1481 1481 return &allowdebug_blocked_sigs;
1482 1482 }
1483 1483
1484 1484
1485 1485 void _handle_uncaught_cxx_exception() {
1486 1486 VMError err("An uncaught C++ exception");
1487 1487 err.report_and_die();
1488 1488 }
1489 1489
1490 1490
1491 1491 // First crack at OS-specific initialization, from inside the new thread.
1492 1492 void os::initialize_thread() {
1493 1493 int r = thr_main() ;
1494 1494 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
1495 1495 if (r) {
1496 1496 JavaThread* jt = (JavaThread *)Thread::current();
1497 1497 assert(jt != NULL,"Sanity check");
1498 1498 size_t stack_size;
1499 1499 address base = jt->stack_base();
1500 1500 if (Arguments::created_by_java_launcher()) {
1501 1501 // Use 2MB to allow for Solaris 7 64 bit mode.
1502 1502 stack_size = JavaThread::stack_size_at_create() == 0
1503 1503 ? 2048*K : JavaThread::stack_size_at_create();
1504 1504
1505 1505 // There are rare cases when we may have already used more than
1506 1506 // the basic stack size allotment before this method is invoked.
1507 1507 // Attempt to allow for a normally sized java_stack.
1508 1508 size_t current_stack_offset = (size_t)(base - (address)&stack_size);
1509 1509 stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
1510 1510 } else {
1511 1511 // 6269555: If we were not created by a Java launcher, i.e. if we are
1512 1512 // running embedded in a native application, treat the primordial thread
1513 1513 // as much like a native attached thread as possible. This means using
1514 1514 // the current stack size from thr_stksegment(), unless it is too large
1515 1515 // to reliably setup guard pages. A reasonable max size is 8MB.
1516 1516 size_t current_size = current_stack_size();
1517 1517 // This should never happen, but just in case....
1518 1518 if (current_size == 0) current_size = 2 * K * K;
1519 1519 stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
1520 1520 }
1521 1521     address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());
1522 1522 stack_size = (size_t)(base - bottom);
1523 1523
1524 1524 assert(stack_size > 0, "Stack size calculation problem");
1525 1525
1526 1526 if (stack_size > jt->stack_size()) {
1527 1527 NOT_PRODUCT(
1528 1528 struct rlimit limits;
1529 1529 getrlimit(RLIMIT_STACK, &limits);
1530 1530 size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
1531 1531 assert(size >= jt->stack_size(), "Stack size problem in main thread");
1532 1532 )
1533 1533 tty->print_cr(
1534 1534 "Stack size of %d Kb exceeds current limit of %d Kb.\n"
1535 1535 "(Stack sizes are rounded up to a multiple of the system page size.)\n"
1536 1536 "See limit(1) to increase the stack size limit.",
1537 1537 stack_size / K, jt->stack_size() / K);
1538 1538 vm_exit(1);
1539 1539 }
1540 1540 assert(jt->stack_size() >= stack_size,
1541 1541 "Attempt to map more stack than was allocated");
1542 1542 jt->set_stack_size(stack_size);
1543 1543 }
1544 1544
1545 1545 // 5/22/01: Right now alternate signal stacks do not handle
1546 1546 // throwing stack overflow exceptions, see bug 4463178
1547 1547 // Until a fix is found for this, T2 will NOT imply alternate signal
1548 1548 // stacks.
1549 1549 // If using T2 libthread threads, install an alternate signal stack.
1550 1550 // Because alternate stacks associate with LWPs on Solaris,
1551 1551 // see sigaltstack(2), if using UNBOUND threads, or if UseBoundThreads
1552 1552 // we prefer to explicitly stack bang.
1553 1553 // If not using T2 libthread, but using UseBoundThreads any threads
1554 1554 // (primordial thread, jni_attachCurrentThread) we do not create,
1555 1555 // probably are not bound, therefore they can not have an alternate
1556 1556 // signal stack. Since our stack banging code is generated and
1557 1557 // is shared across threads, all threads must be bound to allow
1558 1558 // using alternate signal stacks. The alternative is to interpose
1559 1559 // on _lwp_create to associate an alt sig stack with each LWP,
1560 1560 // and this could be a problem when the JVM is embedded.
1561 1561 // We would prefer to use alternate signal stacks with T2
1562 1562 // Since there is currently no accurate way to detect T2
1563 1563 // we do not. Assuming T2 when running T1 causes sig 11s or assertions
1564 1564 // on installing alternate signal stacks
1565 1565
1566 1566
1567 1567 // 05/09/03: removed alternate signal stack support for Solaris
1568 1568 // The alternate signal stack mechanism is no longer needed to
1569 1569 // handle stack overflow. This is now handled by allocating
1570 1570 // guard pages (red zone) and stackbanging.
1571 1571 // Initially the alternate signal stack mechanism was removed because
1572 1572 // it did not work with T1 libthread. Alternate
1573 1573 // signal stacks MUST have all threads bound to lwps. Applications
1574 1574 // can create their own threads and attach them without their being
1575 1575 // bound under T1. This is frequently the case for the primordial thread.
1576 1576 // If we were ever to reenable this mechanism we would need to
1577 1577 // use the dynamic check for T2 libthread.
1578 1578
1579 1579 os::Solaris::init_thread_fpu_state();
1580 1580 std::set_terminate(_handle_uncaught_cxx_exception);
1581 1581 }
1582 1582
1583 1583
1584 1584
1585 1585 // Free Solaris resources related to the OSThread
1586 1586 void os::free_thread(OSThread* osthread) {
1587 1587 assert(osthread != NULL, "os::free_thread but osthread not set");
1588 1588
1589 1589
1590 1590 // We are told to free resources of the argument thread,
1591 1591 // but we can only really operate on the current thread.
1592 1592 // The main thread must take the VMThread down synchronously
1593 1593 // before the main thread exits and frees up CodeHeap
1594 1594 guarantee((Thread::current()->osthread() == osthread
1595 1595 || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
1596 1596 if (Thread::current()->osthread() == osthread) {
1597 1597 // Restore caller's signal mask
1598 1598 sigset_t sigmask = osthread->caller_sigmask();
1599 1599 thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
1600 1600 }
1601 1601 delete osthread;
1602 1602 }
1603 1603
1604 1604 void os::pd_start_thread(Thread* thread) {
1605 1605 int status = thr_continue(thread->osthread()->thread_id());
1606 1606 assert_status(status == 0, status, "thr_continue failed");
1607 1607 }
1608 1608
1609 1609
1610 1610 intx os::current_thread_id() {
1611 1611 return (intx)thr_self();
1612 1612 }
1613 1613
1614 1614 static pid_t _initial_pid = 0;
1615 1615
1616 1616 int os::current_process_id() {
1617 1617 return (int)(_initial_pid ? _initial_pid : getpid());
1618 1618 }
1619 1619
1620 1620 int os::allocate_thread_local_storage() {
1621 1621 // %%% in Win32 this allocates a memory segment pointed to by a
1622 1622 // register. Dan Stein can implement a similar feature in
1623 1623 // Solaris. Alternatively, the VM can do the same thing
1624 1624 // explicitly: malloc some storage and keep the pointer in a
1625 1625 // register (which is part of the thread's context) (or keep it
1626 1626 // in TLS).
1627 1627 // %%% In current versions of Solaris, thr_self and TSD can
1628 1628 // be accessed via short sequences of displaced indirections.
1629 1629 // The value of thr_self is available as %g7(36).
1630 1630 // The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
1631 1631 // assuming that the current thread already has a value bound to k.
1632 1632 // It may be worth experimenting with such access patterns,
1633 1633 // and later having the parameters formally exported from a Solaris
1634 1634 // interface. I think, however, that it will be faster to
1635 1635 // maintain the invariant that %g2 always contains the
1636 1636 // JavaThread in Java code, and have stubs simply
1637 1637 // treat %g2 as a caller-save register, preserving it in a %lN.
1638 1638 thread_key_t tk;
1639 1639 if (thr_keycreate( &tk, NULL ) )
1640 1640 fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
1641 1641 "(%s)", strerror(errno)));
1642 1642 return int(tk);
1643 1643 }
1644 1644
1645 1645 void os::free_thread_local_storage(int index) {
1646 1646 // %%% don't think we need anything here
1647 1647 // if ( pthread_key_delete((pthread_key_t) tk) )
1648 1648 // fatal("os::free_thread_local_storage: pthread_key_delete failed");
1649 1649 }
1650 1650
1651 1651 #define SMALLINT 32 // libthread's allocation for tsd_common is a version-specific
1652 1652                     // small number - the point is that NO swap space is available
1653 1653 void os::thread_local_storage_at_put(int index, void* value) {
1654 1654 // %%% this is used only in threadLocalStorage.cpp
1655 1655 if (thr_setspecific((thread_key_t)index, value)) {
1656 1656 if (errno == ENOMEM) {
1657 1657 vm_exit_out_of_memory(SMALLINT, "thr_setspecific: out of swap space");
1658 1658 } else {
1659 1659 fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
1660 1660 "(%s)", strerror(errno)));
1661 1661 }
1662 1662 } else {
1663 1663 ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;
1664 1664 }
1665 1665 }
1666 1666
1667 1667 // This function could be called before TLS is initialized, for example, when
1668 1668 // VM receives an async signal or when VM causes a fatal error during
1669 1669 // initialization. Return NULL if thr_getspecific() fails.
1670 1670 void* os::thread_local_storage_at(int index) {
1671 1671 // %%% this is used only in threadLocalStorage.cpp
1672 1672 void* r = NULL;
1673 1673 return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
1674 1674 }
1675 1675
1676 1676
1677 -const int NANOSECS_PER_MILLISECS = 1000000;
1678 1677 // gethrtime can move backwards if read from one cpu and then a different cpu
1679 1678 // getTimeNanos is guaranteed to not move backward on Solaris
1680 1679 // A local spinloop is used because a CAS on an int is faster than
1681 1680 // a CAS on a 64-bit jlong. Also, Atomic::cmpxchg for jlong is not
1682 1681 // supported on sparc v8 or pre-supports_cx8 intel boxes.
1683 1682 // oldgetTimeNanos for systems which do not support CAS on 64bit jlong
1684 1683 // i.e. sparc v8 and pre supports_cx8 (i486) intel boxes
1685 1684 inline hrtime_t oldgetTimeNanos() {
1686 1685 int gotlock = LOCK_INVALID;
1687 1686 hrtime_t newtime = gethrtime();
1688 1687
1689 1688 for (;;) {
1690 1689 // grab lock for max_hrtime
1691 1690 int curlock = max_hrtime_lock;
1692 1691 if (curlock & LOCK_BUSY) continue;
1693 1692 if (gotlock = Atomic::cmpxchg(LOCK_BUSY, &max_hrtime_lock, LOCK_FREE) != LOCK_FREE) continue;
1694 1693 if (newtime > max_hrtime) {
1695 1694 max_hrtime = newtime;
1696 1695 } else {
1697 1696 newtime = max_hrtime;
1698 1697 }
1699 1698 // release lock
1700 1699 max_hrtime_lock = LOCK_FREE;
1701 1700 return newtime;
1702 1701 }
1703 1702 }
1704 1703 // gethrtime can move backwards if read from one cpu and then a different cpu
1705 1704 // getTimeNanos is guaranteed to not move backward on Solaris
1706 1705 inline hrtime_t getTimeNanos() {
1707 1706 if (VM_Version::supports_cx8()) {
1708 1707 const hrtime_t now = gethrtime();
1709 1708 // Use atomic long load since 32-bit x86 uses 2 registers to keep long.
1710 1709 const hrtime_t prev = Atomic::load((volatile jlong*)&max_hrtime);
1711 1710 if (now <= prev) return prev; // same or retrograde time;
1712 1711 const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
1713 1712 assert(obsv >= prev, "invariant"); // Monotonicity
1714 1713 // If the CAS succeeded then we're done and return "now".
1715 1714 // If the CAS failed and the observed value "obs" is >= now then
1716 1715 // we should return "obs". If the CAS failed and now > obs > prv then
1717 1716 // some other thread raced this thread and installed a new value, in which case
1718 1717 // we could either (a) retry the entire operation, (b) retry trying to install now
1719 1718 // or (c) just return obs. We use (c). No loop is required although in some cases
1720 1719 // we might discard a higher "now" value in deference to a slightly lower but freshly
1721 1720 // installed obs value. That's entirely benign -- it admits no new orderings compared
1722 1721 // to (a) or (b) -- and greatly reduces coherence traffic.
1723 1722 // We might also condition (c) on the magnitude of the delta between obs and now.
1724 1723 // Avoiding excessive CAS operations to hot RW locations is critical.
1725 1724 // See http://blogs.sun.com/dave/entry/cas_and_cache_trivia_invalidate
1726 1725 return (prev == obsv) ? now : obsv ;
1727 1726 } else {
1728 1727 return oldgetTimeNanos();
1729 1728 }
1730 1729 }
1731 1730
1732 1731 // Time since start-up in seconds to a fine granularity.
1733 1732 // Used by VMSelfDestructTimer and the MemProfiler.
1734 1733 double os::elapsedTime() {
1735 1734 return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
1736 1735 }
1737 1736
1738 1737 jlong os::elapsed_counter() {
1739 1738 return (jlong)(getTimeNanos() - first_hrtime);
1740 1739 }
1741 1740
1742 1741 jlong os::elapsed_frequency() {
1743 1742 return hrtime_hz;
1744 1743 }
1745 1744
1746 1745 // Return the real, user, and system times in seconds from an
1747 1746 // arbitrary fixed point in the past.
1748 1747 bool os::getTimesSecs(double* process_real_time,
1749 1748 double* process_user_time,
1750 1749 double* process_system_time) {
1751 1750 struct tms ticks;
1752 1751 clock_t real_ticks = times(&ticks);
1753 1752
1754 1753 if (real_ticks == (clock_t) (-1)) {
1755 1754 return false;
1756 1755 } else {
1757 1756 double ticks_per_second = (double) clock_tics_per_sec;
1758 1757 *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1759 1758 *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1760 1759 // For consistency return the real time from getTimeNanos()
1761 1760 // converted to seconds.
1762 1761 *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
1763 1762
1764 1763 return true;
1765 1764 }
1766 1765 }
1767 1766
1768 1767 bool os::supports_vtime() { return true; }
1769 1768
1770 1769 bool os::enable_vtime() {
1771 1770 int fd = ::open("/proc/self/ctl", O_WRONLY);
1772 1771 if (fd == -1)
1773 1772 return false;
1774 1773
1775 1774 long cmd[] = { PCSET, PR_MSACCT };
1776 1775 int res = ::write(fd, cmd, sizeof(long) * 2);
1777 1776 ::close(fd);
1778 1777 if (res != sizeof(long) * 2)
1779 1778 return false;
1780 1779
1781 1780 return true;
1782 1781 }
1783 1782
1784 1783 bool os::vtime_enabled() {
1785 1784 int fd = ::open("/proc/self/status", O_RDONLY);
1786 1785 if (fd == -1)
1787 1786 return false;
1788 1787
1789 1788 pstatus_t status;
1790 1789 int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
1791 1790 ::close(fd);
1792 1791 if (res != sizeof(pstatus_t))
1793 1792 return false;
1794 1793
1795 1794 return status.pr_flags & PR_MSACCT;
1796 1795 }
1797 1796
1798 1797 double os::elapsedVTime() {
1799 1798 return (double)gethrvtime() / (double)hrtime_hz;
1800 1799 }
1801 1800
1802 1801 // Used internally for comparisons only
1803 1802 // getTimeMillis guaranteed to not move backwards on Solaris
1804 1803 jlong getTimeMillis() {
1805 1804 jlong nanotime = getTimeNanos();
1806 - return (jlong)(nanotime / NANOSECS_PER_MILLISECS);
1805 + return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
1807 1806 }
1808 1807
1809 1808 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
1810 1809 jlong os::javaTimeMillis() {
1811 1810 timeval t;
1812 1811 if (gettimeofday( &t, NULL) == -1)
1813 1812 fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
1814 1813 return jlong(t.tv_sec) * 1000 + jlong(t.tv_usec) / 1000;
1815 1814 }
1816 1815
1817 1816 jlong os::javaTimeNanos() {
1818 1817 return (jlong)getTimeNanos();
1819 1818 }
1820 1819
1821 1820 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1822 1821 info_ptr->max_value = ALL_64_BITS; // gethrtime() uses all 64 bits
1823 1822 info_ptr->may_skip_backward = false; // not subject to resetting or drifting
1824 1823 info_ptr->may_skip_forward = false; // not subject to resetting or drifting
1825 1824 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time
1826 1825 }
1827 1826
1828 1827 char * os::local_time_string(char *buf, size_t buflen) {
1829 1828 struct tm t;
1830 1829 time_t long_time;
1831 1830 time(&long_time);
1832 1831 localtime_r(&long_time, &t);
1833 1832 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1834 1833 t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1835 1834 t.tm_hour, t.tm_min, t.tm_sec);
1836 1835 return buf;
1837 1836 }
1838 1837
1839 1838 // Note: os::shutdown() might be called very early during initialization, or
1840 1839 // called from signal handler. Before adding something to os::shutdown(), make
1841 1840 // sure it is async-safe and can handle partially initialized VM.
1842 1841 void os::shutdown() {
1843 1842
1844 1843 // allow PerfMemory to attempt cleanup of any persistent resources
1845 1844 perfMemory_exit();
1846 1845
1847 1846 // needs to remove object in file system
1848 1847 AttachListener::abort();
1849 1848
1850 1849 // flush buffered output, finish log files
1851 1850 ostream_abort();
1852 1851
1853 1852 // Check for abort hook
1854 1853 abort_hook_t abort_hook = Arguments::abort_hook();
1855 1854 if (abort_hook != NULL) {
1856 1855 abort_hook();
1857 1856 }
1858 1857 }
1859 1858
1860 1859 // Note: os::abort() might be called very early during initialization, or
1861 1860 // called from signal handler. Before adding something to os::abort(), make
1862 1861 // sure it is async-safe and can handle partially initialized VM.
1863 1862 void os::abort(bool dump_core) {
1864 1863 os::shutdown();
1865 1864 if (dump_core) {
1866 1865 #ifndef PRODUCT
1867 1866 fdStream out(defaultStream::output_fd());
1868 1867 out.print_raw("Current thread is ");
1869 1868 char buf[16];
1870 1869 jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1871 1870 out.print_raw_cr(buf);
1872 1871 out.print_raw_cr("Dumping core ...");
1873 1872 #endif
1874 1873 ::abort(); // dump core (for debugging)
1875 1874 }
1876 1875
1877 1876 ::exit(1);
1878 1877 }
1879 1878
1880 1879 // Die immediately, no exit hook, no abort hook, no cleanup.
1881 1880 void os::die() {
1882 1881 _exit(-1);
1883 1882 }
1884 1883
1885 1884 // unused
1886 1885 void os::set_error_file(const char *logfile) {}
1887 1886
1888 1887 // DLL functions
1889 1888
1890 1889 const char* os::dll_file_extension() { return ".so"; }
1891 1890
1892 1891 // This must be hard coded because it's the system's temporary
1893 1892 // directory not the java application's temp directory, ala java.io.tmpdir.
1894 1893 const char* os::get_temp_directory() { return "/tmp"; }
1895 1894
1896 1895 static bool file_exists(const char* filename) {
1897 1896 struct stat statbuf;
1898 1897 if (filename == NULL || strlen(filename) == 0) {
1899 1898 return false;
1900 1899 }
1901 1900 return os::stat(filename, &statbuf) == 0;
1902 1901 }
1903 1902
1904 1903 void os::dll_build_name(char* buffer, size_t buflen,
1905 1904 const char* pname, const char* fname) {
1906 1905 const size_t pnamelen = pname ? strlen(pname) : 0;
1907 1906
1908 1907 // Quietly truncate on buffer overflow. Should be an error.
1909 1908 if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1910 1909 *buffer = '\0';
1911 1910 return;
1912 1911 }
1913 1912
1914 1913 if (pnamelen == 0) {
1915 1914 snprintf(buffer, buflen, "lib%s.so", fname);
1916 1915 } else if (strchr(pname, *os::path_separator()) != NULL) {
1917 1916 int n;
1918 1917 char** pelements = split_path(pname, &n);
1919 1918 for (int i = 0 ; i < n ; i++) {
1920 1919 // really shouldn't be NULL but what the heck, check can't hurt
1921 1920 if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1922 1921 continue; // skip the empty path values
1923 1922 }
1924 1923 snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1925 1924 if (file_exists(buffer)) {
1926 1925 break;
1927 1926 }
1928 1927 }
1929 1928 // release the storage
1930 1929 for (int i = 0 ; i < n ; i++) {
1931 1930 if (pelements[i] != NULL) {
1932 1931 FREE_C_HEAP_ARRAY(char, pelements[i]);
1933 1932 }
1934 1933 }
1935 1934 if (pelements != NULL) {
1936 1935 FREE_C_HEAP_ARRAY(char*, pelements);
1937 1936 }
1938 1937 } else {
1939 1938 snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
1940 1939 }
1941 1940 }
1942 1941
1943 1942 const char* os::get_current_directory(char *buf, int buflen) {
1944 1943 return getcwd(buf, buflen);
1945 1944 }
1946 1945
1947 1946 // check if addr is inside libjvm[_g].so
1948 1947 bool os::address_is_in_vm(address addr) {
1949 1948 static address libjvm_base_addr;
1950 1949 Dl_info dlinfo;
1951 1950
1952 1951 if (libjvm_base_addr == NULL) {
1953 1952 dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo);
1954 1953 libjvm_base_addr = (address)dlinfo.dli_fbase;
1955 1954 assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
1956 1955 }
1957 1956
1958 1957 if (dladdr((void *)addr, &dlinfo)) {
1959 1958 if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
1960 1959 }
1961 1960
1962 1961 return false;
1963 1962 }
1964 1963
1965 1964 typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int);
1966 1965 static dladdr1_func_type dladdr1_func = NULL;
1967 1966
1968 1967 bool os::dll_address_to_function_name(address addr, char *buf,
1969 1968 int buflen, int * offset) {
1970 1969 Dl_info dlinfo;
1971 1970
1972 1971 // dladdr1_func was initialized in os::init()
1973 1972 if (dladdr1_func){
1974 1973 // yes, we have dladdr1
1975 1974
1976 1975 // Support for dladdr1 is checked at runtime; it may be
1977 1976 // available even if the vm is built on a machine that does
1978 1977 // not have dladdr1 support. Make sure there is a value for
1979 1978 // RTLD_DL_SYMENT.
1980 1979 #ifndef RTLD_DL_SYMENT
1981 1980 #define RTLD_DL_SYMENT 1
1982 1981 #endif
1983 1982 #ifdef _LP64
1984 1983 Elf64_Sym * info;
1985 1984 #else
1986 1985 Elf32_Sym * info;
1987 1986 #endif
1988 1987 if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
1989 1988 RTLD_DL_SYMENT)) {
1990 1989 if ((char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
1991 1990 if (buf != NULL) {
1992 1991 if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen))
1993 1992 jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1994 1993 }
1995 1994 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1996 1995 return true;
1997 1996 }
1998 1997 }
1999 1998 if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
2000 1999 if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
2001 2000 dlinfo.dli_fname, buf, buflen, offset) == Decoder::no_error) {
2002 2001 return true;
2003 2002 }
2004 2003 }
2005 2004 if (buf != NULL) buf[0] = '\0';
2006 2005 if (offset != NULL) *offset = -1;
2007 2006 return false;
2008 2007 } else {
2009 2008 // no, only dladdr is available
2010 2009 if (dladdr((void *)addr, &dlinfo)) {
2011 2010 if (buf != NULL) {
2012 2011 if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen))
2013 2012           jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
2014 2013 }
2015 2014 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
2016 2015 return true;
2017 2016 } else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
2018 2017 if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
2019 2018 dlinfo.dli_fname, buf, buflen, offset) == Decoder::no_error) {
2020 2019 return true;
2021 2020 }
2022 2021 }
2023 2022 if (buf != NULL) buf[0] = '\0';
2024 2023 if (offset != NULL) *offset = -1;
2025 2024 return false;
2026 2025 }
2027 2026 }
2028 2027
2029 2028 bool os::dll_address_to_library_name(address addr, char* buf,
2030 2029 int buflen, int* offset) {
2031 2030 Dl_info dlinfo;
2032 2031
2033 2032 if (dladdr((void*)addr, &dlinfo)){
2034 2033 if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
2035 2034 if (offset) *offset = addr - (address)dlinfo.dli_fbase;
2036 2035 return true;
2037 2036 } else {
2038 2037 if (buf) buf[0] = '\0';
2039 2038 if (offset) *offset = -1;
2040 2039 return false;
2041 2040 }
2042 2041 }
2043 2042
2044 2043 // Prints the names and full paths of all opened dynamic libraries
2045 2044 // for current process
2046 2045 void os::print_dll_info(outputStream * st) {
2047 2046 Dl_info dli;
2048 2047 void *handle;
2049 2048 Link_map *map;
2050 2049 Link_map *p;
2051 2050
2052 2051 st->print_cr("Dynamic libraries:"); st->flush();
2053 2052
2054 2053 if (!dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli)) {
2055 2054 st->print_cr("Error: Cannot print dynamic libraries.");
2056 2055 return;
2057 2056 }
2058 2057 handle = dlopen(dli.dli_fname, RTLD_LAZY);
2059 2058 if (handle == NULL) {
2060 2059 st->print_cr("Error: Cannot print dynamic libraries.");
2061 2060 return;
2062 2061 }
2063 2062 dlinfo(handle, RTLD_DI_LINKMAP, &map);
2064 2063 if (map == NULL) {
2065 2064 st->print_cr("Error: Cannot print dynamic libraries.");
2066 2065 return;
2067 2066 }
2068 2067
2069 2068 while (map->l_prev != NULL)
2070 2069 map = map->l_prev;
2071 2070
2072 2071 while (map != NULL) {
2073 2072 st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
2074 2073 map = map->l_next;
2075 2074 }
2076 2075
2077 2076 dlclose(handle);
2078 2077 }
2079 2078
2080 2079 // Loads a .dll/.so and,
2081 2080 // in case of error, checks whether the .dll/.so was built for the
2082 2081 // same architecture that Hotspot is running on.
2083 2082
2084 2083 void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
2085 2084 {
2086 2085 void * result= ::dlopen(filename, RTLD_LAZY);
2087 2086 if (result != NULL) {
2088 2087 // Successful loading
2089 2088 return result;
2090 2089 }
2091 2090
2092 2091 Elf32_Ehdr elf_head;
2093 2092
2094 2093 // Read system error message into ebuf
2095 2094 // It may or may not be overwritten below
2096 2095 ::strncpy(ebuf, ::dlerror(), ebuflen-1);
2097 2096 ebuf[ebuflen-1]='\0';
2098 2097 int diag_msg_max_length=ebuflen-strlen(ebuf);
2099 2098 char* diag_msg_buf=ebuf+strlen(ebuf);
2100 2099
2101 2100 if (diag_msg_max_length==0) {
2102 2101 // No more space in ebuf for additional diagnostics message
2103 2102 return NULL;
2104 2103 }
2105 2104
2106 2105
2107 2106 int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
2108 2107
2109 2108 if (file_descriptor < 0) {
2110 2109 // Can't open library, report dlerror() message
2111 2110 return NULL;
2112 2111 }
2113 2112
2114 2113 bool failed_to_read_elf_head=
2115 2114 (sizeof(elf_head)!=
2116 2115 (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ;
2117 2116
2118 2117 ::close(file_descriptor);
2119 2118 if (failed_to_read_elf_head) {
2120 2119 // file i/o error - report dlerror() msg
2121 2120 return NULL;
2122 2121 }
2123 2122
2124 2123 typedef struct {
2125 2124 Elf32_Half code; // Actual value as defined in elf.h
2126 2125 Elf32_Half compat_class; // Compatibility of archs at VM's sense
2127 2126 char elf_class; // 32 or 64 bit
2128 2127 char endianess; // MSB or LSB
2129 2128 char* name; // String representation
2130 2129 } arch_t;
2131 2130
2132 2131 static const arch_t arch_array[]={
2133 2132 {EM_386, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
2134 2133 {EM_486, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
2135 2134 {EM_IA_64, EM_IA_64, ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
2136 2135 {EM_X86_64, EM_X86_64, ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
2137 2136 {EM_SPARC, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
2138 2137 {EM_SPARC32PLUS, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
2139 2138 {EM_SPARCV9, EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
2140 2139 {EM_PPC, EM_PPC, ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
2141 2140 {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
2142 2141 {EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
2143 2142 };
2144 2143
2145 2144 #if (defined IA32)
2146 2145 static Elf32_Half running_arch_code=EM_386;
2147 2146 #elif (defined AMD64)
2148 2147 static Elf32_Half running_arch_code=EM_X86_64;
2149 2148 #elif (defined IA64)
2150 2149 static Elf32_Half running_arch_code=EM_IA_64;
2151 2150 #elif (defined __sparc) && (defined _LP64)
2152 2151 static Elf32_Half running_arch_code=EM_SPARCV9;
2153 2152 #elif (defined __sparc) && (!defined _LP64)
2154 2153 static Elf32_Half running_arch_code=EM_SPARC;
2155 2154 #elif (defined __powerpc64__)
2156 2155 static Elf32_Half running_arch_code=EM_PPC64;
2157 2156 #elif (defined __powerpc__)
2158 2157 static Elf32_Half running_arch_code=EM_PPC;
2159 2158 #elif (defined ARM)
2160 2159 static Elf32_Half running_arch_code=EM_ARM;
2161 2160 #else
2162 2161 #error Method os::dll_load requires that one of following is defined:\
2163 2162       IA32, AMD64, IA64, __sparc, __powerpc__, ARM
2164 2163 #endif
2165 2164
2166 2165 // Identify compatibility class for VM's architecture and library's architecture
2167 2166 // Obtain string descriptions for architectures
2168 2167
2169 2168 arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
2170 2169 int running_arch_index=-1;
2171 2170
2172 2171 for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
2173 2172 if (running_arch_code == arch_array[i].code) {
2174 2173 running_arch_index = i;
2175 2174 }
2176 2175 if (lib_arch.code == arch_array[i].code) {
2177 2176 lib_arch.compat_class = arch_array[i].compat_class;
2178 2177 lib_arch.name = arch_array[i].name;
2179 2178 }
2180 2179 }
2181 2180
2182 2181 assert(running_arch_index != -1,
2183 2182 "Didn't find running architecture code (running_arch_code) in arch_array");
2184 2183 if (running_arch_index == -1) {
2185 2184 // Even though running architecture detection failed
2186 2185 // we may still continue with reporting dlerror() message
2187 2186 return NULL;
2188 2187 }
2189 2188
2190 2189 if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
2191 2190 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
2192 2191 return NULL;
2193 2192 }
2194 2193
2195 2194 if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
2196 2195 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
2197 2196 return NULL;
2198 2197 }
2199 2198
2200 2199 if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
2201 2200 if ( lib_arch.name!=NULL ) {
2202 2201 ::snprintf(diag_msg_buf, diag_msg_max_length-1,
2203 2202 " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
2204 2203 lib_arch.name, arch_array[running_arch_index].name);
2205 2204 } else {
2206 2205 ::snprintf(diag_msg_buf, diag_msg_max_length-1,
2207 2206 " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
2208 2207 lib_arch.code,
2209 2208 arch_array[running_arch_index].name);
2210 2209 }
2211 2210 }
2212 2211
2213 2212 return NULL;
2214 2213 }
2215 2214
2216 2215 void* os::dll_lookup(void* handle, const char* name) {
2217 2216 return dlsym(handle, name);
2218 2217 }
2219 2218
2220 2219 int os::stat(const char *path, struct stat *sbuf) {
2221 2220 char pathbuf[MAX_PATH];
2222 2221 if (strlen(path) > MAX_PATH - 1) {
2223 2222 errno = ENAMETOOLONG;
2224 2223 return -1;
2225 2224 }
2226 2225 os::native_path(strcpy(pathbuf, path));
2227 2226 return ::stat(pathbuf, sbuf);
2228 2227 }
2229 2228
2230 2229 static bool _print_ascii_file(const char* filename, outputStream* st) {
2231 2230 int fd = ::open(filename, O_RDONLY);
2232 2231 if (fd == -1) {
2233 2232 return false;
2234 2233 }
2235 2234
2236 2235 char buf[32];
2237 2236 int bytes;
2238 2237 while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
2239 2238 st->print_raw(buf, bytes);
2240 2239 }
2241 2240
2242 2241 ::close(fd);
2243 2242
2244 2243 return true;
2245 2244 }
2246 2245
2247 2246 void os::print_os_info(outputStream* st) {
2248 2247 st->print("OS:");
2249 2248
2250 2249 if (!_print_ascii_file("/etc/release", st)) {
2251 2250 st->print("Solaris");
2252 2251 }
2253 2252 st->cr();
2254 2253
2255 2254 // kernel
2256 2255 st->print("uname:");
2257 2256 struct utsname name;
2258 2257 uname(&name);
2259 2258 st->print(name.sysname); st->print(" ");
2260 2259 st->print(name.release); st->print(" ");
2261 2260 st->print(name.version); st->print(" ");
2262 2261 st->print(name.machine);
2263 2262
2264 2263 // libthread
2265 2264 if (os::Solaris::T2_libthread()) st->print(" (T2 libthread)");
2266 2265 else st->print(" (T1 libthread)");
2267 2266 st->cr();
2268 2267
2269 2268 // rlimit
2270 2269 st->print("rlimit:");
2271 2270 struct rlimit rlim;
2272 2271
2273 2272 st->print(" STACK ");
2274 2273 getrlimit(RLIMIT_STACK, &rlim);
2275 2274 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
2276 2275 else st->print("%uk", rlim.rlim_cur >> 10);
2277 2276
2278 2277 st->print(", CORE ");
2279 2278 getrlimit(RLIMIT_CORE, &rlim);
2280 2279 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
2281 2280 else st->print("%uk", rlim.rlim_cur >> 10);
2282 2281
2283 2282 st->print(", NOFILE ");
2284 2283 getrlimit(RLIMIT_NOFILE, &rlim);
2285 2284 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
2286 2285 else st->print("%d", rlim.rlim_cur);
2287 2286
2288 2287 st->print(", AS ");
2289 2288 getrlimit(RLIMIT_AS, &rlim);
2290 2289 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
2291 2290 else st->print("%uk", rlim.rlim_cur >> 10);
2292 2291 st->cr();
2293 2292
2294 2293 // load average
2295 2294 st->print("load average:");
2296 2295 double loadavg[3];
2297 2296 os::loadavg(loadavg, 3);
2298 2297 st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
2299 2298 st->cr();
2300 2299 }
2301 2300
2302 2301
2303 2302 static bool check_addr0(outputStream* st) {
2304 2303 jboolean status = false;
2305 2304 int fd = ::open("/proc/self/map",O_RDONLY);
2306 2305 if (fd >= 0) {
2307 2306 prmap_t p;
2308 2307 while(::read(fd, &p, sizeof(p)) > 0) {
2309 2308 if (p.pr_vaddr == 0x0) {
2310 2309 st->print("Warning: Address: 0x%x, Size: %dK, ",p.pr_vaddr, p.pr_size/1024, p.pr_mapname);
2311 2310 st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
2312 2311 st->print("Access:");
2313 2312 st->print("%s",(p.pr_mflags & MA_READ) ? "r" : "-");
2314 2313 st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
2315 2314 st->print("%s",(p.pr_mflags & MA_EXEC) ? "x" : "-");
2316 2315 st->cr();
2317 2316 status = true;
2318 2317 }
2319 2318     }
2320 2319     ::close(fd);
2321 2320 }
2322 2321 return status;
2323 2322 }
2324 2323
2325 2324 void os::pd_print_cpu_info(outputStream* st) {
2326 2325 // Nothing to do for now.
2327 2326 }
2328 2327
2329 2328 void os::print_memory_info(outputStream* st) {
2330 2329 st->print("Memory:");
2331 2330 st->print(" %dk page", os::vm_page_size()>>10);
2332 2331 st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
2333 2332 st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
2334 2333 st->cr();
2335 2334 (void) check_addr0(st);
2336 2335 }
2337 2336
2338 2337 // Taken from /usr/include/sys/machsig.h Supposed to be architecture specific
2339 2338 // but they're the same for all the solaris architectures that we support.
2340 2339 const char *ill_names[] = { "ILL0", "ILL_ILLOPC", "ILL_ILLOPN", "ILL_ILLADR",
2341 2340 "ILL_ILLTRP", "ILL_PRVOPC", "ILL_PRVREG",
2342 2341 "ILL_COPROC", "ILL_BADSTK" };
2343 2342
2344 2343 const char *fpe_names[] = { "FPE0", "FPE_INTDIV", "FPE_INTOVF", "FPE_FLTDIV",
2345 2344 "FPE_FLTOVF", "FPE_FLTUND", "FPE_FLTRES",
2346 2345 "FPE_FLTINV", "FPE_FLTSUB" };
2347 2346
2348 2347 const char *segv_names[] = { "SEGV0", "SEGV_MAPERR", "SEGV_ACCERR" };
2349 2348
2350 2349 const char *bus_names[] = { "BUS0", "BUS_ADRALN", "BUS_ADRERR", "BUS_OBJERR" };
2351 2350
2352 2351 void os::print_siginfo(outputStream* st, void* siginfo) {
2353 2352 st->print("siginfo:");
2354 2353
2355 2354 const int buflen = 100;
2356 2355 char buf[buflen];
2357 2356 siginfo_t *si = (siginfo_t*)siginfo;
2358 2357 st->print("si_signo=%s: ", os::exception_name(si->si_signo, buf, buflen));
2359 2358 char *err = strerror(si->si_errno);
2360 2359 if (si->si_errno != 0 && err != NULL) {
2361 2360 st->print("si_errno=%s", err);
2362 2361 } else {
2363 2362 st->print("si_errno=%d", si->si_errno);
2364 2363 }
2365 2364 const int c = si->si_code;
2366 2365 assert(c > 0, "unexpected si_code");
2367 2366 switch (si->si_signo) {
2368 2367 case SIGILL:
2369 2368 st->print(", si_code=%d (%s)", c, c > 8 ? "" : ill_names[c]);
2370 2369 st->print(", si_addr=" PTR_FORMAT, si->si_addr);
2371 2370 break;
2372 2371 case SIGFPE:
2373 2372 st->print(", si_code=%d (%s)", c, c > 9 ? "" : fpe_names[c]);
2374 2373 st->print(", si_addr=" PTR_FORMAT, si->si_addr);
2375 2374 break;
2376 2375 case SIGSEGV:
2377 2376 st->print(", si_code=%d (%s)", c, c > 2 ? "" : segv_names[c]);
2378 2377 st->print(", si_addr=" PTR_FORMAT, si->si_addr);
2379 2378 break;
2380 2379 case SIGBUS:
2381 2380 st->print(", si_code=%d (%s)", c, c > 3 ? "" : bus_names[c]);
2382 2381 st->print(", si_addr=" PTR_FORMAT, si->si_addr);
2383 2382 break;
2384 2383 default:
2385 2384 st->print(", si_code=%d", si->si_code);
2386 2385 // no si_addr
2387 2386 }
2388 2387
2389 2388 if ((si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
2390 2389 UseSharedSpaces) {
2391 2390 FileMapInfo* mapinfo = FileMapInfo::current_info();
2392 2391 if (mapinfo->is_in_shared_space(si->si_addr)) {
2393 2392 st->print("\n\nError accessing class data sharing archive." \
2394 2393 " Mapped file inaccessible during execution, " \
2395 2394 " possible disk/network problem.");
2396 2395 }
2397 2396 }
2398 2397 st->cr();
2399 2398 }
2400 2399
2401 2400 // Moved from whole group, because we need them here for diagnostic
2402 2401 // prints.
2403 2402 #define OLDMAXSIGNUM 32
2404 2403 static int Maxsignum = 0;
2405 2404 static int *ourSigFlags = NULL;
2406 2405
2407 2406 extern "C" void sigINTRHandler(int, siginfo_t*, void*);
2408 2407
2409 2408 int os::Solaris::get_our_sigflags(int sig) {
2410 2409 assert(ourSigFlags!=NULL, "signal data structure not initialized");
2411 2410 assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2412 2411 return ourSigFlags[sig];
2413 2412 }
2414 2413
2415 2414 void os::Solaris::set_our_sigflags(int sig, int flags) {
2416 2415 assert(ourSigFlags!=NULL, "signal data structure not initialized");
2417 2416 assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
2418 2417 ourSigFlags[sig] = flags;
2419 2418 }
2420 2419
2421 2420
2422 2421 static const char* get_signal_handler_name(address handler,
2423 2422 char* buf, int buflen) {
2424 2423 int offset;
2425 2424 bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
2426 2425 if (found) {
2427 2426 // skip directory names
2428 2427 const char *p1, *p2;
2429 2428 p1 = buf;
2430 2429 size_t len = strlen(os::file_separator());
2431 2430 while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
2432 2431 jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
2433 2432 } else {
2434 2433 jio_snprintf(buf, buflen, PTR_FORMAT, handler);
2435 2434 }
2436 2435 return buf;
2437 2436 }
2438 2437
2439 2438 static void print_signal_handler(outputStream* st, int sig,
2440 2439 char* buf, size_t buflen) {
2441 2440 struct sigaction sa;
2442 2441
2443 2442 sigaction(sig, NULL, &sa);
2444 2443
2445 2444 st->print("%s: ", os::exception_name(sig, buf, buflen));
2446 2445
2447 2446 address handler = (sa.sa_flags & SA_SIGINFO)
2448 2447 ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
2449 2448 : CAST_FROM_FN_PTR(address, sa.sa_handler);
2450 2449
2451 2450 if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
2452 2451 st->print("SIG_DFL");
2453 2452 } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
2454 2453 st->print("SIG_IGN");
2455 2454 } else {
2456 2455 st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
2457 2456 }
2458 2457
2459 2458 st->print(", sa_mask[0]=" PTR32_FORMAT, *(uint32_t*)&sa.sa_mask);
2460 2459
2461 2460 address rh = VMError::get_resetted_sighandler(sig);
2462 2461   // Maybe the handler was reset by VMError?
2463 2462 if(rh != NULL) {
2464 2463 handler = rh;
2465 2464 sa.sa_flags = VMError::get_resetted_sigflags(sig);
2466 2465 }
2467 2466
2468 2467 st->print(", sa_flags=" PTR32_FORMAT, sa.sa_flags);
2469 2468
2470 2469 // Check: is it our handler?
2471 2470 if(handler == CAST_FROM_FN_PTR(address, signalHandler) ||
2472 2471 handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
2473 2472 // It is our signal handler
2474 2473 // check for flags
2475 2474 if(sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
2476 2475 st->print(
2477 2476 ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
2478 2477 os::Solaris::get_our_sigflags(sig));
2479 2478 }
2480 2479 }
2481 2480 st->cr();
2482 2481 }
2483 2482
2484 2483 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
2485 2484 st->print_cr("Signal Handlers:");
2486 2485 print_signal_handler(st, SIGSEGV, buf, buflen);
2487 2486 print_signal_handler(st, SIGBUS , buf, buflen);
2488 2487 print_signal_handler(st, SIGFPE , buf, buflen);
2489 2488 print_signal_handler(st, SIGPIPE, buf, buflen);
2490 2489 print_signal_handler(st, SIGXFSZ, buf, buflen);
2491 2490 print_signal_handler(st, SIGILL , buf, buflen);
2492 2491 print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
2493 2492 print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
2494 2493 print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
2495 2494 print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
2496 2495 print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
2497 2496 print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
2498 2497 print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
2499 2498 print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
2500 2499 }
2501 2500
2502 2501 static char saved_jvm_path[MAXPATHLEN] = { 0 };
2503 2502
2504 2503 // Find the full path to the current module, libjvm.so or libjvm_g.so
2505 2504 void os::jvm_path(char *buf, jint buflen) {
2506 2505 // Error checking.
2507 2506 if (buflen < MAXPATHLEN) {
2508 2507 assert(false, "must use a large-enough buffer");
2509 2508 buf[0] = '\0';
2510 2509 return;
2511 2510 }
2512 2511 // Lazy resolve the path to current module.
2513 2512 if (saved_jvm_path[0] != 0) {
2514 2513 strcpy(buf, saved_jvm_path);
2515 2514 return;
2516 2515 }
2517 2516
2518 2517 Dl_info dlinfo;
2519 2518 int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
2520 2519 assert(ret != 0, "cannot locate libjvm");
2521 2520 realpath((char *)dlinfo.dli_fname, buf);
2522 2521
2523 2522 if (Arguments::created_by_gamma_launcher()) {
2524 2523 // Support for the gamma launcher. Typical value for buf is
2525 2524 // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so". If "/jre/lib/" appears at
2526 2525 // the right place in the string, then assume we are installed in a JDK and
2527 2526 // we're done. Otherwise, check for a JAVA_HOME environment variable and fix
2528 2527 // up the path so it looks like libjvm.so is installed there (append a
2529 2528 // fake suffix hotspot/libjvm.so).
2530 2529 const char *p = buf + strlen(buf) - 1;
2531 2530 for (int count = 0; p > buf && count < 5; ++count) {
2532 2531 for (--p; p > buf && *p != '/'; --p)
2533 2532 /* empty */ ;
2534 2533 }
2535 2534
2536 2535 if (strncmp(p, "/jre/lib/", 9) != 0) {
2537 2536 // Look for JAVA_HOME in the environment.
2538 2537 char* java_home_var = ::getenv("JAVA_HOME");
2539 2538 if (java_home_var != NULL && java_home_var[0] != 0) {
2540 2539 char cpu_arch[12];
2541 2540 char* jrelib_p;
2542 2541 int len;
2543 2542 sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
2544 2543 #ifdef _LP64
2545 2544 // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
2546 2545 if (strcmp(cpu_arch, "sparc") == 0) {
2547 2546 strcat(cpu_arch, "v9");
2548 2547 } else if (strcmp(cpu_arch, "i386") == 0) {
2549 2548 strcpy(cpu_arch, "amd64");
2550 2549 }
2551 2550 #endif
2552 2551 // Check the current module name "libjvm.so" or "libjvm_g.so".
2553 2552 p = strrchr(buf, '/');
2554 2553 assert(strstr(p, "/libjvm") == p, "invalid library name");
2555 2554 p = strstr(p, "_g") ? "_g" : "";
2556 2555
2557 2556 realpath(java_home_var, buf);
2558 2557 // determine if this is a legacy image or modules image
2559 2558 // modules image doesn't have "jre" subdirectory
2560 2559 len = strlen(buf);
2561 2560 jrelib_p = buf + len;
2562 2561 snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
2563 2562 if (0 != access(buf, F_OK)) {
2564 2563 snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
2565 2564 }
2566 2565
2567 2566 if (0 == access(buf, F_OK)) {
2568 2567 // Use current module name "libjvm[_g].so" instead of
2569 2568 // "libjvm"debug_only("_g")".so" since for fastdebug version
2570 2569 // we should have "libjvm.so" but debug_only("_g") adds "_g"!
2571 2570 len = strlen(buf);
2572 2571 snprintf(buf + len, buflen-len, "/hotspot/libjvm%s.so", p);
2573 2572 } else {
2574 2573 // Go back to path of .so
2575 2574 realpath((char *)dlinfo.dli_fname, buf);
2576 2575 }
2577 2576 }
2578 2577 }
2579 2578 }
2580 2579
2581 2580 strcpy(saved_jvm_path, buf);
2582 2581 }
2583 2582
2584 2583
2585 2584 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
2586 2585 // no prefix required, not even "_"
2587 2586 }
2588 2587
2589 2588
2590 2589 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
2591 2590 // no suffix required
2592 2591 }
2593 2592
2594 2593 // This method is a copy of JDK's sysGetLastErrorString
2595 2594 // from src/solaris/hpi/src/system_md.c
2596 2595
2597 2596 size_t os::lasterror(char *buf, size_t len) {
2598 2597
2599 2598 if (errno == 0) return 0;
2600 2599
2601 2600 const char *s = ::strerror(errno);
2602 2601 size_t n = ::strlen(s);
2603 2602 if (n >= len) {
2604 2603 n = len - 1;
2605 2604 }
2606 2605 ::strncpy(buf, s, n);
2607 2606 buf[n] = '\0';
2608 2607 return n;
2609 2608 }
2610 2609
2611 2610
2612 2611 // sun.misc.Signal
2613 2612
2614 2613 extern "C" {
2615 2614 static void UserHandler(int sig, void *siginfo, void *context) {
2616 2615 // Ctrl-C is pressed during error reporting, likely because the error
2617 2616 // handler fails to abort. Let VM die immediately.
2618 2617 if (sig == SIGINT && is_error_reported()) {
2619 2618 os::die();
2620 2619 }
2621 2620
2622 2621 os::signal_notify(sig);
2623 2622 // We do not need to reinstate the signal handler each time...
2624 2623 }
2625 2624 }
2626 2625
2627 2626 void* os::user_handler() {
2628 2627 return CAST_FROM_FN_PTR(void*, UserHandler);
2629 2628 }
2630 2629
2631 2630 extern "C" {
2632 2631 typedef void (*sa_handler_t)(int);
2633 2632 typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
2634 2633 }
2635 2634
2636 2635 void* os::signal(int signal_number, void* handler) {
2637 2636 struct sigaction sigAct, oldSigAct;
2638 2637 sigfillset(&(sigAct.sa_mask));
2639 2638 sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
2640 2639 sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
2641 2640
2642 2641 if (sigaction(signal_number, &sigAct, &oldSigAct))
2643 2642 // -1 means registration failed
2644 2643 return (void *)-1;
2645 2644
2646 2645 return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
2647 2646 }
2648 2647
2649 2648 void os::signal_raise(int signal_number) {
2650 2649 raise(signal_number);
2651 2650 }
2652 2651
2653 2652 /*
2654 2653  * The following code was moved from os.cpp to make it
2655 2654  * platform specific, which it is by its very nature.
2656 2655 */
2657 2656
2658 2657 // a counter for each possible signal value
2659 2658 static int Sigexit = 0;
2660 2659 static int Maxlibjsigsigs;
2661 2660 static jint *pending_signals = NULL;
2662 2661 static int *preinstalled_sigs = NULL;
2663 2662 static struct sigaction *chainedsigactions = NULL;
2664 2663 static sema_t sig_sem;
2665 2664 typedef int (*version_getting_t)();
2666 2665 version_getting_t os::Solaris::get_libjsig_version = NULL;
2667 2666 static int libjsigversion = NULL;
2668 2667
2669 2668 int os::sigexitnum_pd() {
2670 2669 assert(Sigexit > 0, "signal memory not yet initialized");
2671 2670 return Sigexit;
2672 2671 }
2673 2672
2674 2673 void os::Solaris::init_signal_mem() {
2675 2674 // Initialize signal structures
2676 2675 Maxsignum = SIGRTMAX;
2677 2676 Sigexit = Maxsignum+1;
2678 2677 assert(Maxsignum >0, "Unable to obtain max signal number");
2679 2678
2680 2679 Maxlibjsigsigs = Maxsignum;
2681 2680
2682 2681 // pending_signals has one int per signal
2683 2682 // The additional signal is for SIGEXIT - exit signal to signal_thread
2684 2683 pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1));
2685 2684 memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
2686 2685
2687 2686 if (UseSignalChaining) {
2688 2687 chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction)
2689 2688 * (Maxsignum + 1));
2690 2689 memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
2691 2690 preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1));
2692 2691 memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
2693 2692 }
2694 2693 ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 ));
2695 2694 memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
2696 2695 }
2697 2696
2698 2697 void os::signal_init_pd() {
2699 2698 int ret;
2700 2699
2701 2700 ret = ::sema_init(&sig_sem, 0, NULL, NULL);
2702 2701 assert(ret == 0, "sema_init() failed");
2703 2702 }
2704 2703
2705 2704 void os::signal_notify(int signal_number) {
2706 2705 int ret;
2707 2706
2708 2707 Atomic::inc(&pending_signals[signal_number]);
2709 2708 ret = ::sema_post(&sig_sem);
2710 2709 assert(ret == 0, "sema_post() failed");
2711 2710 }
2712 2711
2713 2712 static int check_pending_signals(bool wait_for_signal) {
2714 2713 int ret;
2715 2714 while (true) {
2716 2715 for (int i = 0; i < Sigexit + 1; i++) {
2717 2716 jint n = pending_signals[i];
2718 2717 if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2719 2718 return i;
2720 2719 }
2721 2720 }
2722 2721 if (!wait_for_signal) {
2723 2722 return -1;
2724 2723 }
2725 2724 JavaThread *thread = JavaThread::current();
2726 2725 ThreadBlockInVM tbivm(thread);
2727 2726
2728 2727 bool threadIsSuspended;
2729 2728 do {
2730 2729 thread->set_suspend_equivalent();
2731 2730 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2732 2731 while((ret = ::sema_wait(&sig_sem)) == EINTR)
2733 2732 ;
2734 2733 assert(ret == 0, "sema_wait() failed");
2735 2734
2736 2735 // were we externally suspended while we were waiting?
2737 2736 threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2738 2737 if (threadIsSuspended) {
2739 2738 //
2740 2739 // The semaphore has been incremented, but while we were waiting
2741 2740 // another thread suspended us. We don't want to continue running
2742 2741 // while suspended because that would surprise the thread that
2743 2742 // suspended us.
2744 2743 //
2745 2744 ret = ::sema_post(&sig_sem);
2746 2745 assert(ret == 0, "sema_post() failed");
2747 2746
2748 2747 thread->java_suspend_self();
2749 2748 }
2750 2749 } while (threadIsSuspended);
2751 2750 }
2752 2751 }
2753 2752
2754 2753 int os::signal_lookup() {
2755 2754 return check_pending_signals(false);
2756 2755 }
2757 2756
2758 2757 int os::signal_wait() {
2759 2758 return check_pending_signals(true);
2760 2759 }
2761 2760
2762 2761 ////////////////////////////////////////////////////////////////////////////////
2763 2762 // Virtual Memory
2764 2763
2765 2764 static int page_size = -1;
2766 2765
2767 2766 // The mmap MAP_ALIGN flag is supported on Solaris 9 and later. init_2() will
2768 2767 // clear this var if support is not available.
2769 2768 static bool has_map_align = true;
2770 2769
2771 2770 int os::vm_page_size() {
2772 2771 assert(page_size != -1, "must call os::init");
2773 2772 return page_size;
2774 2773 }
2775 2774
2776 2775 // Solaris allocates memory by pages.
2777 2776 int os::vm_allocation_granularity() {
2778 2777 assert(page_size != -1, "must call os::init");
2779 2778 return page_size;
2780 2779 }
2781 2780
2782 2781 bool os::commit_memory(char* addr, size_t bytes, bool exec) {
2783 2782 int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2784 2783 size_t size = bytes;
2785 2784 char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
2786 2785 if (res != NULL) {
2787 2786 if (UseNUMAInterleaving) {
2788 2787 numa_make_global(addr, bytes);
2789 2788 }
2790 2789 return true;
2791 2790 }
2792 2791 return false;
2793 2792 }
2794 2793
2795 2794 bool os::commit_memory(char* addr, size_t bytes, size_t alignment_hint,
2796 2795 bool exec) {
2797 2796 if (commit_memory(addr, bytes, exec)) {
2798 2797 if (UseMPSS && alignment_hint > (size_t)vm_page_size()) {
2799 2798 // If the large page size has been set and the VM
2800 2799 // is using large pages, use the large page size
2801 2800 // if it is smaller than the alignment hint. This is
2802 2801 // a case where the VM wants to use a larger alignment size
2803 2802     // for its own reasons but still wants to use large pages
2804 2803     // (which is what matters when setting the mpss range).
2805 2804 size_t page_size = 0;
2806 2805 if (large_page_size() < alignment_hint) {
2807 2806 assert(UseLargePages, "Expected to be here for large page use only");
2808 2807 page_size = large_page_size();
2809 2808 } else {
2810 2809 // If the alignment hint is less than the large page
2811 2810 // size, the VM wants a particular alignment (thus the hint)
2812 2811 // for internal reasons. Try to set the mpss range using
2813 2812 // the alignment_hint.
2814 2813 page_size = alignment_hint;
2815 2814 }
2816 2815 // Since this is a hint, ignore any failures.
2817 2816 (void)Solaris::set_mpss_range(addr, bytes, page_size);
2818 2817 }
2819 2818 return true;
2820 2819 }
2821 2820 return false;
2822 2821 }
2823 2822
2824 2823 // Uncommit the pages in a specified region.
2825 2824 void os::free_memory(char* addr, size_t bytes) {
2826 2825 if (madvise(addr, bytes, MADV_FREE) < 0) {
2827 2826 debug_only(warning("MADV_FREE failed."));
2828 2827 return;
2829 2828 }
2830 2829 }
2831 2830
2832 2831 bool os::create_stack_guard_pages(char* addr, size_t size) {
2833 2832 return os::commit_memory(addr, size);
2834 2833 }
2835 2834
2836 2835 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2837 2836 return os::uncommit_memory(addr, size);
2838 2837 }
2839 2838
2840 2839 // Change the page size in a given range.
2841 2840 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2842 2841 assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
2843 2842 assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
2844 2843 if (UseLargePages && UseMPSS) {
2845 2844 Solaris::set_mpss_range(addr, bytes, alignment_hint);
2846 2845 }
2847 2846 }
2848 2847
2849 2848 // Tell the OS to make the range local to the first-touching LWP
2850 2849 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2851 2850 assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2852 2851 if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
2853 2852 debug_only(warning("MADV_ACCESS_LWP failed."));
2854 2853 }
2855 2854 }
2856 2855
2857 2856 // Tell the OS that this range would be accessed from different LWPs.
2858 2857 void os::numa_make_global(char *addr, size_t bytes) {
2859 2858 assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2860 2859 if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
2861 2860 debug_only(warning("MADV_ACCESS_MANY failed."));
2862 2861 }
2863 2862 }
2864 2863
2865 2864 // Get the number of locality groups.
2866 2865 size_t os::numa_get_groups_num() {
2867 2866 size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
2868 2867 return n != -1 ? n : 1;
2869 2868 }
2870 2869
2871 2870 // Get a list of leaf locality groups. A leaf lgroup is a group that
2872 2871 // doesn't have any children. A typical leaf group is a CPU or a CPU/memory
2873 2872 // board. An LWP is assigned to one of these groups upon creation.
2874 2873 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2875 2874 if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
2876 2875 ids[0] = 0;
2877 2876 return 1;
2878 2877 }
2879 2878 int result_size = 0, top = 1, bottom = 0, cur = 0;
2880 2879 for (int k = 0; k < size; k++) {
2881 2880 int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
2882 2881 (Solaris::lgrp_id_t*)&ids[top], size - top);
2883 2882 if (r == -1) {
2884 2883 ids[0] = 0;
2885 2884 return 1;
2886 2885 }
2887 2886 if (!r) {
2888 2887 // That's a leaf node.
2889 2888 assert (bottom <= cur, "Sanity check");
2890 2889 // Check if the node has memory
2891 2890 if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
2892 2891 NULL, 0, LGRP_RSRC_MEM) > 0) {
2893 2892 ids[bottom++] = ids[cur];
2894 2893 }
2895 2894 }
2896 2895 top += r;
2897 2896 cur++;
2898 2897 }
2899 2898 if (bottom == 0) {
2900 2899     // Handle the situation when the OS reports no memory is available.
2901 2900 // Assume UMA architecture.
2902 2901 ids[0] = 0;
2903 2902 return 1;
2904 2903 }
2905 2904 return bottom;
2906 2905 }
2907 2906
2908 2907 // Detect a topology change. This typically happens during CPU plugging/unplugging.
2909 2908 bool os::numa_topology_changed() {
2910 2909 int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
2911 2910 if (is_stale != -1 && is_stale) {
2912 2911 Solaris::lgrp_fini(Solaris::lgrp_cookie());
2913 2912 Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
2914 2913 assert(c != 0, "Failure to initialize LGRP API");
2915 2914 Solaris::set_lgrp_cookie(c);
2916 2915 return true;
2917 2916 }
2918 2917 return false;
2919 2918 }
2920 2919
2921 2920 // Get the group id of the current LWP.
2922 2921 int os::numa_get_group_id() {
2923 2922 int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
2924 2923 if (lgrp_id == -1) {
2925 2924 return 0;
2926 2925 }
2927 2926 const int size = os::numa_get_groups_num();
2928 2927 int *ids = (int*)alloca(size * sizeof(int));
2929 2928
2930 2929 // Get the ids of all lgroups with memory; r is the count.
2931 2930 int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
2932 2931 (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
2933 2932 if (r <= 0) {
2934 2933 return 0;
2935 2934 }
2936 2935 return ids[os::random() % r];
2937 2936 }
2938 2937
2939 2938 // Request information about the page.
2940 2939 bool os::get_page_info(char *start, page_info* info) {
2941 2940 const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2942 2941 uint64_t addr = (uintptr_t)start;
2943 2942 uint64_t outdata[2];
2944 2943 uint_t validity = 0;
2945 2944
2946 2945 if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
2947 2946 return false;
2948 2947 }
2949 2948
2950 2949 info->size = 0;
2951 2950 info->lgrp_id = -1;
2952 2951
2953 2952 if ((validity & 1) != 0) {
2954 2953 if ((validity & 2) != 0) {
2955 2954 info->lgrp_id = outdata[0];
2956 2955 }
2957 2956 if ((validity & 4) != 0) {
2958 2957 info->size = outdata[1];
2959 2958 }
2960 2959 return true;
2961 2960 }
2962 2961 return false;
2963 2962 }
2964 2963
2965 2964 // Scan the pages from start to end until a page different than
2966 2965 // the one described in the info parameter is encountered.
2967 2966 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2968 2967 const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2969 2968 const size_t types = sizeof(info_types) / sizeof(info_types[0]);
2970 2969 uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT];
2971 2970 uint_t validity[MAX_MEMINFO_CNT];
2972 2971
2973 2972 size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
2974 2973 uint64_t p = (uint64_t)start;
2975 2974 while (p < (uint64_t)end) {
2976 2975 addrs[0] = p;
2977 2976 size_t addrs_count = 1;
2978 2977 while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] < (uint64_t)end) {
2979 2978 addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
2980 2979 addrs_count++;
2981 2980 }
2982 2981
2983 2982 if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
2984 2983 return NULL;
2985 2984 }
2986 2985
2987 2986 size_t i = 0;
2988 2987 for (; i < addrs_count; i++) {
2989 2988 if ((validity[i] & 1) != 0) {
2990 2989 if ((validity[i] & 4) != 0) {
2991 2990 if (outdata[types * i + 1] != page_expected->size) {
2992 2991 break;
2993 2992 }
2994 2993 } else
2995 2994 if (page_expected->size != 0) {
2996 2995 break;
2997 2996 }
2998 2997
2999 2998 if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
3000 2999 if (outdata[types * i] != page_expected->lgrp_id) {
3001 3000 break;
3002 3001 }
3003 3002 }
3004 3003 } else {
3005 3004 return NULL;
3006 3005 }
3007 3006 }
3008 3007
3009 3008 if (i != addrs_count) {
3010 3009 if ((validity[i] & 2) != 0) {
3011 3010 page_found->lgrp_id = outdata[types * i];
3012 3011 } else {
3013 3012 page_found->lgrp_id = -1;
3014 3013 }
3015 3014 if ((validity[i] & 4) != 0) {
3016 3015 page_found->size = outdata[types * i + 1];
3017 3016 } else {
3018 3017 page_found->size = 0;
3019 3018 }
3020 3019 return (char*)addrs[i];
3021 3020 }
3022 3021
3023 3022 p = addrs[addrs_count - 1] + page_size;
3024 3023 }
3025 3024 return end;
3026 3025 }
3027 3026
3028 3027 bool os::uncommit_memory(char* addr, size_t bytes) {
3029 3028 size_t size = bytes;
3030 3029 // Map uncommitted pages PROT_NONE so we fail early if we touch an
3031 3030 // uncommitted page. Otherwise, the read/write might succeed if we
3032 3031 // have enough swap space to back the physical page.
3033 3032 return
3034 3033 NULL != Solaris::mmap_chunk(addr, size,
3035 3034 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
3036 3035 PROT_NONE);
3037 3036 }
3038 3037
3039 3038 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
3040 3039 char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
3041 3040
3042 3041 if (b == MAP_FAILED) {
3043 3042 return NULL;
3044 3043 }
3045 3044 return b;
3046 3045 }
3047 3046
3048 3047 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
3049 3048 char* addr = requested_addr;
3050 3049 int flags = MAP_PRIVATE | MAP_NORESERVE;
3051 3050
3052 3051 assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");
3053 3052
3054 3053 if (fixed) {
3055 3054 flags |= MAP_FIXED;
3056 3055 } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
3057 3056 flags |= MAP_ALIGN;
3058 3057 addr = (char*) alignment_hint;
3059 3058 }
3060 3059
3061 3060 // Map uncommitted pages PROT_NONE so we fail early if we touch an
3062 3061 // uncommitted page. Otherwise, the read/write might succeed if we
3063 3062 // have enough swap space to back the physical page.
3064 3063 return mmap_chunk(addr, bytes, flags, PROT_NONE);
3065 3064 }
3066 3065
3067 3066 char* os::reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
3068 3067 char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));
3069 3068
3070 3069 guarantee(requested_addr == NULL || requested_addr == addr,
3071 3070 "OS failed to return requested mmap address.");
3072 3071 return addr;
3073 3072 }
3074 3073
3075 3074 // Reserve memory at an arbitrary address, only if that area is
3076 3075 // available (and not reserved for something else).
3077 3076
3078 3077 char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3079 3078 const int max_tries = 10;
3080 3079 char* base[max_tries];
3081 3080 size_t size[max_tries];
3082 3081
3083 3082 // Solaris adds a gap between mmap'ed regions. The size of the gap
3084 3083 // is dependent on the requested size and the MMU. Our initial gap
3085 3084 // value here is just a guess and will be corrected later.
3086 3085 bool had_top_overlap = false;
3087 3086 bool have_adjusted_gap = false;
3088 3087 size_t gap = 0x400000;
3089 3088
3090 3089 // Assert only that the size is a multiple of the page size, since
3091 3090 // that's all that mmap requires, and since that's all we really know
3092 3091 // about at this low abstraction level. If we need higher alignment,
3093 3092 // we can either pass an alignment to this method or verify alignment
3094 3093 // in one of the methods further up the call chain. See bug 5044738.
3095 3094 assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
3096 3095
3097 3096 // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
3098 3097 // Give it a try, if the kernel honors the hint we can return immediately.
3099 3098 char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
3100 3099 volatile int err = errno;
3101 3100 if (addr == requested_addr) {
3102 3101 return addr;
3103 3102 } else if (addr != NULL) {
3104 3103 unmap_memory(addr, bytes);
3105 3104 }
3106 3105
3107 3106 if (PrintMiscellaneous && Verbose) {
3108 3107 char buf[256];
3109 3108 buf[0] = '\0';
3110 3109 if (addr == NULL) {
3111 3110 jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
3112 3111 }
3113 3112 warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
3114 3113 PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
3115 3114 "%s", bytes, requested_addr, addr, buf);
3116 3115 }
3117 3116
3118 3117 // Address hint method didn't work. Fall back to the old method.
3119 3118 // In theory, once SNV becomes our oldest supported platform, this
3120 3119 // code will no longer be needed.
3121 3120 //
3122 3121 // Repeatedly allocate blocks until the block is allocated at the
3123 3122 // right spot. Give up after max_tries.
3124 3123 int i;
3125 3124 for (i = 0; i < max_tries; ++i) {
3126 3125 base[i] = reserve_memory(bytes);
3127 3126
3128 3127 if (base[i] != NULL) {
3129 3128 // Is this the block we wanted?
3130 3129 if (base[i] == requested_addr) {
3131 3130 size[i] = bytes;
3132 3131 break;
3133 3132 }
3134 3133
3135 3134 // check that the gap value is right
3136 3135 if (had_top_overlap && !have_adjusted_gap) {
3137 3136 size_t actual_gap = base[i-1] - base[i] - bytes;
3138 3137 if (gap != actual_gap) {
3139 3138 // adjust the gap value and retry the last 2 allocations
3140 3139 assert(i > 0, "gap adjustment code problem");
3141 3140 have_adjusted_gap = true; // adjust the gap only once, just in case
3142 3141 gap = actual_gap;
3143 3142 if (PrintMiscellaneous && Verbose) {
3144 3143 warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
3145 3144 }
3146 3145 unmap_memory(base[i], bytes);
3147 3146 unmap_memory(base[i-1], size[i-1]);
3148 3147 i-=2;
3149 3148 continue;
3150 3149 }
3151 3150 }
3152 3151
3153 3152 // Does this overlap the block we wanted? Give back the overlapped
3154 3153 // parts and try again.
3155 3154 //
3156 3155 // There is still a bug in this code: if top_overlap == bytes,
3157 3156 // the overlap is offset from requested region by the value of gap.
3158 3157 // In this case giving back the overlapped part will not work,
3159 3158 // because we'll give back the entire block at base[i] and
3160 3159 // therefore the subsequent allocation will not generate a new gap.
3161 3160 // This could be fixed with a new algorithm that used larger
3162 3161 // or variable size chunks to find the requested region -
3163 3162 // but such a change would introduce additional complications.
3164 3163 // It's rare enough that the planets align for this bug,
3165 3164 // so we'll just wait for a fix for 6204603/5003415 which
3166 3165 // will provide a mmap flag to allow us to avoid this business.
3167 3166
3168 3167 size_t top_overlap = requested_addr + (bytes + gap) - base[i];
3169 3168 if (top_overlap >= 0 && top_overlap < bytes) {
3170 3169 had_top_overlap = true;
3171 3170 unmap_memory(base[i], top_overlap);
3172 3171 base[i] += top_overlap;
3173 3172 size[i] = bytes - top_overlap;
3174 3173 } else {
3175 3174 size_t bottom_overlap = base[i] + bytes - requested_addr;
3176 3175 if (bottom_overlap >= 0 && bottom_overlap < bytes) {
3177 3176 if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
3178 3177 warning("attempt_reserve_memory_at: possible alignment bug");
3179 3178 }
3180 3179 unmap_memory(requested_addr, bottom_overlap);
3181 3180 size[i] = bytes - bottom_overlap;
3182 3181 } else {
3183 3182 size[i] = bytes;
3184 3183 }
3185 3184 }
3186 3185 }
3187 3186 }
3188 3187
3189 3188 // Give back the unused reserved pieces.
3190 3189
3191 3190 for (int j = 0; j < i; ++j) {
3192 3191 if (base[j] != NULL) {
3193 3192 unmap_memory(base[j], size[j]);
3194 3193 }
3195 3194 }
3196 3195
3197 3196 return (i < max_tries) ? requested_addr : NULL;
3198 3197 }
3199 3198
3200 3199 bool os::release_memory(char* addr, size_t bytes) {
3201 3200 size_t size = bytes;
3202 3201 return munmap(addr, size) == 0;
3203 3202 }
3204 3203
3205 3204 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
3206 3205 assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
3207 3206 "addr must be page aligned");
3208 3207 int retVal = mprotect(addr, bytes, prot);
3209 3208 return retVal == 0;
3210 3209 }
3211 3210
3212 3211 // Protect memory (Used to pass readonly pages through
3213 3212 // JNI GetArray<type>Elements with empty arrays.)
3214 3213 // Also, used for serialization page and for compressed oops null pointer
3215 3214 // checking.
3216 3215 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3217 3216 bool is_committed) {
3218 3217 unsigned int p = 0;
3219 3218 switch (prot) {
3220 3219 case MEM_PROT_NONE: p = PROT_NONE; break;
3221 3220 case MEM_PROT_READ: p = PROT_READ; break;
3222 3221 case MEM_PROT_RW: p = PROT_READ|PROT_WRITE; break;
3223 3222 case MEM_PROT_RWX: p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
3224 3223 default:
3225 3224 ShouldNotReachHere();
3226 3225 }
3227 3226 // is_committed is unused.
3228 3227 return solaris_mprotect(addr, bytes, p);
3229 3228 }
3230 3229
3231 3230 // guard_memory and unguard_memory only happen within stack guard pages.
3232 3231 // Since ISM pertains only to the heap, guard and unguard memory should not
3233 3232 // happen within an ISM region.
3234 3233 bool os::guard_memory(char* addr, size_t bytes) {
3235 3234 return solaris_mprotect(addr, bytes, PROT_NONE);
3236 3235 }
3237 3236
3238 3237 bool os::unguard_memory(char* addr, size_t bytes) {
3239 3238 return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
3240 3239 }
3241 3240
3242 3241 // Large page support
3243 3242
3244 3243 // UseLargePages is the master flag to enable/disable large page memory.
3245 3244 // UseMPSS and UseISM are supported for compatibility reasons. Their combined
3246 3245 // effects can be described in the following table:
3247 3246 //
3248 3247 // UseLargePages UseMPSS UseISM
3249 3248 // false * * => UseLargePages is the master switch, turning
3250 3249 // it off will turn off both UseMPSS and
3251 3250 // UseISM. VM will not use large page memory
3252 3251 //                                            regardless of the settings of UseMPSS/UseISM.
3253 3252 // true false false => Unless future Solaris provides other
3254 3253 // mechanism to use large page memory, this
3255 3254 // combination is equivalent to -UseLargePages,
3256 3255 // VM will not use large page memory
3257 3256 // true true false => JVM will use MPSS for large page memory.
3258 3257 // This is the default behavior.
3259 3258 // true false true => JVM will use ISM for large page memory.
3260 3259 // true true true => JVM will use ISM if it is available.
3261 3260 // Otherwise, JVM will fall back to MPSS.
3262 3261 //                                            Because ISM is now available on all
3263 3262 // supported Solaris versions, this combination
3264 3263 // is equivalent to +UseISM -UseMPSS.
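Editor's note: a compact way to read the table above is that UseLargePages gates everything and, when both mechanisms are requested, ISM wins (since it is available on all supported Solaris releases). The sketch below restates that decision as a standalone helper; the enum and function names are illustrative and not part of the VM.

#include <cstdio>

enum LargePageMechanism { LP_NONE, LP_MPSS, LP_ISM };  // illustrative names

// Restates the UseLargePages/UseMPSS/UseISM table as a pure function.
static LargePageMechanism pick_mechanism(bool use_large_pages, bool use_mpss, bool use_ism) {
  if (!use_large_pages) return LP_NONE;     // master switch off
  if (use_ism)          return LP_ISM;      // ISM preferred when requested
  if (use_mpss)         return LP_MPSS;     // default large-page mechanism
  return LP_NONE;                           // +UseLargePages alone selects nothing
}

int main() {
  std::printf("%d\n", pick_mechanism(true,  true, false));  // 1 (MPSS, the default)
  std::printf("%d\n", pick_mechanism(true,  true, true));   // 2 (ISM preferred)
  std::printf("%d\n", pick_mechanism(false, true, true));   // 0 (master switch off)
  return 0;
}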
3265 3264
3266 3265 static size_t _large_page_size = 0;
3267 3266
3268 3267 bool os::Solaris::ism_sanity_check(bool warn, size_t * page_size) {
3269 3268   // x86 uses either 2M or 4M pages, depending on whether PAE (Physical Address
3270 3269   // Extensions) mode is enabled. AMD64/EM64T uses 2M pages in 64-bit mode. SPARC
3271 3270 // can support multiple page sizes.
3272 3271
3273 3272 // Don't bother to probe page size because getpagesizes() comes with MPSS.
3274 3273 // ISM is only recommended on old Solaris where there is no MPSS support.
3275 3274   // Simply choose a conservative value as the default.
3276 3275 *page_size = LargePageSizeInBytes ? LargePageSizeInBytes :
3277 3276 SPARC_ONLY(4 * M) IA32_ONLY(4 * M) AMD64_ONLY(2 * M)
3278 3277 ARM_ONLY(2 * M);
3279 3278
3280 3279 // ISM is available on all supported Solaris versions
3281 3280 return true;
3282 3281 }
3283 3282
3284 3283 // Insertion sort for small arrays (descending order).
3285 3284 static void insertion_sort_descending(size_t* array, int len) {
3286 3285 for (int i = 0; i < len; i++) {
3287 3286 size_t val = array[i];
3288 3287 for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
3289 3288 size_t tmp = array[key];
3290 3289 array[key] = array[key - 1];
3291 3290 array[key - 1] = tmp;
3292 3291 }
3293 3292 }
3294 3293 }
3295 3294
3296 3295 bool os::Solaris::mpss_sanity_check(bool warn, size_t * page_size) {
3297 3296 const unsigned int usable_count = VM_Version::page_size_count();
3298 3297 if (usable_count == 1) {
3299 3298 return false;
3300 3299 }
3301 3300
3302 3301   // Find the right getpagesizes interface. When Solaris 11 is the minimum
3303 3302 // build platform, getpagesizes() (without the '2') can be called directly.
3304 3303 typedef int (*gps_t)(size_t[], int);
3305 3304 gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
3306 3305 if (gps_func == NULL) {
3307 3306 gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
3308 3307 if (gps_func == NULL) {
3309 3308 if (warn) {
3310 3309 warning("MPSS is not supported by the operating system.");
3311 3310 }
3312 3311 return false;
3313 3312 }
3314 3313 }
3315 3314
3316 3315 // Fill the array of page sizes.
3317 3316 int n = (*gps_func)(_page_sizes, page_sizes_max);
3318 3317 assert(n > 0, "Solaris bug?");
3319 3318
3320 3319 if (n == page_sizes_max) {
3321 3320 // Add a sentinel value (necessary only if the array was completely filled
3322 3321 // since it is static (zeroed at initialization)).
3323 3322 _page_sizes[--n] = 0;
3324 3323 DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
3325 3324 }
3326 3325 assert(_page_sizes[n] == 0, "missing sentinel");
3327 3326 trace_page_sizes("available page sizes", _page_sizes, n);
3328 3327
3329 3328 if (n == 1) return false; // Only one page size available.
3330 3329
3331 3330 // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
3332 3331 // select up to usable_count elements. First sort the array, find the first
3333 3332 // acceptable value, then copy the usable sizes to the top of the array and
3334 3333 // trim the rest. Make sure to include the default page size :-).
3335 3334 //
3336 3335 // A better policy could get rid of the 4M limit by taking the sizes of the
3337 3336 // important VM memory regions (java heap and possibly the code cache) into
3338 3337 // account.
3339 3338 insertion_sort_descending(_page_sizes, n);
3340 3339 const size_t size_limit =
3341 3340 FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
3342 3341 int beg;
3343 3342 for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
3344 3343 const int end = MIN2((int)usable_count, n) - 1;
3345 3344 for (int cur = 0; cur < end; ++cur, ++beg) {
3346 3345 _page_sizes[cur] = _page_sizes[beg];
3347 3346 }
3348 3347 _page_sizes[end] = vm_page_size();
3349 3348 _page_sizes[end + 1] = 0;
3350 3349
3351 3350 if (_page_sizes[end] > _page_sizes[end - 1]) {
3352 3351 // Default page size is not the smallest; sort again.
3353 3352 insertion_sort_descending(_page_sizes, end + 1);
3354 3353 }
3355 3354 *page_size = _page_sizes[0];
3356 3355
3357 3356 trace_page_sizes("usable page sizes", _page_sizes, end + 1);
3358 3357 return true;
3359 3358 }
3360 3359
3361 3360 void os::large_page_init() {
3362 3361 if (!UseLargePages) {
3363 3362 UseISM = false;
3364 3363 UseMPSS = false;
3365 3364 return;
3366 3365 }
3367 3366
3368 3366   // print a warning if any large-page-related flag is specified on the command line
3369 3368 bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3370 3369 !FLAG_IS_DEFAULT(UseISM) ||
3371 3370 !FLAG_IS_DEFAULT(UseMPSS) ||
3372 3371 !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3373 3372 UseISM = UseISM &&
3374 3373 Solaris::ism_sanity_check(warn_on_failure, &_large_page_size);
3375 3374 if (UseISM) {
3376 3375 // ISM disables MPSS to be compatible with old JDK behavior
3377 3376 UseMPSS = false;
3378 3377 _page_sizes[0] = _large_page_size;
3379 3378 _page_sizes[1] = vm_page_size();
3380 3379 }
3381 3380
3382 3381 UseMPSS = UseMPSS &&
3383 3382 Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
3384 3383
3385 3384 UseLargePages = UseISM || UseMPSS;
3386 3385 }
3387 3386
3388 3387 bool os::Solaris::set_mpss_range(caddr_t start, size_t bytes, size_t align) {
3389 3388   // Signal to the OS that we want large pages for addresses
3390 3389   // from addr to addr + bytes.
3391 3390 struct memcntl_mha mpss_struct;
3392 3391 mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
3393 3392 mpss_struct.mha_pagesize = align;
3394 3393 mpss_struct.mha_flags = 0;
3395 3394 if (memcntl(start, bytes, MC_HAT_ADVISE,
3396 3395 (caddr_t) &mpss_struct, 0, 0) < 0) {
3397 3396 debug_only(warning("Attempt to use MPSS failed."));
3398 3397 return false;
3399 3398 }
3400 3399 return true;
3401 3400 }
3402 3401
3403 3402 char* os::reserve_memory_special(size_t size, char* addr, bool exec) {
3404 3403 // "exec" is passed in but not used. Creating the shared image for
3405 3404 // the code cache doesn't have an SHM_X executable permission to check.
3406 3405 assert(UseLargePages && UseISM, "only for ISM large pages");
3407 3406
3408 3407 char* retAddr = NULL;
3409 3408 int shmid;
3410 3409 key_t ismKey;
3411 3410
3412 3411 bool warn_on_failure = UseISM &&
3413 3412 (!FLAG_IS_DEFAULT(UseLargePages) ||
3414 3413 !FLAG_IS_DEFAULT(UseISM) ||
3415 3414 !FLAG_IS_DEFAULT(LargePageSizeInBytes)
3416 3415 );
3417 3416 char msg[128];
3418 3417
3419 3418 ismKey = IPC_PRIVATE;
3420 3419
3421 3420 // Create a large shared memory region to attach to based on size.
3422 3421 // Currently, size is the total size of the heap
3423 3422 shmid = shmget(ismKey, size, SHM_R | SHM_W | IPC_CREAT);
3424 3423 if (shmid == -1){
3425 3424 if (warn_on_failure) {
3426 3425 jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno);
3427 3426 warning(msg);
3428 3427 }
3429 3428 return NULL;
3430 3429 }
3431 3430
3432 3431 // Attach to the region
3433 3432 retAddr = (char *) shmat(shmid, 0, SHM_SHARE_MMU | SHM_R | SHM_W);
3434 3433 int err = errno;
3435 3434
3436 3435 // Remove shmid. If shmat() is successful, the actual shared memory segment
3437 3436 // will be deleted when it's detached by shmdt() or when the process
3438 3437 // terminates. If shmat() is not successful this will remove the shared
3439 3438 // segment immediately.
3440 3439 shmctl(shmid, IPC_RMID, NULL);
3441 3440
3442 3441 if (retAddr == (char *) -1) {
3443 3442 if (warn_on_failure) {
3444 3443 jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
3445 3444 warning(msg);
3446 3445 }
3447 3446 return NULL;
3448 3447 }
3449 3448 if ((retAddr != NULL) && UseNUMAInterleaving) {
3450 3449 numa_make_global(retAddr, size);
3451 3450 }
3452 3451 return retAddr;
3453 3452 }
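Editor's note: the shmctl(IPC_RMID) call above relies on a standard System V shared memory idiom: marking the segment for removal right after shmat() means it disappears automatically on the last detach (or on process exit), so no failure path can leak it. A minimal standalone sketch of that lifecycle, without the ISM/large-page specifics (SHM_SHARE_MMU is intentionally omitted):

#include <sys/ipc.h>
#include <sys/shm.h>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t size = 1024 * 1024;
  // Create an anonymous segment; IPC_PRIVATE always yields a new one.
  int shmid = shmget(IPC_PRIVATE, size, SHM_R | SHM_W | IPC_CREAT);
  if (shmid == -1) { std::perror("shmget"); return 1; }

  void* base = shmat(shmid, NULL, 0);

  // Mark for removal immediately. If shmat() succeeded, the segment lives
  // until the last shmdt(); if it failed, the segment is reclaimed right away.
  shmctl(shmid, IPC_RMID, NULL);

  if (base == (void*)-1) { std::perror("shmat"); return 1; }
  std::printf("attached at %p\n", base);
  shmdt(base);  // last detach frees the segment
  return 0;
}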
3454 3453
3455 3454 bool os::release_memory_special(char* base, size_t bytes) {
3456 3455 // detaching the SHM segment will also delete it, see reserve_memory_special()
3457 3456 int rslt = shmdt(base);
3458 3457 return rslt == 0;
3459 3458 }
3460 3459
3461 3460 size_t os::large_page_size() {
3462 3461 return _large_page_size;
3463 3462 }
3464 3463
3465 3464 // MPSS allows the application to commit large page memory on demand; with ISM
3466 3465 // the entire memory region must be allocated as shared memory.
3467 3466 bool os::can_commit_large_page_memory() {
3468 3467 return UseISM ? false : true;
3469 3468 }
3470 3469
3471 3470 bool os::can_execute_large_page_memory() {
3472 3471 return UseISM ? false : true;
3473 3472 }
3474 3473
3475 3474 static int os_sleep(jlong millis, bool interruptible) {
3476 3475 const jlong limit = INT_MAX;
3477 3476 jlong prevtime;
3478 3477 int res;
3479 3478
3480 3479 while (millis > limit) {
3481 3480 if ((res = os_sleep(limit, interruptible)) != OS_OK)
3482 3481 return res;
3483 3482 millis -= limit;
3484 3483 }
3485 3484
3486 3485 // Restart interrupted polls with new parameters until the proper delay
3487 3486 // has been completed.
3488 3487
3489 3488 prevtime = getTimeMillis();
3490 3489
3491 3490 while (millis > 0) {
3492 3491 jlong newtime;
3493 3492
3494 3493 if (!interruptible) {
3495 3494 // Following assert fails for os::yield_all:
3496 3495 // assert(!thread->is_Java_thread(), "must not be java thread");
3497 3496 res = poll(NULL, 0, millis);
3498 3497 } else {
3499 3498 JavaThread *jt = JavaThread::current();
3500 3499
3501 3500 INTERRUPTIBLE_NORESTART_VM_ALWAYS(poll(NULL, 0, millis), res, jt,
3502 3501 os::Solaris::clear_interrupted);
3503 3502 }
3504 3503
3505 3504 // INTERRUPTIBLE_NORESTART_VM_ALWAYS returns res == OS_INTRPT for
3506 3505 // thread.Interrupt.
3507 3506
3508 3507     // See c/r 6751923. poll() can return 0 before the requested time
3509 3508     // has elapsed if the time is set via clock_settime (as NTP does).
3510 3509     // res == 0 means poll timed out (see man poll RETURN VALUES);
3511 3510     // the logic below checks that we really did sleep for at least
3512 3511     // "millis" and, if not, sleeps again.
3513 3512 if( ( res == 0 ) || ((res == OS_ERR) && (errno == EINTR))) {
3514 3513 newtime = getTimeMillis();
3515 3514 assert(newtime >= prevtime, "time moving backwards");
3516 3515 /* Doing prevtime and newtime in microseconds doesn't help precision,
3517 3516 and trying to round up to avoid lost milliseconds can result in a
3518 3517 too-short delay. */
3519 3518 millis -= newtime - prevtime;
3520 3519 if(millis <= 0)
3521 3520 return OS_OK;
3522 3521 prevtime = newtime;
3523 3522 } else
3524 3523 return res;
3525 3524 }
3526 3525
3527 3526 return OS_OK;
3528 3527 }
3529 3528
3530 3529 // Read calls from inside the VM need to perform state transitions.
3531 3530 size_t os::read(int fd, void *buf, unsigned int nBytes) {
3532 3531 INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
3533 3532 }
3534 3533
3535 3534 size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
3536 3535 INTERRUPTIBLE_RETURN_INT(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
3537 3536 }
3538 3537
3539 3538 int os::sleep(Thread* thread, jlong millis, bool interruptible) {
3540 3539 assert(thread == Thread::current(), "thread consistency check");
3541 3540
3542 3541 // TODO-FIXME: this should be removed.
3543 3542   // On Solaris machines (especially 2.5.1) we found that sometimes the VM gets into a livelock
3544 3543   // situation with a JavaThread being starved out of an lwp. The kernel doesn't seem to generate
3545 3544 // a SIGWAITING signal which would enable the threads library to create a new lwp for the starving
3546 3545 // thread. We suspect that because the Watcher thread keeps waking up at periodic intervals the kernel
3547 3546   // is fooled into believing that the system is making progress. In the code below we block
3548 3547   // the watcher thread while a safepoint is in progress so that it does not appear as though the
3549 3548 // system is making progress.
3550 3549 if (!Solaris::T2_libthread() &&
3551 3550 thread->is_Watcher_thread() && SafepointSynchronize::is_synchronizing() && !Arguments::has_profile()) {
3552 3551 // We now try to acquire the threads lock. Since this lock is held by the VM thread during
3553 3552 // the entire safepoint, the watcher thread will line up here during the safepoint.
3554 3553 Threads_lock->lock_without_safepoint_check();
3555 3554 Threads_lock->unlock();
3556 3555 }
3557 3556
3558 3557 if (thread->is_Java_thread()) {
3559 3558 // This is a JavaThread so we honor the _thread_blocked protocol
3560 3559 // even for sleeps of 0 milliseconds. This was originally done
3561 3560 // as a workaround for bug 4338139. However, now we also do it
3562 3561 // to honor the suspend-equivalent protocol.
3563 3562
3564 3563 JavaThread *jt = (JavaThread *) thread;
3565 3564 ThreadBlockInVM tbivm(jt);
3566 3565
3567 3566 jt->set_suspend_equivalent();
3568 3567 // cleared by handle_special_suspend_equivalent_condition() or
3569 3568 // java_suspend_self() via check_and_wait_while_suspended()
3570 3569
3571 3570 int ret_code;
3572 3571 if (millis <= 0) {
3573 3572 thr_yield();
3574 3573 ret_code = 0;
3575 3574 } else {
3576 3575 // The original sleep() implementation did not create an
3577 3576 // OSThreadWaitState helper for sleeps of 0 milliseconds.
3578 3577 // I'm preserving that decision for now.
3579 3578 OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
3580 3579
3581 3580 ret_code = os_sleep(millis, interruptible);
3582 3581 }
3583 3582
3584 3583 // were we externally suspended while we were waiting?
3585 3584 jt->check_and_wait_while_suspended();
3586 3585
3587 3586 return ret_code;
3588 3587 }
3589 3588
3590 3589 // non-JavaThread from this point on:
3591 3590
3592 3591 if (millis <= 0) {
3593 3592 thr_yield();
3594 3593 return 0;
3595 3594 }
3596 3595
3597 3596 OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
3598 3597
3599 3598 return os_sleep(millis, interruptible);
3600 3599 }
3601 3600
3602 3601 int os::naked_sleep() {
3603 3602 // %% make the sleep time an integer flag. for now use 1 millisec.
3604 3603 return os_sleep(1, false);
3605 3604 }
3606 3605
3607 3606 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3608 3607 void os::infinite_sleep() {
3609 3608 while (true) { // sleep forever ...
3610 3609 ::sleep(100); // ... 100 seconds at a time
3611 3610 }
3612 3611 }
3613 3612
3614 3613 // Used to convert frequent JVM_Yield() to nops
3615 3614 bool os::dont_yield() {
3616 3615 if (DontYieldALot) {
3617 3616 static hrtime_t last_time = 0;
3618 3617 hrtime_t diff = getTimeNanos() - last_time;
3619 3618
3620 3619 if (diff < DontYieldALotInterval * 1000000)
3621 3620 return true;
3622 3621
3623 3622 last_time += diff;
3624 3623
3625 3624 return false;
3626 3625 }
3627 3626 else {
3628 3627 return false;
3629 3628 }
3630 3629 }
3631 3630
3632 3631 // Caveat: Solaris os::yield() causes a thread-state transition whereas
3633 3632 // the linux and win32 implementations do not. This should be checked.
3634 3633
3635 3634 void os::yield() {
3636 3635 // Yields to all threads with same or greater priority
3637 3636 os::sleep(Thread::current(), 0, false);
3638 3637 }
3639 3638
3640 3639 // Note that yield semantics are defined by the scheduling class to which
3641 3640 // the thread currently belongs. Typically, yield will not yield to
3642 3641 // other equal or higher priority threads that reside on the dispatch queues
3643 3642 // of other CPUs.
3644 3643
3645 3644 os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }
3646 3645
3647 3646
3648 3647 // On Solaris we found that yield_all doesn't always yield to all other threads.
3649 3648 // There have been cases where there is a thread ready to execute but it doesn't
3650 3649 // get an lwp as the VM thread continues to spin with sleeps of 1 millisecond.
3651 3650 // The 1 millisecond wait doesn't seem long enough for the kernel to issue a
3652 3651 // SIGWAITING signal which will cause a new lwp to be created. So we count the
3653 3652 // number of times yield_all is called in the loop and increase the sleep
3654 3653 // time after 8 attempts. If this fails too, we increase the concurrency level
3655 3654 // so that the starving thread will get an lwp.
3656 3655
3657 3656 void os::yield_all(int attempts) {
3658 3657 // Yields to all threads, including threads with lower priorities
3659 3658 if (attempts == 0) {
3660 3659 os::sleep(Thread::current(), 1, false);
3661 3660 } else {
3662 3661 int iterations = attempts % 30;
3663 3662 if (iterations == 0 && !os::Solaris::T2_libthread()) {
3664 3663 // thr_setconcurrency and _getconcurrency make sense only under T1.
3665 3664 int noofLWPS = thr_getconcurrency();
3666 3665 if (noofLWPS < (Threads::number_of_threads() + 2)) {
3667 3666 thr_setconcurrency(thr_getconcurrency() + 1);
3668 3667 }
3669 3668 } else if (iterations < 25) {
3670 3669 os::sleep(Thread::current(), 1, false);
3671 3670 } else {
3672 3671 os::sleep(Thread::current(), 10, false);
3673 3672 }
3674 3673 }
3675 3674 }
3676 3675
3677 3676 // Called from the tight loops to possibly influence time-sharing heuristics
3678 3677 void os::loop_breaker(int attempts) {
3679 3678 os::yield_all(attempts);
3680 3679 }
3681 3680
3682 3681
3683 3682 // Interface for setting lwp priorities. If we are using T2 libthread,
3684 3683 // which forces the use of BoundThreads or we manually set UseBoundThreads,
3685 3684 // all of our threads will be assigned to real lwp's. Using the thr_setprio
3686 3685 // function is meaningless in this mode, so we must adjust the real lwp's priority.
3687 3686 // The routines below implement the getting and setting of lwp priorities.
3688 3687 //
3689 3688 // Note: There are three priority scales used on Solaris: Java priorities,
3690 3689 // which range from 1 to 10; the libthread "thr_setprio" scale, which ranges
3691 3690 // from 0 to 127; and the scale of the current scheduling class of the process
3692 3691 // we are running in, which is typically from -60 to +60.
3693 3692 // The setting of the lwp priorities is done after a call to thr_setprio
3694 3693 // so Java priorities are mapped to libthread priorities and we map from
3695 3694 // the latter to lwp priorities. We don't keep priorities stored in
3696 3695 // Java priorities since some of our worker threads want to set priorities
3697 3696 // higher than all Java threads.
3698 3697 //
3699 3698 // For related information:
3700 3699 // (1) man -s 2 priocntl
3701 3700 // (2) man -s 4 priocntl
3702 3701 // (3) man dispadmin
3703 3702 // = librt.so
3704 3703 // = libthread/common/rtsched.c - thrp_setlwpprio().
3705 3704 // = ps -cL <pid> ... to validate priority.
3706 3705 // = sched_get_priority_min and _max
3707 3706 // pthread_create
3708 3707 // sched_setparam
3709 3708 // pthread_setschedparam
3710 3709 //
3711 3710 // Assumptions:
3712 3711 // + We assume that all threads in the process belong to the same
3713 3712 //      scheduling class. I.e., a homogeneous process.
3714 3713 //    + Must be root or in the IA group to change the "interactive" attribute.
3715 3714 // Priocntl() will fail silently. The only indication of failure is when
3716 3715 // we read-back the value and notice that it hasn't changed.
3717 3716 // + Interactive threads enter the runq at the head, non-interactive at the tail.
3718 3717 // + For RT, change timeslice as well. Invariant:
3719 3718 // constant "priority integral"
3720 3719 // Konst == TimeSlice * (60-Priority)
3721 3720 // Given a priority, compute appropriate timeslice.
3722 3721 // + Higher numerical values have higher priority.
3723 3722
3724 3723 // sched class attributes
3725 3724 typedef struct {
3726 3725 int schedPolicy; // classID
3727 3726 int maxPrio;
3728 3727 int minPrio;
3729 3728 } SchedInfo;
3730 3729
3731 3730
3732 3731 static SchedInfo tsLimits, iaLimits, rtLimits;
3733 3732
3734 3733 #ifdef ASSERT
3735 3734 static int ReadBackValidate = 1;
3736 3735 #endif
3737 3736 static int myClass = 0;
3738 3737 static int myMin = 0;
3739 3738 static int myMax = 0;
3740 3739 static int myCur = 0;
3741 3740 static bool priocntl_enable = false;
3742 3741
3743 3742
3744 3743 // Call the version of priocntl suitable for all supported versions
3745 3744 // of Solaris. We need to call through this wrapper so that we can
3746 3745 // build on Solaris 9 and run on Solaris 8, 9 and 10.
3747 3746 //
3748 3747 // This code should be removed if we ever stop supporting Solaris 8
3749 3748 // and earlier releases.
3750 3749
3751 3750 static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg);
3752 3751 typedef long (*priocntl_type)(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg);
3753 3752 static priocntl_type priocntl_ptr = priocntl_stub;
3754 3753
3755 3754 // Stub to set the value of the real pointer, and then call the real
3756 3755 // function.
3757 3756
3758 3757 static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg) {
3759 3758 // Try Solaris 8- name only.
3760 3759 priocntl_type tmp = (priocntl_type)dlsym(RTLD_DEFAULT, "__priocntl");
3761 3760 guarantee(tmp != NULL, "priocntl function not found.");
3762 3761 priocntl_ptr = tmp;
3763 3762 return (*priocntl_ptr)(PC_VERSION, idtype, id, cmd, arg);
3764 3763 }
3765 3764
3766 3765
3767 3766 // lwp_priocntl_init
3768 3767 //
3769 3768 // Try to determine the priority scale for our process.
3770 3769 //
3771 3770 // Return errno or 0 if OK.
3772 3771 //
3773 3772 static
3774 3773 int lwp_priocntl_init ()
3775 3774 {
3776 3775 int rslt;
3777 3776 pcinfo_t ClassInfo;
3778 3777 pcparms_t ParmInfo;
3779 3778 int i;
3780 3779
3781 3780 if (!UseThreadPriorities) return 0;
3782 3781
3783 3782   // We are using bound threads, so we need to determine our priority ranges
3784 3783 if (os::Solaris::T2_libthread() || UseBoundThreads) {
3785 3784 // If ThreadPriorityPolicy is 1, switch tables
3786 3785 if (ThreadPriorityPolicy == 1) {
3787 3786 for (i = 0 ; i < MaxPriority+1; i++)
3788 3787 os::java_to_os_priority[i] = prio_policy1[i];
3789 3788 }
3790 3789 }
3791 3790   // Not using bound threads; use the ThreadPriorityPolicy=1 table
3792 3791 else {
3793 3792 for ( i = 0 ; i < MaxPriority+1; i++ ) {
3794 3793 os::java_to_os_priority[i] = prio_policy1[i];
3795 3794 }
3796 3795 return 0;
3797 3796 }
3798 3797
3799 3798
3800 3799 // Get IDs for a set of well-known scheduling classes.
3801 3800 // TODO-FIXME: GETCLINFO returns the current # of classes in the
3802 3801   // system. We should have a loop that iterates over the
3803 3802 // classID values, which are known to be "small" integers.
3804 3803
3805 3804 strcpy(ClassInfo.pc_clname, "TS");
3806 3805 ClassInfo.pc_cid = -1;
3807 3806 rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3808 3807 if (rslt < 0) return errno;
3809 3808 assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
3810 3809 tsLimits.schedPolicy = ClassInfo.pc_cid;
3811 3810 tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
3812 3811 tsLimits.minPrio = -tsLimits.maxPrio;
3813 3812
3814 3813 strcpy(ClassInfo.pc_clname, "IA");
3815 3814 ClassInfo.pc_cid = -1;
3816 3815 rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3817 3816 if (rslt < 0) return errno;
3818 3817 assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
3819 3818 iaLimits.schedPolicy = ClassInfo.pc_cid;
3820 3819 iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
3821 3820 iaLimits.minPrio = -iaLimits.maxPrio;
3822 3821
3823 3822 strcpy(ClassInfo.pc_clname, "RT");
3824 3823 ClassInfo.pc_cid = -1;
3825 3824 rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3826 3825 if (rslt < 0) return errno;
3827 3826 assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
3828 3827 rtLimits.schedPolicy = ClassInfo.pc_cid;
3829 3828 rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
3830 3829 rtLimits.minPrio = 0;
3831 3830
3832 3831
3833 3832 // Query our "current" scheduling class.
3834 3833 // This will normally be IA,TS or, rarely, RT.
3835 3834 memset (&ParmInfo, 0, sizeof(ParmInfo));
3836 3835 ParmInfo.pc_cid = PC_CLNULL;
3837 3836 rslt = (*priocntl_ptr) (PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo );
3838 3837 if ( rslt < 0 ) return errno;
3839 3838 myClass = ParmInfo.pc_cid;
3840 3839
3841 3840   // We now know our scheduling classId; get specific information
3842 3841   // about the class.
3843 3842 ClassInfo.pc_cid = myClass;
3844 3843 ClassInfo.pc_clname[0] = 0;
3845 3844 rslt = (*priocntl_ptr) (PC_VERSION, (idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo );
3846 3845 if ( rslt < 0 ) return errno;
3847 3846
3848 3847 if (ThreadPriorityVerbose)
3849 3848 tty->print_cr ("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
3850 3849
3851 3850 memset(&ParmInfo, 0, sizeof(pcparms_t));
3852 3851 ParmInfo.pc_cid = PC_CLNULL;
3853 3852 rslt = (*priocntl_ptr)(PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3854 3853 if (rslt < 0) return errno;
3855 3854
3856 3855 if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3857 3856 myMin = rtLimits.minPrio;
3858 3857 myMax = rtLimits.maxPrio;
3859 3858 } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3860 3859 iaparms_t *iaInfo = (iaparms_t*)ParmInfo.pc_clparms;
3861 3860 myMin = iaLimits.minPrio;
3862 3861 myMax = iaLimits.maxPrio;
3863 3862 myMax = MIN2(myMax, (int)iaInfo->ia_uprilim); // clamp - restrict
3864 3863 } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3865 3864 tsparms_t *tsInfo = (tsparms_t*)ParmInfo.pc_clparms;
3866 3865 myMin = tsLimits.minPrio;
3867 3866 myMax = tsLimits.maxPrio;
3868 3867 myMax = MIN2(myMax, (int)tsInfo->ts_uprilim); // clamp - restrict
3869 3868 } else {
3870 3869 // No clue - punt
3871 3870 if (ThreadPriorityVerbose)
3872 3871 tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
3873 3872 return EINVAL; // no clue, punt
3874 3873 }
3875 3874
3876 3875 if (ThreadPriorityVerbose)
3877 3876 tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
3878 3877
3879 3878 priocntl_enable = true; // Enable changing priorities
3880 3879 return 0;
3881 3880 }
3882 3881
3883 3882 #define IAPRI(x) ((iaparms_t *)((x).pc_clparms))
3884 3883 #define RTPRI(x) ((rtparms_t *)((x).pc_clparms))
3885 3884 #define TSPRI(x) ((tsparms_t *)((x).pc_clparms))
3886 3885
3887 3886
3888 3887 // scale_to_lwp_priority
3889 3888 //
3890 3889 // Convert from the libthread "thr_setprio" scale to our current
3891 3890 // lwp scheduling class scale.
3892 3891 //
3893 3892 static
3894 3893 int scale_to_lwp_priority (int rMin, int rMax, int x)
3895 3894 {
3896 3895 int v;
3897 3896
3898 3897 if (x == 127) return rMax; // avoid round-down
3899 3898 v = (((x*(rMax-rMin)))/128)+rMin;
3900 3899 return v;
3901 3900 }
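Editor's note: for a TS-like class range of roughly -60 to +60 (see the priority-scale comment above), the linear rescaling maps the middle of the 0..127 libthread range to the middle of the class range, and 127 is special-cased so the top Java priorities land exactly on rMax. A worked check follows; the bounds are illustrative, since the real limits come from priocntl at init time.

#include <cassert>

// Same formula as scale_to_lwp_priority above, duplicated for a standalone check.
static int scale(int rMin, int rMax, int x) {
  if (x == 127) return rMax;                    // avoid round-down at the top
  return ((x * (rMax - rMin)) / 128) + rMin;
}

int main() {
  // Assume a TS-like range of [-60, 60]; the real limits are queried at runtime.
  assert(scale(-60, 60, 127) ==  60);  // NormPriority..MaxPriority (127) land on rMax
  assert(scale(-60, 60,  64) ==   0);  // mid-scale lands at the middle of the range
  assert(scale(-60, 60,   0) == -60);  // MinPriority (mapped to 0) lands at the bottom
  return 0;
}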
3902 3901
3903 3902
3904 3903 // set_lwp_priority
3905 3904 //
3906 3905 // Set the priority of the lwp. This call should only be made
3907 3906 // when using bound threads (T2 threads are bound by default).
3908 3907 //
3909 3908 int set_lwp_priority (int ThreadID, int lwpid, int newPrio )
3910 3909 {
3911 3910 int rslt;
3912 3911 int Actual, Expected, prv;
3913 3912 pcparms_t ParmInfo; // for GET-SET
3914 3913 #ifdef ASSERT
3915 3914 pcparms_t ReadBack; // for readback
3916 3915 #endif
3917 3916
3918 3917 // Set priority via PC_GETPARMS, update, PC_SETPARMS
3919 3918 // Query current values.
3920 3919 // TODO: accelerate this by eliminating the PC_GETPARMS call.
3921 3920 // Cache "pcparms_t" in global ParmCache.
3922 3921 // TODO: elide set-to-same-value
3923 3922
3924 3923 // If something went wrong on init, don't change priorities.
3925 3924 if ( !priocntl_enable ) {
3926 3925 if (ThreadPriorityVerbose)
3927 3926 tty->print_cr("Trying to set priority but init failed, ignoring");
3928 3927 return EINVAL;
3929 3928 }
3930 3929
3931 3930
3932 3931   // If the lwp hasn't started yet, just return;
3933 3932   // the _start routine will call us again.
3934 3933 if ( lwpid <= 0 ) {
3935 3934 if (ThreadPriorityVerbose) {
3936 3935 tty->print_cr ("deferring the set_lwp_priority of thread " INTPTR_FORMAT " to %d, lwpid not set",
3937 3936 ThreadID, newPrio);
3938 3937 }
3939 3938 return 0;
3940 3939 }
3941 3940
3942 3941 if (ThreadPriorityVerbose) {
3943 3942 tty->print_cr ("set_lwp_priority(" INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
3944 3943 ThreadID, lwpid, newPrio);
3945 3944 }
3946 3945
3947 3946 memset(&ParmInfo, 0, sizeof(pcparms_t));
3948 3947 ParmInfo.pc_cid = PC_CLNULL;
3949 3948 rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
3950 3949 if (rslt < 0) return errno;
3951 3950
3952 3951 if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3953 3952 rtparms_t *rtInfo = (rtparms_t*)ParmInfo.pc_clparms;
3954 3953 rtInfo->rt_pri = scale_to_lwp_priority (rtLimits.minPrio, rtLimits.maxPrio, newPrio);
3955 3954 rtInfo->rt_tqsecs = RT_NOCHANGE;
3956 3955 rtInfo->rt_tqnsecs = RT_NOCHANGE;
3957 3956 if (ThreadPriorityVerbose) {
3958 3957 tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
3959 3958 }
3960 3959 } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3961 3960 iaparms_t *iaInfo = (iaparms_t*)ParmInfo.pc_clparms;
3962 3961 int maxClamped = MIN2(iaLimits.maxPrio, (int)iaInfo->ia_uprilim);
3963 3962 iaInfo->ia_upri = scale_to_lwp_priority(iaLimits.minPrio, maxClamped, newPrio);
3964 3963 iaInfo->ia_uprilim = IA_NOCHANGE;
3965 3964 iaInfo->ia_mode = IA_NOCHANGE;
3966 3965 if (ThreadPriorityVerbose) {
3967 3966 tty->print_cr ("IA: [%d...%d] %d->%d\n",
3968 3967 iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
3969 3968 }
3970 3969 } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3971 3970 tsparms_t *tsInfo = (tsparms_t*)ParmInfo.pc_clparms;
3972 3971 int maxClamped = MIN2(tsLimits.maxPrio, (int)tsInfo->ts_uprilim);
3973 3972 prv = tsInfo->ts_upri;
3974 3973 tsInfo->ts_upri = scale_to_lwp_priority(tsLimits.minPrio, maxClamped, newPrio);
3975 3974 tsInfo->ts_uprilim = IA_NOCHANGE;
3976 3975 if (ThreadPriorityVerbose) {
3977 3976 tty->print_cr ("TS: %d [%d...%d] %d->%d\n",
3978 3977 prv, tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
3979 3978 }
3980 3979 if (prv == tsInfo->ts_upri) return 0;
3981 3980 } else {
3982 3981 if ( ThreadPriorityVerbose ) {
3983 3982 tty->print_cr ("Unknown scheduling class\n");
3984 3983 }
3985 3984 return EINVAL; // no clue, punt
3986 3985 }
3987 3986
3988 3987 rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
3989 3988 if (ThreadPriorityVerbose && rslt) {
3990 3989 tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
3991 3990 }
3992 3991 if (rslt < 0) return errno;
3993 3992
3994 3993 #ifdef ASSERT
3995 3994 // Sanity check: read back what we just attempted to set.
3996 3995 // In theory it could have changed in the interim ...
3997 3996 //
3998 3997 // The priocntl system call is tricky.
3999 3998 // Sometimes it'll validate the priority value argument and
4000 3999 // return EINVAL if unhappy. At other times it fails silently.
4001 4000 // Readbacks are prudent.
4002 4001
4003 4002 if (!ReadBackValidate) return 0;
4004 4003
4005 4004 memset(&ReadBack, 0, sizeof(pcparms_t));
4006 4005 ReadBack.pc_cid = PC_CLNULL;
4007 4006 rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
4008 4007 assert(rslt >= 0, "priocntl failed");
4009 4008 Actual = Expected = 0xBAD;
4010 4009 assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
4011 4010 if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
4012 4011 Actual = RTPRI(ReadBack)->rt_pri;
4013 4012 Expected = RTPRI(ParmInfo)->rt_pri;
4014 4013 } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
4015 4014 Actual = IAPRI(ReadBack)->ia_upri;
4016 4015 Expected = IAPRI(ParmInfo)->ia_upri;
4017 4016 } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
4018 4017 Actual = TSPRI(ReadBack)->ts_upri;
4019 4018 Expected = TSPRI(ParmInfo)->ts_upri;
4020 4019 } else {
4021 4020 if ( ThreadPriorityVerbose ) {
4022 4021 tty->print_cr("set_lwp_priority: unexpected class in readback: %d\n", ParmInfo.pc_cid);
4023 4022 }
4024 4023 }
4025 4024
4026 4025 if (Actual != Expected) {
4027 4026 if ( ThreadPriorityVerbose ) {
4028 4027 tty->print_cr ("set_lwp_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
4029 4028 lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
4030 4029 }
4031 4030 }
4032 4031 #endif
4033 4032
4034 4033 return 0;
4035 4034 }
4036 4035
4037 4036
4038 4037
4039 4038 // Solaris only gives access to 128 real priorities at a time,
4040 4039 // so we expand Java's ten to fill this range. This would be better
4041 4040 // if we dynamically adjusted relative priorities.
4042 4041 //
4043 4042 // The ThreadPriorityPolicy option allows us to select 2 different
4044 4043 // priority scales.
4045 4044 //
4046 4045 // ThreadPriorityPolicy=0
4047 4046 // Since Solaris' default priority is MaximumPriority, we do not
4048 4047 // set a priority lower than Max unless a priority lower than
4049 4048 // NormPriority is requested.
4050 4049 //
4051 4050 // ThreadPriorityPolicy=1
4052 4051 // This mode causes the priority table to get filled with
4053 4052 // linear values. NormPriority gets mapped to 50% of the
4054 4053 // maximum priority and so on. This will cause VM threads
4055 4054 // to be treated unfairly relative to other Solaris processes
4056 4055 // that do not explicitly alter their thread priorities.
4057 4056 //
4058 4057
4059 4058
4060 4059 int os::java_to_os_priority[MaxPriority + 1] = {
4061 4060 -99999, // 0 Entry should never be used
4062 4061
4063 4062 0, // 1 MinPriority
4064 4063 32, // 2
4065 4064 64, // 3
4066 4065
4067 4066 96, // 4
4068 4067 127, // 5 NormPriority
4069 4068 127, // 6
4070 4069
4071 4070 127, // 7
4072 4071 127, // 8
4073 4072 127, // 9 NearMaxPriority
4074 4073
4075 4074 127 // 10 MaxPriority
4076 4075 };
4077 4076
4078 4077
4079 4078 OSReturn os::set_native_priority(Thread* thread, int newpri) {
4080 4079 assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
4081 4080 if ( !UseThreadPriorities ) return OS_OK;
4082 4081 int status = thr_setprio(thread->osthread()->thread_id(), newpri);
4083 4082 if ( os::Solaris::T2_libthread() || (UseBoundThreads && thread->osthread()->is_vm_created()) )
4084 4083 status |= (set_lwp_priority (thread->osthread()->thread_id(),
4085 4084 thread->osthread()->lwp_id(), newpri ));
4086 4085 return (status == 0) ? OS_OK : OS_ERR;
4087 4086 }
4088 4087
4089 4088
4090 4089 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
4091 4090 int p;
4092 4091 if ( !UseThreadPriorities ) {
4093 4092 *priority_ptr = NormalPriority;
4094 4093 return OS_OK;
4095 4094 }
4096 4095 int status = thr_getprio(thread->osthread()->thread_id(), &p);
4097 4096 if (status != 0) {
4098 4097 return OS_ERR;
4099 4098 }
4100 4099 *priority_ptr = p;
4101 4100 return OS_OK;
4102 4101 }
4103 4102
4104 4103
4105 4104 // Hint to the underlying OS that a task switch would not be good.
4106 4105 // Void return because it's a hint and can fail.
4107 4106 void os::hint_no_preempt() {
4108 4107 schedctl_start(schedctl_init());
4109 4108 }
4110 4109
4111 4110 void os::interrupt(Thread* thread) {
4112 4111 assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");
4113 4112
4114 4113 OSThread* osthread = thread->osthread();
4115 4114
4116 4115 int isInterrupted = osthread->interrupted();
4117 4116 if (!isInterrupted) {
4118 4117 osthread->set_interrupted(true);
4119 4118 OrderAccess::fence();
4120 4119 // os::sleep() is implemented with either poll (NULL,0,timeout) or
4121 4120 // by parking on _SleepEvent. If the former, thr_kill will unwedge
4122 4121     // the sleeper via SIGINTR; otherwise the unpark() will wake the sleeper.
4123 4122 ParkEvent * const slp = thread->_SleepEvent ;
4124 4123 if (slp != NULL) slp->unpark() ;
4125 4124 }
4126 4125
4127 4126 // For JSR166: unpark after setting status but before thr_kill -dl
4128 4127 if (thread->is_Java_thread()) {
4129 4128 ((JavaThread*)thread)->parker()->unpark();
4130 4129 }
4131 4130
4132 4131 // Handle interruptible wait() ...
4133 4132 ParkEvent * const ev = thread->_ParkEvent ;
4134 4133 if (ev != NULL) ev->unpark() ;
4135 4134
4136 4135   // Once events are used everywhere for os::sleep, this thr_kill
4137 4136 // will only be needed if UseVMInterruptibleIO is true.
4138 4137
4139 4138 if (!isInterrupted) {
4140 4139 int status = thr_kill(osthread->thread_id(), os::Solaris::SIGinterrupt());
4141 4140 assert_status(status == 0, status, "thr_kill");
4142 4141
4143 4142 // Bump thread interruption counter
4144 4143 RuntimeService::record_thread_interrupt_signaled_count();
4145 4144 }
4146 4145 }
4147 4146
4148 4147
4149 4148 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
4150 4149 assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");
4151 4150
4152 4151 OSThread* osthread = thread->osthread();
4153 4152
4154 4153 bool res = osthread->interrupted();
4155 4154
4156 4155 // NOTE that since there is no "lock" around these two operations,
4157 4156 // there is the possibility that the interrupted flag will be
4158 4157 // "false" but that the interrupt event will be set. This is
4159 4158 // intentional. The effect of this is that Object.wait() will appear
4160 4159 // to have a spurious wakeup, which is not harmful, and the
4161 4160 // possibility is so rare that it is not worth the added complexity
4162 4161 // to add yet another lock. It has also been recommended not to put
4163 4162 // the interrupted flag into the os::Solaris::Event structure,
4164 4163 // because it hides the issue.
4165 4164 if (res && clear_interrupted) {
4166 4165 osthread->set_interrupted(false);
4167 4166 }
4168 4167 return res;
4169 4168 }
4170 4169
4171 4170
4172 4171 void os::print_statistics() {
4173 4172 }
4174 4173
4175 4174 int os::message_box(const char* title, const char* message) {
4176 4175 int i;
4177 4176 fdStream err(defaultStream::error_fd());
4178 4177 for (i = 0; i < 78; i++) err.print_raw("=");
4179 4178 err.cr();
4180 4179 err.print_raw_cr(title);
4181 4180 for (i = 0; i < 78; i++) err.print_raw("-");
4182 4181 err.cr();
4183 4182 err.print_raw_cr(message);
4184 4183 for (i = 0; i < 78; i++) err.print_raw("=");
4185 4184 err.cr();
4186 4185
4187 4186 char buf[16];
4188 4187 // Prevent process from exiting upon "read error" without consuming all CPU
4189 4188 while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
4190 4189
4191 4190 return buf[0] == 'y' || buf[0] == 'Y';
4192 4191 }
4193 4192
4194 4193 // A lightweight implementation that does not suspend the target thread and
4195 4194 // thus returns only a hint. Used for profiling only!
4196 4195 ExtendedPC os::get_thread_pc(Thread* thread) {
4197 4196 // Make sure that it is called by the watcher and the Threads lock is owned.
4198 4197 assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
4199 4198 // For now, is only used to profile the VM Thread
4200 4199 assert(thread->is_VM_thread(), "Can only be called for VMThread");
4201 4200 ExtendedPC epc;
4202 4201
4203 4202 GetThreadPC_Callback cb(ProfileVM_lock);
4204 4203 OSThread *osthread = thread->osthread();
4205 4204 const int time_to_wait = 400; // 400ms wait for initial response
4206 4205 int status = cb.interrupt(thread, time_to_wait);
4207 4206
4208 4207 if (cb.is_done() ) {
4209 4208 epc = cb.addr();
4210 4209 } else {
4211 4210 DEBUG_ONLY(tty->print_cr("Failed to get pc for thread: %d got %d status",
4212 4211 osthread->thread_id(), status););
4213 4212 // epc is already NULL
4214 4213 }
4215 4214 return epc;
4216 4215 }
4217 4216
4218 4217
4219 4218 // This does not do anything on Solaris. This is basically a hook for being
4220 4219 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
4221 4220 void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
4222 4221 f(value, method, args, thread);
4223 4222 }
4224 4223
4225 4224 // This routine may be used by user applications as a "hook" to catch signals.
4226 4225 // The user-defined signal handler must pass unrecognized signals to this
4227 4226 // routine, and if it returns true (non-zero), then the signal handler must
4228 4227 // return immediately. If the flag "abort_if_unrecognized" is true, then this
4229 4228 // routine will never return false (zero), but instead will execute a VM panic
4230 4229 // routine to kill the process.
4231 4230 //
4232 4231 // If this routine returns false, it is OK to call it again. This allows
4233 4232 // the user-defined signal handler to perform checks either before or after
4234 4233 // the VM performs its own checks. Naturally, the user code would be making
4235 4234 // a serious error if it tried to handle an exception (such as a null check
4236 4235 // or breakpoint) that the VM was generating for its own correct operation.
4237 4236 //
4238 4237 // This routine may recognize any of the following kinds of signals:
4239 4238 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
4240 4239 // os::Solaris::SIGasync
4241 4240 // It should be consulted by handlers for any of those signals.
4242 4241 // It explicitly does not recognize os::Solaris::SIGinterrupt
4243 4242 //
4244 4243 // The caller of this routine must pass in the three arguments supplied
4245 4244 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
4246 4245 // field of the structure passed to sigaction(). This routine assumes that
4247 4246 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
4248 4247 //
4249 4248 // Note that the VM will print warnings if it detects conflicting signal
4250 4249 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
4251 4250 //
4252 4251 extern "C" JNIEXPORT int
4253 4252 JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext,
4254 4253 int abort_if_unrecognized);
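// Illustrative sketch only (not part of the VM): how an application-installed
// handler might forward unrecognized signals to JVM_handle_solaris_signal as
// described in the comment above. The handler name and the application's own
// check (app_owns_this_fault) are hypothetical; such a handler would be
// installed with sigaction() using SA_SIGINFO | SA_RESTART, with the VM
// started using -XX:+AllowUserSignalHandlers so it does not overwrite it.
//
//   #include <signal.h>
//
//   extern "C" int JVM_handle_solaris_signal(int sig, siginfo_t* info,
//                                            void* ucontext,
//                                            int abort_if_unrecognized);
//
//   extern "C" void my_app_handler(int sig, siginfo_t* info, void* uc) {
//     if (app_owns_this_fault(sig, info)) {  // hypothetical application check
//       // ... application-specific handling ...
//       return;
//     }
//     // Not ours - give the VM a chance. Passing abort_if_unrecognized = 0
//     // means a false return leaves the decision to the application.
//     if (JVM_handle_solaris_signal(sig, info, uc, 0)) {
//       return;   // the VM recognized and handled the signal
//     }
//     // ... application fallback, e.g. report the error and abort() ...
//   }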
4255 4254
4256 4255
4257 4256 void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
4258 4257 JVM_handle_solaris_signal(sig, info, ucVoid, true);
4259 4258 }
4260 4259
4261 4260 /* Do not delete - if guarantee is ever removed, a signal handler (even empty)
4262 4261 is needed to provoke threads blocked on IO to return an EINTR
4263 4262 Note: this explicitly does NOT call JVM_handle_solaris_signal and
4264 4263 does NOT participate in signal chaining due to requirement for
4265 4264 NOT setting SA_RESTART to make EINTR work. */
4266 4265 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
4267 4266 if (UseSignalChaining) {
4268 4267 struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
4269 4268 if (actp && actp->sa_handler) {
4270 4269 vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
4271 4270 }
4272 4271 }
4273 4272 }
4274 4273
4275 4274 // This boolean allows users to forward their own non-matching signals
4276 4275 // to JVM_handle_solaris_signal, harmlessly.
4277 4276 bool os::Solaris::signal_handlers_are_installed = false;
4278 4277
4279 4278 // For signal-chaining
4280 4279 bool os::Solaris::libjsig_is_loaded = false;
4281 4280 typedef struct sigaction *(*get_signal_t)(int);
4282 4281 get_signal_t os::Solaris::get_signal_action = NULL;
4283 4282
4284 4283 struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
4285 4284 struct sigaction *actp = NULL;
4286 4285
4287 4286 if ((libjsig_is_loaded) && (sig <= Maxlibjsigsigs)) {
4288 4287 // Retrieve the old signal handler from libjsig
4289 4288 actp = (*get_signal_action)(sig);
4290 4289 }
4291 4290 if (actp == NULL) {
4292 4291 // Retrieve the preinstalled signal handler from jvm
4293 4292 actp = get_preinstalled_handler(sig);
4294 4293 }
4295 4294
4296 4295 return actp;
4297 4296 }
4298 4297
4299 4298 static bool call_chained_handler(struct sigaction *actp, int sig,
4300 4299 siginfo_t *siginfo, void *context) {
4301 4300 // Call the old signal handler
4302 4301 if (actp->sa_handler == SIG_DFL) {
4303 4302 // It's more reasonable to let jvm treat it as an unexpected exception
4304 4303 // instead of taking the default action.
4305 4304 return false;
4306 4305 } else if (actp->sa_handler != SIG_IGN) {
4307 4306 if ((actp->sa_flags & SA_NODEFER) == 0) {
4308 4307       // automatically block the signal
4309 4308 sigaddset(&(actp->sa_mask), sig);
4310 4309 }
4311 4310
4312 4311 sa_handler_t hand;
4313 4312 sa_sigaction_t sa;
4314 4313 bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
4315 4314 // retrieve the chained handler
4316 4315 if (siginfo_flag_set) {
4317 4316 sa = actp->sa_sigaction;
4318 4317 } else {
4319 4318 hand = actp->sa_handler;
4320 4319 }
4321 4320
4322 4321 if ((actp->sa_flags & SA_RESETHAND) != 0) {
4323 4322 actp->sa_handler = SIG_DFL;
4324 4323 }
4325 4324
4326 4325 // try to honor the signal mask
4327 4326 sigset_t oset;
4328 4327 thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);
4329 4328
4330 4329 // call into the chained handler
4331 4330 if (siginfo_flag_set) {
4332 4331 (*sa)(sig, siginfo, context);
4333 4332 } else {
4334 4333 (*hand)(sig);
4335 4334 }
4336 4335
4337 4336 // restore the signal mask
4338 4337 thr_sigsetmask(SIG_SETMASK, &oset, 0);
4339 4338 }
4340 4339 // Tell jvm's signal handler the signal is taken care of.
4341 4340 return true;
4342 4341 }
4343 4342
4344 4343 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
4345 4344 bool chained = false;
4346 4345 // signal-chaining
4347 4346 if (UseSignalChaining) {
4348 4347 struct sigaction *actp = get_chained_signal_action(sig);
4349 4348 if (actp != NULL) {
4350 4349 chained = call_chained_handler(actp, sig, siginfo, context);
4351 4350 }
4352 4351 }
4353 4352 return chained;
4354 4353 }
4355 4354
4356 4355 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
4357 4356 assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4358 4357 if (preinstalled_sigs[sig] != 0) {
4359 4358 return &chainedsigactions[sig];
4360 4359 }
4361 4360 return NULL;
4362 4361 }
4363 4362
4364 4363 void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
4365 4364
4366 4365 assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
4367 4366 assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4368 4367 chainedsigactions[sig] = oldAct;
4369 4368 preinstalled_sigs[sig] = 1;
4370 4369 }
4371 4370
4372 4371 void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
4373 4372 // Check for overwrite.
4374 4373 struct sigaction oldAct;
4375 4374 sigaction(sig, (struct sigaction*)NULL, &oldAct);
4376 4375 void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
4377 4376 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
4378 4377 if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
4379 4378 oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
4380 4379 oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
4381 4380 if (AllowUserSignalHandlers || !set_installed) {
4382 4381 // Do not overwrite; user takes responsibility to forward to us.
4383 4382 return;
4384 4383 } else if (UseSignalChaining) {
4385 4384 if (oktochain) {
4386 4385 // save the old handler in jvm
4387 4386 save_preinstalled_handler(sig, oldAct);
4388 4387 } else {
4389 4388 vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
4390 4389 }
4391 4390 // libjsig also interposes the sigaction() call below and saves the
4392 4391       // old sigaction on its own.
4393 4392 } else {
4394 4393 fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
4395 4394 "%#lx for signal %d.", (long)oldhand, sig));
4396 4395 }
4397 4396 }
4398 4397
4399 4398 struct sigaction sigAct;
4400 4399 sigfillset(&(sigAct.sa_mask));
4401 4400 sigAct.sa_handler = SIG_DFL;
4402 4401
4403 4402 sigAct.sa_sigaction = signalHandler;
4404 4403 // Handle SIGSEGV on alternate signal stack if
4405 4404 // not using stack banging
4406 4405 if (!UseStackBanging && sig == SIGSEGV) {
4407 4406 sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
4408 4407 // Interruptible i/o requires SA_RESTART cleared so EINTR
4409 4408 // is returned instead of restarting system calls
4410 4409 } else if (sig == os::Solaris::SIGinterrupt()) {
4411 4410 sigemptyset(&sigAct.sa_mask);
4412 4411 sigAct.sa_handler = NULL;
4413 4412 sigAct.sa_flags = SA_SIGINFO;
4414 4413 sigAct.sa_sigaction = sigINTRHandler;
4415 4414 } else {
4416 4415 sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
4417 4416 }
4418 4417 os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);
4419 4418
4420 4419 sigaction(sig, &sigAct, &oldAct);
4421 4420
4422 4421 void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
4423 4422 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
4424 4423 assert(oldhand2 == oldhand, "no concurrent signal handler installation");
4425 4424 }
4426 4425
4427 4426
4428 4427 #define DO_SIGNAL_CHECK(sig) \
4429 4428 if (!sigismember(&check_signal_done, sig)) \
4430 4429 os::Solaris::check_signal_handler(sig)
4431 4430
4432 4431 // This method is a periodic task to check for misbehaving JNI applications
4433 4432 // under CheckJNI, we can add any periodic checks here
4434 4433 // under CheckJNI; we can add any other periodic checks here
4435 4434 void os::run_periodic_checks() {
4436 4435 // A big source of grief is hijacking virt. addr 0x0 on Solaris,
4437 4436   // thereby preventing NULL checks.
4438 4437 if(!check_addr0_done) check_addr0_done = check_addr0(tty);
4439 4438
4440 4439 if (check_signals == false) return;
4441 4440
4442 4441   // SEGV and BUS, if overridden, could potentially prevent
4443 4442   // generation of hs*.log in the event of a crash; debugging
4444 4443   // such a case can be very challenging, so we
4445 4444   // check the following for good measure:
4446 4445 DO_SIGNAL_CHECK(SIGSEGV);
4447 4446 DO_SIGNAL_CHECK(SIGILL);
4448 4447 DO_SIGNAL_CHECK(SIGFPE);
4449 4448 DO_SIGNAL_CHECK(SIGBUS);
4450 4449 DO_SIGNAL_CHECK(SIGPIPE);
4451 4450 DO_SIGNAL_CHECK(SIGXFSZ);
4452 4451
4453 4452 // ReduceSignalUsage allows the user to override these handlers
4454 4453 // see comments at the very top and jvm_solaris.h
4455 4454 if (!ReduceSignalUsage) {
4456 4455 DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
4457 4456 DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
4458 4457 DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
4459 4458 DO_SIGNAL_CHECK(BREAK_SIGNAL);
4460 4459 }
4461 4460
4462 4461 // See comments above for using JVM1/JVM2 and UseAltSigs
4463 4462 DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
4464 4463 DO_SIGNAL_CHECK(os::Solaris::SIGasync());
4465 4464
4466 4465 }
4467 4466
4468 4467 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
4469 4468
4470 4469 static os_sigaction_t os_sigaction = NULL;
4471 4470
4472 4471 void os::Solaris::check_signal_handler(int sig) {
4473 4472 char buf[O_BUFLEN];
4474 4473 address jvmHandler = NULL;
4475 4474
4476 4475 struct sigaction act;
4477 4476 if (os_sigaction == NULL) {
4478 4477 // only trust the default sigaction, in case it has been interposed
4479 4478 os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
4480 4479 if (os_sigaction == NULL) return;
4481 4480 }
4482 4481
4483 4482 os_sigaction(sig, (struct sigaction*)NULL, &act);
4484 4483
4485 4484 address thisHandler = (act.sa_flags & SA_SIGINFO)
4486 4485 ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
4487 4486 : CAST_FROM_FN_PTR(address, act.sa_handler) ;
4488 4487
4489 4488
4490 4489 switch(sig) {
4491 4490 case SIGSEGV:
4492 4491 case SIGBUS:
4493 4492 case SIGFPE:
4494 4493 case SIGPIPE:
4495 4494 case SIGXFSZ:
4496 4495 case SIGILL:
4497 4496 jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4498 4497 break;
4499 4498
4500 4499 case SHUTDOWN1_SIGNAL:
4501 4500 case SHUTDOWN2_SIGNAL:
4502 4501 case SHUTDOWN3_SIGNAL:
4503 4502 case BREAK_SIGNAL:
4504 4503 jvmHandler = (address)user_handler();
4505 4504 break;
4506 4505
4507 4506 default:
4508 4507 int intrsig = os::Solaris::SIGinterrupt();
4509 4508 int asynsig = os::Solaris::SIGasync();
4510 4509
4511 4510 if (sig == intrsig) {
4512 4511 jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
4513 4512 } else if (sig == asynsig) {
4514 4513 jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
4515 4514 } else {
4516 4515 return;
4517 4516 }
4518 4517 break;
4519 4518 }
4520 4519
4521 4520
4522 4521 if (thisHandler != jvmHandler) {
4523 4522 tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
4524 4523 tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
4525 4524 tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
4526 4525 // No need to check this sig any longer
4527 4526 sigaddset(&check_signal_done, sig);
4528 4527 } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
4529 4528 tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
4530 4529 tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
4531 4530 tty->print_cr(" found:" PTR32_FORMAT, act.sa_flags);
4532 4531 // No need to check this sig any longer
4533 4532 sigaddset(&check_signal_done, sig);
4534 4533 }
4535 4534
4536 4535 // Print all the signal handler state
4537 4536 if (sigismember(&check_signal_done, sig)) {
4538 4537 print_signal_handlers(tty, buf, O_BUFLEN);
4539 4538 }
4540 4539
4541 4540 }
4542 4541
4543 4542 void os::Solaris::install_signal_handlers() {
4544 4543 bool libjsigdone = false;
4545 4544 signal_handlers_are_installed = true;
4546 4545
4547 4546 // signal-chaining
4548 4547 typedef void (*signal_setting_t)();
4549 4548 signal_setting_t begin_signal_setting = NULL;
4550 4549 signal_setting_t end_signal_setting = NULL;
4551 4550 begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4552 4551 dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
4553 4552 if (begin_signal_setting != NULL) {
4554 4553 end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4555 4554 dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
4556 4555 get_signal_action = CAST_TO_FN_PTR(get_signal_t,
4557 4556 dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
4558 4557 get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
4559 4558 dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
4560 4559 libjsig_is_loaded = true;
4561 4560 if (os::Solaris::get_libjsig_version != NULL) {
4562 4561 libjsigversion = (*os::Solaris::get_libjsig_version)();
4563 4562 }
4564 4563 assert(UseSignalChaining, "should enable signal-chaining");
4565 4564 }
4566 4565 if (libjsig_is_loaded) {
4567 4566 // Tell libjsig jvm is setting signal handlers
4568 4567 (*begin_signal_setting)();
4569 4568 }
4570 4569
4571 4570 set_signal_handler(SIGSEGV, true, true);
4572 4571 set_signal_handler(SIGPIPE, true, true);
4573 4572 set_signal_handler(SIGXFSZ, true, true);
4574 4573 set_signal_handler(SIGBUS, true, true);
4575 4574 set_signal_handler(SIGILL, true, true);
4576 4575 set_signal_handler(SIGFPE, true, true);
4577 4576
4578 4577
4579 4578 if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {
4580 4579
4581 4580     // Pre-1.4.1 libjsig is limited to chaining signals <= 32, so it
4582 4581     // cannot register overridable signals which might be > 32
4583 4582 if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
4584 4583 // Tell libjsig jvm has finished setting signal handlers
4585 4584 (*end_signal_setting)();
4586 4585 libjsigdone = true;
4587 4586 }
4588 4587 }
4589 4588
4590 4589 // Never ok to chain our SIGinterrupt
4591 4590 set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
4592 4591 set_signal_handler(os::Solaris::SIGasync(), true, true);
4593 4592
4594 4593 if (libjsig_is_loaded && !libjsigdone) {
4595 4594     // Tell libjsig the jvm has finished setting signal handlers
4596 4595 (*end_signal_setting)();
4597 4596 }
4598 4597
4599 4598   // We don't activate the signal checker if libjsig is in place; we trust
4600 4599   // ourselves, and if user signal handlers are installed all bets are off.
4601 4600 // Log that signal checking is off only if -verbose:jni is specified.
4602 4601 if (CheckJNICalls) {
4603 4602 if (libjsig_is_loaded) {
4604 4603 if (PrintJNIResolving) {
4605 4604 tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
4606 4605 }
4607 4606 check_signals = false;
4608 4607 }
4609 4608 if (AllowUserSignalHandlers) {
4610 4609 if (PrintJNIResolving) {
4611 4610 tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
4612 4611 }
4613 4612 check_signals = false;
4614 4613 }
4615 4614 }
4616 4615 }
4617 4616
4618 4617
4619 4618 void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);
4620 4619
4621 4620 const char * signames[] = {
4622 4621 "SIG0",
4623 4622 "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
4624 4623 "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
4625 4624 "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
4626 4625 "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
4627 4626 "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
4628 4627 "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
4629 4628 "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
4630 4629 "SIGCANCEL", "SIGLOST"
4631 4630 };
4632 4631
4633 4632 const char* os::exception_name(int exception_code, char* buf, size_t size) {
4634 4633 if (0 < exception_code && exception_code <= SIGRTMAX) {
4635 4634 // signal
4636 4635 if (exception_code < sizeof(signames)/sizeof(const char*)) {
4637 4636 jio_snprintf(buf, size, "%s", signames[exception_code]);
4638 4637 } else {
4639 4638 jio_snprintf(buf, size, "SIG%d", exception_code);
4640 4639 }
4641 4640 return buf;
4642 4641 } else {
4643 4642 return NULL;
4644 4643 }
4645 4644 }
4646 4645
4647 4646 // (Static) wrappers for the new libthread API
4648 4647 int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate;
4649 4648 int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate;
4650 4649 int_fnP_thread_t_i os::Solaris::_thr_setmutator;
4651 4650 int_fnP_thread_t os::Solaris::_thr_suspend_mutator;
4652 4651 int_fnP_thread_t os::Solaris::_thr_continue_mutator;
4653 4652
4654 4653 // (Static) wrapper for getisax(2) call.
4655 4654 os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
4656 4655
4657 4656 // (Static) wrappers for the liblgrp API
4658 4657 os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
4659 4658 os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
4660 4659 os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
4661 4660 os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
4662 4661 os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
4663 4662 os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
4664 4663 os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
4665 4664 os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
4666 4665 os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;
4667 4666
4668 4667 // (Static) wrapper for meminfo() call.
4669 4668 os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
4670 4669
4671 4670 static address resolve_symbol_lazy(const char* name) {
4672 4671 address addr = (address) dlsym(RTLD_DEFAULT, name);
4673 4672 if(addr == NULL) {
4674 4673 // RTLD_DEFAULT was not defined on some early versions of 2.5.1
4675 4674 addr = (address) dlsym(RTLD_NEXT, name);
4676 4675 }
4677 4676 return addr;
4678 4677 }
4679 4678
4680 4679 static address resolve_symbol(const char* name) {
4681 4680 address addr = resolve_symbol_lazy(name);
4682 4681 if(addr == NULL) {
4683 4682 fatal(dlerror());
4684 4683 }
4685 4684 return addr;
4686 4685 }
4687 4686
4688 4687
4689 4688
4690 4689 // isT2_libthread()
4691 4690 //
4692 4691 // Routine to determine if we are currently using the new T2 libthread.
4693 4692 //
4694 4693 // We determine if we are using T2 by reading /proc/self/lstatus and
4695 4694 // looking for a thread with the ASLWP bit set. If we find this status
4696 4695 // bit set, we must assume that we are NOT using T2. The T2 team
4697 4696 // has approved this algorithm.
4698 4697 //
4699 4698 // We need to determine if we are running with the new T2 libthread
4700 4699 // since setting native thread priorities is handled differently
4701 4700 // when using this library. All threads created using T2 are bound
4702 4701 // threads. Calling thr_setprio is meaningless in this case.
4703 4702 //
4704 4703 bool isT2_libthread() {
4705 4704 static prheader_t * lwpArray = NULL;
4706 4705 static int lwpSize = 0;
4707 4706 static int lwpFile = -1;
4708 4707 lwpstatus_t * that;
4709 4708 char lwpName [128];
4710 4709 bool isT2 = false;
4711 4710
4712 4711 #define ADR(x) ((uintptr_t)(x))
4713 4712 #define LWPINDEX(ary,ix) ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1))))
4714 4713
4715 4714 lwpFile = ::open("/proc/self/lstatus", O_RDONLY, 0);
4716 4715 if (lwpFile < 0) {
4717 4716 if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n");
4718 4717 return false;
4719 4718 }
4720 4719 lwpSize = 16*1024;
4721 4720 for (;;) {
4722 4721 ::lseek64 (lwpFile, 0, SEEK_SET);
4723 4722 lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize);
4724 4723 if (::read(lwpFile, lwpArray, lwpSize) < 0) {
4725 4724 if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
4726 4725 break;
4727 4726 }
4728 4727 if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
4729 4728 // We got a good snapshot - now iterate over the list.
4730 4729 int aslwpcount = 0;
4731 4730 for (int i = 0; i < lwpArray->pr_nent; i++ ) {
4732 4731 that = LWPINDEX(lwpArray,i);
4733 4732 if (that->pr_flags & PR_ASLWP) {
4734 4733 aslwpcount++;
4735 4734 }
4736 4735 }
4737 4736 if (aslwpcount == 0) isT2 = true;
4738 4737 break;
4739 4738 }
4740 4739 lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
4741 4740 FREE_C_HEAP_ARRAY(char, lwpArray); // retry.
4742 4741 }
4743 4742
4744 4743 FREE_C_HEAP_ARRAY(char, lwpArray);
4745 4744 ::close (lwpFile);
4746 4745 if (ThreadPriorityVerbose) {
4747 4746 if (isT2) tty->print_cr("We are running with a T2 libthread\n");
4748 4747 else tty->print_cr("We are not running with a T2 libthread\n");
4749 4748 }
4750 4749 return isT2;
4751 4750 }
4752 4751
4753 4752
4754 4753 void os::Solaris::libthread_init() {
4755 4754 address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");
4756 4755
4757 4756 // Determine if we are running with the new T2 libthread
4758 4757 os::Solaris::set_T2_libthread(isT2_libthread());
4759 4758
4760 4759 lwp_priocntl_init();
4761 4760
4762 4761 // RTLD_DEFAULT was not defined on some early versions of 5.5.1
4763 4762 if(func == NULL) {
4764 4763 func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
4765 4764 // Guarantee that this VM is running on an new enough OS (5.6 or
4766 4765     // Guarantee that this VM is running on a new enough OS (5.6 or
4767 4766 guarantee(func != NULL, "libthread.so is too old.");
4768 4767 }
4769 4768
4770 4769 // Initialize the new libthread getstate API wrappers
4771 4770 func = resolve_symbol("thr_getstate");
4772 4771 os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func));
4773 4772
4774 4773 func = resolve_symbol("thr_setstate");
4775 4774 os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func));
4776 4775
4777 4776 func = resolve_symbol("thr_setmutator");
4778 4777 os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func));
4779 4778
4780 4779 func = resolve_symbol("thr_suspend_mutator");
4781 4780 os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));
4782 4781
4783 4782 func = resolve_symbol("thr_continue_mutator");
4784 4783 os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));
4785 4784
4786 4785 int size;
4787 4786 void (*handler_info_func)(address *, int *);
4788 4787 handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
4789 4788 handler_info_func(&handler_start, &size);
4790 4789 handler_end = handler_start + size;
4791 4790 }
4792 4791
4793 4792
4794 4793 int_fnP_mutex_tP os::Solaris::_mutex_lock;
4795 4794 int_fnP_mutex_tP os::Solaris::_mutex_trylock;
4796 4795 int_fnP_mutex_tP os::Solaris::_mutex_unlock;
4797 4796 int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
4798 4797 int_fnP_mutex_tP os::Solaris::_mutex_destroy;
4799 4798 int os::Solaris::_mutex_scope = USYNC_THREAD;
4800 4799
4801 4800 int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
4802 4801 int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
4803 4802 int_fnP_cond_tP os::Solaris::_cond_signal;
4804 4803 int_fnP_cond_tP os::Solaris::_cond_broadcast;
4805 4804 int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
4806 4805 int_fnP_cond_tP os::Solaris::_cond_destroy;
4807 4806 int os::Solaris::_cond_scope = USYNC_THREAD;
4808 4807
4809 4808 void os::Solaris::synchronization_init() {
4810 4809 if(UseLWPSynchronization) {
4811 4810 os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
4812 4811 os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
4813 4812 os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
4814 4813 os::Solaris::set_mutex_init(lwp_mutex_init);
4815 4814 os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
4816 4815 os::Solaris::set_mutex_scope(USYNC_THREAD);
4817 4816
4818 4817 os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
4819 4818 os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
4820 4819 os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
4821 4820 os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
4822 4821 os::Solaris::set_cond_init(lwp_cond_init);
4823 4822 os::Solaris::set_cond_destroy(lwp_cond_destroy);
4824 4823 os::Solaris::set_cond_scope(USYNC_THREAD);
4825 4824 }
4826 4825 else {
4827 4826 os::Solaris::set_mutex_scope(USYNC_THREAD);
4828 4827 os::Solaris::set_cond_scope(USYNC_THREAD);
4829 4828
4830 4829 if(UsePthreads) {
4831 4830 os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
4832 4831 os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
4833 4832 os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
4834 4833 os::Solaris::set_mutex_init(pthread_mutex_default_init);
4835 4834 os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));
4836 4835
4837 4836 os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
4838 4837 os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
4839 4838 os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
4840 4839 os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
4841 4840 os::Solaris::set_cond_init(pthread_cond_default_init);
4842 4841 os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
4843 4842 }
4844 4843 else {
4845 4844 os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
4846 4845 os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
4847 4846 os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
4848 4847 os::Solaris::set_mutex_init(::mutex_init);
4849 4848 os::Solaris::set_mutex_destroy(::mutex_destroy);
4850 4849
4851 4850 os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
4852 4851 os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
4853 4852 os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
4854 4853 os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
4855 4854 os::Solaris::set_cond_init(::cond_init);
4856 4855 os::Solaris::set_cond_destroy(::cond_destroy);
4857 4856 }
4858 4857 }
4859 4858 }
4860 4859
4861 4860 bool os::Solaris::liblgrp_init() {
4862 4861 void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
4863 4862 if (handle != NULL) {
4864 4863 os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
4865 4864 os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
4866 4865 os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
4867 4866 os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
4868 4867 os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
4869 4868 os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
4870 4869 os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
4871 4870 os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
4872 4871 dlsym(handle, "lgrp_cookie_stale")));
4873 4872
4874 4873 lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
4875 4874 set_lgrp_cookie(c);
4876 4875 return true;
4877 4876 }
4878 4877 return false;
4879 4878 }
4880 4879
4881 4880 void os::Solaris::misc_sym_init() {
4882 4881 address func;
4883 4882
4884 4883 // getisax
4885 4884 func = resolve_symbol_lazy("getisax");
4886 4885 if (func != NULL) {
4887 4886 os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
4888 4887 }
4889 4888
4890 4889 // meminfo
4891 4890 func = resolve_symbol_lazy("meminfo");
4892 4891 if (func != NULL) {
4893 4892 os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
4894 4893 }
4895 4894 }
4896 4895
4897 4896 uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
4898 4897 assert(_getisax != NULL, "_getisax not set");
4899 4898 return _getisax(array, n);
4900 4899 }
4901 4900
4902 4901 // Symbol doesn't exist in Solaris 8 pset.h
4903 4902 #ifndef PS_MYID
4904 4903 #define PS_MYID -3
4905 4904 #endif
4906 4905
4907 4906 // int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
4908 4907 typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
4909 4908 static pset_getloadavg_type pset_getloadavg_ptr = NULL;
4910 4909
4911 4910 void init_pset_getloadavg_ptr(void) {
4912 4911 pset_getloadavg_ptr =
4913 4912 (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
4914 4913 if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
4915 4914 warning("pset_getloadavg function not found");
4916 4915 }
4917 4916 }
4918 4917
4919 4918 int os::Solaris::_dev_zero_fd = -1;
4920 4919
4921 4920 // this is called _before_ the global arguments have been parsed
4922 4921 void os::init(void) {
4923 4922 _initial_pid = getpid();
4924 4923
4925 4924 max_hrtime = first_hrtime = gethrtime();
4926 4925
4927 4926 init_random(1234567);
4928 4927
4929 4928 page_size = sysconf(_SC_PAGESIZE);
4930 4929 if (page_size == -1)
4931 4930 fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
4932 4931 strerror(errno)));
4933 4932 init_page_sizes((size_t) page_size);
4934 4933
4935 4934 Solaris::initialize_system_info();
4936 4935
4937 4936 // Initialize misc. symbols as soon as possible, so we can use them
4938 4937 // if we need them.
4939 4938 Solaris::misc_sym_init();
4940 4939
4941 4940 int fd = ::open("/dev/zero", O_RDWR);
4942 4941 if (fd < 0) {
4943 4942 fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
4944 4943 } else {
4945 4944 Solaris::set_dev_zero_fd(fd);
4946 4945
4947 4946 // Close on exec, child won't inherit.
4948 4947 fcntl(fd, F_SETFD, FD_CLOEXEC);
4949 4948 }
4950 4949
4951 4950 clock_tics_per_sec = CLK_TCK;
4952 4951
4953 4952 // check if dladdr1() exists; dladdr1 can provide more information than
4954 4953 // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
4955 4954 // and is available on linker patches for 5.7 and 5.8.
4956 4955 // libdl.so must have been loaded, this call is just an entry lookup
4957 4956 void * hdl = dlopen("libdl.so", RTLD_NOW);
4958 4957 if (hdl)
4959 4958 dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
4960 4959
4961 4960 // (Solaris only) this switches to calls that actually do locking.
4962 4961 ThreadCritical::initialize();
4963 4962
4964 4963 main_thread = thr_self();
4965 4964
4966 4965 // Constant minimum stack size allowed. It must be at least
4967 4966 // the minimum of what the OS supports (thr_min_stack()), and
4968 4967 // enough to allow the thread to get to user bytecode execution.
4969 4968 Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
4970 4969 // If the pagesize of the VM is greater than 8K determine the appropriate
4971 4970 // number of initial guard pages. The user can change this with the
4972 4971 // command line arguments, if needed.
4973 4972 if (vm_page_size() > 8*K) {
4974 4973 StackYellowPages = 1;
4975 4974 StackRedPages = 1;
4976 4975 StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
4977 4976 }
4978 4977 }
4979 4978
4980 4979 // To install functions for atexit system call
4981 4980 extern "C" {
4982 4981 static void perfMemory_exit_helper() {
4983 4982 perfMemory_exit();
4984 4983 }
4985 4984 }
4986 4985
4987 4986 // this is called _after_ the global arguments have been parsed
4988 4987 jint os::init_2(void) {
4989 4988 // try to enable extended file IO ASAP, see 6431278
4990 4989 os::Solaris::try_enable_extended_io();
4991 4990
4992 4991 // Allocate a single page and mark it as readable for safepoint polling. Also
4993 4992 // use this first mmap call to check support for MAP_ALIGN.
4994 4993 address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
4995 4994 page_size,
4996 4995 MAP_PRIVATE | MAP_ALIGN,
4997 4996 PROT_READ);
4998 4997 if (polling_page == NULL) {
4999 4998 has_map_align = false;
5000 4999 polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
5001 5000 PROT_READ);
5002 5001 }
5003 5002
5004 5003 os::set_polling_page(polling_page);
5005 5004
5006 5005 #ifndef PRODUCT
5007 5006 if( Verbose && PrintMiscellaneous )
5008 5007 tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
5009 5008 #endif
5010 5009
5011 5010 if (!UseMembar) {
5012 5011 address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
5013 5012 guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
5014 5013 os::set_memory_serialize_page( mem_serialize_page );
5015 5014
5016 5015 #ifndef PRODUCT
5017 5016 if(Verbose && PrintMiscellaneous)
5018 5017 tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
5019 5018 #endif
5020 5019 }
5021 5020
5022 5021 os::large_page_init();
5023 5022
5024 5023 // Check minimum allowable stack size for thread creation and to initialize
5025 5024 // the java system classes, including StackOverflowError - depends on page
5026 5025 // size. Add a page for compiler2 recursion in main thread.
5027 5026 // Add in 2*BytesPerWord times page size to account for VM stack during
5028 5027 // class initialization depending on 32 or 64 bit VM.
5029 5028 os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
5030 5029 (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
5031 5030 2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
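  // Worked example of the expression above, with purely illustrative flag
  // values (the real defaults are platform- and flag-dependent): assuming an
  // 8K page size, StackYellowPages = 2, StackRedPages = 1, StackShadowPages = 3,
  // a 64-bit VM (BytesPerWord = 8) and compiler2 present:
  //
  //   (2 + 1 + 3 + 2*8 + 1) * 8K  =  23 * 8K  =  184K
  //
  // so min_stack_allowed would be at least 184K on such a configuration.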
5032 5031
5033 5032 size_t threadStackSizeInBytes = ThreadStackSize * K;
5034 5033 if (threadStackSizeInBytes != 0 &&
5035 5034 threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
5036 5035     tty->print_cr("\nThe stack size specified is too small. Specify at least %dk",
5037 5036 os::Solaris::min_stack_allowed/K);
5038 5037 return JNI_ERR;
5039 5038 }
5040 5039
5041 5040   // On systems with a 64kb page size the usable default stack size
5042 5041   // is quite a bit less. Increase the stack for 64kb (or any larger
5043 5042   // than 8kb) pages; this increases virtual memory fragmentation
5044 5043   // (since we're not creating the stack on a power of 2 boundary).
5045 5044   // The real fix for this should be to fix the guard page
5046 5045   // mechanism.
5047 5046
5048 5047 if (vm_page_size() > 8*K) {
5049 5048 threadStackSizeInBytes = (threadStackSizeInBytes != 0)
5050 5049 ? threadStackSizeInBytes +
5051 5050 ((StackYellowPages + StackRedPages) * vm_page_size())
5052 5051 : 0;
5053 5052 ThreadStackSize = threadStackSizeInBytes/K;
5054 5053 }
5055 5054
5056 5055 // Make the stack size a multiple of the page size so that
5057 5056 // the yellow/red zones can be guarded.
5058 5057 JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
5059 5058 vm_page_size()));
5060 5059
5061 5060 Solaris::libthread_init();
5062 5061
5063 5062 if (UseNUMA) {
5064 5063 if (!Solaris::liblgrp_init()) {
5065 5064 UseNUMA = false;
5066 5065 } else {
5067 5066 size_t lgrp_limit = os::numa_get_groups_num();
5068 5067 int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit);
5069 5068 size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
5070 5069 FREE_C_HEAP_ARRAY(int, lgrp_ids);
5071 5070 if (lgrp_num < 2) {
5072 5071 // There's only one locality group, disable NUMA.
5073 5072 UseNUMA = false;
5074 5073 }
5075 5074 }
5076 5075 // ISM is not compatible with the NUMA allocator - it always allocates
5077 5076 // pages round-robin across the lgroups.
5078 5077 if (UseNUMA && UseLargePages && UseISM) {
5079 5078 if (!FLAG_IS_DEFAULT(UseNUMA)) {
5080 5079 if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseISM)) {
5081 5080 UseLargePages = false;
5082 5081 } else {
5083 5082 warning("UseNUMA is not compatible with ISM large pages, disabling NUMA allocator");
5084 5083 UseNUMA = false;
5085 5084 }
5086 5085 } else {
5087 5086 UseNUMA = false;
5088 5087 }
5089 5088 }
5090 5089 if (!UseNUMA && ForceNUMA) {
5091 5090 UseNUMA = true;
5092 5091 }
5093 5092 }
5094 5093
5095 5094 Solaris::signal_sets_init();
5096 5095 Solaris::init_signal_mem();
5097 5096 Solaris::install_signal_handlers();
5098 5097
5099 5098 if (libjsigversion < JSIG_VERSION_1_4_1) {
5100 5099 Maxlibjsigsigs = OLDMAXSIGNUM;
5101 5100 }
5102 5101
5103 5102 // initialize synchronization primitives to use either thread or
5104 5103 // lwp synchronization (controlled by UseLWPSynchronization)
5105 5104 Solaris::synchronization_init();
5106 5105
5107 5106 if (MaxFDLimit) {
5108 5107     // set the number of file descriptors to max. Print out an error
5109 5108 // if getrlimit/setrlimit fails but continue regardless.
5110 5109 struct rlimit nbr_files;
5111 5110 int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
5112 5111 if (status != 0) {
5113 5112 if (PrintMiscellaneous && (Verbose || WizardMode))
5114 5113 perror("os::init_2 getrlimit failed");
5115 5114 } else {
5116 5115 nbr_files.rlim_cur = nbr_files.rlim_max;
5117 5116 status = setrlimit(RLIMIT_NOFILE, &nbr_files);
5118 5117 if (status != 0) {
5119 5118 if (PrintMiscellaneous && (Verbose || WizardMode))
5120 5119 perror("os::init_2 setrlimit failed");
5121 5120 }
5122 5121 }
5123 5122 }
5124 5123
5125 5124   // Calculate theoretical max. size of Threads to guard against
5126 5125   // artificial out-of-memory situations, where all available address-
5127 5126 // space has been reserved by thread stacks. Default stack size is 1Mb.
5128 5127 size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
5129 5128 JavaThread::stack_size_at_create() : (1*K*K);
5130 5129 assert(pre_thread_stack_size != 0, "Must have a stack");
5131 5130   // Solaris has a maximum of 4Gb of user program address space. Calculate the
5132 5131   // thread limit at which we should start doing Virtual Memory banging.
5133 5132   // Currently this is when the threads have used all but 200Mb of space.
5134 5133 size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
5135 5134 Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;
5136 5135
5137 5136 // at-exit methods are called in the reverse order of their registration.
5138 5137 // In Solaris 7 and earlier, atexit functions are called on return from
5139 5138 // main or as a result of a call to exit(3C). There can be only 32 of
5140 5139 // these functions registered and atexit() does not set errno. In Solaris
5141 5140 // 8 and later, there is no limit to the number of functions registered
5142 5141 // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
5143 5142 // functions are called upon dlclose(3DL) in addition to return from main
5144 5143 // and exit(3C).
5145 5144
5146 5145 if (PerfAllowAtExitRegistration) {
5147 5146 // only register atexit functions if PerfAllowAtExitRegistration is set.
5148 5147 // atexit functions can be delayed until process exit time, which
5149 5148 // can be problematic for embedded VM situations. Embedded VMs should
5150 5149 // call DestroyJavaVM() to assure that VM resources are released.
5151 5150
5152 5151 // note: perfMemory_exit_helper atexit function may be removed in
5153 5152 // the future if the appropriate cleanup code can be added to the
5154 5153 // VM_Exit VMOperation's doit method.
5155 5154 if (atexit(perfMemory_exit_helper) != 0) {
5156 5155 warning("os::init2 atexit(perfMemory_exit_helper) failed");
5157 5156 }
5158 5157 }
5159 5158
5160 5159 // Init pset_loadavg function pointer
5161 5160 init_pset_getloadavg_ptr();
5162 5161
5163 5162 return JNI_OK;
5164 5163 }
5165 5164
5166 5165 void os::init_3(void) {
5167 5166 return;
5168 5167 }
5169 5168
5170 5169 // Mark the polling page as unreadable
5171 5170 void os::make_polling_page_unreadable(void) {
5172 5171 if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
5173 5172 fatal("Could not disable polling page");
5174 5173 };
5175 5174
5176 5175 // Mark the polling page as readable
5177 5176 void os::make_polling_page_readable(void) {
5178 5177 if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 )
5179 5178 fatal("Could not enable polling page");
5180 5179 };
5181 5180
5182 5181 // OS interface.
5183 5182
5184 5183 bool os::check_heap(bool force) { return true; }
5185 5184
5186 5185 typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
5187 5186 static vsnprintf_t sol_vsnprintf = NULL;
5188 5187
5189 5188 int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
5190 5189 if (!sol_vsnprintf) {
5191 5190 //search for the named symbol in the objects that were loaded after libjvm
5192 5191 void* where = RTLD_NEXT;
5193 5192 if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
5194 5193 sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
5195 5194 if (!sol_vsnprintf){
5196 5195 //search for the named symbol in the objects that were loaded before libjvm
5197 5196 where = RTLD_DEFAULT;
5198 5197 if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
5199 5198 sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
5200 5199 assert(sol_vsnprintf != NULL, "vsnprintf not found");
5201 5200 }
5202 5201 }
5203 5202 return (*sol_vsnprintf)(buf, count, fmt, argptr);
5204 5203 }
5205 5204
5206 5205
5207 5206 // Is a (classpath) directory empty?
5208 5207 bool os::dir_is_empty(const char* path) {
5209 5208 DIR *dir = NULL;
5210 5209 struct dirent *ptr;
5211 5210
5212 5211 dir = opendir(path);
5213 5212 if (dir == NULL) return true;
5214 5213
5215 5214 /* Scan the directory */
5216 5215 bool result = true;
5217 5216 char buf[sizeof(struct dirent) + MAX_PATH];
5218 5217 struct dirent *dbuf = (struct dirent *) buf;
5219 5218 while (result && (ptr = readdir(dir, dbuf)) != NULL) {
5220 5219 if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
5221 5220 result = false;
5222 5221 }
5223 5222 }
5224 5223 closedir(dir);
5225 5224 return result;
5226 5225 }
5227 5226
5228 5227 // This code originates from JDK's sysOpen and open64_w
5229 5228 // from src/solaris/hpi/src/system_md.c
5230 5229
5231 5230 #ifndef O_DELETE
5232 5231 #define O_DELETE 0x10000
5233 5232 #endif
5234 5233
5235 5234 // Open a file. Unlink the file immediately after open returns
5236 5235 // if the specified oflag has the O_DELETE flag set.
5237 5236 // O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
5238 5237
5239 5238 int os::open(const char *path, int oflag, int mode) {
5240 5239 if (strlen(path) > MAX_PATH - 1) {
5241 5240 errno = ENAMETOOLONG;
5242 5241 return -1;
5243 5242 }
5244 5243 int fd;
5245 5244 int o_delete = (oflag & O_DELETE);
5246 5245 oflag = oflag & ~O_DELETE;
5247 5246
5248 5247 fd = ::open64(path, oflag, mode);
5249 5248 if (fd == -1) return -1;
5250 5249
5251 5250 //If the open succeeded, the file might still be a directory
5252 5251 {
5253 5252 struct stat64 buf64;
5254 5253 int ret = ::fstat64(fd, &buf64);
5255 5254 int st_mode = buf64.st_mode;
5256 5255
5257 5256 if (ret != -1) {
5258 5257 if ((st_mode & S_IFMT) == S_IFDIR) {
5259 5258 errno = EISDIR;
5260 5259 ::close(fd);
5261 5260 return -1;
5262 5261 }
5263 5262 } else {
5264 5263 ::close(fd);
5265 5264 return -1;
5266 5265 }
5267 5266 }
5268 5267 /*
5269 5268 * 32-bit Solaris systems suffer from:
5270 5269 *
5271 5270 * - an historical default soft limit of 256 per-process file
5272 5271 * descriptors that is too low for many Java programs.
5273 5272 *
5274 5273 * - a design flaw where file descriptors created using stdio
5275 5274 * fopen must be less than 256, _even_ when the first limit above
5276 5275 * has been raised. This can cause calls to fopen (but not calls to
5277 5276 * open, for example) to fail mysteriously, perhaps in 3rd party
5278 5277 * native code (although the JDK itself uses fopen). One can hardly
5279 5278 * criticize them for using this most standard of all functions.
5280 5279 *
5281 5280 * We attempt to make everything work anyways by:
5282 5281 *
5283 5282 * - raising the soft limit on per-process file descriptors beyond
5284 5283 * 256
5285 5284 *
5286 5285 * - As of Solaris 10u4, we can request that Solaris raise the 256
5287 5286 * stdio fopen limit by calling function enable_extended_FILE_stdio.
5288 5287 * This is done in init_2 and recorded in enabled_extended_FILE_stdio
5289 5288 *
5290 5289 * - If we are stuck on an old (pre 10u4) Solaris system, we can
5291 5290 * workaround the bug by remapping non-stdio file descriptors below
5292 5291 * 256 to ones beyond 256, which is done below.
5293 5292 *
5294 5293 * See:
5295 5294 * 1085341: 32-bit stdio routines should support file descriptors >255
5296 5295 * 6533291: Work around 32-bit Solaris stdio limit of 256 open files
5297 5296 * 6431278: Netbeans crash on 32 bit Solaris: need to call
5298 5297 * enable_extended_FILE_stdio() in VM initialisation
5299 5298 * Giri Mandalika's blog
5300 5299 * http://technopark02.blogspot.com/2005_05_01_archive.html
5301 5300 */
5302 5301 #ifndef _LP64
5303 5302 if ((!enabled_extended_FILE_stdio) && fd < 256) {
5304 5303 int newfd = ::fcntl(fd, F_DUPFD, 256);
5305 5304 if (newfd != -1) {
5306 5305 ::close(fd);
5307 5306 fd = newfd;
5308 5307 }
5309 5308 }
5310 5309 #endif // 32-bit Solaris
5311 5310 /*
5312 5311 * All file descriptors that are opened in the JVM and not
5313 5312 * specifically destined for a subprocess should have the
5314 5313 * close-on-exec flag set. If we don't set it, then careless 3rd
5315 5314 * party native code might fork and exec without closing all
5316 5315 * appropriate file descriptors (e.g. as we do in closeDescriptors in
5317 5316 * UNIXProcess.c), and this in turn might:
5318 5317 *
5319 5318 * - cause end-of-file to fail to be detected on some file
5320 5319 * descriptors, resulting in mysterious hangs, or
5321 5320 *
5322 5321 * - might cause an fopen in the subprocess to fail on a system
5323 5322 * suffering from bug 1085341.
5324 5323 *
5325 5324 * (Yes, the default setting of the close-on-exec flag is a Unix
5326 5325 * design flaw)
5327 5326 *
5328 5327 * See:
5329 5328 * 1085341: 32-bit stdio routines should support file descriptors >255
5330 5329 * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
5331 5330 * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
5332 5331 */
5333 5332 #ifdef FD_CLOEXEC
5334 5333 {
5335 5334 int flags = ::fcntl(fd, F_GETFD);
5336 5335 if (flags != -1)
5337 5336 ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
5338 5337 }
5339 5338 #endif
5340 5339
5341 5340 if (o_delete != 0) {
5342 5341 ::unlink(path);
5343 5342 }
5344 5343 return fd;
5345 5344 }
5346 5345
5347 5346 // create binary file, rewriting existing file if required
5348 5347 int os::create_binary_file(const char* path, bool rewrite_existing) {
5349 5348 int oflags = O_WRONLY | O_CREAT;
5350 5349 if (!rewrite_existing) {
5351 5350 oflags |= O_EXCL;
5352 5351 }
5353 5352 return ::open64(path, oflags, S_IREAD | S_IWRITE);
5354 5353 }
5355 5354
5356 5355 // return current position of file pointer
5357 5356 jlong os::current_file_offset(int fd) {
5358 5357 return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
5359 5358 }
5360 5359
5361 5360 // move file pointer to the specified offset
5362 5361 jlong os::seek_to_file_offset(int fd, jlong offset) {
5363 5362 return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
5364 5363 }
5365 5364
5366 5365 jlong os::lseek(int fd, jlong offset, int whence) {
5367 5366 return (jlong) ::lseek64(fd, offset, whence);
5368 5367 }
5369 5368
5370 5369 char * os::native_path(char *path) {
5371 5370 return path;
5372 5371 }
5373 5372
5374 5373 int os::ftruncate(int fd, jlong length) {
5375 5374 return ::ftruncate64(fd, length);
5376 5375 }
5377 5376
5378 5377 int os::fsync(int fd) {
5379 5378 RESTARTABLE_RETURN_INT(::fsync(fd));
5380 5379 }
5381 5380
5382 5381 int os::available(int fd, jlong *bytes) {
5383 5382 jlong cur, end;
5384 5383 int mode;
5385 5384 struct stat64 buf64;
5386 5385
5387 5386 if (::fstat64(fd, &buf64) >= 0) {
5388 5387 mode = buf64.st_mode;
5389 5388 if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
5390 5389 /*
5391 5390 * XXX: is the following call interruptible? If so, this might
5392 5391 * need to go through the INTERRUPT_IO() wrapper as for other
5393 5392 * blocking, interruptible calls in this file.
5394 5393 */
5395 5394 int n,ioctl_return;
5396 5395
5397 5396 INTERRUPTIBLE(::ioctl(fd, FIONREAD, &n),ioctl_return,os::Solaris::clear_interrupted);
5398 5397 if (ioctl_return>= 0) {
5399 5398 *bytes = n;
5400 5399 return 1;
5401 5400 }
5402 5401 }
5403 5402 }
5404 5403 if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
5405 5404 return 0;
5406 5405 } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
5407 5406 return 0;
5408 5407 } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
5409 5408 return 0;
5410 5409 }
5411 5410 *bytes = end - cur;
5412 5411 return 1;
5413 5412 }
5414 5413
5415 5414 // Map a block of memory.
5416 5415 char* os::map_memory(int fd, const char* file_name, size_t file_offset,
5417 5416 char *addr, size_t bytes, bool read_only,
5418 5417 bool allow_exec) {
5419 5418 int prot;
5420 5419 int flags;
5421 5420
5422 5421 if (read_only) {
5423 5422 prot = PROT_READ;
5424 5423 flags = MAP_SHARED;
5425 5424 } else {
5426 5425 prot = PROT_READ | PROT_WRITE;
5427 5426 flags = MAP_PRIVATE;
5428 5427 }
5429 5428
5430 5429 if (allow_exec) {
5431 5430 prot |= PROT_EXEC;
5432 5431 }
5433 5432
5434 5433 if (addr != NULL) {
5435 5434 flags |= MAP_FIXED;
5436 5435 }
5437 5436
5438 5437 char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
5439 5438 fd, file_offset);
5440 5439 if (mapped_address == MAP_FAILED) {
5441 5440 return NULL;
5442 5441 }
5443 5442 return mapped_address;
5444 5443 }
5445 5444
5446 5445
5447 5446 // Remap a block of memory.
5448 5447 char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
5449 5448 char *addr, size_t bytes, bool read_only,
5450 5449 bool allow_exec) {
5451 5450 // same as map_memory() on this OS
5452 5451 return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
5453 5452 allow_exec);
5454 5453 }
5455 5454
5456 5455
5457 5456 // Unmap a block of memory.
5458 5457 bool os::unmap_memory(char* addr, size_t bytes) {
5459 5458 return munmap(addr, bytes) == 0;
5460 5459 }
5461 5460
5462 5461 void os::pause() {
5463 5462 char filename[MAX_PATH];
5464 5463 if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5465 5464 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
5466 5465 } else {
5467 5466 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5468 5467 }
5469 5468
5470 5469 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5471 5470 if (fd != -1) {
5472 5471 struct stat buf;
5473 5472 ::close(fd);
5474 5473 while (::stat(filename, &buf) == 0) {
5475 5474 (void)::poll(NULL, 0, 100);
5476 5475 }
5477 5476 } else {
5478 5477 jio_fprintf(stderr,
5479 5478 "Could not open pause file '%s', continuing immediately.\n", filename);
5480 5479 }
5481 5480 }
5482 5481
5483 5482 #ifndef PRODUCT
5484 5483 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5485 5484 // Turn this on if you need to trace synch operations.
5486 5485 // Set RECORD_SYNCH_LIMIT to a large-enough value,
5487 5486 // and call record_synch_enable and record_synch_disable
5488 5487 // around the computation of interest.
5489 5488
5490 5489 void record_synch(char* name, bool returning); // defined below
5491 5490
5492 5491 class RecordSynch {
5493 5492 char* _name;
5494 5493 public:
5495 5494 RecordSynch(char* name) :_name(name)
5496 5495 { record_synch(_name, false); }
5497 5496 ~RecordSynch() { record_synch(_name, true); }
5498 5497 };
5499 5498
5500 5499 #define CHECK_SYNCH_OP(ret, name, params, args, inner) \
5501 5500 extern "C" ret name params { \
5502 5501 typedef ret name##_t params; \
5503 5502 static name##_t* implem = NULL; \
5504 5503 static int callcount = 0; \
5505 5504 if (implem == NULL) { \
5506 5505 implem = (name##_t*) dlsym(RTLD_NEXT, #name); \
5507 5506 if (implem == NULL) fatal(dlerror()); \
5508 5507 } \
5509 5508 ++callcount; \
5510 5509 RecordSynch _rs(#name); \
5511 5510 inner; \
5512 5511 return implem args; \
5513 5512 }
5514 5513 // in dbx, examine callcounts this way:
5515 5514 // for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done
5516 5515
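
The macro above relies on the standard dlsym(RTLD_NEXT, ...) interposition idiom: the wrapper is linked ahead of the real symbol (for example via LD_PRELOAD), records the call, then forwards to the next definition in link order. A minimal standalone sketch of that idiom follows; the simplified void* parameter type is an assumption for illustration only.

    // build as a shared library and preload it; illustrative, not part of this change
    #include <dlfcn.h>
    #include <stdio.h>

    extern "C" int mutex_lock(void* mu) {                       // same shape CHECK_SYNCH_OP generates
      typedef int mutex_lock_t(void*);
      static mutex_lock_t* real = NULL;
      static int callcount = 0;
      if (real == NULL) {
        real = (mutex_lock_t*) dlsym(RTLD_NEXT, "mutex_lock");  // next definition in link order
        if (real == NULL) { fprintf(stderr, "%s\n", dlerror()); return -1; }
      }
      ++callcount;                                              // bookkeeping, inspected from a debugger
      // ... record entry/return around the real call, as RecordSynch does ...
      return real(mu);                                          // forward to the real implementation
    }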
5517 5516 #define CHECK_POINTER_OK(p) \
5518 5517 (Universe::perm_gen() == NULL || !Universe::is_reserved_heap((oop)(p)))
5519 5518 #define CHECK_MU \
5520 5519 if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
5521 5520 #define CHECK_CV \
5522 5521 if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
5523 5522 #define CHECK_P(p) \
5524 5523 if (!CHECK_POINTER_OK(p)) fatal(false, "Pointer must be in C heap only.");
5525 5524
5526 5525 #define CHECK_MUTEX(mutex_op) \
5527 5526 CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);
5528 5527
5529 5528 CHECK_MUTEX( mutex_lock)
5530 5529 CHECK_MUTEX( _mutex_lock)
5531 5530 CHECK_MUTEX( mutex_unlock)
5532 5531 CHECK_MUTEX(_mutex_unlock)
5533 5532 CHECK_MUTEX( mutex_trylock)
5534 5533 CHECK_MUTEX(_mutex_trylock)
5535 5534
5536 5535 #define CHECK_COND(cond_op) \
5537 5536 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);
5538 5537
5539 5538 CHECK_COND( cond_wait);
5540 5539 CHECK_COND(_cond_wait);
5541 5540 CHECK_COND(_cond_wait_cancel);
5542 5541
5543 5542 #define CHECK_COND2(cond_op) \
5544 5543 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);
5545 5544
5546 5545 CHECK_COND2( cond_timedwait);
5547 5546 CHECK_COND2(_cond_timedwait);
5548 5547 CHECK_COND2(_cond_timedwait_cancel);
5549 5548
5550 5549 // do the _lwp_* versions too
5551 5550 #define mutex_t lwp_mutex_t
5552 5551 #define cond_t lwp_cond_t
5553 5552 CHECK_MUTEX( _lwp_mutex_lock)
5554 5553 CHECK_MUTEX( _lwp_mutex_unlock)
5555 5554 CHECK_MUTEX( _lwp_mutex_trylock)
5556 5555 CHECK_MUTEX( __lwp_mutex_lock)
5557 5556 CHECK_MUTEX( __lwp_mutex_unlock)
5558 5557 CHECK_MUTEX( __lwp_mutex_trylock)
5559 5558 CHECK_MUTEX(___lwp_mutex_lock)
5560 5559 CHECK_MUTEX(___lwp_mutex_unlock)
5561 5560
5562 5561 CHECK_COND( _lwp_cond_wait);
5563 5562 CHECK_COND( __lwp_cond_wait);
5564 5563 CHECK_COND(___lwp_cond_wait);
5565 5564
5566 5565 CHECK_COND2( _lwp_cond_timedwait);
5567 5566 CHECK_COND2( __lwp_cond_timedwait);
5568 5567 #undef mutex_t
5569 5568 #undef cond_t
5570 5569
5571 5570 CHECK_SYNCH_OP(int, _lwp_suspend2, (int lwp, int *n), (lwp, n), 0);
5572 5571 CHECK_SYNCH_OP(int,__lwp_suspend2, (int lwp, int *n), (lwp, n), 0);
5573 5572 CHECK_SYNCH_OP(int, _lwp_kill, (int lwp, int n), (lwp, n), 0);
5574 5573 CHECK_SYNCH_OP(int,__lwp_kill, (int lwp, int n), (lwp, n), 0);
5575 5574 CHECK_SYNCH_OP(int, _lwp_sema_wait, (lwp_sema_t* p), (p), CHECK_P(p));
5576 5575 CHECK_SYNCH_OP(int,__lwp_sema_wait, (lwp_sema_t* p), (p), CHECK_P(p));
5577 5576 CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv), (cv), CHECK_CV);
5578 5577 CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv), (cv), CHECK_CV);
5579 5578
5580 5579
5581 5580 // recording machinery:
5582 5581
5583 5582 enum { RECORD_SYNCH_LIMIT = 200 };
5584 5583 char* record_synch_name[RECORD_SYNCH_LIMIT];
5585 5584 void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
5586 5585 bool record_synch_returning[RECORD_SYNCH_LIMIT];
5587 5586 thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
5588 5587 int record_synch_count = 0;
5589 5588 bool record_synch_enabled = false;
5590 5589
5591 5590 // in dbx, examine recorded data this way:
5592 5591 // for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done
5593 5592
5594 5593 void record_synch(char* name, bool returning) {
5595 5594 if (record_synch_enabled) {
5596 5595 if (record_synch_count < RECORD_SYNCH_LIMIT) {
5597 5596 record_synch_name[record_synch_count] = name;
5598 5597 record_synch_returning[record_synch_count] = returning;
5599 5598 record_synch_thread[record_synch_count] = thr_self();
5600 5599 record_synch_arg0ptr[record_synch_count] = &name;
5601 5600 record_synch_count++;
5602 5601 }
5603 5602 // put more checking code here:
5604 5603 // ...
5605 5604 }
5606 5605 }
5607 5606
5608 5607 void record_synch_enable() {
5609 5608 // start collecting trace data, if not already doing so
5610 5609 if (!record_synch_enabled) record_synch_count = 0;
5611 5610 record_synch_enabled = true;
5612 5611 }
5613 5612
5614 5613 void record_synch_disable() {
5615 5614 // stop collecting trace data
5616 5615 record_synch_enabled = false;
5617 5616 }
5618 5617
5619 5618 #endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5620 5619 #endif // PRODUCT
5621 5620
5622 5621 const intptr_t thr_time_off = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5623 5622 const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
5624 5623 (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5625 5624
5626 5625
5627 5626 // JVMTI & JVM monitoring and management support
5628 5627 // The thread_cpu_time() and current_thread_cpu_time() are only
5629 5628 // supported if is_thread_cpu_time_supported() returns true.
5630 5629 // They are not supported on Solaris T1.
5631 5630
5632 5631 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5633 5632 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
5634 5633 // of a thread.
5635 5634 //
5636 5635 // current_thread_cpu_time() and thread_cpu_time(Thread *)
5637 5636 // return the fast estimate available on the platform.
5638 5637
5639 5638 // hrtime_t gethrvtime() return value includes
5640 5639 // user time but does not include system time
5641 5640 jlong os::current_thread_cpu_time() {
5642 5641 return (jlong) gethrvtime();
5643 5642 }
5644 5643
5645 5644 jlong os::thread_cpu_time(Thread *thread) {
5646 5645 // return user level CPU time only to be consistent with
5647 5646 // what current_thread_cpu_time returns.
5648 5647 // thread_cpu_time_info() must be changed if this changes
5649 5648 return os::thread_cpu_time(thread, false /* user time only */);
5650 5649 }
5651 5650
5652 5651 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
5653 5652 if (user_sys_cpu_time) {
5654 5653 return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
5655 5654 } else {
5656 5655 return os::current_thread_cpu_time();
5657 5656 }
5658 5657 }
5659 5658
5660 5659 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5661 5660 char proc_name[64];
5662 5661 int count;
5663 5662 prusage_t prusage;
5664 5663 jlong lwp_time;
5665 5664 int fd;
5666 5665
5667 5666 sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
5668 5667 getpid(),
5669 5668 thread->osthread()->lwp_id());
5670 5669 fd = ::open(proc_name, O_RDONLY);
5671 5670 if ( fd == -1 ) return -1;
5672 5671
5673 5672 do {
5674 5673 count = ::pread(fd,
5675 5674 (void *)&prusage.pr_utime,
5676 5675 thr_time_size,
5677 5676 thr_time_off);
5678 5677 } while (count < 0 && errno == EINTR);
5679 5678 ::close(fd);
5680 5679 if ( count < 0 ) return -1;
5681 5680
5682 5681 if (user_sys_cpu_time) {
5683 5682 // user + system CPU time
5684 5683 lwp_time = (((jlong)prusage.pr_stime.tv_sec +
5685 5684 (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
5686 5685 (jlong)prusage.pr_stime.tv_nsec +
5687 5686 (jlong)prusage.pr_utime.tv_nsec;
5688 5687 } else {
5689 5688 // user level CPU time only
5690 5689 lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
5691 5690 (jlong)prusage.pr_utime.tv_nsec;
5692 5691 }
5693 5692
5694 5693 return(lwp_time);
5695 5694 }
5696 5695
5697 5696 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5698 5697 info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits
5699 5698 info_ptr->may_skip_backward = false; // elapsed time not wall time
5700 5699 info_ptr->may_skip_forward = false; // elapsed time not wall time
5701 5700 info_ptr->kind = JVMTI_TIMER_USER_CPU; // only user time is returned
5702 5701 }
5703 5702
5704 5703 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5705 5704 info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits
5706 5705 info_ptr->may_skip_backward = false; // elapsed time not wall time
5707 5706 info_ptr->may_skip_forward = false; // elapsed time not wall time
5708 5707 info_ptr->kind = JVMTI_TIMER_USER_CPU; // only user time is returned
5709 5708 }
5710 5709
5711 5710 bool os::is_thread_cpu_time_supported() {
5712 5711 if ( os::Solaris::T2_libthread() || UseBoundThreads ) {
5713 5712 return true;
5714 5713 } else {
5715 5714 return false;
5716 5715 }
5717 5716 }
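
Illustrative only (a hypothetical VM-internal helper, not part of this file): callers such as JVMTI and JVM M&M are expected to gate the CPU-time queries on is_thread_cpu_time_supported(), roughly as sketched below.

    jlong sample_current_thread_user_time() {
      if (!os::is_thread_cpu_time_supported()) {
        return -1;                          // e.g. T1 libthread without bound threads
      }
      // user-level CPU time of the calling thread, in nanoseconds (gethrvtime-based)
      return os::current_thread_cpu_time();
    }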
5718 5717
5719 5718 // System loadavg support. Returns -1 if load average cannot be obtained.
5720 5719 // Return the load average for our processor set if the primitive exists
5721 5720 // (Solaris 9 and later). Otherwise just return system wide loadavg.
5722 5721 int os::loadavg(double loadavg[], int nelem) {
5723 5722 if (pset_getloadavg_ptr != NULL) {
5724 5723 return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
5725 5724 } else {
5726 5725 return ::getloadavg(loadavg, nelem);
5727 5726 }
5728 5727 }
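
A hypothetical caller, for illustration: fetch the one-minute load average and fall back to -1.0 when neither pset_getloadavg nor getloadavg can supply it.

    double one_minute_load() {
      double avg[1];
      // os::loadavg returns a negative value when the load average cannot be obtained
      return (os::loadavg(avg, 1) >= 0) ? avg[0] : -1.0;
    }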
5729 5728
5730 5729 //---------------------------------------------------------------------------------
5731 5730
5732 5731 static address same_page(address x, address y) {
5733 5732 intptr_t page_bits = -os::vm_page_size();
5734 5733 if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
5735 5734 return x;
5736 5735 else if (x > y)
5737 5736 return (address)(intptr_t(y) | ~page_bits) + 1;
5738 5737 else
5739 5738 return (address)(intptr_t(y) & page_bits);
5740 5739 }
5741 5740
5742 5741 bool os::find(address addr, outputStream* st) {
5743 5742 Dl_info dlinfo;
5744 5743 memset(&dlinfo, 0, sizeof(dlinfo));
5745 5744 if (dladdr(addr, &dlinfo)) {
5746 5745 #ifdef _LP64
5747 5746 st->print("0x%016lx: ", addr);
5748 5747 #else
5749 5748 st->print("0x%08x: ", addr);
5750 5749 #endif
5751 5750 if (dlinfo.dli_sname != NULL)
5752 5751 st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
5753 5752 else if (dlinfo.dli_fname)
5754 5753 st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
5755 5754 else
5756 5755 st->print("<absolute address>");
5757 5756 if (dlinfo.dli_fname) st->print(" in %s", dlinfo.dli_fname);
5758 5757 #ifdef _LP64
5759 5758 if (dlinfo.dli_fbase) st->print(" at 0x%016lx", dlinfo.dli_fbase);
5760 5759 #else
5761 5760 if (dlinfo.dli_fbase) st->print(" at 0x%08x", dlinfo.dli_fbase);
5762 5761 #endif
5763 5762 st->cr();
5764 5763
5765 5764 if (Verbose) {
5766 5765 // decode some bytes around the PC
5767 5766 address begin = same_page(addr-40, addr);
5768 5767 address end = same_page(addr+40, addr);
5769 5768 address lowest = (address) dlinfo.dli_sname;
5770 5769 if (!lowest) lowest = (address) dlinfo.dli_fbase;
5771 5770 if (begin < lowest) begin = lowest;
5772 5771 Dl_info dlinfo2;
5773 5772 if (dladdr(end, &dlinfo2) && dlinfo2.dli_saddr != dlinfo.dli_saddr
5774 5773 && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
5775 5774 end = (address) dlinfo2.dli_saddr;
5776 5775 Disassembler::decode(begin, end, st);
5777 5776 }
5778 5777 return true;
5779 5778 }
5780 5779 return false;
5781 5780 }
5782 5781
5783 5782 // Following function has been added to support HotSparc's libjvm.so running
5784 5783 // under Solaris production JDK 1.2.2 / 1.3.0. These came from
5785 5784 // src/solaris/hpi/native_threads in the EVM codebase.
5786 5785 //
5787 5786 // NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
5788 5787 // libraries and should thus be removed. We will leave it behind for a while
5789 5788 // until we no longer want to be able to run on top of 1.3.0 Solaris production
5790 5789 // JDK. See 4341971.
5791 5790
5792 5791 #define STACK_SLACK 0x800
5793 5792
5794 5793 extern "C" {
5795 5794 intptr_t sysThreadAvailableStackWithSlack() {
5796 5795 stack_t st;
5797 5796 intptr_t retval, stack_top;
5798 5797 retval = thr_stksegment(&st);
5799 5798 assert(retval == 0, "incorrect return value from thr_stksegment");
5800 5799 assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
5801 5800 assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
5802 5801 stack_top=(intptr_t)st.ss_sp-st.ss_size;
5803 5802 return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
5804 5803 }
5805 5804 }
5806 5805
5807 5806 // Just to get the Kernel build to link on solaris for testing.
5808 5807
5809 5808 extern "C" {
5810 5809 class ASGCT_CallTrace;
5811 5810 void AsyncGetCallTrace(ASGCT_CallTrace *trace, jint depth, void* ucontext)
5812 5811 KERNEL_RETURN;
5813 5812 }
5814 5813
5815 5814
5816 5815 // ObjectMonitor park-unpark infrastructure ...
5817 5816 //
5818 5817 // We implement Solaris and Linux PlatformEvents with the
5819 5818 // obvious condvar-mutex-flag triple.
5820 5819 // Another alternative that works quite well is pipes:
5821 5820 // Each PlatformEvent consists of a pipe-pair.
5822 5821 // The thread associated with the PlatformEvent
5823 5822 // calls park(), which reads from the input end of the pipe.
5824 5823 // Unpark() writes into the other end of the pipe.
5825 5824 // The write-side of the pipe must be set NDELAY.
5826 5825 // Unfortunately pipes consume a large # of handles.
5827 5826 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
5828 5827 // Using pipes for the 1st few threads might be workable, however.
5829 5828 //
5830 5829 // park() is permitted to return spuriously.
5831 5830 // Callers of park() should wrap the call to park() in
5832 5831 // an appropriate loop. A litmus test for the correct
5833 5832 // usage of park is the following: if park() were modified
5834 5833 // to immediately return 0 your code should still work,
5835 5834 // albeit degenerating to a spin loop.
5836 5835 //
5837 5836 // An interesting optimization for park() is to use a trylock()
5838 5837 // to attempt to acquire the mutex. If the trylock() fails
5839 5838 // then we know that a concurrent unpark() operation is in-progress.
5840 5839 // In that case the park() code could simply set _count to 0
5841 5840 // and return immediately. The subsequent park() operation *might*
5842 5841 // return immediately. That's harmless as the caller of park() is
5843 5842 // expected to loop. By using trylock() we will have avoided a
5844 5843 // context switch caused by contention on the per-thread mutex.
5845 5844 //
5846 5845 // TODO-FIXME:
5847 5846 // 1. Reconcile Doug's JSR166 j.u.c park-unpark with the
5848 5847 // objectmonitor implementation.
5849 5848 // 2. Collapse the JSR166 parker event, and the
5850 5849 // objectmonitor ParkEvent into a single "Event" construct.
5851 5850 // 3. In park() and unpark() add:
5852 5851 // assert (Thread::current() == AssociatedWith).
5853 5852 // 4. add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
5854 5853 // 1-out-of-N park() operations will return immediately.
5855 5854 //
5856 5855 // _Event transitions in park()
5857 5856 // -1 => -1 : illegal
5858 5857 // 1 => 0 : pass - return immediately
5859 5858 // 0 => -1 : block
5860 5859 //
5861 5860 // _Event serves as a restricted-range semaphore.
5862 5861 //
5863 5862 // Another possible encoding of _Event would be with
5864 5863 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5865 5864 //
5866 5865 // TODO-FIXME: add DTRACE probes for:
5867 5866 // 1. Tx parks
5868 5867 // 2. Ty unparks Tx
5869 5868 // 3. Tx resumes from park
5870 5869
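
A standalone sketch of the condvar-mutex-flag triple described above, written against plain POSIX threads rather than the VM classes; initialization is elided and the names are made up for illustration. Note how the waiter loops, since park() is allowed to return spuriously, and how the signal is issued after the lock is dropped.

    #include <pthread.h>

    struct SimpleEvent {
      pthread_mutex_t mu;        // initialize with PTHREAD_MUTEX_INITIALIZER
      pthread_cond_t  cv;        // initialize with PTHREAD_COND_INITIALIZER
      int             signaled;  // the "flag": 0 = no permit, 1 = permit available
    };

    void simple_park(SimpleEvent* e) {
      pthread_mutex_lock(&e->mu);
      while (e->signaled == 0) {            // loop: the wait may return spuriously
        pthread_cond_wait(&e->cv, &e->mu);
      }
      e->signaled = 0;                      // consume the permit
      pthread_mutex_unlock(&e->mu);
    }

    void simple_unpark(SimpleEvent* e) {
      pthread_mutex_lock(&e->mu);
      e->signaled = 1;
      pthread_mutex_unlock(&e->mu);
      pthread_cond_signal(&e->cv);          // signal after dropping the lock, as unpark() does below
    }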
5871 5870
5872 5871 // value determined through experimentation
5873 5872 #define ROUNDINGFIX 11
5874 5873
5875 5874 // utility to compute the abstime argument to timedwait.
5876 5875 // TODO-FIXME: switch from compute_abstime() to unpackTime().
5877 5876
5878 5877 static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
5879 5878 // millis is the relative timeout time
5880 5879 // abstime will be the absolute timeout time
5881 5880 if (millis < 0) millis = 0;
5882 5881 struct timeval now;
5883 5882 int status = gettimeofday(&now, NULL);
5884 5883 assert(status == 0, "gettimeofday");
5885 5884 jlong seconds = millis / 1000;
5886 5885 jlong max_wait_period;
5887 5886
5888 5887 if (UseLWPSynchronization) {
5889 5888 // forward port of fix for 4275818 (not sleeping long enough)
5890 5889 // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
5891 5890 // _lwp_cond_timedwait() used a round_down algorithm rather
5892 5891 // than a round_up. For millis less than our roundfactor
5893 5892 // it rounded down to 0 which doesn't meet the spec.
5894 5893 // For millis > roundfactor we may return a bit sooner, but
5895 5894 // since we can not accurately identify the patch level and
5896 5895 // this has already been fixed in Solaris 9 and 8 we will
5897 5896 // leave it alone rather than always rounding down.
5898 5897
5899 5898 if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
5900 5899 // It appears that when we go directly through Solaris _lwp_cond_timedwait()
5901 5900 // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
5902 5901 max_wait_period = 21000000;
5903 5902 } else {
5904 5903 max_wait_period = 50000000;
5905 5904 }
5906 5905 millis %= 1000;
5907 5906 if (seconds > max_wait_period) { // see man cond_timedwait(3T)
5908 5907 seconds = max_wait_period;
5909 5908 }
5910 5909 abstime->tv_sec = now.tv_sec + seconds;
5911 5910 long usec = now.tv_usec + millis * 1000;
5912 5911 if (usec >= 1000000) {
5913 5912 abstime->tv_sec += 1;
5914 5913 usec -= 1000000;
5915 5914 }
5916 5915 abstime->tv_nsec = usec * 1000;
5917 5916 return abstime;
5918 5917 }
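
A standalone illustration of the same arithmetic with the LWP rounding fix and the max-wait clamp left out; the concrete values in the comments are hypothetical.

    #include <sys/time.h>
    #include <time.h>

    static void abstime_from_millis(struct timespec* abst, long millis) {
      struct timeval now;
      gettimeofday(&now, NULL);
      long secs = millis / 1000;              // e.g. millis = 2500 -> secs = 2
      long rem  = millis % 1000;              //                      rem  = 500 ms
      abst->tv_sec = now.tv_sec + secs;
      long usec = now.tv_usec + rem * 1000;   // 500 ms -> 500000 us
      if (usec >= 1000000) {                  // carry into seconds, exactly as above
        abst->tv_sec += 1;
        usec -= 1000000;
      }
      abst->tv_nsec = usec * 1000;
    }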
5919 5918
5920 5919 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
5921 5920 // Conceptually TryPark() should be equivalent to park(0).
5922 5921
5923 5922 int os::PlatformEvent::TryPark() {
5924 5923 for (;;) {
5925 5924 const int v = _Event ;
5926 5925 guarantee ((v == 0) || (v == 1), "invariant") ;
5927 5926 if (Atomic::cmpxchg (0, &_Event, v) == v) return v ;
5928 5927 }
5929 5928 }
5930 5929
5931 5930 void os::PlatformEvent::park() { // AKA: down()
5932 5931 // Invariant: Only the thread associated with the Event/PlatformEvent
5933 5932 // may call park().
5934 5933 int v ;
5935 5934 for (;;) {
5936 5935 v = _Event ;
5937 5936 if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5938 5937 }
5939 5938 guarantee (v >= 0, "invariant") ;
5940 5939 if (v == 0) {
5941 5940 // Do this the hard way by blocking ...
5942 5941 // See http://monaco.sfbay/detail.jsf?cr=5094058.
5943 5942 // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
5944 5943 // Only for SPARC >= V8PlusA
5945 5944 #if defined(__sparc) && defined(COMPILER2)
5946 5945 if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5947 5946 #endif
5948 5947 int status = os::Solaris::mutex_lock(_mutex);
5949 5948 assert_status(status == 0, status, "mutex_lock");
5950 5949 guarantee (_nParked == 0, "invariant") ;
5951 5950 ++ _nParked ;
5952 5951 while (_Event < 0) {
5953 5952 // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
5954 5953 // Treat this the same as if the wait was interrupted
5955 5954 // With usr/lib/lwp going to kernel, always handle ETIME
5956 5955 status = os::Solaris::cond_wait(_cond, _mutex);
5957 5956 if (status == ETIME) status = EINTR ;
5958 5957 assert_status(status == 0 || status == EINTR, status, "cond_wait");
5959 5958 }
5960 5959 -- _nParked ;
5961 5960 _Event = 0 ;
5962 5961 status = os::Solaris::mutex_unlock(_mutex);
5963 5962 assert_status(status == 0, status, "mutex_unlock");
5964 5963 }
5965 5964 }
5966 5965
5967 5966 int os::PlatformEvent::park(jlong millis) {
5968 5967 guarantee (_nParked == 0, "invariant") ;
5969 5968 int v ;
5970 5969 for (;;) {
5971 5970 v = _Event ;
5972 5971 if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5973 5972 }
5974 5973 guarantee (v >= 0, "invariant") ;
5975 5974 if (v != 0) return OS_OK ;
5976 5975
5977 5976 int ret = OS_TIMEOUT;
5978 5977 timestruc_t abst;
5979 5978 compute_abstime (&abst, millis);
5980 5979
5981 5980 // See http://monaco.sfbay/detail.jsf?cr=5094058.
5982 5981 // For Solaris SPARC set fprs.FEF=0 prior to parking.
5983 5982 // Only for SPARC >= V8PlusA
5984 5983 #if defined(__sparc) && defined(COMPILER2)
5985 5984 if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5986 5985 #endif
5987 5986 int status = os::Solaris::mutex_lock(_mutex);
5988 5987 assert_status(status == 0, status, "mutex_lock");
5989 5988 guarantee (_nParked == 0, "invariant") ;
5990 5989 ++ _nParked ;
5991 5990 while (_Event < 0) {
5992 5991 int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
5993 5992 assert_status(status == 0 || status == EINTR ||
5994 5993 status == ETIME || status == ETIMEDOUT,
5995 5994 status, "cond_timedwait");
5996 5995 if (!FilterSpuriousWakeups) break ; // previous semantics
5997 5996 if (status == ETIME || status == ETIMEDOUT) break ;
5998 5997 // We consume and ignore EINTR and spurious wakeups.
5999 5998 }
6000 5999 -- _nParked ;
6001 6000 if (_Event >= 0) ret = OS_OK ;
6002 6001 _Event = 0 ;
6003 6002 status = os::Solaris::mutex_unlock(_mutex);
6004 6003 assert_status(status == 0, status, "mutex_unlock");
6005 6004 return ret;
6006 6005 }
6007 6006
6008 6007 void os::PlatformEvent::unpark() {
6009 6008 int v, AnyWaiters;
6010 6009
6011 6010 // Increment _Event.
6012 6011 // Another acceptable implementation would be to simply swap 1
6013 6012 // into _Event:
6014 6013 // if (Swap (&_Event, 1) < 0) {
6015 6014 // mutex_lock (_mutex) ; AnyWaiters = nParked; mutex_unlock (_mutex) ;
6016 6015 // if (AnyWaiters) cond_signal (_cond) ;
6017 6016 // }
6018 6017
6019 6018 for (;;) {
6020 6019 v = _Event ;
6021 6020 if (v > 0) {
6022 6021 // The LD of _Event could have reordered or be satisfied
6023 6022 // by a read-aside from this processor's write buffer.
6024 6023 // To avoid problems execute a barrier and then
6025 6024 // ratify the value. A degenerate CAS() would also work.
6026 6025 // Viz., CAS (v+0, &_Event, v) == v).
6027 6026 OrderAccess::fence() ;
6028 6027 if (_Event == v) return ;
6029 6028 continue ;
6030 6029 }
6031 6030 if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
6032 6031 }
6033 6032
6034 6033 // If the thread associated with the event was parked, wake it.
6035 6034 if (v < 0) {
6036 6035 int status ;
6037 6036 // Wait for the thread assoc with the PlatformEvent to vacate.
6038 6037 status = os::Solaris::mutex_lock(_mutex);
6039 6038 assert_status(status == 0, status, "mutex_lock");
6040 6039 AnyWaiters = _nParked ;
6041 6040 status = os::Solaris::mutex_unlock(_mutex);
6042 6041 assert_status(status == 0, status, "mutex_unlock");
6043 6042 guarantee (AnyWaiters == 0 || AnyWaiters == 1, "invariant") ;
6044 6043 if (AnyWaiters != 0) {
6045 6044 // We intentionally signal *after* dropping the lock
6046 6045 // to avoid a common class of futile wakeups.
6047 6046 status = os::Solaris::cond_signal(_cond);
6048 6047 assert_status(status == 0, status, "cond_signal");
6049 6048 }
6050 6049 }
6051 6050 }
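
For comparison, a standalone sketch of the swap-based alternative outlined in the comment inside unpark() above. The __sync_lock_test_and_set builtin stands in for an atomic swap and is an assumption here; this is not the implementation used by this file.

    #include <pthread.h>

    struct SwapEvent {
      volatile int    event;     // -1 = a thread is parked, 0 = neutral, 1 = permit available
      int             nParked;   // protected by mu
      pthread_mutex_t mu;
      pthread_cond_t  cv;
    };

    void swap_unpark(SwapEvent* e) {
      // "Swap(&_Event, 1)": publish the permit and observe the previous value
      int prev = __sync_lock_test_and_set(&e->event, 1);
      if (prev < 0) {                                    // a thread may be parked
        pthread_mutex_lock(&e->mu);
        int any_waiters = e->nParked;
        pthread_mutex_unlock(&e->mu);
        if (any_waiters) pthread_cond_signal(&e->cv);    // signal after dropping the lock
      }
    }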
6052 6051
6053 6052 // JSR166
6054 6053 // -------------------------------------------------------
6055 6054
6056 6055 /*
6057 6056 * The solaris and linux implementations of park/unpark are fairly
6058 6057 * conservative for now, but can be improved. They currently use a
6059 6058 * mutex/condvar pair, plus _counter.
6060 6059 * Park decrements _counter if > 0, else does a condvar wait. Unpark
6061 6060 * sets count to 1 and signals condvar. Only one thread ever waits
6062 6061 * on the condvar. Contention seen when trying to park implies that someone
6063 6062 * is unparking you, so don't wait. And spurious returns are fine, so there
6064 6063 * is no need to track notifications.
6065 6064 */
6066 6065
6067 -#define NANOSECS_PER_SEC 1000000000
6068 -#define NANOSECS_PER_MILLISEC 1000000
6069 6066 #define MAX_SECS 100000000
6070 -
6071 6067 /*
6072 6068 * This code is common to linux and solaris and will be moved to a
6073 6069 * common place in dolphin.
6074 6070 *
6075 6071 * The passed in time value is either a relative time in nanoseconds
6076 6072 * or an absolute time in milliseconds. Either way it has to be unpacked
6077 6073 * into suitable seconds and nanoseconds components and stored in the
6078 6074 * given timespec structure.
6079 6075  * Given that time is a 64-bit value and the time_t used in the timespec is only
6080 6076  * a signed 32-bit value (except on 64-bit Linux) we have to watch for
6081 6077  * overflow if times far in the future are given. Further, on Solaris versions
6082 6078 * prior to 10 there is a restriction (see cond_timedwait) that the specified
6083 6079 * number of seconds, in abstime, is less than current_time + 100,000,000.
6084 6080 * As it will be 28 years before "now + 100000000" will overflow we can
6085 6081 * ignore overflow and just impose a hard-limit on seconds using the value
6086 6082 * of "now + 100,000,000". This places a limit on the timeout of about 3.17
6087 6083 * years from "now".
6088 6084 */
6089 6085 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
6090 6086 assert (time > 0, "convertTime");
6091 6087
6092 6088 struct timeval now;
6093 6089 int status = gettimeofday(&now, NULL);
6094 6090 assert(status == 0, "gettimeofday");
6095 6091
6096 6092 time_t max_secs = now.tv_sec + MAX_SECS;
6097 6093
6098 6094 if (isAbsolute) {
6099 6095 jlong secs = time / 1000;
6100 6096 if (secs > max_secs) {
6101 6097 absTime->tv_sec = max_secs;
6102 6098 }
6103 6099 else {
6104 6100 absTime->tv_sec = secs;
6105 6101 }
6106 6102 absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
6107 6103 }
6108 6104 else {
6109 6105 jlong secs = time / NANOSECS_PER_SEC;
6110 6106 if (secs >= MAX_SECS) {
6111 6107 absTime->tv_sec = max_secs;
6112 6108 absTime->tv_nsec = 0;
6113 6109 }
6114 6110 else {
6115 6111 absTime->tv_sec = now.tv_sec + secs;
6116 6112 absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
6117 6113 if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
6118 6114 absTime->tv_nsec -= NANOSECS_PER_SEC;
6119 6115 ++absTime->tv_sec; // note: this must be <= max_secs
6120 6116 }
6121 6117 }
6122 6118 }
6123 6119 assert(absTime->tv_sec >= 0, "tv_sec < 0");
6124 6120 assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
6125 6121 assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
6126 6122 assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
6127 6123 }
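
Two worked decodes of unpackTime(), with hypothetical inputs, one per branch:

    // isAbsolute == true,  time = 1320000000500 (ms since the epoch):
    //   secs = time / 1000 = 1320000000 (<= max_secs),
    //   tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC = 500000000
    // isAbsolute == false, time = 2500000000 (ns from now):
    //   secs = time / NANOSECS_PER_SEC = 2, tv_sec = now.tv_sec + 2,
    //   tv_nsec = 500000000 + now.tv_usec * 1000, carrying into tv_sec if it reaches NANOSECS_PER_SEC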
6128 6124
6129 6125 void Parker::park(bool isAbsolute, jlong time) {
6130 6126
6131 6127 // Optional fast-path check:
6132 6128 // Return immediately if a permit is available.
6133 6129 if (_counter > 0) {
6134 6130 _counter = 0 ;
6135 6131 OrderAccess::fence();
6136 6132 return ;
6137 6133 }
6138 6134
6139 6135 // Optional fast-exit: Check interrupt before trying to wait
6140 6136 Thread* thread = Thread::current();
6141 6137 assert(thread->is_Java_thread(), "Must be JavaThread");
6142 6138 JavaThread *jt = (JavaThread *)thread;
6143 6139 if (Thread::is_interrupted(thread, false)) {
6144 6140 return;
6145 6141 }
6146 6142
6147 6143 // First, demultiplex/decode time arguments
6148 6144 timespec absTime;
6149 6145 if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
6150 6146 return;
6151 6147 }
6152 6148 if (time > 0) {
6153 6149 // Warning: this code might be exposed to the old Solaris time
6154 6150 // round-down bugs. Grep "roundingFix" for details.
6155 6151 unpackTime(&absTime, isAbsolute, time);
6156 6152 }
6157 6153
6158 6154 // Enter safepoint region
6159 6155 // Beware of deadlocks such as 6317397.
6160 6156 // The per-thread Parker:: _mutex is a classic leaf-lock.
6161 6157 // In particular a thread must never block on the Threads_lock while
6162 6158 // holding the Parker:: mutex. If safepoints are pending both the
6163 6159 // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
6164 6160 ThreadBlockInVM tbivm(jt);
6165 6161
6166 6162 // Don't wait if cannot get lock since interference arises from
6167 6163 // unblocking. Also check interrupt before trying to wait.
6168 6164 if (Thread::is_interrupted(thread, false) ||
6169 6165 os::Solaris::mutex_trylock(_mutex) != 0) {
6170 6166 return;
6171 6167 }
6172 6168
6173 6169 int status ;
6174 6170
6175 6171 if (_counter > 0) { // no wait needed
6176 6172 _counter = 0;
6177 6173 status = os::Solaris::mutex_unlock(_mutex);
6178 6174 assert (status == 0, "invariant") ;
6179 6175 OrderAccess::fence();
6180 6176 return;
6181 6177 }
6182 6178
6183 6179 #ifdef ASSERT
6184 6180 // Don't catch signals while blocked; let the running threads have the signals.
6185 6181 // (This allows a debugger to break into the running thread.)
6186 6182 sigset_t oldsigs;
6187 6183 sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
6188 6184 thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
6189 6185 #endif
6190 6186
6191 6187 OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
6192 6188 jt->set_suspend_equivalent();
6193 6189 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
6194 6190
6195 6191 // Do this the hard way by blocking ...
6196 6192 // See http://monaco.sfbay/detail.jsf?cr=5094058.
6197 6193 // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
6198 6194 // Only for SPARC >= V8PlusA
6199 6195 #if defined(__sparc) && defined(COMPILER2)
6200 6196 if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
6201 6197 #endif
6202 6198
6203 6199 if (time == 0) {
6204 6200 status = os::Solaris::cond_wait (_cond, _mutex) ;
6205 6201 } else {
6206 6202 status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
6207 6203 }
6208 6204 // Note that an untimed cond_wait() can sometimes return ETIME on older
6209 6205 // versions of Solaris.
6210 6206 assert_status(status == 0 || status == EINTR ||
6211 6207 status == ETIME || status == ETIMEDOUT,
6212 6208 status, "cond_timedwait");
6213 6209
6214 6210 #ifdef ASSERT
6215 6211 thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
6216 6212 #endif
6217 6213 _counter = 0 ;
6218 6214 status = os::Solaris::mutex_unlock(_mutex);
6219 6215 assert_status(status == 0, status, "mutex_unlock") ;
6220 6216
6221 6217 // If externally suspended while waiting, re-suspend
6222 6218 if (jt->handle_special_suspend_equivalent_condition()) {
6223 6219 jt->java_suspend_self();
6224 6220 }
6225 6221 OrderAccess::fence();
6226 6222 }
6227 6223
6228 6224 void Parker::unpark() {
6229 6225 int s, status ;
6230 6226 status = os::Solaris::mutex_lock (_mutex) ;
6231 6227 assert (status == 0, "invariant") ;
6232 6228 s = _counter;
6233 6229 _counter = 1;
6234 6230 status = os::Solaris::mutex_unlock (_mutex) ;
6235 6231 assert (status == 0, "invariant") ;
6236 6232
6237 6233 if (s < 1) {
6238 6234 status = os::Solaris::cond_signal (_cond) ;
6239 6235 assert (status == 0, "invariant") ;
6240 6236 }
6241 6237 }
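
Illustrative only: the Parker permit coalesces, so repeated unpark() calls grant at most one subsequent park(). Roughly:

    // unpark(t); unpark(t);   // _counter ends at 1, not 2
    // t: park();              // consumes the single permit, returns immediately
    // t: park();              // no permit left: blocks until the next unpark() or interrupt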
6242 6238
6243 6239 extern char** environ;
6244 6240
6245 6241 // Run the specified command in a separate process. Return its exit value,
6246 6242 // or -1 on failure (e.g. can't fork a new process).
6247 6243 // Unlike system(), this function can be called from signal handler. It
6248 6244 // doesn't block SIGINT et al.
6249 6245 int os::fork_and_exec(char* cmd) {
6250 6246 char * argv[4];
6251 6247 argv[0] = (char *)"sh";
6252 6248 argv[1] = (char *)"-c";
6253 6249 argv[2] = cmd;
6254 6250 argv[3] = NULL;
6255 6251
6256 6252 // fork is async-safe, fork1 is not so can't use in signal handler
6257 6253 pid_t pid;
6258 6254 Thread* t = ThreadLocalStorage::get_thread_slow();
6259 6255 if (t != NULL && t->is_inside_signal_handler()) {
6260 6256 pid = fork();
6261 6257 } else {
6262 6258 pid = fork1();
6263 6259 }
6264 6260
6265 6261 if (pid < 0) {
6266 6262 // fork failed
6267 6263 warning("fork failed: %s", strerror(errno));
6268 6264 return -1;
6269 6265
6270 6266 } else if (pid == 0) {
6271 6267 // child process
6272 6268
6273 6269 // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
6274 6270 execve("/usr/bin/sh", argv, environ);
6275 6271
6276 6272 // execve failed
6277 6273 _exit(-1);
6278 6274
6279 6275 } else {
6280 6276 // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
6281 6277 // care about the actual exit code, for now.
6282 6278
6283 6279 int status;
6284 6280
6285 6281 // Wait for the child process to exit. This returns immediately if
6286 6282 // the child has already exited.
6287 6283 while (waitpid(pid, &status, 0) < 0) {
6288 6284 switch (errno) {
6289 6285 case ECHILD: return 0;
6290 6286 case EINTR: break;
6291 6287 default: return -1;
6292 6288 }
6293 6289 }
6294 6290
6295 6291 if (WIFEXITED(status)) {
6296 6292 // The child exited normally; get its exit code.
6297 6293 return WEXITSTATUS(status);
6298 6294 } else if (WIFSIGNALED(status)) {
6299 6295 // The child exited because of a signal
6300 6296 // The best value to return is 0x80 + signal number,
6301 6297 // because that is what all Unix shells do, and because
6302 6298 // it allows callers to distinguish between process exit and
6303 6299 // process death by signal.
6304 6300 return 0x80 + WTERMSIG(status);
6305 6301 } else {
6306 6302 // Unknown exit code; pass it through
6307 6303 return status;
6308 6304 }
6309 6305 }
6310 6306 }
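
A hypothetical helper, for illustration, decoding the return value of os::fork_and_exec() as described above (the helper name and use of outputStream are assumptions, not part of this change):

    static void report_exec_result(outputStream* st, int rc) {
      if (rc < 0) {
        st->print_cr("fork/exec failed");
      } else if (rc >= 0x80) {
        st->print_cr("command killed by signal %d", rc - 0x80);   // e.g. 137 -> SIGKILL (9)
      } else {
        st->print_cr("command exited with status %d", rc);
      }
    }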
6311 6307
6312 6308 // is_headless_jre()
6313 6309 //
6314 6310 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
6315 6311 // in order to report if we are running in a headless jre
6316 6312 //
6317 6313 // Since JDK8 xawt/libmawt.so was moved into the same directory
6318 6314 // as libawt.so, and renamed libawt_xawt.so
6319 6315 //
6320 6316 bool os::is_headless_jre() {
6321 6317 struct stat statbuf;
6322 6318 char buf[MAXPATHLEN];
6323 6319 char libmawtpath[MAXPATHLEN];
6324 6320 const char *xawtstr = "/xawt/libmawt.so";
6325 6321 const char *new_xawtstr = "/libawt_xawt.so";
6326 6322 char *p;
6327 6323
6328 6324 // Get path to libjvm.so
6329 6325 os::jvm_path(buf, sizeof(buf));
6330 6326
6331 6327 // Get rid of libjvm.so
6332 6328 p = strrchr(buf, '/');
6333 6329 if (p == NULL) return false;
6334 6330 else *p = '\0';
6335 6331
6336 6332 // Get rid of client or server
6337 6333 p = strrchr(buf, '/');
6338 6334 if (p == NULL) return false;
6339 6335 else *p = '\0';
6340 6336
6341 6337 // check xawt/libmawt.so
6342 6338 strcpy(libmawtpath, buf);
6343 6339 strcat(libmawtpath, xawtstr);
6344 6340 if (::stat(libmawtpath, &statbuf) == 0) return false;
6345 6341
6346 6342 // check libawt_xawt.so
6347 6343 strcpy(libmawtpath, buf);
6348 6344 strcat(libmawtpath, new_xawtstr);
6349 6345 if (::stat(libmawtpath, &statbuf) == 0) return false;
6350 6346
6351 6347 return true;
6352 6348 }
6353 6349
6354 6350 size_t os::write(int fd, const void *buf, unsigned int nBytes) {
6355 6351 INTERRUPTIBLE_RETURN_INT(::write(fd, buf, nBytes), os::Solaris::clear_interrupted);
6356 6352 }
6357 6353
6358 6354 int os::close(int fd) {
6359 6355 RESTARTABLE_RETURN_INT(::close(fd));
6360 6356 }
6361 6357
6362 6358 int os::socket_close(int fd) {
6363 6359 RESTARTABLE_RETURN_INT(::close(fd));
6364 6360 }
6365 6361
6366 6362 int os::recv(int fd, char *buf, int nBytes, int flags) {
6367 6363 INTERRUPTIBLE_RETURN_INT(::recv(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
6368 6364 }
6369 6365
6370 6366
6371 6367 int os::send(int fd, char *buf, int nBytes, int flags) {
6372 6368 INTERRUPTIBLE_RETURN_INT(::send(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
6373 6369 }
6374 6370
6375 6371 int os::raw_send(int fd, char *buf, int nBytes, int flags) {
6376 6372 RESTARTABLE_RETURN_INT(::send(fd, buf, nBytes, flags));
6377 6373 }
6378 6374
6379 6375 // As both poll and select can be interrupted by signals, we have to be
6380 6376 // prepared to restart the system call after updating the timeout, unless
6381 6377 // a poll() is done with timeout == -1, in which case we repeat with this
6382 6378 // "wait forever" value.
6383 6379
6384 6380 int os::timeout(int fd, long timeout) {
6385 6381 int res;
6386 6382 struct timeval t;
6387 6383 julong prevtime, newtime;
6388 6384 static const char* aNull = 0;
6389 6385 struct pollfd pfd;
6390 6386 pfd.fd = fd;
6391 6387 pfd.events = POLLIN;
6392 6388
6393 6389 gettimeofday(&t, &aNull);
6394 6390 prevtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;
6395 6391
6396 6392 for(;;) {
6397 6393 INTERRUPTIBLE_NORESTART(::poll(&pfd, 1, timeout), res, os::Solaris::clear_interrupted);
6398 6394 if(res == OS_ERR && errno == EINTR) {
6399 6395 if(timeout != -1) {
6400 6396 gettimeofday(&t, &aNull);
6401 6397 newtime = ((julong)t.tv_sec * 1000) + t.tv_usec /1000;
6402 6398 timeout -= newtime - prevtime;
6403 6399 if(timeout <= 0)
6404 6400 return OS_OK;
6405 6401 prevtime = newtime;
6406 6402 }
6407 6403 } else return res;
6408 6404 }
6409 6405 }
6410 6406
6411 6407 int os::connect(int fd, struct sockaddr *him, int len) {
6412 6408 int _result;
6413 6409 INTERRUPTIBLE_NORESTART(::connect(fd, him, len), _result,
6414 6410 os::Solaris::clear_interrupted);
6415 6411
6416 6412 // Depending on when thread interruption is reset, _result could be
6417 6413 // one of two values when errno == EINTR
6418 6414
6419 6415 if (((_result == OS_INTRPT) || (_result == OS_ERR))
6420 6416 && (errno == EINTR)) {
6421 6417 /* restarting a connect() changes its errno semantics */
6422 6418 INTERRUPTIBLE(::connect(fd, him, len), _result,
6423 6419 os::Solaris::clear_interrupted);
6424 6420 /* undo these changes */
6425 6421 if (_result == OS_ERR) {
6426 6422 if (errno == EALREADY) {
6427 6423 errno = EINPROGRESS; /* fall through */
6428 6424 } else if (errno == EISCONN) {
6429 6425 errno = 0;
6430 6426 return OS_OK;
6431 6427 }
6432 6428 }
6433 6429 }
6434 6430 return _result;
6435 6431 }
6436 6432
6437 6433 int os::accept(int fd, struct sockaddr *him, int *len) {
6438 6434 if (fd < 0)
6439 6435 return OS_ERR;
6440 6436 INTERRUPTIBLE_RETURN_INT((int)::accept(fd, him,\
6441 6437 (socklen_t*) len), os::Solaris::clear_interrupted);
6442 6438 }
6443 6439
6444 6440 int os::recvfrom(int fd, char *buf, int nBytes, int flags,
6445 6441 sockaddr *from, int *fromlen) {
6446 6442 //%%note jvm_r11
6447 6443 INTERRUPTIBLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes,\
6448 6444 flags, from, fromlen), os::Solaris::clear_interrupted);
6449 6445 }
6450 6446
6451 6447 int os::sendto(int fd, char *buf, int len, int flags,
6452 6448 struct sockaddr *to, int tolen) {
6453 6449 //%%note jvm_r11
6454 6450 INTERRUPTIBLE_RETURN_INT((int)::sendto(fd, buf, len, flags,\
6455 6451 to, tolen), os::Solaris::clear_interrupted);
6456 6452 }
6457 6453
6458 6454 int os::socket_available(int fd, jint *pbytes) {
6459 6455 if (fd < 0)
6460 6456 return OS_OK;
6461 6457
6462 6458 int ret;
6463 6459
6464 6460 RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
6465 6461
6466 6462 //%% note ioctl can return 0 when successful, JVM_SocketAvailable
6467 6463 // is expected to return 0 on failure and 1 on success to the jdk.
6468 6464
6469 6465 return (ret == OS_ERR) ? 0 : 1;
6470 6466 }
6471 6467
6472 6468
6473 6469 int os::bind(int fd, struct sockaddr *him, int len) {
6474 6470 INTERRUPTIBLE_RETURN_INT_NORESTART(::bind(fd, him, len),\
6475 6471 os::Solaris::clear_interrupted);
6476 6472 }