145
146 static int clock_tics_per_sec = 100;
147
148 // For diagnostics to print a message once. see run_periodic_checks
149 static sigset_t check_signal_done;
150 static bool check_signals = true;
151
152 static pid_t _initial_pid = 0;
153
154 // Signal number used to suspend/resume a thread
155
156 // do not use any signal number less than SIGSEGV, see 4355769
157 static int SR_signum = SIGUSR2;
158 sigset_t SR_sigset;
159
160
161 ////////////////////////////////////////////////////////////////////////////////
162 // utility functions
163
164 static int SR_initialize();
165 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
166
// Returns an estimate of the memory currently available to the VM, in bytes.
// Thin wrapper over the BSD-specific implementation below.
julong os::available_memory() {
  return Bsd::available_memory();
}
170
171 // available here means free
// available here means free
julong os::Bsd::available_memory() {
  // Fallback estimate used when no OS-specific figure is obtained below:
  // one quarter of physical memory.
  uint64_t available = physical_memory() >> 2;
#ifdef __APPLE__
  // On macOS, query the Mach host VM statistics for the real free-page count.
  mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
  vm_statistics64_data_t vmstat;
  kern_return_t kerr = host_statistics64(mach_host_self(), HOST_VM_INFO64,
                                         (host_info64_t)&vmstat, &count);
  assert(kerr == KERN_SUCCESS,
         "host_statistics64 failed - check mach_host_self() and count");
  if (kerr == KERN_SUCCESS) {
    // free_count is in pages; convert to bytes.
    available = vmstat.free_count * os::vm_page_size();
  }
#endif
  return available;
516
517 #undef SYS_EXT_DIR
518 #undef EXTENSIONS_DIR
519 }
520
521 ////////////////////////////////////////////////////////////////////////////////
522 // breakpoint support
523
// Trigger a programmatic breakpoint; BREAKPOINT expands to the
// platform-specific trap instruction/macro.
void os::breakpoint() {
  BREAKPOINT;
}
527
// Empty C-linkage function so a native debugger can set a breakpoint on it
// by its unmangled name.
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
531
532 ////////////////////////////////////////////////////////////////////////////////
533 // signal support
534
535 debug_only(static bool signal_sets_initialized = false);
536 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
537
538 bool os::Bsd::is_sig_ignored(int sig) {
539 struct sigaction oact;
540 sigaction(sig, (struct sigaction*)NULL, &oact);
541 void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
542 : CAST_FROM_FN_PTR(void*, oact.sa_handler);
543 if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
544 return true;
545 } else {
546 return false;
547 }
548 }
549
550 void os::Bsd::signal_sets_init() {
551 // Should also have an assertion stating we are still single-threaded.
552 assert(!signal_sets_initialized, "Already initialized");
553 // Fill in signals that are necessarily unblocked for all threads in
554 // the VM. Currently, we unblock the following signals:
555 // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
556 // by -Xrs (=ReduceSignalUsage));
557 // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
558 // other threads. The "ReduceSignalUsage" boolean tells us not to alter
559 // the dispositions or masks wrt these signals.
560 // Programs embedding the VM that want to use the above signals for their
561 // own purposes must, at this time, use the "-Xrs" option to prevent
562 // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
563 // (See bug 4345157, and other related bugs).
564 // In reality, though, unblocking these signals is really a nop, since
565 // these signals are not blocked by default.
566 sigemptyset(&unblocked_sigs);
567 sigemptyset(&allowdebug_blocked_sigs);
568 sigaddset(&unblocked_sigs, SIGILL);
569 sigaddset(&unblocked_sigs, SIGSEGV);
570 sigaddset(&unblocked_sigs, SIGBUS);
571 sigaddset(&unblocked_sigs, SIGFPE);
572 sigaddset(&unblocked_sigs, SR_signum);
573
574 if (!ReduceSignalUsage) {
575 if (!os::Bsd::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
576 sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
577 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
578 }
579 if (!os::Bsd::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
580 sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
581 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
582 }
583 if (!os::Bsd::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
584 sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
585 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
586 }
587 }
588 // Fill in signals that are blocked by all but the VM thread.
589 sigemptyset(&vm_sigs);
590 if (!ReduceSignalUsage) {
591 sigaddset(&vm_sigs, BREAK_SIGNAL);
592 }
593 debug_only(signal_sets_initialized = true);
594
595 }
596
// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Bsd::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");  // set by signal_sets_init()
  return &unblocked_sigs;
}
603
// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Bsd::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");  // set by signal_sets_init()
  return &vm_sigs;
}
610
// These are signals that are blocked during cond_wait to allow debugger in
sigset_t* os::Bsd::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");  // set by signal_sets_init()
  return &allowdebug_blocked_sigs;
}
616
// Apply the VM's standard signal mask to 'thread', preserving the mask the
// caller had so it can be restored later (see set_caller_sigmask).
void os::Bsd::hotspot_sigmask(Thread* thread) {

  // Save caller's signal mask before setting VM signal mask.
  // SIG_BLOCK with a NULL set only queries the current mask.
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  // Ensure the signals the VM depends on are deliverable on this thread.
  pthread_sigmask(SIG_UNBLOCK, os::Bsd::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
3387 tty->cr();
3388 tty->print(" found:");
3389 os::Posix::print_sa_flags(tty, act.sa_flags);
3390 tty->cr();
3391 // No need to check this sig any longer
3392 sigaddset(&check_signal_done, sig);
3393 }
3394
3395 // Dump all the signal
3396 if (sigismember(&check_signal_done, sig)) {
3397 print_signal_handlers(tty, buf, O_BUFLEN);
3398 }
3399 }
3400
3401 extern void report_error(char* file_name, int line_no, char* title,
3402 char* format, ...);
3403
// this is called _before_ the most of global arguments have been parsed
void os::init(void) {
  char dummy;   // used to get a guess on initial stack address

  // With BsdThreads the JavaMain thread pid (primordial thread)
  // is different than the pid of the java launcher thread.
  // So, on Bsd, the launcher thread pid is passed to the VM
  // via the sun.java.launcher.pid property.
  // Use this property instead of getpid() if it was correctly passed.
  // See bug 6351349.
  pid_t java_launcher_pid = (pid_t) Arguments::sun_java_launcher_pid();

  _initial_pid = (java_launcher_pid > 0) ? java_launcher_pid : getpid();

  clock_tics_per_sec = CLK_TCK;

  // Seed the VM-internal PRNG deterministically.
  init_random(1234567);

  ThreadCritical::initialize();

  // Determine the OS page size; fail fast if it cannot be obtained.
  Bsd::set_page_size(getpagesize());
  if (Bsd::page_size() == -1) {
    fatal("os_bsd.cpp: os::init: sysconf failed (%s)", os::strerror(errno));
  }
  init_page_sizes((size_t) Bsd::page_size());

  Bsd::initialize_system_info();

  // main_thread points to the aboriginal thread
  Bsd::_main_thread = pthread_self();

  Bsd::clock_init();
  initial_time_count = javaTimeNanos();

#ifdef __APPLE__
  // XXXDARWIN
  // Work around the unaligned VM callbacks in hotspot's
  // sharedRuntime. The callbacks don't use SSE2 instructions, and work on
  // Linux, Solaris, and FreeBSD. On Mac OS X, dyld (rightly so) enforces
  // alignment when doing symbol lookup. To work around this, we force early
  // binding of all symbols now, thus binding when alignment is known-good.
  _dyld_bind_fully_image_containing_address((const void *) &os::init);
#endif
}
3449
// To install functions for atexit system call
extern "C" {
  // atexit handler: tears down the perf memory subsystem at process exit.
  // Registered in os::init_2 when PerfAllowAtExitRegistration is set.
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
3456
// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
  // Allocate a single page and mark it as readable for safepoint polling
  address polling_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  guarantee(polling_page != MAP_FAILED, "os::init_2: failed to allocate polling page");

  os::set_polling_page(polling_page);
  log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));

  if (!UseMembar) {
    // A writable page whose write-protection is toggled to force memory
    // serialization across threads.
    address mem_serialize_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    guarantee(mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page");
    os::set_memory_serialize_page(mem_serialize_page);
    log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page));
  }

  // initialize suspend/resume support - must do this before signal_sets_init()
  if (SR_initialize() != 0) {
    perror("SR_initialize failed");
    return JNI_ERR;
  }

        log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
      }
    }
  }

  // at-exit methods are called in the reverse order of their registration.
  // atexit functions are called on return from main or as a result of a
  // call to exit(3C). There can be only 32 of these functions registered
  // and atexit() does not set errno.

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init2 atexit(perfMemory_exit_helper) failed");
    }
  }

  // initialize thread priority policy
  prio_init();

#ifdef __APPLE__
  // dynamically link to objective c gc registration
  void *handleLibObjc = dlopen(OBJC_LIB, RTLD_LAZY);
  if (handleLibObjc != NULL) {
    objc_registerThreadWithCollectorFunction = (objc_registerThreadWithCollector_t) dlsym(handleLibObjc, OBJC_GCREGISTER);
  }
#endif

  return JNI_OK;
}
3543
3544 // Mark the polling page as unreadable
3545 void os::make_polling_page_unreadable(void) {
3546 if (!guard_memory((char*)_polling_page, Bsd::page_size())) {
4010 void os::pause() {
4011 char filename[MAX_PATH];
4012 if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4013 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4014 } else {
4015 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4016 }
4017
4018 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4019 if (fd != -1) {
4020 struct stat buf;
4021 ::close(fd);
4022 while (::stat(filename, &buf) == 0) {
4023 (void)::poll(NULL, 0, 100);
4024 }
4025 } else {
4026 jio_fprintf(stderr,
4027 "Could not open pause file '%s', continuing immediately.\n", filename);
4028 }
4029 }
4030
4031
4032 // Refer to the comments in os_solaris.cpp park-unpark. The next two
4033 // comment paragraphs are worth repeating here:
4034 //
4035 // Assumption:
4036 // Only one parker can exist on an event, which is why we allocate
4037 // them per-thread. Multiple unparkers can coexist.
4038 //
4039 // _Event serves as a restricted-range semaphore.
4040 // -1 : thread is blocked, i.e. there is a waiter
4041 // 0 : neutral: thread is running or ready,
4042 // could have been signaled after a wait started
4043 // 1 : signaled - thread is running or ready
4044 //
4045
4046 // utility to compute the abstime argument to timedwait:
4047 // millis is the relative timeout time
4048 // abstime will be the absolute timeout time
4049 // TODO: replace compute_abstime() with unpackTime()
4050
4051 static struct timespec* compute_abstime(struct timespec* abstime,
4052 jlong millis) {
4053 if (millis < 0) millis = 0;
4054 struct timeval now;
4055 int status = gettimeofday(&now, NULL);
4056 assert(status == 0, "gettimeofday");
4057 jlong seconds = millis / 1000;
4058 millis %= 1000;
4059 if (seconds > 50000000) { // see man cond_timedwait(3T)
4060 seconds = 50000000;
4061 }
4062 abstime->tv_sec = now.tv_sec + seconds;
4063 long usec = now.tv_usec + millis * 1000;
4064 if (usec >= 1000000) {
4065 abstime->tv_sec += 1;
4066 usec -= 1000000;
4067 }
4068 abstime->tv_nsec = usec * 1000;
4069 return abstime;
4070 }
4071
// Block the associated thread until unpark() grants a permit ("down()").
void os::PlatformEvent::park() { // AKA "down()"
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // TODO: assert that _Assoc != NULL or _Assoc == Self
  assert(_nParked == 0, "invariant");

  int v;
  // Atomically decrement _Event; the CAS loop retries if another thread
  // modified _Event between our read and the update.
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee(v >= 0, "invariant");
  if (v == 0) {
    // Do this the hard way by blocking ...
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee(_nParked == 0, "invariant");
    ++_nParked;
    while (_Event < 0) {
      status = pthread_cond_wait(_cond, _mutex);
      // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
      // Treat this the same as if the wait was interrupted
      if (status == ETIMEDOUT) { status = EINTR; }
      assert_status(status == 0 || status == EINTR, status, "cond_wait");
    }
    --_nParked;

    _Event = 0;
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other.
    OrderAccess::fence();
  }
  guarantee(_Event >= 0, "invariant");
}
4113
// Timed variant of park(): blocks for at most 'millis' milliseconds.
// Returns OS_OK if a permit was consumed (or unpark() woke us),
// OS_TIMEOUT if the wait timed out.
int os::PlatformEvent::park(jlong millis) {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_nParked == 0, "invariant");

  int v;
  // Atomically decrement _Event (same CAS loop as the untimed park()).
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee(v >= 0, "invariant");
  if (v != 0) return OS_OK;   // permit was available - consume and return

  // We do this the hard way, by blocking the thread.
  // Consider enforcing a minimum timeout value.
  struct timespec abst;
  compute_abstime(&abst, millis);

  int ret = OS_TIMEOUT;
  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee(_nParked == 0, "invariant");
  ++_nParked;

  // Object.wait(timo) will return because of
  // (a) notification
  // (b) timeout
  // (c) thread.interrupt
  //
  // Thread.interrupt and object.notify{All} both call Event::set.
  // That is, we treat thread.interrupt as a special case of notification.
  // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
  // We assume all ETIME returns are valid.
  //
  // TODO: properly differentiate simultaneous notify+interrupt.
  // In that case, we should propagate the notify to another waiter.

  while (_Event < 0) {
    status = pthread_cond_timedwait(_cond, _mutex, &abst);
    assert_status(status == 0 || status == EINTR ||
                  status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (!FilterSpuriousWakeups) break; // previous semantics
    if (status == ETIMEDOUT) break;
    // We consume and ignore EINTR and spurious wakeups.
  }
  --_nParked;
  if (_Event >= 0) {
    ret = OS_OK;   // woken by unpark() rather than by timeout
  }
  _Event = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  assert(_nParked == 0, "invariant");
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other.
  OrderAccess::fence();
  return ret;
}
4176
// Grant a permit and wake the parked thread, if any ("up()").
void os::PlatformEvent::unpark() {
  // Transitions for _Event:
  //    0 => 1 : just return
  //    1 => 1 : just return
  //   -1 => either 0 or 1; must signal target thread
  //         That is, we can safely transition _Event from -1 to either
  //         0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  // Fast path: if no thread was blocked (_Event >= 0) there is nothing
  // to signal.
  if (Atomic::xchg(1, &_Event) >= 0) return;

  // Wait for the thread associated with the event to vacate
  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  int AnyWaiters = _nParked;
  assert(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  if (AnyWaiters != 0) {
    // Note that we signal() *after* dropping the lock for "immortal" Events.
    // This is safe and avoids a common class of futile wakeups. In rare
    // circumstances this can cause a thread to return prematurely from
    // cond_{timed}wait() but the spurious wakeup is benign and the victim
    // will simply re-test the condition and re-park itself.
    // This provides particular benefit if the underlying platform does not
    // provide wait morphing.
    status = pthread_cond_signal(_cond);
    assert_status(status == 0, status, "cond_signal");
  }
}
4213
4214
4215 // JSR166
4216 // -------------------------------------------------------
4217
4218 // The solaris and bsd implementations of park/unpark are fairly
4219 // conservative for now, but can be improved. They currently use a
4220 // mutex/condvar pair, plus a a count.
4221 // Park decrements count if > 0, else does a condvar wait. Unpark
4222 // sets count to 1 and signals condvar. Only one thread ever waits
4223 // on the condvar. Contention seen when trying to park implies that someone
4224 // is unparking you, so don't wait. And spurious returns are fine, so there
4225 // is no need to track notifications.
4226
4227 #define MAX_SECS 100000000
4228
4229 // This code is common to bsd and solaris and will be moved to a
4230 // common place in dolphin.
4231 //
4232 // The passed in time value is either a relative time in nanoseconds
4233 // or an absolute time in milliseconds. Either way it has to be unpacked
4234 // into suitable seconds and nanoseconds components and stored in the
4235 // given timespec structure.
4236 // Given time is a 64-bit value and the time_t used in the timespec is only
4237 // a signed-32-bit value (except on 64-bit Bsd) we have to watch for
4238 // overflow if times way in the future are given. Further on Solaris versions
4239 // prior to 10 there is a restriction (see cond_timedwait) that the specified
4240 // number of seconds, in abstime, is less than current_time + 100,000,000.
4241 // As it will be 28 years before "now + 100000000" will overflow we can
4242 // ignore overflow and just impose a hard-limit on seconds using the value
4243 // of "now + 100,000,000". This places a limit on the timeout of about 3.17
4244 // years from "now".
4245
4246 static void unpackTime(struct timespec* absTime, bool isAbsolute, jlong time) {
4247 assert(time > 0, "convertTime");
4248
4249 struct timeval now;
4250 int status = gettimeofday(&now, NULL);
4251 assert(status == 0, "gettimeofday");
4252
4253 time_t max_secs = now.tv_sec + MAX_SECS;
4254
4255 if (isAbsolute) {
4256 jlong secs = time / 1000;
4257 if (secs > max_secs) {
4258 absTime->tv_sec = max_secs;
4259 } else {
4260 absTime->tv_sec = secs;
4261 }
4262 absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
4263 } else {
4264 jlong secs = time / NANOSECS_PER_SEC;
4265 if (secs >= MAX_SECS) {
4266 absTime->tv_sec = max_secs;
4267 absTime->tv_nsec = 0;
4268 } else {
4269 absTime->tv_sec = now.tv_sec + secs;
4270 absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
4271 if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
4272 absTime->tv_nsec -= NANOSECS_PER_SEC;
4273 ++absTime->tv_sec; // note: this must be <= max_secs
4274 }
4275 }
4276 }
4277 assert(absTime->tv_sec >= 0, "tv_sec < 0");
4278 assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
4279 assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
4280 assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
4281 }
4282
// JSR166 Parker::park: block the current JavaThread until unpark() grants a
// permit, an interrupt is pending, or the (optional) timeout expires.
void Parker::park(bool isAbsolute, jlong time) {
  // Ideally we'd do something useful while spinning, such
  // as calling unpackTime().

  // Optional fast-path check:
  // Return immediately if a permit is available.
  // We depend on Atomic::xchg() having full barrier semantics
  // since we are doing a lock-free update to _counter.
  if (Atomic::xchg(0, &_counter) > 0) return;

  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Optional optimization -- avoid state transitions if there's an interrupt pending.
  // Check interrupt before trying to wait
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // Next, demultiplex/decode time arguments
  struct timespec absTime;
  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
    return;
  }
  if (time > 0) {
    unpackTime(&absTime, isAbsolute, time);
  }


  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex. If safepoints are pending both the
  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if cannot get lock since interference arises from
  // unblocking. Also. check interrupt before trying wait
  if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
    return;
  }

  int status;
  if (_counter > 0) { // no wait needed
    // A permit arrived between the fast-path xchg and taking the lock.
    _counter = 0;
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "invariant");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other and Java-level accesses.
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Bsd::allowdebug_blocked_signals();
  pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  if (time == 0) {
    status = pthread_cond_wait(_cond, _mutex);
  } else {
    status = pthread_cond_timedwait(_cond, _mutex, &absTime);
  }
  assert_status(status == 0 || status == EINTR ||
                status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  // Restore the signal mask saved above.
  pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
#endif

  // Consume the permit (if any) and release the leaf lock.
  _counter = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "invariant");
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other and Java-level accesses.
  OrderAccess::fence();

  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }
}
4375
4376 void Parker::unpark() {
4377 int status = pthread_mutex_lock(_mutex);
4378 assert_status(status == 0, status, "invariant");
4379 const int s = _counter;
4380 _counter = 1;
4381 status = pthread_mutex_unlock(_mutex);
4382 assert_status(status == 0, status, "invariant");
4383 if (s < 1) {
4384 status = pthread_cond_signal(_cond);
4385 assert_status(status == 0, status, "invariant");
4386 }
4387 }
4388
4389
4390 // Darwin has no "environ" in a dynamic library.
4391 #ifdef __APPLE__
4392 #include <crt_externs.h>
4393 #define environ (*_NSGetEnviron())
4394 #else
4395 extern char** environ;
4396 #endif
4397
4398 // Run the specified command in a separate process. Return its exit value,
4399 // or -1 on failure (e.g. can't fork a new process).
4400 // Unlike system(), this function can be called from signal handler. It
4401 // doesn't block SIGINT et al.
4402 int os::fork_and_exec(char* cmd) {
4403 const char * argv[4] = {"sh", "-c", cmd, NULL};
4404
4405 // fork() in BsdThreads/NPTL is not async-safe. It needs to run
4406 // pthread_atfork handlers and reset pthread library. All we need is a
4407 // separate process to execve. Make a direct syscall to fork process.
4408 // On IA64 there's no fork syscall, we have to use fork() and hope for
|
145
146 static int clock_tics_per_sec = 100;
147
148 // For diagnostics to print a message once. see run_periodic_checks
149 static sigset_t check_signal_done;
150 static bool check_signals = true;
151
152 static pid_t _initial_pid = 0;
153
154 // Signal number used to suspend/resume a thread
155
156 // do not use any signal number less than SIGSEGV, see 4355769
157 static int SR_signum = SIGUSR2;
158 sigset_t SR_sigset;
159
160
161 ////////////////////////////////////////////////////////////////////////////////
162 // utility functions
163
164 static int SR_initialize();
165
// Returns an estimate of the memory currently available to the VM, in bytes.
// Thin wrapper over the BSD-specific implementation below.
julong os::available_memory() {
  return Bsd::available_memory();
}
169
170 // available here means free
julong os::Bsd::available_memory() {
  // Fallback estimate used when no OS-specific figure is obtained below:
  // one quarter of physical memory.
  uint64_t available = physical_memory() >> 2;
#ifdef __APPLE__
  // On macOS, query the Mach host VM statistics for the real free-page count.
  mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
  vm_statistics64_data_t vmstat;
  kern_return_t kerr = host_statistics64(mach_host_self(), HOST_VM_INFO64,
                                         (host_info64_t)&vmstat, &count);
  assert(kerr == KERN_SUCCESS,
         "host_statistics64 failed - check mach_host_self() and count");
  if (kerr == KERN_SUCCESS) {
    // free_count is in pages; convert to bytes.
    available = vmstat.free_count * os::vm_page_size();
  }
#endif
  return available;
515
516 #undef SYS_EXT_DIR
517 #undef EXTENSIONS_DIR
518 }
519
520 ////////////////////////////////////////////////////////////////////////////////
521 // breakpoint support
522
// Trigger a programmatic breakpoint; BREAKPOINT expands to the
// platform-specific trap instruction/macro.
void os::breakpoint() {
  BREAKPOINT;
}
526
// Empty C-linkage function so a native debugger can set a breakpoint on it
// by its unmangled name.
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
530
531 ////////////////////////////////////////////////////////////////////////////////
532 // signal support
533
534 debug_only(static bool signal_sets_initialized = false);
535 static sigset_t unblocked_sigs, vm_sigs;
536
537 bool os::Bsd::is_sig_ignored(int sig) {
538 struct sigaction oact;
539 sigaction(sig, (struct sigaction*)NULL, &oact);
540 void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
541 : CAST_FROM_FN_PTR(void*, oact.sa_handler);
542 if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
543 return true;
544 } else {
545 return false;
546 }
547 }
548
549 void os::Bsd::signal_sets_init() {
550 // Should also have an assertion stating we are still single-threaded.
551 assert(!signal_sets_initialized, "Already initialized");
552 // Fill in signals that are necessarily unblocked for all threads in
553 // the VM. Currently, we unblock the following signals:
554 // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
555 // by -Xrs (=ReduceSignalUsage));
556 // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
557 // other threads. The "ReduceSignalUsage" boolean tells us not to alter
558 // the dispositions or masks wrt these signals.
559 // Programs embedding the VM that want to use the above signals for their
560 // own purposes must, at this time, use the "-Xrs" option to prevent
561 // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
562 // (See bug 4345157, and other related bugs).
563 // In reality, though, unblocking these signals is really a nop, since
564 // these signals are not blocked by default.
565 sigemptyset(&unblocked_sigs);
566 sigaddset(&unblocked_sigs, SIGILL);
567 sigaddset(&unblocked_sigs, SIGSEGV);
568 sigaddset(&unblocked_sigs, SIGBUS);
569 sigaddset(&unblocked_sigs, SIGFPE);
570 sigaddset(&unblocked_sigs, SR_signum);
571
572 if (!ReduceSignalUsage) {
573 if (!os::Bsd::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
574 sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
575
576 }
577 if (!os::Bsd::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
578 sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
579 }
580 if (!os::Bsd::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
581 sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
582 }
583 }
584 // Fill in signals that are blocked by all but the VM thread.
585 sigemptyset(&vm_sigs);
586 if (!ReduceSignalUsage) {
587 sigaddset(&vm_sigs, BREAK_SIGNAL);
588 }
589 debug_only(signal_sets_initialized = true);
590
591 }
592
// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Bsd::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");  // set by signal_sets_init()
  return &unblocked_sigs;
}
599
// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Bsd::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");  // set by signal_sets_init()
  return &vm_sigs;
}
606
// Apply the VM's standard signal mask to 'thread', preserving the mask the
// caller had so it can be restored later (see set_caller_sigmask).
void os::Bsd::hotspot_sigmask(Thread* thread) {

  // Save caller's signal mask before setting VM signal mask.
  // SIG_BLOCK with a NULL set only queries the current mask.
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  // Ensure the signals the VM depends on are deliverable on this thread.
  pthread_sigmask(SIG_UNBLOCK, os::Bsd::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
3377 tty->cr();
3378 tty->print(" found:");
3379 os::Posix::print_sa_flags(tty, act.sa_flags);
3380 tty->cr();
3381 // No need to check this sig any longer
3382 sigaddset(&check_signal_done, sig);
3383 }
3384
3385 // Dump all the signal
3386 if (sigismember(&check_signal_done, sig)) {
3387 print_signal_handlers(tty, buf, O_BUFLEN);
3388 }
3389 }
3390
3391 extern void report_error(char* file_name, int line_no, char* title,
3392 char* format, ...);
3393
// this is called _before_ the most of global arguments have been parsed
void os::init(void) {
  char dummy;   // used to get a guess on initial stack address

  // With BsdThreads the JavaMain thread pid (primordial thread)
  // is different than the pid of the java launcher thread.
  // So, on Bsd, the launcher thread pid is passed to the VM
  // via the sun.java.launcher.pid property.
  // Use this property instead of getpid() if it was correctly passed.
  // See bug 6351349.
  pid_t java_launcher_pid = (pid_t) Arguments::sun_java_launcher_pid();

  _initial_pid = (java_launcher_pid > 0) ? java_launcher_pid : getpid();

  clock_tics_per_sec = CLK_TCK;

  // Seed the VM-internal PRNG deterministically.
  init_random(1234567);

  ThreadCritical::initialize();

  // Determine the OS page size; fail fast if it cannot be obtained.
  Bsd::set_page_size(getpagesize());
  if (Bsd::page_size() == -1) {
    fatal("os_bsd.cpp: os::init: sysconf failed (%s)", os::strerror(errno));
  }
  init_page_sizes((size_t) Bsd::page_size());

  Bsd::initialize_system_info();

  // main_thread points to the aboriginal thread
  Bsd::_main_thread = pthread_self();

  Bsd::clock_init();
  initial_time_count = javaTimeNanos();

#ifdef __APPLE__
  // XXXDARWIN
  // Work around the unaligned VM callbacks in hotspot's
  // sharedRuntime. The callbacks don't use SSE2 instructions, and work on
  // Linux, Solaris, and FreeBSD. On Mac OS X, dyld (rightly so) enforces
  // alignment when doing symbol lookup. To work around this, we force early
  // binding of all symbols now, thus binding when alignment is known-good.
  _dyld_bind_fully_image_containing_address((const void *) &os::init);
#endif

  // POSIX-shared initialization (common to all POSIX ports).
  os::Posix::init();
}
3440
// atexit() shutdown hook: flush/release the perf memory region.
// Registered in os::init_2 (under PerfAllowAtExitRegistration); needs
// C linkage because atexit() takes a plain C function pointer.
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
3447
// this is called _after_ the global arguments have been parsed
// Second-phase initialization: safepoint polling page, memory serialize
// page, suspend/resume support, perf atexit hook, thread priorities, and
// (on macOS) the Objective-C GC registration hook.
// Returns JNI_OK on success, JNI_ERR on failure.
jint os::init_2(void) {

  // Shared POSIX-layer second-phase init first.
  os::Posix::init_2();

  // Allocate a single page and mark it as readable for safepoint polling
  address polling_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  guarantee(polling_page != MAP_FAILED, "os::init_2: failed to allocate polling page");

  os::set_polling_page(polling_page);
  log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));

  if (!UseMembar) {
    // Writable page used for memory serialization between threads when
    // explicit membars are not in use.
    address mem_serialize_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    guarantee(mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page");
    os::set_memory_serialize_page(mem_serialize_page);
    log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page));
  }

  // initialize suspend/resume support - must do this before signal_sets_init()
  if (SR_initialize() != 0) {
    perror("SR_initialize failed");
    return JNI_ERR;
  }

        log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
      }
    }
  }

  // at-exit methods are called in the reverse order of their registration.
  // atexit functions are called on return from main or as a result of a
  // call to exit(3C). There can be only 32 of these functions registered
  // and atexit() does not set errno.

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

  // initialize thread priority policy
  prio_init();

#ifdef __APPLE__
  // dynamically link to objective c gc registration
  void *handleLibObjc = dlopen(OBJC_LIB, RTLD_LAZY);
  if (handleLibObjc != NULL) {
    objc_registerThreadWithCollectorFunction = (objc_registerThreadWithCollector_t) dlsym(handleLibObjc, OBJC_GCREGISTER);
  }
#endif

  return JNI_OK;
}
3537
3538 // Mark the polling page as unreadable
3539 void os::make_polling_page_unreadable(void) {
3540 if (!guard_memory((char*)_polling_page, Bsd::page_size())) {
4004 void os::pause() {
4005 char filename[MAX_PATH];
4006 if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4007 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4008 } else {
4009 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4010 }
4011
4012 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4013 if (fd != -1) {
4014 struct stat buf;
4015 ::close(fd);
4016 while (::stat(filename, &buf) == 0) {
4017 (void)::poll(NULL, 0, 100);
4018 }
4019 } else {
4020 jio_fprintf(stderr,
4021 "Could not open pause file '%s', continuing immediately.\n", filename);
4022 }
4023 }
4024
4025 // Darwin has no "environ" in a dynamic library.
4026 #ifdef __APPLE__
4027 #include <crt_externs.h>
4028 #define environ (*_NSGetEnviron())
4029 #else
4030 extern char** environ;
4031 #endif
4032
4033 // Run the specified command in a separate process. Return its exit value,
4034 // or -1 on failure (e.g. can't fork a new process).
4035 // Unlike system(), this function can be called from signal handler. It
4036 // doesn't block SIGINT et al.
4037 int os::fork_and_exec(char* cmd) {
4038 const char * argv[4] = {"sh", "-c", cmd, NULL};
4039
4040 // fork() in BsdThreads/NPTL is not async-safe. It needs to run
4041 // pthread_atfork handlers and reset pthread library. All we need is a
4042 // separate process to execve. Make a direct syscall to fork process.
4043 // On IA64 there's no fork syscall, we have to use fork() and hope for
|