578
579 #undef DEFAULT_LIBPATH
580 #undef EXTENSIONS_DIR
581 }
582
583 ////////////////////////////////////////////////////////////////////////////////
584 // breakpoint support
585
// Stop the VM in a debugger-friendly way. BREAKPOINT is a platform-defined
// trap macro (defined elsewhere in the project headers).
void os::breakpoint() {
  BREAKPOINT;
}
589
// Deliberately empty C-linkage hook: attach a debugger and set a breakpoint
// on this well-known symbol to stop the VM from native code.
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
593
594 ////////////////////////////////////////////////////////////////////////////////
595 // signal support
596
// Debug-only guard: set once signal_sets_init() has completed; the accessors
// below assert on it.
debug_only(static bool signal_sets_initialized = false);
// Process-wide signal sets populated by signal_sets_init().
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
599
600 bool os::Aix::is_sig_ignored(int sig) {
601 struct sigaction oact;
602 sigaction(sig, (struct sigaction*)NULL, &oact);
603 void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
604 : CAST_FROM_FN_PTR(void*, oact.sa_handler);
605 if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
606 return true;
607 } else {
608 return false;
609 }
610 }
611
612 void os::Aix::signal_sets_init() {
613 // Should also have an assertion stating we are still single-threaded.
614 assert(!signal_sets_initialized, "Already initialized");
615 // Fill in signals that are necessarily unblocked for all threads in
616 // the VM. Currently, we unblock the following signals:
617 // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
618 // by -Xrs (=ReduceSignalUsage));
619 // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
620 // other threads. The "ReduceSignalUsage" boolean tells us not to alter
621 // the dispositions or masks wrt these signals.
622 // Programs embedding the VM that want to use the above signals for their
623 // own purposes must, at this time, use the "-Xrs" option to prevent
624 // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
625 // (See bug 4345157, and other related bugs).
626 // In reality, though, unblocking these signals is really a nop, since
627 // these signals are not blocked by default.
628 sigemptyset(&unblocked_sigs);
629 sigemptyset(&allowdebug_blocked_sigs);
630 sigaddset(&unblocked_sigs, SIGILL);
631 sigaddset(&unblocked_sigs, SIGSEGV);
632 sigaddset(&unblocked_sigs, SIGBUS);
633 sigaddset(&unblocked_sigs, SIGFPE);
634 sigaddset(&unblocked_sigs, SIGTRAP);
635 sigaddset(&unblocked_sigs, SR_signum);
636
637 if (!ReduceSignalUsage) {
638 if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
639 sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
640 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
641 }
642 if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
643 sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
644 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
645 }
646 if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
647 sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
648 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
649 }
650 }
651 // Fill in signals that are blocked by all but the VM thread.
652 sigemptyset(&vm_sigs);
653 if (!ReduceSignalUsage)
654 sigaddset(&vm_sigs, BREAK_SIGNAL);
655 debug_only(signal_sets_initialized = true);
656 }
657
// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
// Returns the set populated by signal_sets_init(); asserts (debug builds)
// that initialization already ran.
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}
664
// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
// Returns the set populated by signal_sets_init().
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}
671
// These are signals that are blocked during cond_wait to allow debugger in.
// The set is filled by signal_sets_init() (shutdown signals, unless -Xrs).
sigset_t* os::Aix::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}
677
678 void os::Aix::hotspot_sigmask(Thread* thread) {
679
680 //Save caller's signal mask before setting VM signal mask
681 sigset_t caller_sigmask;
682 pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);
683
684 OSThread* osthread = thread->osthread();
685 osthread->set_caller_sigmask(caller_sigmask);
686
687 pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);
688
689 if (!ReduceSignalUsage) {
690 if (thread->is_VM_thread()) {
691 // Only the VM thread handles BREAK_SIGNAL ...
692 pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
693 } else {
694 // ... all other threads block BREAK_SIGNAL
695 pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
696 }
697 }
3465
3466 // Reset the perfstat information provided by ODM.
3467 if (os::Aix::on_aix()) {
3468 libperfstat::perfstat_reset();
3469 }
3470
3471 // Now initialze basic system properties. Note that for some of the values we
3472 // need libperfstat etc.
3473 os::Aix::initialize_system_info();
3474
3475 clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3476
3477 init_random(1234567);
3478
3479 ThreadCritical::initialize();
3480
3481 // Main_thread points to the aboriginal thread.
3482 Aix::_main_thread = pthread_self();
3483
3484 initial_time_count = os::elapsed_counter();
3485 }
3486
3487 // This is called _after_ the global arguments have been parsed.
3488 jint os::init_2(void) {
3489
3490 if (os::Aix::on_pase()) {
3491 trcVerbose("Running on PASE.");
3492 } else {
3493 trcVerbose("Running on AIX (not PASE).");
3494 }
3495
3496 trcVerbose("processor count: %d", os::_processor_count);
3497 trcVerbose("physical memory: %lu", Aix::_physical_memory);
3498
3499 // Initially build up the loaded dll map.
3500 LoadedLibraries::reload();
3501 if (Verbose) {
3502 trcVerbose("Loaded Libraries: ");
3503 LoadedLibraries::print(tty);
3504 }
3505
3506 const int page_size = Aix::page_size();
3507 const int map_size = page_size;
3508
3509 address map_address = (address) MAP_FAILED;
4350
4351 if (p_stack_size) {
4352 *p_stack_size = stack_size;
4353 }
4354
4355 return true;
4356 }
4357
// Get the current stack base from the OS (actually, the pthread library).
address os::current_stack_base() {
  address p;
  // Size out-parameter is 0/NULL: only the base is queried here.
  query_stack_dimensions(&p, 0);
  return p;
}
4364
// Get the current stack size from the OS (actually, the pthread library).
size_t os::current_stack_size() {
  size_t s;
  // Base out-parameter is 0/NULL: only the size is queried here.
  query_stack_dimensions(0, &s);
  return s;
}
4371
4372 // Refer to the comments in os_solaris.cpp park-unpark.
4373
4374 // utility to compute the abstime argument to timedwait:
4375 // millis is the relative timeout time
4376 // abstime will be the absolute timeout time
4377 // TODO: replace compute_abstime() with unpackTime()
4378
4379 static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
4380 if (millis < 0) millis = 0;
4381 struct timeval now;
4382 int status = gettimeofday(&now, NULL);
4383 assert(status == 0, "gettimeofday");
4384 jlong seconds = millis / 1000;
4385 millis %= 1000;
4386 if (seconds > 50000000) { // see man cond_timedwait(3T)
4387 seconds = 50000000;
4388 }
4389 abstime->tv_sec = now.tv_sec + seconds;
4390 long usec = now.tv_usec + millis * 1000;
4391 if (usec >= 1000000) {
4392 abstime->tv_sec += 1;
4393 usec -= 1000000;
4394 }
4395 abstime->tv_nsec = usec * 1000;
4396 return abstime;
4397 }
4398
4399 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
4400 // Conceptually TryPark() should be equivalent to park(0).
4401
// Atomically consume the event's permit, if any. Always leaves _Event at 0.
// Returns the previous value: 1 if a permit was consumed, 0 otherwise.
int os::PlatformEvent::TryPark() {
  for (;;) {
    const int v = _Event;   // snapshot; legal states here are 0 or 1
    guarantee ((v == 0) || (v == 1), "invariant");
    // CAS to 0; retry if another thread changed _Event concurrently.
    if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
  }
}
4409
// Block until the event is set. AKA "down()".
void os::PlatformEvent::park() {
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // TODO: assert that _Assoc != NULL or _Assoc == Self
  int v;
  // Atomically decrement _Event. If a permit was pending (_Event == 1)
  // v ends up 1 and we return without blocking.
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
  }
  guarantee (v >= 0, "invariant");
  if (v == 0) {
    // No permit - do this the hard way by blocking on the condvar.
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee (_nParked == 0, "invariant");
    ++ _nParked;
    // Re-test in a loop: guards against spurious wakeups; only unpark()
    // makes _Event non-negative again.
    while (_Event < 0) {
      status = pthread_cond_wait(_cond, _mutex);
      assert_status(status == 0 || status == ETIMEDOUT, status, "cond_timedwait");
    }
    -- _nParked;

    // In theory we could move the ST of 0 into _Event past the unlock(),
    // but then we'd need a MEMBAR after the ST.
    _Event = 0;
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
  }
  guarantee (_Event >= 0, "invariant");
}
4440
// Block until the event is set or `millis` milliseconds elapse.
// Returns OS_OK if the event was set (or a permit was already pending),
// OS_TIMEOUT otherwise.
int os::PlatformEvent::park(jlong millis) {
  guarantee (_nParked == 0, "invariant");

  int v;
  // Atomically decrement _Event; a pending permit (v == 1) lets us return
  // immediately without blocking.
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
  }
  guarantee (v >= 0, "invariant");
  if (v != 0) return OS_OK;

  // We do this the hard way, by blocking the thread.
  // Consider enforcing a minimum timeout value.
  struct timespec abst;
  compute_abstime(&abst, millis);

  int ret = OS_TIMEOUT;
  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee (_nParked == 0, "invariant");
  ++_nParked;

  // Object.wait(timo) will return because of
  // (a) notification
  // (b) timeout
  // (c) thread.interrupt
  //
  // Thread.interrupt and object.notify{All} both call Event::set.
  // That is, we treat thread.interrupt as a special case of notification.
  // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
  // We assume all ETIME returns are valid.
  //
  // TODO: properly differentiate simultaneous notify+interrupt.
  // In that case, we should propagate the notify to another waiter.

  while (_Event < 0) {
    status = pthread_cond_timedwait(_cond, _mutex, &abst);
    assert_status(status == 0 || status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (!FilterSpuriousWakeups) break; // previous semantics
    if (status == ETIMEDOUT) break;
    // We consume and ignore EINTR and spurious wakeups.
  }
  --_nParked;
  // If _Event became non-negative, a real unpark() arrived (possibly racing
  // the timeout) - report success.
  if (_Event >= 0) {
    ret = OS_OK;
  }
  _Event = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  assert (_nParked == 0, "invariant");
  return ret;
}
4494
// Set the event, waking the associated thread if it is parked.
void os::PlatformEvent::unpark() {
  int v, AnyWaiters;
  for (;;) {
    v = _Event;
    if (v > 0) {
      // The LD of _Event could have reordered or be satisfied
      // by a read-aside from this processor's write buffer.
      // To avoid problems execute a barrier and then
      // ratify the value.
      OrderAccess::fence();
      if (_Event == v) return;
      continue;
    }
    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
  }
  // v < 0 means the associated thread is (or is about to be) blocked in
  // park(); grab the mutex so the _Event store above and the signal below
  // are properly ordered with respect to the waiter.
  if (v < 0) {
    // Wait for the thread associated with the event to vacate
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    AnyWaiters = _nParked;

    if (AnyWaiters != 0) {
      // NOTE(review): the signal is issued while still HOLDING the mutex
      // (the unlock follows below); an earlier comment here claimed the
      // opposite and was stale.
      status = pthread_cond_signal(_cond);
      assert_status(status == 0, status, "cond_signal");
    }
    // Mutex should be locked for pthread_cond_signal(_cond).
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
  }

  // A waiter can still see a spurious wakeup from cond_{timed}wait() in rare
  // circumstances, but that is benign: the victim simply re-tests the
  // condition and re-parks itself.
}
4533
4534
4535 // JSR166
4536 // -------------------------------------------------------
4537
4538 //
4539 // The solaris and linux implementations of park/unpark are fairly
4540 // conservative for now, but can be improved. They currently use a
4541 // mutex/condvar pair, plus a a count.
4542 // Park decrements count if > 0, else does a condvar wait. Unpark
4543 // sets count to 1 and signals condvar. Only one thread ever waits
4544 // on the condvar. Contention seen when trying to park implies that someone
4545 // is unparking you, so don't wait. And spurious returns are fine, so there
4546 // is no need to track notifications.
4547 //
4548
4549 #define MAX_SECS 100000000
4550 //
4551 // This code is common to linux and solaris and will be moved to a
4552 // common place in dolphin.
4553 //
4554 // The passed in time value is either a relative time in nanoseconds
4555 // or an absolute time in milliseconds. Either way it has to be unpacked
4556 // into suitable seconds and nanoseconds components and stored in the
4557 // given timespec structure.
4558 // Given time is a 64-bit value and the time_t used in the timespec is only
4559 // a signed-32-bit value (except on 64-bit Linux) we have to watch for
4560 // overflow if times way in the future are given. Further on Solaris versions
4561 // prior to 10 there is a restriction (see cond_timedwait) that the specified
4562 // number of seconds, in abstime, is less than current_time + 100,000,000.
4563 // As it will be 28 years before "now + 100000000" will overflow we can
4564 // ignore overflow and just impose a hard-limit on seconds using the value
4565 // of "now + 100,000,000". This places a limit on the timeout of about 3.17
4566 // years from "now".
4567 //
4568
4569 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
4570 assert (time > 0, "convertTime");
4571
4572 struct timeval now;
4573 int status = gettimeofday(&now, NULL);
4574 assert(status == 0, "gettimeofday");
4575
4576 time_t max_secs = now.tv_sec + MAX_SECS;
4577
4578 if (isAbsolute) {
4579 jlong secs = time / 1000;
4580 if (secs > max_secs) {
4581 absTime->tv_sec = max_secs;
4582 }
4583 else {
4584 absTime->tv_sec = secs;
4585 }
4586 absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
4587 }
4588 else {
4589 jlong secs = time / NANOSECS_PER_SEC;
4590 if (secs >= MAX_SECS) {
4591 absTime->tv_sec = max_secs;
4592 absTime->tv_nsec = 0;
4593 }
4594 else {
4595 absTime->tv_sec = now.tv_sec + secs;
4596 absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
4597 if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
4598 absTime->tv_nsec -= NANOSECS_PER_SEC;
4599 ++absTime->tv_sec; // note: this must be <= max_secs
4600 }
4601 }
4602 }
4603 assert(absTime->tv_sec >= 0, "tv_sec < 0");
4604 assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
4605 assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
4606 assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
4607 }
4608
// JSR166 park: block the current JavaThread until unpark(), interrupt,
// or (if time != 0) the decoded deadline. `time` is a relative timeout in
// nanoseconds, or an absolute deadline in millis when isAbsolute.
void Parker::park(bool isAbsolute, jlong time) {
  // Optional fast-path check:
  // Return immediately if a permit is available.
  if (_counter > 0) {
    _counter = 0;
    OrderAccess::fence();
    return;
  }

  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Optional optimization -- avoid state transitions if there's an interrupt pending.
  // Check interrupt before trying to wait
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // Next, demultiplex/decode time arguments
  timespec absTime;
  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
    return;
  }
  if (time > 0) {
    unpackTime(&absTime, isAbsolute, time);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex. If safepoints are pending both the
  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if cannot get lock since interference arises from
  // unblocking. Also. check interrupt before trying wait
  if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
    return;
  }

  int status;
  if (_counter > 0) { // no wait needed
    // An unpark() slipped in between the fast-path check and taking the
    // lock - consume the permit and leave.
    _counter = 0;
    status = pthread_mutex_unlock(_mutex);
    assert (status == 0, "invariant");
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
  pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  // time == 0 means "park indefinitely"; otherwise wait until absTime.
  if (time == 0) {
    status = pthread_cond_wait (_cond, _mutex);
  } else {
    status = pthread_cond_timedwait (_cond, _mutex, &absTime);
  }
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  // Restore the caller's signal mask saved above.
  pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
#endif

  _counter = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "invariant");
  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }

  OrderAccess::fence();
}
4695
4696 void Parker::unpark() {
4697 int s, status;
4698 status = pthread_mutex_lock(_mutex);
4699 assert (status == 0, "invariant");
4700 s = _counter;
4701 _counter = 1;
4702 if (s < 1) {
4703 status = pthread_mutex_unlock(_mutex);
4704 assert (status == 0, "invariant");
4705 status = pthread_cond_signal (_cond);
4706 assert (status == 0, "invariant");
4707 } else {
4708 pthread_mutex_unlock(_mutex);
4709 assert (status == 0, "invariant");
4710 }
4711 }
4712
4713 extern char** environ;
4714
4715 // Run the specified command in a separate process. Return its exit value,
4716 // or -1 on failure (e.g. can't fork a new process).
4717 // Unlike system(), this function can be called from signal handler. It
4718 // doesn't block SIGINT et al.
4719 int os::fork_and_exec(char* cmd) {
4720 char * argv[4] = {"sh", "-c", cmd, NULL};
4721
4722 pid_t pid = fork();
4723
4724 if (pid < 0) {
4725 // fork failed
4726 return -1;
4727
4728 } else if (pid == 0) {
4729 // child process
4730
|
578
579 #undef DEFAULT_LIBPATH
580 #undef EXTENSIONS_DIR
581 }
582
583 ////////////////////////////////////////////////////////////////////////////////
584 // breakpoint support
585
// Stop the VM in a debugger-friendly way. BREAKPOINT is a platform-defined
// trap macro (defined elsewhere in the project headers).
void os::breakpoint() {
  BREAKPOINT;
}
589
// Deliberately empty C-linkage hook: attach a debugger and set a breakpoint
// on this well-known symbol to stop the VM from native code.
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
593
594 ////////////////////////////////////////////////////////////////////////////////
595 // signal support
596
// Debug-only guard: set once signal_sets_init() has completed; the accessors
// below assert on it.
debug_only(static bool signal_sets_initialized = false);
// Process-wide signal sets populated by signal_sets_init().
static sigset_t unblocked_sigs, vm_sigs;
599
600 bool os::Aix::is_sig_ignored(int sig) {
601 struct sigaction oact;
602 sigaction(sig, (struct sigaction*)NULL, &oact);
603 void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
604 : CAST_FROM_FN_PTR(void*, oact.sa_handler);
605 if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
606 return true;
607 } else {
608 return false;
609 }
610 }
611
612 void os::Aix::signal_sets_init() {
613 // Should also have an assertion stating we are still single-threaded.
614 assert(!signal_sets_initialized, "Already initialized");
615 // Fill in signals that are necessarily unblocked for all threads in
616 // the VM. Currently, we unblock the following signals:
617 // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
618 // by -Xrs (=ReduceSignalUsage));
619 // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
620 // other threads. The "ReduceSignalUsage" boolean tells us not to alter
621 // the dispositions or masks wrt these signals.
622 // Programs embedding the VM that want to use the above signals for their
623 // own purposes must, at this time, use the "-Xrs" option to prevent
624 // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
625 // (See bug 4345157, and other related bugs).
626 // In reality, though, unblocking these signals is really a nop, since
627 // these signals are not blocked by default.
628 sigemptyset(&unblocked_sigs);
629 sigaddset(&unblocked_sigs, SIGILL);
630 sigaddset(&unblocked_sigs, SIGSEGV);
631 sigaddset(&unblocked_sigs, SIGBUS);
632 sigaddset(&unblocked_sigs, SIGFPE);
633 sigaddset(&unblocked_sigs, SIGTRAP);
634 sigaddset(&unblocked_sigs, SR_signum);
635
636 if (!ReduceSignalUsage) {
637 if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
638 sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
639 }
640 if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
641 sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
642 }
643 if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
644 sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
645 }
646 }
647 // Fill in signals that are blocked by all but the VM thread.
648 sigemptyset(&vm_sigs);
649 if (!ReduceSignalUsage)
650 sigaddset(&vm_sigs, BREAK_SIGNAL);
651 debug_only(signal_sets_initialized = true);
652 }
653
// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
// Returns the set populated by signal_sets_init(); asserts (debug builds)
// that initialization already ran.
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}
660
// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
// Returns the set populated by signal_sets_init().
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}
667
668 void os::Aix::hotspot_sigmask(Thread* thread) {
669
670 //Save caller's signal mask before setting VM signal mask
671 sigset_t caller_sigmask;
672 pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);
673
674 OSThread* osthread = thread->osthread();
675 osthread->set_caller_sigmask(caller_sigmask);
676
677 pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);
678
679 if (!ReduceSignalUsage) {
680 if (thread->is_VM_thread()) {
681 // Only the VM thread handles BREAK_SIGNAL ...
682 pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
683 } else {
684 // ... all other threads block BREAK_SIGNAL
685 pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
686 }
687 }
3455
3456 // Reset the perfstat information provided by ODM.
3457 if (os::Aix::on_aix()) {
3458 libperfstat::perfstat_reset();
3459 }
3460
3461 // Now initialze basic system properties. Note that for some of the values we
3462 // need libperfstat etc.
3463 os::Aix::initialize_system_info();
3464
3465 clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3466
3467 init_random(1234567);
3468
3469 ThreadCritical::initialize();
3470
3471 // Main_thread points to the aboriginal thread.
3472 Aix::_main_thread = pthread_self();
3473
3474 initial_time_count = os::elapsed_counter();
3475
3476 os::Posix::init();
3477 }
3478
3479 // This is called _after_ the global arguments have been parsed.
3480 jint os::init_2(void) {
3481
3482 os::Posix::init_2();
3483
3484 if (os::Aix::on_pase()) {
3485 trcVerbose("Running on PASE.");
3486 } else {
3487 trcVerbose("Running on AIX (not PASE).");
3488 }
3489
3490 trcVerbose("processor count: %d", os::_processor_count);
3491 trcVerbose("physical memory: %lu", Aix::_physical_memory);
3492
3493 // Initially build up the loaded dll map.
3494 LoadedLibraries::reload();
3495 if (Verbose) {
3496 trcVerbose("Loaded Libraries: ");
3497 LoadedLibraries::print(tty);
3498 }
3499
3500 const int page_size = Aix::page_size();
3501 const int map_size = page_size;
3502
3503 address map_address = (address) MAP_FAILED;
4344
4345 if (p_stack_size) {
4346 *p_stack_size = stack_size;
4347 }
4348
4349 return true;
4350 }
4351
// Get the current stack base from the OS (actually, the pthread library).
address os::current_stack_base() {
  address p;
  // Size out-parameter is 0/NULL: only the base is queried here.
  query_stack_dimensions(&p, 0);
  return p;
}
4358
// Get the current stack size from the OS (actually, the pthread library).
size_t os::current_stack_size() {
  size_t s;
  // Base out-parameter is 0/NULL: only the size is queried here.
  query_stack_dimensions(0, &s);
  return s;
}
4365
4366 extern char** environ;
4367
4368 // Run the specified command in a separate process. Return its exit value,
4369 // or -1 on failure (e.g. can't fork a new process).
4370 // Unlike system(), this function can be called from signal handler. It
4371 // doesn't block SIGINT et al.
4372 int os::fork_and_exec(char* cmd) {
4373 char * argv[4] = {"sh", "-c", cmd, NULL};
4374
4375 pid_t pid = fork();
4376
4377 if (pid < 0) {
4378 // fork failed
4379 return -1;
4380
4381 } else if (pid == 0) {
4382 // child process
4383
|