src/hotspot/os/posix/os_posix.cpp (old version)

 628 void os::naked_short_nanosleep(jlong ns) {
 629   struct timespec req;
 630   assert(ns > -1 && ns < NANOUNITS, "Un-interruptable sleep, short time use only");
 631   req.tv_sec = 0;
 632   req.tv_nsec = ns;
 633   ::nanosleep(&req, NULL);
 634   return;
 635 }
 636 
 637 void os::naked_short_sleep(jlong ms) {
 638   assert(ms < MILLIUNITS, "Un-interruptable sleep, short time use only");
 639   os::naked_short_nanosleep(ms * (NANOUNITS / MILLIUNITS));
 640   return;
 641 }
 642 
 643 ////////////////////////////////////////////////////////////////////////////////
 644 // interrupt support
 645 
 646 void os::interrupt(Thread* thread) {
 647   debug_only(Thread::check_for_dangling_thread_pointer(thread);)
 648 

 649   OSThread* osthread = thread->osthread();
 650 
 651   if (!osthread->interrupted()) {
 652     osthread->set_interrupted(true);
 653     // More than one thread can get here with the same value of osthread,
 654     // resulting in multiple notifications.  We do, however, want the store
 655     // to interrupted() to be visible to other threads before we execute unpark().
 656     OrderAccess::fence();
 657     ParkEvent * const slp = thread->_SleepEvent ;
 658     if (slp != NULL) slp->unpark() ;
 659   }
 660 
 661   // For JSR166. Unpark even if interrupt status already was set
 662   if (thread->is_Java_thread())
 663     ((JavaThread*)thread)->parker()->unpark();
 664 
 665   ParkEvent * ev = thread->_ParkEvent ;
 666   if (ev != NULL) ev->unpark() ;
 667 }
 668 
 669 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
 670   debug_only(Thread::check_for_dangling_thread_pointer(thread);)
 671 
 672   OSThread* osthread = thread->osthread();
 673 
 674   bool interrupted = osthread->interrupted();
 675 
 676   // NOTE that since there is no "lock" around the interrupt and
 677   // is_interrupted operations, there is the possibility that the
 678   // interrupted flag (in osThread) will be "false" but that the
 679   // low-level events will be in the signaled state. This is
 680   // intentional. The effect of this is that Object.wait() and
 681   // LockSupport.park() will appear to have a spurious wakeup, which
 682   // is allowed and not harmful, and the possibility is so rare that
 683   // it is not worth the added complexity to add yet another lock.
 684   // For the sleep event an explicit reset is performed on entry
 685   // to os::sleep, so there is no early return. It has also been
 686   // recommended not to put the interrupted flag into the "event"
 687   // structure because it hides the issue.
 688   if (interrupted && clear_interrupted) {
 689     osthread->set_interrupted(false);
 690     // consider thread->_SleepEvent->reset() ... optional optimization
 691   }
 692 
 693   return interrupted;
 694 }
 695 
 696 
 697 
 698 static const struct {
 699   int sig; const char* name;
 700 }
 701  g_signal_info[] =
 702   {
 703   {  SIGABRT,     "SIGABRT" },
 704 #ifdef SIGAIO
 705   {  SIGAIO,      "SIGAIO" },


2032   }
2033   return OS_OK;
2034 }
2035 
2036 void os::PlatformEvent::unpark() {
2037   // Transitions for _event:
2038   //    0 => 1 : just return
2039   //    1 => 1 : just return
2040   //   -1 => either 0 or 1; must signal target thread
2041   //         That is, we can safely transition _event from -1 to either
2042   //         0 or 1.
2043   // See also: "Semaphores in Plan 9" by Mullender & Cox
2044   //
2045   // Note: Forcing a transition from "-1" to "1" on an unpark() means
2046   // that it will take two back-to-back park() calls for the owning
2047   // thread to block. This has the benefit of forcing a spurious return
2048   // from the first park() call after an unpark() call which will help
2049   // shake out uses of park() and unpark() without checking state conditions
2050   // properly. This spurious return doesn't manifest itself in any user code
2051   // but only in the correctly written condition checking loops of ObjectMonitor,
2052   // Mutex/Monitor, Thread::muxAcquire and os::sleep
2053 
2054   if (Atomic::xchg(1, &_event) >= 0) return;
2055 
2056   int status = pthread_mutex_lock(_mutex);
2057   assert_status(status == 0, status, "mutex_lock");
2058   int anyWaiters = _nParked;
2059   assert(anyWaiters == 0 || anyWaiters == 1, "invariant");
2060   status = pthread_mutex_unlock(_mutex);
2061   assert_status(status == 0, status, "mutex_unlock");
2062 
2063   // Note that we signal() *after* dropping the lock for "immortal" Events.
2064   // This is safe and avoids a common class of futile wakeups.  In rare
2065   // circumstances this can cause a thread to return prematurely from
2066   // cond_{timed}wait() but the spurious wakeup is benign and the victim
2067   // will simply re-test the condition and re-park itself.
2068   // This provides particular benefit if the underlying platform does not
2069   // provide wait morphing.
2070 
2071   if (anyWaiters != 0) {
2072     status = pthread_cond_signal(_cond);


src/hotspot/os/posix/os_posix.cpp (new version)

 628 void os::naked_short_nanosleep(jlong ns) {
 629   struct timespec req;
 630   assert(ns > -1 && ns < NANOUNITS, "Un-interruptable sleep, short time use only");
 631   req.tv_sec = 0;
 632   req.tv_nsec = ns;
 633   ::nanosleep(&req, NULL);
 634   return;
 635 }
 636 
 637 void os::naked_short_sleep(jlong ms) {
 638   assert(ms < MILLIUNITS, "Un-interruptable sleep, short time use only");
 639   os::naked_short_nanosleep(ms * (NANOUNITS / MILLIUNITS));
 640   return;
 641 }
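
For reference, the two helpers above reduce to a single nanosleep() call: NANOUNITS and MILLIUNITS are HotSpot's time-unit constants (1e9 and 1e3), so naked_short_sleep(ms) multiplies by 1,000,000 and delegates to naked_short_nanosleep(). Below is a minimal standalone sketch of the same pattern; the names short_nanosleep, short_sleep_ms and the demo main are illustrative, not HotSpot code.

    #include <assert.h>
    #include <time.h>

    // Uninterruptible sub-second sleep built on nanosleep(), mirroring
    // os::naked_short_nanosleep(). tv_nsec must stay below one second.
    static void short_nanosleep(long ns) {
      assert(ns > -1 && ns < 1000000000L);
      struct timespec req;
      req.tv_sec  = 0;
      req.tv_nsec = ns;
      ::nanosleep(&req, NULL);   // an early return on EINTR is tolerated by callers
    }

    // Millisecond wrapper, mirroring os::naked_short_sleep().
    static void short_sleep_ms(long ms) {
      assert(ms < 1000L);
      short_nanosleep(ms * 1000000L);
    }

    int main() {
      short_sleep_ms(5);   // sleep roughly 5 ms
      return 0;
    }
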
 642 
 643 ////////////////////////////////////////////////////////////////////////////////
 644 // interrupt support
 645 
 646 void os::interrupt(Thread* thread) {
 647   debug_only(Thread::check_for_dangling_thread_pointer(thread);)
 648   assert(thread->is_Java_thread(), "invariant");
 649   JavaThread* jt = (JavaThread*) thread;
 650   OSThread* osthread = thread->osthread();
 651 
 652   if (!osthread->interrupted()) {
 653     osthread->set_interrupted(true);
 654     // More than one thread can get here with the same value of osthread,
 655     // resulting in multiple notifications.  We do, however, want the store
 656     // to interrupted() to be visible to other threads before we execute unpark().
 657     OrderAccess::fence();
 658     ParkEvent * const slp = jt->_SleepEvent ;
 659     if (slp != NULL) slp->unpark() ;
 660   }
 661 
 662   // For JSR166. Unpark even if interrupt status already was set
 663   jt->parker()->unpark();

 664 
 665   ParkEvent * ev = thread->_ParkEvent ;
 666   if (ev != NULL) ev->unpark() ;
 667 }
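
The store/fence/unpark ordering in os::interrupt() is the essential part: the interrupted flag must be published before the sleeping thread is woken, otherwise the woken thread could re-check the flag, still read false, and park again. Below is a simplified standalone sketch of that pattern using std::atomic and a condition variable in place of OSThread and ParkEvent; all names are illustrative, and the mutex hand-off here plays the ordering role that OrderAccess::fence() plus ParkEvent::unpark() play in the real code.

    #include <atomic>
    #include <condition_variable>
    #include <mutex>
    #include <thread>

    struct SleeperState {
      std::atomic<bool> interrupted{false};
      std::mutex lock;
      std::condition_variable wakeup;
    };

    void interrupt(SleeperState& s) {
      // Publish the flag first, so a woken sleeper that re-checks it sees true.
      s.interrupted.store(true);
      // Empty critical section: pairs with the sleeper's lock so the notify
      // cannot fall between the sleeper's check and its wait. The real code
      // signals after dropping the lock for the same reason (see unpark()).
      { std::lock_guard<std::mutex> g(s.lock); }
      s.wakeup.notify_all();             // analogous to _SleepEvent->unpark()
    }

    void sleep_until_interrupted(SleeperState& s) {
      std::unique_lock<std::mutex> ml(s.lock);
      // Spurious wakeups are fine: the predicate is simply re-checked.
      s.wakeup.wait(ml, [&] { return s.interrupted.load(); });
    }

    int main() {
      SleeperState s;
      std::thread sleeper([&] { sleep_until_interrupted(s); });
      interrupt(s);
      sleeper.join();
      return 0;
    }
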
 668 
 669 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
 670   debug_only(Thread::check_for_dangling_thread_pointer(thread);)
 671 
 672   OSThread* osthread = thread->osthread();
 673 
 674   bool interrupted = osthread->interrupted();
 675 
 676   // NOTE that since there is no "lock" around the interrupt and
 677   // is_interrupted operations, there is the possibility that the
 678   // interrupted flag (in osThread) will be "false" but that the
 679   // low-level events will be in the signaled state. This is
 680   // intentional. The effect of this is that Object.wait() and
 681   // LockSupport.park() will appear to have a spurious wakeup, which
 682   // is allowed and not harmful, and the possibility is so rare that
 683   // it is not worth the added complexity to add yet another lock.
 684   // For the sleep event an explicit reset is performed on entry
 685   // to JavaThread::sleep, so there is no early return. It has also been
 686   // recommended not to put the interrupted flag into the "event"
 687   // structure because it hides the issue.
 688   if (interrupted && clear_interrupted) {
 689     osthread->set_interrupted(false);
 690     // consider thread->_SleepEvent->reset() ... optional optimization
 691   }
 692 
 693   return interrupted;
 694 }
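
The clear_interrupted parameter is what separates the two Java-level queries: Thread.interrupted() tests and clears the flag, whereas Thread.isInterrupted() only tests it (that is how the natives are wired in JDKs of this vintage; mentioned here for orientation). Since only the target thread clears its own flag, a plain test-and-clear suffices and no extra lock is needed, which is exactly what the comment above argues. A minimal sketch of that contract, with a std::atomic<bool> standing in for OSThread's interrupted field (illustrative only):

    #include <atomic>

    // Test-and-clear sketch of the clear_interrupted contract. In HotSpot only
    // the thread itself clears its own flag, so a load/store pair is enough.
    static bool is_interrupted(std::atomic<bool>& flag, bool clear_interrupted) {
      bool interrupted = flag.load();
      if (interrupted && clear_interrupted) {
        flag.store(false);   // Thread.interrupted() clears; Thread.isInterrupted() does not
      }
      return interrupted;
    }
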
 695 
 696 
 697 
 698 static const struct {
 699   int sig; const char* name;
 700 }
 701  g_signal_info[] =
 702   {
 703   {  SIGABRT,     "SIGABRT" },
 704 #ifdef SIGAIO
 705   {  SIGAIO,      "SIGAIO" },


2032   }
2033   return OS_OK;
2034 }
2035 
2036 void os::PlatformEvent::unpark() {
2037   // Transitions for _event:
2038   //    0 => 1 : just return
2039   //    1 => 1 : just return
2040   //   -1 => either 0 or 1; must signal target thread
2041   //         That is, we can safely transition _event from -1 to either
2042   //         0 or 1.
2043   // See also: "Semaphores in Plan 9" by Mullender & Cox
2044   //
2045   // Note: Forcing a transition from "-1" to "1" on an unpark() means
2046   // that it will take two back-to-back park() calls for the owning
2047   // thread to block. This has the benefit of forcing a spurious return
2048   // from the first park() call after an unpark() call which will help
2049   // shake out uses of park() and unpark() without checking state conditions
2050   // properly. This spurious return doesn't manifest itself in any user code
2051   // but only in the correctly written condition checking loops of ObjectMonitor,
2052   // Mutex/Monitor, Thread::muxAcquire and JavaThread::sleep
2053 
2054   if (Atomic::xchg(1, &_event) >= 0) return;
2055 
2056   int status = pthread_mutex_lock(_mutex);
2057   assert_status(status == 0, status, "mutex_lock");
2058   int anyWaiters = _nParked;
2059   assert(anyWaiters == 0 || anyWaiters == 1, "invariant");
2060   status = pthread_mutex_unlock(_mutex);
2061   assert_status(status == 0, status, "mutex_unlock");
2062 
2063   // Note that we signal() *after* dropping the lock for "immortal" Events.
2064   // This is safe and avoids a common class of futile wakeups.  In rare
2065   // circumstances this can cause a thread to return prematurely from
2066   // cond_{timed}wait() but the spurious wakeup is benign and the victim
2067   // will simply re-test the condition and re-park itself.
2068   // This provides particular benefit if the underlying platform does not
2069   // provide wait morphing.
2070 
2071   if (anyWaiters != 0) {
2072     status = pthread_cond_signal(_cond);
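
The transition table at the top of unpark() is easiest to read as a small state machine: _event is 1 when a permit is pending, 0 when neutral, and -1 once a parker has announced itself and may be blocked. Below is a simplified sketch of that protocol using std::atomic and std::condition_variable instead of the raw pthread calls; names such as EventSketch and n_parked are illustrative, and the asserts, timed waits and error checking of the real PlatformEvent are omitted.

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    struct EventSketch {
      std::atomic<int> event{0};   // 1 = permit pending, 0 = neutral, -1 = parker waiting
      int n_parked = 0;            // guarded by lock, mirrors _nParked
      std::mutex lock;
      std::condition_variable cond;

      void park() {                // only one thread may park on an event
        // Atomically decrement: 1 -> 0 consumes a pending unpark and returns
        // immediately; 0 -> -1 announces that this thread is about to block.
        int v = event.load();
        while (!event.compare_exchange_weak(v, v - 1)) { }
        if (v == 1) return;

        std::unique_lock<std::mutex> ml(lock);
        ++n_parked;
        while (event.load() < 0) {
          cond.wait(ml);           // spurious wakeups just re-test the state
        }
        --n_parked;
        event.store(0);            // reset for the next park()/unpark() cycle
      }

      void unpark() {
        // 0 -> 1 and 1 -> 1 need no signal; only -1 -> 1 can have a blocked parker.
        if (event.exchange(1) >= 0) return;

        bool any_waiters;
        {
          std::lock_guard<std::mutex> g(lock);
          any_waiters = (n_parked != 0);
        }
        // Signal after dropping the lock, exactly as the comment above argues,
        // so the woken thread does not immediately stall on a held mutex.
        if (any_waiters) cond.notify_one();
      }
    };
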

