src/hotspot/os/windows/os_windows.cpp

2079 
2080   // Add a CTRL-C handler
2081   SetConsoleCtrlHandler(consoleHandler, TRUE);
2082 }
2083 
2084 void os::signal_notify(int sig) {
2085   if (sig_sem != NULL) {
2086     Atomic::inc(&pending_signals[sig]);
2087     sig_sem->signal();
2088   } else {
2089     // With ReduceSignalUsage the signal thread is not created and
2090     // jdk_misc_signal_init() is not called, so sig_sem stays NULL.
2091     assert(ReduceSignalUsage, "signal semaphore should be created");
2092   }
2093 }
2094 
2095 static int check_pending_signals() {
2096   while (true) {
2097     for (int i = 0; i < NSIG + 1; i++) {
2098       jint n = pending_signals[i];
2099       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2100         return i;
2101       }
2102     }
2103     JavaThread *thread = JavaThread::current();
2104 
2105     ThreadBlockInVM tbivm(thread);
2106 
2107     bool threadIsSuspended;
2108     do {
2109       thread->set_suspend_equivalent();
2110       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2111       sig_sem->wait();
2112 
2113       // were we externally suspended while we were waiting?
2114       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2115       if (threadIsSuspended) {
2116         // The semaphore has been incremented, but while we were waiting
2117         // another thread suspended us. We don't want to continue running
2118         // while suspended because that would surprise the thread that
2119         // suspended us.


3734     static CRITICAL_SECTION crit_sect;
3735     static volatile DWORD process_exiting = 0;
3736     int i, j;
3737     DWORD res;
3738     HANDLE hproc, hthr;
3739 
3740     // We only attempt to register threads until a process exiting
3741     // thread manages to set the process_exiting flag. Any threads
3742     // that come through here after the process_exiting flag is set
3743     // are unregistered and will be caught in the SuspendThread()
3744     // infinite loop below.
3745     bool registered = false;
3746 
3747     // The first thread that reaches this point initializes the critical section.
3748     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
3749       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
3750     } else if (Atomic::load_acquire(&process_exiting) == 0) {
3751       if (what != EPT_THREAD) {
3752         // Atomically set process_exiting before entering the critical section
3753         // so that racing threads observe it as early as possible.
3754         Atomic::cmpxchg(GetCurrentThreadId(), &process_exiting, (DWORD)0);
3755       }
3756       EnterCriticalSection(&crit_sect);
3757 
3758       if (what == EPT_THREAD && Atomic::load_acquire(&process_exiting) == 0) {
3759         // Remove from the array those handles of the threads that have completed exiting.
3760         for (i = 0, j = 0; i < handle_count; ++i) {
3761           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
3762           if (res == WAIT_TIMEOUT) {
3763             handles[j++] = handles[i];
3764           } else {
3765             if (res == WAIT_FAILED) {
3766               warning("WaitForSingleObject failed (%u) in %s: %d\n",
3767                       GetLastError(), __FILE__, __LINE__);
3768             }
3769             // Don't keep the handle: the thread has exited or the wait failed.
3770             CloseHandle(handles[i]);
3771           }
3772         }
3773 
3774         // If there's no free slot in the array of the kept handles, we'll have to


5119 // Another possible encoding of _Event would be with
5120 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5121 //
5122 
5123 int os::PlatformEvent::park(jlong Millis) {
5124   // Transitions for _Event:
5125   //   -1 => -1 : illegal
5126   //    1 =>  0 : pass - return immediately
5127   //    0 => -1 : block; then set _Event to 0 before returning
5128 
5129   guarantee(_ParkHandle != NULL , "Invariant");
5130   guarantee(Millis > 0          , "Invariant");
5131 
5132   // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5133   // the initial park() operation.
5134   // Consider: use atomic decrement instead of CAS-loop
5135 
5136   int v;
5137   for (;;) {
5138     v = _Event;
5139     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5140   }
5141   guarantee((v == 0) || (v == 1), "invariant");
5142   if (v != 0) return OS_OK;
5143 
5144   // Do this the hard way by blocking ...
5145   // TODO: consider a brief spin here, gated on the success of recent
5146   // spin attempts by this thread.
5147   //
5148   // We decompose long timeouts into a series of shorter timed waits.
5149   // Evidently large timeout values passed to WaitForSingleObject() are problematic on some
5150   // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5151   // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5152   // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5153   // ::WaitForSingleObject() caused by latent ::SetEvent() operations will tend
5154   // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5155   // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5156   // for the already waited time.  This policy does not admit any new outcomes.
5157   // In the future, however, we might want to track the accumulated wait time and
5158   // adjust Millis accordingly if we encounter a spurious wakeup.
5159 


5181   OrderAccess::fence();
5182   // If we encounter a nearly simultaneous timeout expiry and unpark()
5183   // we return OS_OK indicating we awoke via unpark().
5184   // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5185   return (v >= 0) ? OS_OK : OS_TIMEOUT;
5186 }
5187 
5188 void os::PlatformEvent::park() {
5189   // Transitions for _Event:
5190   //   -1 => -1 : illegal
5191   //    1 =>  0 : pass - return immediately
5192   //    0 => -1 : block; then set _Event to 0 before returning
5193 
5194   guarantee(_ParkHandle != NULL, "Invariant");
5195   // Invariant: Only the thread associated with the Event/PlatformEvent
5196   // may call park().
5197   // Consider: use atomic decrement instead of CAS-loop
5198   int v;
5199   for (;;) {
5200     v = _Event;
5201     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5202   }
5203   guarantee((v == 0) || (v == 1), "invariant");
5204   if (v != 0) return;
5205 
5206   // Do this the hard way by blocking ...
5207   // TODO: consider a brief spin here, gated on the success of recent
5208   // spin attempts by this thread.
5209   while (_Event < 0) {
5210     DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5211     assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5212   }
5213 
5214   // Usually we'll find _Event == 0 at this point, but as
5215   // an optional optimization we clear it, just in case
5216   // multiple unpark() operations drove _Event up to 1.
5217   _Event = 0;
5218   OrderAccess::fence();
5219   guarantee(_Event >= 0, "invariant");
5220 }
5221 




2079 
2080   // Add a CTRL-C handler
2081   SetConsoleCtrlHandler(consoleHandler, TRUE);
2082 }
2083 
2084 void os::signal_notify(int sig) {
2085   if (sig_sem != NULL) {
2086     Atomic::inc(&pending_signals[sig]);
2087     sig_sem->signal();
2088   } else {
2089     // With ReduceSignalUsage the signal thread is not created and
2090     // jdk_misc_signal_init() is not called, so sig_sem stays NULL.
2091     assert(ReduceSignalUsage, "signal semaphore should be created");
2092   }
2093 }
2094 
2095 static int check_pending_signals() {
2096   while (true) {
2097     for (int i = 0; i < NSIG + 1; i++) {
2098       jint n = pending_signals[i];
2099       if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
2100         return i;
2101       }
2102     }
2103     JavaThread *thread = JavaThread::current();
2104 
2105     ThreadBlockInVM tbivm(thread);
2106 
2107     bool threadIsSuspended;
2108     do {
2109       thread->set_suspend_equivalent();
2110       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2111       sig_sem->wait();
2112 
2113       // were we externally suspended while we were waiting?
2114       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2115       if (threadIsSuspended) {
2116         // The semaphore has been incremented, but while we were waiting
2117         // another thread suspended us. We don't want to continue running
2118         // while suspended because that would surprise the thread that
2119         // suspended us.
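
The only difference in this copy of the hunk is the Atomic::cmpxchg argument order at line 2099: the old code calls cmpxchg(new_value, dest, compare_value), while the updated code calls cmpxchg(dest, compare_value, new_value). A standalone sketch of the same claim-one-pending-signal pattern, written with std::atomic so it compiles outside HotSpot (pending and NSIG_SKETCH are illustrative stand-ins, not the VM's own names):

#include <atomic>

static const int NSIG_SKETCH = 32;                 // illustrative signal count
static std::atomic<int> pending[NSIG_SKETCH + 1];  // one pending counter per signal

// Claim one pending signal, mirroring the CAS-decrement loop in
// check_pending_signals(); returns the signal number, or -1 if none is pending.
int try_claim_pending_signal() {
  for (int i = 0; i < NSIG_SKETCH + 1; i++) {
    int n = pending[i].load();
    // Succeed only if the slot still holds n; on success we own one unit of it.
    if (n > 0 && pending[i].compare_exchange_strong(n, n - 1)) {
      return i;
    }
  }
  return -1;  // caller would block on the semaphore and retry, as above
}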


3734     static CRITICAL_SECTION crit_sect;
3735     static volatile DWORD process_exiting = 0;
3736     int i, j;
3737     DWORD res;
3738     HANDLE hproc, hthr;
3739 
3740     // We only attempt to register threads until a process exiting
3741     // thread manages to set the process_exiting flag. Any threads
3742     // that come through here after the process_exiting flag is set
3743     // are unregistered and will be caught in the SuspendThread()
3744     // infinite loop below.
3745     bool registered = false;
3746 
3747     // The first thread that reaches this point initializes the critical section.
3748     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
3749       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
3750     } else if (Atomic::load_acquire(&process_exiting) == 0) {
3751       if (what != EPT_THREAD) {
3752         // Atomically set process_exiting before entering the critical section
3753         // so that racing threads observe it as early as possible.
3754         Atomic::cmpxchg(&process_exiting, (DWORD)0, GetCurrentThreadId());
3755       }
3756       EnterCriticalSection(&crit_sect);
3757 
3758       if (what == EPT_THREAD && Atomic::load_acquire(&process_exiting) == 0) {
3759         // Remove from the array those handles of the threads that have completed exiting.
3760         for (i = 0, j = 0; i < handle_count; ++i) {
3761           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
3762           if (res == WAIT_TIMEOUT) {
3763             handles[j++] = handles[i];
3764           } else {
3765             if (res == WAIT_FAILED) {
3766               warning("WaitForSingleObject failed (%u) in %s: %d\n",
3767                       GetLastError(), __FILE__, __LINE__);
3768             }
3769             // Don't keep the handle: the thread has exited or the wait failed.
3770             CloseHandle(handles[i]);
3771           }
3772         }
3773 
3774         // If there's no free slot in the array of the kept handles, we'll have to
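
The comments above describe a first-writer-wins race: a thread exiting the process publishes its id in process_exiting with a compare-and-swap before entering the critical section, and any later thread sees the non-zero value and skips registration. A standalone sketch of that pattern using the Win32 interlocked primitive directly (claim_process_exit and process_exiting_sketch are hypothetical names, not part of os_windows.cpp):

#include <windows.h>

static volatile LONG process_exiting_sketch = 0;   // 0 == no thread is exiting yet

// Returns true only for the first caller; later callers see the winner's thread id.
bool claim_process_exit() {
  // InterlockedCompareExchange(dest, exchange, comparand) returns the prior
  // value of *dest, so a return of 0 means this thread published its id.
  return InterlockedCompareExchange(&process_exiting_sketch,
                                    (LONG)GetCurrentThreadId(),
                                    0) == 0;
}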


5119 // Another possible encoding of _Event would be with
5120 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5121 //
5122 
5123 int os::PlatformEvent::park(jlong Millis) {
5124   // Transitions for _Event:
5125   //   -1 => -1 : illegal
5126   //    1 =>  0 : pass - return immediately
5127   //    0 => -1 : block; then set _Event to 0 before returning
5128 
5129   guarantee(_ParkHandle != NULL , "Invariant");
5130   guarantee(Millis > 0          , "Invariant");
5131 
5132   // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5133   // the initial park() operation.
5134   // Consider: use atomic decrement instead of CAS-loop
5135 
5136   int v;
5137   for (;;) {
5138     v = _Event;
5139     if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
5140   }
5141   guarantee((v == 0) || (v == 1), "invariant");
5142   if (v != 0) return OS_OK;
5143 
5144   // Do this the hard way by blocking ...
5145   // TODO: consider a brief spin here, gated on the success of recent
5146   // spin attempts by this thread.
5147   //
5148   // We decompose long timeouts into a series of shorter timed waits.
5149   // Evidently large timeout values passed to WaitForSingleObject() are problematic on some
5150   // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5151   // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5152   // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5153   // ::WaitForSingleObject() caused by latent ::SetEvent() operations will tend
5154   // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5155   // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5156   // for the already waited time.  This policy does not admit any new outcomes.
5157   // In the future, however, we might want to track the accumulated wait time and
5158   // adjust Millis accordingly if we encounter a spurious wakeup.
5159 
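
The comment block above spells out the timed-park policy: long timeouts are decomposed into a series of shorter WaitForSingleObject() calls, WAIT_TIMEOUT is trusted without re-reading the clock, and a spurious WAIT_OBJECT_0 does not cause Millis to be recomputed. A standalone sketch of such a chunked wait, assuming an illustrative 4-second chunk size (the real chunk constant and the _Event re-check are outside this hunk):

#include <windows.h>

// Wait on 'event' for up to 'millis' milliseconds in bounded chunks.
// Returns 1 if signaled, 0 on timeout, -1 on error.
int chunked_wait(HANDLE event, long long millis) {
  const DWORD MAX_CHUNK_MS = 4000;                 // assumed chunk size, not the real constant
  while (millis > 0) {
    DWORD chunk = (millis > MAX_CHUNK_MS) ? MAX_CHUNK_MS : (DWORD)millis;
    DWORD rv = ::WaitForSingleObject(event, chunk);
    if (rv == WAIT_OBJECT_0) return 1;             // signaled; the real park() re-checks _Event here
    if (rv != WAIT_TIMEOUT)  return -1;            // WAIT_FAILED or WAIT_ABANDONED
    millis -= chunk;                               // trust WAIT_TIMEOUT; no clock re-read
  }
  return 0;                                        // the full timeout elapsed
}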


5181   OrderAccess::fence();
5182   // If we encounter a nearly simultaneous timeout expiry and unpark()
5183   // we return OS_OK indicating we awoke via unpark().
5184   // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5185   return (v >= 0) ? OS_OK : OS_TIMEOUT;
5186 }
5187 
5188 void os::PlatformEvent::park() {
5189   // Transitions for _Event:
5190   //   -1 => -1 : illegal
5191   //    1 =>  0 : pass - return immediately
5192   //    0 => -1 : block; then set _Event to 0 before returning
5193 
5194   guarantee(_ParkHandle != NULL, "Invariant");
5195   // Invariant: Only the thread associated with the Event/PlatformEvent
5196   // may call park().
5197   // Consider: use atomic decrement instead of CAS-loop
5198   int v;
5199   for (;;) {
5200     v = _Event;
5201     if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
5202   }
5203   guarantee((v == 0) || (v == 1), "invariant");
5204   if (v != 0) return;
5205 
5206   // Do this the hard way by blocking ...
5207   // TODO: consider a brief spin here, gated on the success of recent
5208   // spin attempts by this thread.
5209   while (_Event < 0) {
5210     DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5211     assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5212   }
5213 
5214   // Usually we'll find _Event == 0 at this point, but as
5215   // an optional optimization we clear it, just in case
5216   // multiple unpark() operations drove _Event up to 1.
5217   _Event = 0;
5218   OrderAccess::fence();
5219   guarantee(_Event >= 0, "invariant");
5220 }
5221 
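
The transition table at the top of park() (-1 illegal, 1 => 0 pass, 0 => -1 block) implies an unpark() side that publishes _Event == 1 and signals _ParkHandle only when a waiter is actually parked. A hedged, standalone sketch of that counterpart, using std::atomic and a Win32 auto-reset event rather than the HotSpot types:

#include <atomic>
#include <windows.h>

struct EventSketch {
  std::atomic<int> _Event{0};   // 1 = signaled, 0 = neutral, -1 = a thread is parked
  HANDLE _ParkHandle;           // auto-reset Win32 event, created elsewhere

  void unpark() {
    // 0 => 1 and 1 => 1 need no wakeup; only -1 => 1 must release the parked thread,
    // which is blocked in WaitForSingleObject(_ParkHandle, ...).
    if (_Event.exchange(1) >= 0) return;
    ::SetEvent(_ParkHandle);
  }
};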

