< prev index next >

src/hotspot/share/prims/jvmtiRawMonitor.cpp

Print this page




 157   QNode* w = _entry_list;
 158   if (w != NULL) {
 159     _entry_list = w->_next;
 160   }
 161   RawMonitor_lock->unlock();
 162   if (w != NULL) {
 163     guarantee(w ->_t_state == QNode::TS_ENTER, "invariant");
 164     // Once we set _t_state to TS_RUN the waiting thread can complete
 165     // simple_enter and 'w' is pointing into random stack space. So we have
 166     // to ensure we extract the ParkEvent (which is in type-stable memory)
 167     // before we set the state, and then don't access 'w'.
 168     ParkEvent* ev = w->_event;
 169     OrderAccess::loadstore();
 170     w->_t_state = QNode::TS_RUN;
 171     OrderAccess::fence();
 172     ev->unpark();
 173   }
 174   return;
 175 }
 176 
// Block the calling thread on this raw monitor until a notify, a spurious
// wakeup, or (for millis > 0) a timeout. The monitor must be owned exactly
// once (_recursions == 0) on entry; it is released while parked and
// re-acquired before returning. Returns OS_OK for an untimed wait; a timed
// wait returns whatever ParkEvent::park(millis) reports.
int JvmtiRawMonitor::simple_wait(Thread* self, jlong millis) {
  guarantee(_owner == self  , "invariant");
  guarantee(_recursions == 0, "invariant");

  // The wait-queue node lives on this thread's stack; it is reachable
  // through _wait_set only while this thread is waiting.
  QNode node(self);
  node._notified = 0;
  node._t_state = QNode::TS_WAIT;

  // Publish the node at the head of the wait set under RawMonitor_lock so
  // notifiers always see a consistent list. The field stores above happen
  // before the publication below.
  RawMonitor_lock->lock_without_safepoint_check();
  node._next = _wait_set;
  _wait_set = &node;
  RawMonitor_lock->unlock();

  // Release the monitor before parking so a notifier can acquire it.
  simple_exit(self);
  guarantee(_owner != self, "invariant");

  int ret = OS_OK;
  if (millis <= 0) {
    self->_ParkEvent->park();
  } else {
    ret = self->_ParkEvent->park(millis);
  }

  // If thread still resides on the waitset then unlink it.
  // Double-checked locking -- the usage is safe in this context
  // as _t_state is volatile and the lock-unlock operators are
  // serializing (barrier-equivalent).

  if (node._t_state == QNode::TS_WAIT) {
    RawMonitor_lock->lock_without_safepoint_check();
    if (node._t_state == QNode::TS_WAIT) {
      // Simple O(n) unlink, but performance isn't critical here.
      QNode* p;
      QNode* q = NULL;
      for (p = _wait_set; p != &node; p = p->_next) {
        q = p;
      }
      guarantee(p == &node, "invariant");
      if (q == NULL) {
        // Node is still at the head of the list.
        guarantee (p == _wait_set, "invariant");
        _wait_set = p->_next;
      } else {
        guarantee(p == q->_next, "invariant");
        q->_next = p->_next;
      }
      node._t_state = QNode::TS_RUN;
    }
    RawMonitor_lock->unlock();
  }

  // Whether notified, timed out, or woken spuriously, re-acquire the
  // monitor before returning to the caller.
  guarantee(node._t_state == QNode::TS_RUN, "invariant");
  simple_enter(self);

  guarantee(_owner == self, "invariant");
  guarantee(_recursions == 0, "invariant");

  return ret;
}
 234 
 235 void JvmtiRawMonitor::simple_notify(Thread* self, bool all) {
 236   guarantee(_owner == self, "invariant");
 237   if (_wait_set == NULL) {
 238     return;
 239   }
 240 
 241   // We have two options:
 242   // A. Transfer the threads from the _wait_set to the _entry_list
 243   // B. Remove the thread from the _wait_set and unpark() it.
 244   //
 245   // We use (B), which is crude and results in lots of futile
 246   // context switching.  In particular (B) induces lots of contention.
 247 
 248   ParkEvent* ev = NULL;       // consider using a small auto array ...
 249   RawMonitor_lock->lock_without_safepoint_check();
 250   for (;;) {
 251     QNode* w = _wait_set;


 334 
 335   self->set_current_pending_raw_monitor(NULL);
 336 
 337   guarantee(_owner == self, "invariant");
 338   guarantee(_recursions == 0, "invariant");
 339 }
 340 
 341 int JvmtiRawMonitor::raw_exit(Thread* self) {
 342   if (self != _owner) {
 343     return M_ILLEGAL_MONITOR_STATE;
 344   }
 345   if (_recursions > 0) {
 346     _recursions--;
 347   } else {
 348     simple_exit(self);
 349   }
 350 
 351   return M_OK;
 352 }
 353 
 354 // All JavaThreads will enter here with state _thread_blocked
 355 
 356 int JvmtiRawMonitor::raw_wait(jlong millis, bool interruptible, Thread* self) {
 357   if (self != _owner) {
 358     return M_ILLEGAL_MONITOR_STATE;
 359   }
 360 


 361   // To avoid spurious wakeups we reset the parkevent. This is strictly optional.
 362   // The caller must be able to tolerate spurious returns from raw_wait().
 363   self->_ParkEvent->reset();
 364   OrderAccess::fence();
 365 
 366   JavaThread* jt = NULL;
 367   // check interrupt event
 368   if (interruptible) {
 369     assert(self->is_Java_thread(), "Only JavaThreads can be interruptible");
 370     jt = (JavaThread*)self;
 371     if (jt->is_interrupted(true)) {
 372       return M_INTERRUPTED;
 373     }
 374   } else {
 375     assert(!self->is_Java_thread(), "JavaThreads must be interuptible");
 376   }
 377 
 378   intptr_t save = _recursions;
 379   _recursions = 0;
 380   _waiters++;
 381   if (self->is_Java_thread()) {
 382     guarantee(jt->thread_state() == _thread_blocked, "invariant");
 383     jt->set_suspend_equivalent();
 384   }
 385   int rv = simple_wait(self, millis);
 386   _recursions = save;
 387   _waiters--;
 388 
 389   guarantee(self == _owner, "invariant");

 390   if (self->is_Java_thread()) {

 391     for (;;) {

 392       if (!jt->handle_special_suspend_equivalent_condition()) {
 393         break;
 394       }





 395       simple_exit(jt);




 396       jt->java_suspend_self();





 397       simple_enter(jt);
 398       jt->set_suspend_equivalent();
 399     }
 400     guarantee(jt == _owner, "invariant");
 401   }
 402 
 403   if (interruptible && jt->is_interrupted(true)) {
 404     return M_INTERRUPTED;
 405   }
 406 
 407   return M_OK;
 408 }
 409 
 410 int JvmtiRawMonitor::raw_notify(Thread* self) {
 411   if (self != _owner) {
 412     return M_ILLEGAL_MONITOR_STATE;
 413   }
 414   simple_notify(self, false);
 415   return M_OK;
 416 }
 417 
 418 int JvmtiRawMonitor::raw_notifyAll(Thread* self) {
 419   if (self != _owner) {
 420     return M_ILLEGAL_MONITOR_STATE;
 421   }
 422   simple_notify(self, true);
 423   return M_OK;
 424 }


 157   QNode* w = _entry_list;
 158   if (w != NULL) {
 159     _entry_list = w->_next;
 160   }
 161   RawMonitor_lock->unlock();
 162   if (w != NULL) {
 163     guarantee(w ->_t_state == QNode::TS_ENTER, "invariant");
 164     // Once we set _t_state to TS_RUN the waiting thread can complete
 165     // simple_enter and 'w' is pointing into random stack space. So we have
 166     // to ensure we extract the ParkEvent (which is in type-stable memory)
 167     // before we set the state, and then don't access 'w'.
 168     ParkEvent* ev = w->_event;
 169     OrderAccess::loadstore();
 170     w->_t_state = QNode::TS_RUN;
 171     OrderAccess::fence();
 172     ev->unpark();
 173   }
 174   return;
 175 }
 176 
// Link 'node' (stack-allocated by the waiting thread) onto the head of the
// wait set, marking it TS_WAIT. The node's fields are initialized before it
// is published, and the list itself is only mutated under RawMonitor_lock.
inline void JvmtiRawMonitor::enqueue_waiter(QNode& node) {
  node._notified = 0;
  node._t_state = QNode::TS_WAIT;
  RawMonitor_lock->lock_without_safepoint_check();
  node._next = _wait_set;
  _wait_set = &node;
  RawMonitor_lock->unlock();
}
 185 
// After the wait completes, ensure 'node' is no longer on the wait set.
// A notifier may already have removed it (and set TS_RUN); otherwise we
// unlink it ourselves under RawMonitor_lock. On return the node is TS_RUN
// and safe to destroy with the caller's stack frame.
inline void JvmtiRawMonitor::dequeue_waiter(QNode& node) {
  // If thread still resides on the waitset then unlink it.
  // Double-checked locking -- the usage is safe in this context
  // as _t_state is volatile and the lock-unlock operators are
  // serializing (barrier-equivalent).

  if (node._t_state == QNode::TS_WAIT) {
    RawMonitor_lock->lock_without_safepoint_check();
    if (node._t_state == QNode::TS_WAIT) {
      // Simple O(n) unlink, but performance isn't critical here.
      QNode* p;
      QNode* q = NULL;
      for (p = _wait_set; p != &node; p = p->_next) {
        q = p;
      }
      guarantee(p == &node, "invariant");
      if (q == NULL) {
        // Node is still at the head of the list.
        guarantee (p == _wait_set, "invariant");
        _wait_set = p->_next;
      } else {
        guarantee(p == q->_next, "invariant");
        q->_next = p->_next;
      }
      node._t_state = QNode::TS_RUN;
    }
    RawMonitor_lock->unlock();
  }

  guarantee(node._t_state == QNode::TS_RUN, "invariant");
}
 216 
// simple_wait is not quite so simple as we have to deal with the interaction
// with the Thread interrupt state, which resides in the java.lang.Thread object.
// That state must only be accessed while _thread_in_vm and requires proper thread-state
// transitions. However, we cannot perform such transitions whilst we hold the RawMonitor,
// else we can deadlock with the VMThread (which may also use RawMonitors as part of
// executing various callbacks).
// Returns M_OK usually, but M_INTERRUPTED if the thread is a JavaThread and was
// interrupted.
int JvmtiRawMonitor::simple_wait(Thread* self, jlong millis) {
  guarantee(_owner == self  , "invariant");
  guarantee(_recursions == 0, "invariant");

  // Stack-allocated wait-queue node; published on the wait set until
  // dequeue_waiter below removes it (or a notifier does).
  QNode node(self);
  enqueue_waiter(node);

  // Drop the monitor before parking so notifiers can acquire it.
  simple_exit(self);
  guarantee(_owner != self, "invariant");

  int ret = M_OK;
  if (self->is_Java_thread()) {
    JavaThread* jt = (JavaThread*) self;
    // Transition to VM so we can check interrupt state
    ThreadInVMfromNative tivm(jt);
    if (jt->is_interrupted(true)) {
      // Already interrupted: do not park at all.
      ret = M_INTERRUPTED;
    } else {
      ThreadBlockInVM tbivm(jt);
      jt->set_suspend_equivalent();
      if (millis <= 0) {
        self->_ParkEvent->park();
      } else {
        self->_ParkEvent->park(millis);
      }
      // Return to VM before post-check of interrupt state
    }
    // Re-check: an interrupt may be the reason we woke up.
    if (jt->is_interrupted(true)) {
      ret = M_INTERRUPTED;
    }
  } else {
    // Non-Java threads cannot be interrupted and need no state transition.
    if (millis <= 0) {
      self->_ParkEvent->park();
    } else {
      self->_ParkEvent->park(millis);
    }
  }

  dequeue_waiter(node);

  // Re-acquire the monitor before returning, regardless of why we woke.
  simple_enter(self);
  guarantee(_owner == self, "invariant");
  guarantee(_recursions == 0, "invariant");

  return ret;
}
 271 
 272 void JvmtiRawMonitor::simple_notify(Thread* self, bool all) {
 273   guarantee(_owner == self, "invariant");
 274   if (_wait_set == NULL) {
 275     return;
 276   }
 277 
 278   // We have two options:
 279   // A. Transfer the threads from the _wait_set to the _entry_list
 280   // B. Remove the thread from the _wait_set and unpark() it.
 281   //
 282   // We use (B), which is crude and results in lots of futile
 283   // context switching.  In particular (B) induces lots of contention.
 284 
 285   ParkEvent* ev = NULL;       // consider using a small auto array ...
 286   RawMonitor_lock->lock_without_safepoint_check();
 287   for (;;) {
 288     QNode* w = _wait_set;


 371 
 372   self->set_current_pending_raw_monitor(NULL);
 373 
 374   guarantee(_owner == self, "invariant");
 375   guarantee(_recursions == 0, "invariant");
 376 }
 377 
 378 int JvmtiRawMonitor::raw_exit(Thread* self) {
 379   if (self != _owner) {
 380     return M_ILLEGAL_MONITOR_STATE;
 381   }
 382   if (_recursions > 0) {
 383     _recursions--;
 384   } else {
 385     simple_exit(self);
 386   }
 387 
 388   return M_OK;
 389 }
 390 
// Wait on this raw monitor for up to 'millis' ms (<= 0 means wait without
// timeout). The caller must own the monitor. Returns M_ILLEGAL_MONITOR_STATE
// if the caller is not the owner, M_INTERRUPTED if a JavaThread was
// interrupted before/during the wait, otherwise M_OK. Callers must tolerate
// spurious returns.
int JvmtiRawMonitor::raw_wait(jlong millis, Thread* self) {
  if (self != _owner) {
    return M_ILLEGAL_MONITOR_STATE;
  }

  int ret = M_OK;

  // To avoid spurious wakeups we reset the parkevent. This is strictly optional.
  // The caller must be able to tolerate spurious returns from raw_wait().
  self->_ParkEvent->reset();
  OrderAccess::fence();

  // simple_wait requires _recursions == 0, so stash and restore the
  // recursion count around the wait.
  intptr_t save = _recursions;
  _recursions = 0;
  _waiters++;
  ret = simple_wait(self, millis);
  _recursions = save;
  _waiters--;

  guarantee(self == _owner, "invariant");

  if (self->is_Java_thread()) {
    JavaThread* jt = (JavaThread*)self;
    for (;;) {
      jt->set_suspend_equivalent();
      if (!jt->handle_special_suspend_equivalent_condition()) {
        break;
      } else {
        // We've been suspended whilst waiting and so we have to
        // relinquish the raw monitor until we are resumed. Of course
        // after reacquiring we have to re-check for suspension again.
        // Suspension requires we are _thread_blocked, and we also have to
        // recheck for being interrupted.
        simple_exit(jt);
        {
          ThreadInVMfromNative tivm(jt);
          {
            ThreadBlockInVM tbivm(jt);
            jt->java_suspend_self();
          }
          if (jt->is_interrupted(true)) {
            ret = M_INTERRUPTED;
          }
        }
        simple_enter(jt);
      }
    }
    guarantee(jt == _owner, "invariant");
  } else {
    // Only JavaThreads take the interruptible path in simple_wait.
    assert(ret != M_INTERRUPTED, "Only JavaThreads can be interrupted");
  }

  return ret;
}
 445 
 446 int JvmtiRawMonitor::raw_notify(Thread* self) {
 447   if (self != _owner) {
 448     return M_ILLEGAL_MONITOR_STATE;
 449   }
 450   simple_notify(self, false);
 451   return M_OK;
 452 }
 453 
 454 int JvmtiRawMonitor::raw_notifyAll(Thread* self) {
 455   if (self != _owner) {
 456     return M_ILLEGAL_MONITOR_STATE;
 457   }
 458   simple_notify(self, true);
 459   return M_OK;
 460 }
< prev index next >