223 // Case: light contention possibly amenable to TLE
224 // Case: TLE inimical operations such as nested/recursive synchronization
225
226 if (owner == self) {
227 m->_recursions++;
228 return true;
229 }
230
231 // This Java Monitor is inflated so obj's header will never be
232 // displaced to this thread's BasicLock. Make the displaced header
233 // non-NULL so this BasicLock is not seen as recursive nor as
234 // being locked. We do this unconditionally so that this thread's
235 // BasicLock cannot be mis-interpreted by any stack walkers. For
236 // performance reasons, stack walkers generally first check for
237 // Biased Locking in the object's header, the second check is for
238 // stack-locking in the object's header, the third check is for
239 // recursive stack-locking in the displaced header in the BasicLock,
240 // and last are the inflated Java Monitor (ObjectMonitor) checks.
241 lock->set_displaced_header(markWord::unused_mark());
242
243 if (owner == NULL && Atomic::replace_if_null(&(m->_owner), self)) {
244 assert(m->_recursions == 0, "invariant");
245 return true;
246 }
247 }
248
249 // Note that we could inflate in quick_enter.
250 // This is likely a useful optimization.
251 // Critically, in quick_enter() we must not:
252 // -- perform bias revocation, or
253 // -- block indefinitely, or
254 // -- reach a safepoint
255
256 return false; // revert to slow-path
257 }
258
259 // -----------------------------------------------------------------------------
260 // Monitor Enter/Exit
261 // The interpreter and compiler assembly code tries to lock using the fast path
262 // of this algorithm. Make sure to update that code if the following function is
263 // changed. The implementation is extremely sensitive to race conditions. Be careful.
1387 // 0 serves as a "BUSY" inflate-in-progress indicator.
1388
1389
1390 // Fetch the displaced mark from the owner's stack.
1391 // The owner can't die or unwind past the lock while our INFLATING
1392 // object is in the mark. Furthermore the owner can't complete
1393 // an unlock on the object, either.
1394 markWord dmw = mark.displaced_mark_helper();
1395 // Catch if the object's header is not neutral (not locked and
1396 // not marked is what we care about here).
1397 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1398
1399 // Setup monitor fields to proper values -- prepare the monitor
1400 m->set_header(dmw);
1401
1402 // Optimization: if the mark.locker stack address is associated
1403 // with this thread we could simply set m->_owner = self.
1404 // Note that a thread can inflate an object
1405 // that it has stack-locked -- as might happen in wait() -- directly
1406 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
1407 m->set_owner(mark.locker());
1408 m->set_object(object);
1409 // TODO-FIXME: assert BasicLock->dhw != 0.
1410
1411 // Must preserve store ordering. The monitor state must
1412 // be stable at the time of publishing the monitor address.
1413 guarantee(object->mark() == markWord::INFLATING(), "invariant");
1414 object->release_set_mark(markWord::encode(m));
1415
1416 // Hopefully the performance counters are allocated on distinct cache lines
1417 // to avoid false sharing on MP systems ...
1418 OM_PERFDATA_OP(Inflations, inc());
1419 if (log_is_enabled(Trace, monitorinflation)) {
1420 ResourceMark rm(self);
1421 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1422 INTPTR_FORMAT ", type='%s'", p2i(object),
1423 object->mark().value(), object->klass()->external_name());
1424 }
1425 if (event.should_commit()) {
1426 post_monitor_inflate_event(&event, object, cause);
1427 }
|
223 // Case: light contention possibly amenable to TLE
224 // Case: TLE inimical operations such as nested/recursive synchronization
225
226 if (owner == self) {
227 m->_recursions++;
228 return true;
229 }
230
231 // This Java Monitor is inflated so obj's header will never be
232 // displaced to this thread's BasicLock. Make the displaced header
233 // non-NULL so this BasicLock is not seen as recursive nor as
234 // being locked. We do this unconditionally so that this thread's
235 // BasicLock cannot be mis-interpreted by any stack walkers. For
236 // performance reasons, stack walkers generally first check for
237 // Biased Locking in the object's header, the second check is for
238 // stack-locking in the object's header, the third check is for
239 // recursive stack-locking in the displaced header in the BasicLock,
240 // and last are the inflated Java Monitor (ObjectMonitor) checks.
241 lock->set_displaced_header(markWord::unused_mark());
242
243 if (owner == NULL && m->try_set_owner_from(NULL, self) == NULL) {
244 assert(m->_recursions == 0, "invariant");
245 return true;
246 }
247 }
248
249 // Note that we could inflate in quick_enter.
250 // This is likely a useful optimization.
251 // Critically, in quick_enter() we must not:
252 // -- perform bias revocation, or
253 // -- block indefinitely, or
254 // -- reach a safepoint
255
256 return false; // revert to slow-path
257 }
258
259 // -----------------------------------------------------------------------------
260 // Monitor Enter/Exit
261 // The interpreter and compiler assembly code tries to lock using the fast path
262 // of this algorithm. Make sure to update that code if the following function is
263 // changed. The implementation is extremely sensitive to race conditions. Be careful.
1387 // 0 serves as a "BUSY" inflate-in-progress indicator.
1388
1389
1390 // Fetch the displaced mark from the owner's stack.
1391 // The owner can't die or unwind past the lock while our INFLATING
1392 // object is in the mark. Furthermore the owner can't complete
1393 // an unlock on the object, either.
1394 markWord dmw = mark.displaced_mark_helper();
1395 // Catch if the object's header is not neutral (not locked and
1396 // not marked is what we care about here).
1397 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1398
1399 // Setup monitor fields to proper values -- prepare the monitor
1400 m->set_header(dmw);
1401
1402 // Optimization: if the mark.locker stack address is associated
1403 // with this thread we could simply set m->_owner = self.
1404 // Note that a thread can inflate an object
1405 // that it has stack-locked -- as might happen in wait() -- directly
1406 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
1407 m->simply_set_owner_from(NULL, mark.locker());
1408 m->set_object(object);
1409 // TODO-FIXME: assert BasicLock->dhw != 0.
1410
1411 // Must preserve store ordering. The monitor state must
1412 // be stable at the time of publishing the monitor address.
1413 guarantee(object->mark() == markWord::INFLATING(), "invariant");
1414 object->release_set_mark(markWord::encode(m));
1415
1416 // Hopefully the performance counters are allocated on distinct cache lines
1417 // to avoid false sharing on MP systems ...
1418 OM_PERFDATA_OP(Inflations, inc());
1419 if (log_is_enabled(Trace, monitorinflation)) {
1420 ResourceMark rm(self);
1421 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1422 INTPTR_FORMAT ", type='%s'", p2i(object),
1423 object->mark().value(), object->klass()->external_name());
1424 }
1425 if (event.should_commit()) {
1426 post_monitor_inflate_event(&event, object, cause);
1427 }
|