// NOTE(review): fragment -- appears to be the tail of ObjectSynchronizer::fast_exit
// (see the "fast_exit: release stacklock" TEVENT tag below); the function header,
// and the declarations of `object`, `lock`, `dhw`, and `mark`, are outside this excerpt.
192 // Diagnostics -- Could be: stack-locked, inflating, inflated.
193 mark = object->mark() ;
194 assert (!mark->is_neutral(), "invariant") ;
195 if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
196 assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ;
197 }
198 if (mark->has_monitor()) {
199 ObjectMonitor * m = mark->monitor() ;
200 assert(((oop)(m->object()))->mark() == mark, "invariant") ;
201 assert(m->is_entered(THREAD), "invariant") ;
202 }
203 return ;
204 }
205
// Re-read the mark word -- it may have changed since the diagnostics above.
206 mark = object->mark() ;
207
208 // If the object is stack-locked by the current thread, try to
209 // swing the displaced header from the box back to the mark.
210 if (mark == (markOop) lock) {
211 assert (dhw->is_neutral(), "invariant") ;
// CAS the displaced header (dhw) back into the object's mark word; success
// means the stack lock was released without ever inflating the monitor.
212 if (object->cas_set_mark(dhw, mark) == mark) {
213 TEVENT (fast_exit: release stacklock) ;
214 return;
215 }
216 }
217
// Slow path: the fast unlock CAS failed (or the object was never
// stack-locked by us) -- inflate to a full ObjectMonitor and exit through it.
218 ObjectSynchronizer::inflate(THREAD, object)->exit (true, THREAD) ;
219 }
220
221 // -----------------------------------------------------------------------------
222 // Interpreter/Compiler Slow Case
223 // This routine is used to handle interpreter/compiler slow case
224 // We don't need to use fast path here, because it must have been
225 // failed in the interpreter/compiler code.
// NOTE(review): fragment -- slow_enter's signature is visible but the excerpt is
// cut after the #if 0 section; the remainder of the function (presumably the
// inflated-monitor path) is outside this view.
226 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
227 markOop mark = obj->mark();
228 assert(!mark->has_bias_pattern(), "should not see bias pattern here");
229
// Neutral (unlocked) mark: attempt to stack-lock by CASing a pointer to the
// on-stack BasicLock into the object's mark word.
230 if (mark->is_neutral()) {
231 // Anticipate successful CAS -- the ST of the displaced mark must
232 // be visible <= the ST performed by the CAS.
233 lock->set_displaced_header(mark);
234 if (mark == obj()->cas_set_mark((markOop) lock, mark)) {
235 TEVENT (slow_enter: release stacklock) ;
236 return ;
237 }
238 // Fall through to inflate() ...
239 } else
// Recursive stack-lock: the current thread already owns the stack lock, so a
// NULL displaced header in this (inner) BasicLock marks the recursive entry.
240 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
241 assert(lock != mark->locker(), "must not re-lock the same lock");
242 assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
243 lock->set_displaced_header(NULL);
244 return;
245 }
246
// Disabled optimization: skipping inflation when we already own the inflated
// monitor. Left as dead code by the original authors.
247 #if 0
248 // The following optimization isn't particularly useful.
249 if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
250 lock->set_displaced_header (NULL) ;
251 return ;
252 }
253 #endif
254
// NOTE(review): fragment -- an identity-hash-code routine (the get_next_hash /
// copy_set_hash sequence below suggests FastHashCode, but the function header
// is outside this excerpt, and its opening assert is cut mid-expression).
633 Self->is_Java_thread() , "invariant") ;
634 assert (Universe::verify_in_progress() ||
635 ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
636
637 ObjectMonitor* monitor = NULL;
638 markOop temp, test;
639 intptr_t hash;
640 markOop mark = ReadStableMark (obj);
641
642 // object should remain ineligible for biased locking
643 assert (!mark->has_bias_pattern(), "invariant") ;
644
645 if (mark->is_neutral()) {
646 hash = mark->hash(); // this is a normal header
647 if (hash) { // if it has hash, just return it
648 return hash;
649 }
650 hash = get_next_hash(Self, obj); // allocate a new hash code
651 temp = mark->copy_set_hash(hash); // merge the hash code into header
652 // use (machine word version) atomic operation to install the hash
653 test = obj->cas_set_mark(temp, mark);
654 if (test == mark) {
655 return hash;
656 }
657 // If atomic operation failed, we must inflate the header
658 // into heavy weight monitor. We could add more code here
659 // for fast path, but it does not worth the complexity.
// Inflated: the authoritative (neutral) header -- and therefore any installed
// hash -- lives in the ObjectMonitor, not in the object's mark word.
660 } else if (mark->has_monitor()) {
661 monitor = mark->monitor();
662 temp = monitor->header();
663 assert (temp->is_neutral(), "invariant") ;
664 hash = temp->hash();
665 if (hash) {
666 return hash;
667 }
668 // Skip to the following code to reduce code size
// Stack-locked by the current thread: the real header was displaced onto the
// owner's stack; read the hash from the displaced copy.
669 } else if (Self->is_lock_owned((address)mark->locker())) {
670 temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
671 assert (temp->is_neutral(), "invariant") ;
672 hash = temp->hash(); // by current thread, check if the displaced
673 if (hash) { // header contains hash code
// (fragment ends mid-branch; the remainder of the routine is outside this excerpt)
// NOTE(review): fragment of a monitor-inflation routine (likely
// ObjectSynchronizer::inflate, given omAlloc/omRelease and the TEVENT tags);
// both the function header and the tail are outside this excerpt, and the
// embedded line numbering shows internal elisions (1238->1245, 1285->1338).
1198 // Inflate mutates the heap ...
1199 // Relaxing assertion for bug 6320749.
1200 assert (Universe::verify_in_progress() ||
1201 !SafepointSynchronize::is_at_safepoint(), "invariant") ;
1202
// Retry loop: each iteration re-reads the mark word and dispatches on its state.
1203 for (;;) {
1204 const markOop mark = object->mark() ;
1205 assert (!mark->has_bias_pattern(), "invariant") ;
1206
1207 // The mark can be in one of the following states:
1208 // * Inflated - just return
1209 // * Stack-locked - coerce it to inflated
1210 // * INFLATING - busy wait for conversion to complete
1211 // * Neutral - aggressively inflate the object.
1212 // * BIASED - Illegal. We should never see this
1213
1214 // CASE: inflated
1215 if (mark->has_monitor()) {
1216 ObjectMonitor * inf = mark->monitor() ;
1217 assert (inf->header()->is_neutral(), "invariant");
1218 assert ((oop) inf->object() == object, "invariant") ;
1219 assert (ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1220 return inf ;
1221 }
1222
1223 // CASE: inflation in progress - inflating over a stack-lock.
1224 // Some other thread is converting from stack-locked to inflated.
1225 // Only that thread can complete inflation -- other threads must wait.
1226 // The INFLATING value is transient.
1227 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1228 // We could always eliminate polling by parking the thread on some auxiliary list.
1229 if (mark == markOopDesc::INFLATING()) {
1230 TEVENT (Inflate: spin while INFLATING) ;
1231 ReadStableMark(object) ;
1232 continue ;
1233 }
1234
1235 // CASE: stack-locked
1236 // Could be stack-locked either by this thread or by some other thread.
1237 //
1238 // Note that we allocate the objectmonitor speculatively, _before_ attempting
// NOTE(review): the excerpt jumps from line 1238 to 1245 here -- several
// comment lines are elided from this view.
1245 // We now use per-thread private objectmonitor free lists.
1246 // These list are reprovisioned from the global free list outside the
1247 // critical INFLATING...ST interval. A thread can transfer
1248 // multiple objectmonitors en-mass from the global free list to its local free list.
1249 // This reduces coherency traffic and lock contention on the global free list.
1250 // Using such local free lists, it doesn't matter if the omAlloc() call appears
1251 // before or after the CAS(INFLATING) operation.
1252 // See the comments in omAlloc().
1253
1254 if (mark->has_locker()) {
1255 ObjectMonitor * m = omAlloc (Self) ;
1256 // Optimistically prepare the objectmonitor - anticipate successful CAS
1257 // We do this before the CAS in order to minimize the length of time
1258 // in which INFLATING appears in the mark.
1259 m->Recycle();
1260 m->_Responsible = NULL ;
1261 m->OwnerIsThread = 0 ;
1262 m->_recursions = 0 ;
1263 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ; // Consider: maintain by type/class
1264
// Claim the right to inflate by CASing the transient INFLATING (0) value
// over the stack-lock mark; losers release their speculative monitor and retry.
1265 markOop cmp = object->cas_set_mark(markOopDesc::INFLATING(), mark);
1266 if (cmp != mark) {
1267 omRelease (Self, m, true) ;
1268 continue ; // Interference -- just retry
1269 }
1270
1271 // We've successfully installed INFLATING (0) into the mark-word.
1272 // This is the only case where 0 will appear in a mark-work.
1273 // Only the singular thread that successfully swings the mark-word
1274 // to 0 can perform (or more precisely, complete) inflation.
1275 //
1276 // Why do we CAS a 0 into the mark-word instead of just CASing the
1277 // mark-word from the stack-locked value directly to the new inflated state?
1278 // Consider what happens when a thread unlocks a stack-locked object.
1279 // It attempts to use CAS to swing the displaced header value from the
1280 // on-stack basiclock back into the object header. Recall also that the
1281 // header value (hashcode, etc) can reside in (a) the object header, or
1282 // (b) a displaced header associated with the stack-lock, or (c) a displaced
1283 // header in an objectMonitor. The inflate() routine must copy the header
1284 // value from the basiclock on the owner's stack to the objectMonitor, all
1285 // the while preserving the hashCode stability invariants. If the owner
// NOTE(review): the excerpt jumps from line 1285 to 1338 here -- the rest of
// the stack-locked case is elided from this view.
1338 // If we know we're inflating for entry it's better to inflate by swinging a
1339 // pre-locked objectMonitor pointer into the object header. A successful
1340 // CAS inflates the object *and* confers ownership to the inflating thread.
1341 // In the current implementation we use a 2-step mechanism where we CAS()
1342 // to inflate and then CAS() again to try to swing _owner from NULL to Self.
1343 // An inflateTry() method that we could call from fast_enter() and slow_enter()
1344 // would be useful.
1345
// CASE: neutral -- install a fresh, unowned monitor directly over the mark word.
1346 assert (mark->is_neutral(), "invariant");
1347 ObjectMonitor * m = omAlloc (Self) ;
1348 // prepare m for installation - set monitor to initial state
1349 m->Recycle();
1350 m->set_header(mark);
1351 m->set_owner(NULL);
1352 m->set_object(object);
1353 m->OwnerIsThread = 1 ;
1354 m->_recursions = 0 ;
1355 m->_Responsible = NULL ;
1356 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ; // consider: keep metastats by type/class
1357
1358 if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
// CAS failed: scrub the speculative monitor and return it to the free list
// before retrying from the top of the loop.
1359 m->set_object (NULL) ;
1360 m->set_owner (NULL) ;
1361 m->OwnerIsThread = 0 ;
1362 m->Recycle() ;
1363 omRelease (Self, m, true) ;
1364 m = NULL ;
1365 continue ;
1366 // interference - the markword changed - just retry.
1367 // The state-transitions are one-way, so there's no chance of
1368 // live-lock -- "Inflated" is an absorbing state.
1369 }
1370
1371 // Hopefully the performance counters are allocated on distinct
1372 // cache lines to avoid false sharing on MP systems ...
1373 if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
1374 TEVENT(Inflate: overwrite neutral) ;
1375 if (TraceMonitorInflation) {
1376 if (object->is_instance()) {
1377 ResourceMark rm;
// (fragment ends mid-statement; the print_cr arguments continue past this excerpt)
1378 tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
|
// NOTE(review): fragment -- appears to be the tail of ObjectSynchronizer::fast_exit
// (see the "fast_exit: release stacklock" TEVENT tag below); the function header,
// and the declarations of `object`, `lock`, `dhw`, and `mark`, are outside this
// excerpt. This copy performs the CAS via Atomic::cmpxchg_ptr on mark_addr().
192 // Diagnostics -- Could be: stack-locked, inflating, inflated.
193 mark = object->mark() ;
194 assert (!mark->is_neutral(), "invariant") ;
195 if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
196 assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ;
197 }
198 if (mark->has_monitor()) {
199 ObjectMonitor * m = mark->monitor() ;
200 assert(((oop)(m->object()))->mark() == mark, "invariant") ;
201 assert(m->is_entered(THREAD), "invariant") ;
202 }
203 return ;
204 }
205
// Re-read the mark word -- it may have changed since the diagnostics above.
206 mark = object->mark() ;
207
208 // If the object is stack-locked by the current thread, try to
209 // swing the displaced header from the box back to the mark.
210 if (mark == (markOop) lock) {
211 assert (dhw->is_neutral(), "invariant") ;
// CAS the displaced header (dhw) back into the object's mark word; success
// means the stack lock was released without ever inflating the monitor.
212 if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
213 TEVENT (fast_exit: release stacklock) ;
214 return;
215 }
216 }
217
// Slow path: the fast unlock CAS failed (or the object was never
// stack-locked by us) -- inflate to a full ObjectMonitor and exit through it.
218 ObjectSynchronizer::inflate(THREAD, object)->exit (true, THREAD) ;
219 }
220
221 // -----------------------------------------------------------------------------
222 // Interpreter/Compiler Slow Case
223 // This routine is used to handle interpreter/compiler slow case
224 // We don't need to use fast path here, because it must have been
225 // failed in the interpreter/compiler code.
// NOTE(review): fragment -- slow_enter's signature is visible but the excerpt is
// cut after the #if 0 section; the remainder of the function (presumably the
// inflated-monitor path) is outside this view. This copy performs the CAS via
// Atomic::cmpxchg_ptr on mark_addr().
226 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
227 markOop mark = obj->mark();
228 assert(!mark->has_bias_pattern(), "should not see bias pattern here");
229
// Neutral (unlocked) mark: attempt to stack-lock by CASing a pointer to the
// on-stack BasicLock into the object's mark word.
230 if (mark->is_neutral()) {
231 // Anticipate successful CAS -- the ST of the displaced mark must
232 // be visible <= the ST performed by the CAS.
233 lock->set_displaced_header(mark);
234 if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
235 TEVENT (slow_enter: release stacklock) ;
236 return ;
237 }
238 // Fall through to inflate() ...
239 } else
// Recursive stack-lock: the current thread already owns the stack lock, so a
// NULL displaced header in this (inner) BasicLock marks the recursive entry.
240 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
241 assert(lock != mark->locker(), "must not re-lock the same lock");
242 assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
243 lock->set_displaced_header(NULL);
244 return;
245 }
246
// Disabled optimization: skipping inflation when we already own the inflated
// monitor. Left as dead code by the original authors.
247 #if 0
248 // The following optimization isn't particularly useful.
249 if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
250 lock->set_displaced_header (NULL) ;
251 return ;
252 }
253 #endif
254
// NOTE(review): fragment -- an identity-hash-code routine (the get_next_hash /
// copy_set_hash sequence below suggests FastHashCode, but the function header
// is outside this excerpt, and its opening assert is cut mid-expression).
// This copy installs the hash via Atomic::cmpxchg_ptr on mark_addr().
633 Self->is_Java_thread() , "invariant") ;
634 assert (Universe::verify_in_progress() ||
635 ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
636
637 ObjectMonitor* monitor = NULL;
638 markOop temp, test;
639 intptr_t hash;
640 markOop mark = ReadStableMark (obj);
641
642 // object should remain ineligible for biased locking
643 assert (!mark->has_bias_pattern(), "invariant") ;
644
645 if (mark->is_neutral()) {
646 hash = mark->hash(); // this is a normal header
647 if (hash) { // if it has hash, just return it
648 return hash;
649 }
650 hash = get_next_hash(Self, obj); // allocate a new hash code
651 temp = mark->copy_set_hash(hash); // merge the hash code into header
652 // use (machine word version) atomic operation to install the hash
653 test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
654 if (test == mark) {
655 return hash;
656 }
657 // If atomic operation failed, we must inflate the header
658 // into heavy weight monitor. We could add more code here
659 // for fast path, but it does not worth the complexity.
// Inflated: the authoritative (neutral) header -- and therefore any installed
// hash -- lives in the ObjectMonitor, not in the object's mark word.
660 } else if (mark->has_monitor()) {
661 monitor = mark->monitor();
662 temp = monitor->header();
663 assert (temp->is_neutral(), "invariant") ;
664 hash = temp->hash();
665 if (hash) {
666 return hash;
667 }
668 // Skip to the following code to reduce code size
// Stack-locked by the current thread: the real header was displaced onto the
// owner's stack; read the hash from the displaced copy.
669 } else if (Self->is_lock_owned((address)mark->locker())) {
670 temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
671 assert (temp->is_neutral(), "invariant") ;
672 hash = temp->hash(); // by current thread, check if the displaced
673 if (hash) { // header contains hash code
// (fragment ends mid-branch; the remainder of the routine is outside this excerpt)
// NOTE(review): fragment of a monitor-inflation routine (likely
// ObjectSynchronizer::inflate, given omAlloc/omRelease and the TEVENT tags);
// both the function header and the tail are outside this excerpt, and the
// embedded line numbering shows internal elisions (1238->1245, 1285->1338).
// This copy performs its CASes via Atomic::cmpxchg_ptr on mark_addr().
1198 // Inflate mutates the heap ...
1199 // Relaxing assertion for bug 6320749.
1200 assert (Universe::verify_in_progress() ||
1201 !SafepointSynchronize::is_at_safepoint(), "invariant") ;
1202
// Retry loop: each iteration re-reads the mark word and dispatches on its state.
1203 for (;;) {
1204 const markOop mark = object->mark() ;
1205 assert (!mark->has_bias_pattern(), "invariant") ;
1206
1207 // The mark can be in one of the following states:
1208 // * Inflated - just return
1209 // * Stack-locked - coerce it to inflated
1210 // * INFLATING - busy wait for conversion to complete
1211 // * Neutral - aggressively inflate the object.
1212 // * BIASED - Illegal. We should never see this
1213
1214 // CASE: inflated
1215 if (mark->has_monitor()) {
1216 ObjectMonitor * inf = mark->monitor() ;
1217 assert (inf->header()->is_neutral(), "invariant");
1218 assert (inf->object() == object, "invariant") ;
1219 assert (ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1220 return inf ;
1221 }
1222
1223 // CASE: inflation in progress - inflating over a stack-lock.
1224 // Some other thread is converting from stack-locked to inflated.
1225 // Only that thread can complete inflation -- other threads must wait.
1226 // The INFLATING value is transient.
1227 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1228 // We could always eliminate polling by parking the thread on some auxiliary list.
1229 if (mark == markOopDesc::INFLATING()) {
1230 TEVENT (Inflate: spin while INFLATING) ;
1231 ReadStableMark(object) ;
1232 continue ;
1233 }
1234
1235 // CASE: stack-locked
1236 // Could be stack-locked either by this thread or by some other thread.
1237 //
1238 // Note that we allocate the objectmonitor speculatively, _before_ attempting
// NOTE(review): the excerpt jumps from line 1238 to 1245 here -- several
// comment lines are elided from this view.
1245 // We now use per-thread private objectmonitor free lists.
1246 // These list are reprovisioned from the global free list outside the
1247 // critical INFLATING...ST interval. A thread can transfer
1248 // multiple objectmonitors en-mass from the global free list to its local free list.
1249 // This reduces coherency traffic and lock contention on the global free list.
1250 // Using such local free lists, it doesn't matter if the omAlloc() call appears
1251 // before or after the CAS(INFLATING) operation.
1252 // See the comments in omAlloc().
1253
1254 if (mark->has_locker()) {
1255 ObjectMonitor * m = omAlloc (Self) ;
1256 // Optimistically prepare the objectmonitor - anticipate successful CAS
1257 // We do this before the CAS in order to minimize the length of time
1258 // in which INFLATING appears in the mark.
1259 m->Recycle();
1260 m->_Responsible = NULL ;
1261 m->OwnerIsThread = 0 ;
1262 m->_recursions = 0 ;
1263 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ; // Consider: maintain by type/class
1264
// Claim the right to inflate by CASing the transient INFLATING (0) value
// over the stack-lock mark; losers release their speculative monitor and retry.
1265 markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ;
1266 if (cmp != mark) {
1267 omRelease (Self, m, true) ;
1268 continue ; // Interference -- just retry
1269 }
1270
1271 // We've successfully installed INFLATING (0) into the mark-word.
1272 // This is the only case where 0 will appear in a mark-work.
1273 // Only the singular thread that successfully swings the mark-word
1274 // to 0 can perform (or more precisely, complete) inflation.
1275 //
1276 // Why do we CAS a 0 into the mark-word instead of just CASing the
1277 // mark-word from the stack-locked value directly to the new inflated state?
1278 // Consider what happens when a thread unlocks a stack-locked object.
1279 // It attempts to use CAS to swing the displaced header value from the
1280 // on-stack basiclock back into the object header. Recall also that the
1281 // header value (hashcode, etc) can reside in (a) the object header, or
1282 // (b) a displaced header associated with the stack-lock, or (c) a displaced
1283 // header in an objectMonitor. The inflate() routine must copy the header
1284 // value from the basiclock on the owner's stack to the objectMonitor, all
1285 // the while preserving the hashCode stability invariants. If the owner
// NOTE(review): the excerpt jumps from line 1285 to 1338 here -- the rest of
// the stack-locked case is elided from this view.
1338 // If we know we're inflating for entry it's better to inflate by swinging a
1339 // pre-locked objectMonitor pointer into the object header. A successful
1340 // CAS inflates the object *and* confers ownership to the inflating thread.
1341 // In the current implementation we use a 2-step mechanism where we CAS()
1342 // to inflate and then CAS() again to try to swing _owner from NULL to Self.
1343 // An inflateTry() method that we could call from fast_enter() and slow_enter()
1344 // would be useful.
1345
// CASE: neutral -- install a fresh, unowned monitor directly over the mark word.
1346 assert (mark->is_neutral(), "invariant");
1347 ObjectMonitor * m = omAlloc (Self) ;
1348 // prepare m for installation - set monitor to initial state
1349 m->Recycle();
1350 m->set_header(mark);
1351 m->set_owner(NULL);
1352 m->set_object(object);
1353 m->OwnerIsThread = 1 ;
1354 m->_recursions = 0 ;
1355 m->_Responsible = NULL ;
1356 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ; // consider: keep metastats by type/class
1357
1358 if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
// CAS failed: scrub the speculative monitor and return it to the free list
// before retrying from the top of the loop.
1359 m->set_object (NULL) ;
1360 m->set_owner (NULL) ;
1361 m->OwnerIsThread = 0 ;
1362 m->Recycle() ;
1363 omRelease (Self, m, true) ;
1364 m = NULL ;
1365 continue ;
1366 // interference - the markword changed - just retry.
1367 // The state-transitions are one-way, so there's no chance of
1368 // live-lock -- "Inflated" is an absorbing state.
1369 }
1370
1371 // Hopefully the performance counters are allocated on distinct
1372 // cache lines to avoid false sharing on MP systems ...
1373 if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
1374 TEVENT(Inflate: overwrite neutral) ;
1375 if (TraceMonitorInflation) {
1376 if (object->is_instance()) {
1377 ResourceMark rm;
// (fragment ends mid-statement; the print_cr arguments continue past this excerpt)
1378 tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
|