    if (mark->has_monitor()) {
      ObjectMonitor * m = mark->monitor() ;
      assert(((oop)(m->object()))->mark() == mark, "invariant") ;
      assert(m->is_entered(THREAD), "invariant") ;
    }
    return ;
  }

  mark = object->mark() ;

  // If the object is stack-locked by the current thread, try to
  // swing the displaced header from the box back to the mark.
  if (mark == (markOop) lock) {
    assert (dhw->is_neutral(), "invariant") ;
    if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
      TEVENT (fast_exit: release stacklock) ;
      return;
    }
  }

  ObjectSynchronizer::inflate(THREAD,
                              object,
                              inflate_cause_vm_internal)->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine handles the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must already
// have failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
      TEVENT (slow_enter: release stacklock) ;
      return ;
    }
    // Fall through to inflate() ...
  } else
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

#if 0
  // The following optimization isn't particularly useful.
  if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
    lock->set_displaced_header (NULL) ;
    return ;
  }
#endif

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_monitor_enter)->enter(THREAD);
}

// This routine handles the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must already
// have failed in the interpreter/compiler code. Simply using the
// heavy-weight monitor should be OK, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit (object, lock, THREAD) ;
}

// -----------------------------------------------------------------------------
// Class Loader support to work around deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  TEVENT (complete_exit) ;
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_vm_internal);

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  TEVENT (reenter) ;
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_vm_internal);

  monitor->reenter(recursion, THREAD);
}
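
// Added illustration (not part of the original file): a minimal sketch of the
// nested-lock protocol documented above, expressed with this file's own entry
// points. The helper name and both handles are hypothetical. Note this sketch
// compresses steps 2)-5): wait() re-enters lock2 internally before lock1 is
// reacquired, so real callers that must preserve the lock1-then-lock2 order
// perform steps 3)-5) with explicit monitor operations instead.
#if 0
static void wait_with_outer_released(Handle lock1, Handle lock2, jlong millis, TRAPS) {
  // 1) give up lock1 entirely, remembering its recursion count
  intptr_t recursions = ObjectSynchronizer::complete_exit(lock1, THREAD);
  // 2)-3) wait on lock2 (wait() exits and re-enters lock2 around the park)
  ObjectSynchronizer::wait(lock2, millis, THREAD);
  // 4) reenter lock1 with the original recursion count
  ObjectSynchronizer::reenter(lock1, recursions, THREAD);
}
#endif
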
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
  // the current locking is from JNI instead of Java code
  TEVENT (jni_enter) ;
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor enter
bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj());
  return monitor->try_enter(THREAD);
}


// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  TEVENT (jni_exit) ;
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj,
                                                       inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor. Note: can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    TEVENT (ObjectLocker) ;

    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT (wait - throw IAX) ;
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  /* This dummy call is in place to get around dtrace bug 6254741. Once
     that's fixed we can uncomment the following line and remove the call */
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT (wait - throw IAX) ;
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_wait)->wait(millis, false, THREAD) ;
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
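  // Added annotation: if the object is stack-locked by the calling thread it
  // has never been inflated, so no thread can be waiting on it and notify()
  // is a no-op; we can return without inflating.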
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_notify)->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_notify)->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering

// ... (elided: the hash-code function resumes below, at its stack-locked case) ...

    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert (temp->is_neutral(), "invariant") ;
    hash = temp->hash();                  // by the current thread; check if the displaced
    if (hash) {                           // header contains the hash code
      return hash;
    }
    // WARNING:
    // The displaced header is strictly immutable.
    // It can NOT be changed in ANY case. So we have
    // to inflate the header into a heavyweight monitor
    // even if the current thread owns the lock. The reason
    // is that the BasicLock (stack slot) will be asynchronously
    // read by other threads during the inflate() function.
    // Any change to the stack may not propagate to other
    // threads correctly.
  }

  // Inflate the monitor to set the hash code
  monitor = ObjectSynchronizer::inflate(Self, obj, inflate_cause_hash_code);
  // Load the displaced header and check whether it has a hash code
  mark = monitor->header();
  assert (mark->is_neutral(), "invariant") ;
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge the hash code into the header
    assert (temp->is_neutral(), "invariant") ;
    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is to install the hash code. If someone adds a new usage of
      // the displaced header, please update this code.
      hash = test->hash();
      assert (test->is_neutral(), "invariant") ;
      assert (hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;

// ... (elided) ...

    guarantee (InUseTail != NULL && InUseList != NULL, "invariant");
  }

  Thread::muxAcquire (&ListLock, "omFlush") ;
  if (Tail != NULL) {
    Tail->FreeNext = gFreeList ;
    gFreeList = List ;
    MonitorFreeCount += Tally;
  }

  if (InUseTail != NULL) {
    InUseTail->FreeNext = gOmInUseList;
    gOmInUseList = InUseList;
    gOmInUseCount += InUseTally;
  }

  Thread::muxRelease (&ListLock) ;
  TEVENT (omFlush) ;
}

const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal:    return "VM Internal";
    case inflate_cause_monitor_enter:  return "Monitor Enter";
    case inflate_cause_wait:           return "Monitor Wait";
    case inflate_cause_notify:         return "Monitor Notify";
    case inflate_cause_hash_code:      return "Monitor Hash Code";
    case inflate_cause_jni_enter:      return "JNI Monitor Enter";
    case inflate_cause_jni_exit:       return "JNI Monitor Exit";
    default:
      ShouldNotReachHere();
  }
  return "Unknown";
}

static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       const ObjectSynchronizer::InflateCause cause) {
  assert(event != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_monitorClass(obj->klass());
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

// Fast path code shared by multiple functions
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
  markOop mark = obj->mark();
  if (mark->has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
    return mark->monitor();
  }
  return ObjectSynchronizer::inflate(Thread::current(),
                                     obj,
                                     inflate_cause_vm_internal);
}


// Note that we could encounter some performance loss through false-sharing as
// multiple locks occupy the same $ line. Padding might be appropriate.

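// Added illustration (not part of the original file): one way to realize the
// padding contemplated above -- round each monitor allocation up to a cache
// line boundary so adjacent monitors never share a line. A hedged sketch;
// the type name is hypothetical, and DEFAULT_CACHE_LINE_SIZE is assumed to be
// the per-platform constant HotSpot defines in globalDefinitions.
#if 0
struct PaddedObjectMonitor {
  ObjectMonitor _monitor;
  // Pad the tail of the allocation out to the next cache-line multiple.
  // (If sizeof(ObjectMonitor) is already a multiple, this wastes one line.)
  char _pad[DEFAULT_CACHE_LINE_SIZE - (sizeof(ObjectMonitor) % DEFAULT_CACHE_LINE_SIZE)];
};
#endif
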
ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self,
                                                  oop object,
                                                  const InflateCause cause) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert (Universe::verify_in_progress() ||
          !SafepointSynchronize::is_at_safepoint(), "invariant") ;

  EventJavaMonitorInflate event;

  for (;;) {
    const markOop mark = object->mark() ;
    assert (!mark->has_bias_pattern(), "invariant") ;

    // The mark can be in one of the following states:
    // *  Inflated     - just return
    // *  Stack-locked - coerce it to inflated
    // *  INFLATING    - busy wait for conversion to complete
    // *  Neutral      - aggressively inflate the object.
    // *  BIASED       - Illegal. We should never see this

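    // Added annotation: these states correspond to the low-order lock bits
    // of the mark word (biased locking aside, which is asserted away above):
    //   [ptr to stack BasicLock  | 00]  stack-locked
    //   [hash | age          | 0 | 01]  neutral (unlocked)
    //   [ptr to ObjectMonitor    | 10]  inflated
    //   [reserved for GC         | 11]
    //   all-zero word                   INFLATING (transient busy marker)
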
    // CASE: inflated
    if (mark->has_monitor()) {

    // ... (elided: the remainder of the inflated case, the INFLATING case,
    //      and the beginning of the stack-locked case) ...

      // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee (object->mark() == markOopDesc::INFLATING(), "invariant") ;
      object->release_set_mark(markOopDesc::encode(m));

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
      TEVENT(Inflate: overwrite stacklock) ;
      if (TraceMonitorInflation) {
        if (object->is_instance()) {
          ResourceMark rm;
          tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                        (void *) object, (intptr_t) object->mark(),
                        object->klass()->external_name());
        }
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(&event, object, cause);
      }
      return m ;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked objectMonitor pointer into the object header. A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to Self.
    // An inflateTry() method that we could call from fast_enter() and slow_enter()
    // would be useful.
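    // Added annotation (hypothetical, not part of the original): such an
    // inflateTry() would pre-set _owner to Self before publishing, so that
    // winning the CAS both inflates the object and confers ownership:
    //   m->set_owner(Self) ;
    //   if (Atomic::cmpxchg_ptr (markOopDesc::encode(m),
    //                            object->mark_addr(), mark) == mark) {
    //     return m ;   // inflated and locked in a single step
    //   }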

    assert (mark->is_neutral(), "invariant");
    ObjectMonitor * m = omAlloc (Self) ;
    // prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
    m->set_owner(NULL);

    // ... (elided: finish initializing m and CAS it into the mark word;
    //      on CAS failure, release m, then:) ...

      m = NULL ;
      continue ;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
    TEVENT(Inflate: overwrite neutral) ;
    if (TraceMonitorInflation) {
      if (object->is_instance()) {
        ResourceMark rm;
        tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                      (void *) object, (intptr_t) object->mark(),
                      object->klass()->external_name());
      }
    }
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    return m ;
  }
}

// Note that we could encounter some performance loss through false-sharing as
// multiple locks occupy the same $ line. Padding might be appropriate.


// deflate_idle_monitors() is called at all safepoints, immediately
// after all mutators are stopped, but before any objects have moved.
// It traverses the list of known monitors, deflating where possible.
// The scavenged monitors are returned to the monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point.
// Having a large number of monitors in circulation negatively
// impacts the performance of some applications (e.g., PointBase).
// Broadly, we want to minimize the # of monitors in circulation.
//
// We have added a flag, MonitorInUseLists, which creates a list