23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/markOop.hpp"
29 #include "oops/oop.inline.hpp"
30 #include "runtime/biasedLocking.hpp"
31 #include "runtime/handles.inline.hpp"
32 #include "runtime/interfaceSupport.hpp"
33 #include "runtime/mutexLocker.hpp"
34 #include "runtime/objectMonitor.hpp"
35 #include "runtime/objectMonitor.inline.hpp"
36 #include "runtime/osThread.hpp"
37 #include "runtime/stubRoutines.hpp"
38 #include "runtime/synchronizer.hpp"
39 #include "runtime/thread.inline.hpp"
40 #include "utilities/dtrace.hpp"
41 #include "utilities/events.hpp"
42 #include "utilities/preserveException.hpp"
43 #ifdef TARGET_OS_FAMILY_linux
44 # include "os_linux.inline.hpp"
45 #endif
46 #ifdef TARGET_OS_FAMILY_solaris
47 # include "os_solaris.inline.hpp"
48 #endif
49 #ifdef TARGET_OS_FAMILY_windows
50 # include "os_windows.inline.hpp"
51 #endif
52 #ifdef TARGET_OS_FAMILY_bsd
53 # include "os_bsd.inline.hpp"
54 #endif
55
56 #if defined(__GNUC__) && !defined(PPC64)
57 // Need to inhibit inlining for older versions of GCC to avoid build-time failures
58 #define ATTR __attribute__((noinline))
59 #else
60 #define ATTR
61 #endif
62
263 // This routine is used to handle interpreter/compiler slow case
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code. Simply using the heavy-
// weight monitor should be OK, unless someone finds otherwise.
267 void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
268 fast_exit (object, lock, THREAD) ;
269 }
270
271 // -----------------------------------------------------------------------------
272 // Class Loader support to workaround deadlocks on the class loader lock objects
273 // Also used by GC
274 // complete_exit()/reenter() are used to wait on a nested lock
275 // i.e. to give up an outer lock completely and then re-enter
276 // Used when holding nested locks - lock acquisition order: lock1 then lock2
277 // 1) complete_exit lock1 - saving recursion count
278 // 2) wait on lock2
279 // 3) when notified on lock2, unlock lock2
280 // 4) reenter lock1 with original recursion count
281 // 5) lock lock2
282 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
283 intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
284 TEVENT (complete_exit) ;
285 if (UseBiasedLocking) {
286 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
287 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
288 }
289
290 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
291
292 return monitor->complete_exit(THREAD);
293 }
294
295 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
296 void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
297 TEVENT (reenter) ;
298 if (UseBiasedLocking) {
299 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
300 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
301 }
302
303 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
304
305 monitor->reenter(recursion, THREAD);
306 }
307 // -----------------------------------------------------------------------------
308 // JNI locks on java objects
309 // NOTE: must use heavy weight monitor to handle jni monitor enter
310 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
311 // the current locking is from JNI instead of Java code
312 TEVENT (jni_enter) ;
313 if (UseBiasedLocking) {
314 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
315 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
316 }
317 THREAD->set_current_pending_monitor_is_from_java(false);
318 ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
319 THREAD->set_current_pending_monitor_is_from_java(true);
320 }
321
322 // NOTE: must use heavy weight monitor to handle jni monitor enter
323 bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
324 if (UseBiasedLocking) {
325 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1293
1294 // fetch the displaced mark from the owner's stack.
1295 // The owner can't die or unwind past the lock while our INFLATING
1296 // object is in the mark. Furthermore the owner can't complete
1297 // an unlock on the object, either.
1298 markOop dmw = mark->displaced_mark_helper() ;
1299 assert (dmw->is_neutral(), "invariant") ;
1300
1301 // Setup monitor fields to proper values -- prepare the monitor
1302 m->set_header(dmw) ;
1303
1304 // Optimization: if the mark->locker stack address is associated
1305 // with this thread we could simply set m->_owner = Self and
1306 // m->OwnerIsThread = 1. Note that a thread can inflate an object
1307 // that it has stack-locked -- as might happen in wait() -- directly
1308 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
1309 m->set_owner(mark->locker());
1310 m->set_object(object);
1311 // TODO-FIXME: assert BasicLock->dhw != 0.
1312
1313 // Must preserve store ordering. The monitor state must
1314 // be stable at the time of publishing the monitor address.
1315 guarantee (object->mark() == markOopDesc::INFLATING(), "invariant") ;
1316 object->release_set_mark(markOopDesc::encode(m));
1317
1318 // Hopefully the performance counters are allocated on distinct cache lines
1319 // to avoid false sharing on MP systems ...
1320 if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
1321 TEVENT(Inflate: overwrite stacklock) ;
1322 if (TraceMonitorInflation) {
1323 if (object->is_instance()) {
1324 ResourceMark rm;
1325 tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1326 (void *) object, (intptr_t) object->mark(),
1327 object->klass()->external_name());
1328 }
1329 }
1330 return m ;
1331 }
1332
1333 // CASE: neutral
1334 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1335 // If we know we're inflating for entry it's better to inflate by swinging a
1336 // pre-locked objectMonitor pointer into the object header. A successful
1337 // CAS inflates the object *and* confers ownership to the inflating thread.
1338 // In the current implementation we use a 2-step mechanism where we CAS()
1339 // to inflate and then CAS() again to try to swing _owner from NULL to Self.
1340 // An inflateTry() method that we could call from fast_enter() and slow_enter()
1341 // would be useful.
1342
1343 assert (mark->is_neutral(), "invariant");
1344 ObjectMonitor * m = omAlloc (Self) ;
1345 // prepare m for installation - set monitor to initial state
1346 m->Recycle();
1347 m->set_header(mark);
1348 m->set_owner(NULL);
1349 m->set_object(object);
1350 m->OwnerIsThread = 1 ;
1351 m->_recursions = 0 ;
1352 m->_Responsible = NULL ;
1353 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ; // consider: keep metastats by type/class
1354
1355 if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
1356 m->set_object (NULL) ;
1357 m->set_owner (NULL) ;
1358 m->OwnerIsThread = 0 ;
1359 m->Recycle() ;
1360 omRelease (Self, m, true) ;
1361 m = NULL ;
1362 continue ;
1363 // interference - the markword changed - just retry.
1364 // The state-transitions are one-way, so there's no chance of
1365 // live-lock -- "Inflated" is an absorbing state.
1366 }
1367
1368 // Hopefully the performance counters are allocated on distinct
1369 // cache lines to avoid false sharing on MP systems ...
1370 if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
1371 TEVENT(Inflate: overwrite neutral) ;
1372 if (TraceMonitorInflation) {
1373 if (object->is_instance()) {
1374 ResourceMark rm;
1375 tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1376 (void *) object, (intptr_t) object->mark(),
1377 object->klass()->external_name());
1378 }
1379 }
1380 return m ;
1381 }
1382 }
1383
1384 // Note that we could encounter some performance loss through false-sharing as
1385 // multiple locks occupy the same $ line. Padding might be appropriate.
1386
1387
1388 // Deflate_idle_monitors() is called at all safepoints, immediately
1389 // after all mutators are stopped, but before any objects have moved.
1390 // It traverses the list of known monitors, deflating where possible.
// The scavenged monitors are returned to the monitor free list.
1392 //
1393 // Beware that we scavenge at *every* stop-the-world point.
1394 // Having a large number of monitors in-circulation negatively
1395 // impacts the performance of some applications (e.g., PointBase).
1396 // Broadly, we want to minimize the # of monitors in circulation.
1397 //
1398 // We have added a flag, MonitorInUseLists, which creates a list
1399 // of active monitors for each thread. deflate_idle_monitors()
1427 // Normal case ... The monitor is associated with obj.
1428 guarantee (obj->mark() == markOopDesc::encode(mid), "invariant") ;
1429 guarantee (mid == obj->mark()->monitor(), "invariant");
1430 guarantee (mid->header()->is_neutral(), "invariant");
1431
1432 if (mid->is_busy()) {
1433 if (ClearResponsibleAtSTW) mid->_Responsible = NULL ;
1434 deflated = false;
1435 } else {
1436 // Deflate the monitor if it is no longer being used
1437 // It's idle - scavenge and return to the global free list
1438 // plain old deflation ...
1439 TEVENT (deflate_idle_monitors - scavenge1) ;
1440 if (TraceMonitorInflation) {
1441 if (obj->is_instance()) {
1442 ResourceMark rm;
1443 tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1444 (void *) obj, (intptr_t) obj->mark(), obj->klass()->external_name());
1445 }
1446 }
1447
1448 // Restore the header back to obj
1449 obj->release_set_mark(mid->header());
1450 mid->clear();
1451
1452 assert (mid->object() == NULL, "invariant") ;
1453
1454 // Move the object to the working free list defined by FreeHead,FreeTail.
1455 if (*FreeHeadp == NULL) *FreeHeadp = mid;
1456 if (*FreeTailp != NULL) {
1457 ObjectMonitor * prevtail = *FreeTailp;
1458 assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK
1459 prevtail->FreeNext = mid;
1460 }
1461 *FreeTailp = mid;
1462 deflated = true;
1463 }
1464 return deflated;
1465 }
1466
1588
1589 // TODO: Add objectMonitor leak detection.
1590 // Audit/inventory the objectMonitors -- make sure they're all accounted for.
1591 GVars.stwRandom = os::random() ;
1592 GVars.stwCycle ++ ;
1593 }
1594
1595 // Monitor cleanup on JavaThread::exit
1596
1597 // Iterate through monitor cache and attempt to release thread's monitors
1598 // Gives up on a particular monitor if an exception occurs, but continues
1599 // the overall iteration, swallowing the exception.
1600 class ReleaseJavaMonitorsClosure: public MonitorClosure {
1601 private:
1602 TRAPS;
1603
1604 public:
1605 ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
1606 void do_monitor(ObjectMonitor* mid) {
1607 if (mid->owner() == THREAD) {
1608 (void)mid->complete_exit(CHECK);
1609 }
1610 }
1611 };
1612
1613 // Release all inflated monitors owned by THREAD. Lightweight monitors are
1614 // ignored. This is meant to be called during JNI thread detach which assumes
1615 // all remaining monitors are heavyweight. All exceptions are swallowed.
1616 // Scanning the extant monitor list can be time consuming.
1617 // A simple optimization is to add a per-thread flag that indicates a thread
1618 // called jni_monitorenter() during its lifetime.
1619 //
// Instead of No_Safepoint_Verifier it might be cheaper to
1621 // use an idiom of the form:
1622 // auto int tmp = SafepointSynchronize::_safepoint_counter ;
1623 // <code that must not run at safepoint>
1624 // guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
1625 // Since the tests are extremely cheap we could leave them enabled
1626 // for normal product builds.
1627
1628 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
|
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/markOop.hpp"
29 #include "oops/oop.inline.hpp"
30 #include "runtime/biasedLocking.hpp"
31 #include "runtime/handles.inline.hpp"
32 #include "runtime/interfaceSupport.hpp"
33 #include "runtime/mutexLocker.hpp"
34 #include "runtime/objectMonitor.hpp"
35 #include "runtime/objectMonitor.inline.hpp"
36 #include "runtime/osThread.hpp"
37 #include "runtime/stubRoutines.hpp"
38 #include "runtime/synchronizer.hpp"
39 #include "runtime/thread.inline.hpp"
40 #include "utilities/dtrace.hpp"
41 #include "utilities/events.hpp"
42 #include "utilities/preserveException.hpp"
43 #include "evtrace/traceEvents.hpp"
44 #ifdef TARGET_OS_FAMILY_linux
45 # include "os_linux.inline.hpp"
46 #endif
47 #ifdef TARGET_OS_FAMILY_solaris
48 # include "os_solaris.inline.hpp"
49 #endif
50 #ifdef TARGET_OS_FAMILY_windows
51 # include "os_windows.inline.hpp"
52 #endif
53 #ifdef TARGET_OS_FAMILY_bsd
54 # include "os_bsd.inline.hpp"
55 #endif
56
57 #if defined(__GNUC__) && !defined(PPC64)
58 // Need to inhibit inlining for older versions of GCC to avoid build-time failures
59 #define ATTR __attribute__((noinline))
60 #else
61 #define ATTR
62 #endif
63
264 // This routine is used to handle interpreter/compiler slow case
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code. Simply using the heavy-
// weight monitor should be OK, unless someone finds otherwise.
268 void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
269 fast_exit (object, lock, THREAD) ;
270 }
271
272 // -----------------------------------------------------------------------------
273 // Class Loader support to workaround deadlocks on the class loader lock objects
274 // Also used by GC
275 // complete_exit()/reenter() are used to wait on a nested lock
276 // i.e. to give up an outer lock completely and then re-enter
277 // Used when holding nested locks - lock acquisition order: lock1 then lock2
278 // 1) complete_exit lock1 - saving recursion count
279 // 2) wait on lock2
280 // 3) when notified on lock2, unlock lock2
281 // 4) reenter lock1 with original recursion count
282 // 5) lock lock2
283 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
284 void ObjectSynchronizer::complete_exit(Handle obj, intptr_t *saved_recursions, intptr_t *saved_trace_exit_stack, TRAPS) {
285 TEVENT (complete_exit) ;
286 if (UseBiasedLocking) {
287 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
288 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
289 }
290
291 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
292
293 monitor->complete_exit(saved_recursions, saved_trace_exit_stack, THREAD);
294 }
295
296 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
297 void ObjectSynchronizer::reenter(Handle obj, intptr_t saved_recursions, intptr_t saved_trace_exit_stack, TRAPS) {
298 TEVENT (reenter) ;
299 if (UseBiasedLocking) {
300 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
301 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
302 }
303
304 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
305
306 monitor->reenter(saved_recursions, saved_trace_exit_stack, THREAD);
307 }
308 // -----------------------------------------------------------------------------
309 // JNI locks on java objects
310 // NOTE: must use heavy weight monitor to handle jni monitor enter
311 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
312 // the current locking is from JNI instead of Java code
313 TEVENT (jni_enter) ;
314 if (UseBiasedLocking) {
315 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
316 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
317 }
318 THREAD->set_current_pending_monitor_is_from_java(false);
319 ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
320 THREAD->set_current_pending_monitor_is_from_java(true);
321 }
322
323 // NOTE: must use heavy weight monitor to handle jni monitor enter
324 bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
325 if (UseBiasedLocking) {
326 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
1294
1295 // fetch the displaced mark from the owner's stack.
1296 // The owner can't die or unwind past the lock while our INFLATING
1297 // object is in the mark. Furthermore the owner can't complete
1298 // an unlock on the object, either.
1299 markOop dmw = mark->displaced_mark_helper() ;
1300 assert (dmw->is_neutral(), "invariant") ;
1301
1302 // Setup monitor fields to proper values -- prepare the monitor
1303 m->set_header(dmw) ;
1304
1305 // Optimization: if the mark->locker stack address is associated
1306 // with this thread we could simply set m->_owner = Self and
1307 // m->OwnerIsThread = 1. Note that a thread can inflate an object
1308 // that it has stack-locked -- as might happen in wait() -- directly
1309 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
1310 m->set_owner(mark->locker());
1311 m->set_object(object);
1312 // TODO-FIXME: assert BasicLock->dhw != 0.
1313
1314 // must get a sequence number before the monitor is published below
1315 No_Safepoint_Verifier nsv(true, false);
1316 intptr_t trace_seq;
1317 if (EnableEventTracing) {
1318 trace_seq = m->next_trace_seq();
1319 }
1320
1321 // Must preserve store ordering. The monitor state must
1322 // be stable at the time of publishing the monitor address.
1323 guarantee (object->mark() == markOopDesc::INFLATING(), "invariant") ;
1324 object->release_set_mark(markOopDesc::encode(m));
1325
1326 // Hopefully the performance counters are allocated on distinct cache lines
1327 // to avoid false sharing on MP systems ...
1328 if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
1329 TEVENT(Inflate: overwrite stacklock) ;
1330 if (TraceMonitorInflation) {
1331 if (object->is_instance()) {
1332 ResourceMark rm;
1333 tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1334 (void *) object, (intptr_t) object->mark(),
1335 object->klass()->external_name());
1336 }
1337 }
1338 if (EnableEventTracing) {
1339 TraceEvents::write_monitor_inflate(m, trace_seq);
1340 }
1341 return m ;
1342 }
1343
1344 // CASE: neutral
1345 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1346 // If we know we're inflating for entry it's better to inflate by swinging a
1347 // pre-locked objectMonitor pointer into the object header. A successful
1348 // CAS inflates the object *and* confers ownership to the inflating thread.
1349 // In the current implementation we use a 2-step mechanism where we CAS()
1350 // to inflate and then CAS() again to try to swing _owner from NULL to Self.
1351 // An inflateTry() method that we could call from fast_enter() and slow_enter()
1352 // would be useful.
1353
1354 assert (mark->is_neutral(), "invariant");
1355 ObjectMonitor * m = omAlloc (Self) ;
1356 // prepare m for installation - set monitor to initial state
1357 m->Recycle();
1358 m->set_header(mark);
1359 m->set_owner(NULL);
1360 m->set_object(object);
1361 m->OwnerIsThread = 1 ;
1362 m->_recursions = 0 ;
1363 m->_Responsible = NULL ;
1364 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ; // consider: keep metastats by type/class
1365
1366 // must get a sequence number before the monitor is published below
1367 No_Safepoint_Verifier nsv(true, false);
1368 intptr_t trace_seq;
1369 if (EnableEventTracing) {
1370 trace_seq = m->next_trace_seq();
1371 }
1372
1373 if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
1374 m->set_object (NULL) ;
1375 m->set_owner (NULL) ;
1376 m->OwnerIsThread = 0 ;
1377 m->Recycle() ;
1378 omRelease (Self, m, true) ;
1379
1380 if (EnableEventTracing) { // must still consume our sequence number
1381 TraceEvents::write_monitor_dummy(m, trace_seq);
1382 }
1383
1384 m = NULL ;
1385 continue ;
1386 // interference - the markword changed - just retry.
1387 // The state-transitions are one-way, so there's no chance of
1388 // live-lock -- "Inflated" is an absorbing state.
1389 }
1390
1391 // Hopefully the performance counters are allocated on distinct
1392 // cache lines to avoid false sharing on MP systems ...
1393 if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
1394 TEVENT(Inflate: overwrite neutral) ;
1395 if (TraceMonitorInflation) {
1396 if (object->is_instance()) {
1397 ResourceMark rm;
1398 tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1399 (void *) object, (intptr_t) object->mark(),
1400 object->klass()->external_name());
1401 }
1402 }
1403 if (EnableEventTracing) {
1404 TraceEvents::write_monitor_inflate(m, trace_seq);
1405 }
1406 return m ;
1407 }
1408 }
1409
1410 // Note that we could encounter some performance loss through false-sharing as
1411 // multiple locks occupy the same $ line. Padding might be appropriate.
1412
1413
1414 // Deflate_idle_monitors() is called at all safepoints, immediately
1415 // after all mutators are stopped, but before any objects have moved.
1416 // It traverses the list of known monitors, deflating where possible.
// The scavenged monitors are returned to the monitor free list.
1418 //
1419 // Beware that we scavenge at *every* stop-the-world point.
1420 // Having a large number of monitors in-circulation negatively
1421 // impacts the performance of some applications (e.g., PointBase).
1422 // Broadly, we want to minimize the # of monitors in circulation.
1423 //
1424 // We have added a flag, MonitorInUseLists, which creates a list
1425 // of active monitors for each thread. deflate_idle_monitors()
1453 // Normal case ... The monitor is associated with obj.
1454 guarantee (obj->mark() == markOopDesc::encode(mid), "invariant") ;
1455 guarantee (mid == obj->mark()->monitor(), "invariant");
1456 guarantee (mid->header()->is_neutral(), "invariant");
1457
1458 if (mid->is_busy()) {
1459 if (ClearResponsibleAtSTW) mid->_Responsible = NULL ;
1460 deflated = false;
1461 } else {
1462 // Deflate the monitor if it is no longer being used
1463 // It's idle - scavenge and return to the global free list
1464 // plain old deflation ...
1465 TEVENT (deflate_idle_monitors - scavenge1) ;
1466 if (TraceMonitorInflation) {
1467 if (obj->is_instance()) {
1468 ResourceMark rm;
1469 tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
1470 (void *) obj, (intptr_t) obj->mark(), obj->klass()->external_name());
1471 }
1472 }
1473 if (EnableEventTracing) {
1474 TraceEvents::write_monitor_deflate(mid);
1475 }
1476
1477 // Restore the header back to obj
1478 obj->release_set_mark(mid->header());
1479 mid->clear();
1480
1481 assert (mid->object() == NULL, "invariant") ;
1482
1483 // Move the object to the working free list defined by FreeHead,FreeTail.
1484 if (*FreeHeadp == NULL) *FreeHeadp = mid;
1485 if (*FreeTailp != NULL) {
1486 ObjectMonitor * prevtail = *FreeTailp;
1487 assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK
1488 prevtail->FreeNext = mid;
1489 }
1490 *FreeTailp = mid;
1491 deflated = true;
1492 }
1493 return deflated;
1494 }
1495
1617
1618 // TODO: Add objectMonitor leak detection.
1619 // Audit/inventory the objectMonitors -- make sure they're all accounted for.
1620 GVars.stwRandom = os::random() ;
1621 GVars.stwCycle ++ ;
1622 }
1623
1624 // Monitor cleanup on JavaThread::exit
1625
1626 // Iterate through monitor cache and attempt to release thread's monitors
1627 // Gives up on a particular monitor if an exception occurs, but continues
1628 // the overall iteration, swallowing the exception.
1629 class ReleaseJavaMonitorsClosure: public MonitorClosure {
1630 private:
1631 TRAPS;
1632
1633 public:
1634 ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
1635 void do_monitor(ObjectMonitor* mid) {
1636 if (mid->owner() == THREAD) {
1637 mid->complete_exit(NULL, NULL, CHECK);
1638 }
1639 }
1640 };
1641
1642 // Release all inflated monitors owned by THREAD. Lightweight monitors are
1643 // ignored. This is meant to be called during JNI thread detach which assumes
1644 // all remaining monitors are heavyweight. All exceptions are swallowed.
1645 // Scanning the extant monitor list can be time consuming.
1646 // A simple optimization is to add a per-thread flag that indicates a thread
1647 // called jni_monitorenter() during its lifetime.
1648 //
// Instead of No_Safepoint_Verifier it might be cheaper to
1650 // use an idiom of the form:
1651 // auto int tmp = SafepointSynchronize::_safepoint_counter ;
1652 // <code that must not run at safepoint>
1653 // guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
1654 // Since the tests are extremely cheap we could leave them enabled
1655 // for normal product builds.
1656
1657 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
|