#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#if INCLUDE_TRACE
#include "trace/tracing.hpp"
#endif

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
// for instance. If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \

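// gInflationLocks are striped spin locks; ReadStableMark() hashes an object's
// address to one of these slots and stalls there while some other thread
// holds that object's mark word in the transient INFLATING state.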
#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
// gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
// want to expose the PaddedEnd template more than necessary.
ObjectMonitor * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL;
// global monitor in-use list, used for moribund threads: the monitors
// they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;

static volatile intptr_t gListLock = 0;      // protects global monitor lists
static volatile int gMonitorFreeCount = 0;   // # on gFreeList
static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation

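// CHAINMARKER is a sentinel stored in the object field of the first monitor
// in each block handed out by omAlloc(); it distinguishes block headers from
// monitors associated with a real Java object.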
#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance. In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
    gOmInUseCount += inUseTally;
  }

  Thread::muxRelease(&gListLock);
  TEVENT(omFlush);
}

// Fast path code shared by multiple functions
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
  markOop mark = obj->mark();
  if (mark->has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
    return mark->monitor();
  }
  return ObjectSynchronizer::inflate(Thread::current(),
                                     obj,
                                     inflate_cause_vm_internal);
}
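
// Illustrative (hypothetical) call pattern, shown only to document the
// contract above; real call sites live elsewhere in the runtime:
//   ObjectMonitor* mon = ObjectSynchronizer::inflate_helper(obj);
//   // mon is never NULL and mon->header()->is_neutral() holds on return.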

#if INCLUDE_TRACE
static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != NULL, "invariant");
  if (event->should_commit()) {
    event->set_monitorClass(obj->klass());
    event->set_address((TYPE_ADDRESS)(uintptr_t)(void*)obj);
    event->set_cause((u1)cause);
    event->commit();
  }
}
#endif

ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
                                           oop object,
                                           const InflateCause cause) {

  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

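  // TRACE_ONLY (from utilities/macros.hpp, included above) compiles its
  // argument only when INCLUDE_TRACE is enabled, so the event local is
  // elided entirely in non-trace builds.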
  TRACE_ONLY(EventJavaMonitorInflate event;)

  for (;;) {
    const markOop mark = object->mark();
    assert(!mark->has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // *  Inflated     - just return
    // *  Stack-locked - coerce it to inflated
    // *  INFLATING    - busy wait for conversion to complete
    // *  Neutral      - aggressively inflate the object.
    // *  BIASED       - Illegal.  We should never see this

    // CASE: inflated
    if (mark->has_monitor()) {
      ObjectMonitor * inf = mark->monitor();
      assert(inf->header()->is_neutral(), "invariant");
      assert(inf->object() == object, "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      return inf;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
    // We could always eliminate polling by parking the thread on some auxiliary list.
    if (mark == markOopDesc::INFLATING()) {
      TEVENT(Inflate: spin while INFLATING);
      ReadStableMark(object);
      continue;
    }

    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the objectmonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word. We originally installed INFLATING,
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
      object->release_set_mark(markOopDesc::encode(m));

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      OM_PERFDATA_OP(Inflations, inc());
      TEVENT(Inflate: overwrite stacklock);
      if (log_is_enabled(Debug, monitorinflation)) {
        if (object->is_instance()) {
          ResourceMark rm;
          log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                                      p2i(object), p2i(object->mark()),
                                      object->klass()->external_name());
        }
      }
      TRACE_ONLY(post_monitor_inflate_event(&event, object, cause);)
      return m;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked objectMonitor pointer into the object header. A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to Self.
    // An inflateTry() method that we could call from fast_enter() and slow_enter()
    // would be useful.
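    //
    // A minimal sketch of such an inflateTry(), reusing the machinery in
    // this file (hypothetical -- no such method exists today):
    //   ObjectMonitor * m = omAlloc(Self);
    //   m->Recycle();
    //   m->set_header(mark);
    //   m->set_owner(Self);           // pre-locked: owner set before install
    //   m->set_object(object);
    //   if (object->cas_set_mark(markOopDesc::encode(m), mark) == mark) {
    //     return m;                   // inflated *and* entered in one CAS
    //   }
    //   m->set_owner(NULL); m->set_object(NULL); omRelease(Self, m, true);
    //   // interference: fall back to the existing 2-step slow path.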

    assert(mark->is_neutral(), "invariant");
    ObjectMonitor * m = omAlloc(Self);
    // prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
    m->set_owner(NULL);
    m->set_object(object);
      omRelease(Self, m, true);
      m = NULL;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    TEVENT(Inflate: overwrite neutral);
    if (log_is_enabled(Debug, monitorinflation)) {
      if (object->is_instance()) {
        ResourceMark rm;
        log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                                    p2i(object), p2i(object->mark()),
                                    object->klass()->external_name());
      }
    }
    TRACE_ONLY(post_monitor_inflate_event(&event, object, cause);)
    return m;
  }
}


// Deflate_idle_monitors() is called at all safepoints, immediately
// after all mutators are stopped, but before any objects have moved.
// It traverses the list of known monitors, deflating where possible.
// The scavenged monitors are returned to the monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point.
// Having a large number of monitors in-circulation negatively
// impacts the performance of some applications (e.g., PointBase).
// Broadly, we want to minimize the # of monitors in circulation.
//
// We have added a flag, MonitorInUseLists, which creates a list
// of active monitors for each thread. deflate_idle_monitors()
// only scans the per-thread in-use lists. omAlloc() puts all
// assigned monitors on the per-thread list. deflate_idle_monitors()
// returns the non-busy monitors to the global free list.
  ObjectSynchronizer::monitors_iterate(&rjmc);
  Thread::muxRelease(&gListLock);
  THREAD->clear_pending_exception();
}

const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal:    return "VM Internal";
    case inflate_cause_monitor_enter:  return "Monitor Enter";
    case inflate_cause_wait:           return "Monitor Wait";
    case inflate_cause_notify:         return "Monitor Notify";
    case inflate_cause_hash_code:      return "Monitor Hash Code";
    case inflate_cause_jni_enter:      return "JNI Monitor Enter";
    case inflate_cause_jni_exit:       return "JNI Monitor Exit";
    default:
      ShouldNotReachHere();
  }
  return "Unknown";
}
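
// Illustrative use only (hypothetical call site, for documentation):
//   tty->print_cr("inflation cause: %s",
//                 ObjectSynchronizer::inflate_cause_name(cause));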

//------------------------------------------------------------------------------
// Debugging code

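// sanity_checks() computes field offsets within SharedGlobals; judging from
// the cache_line_size parameter, the intent is to verify that the hot fields
// stwRandom and hcSequence land on distinct cache lines and so cannot
// falsely share.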
void ObjectSynchronizer::sanity_checks(const bool verbose,
                                       const uint cache_line_size,
                                       int *error_cnt_ptr,
                                       int *warning_cnt_ptr) {
  u_char *addr_begin = (u_char*)&GVars;
  u_char *addr_stwRandom = (u_char*)&GVars.stwRandom;
  u_char *addr_hcSequence = (u_char*)&GVars.hcSequence;

  if (verbose) {
    tty->print_cr("INFO: sizeof(SharedGlobals)=" SIZE_FORMAT,
                  sizeof(SharedGlobals));
  }

  uint offset_stwRandom = (uint)(addr_stwRandom - addr_begin);
  if (verbose) tty->print_cr("INFO: offset(stwRandom)=%u", offset_stwRandom);

  uint offset_hcSequence = (uint)(addr_hcSequence - addr_begin);