inline volatile markOop* ObjectMonitor::header_addr() {
  assert((intptr_t)this == (intptr_t)&_header, "sync code expects this");
  return &_header;
}

inline void ObjectMonitor::set_header(markOop hdr) {
  _header = hdr;
}

inline jint ObjectMonitor::waiters() const {
  return _waiters;
}

// Returns NULL if DEFLATER_MARKER is observed.
inline void* ObjectMonitor::owner() const {
  void* owner = _owner;
  return owner != DEFLATER_MARKER ? owner : NULL;
}

inline void ObjectMonitor::clear() {
  assert(_contentions == 0, "must be 0: contentions=%d", _contentions);
  assert(_owner == NULL, "must be NULL: owner=" INTPTR_FORMAT, p2i(_owner));

  clear_using_JT();
}

inline void ObjectMonitor::clear_using_JT() {
  // When clearing using a JavaThread, we leave _owner == DEFLATER_MARKER
  // and _contentions < 0 to force any racing threads to retry. Unlike other
  // *_using_JT() functions, we cannot assert AsyncDeflateIdleMonitors
  // or Thread::current()->is_Java_thread() because clear() calls this
  // function for the rest of its checks.

  assert(_header != NULL, "must be non-NULL");
  assert(_waiters == 0, "must be 0: waiters=%d", _waiters);
  assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT, _recursions);
  assert(_object != NULL, "must be non-NULL");
  // Do not assert _ref_count == 0 here because a racing thread could
  // increment _ref_count, observe _owner == DEFLATER_MARKER and then
  // decrement _ref_count.

  set_allocation_state(Free);
  _header = NULL;
  _object = NULL;
  // Do not clear _ref_count here because _ref_count is for indicating
  // that the ObjectMonitor* is in use, which is orthogonal to whether
  // the ObjectMonitor itself is in use for a locking operation.
}

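// A hedged sketch (illustration only, not code from this file) of the racing
// pattern described in clear_using_JT() above; "om" is a hypothetical
// ObjectMonitor* that a thread examines while async deflation may be in
// progress:
//
//   om->inc_ref_count();
//   if (/* _owner is observed to be DEFLATER_MARKER */) {
//     // The monitor is being async deflated so back off and retry:
//     om->dec_ref_count();
//   }
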
inline void* ObjectMonitor::object() const {
  return _object;
}

inline void* ObjectMonitor::object_addr() {
  return (void *)(&_object);
}

inline void ObjectMonitor::set_object(void* obj) {
  _object = obj;
}

inline bool ObjectMonitor::check(TRAPS) {
  if (THREAD != _owner) {
  // ...
  return _allocation_state;
}

inline bool ObjectMonitor::is_free() const {
  return _allocation_state == Free;
}

inline bool ObjectMonitor::is_active() const {
  return !is_free();
}

inline bool ObjectMonitor::is_old() const {
  return _allocation_state == Old;
}

inline bool ObjectMonitor::is_new() const {
  return _allocation_state == New;
}

inline void ObjectMonitor::dec_ref_count() {
  // The decrement needs to be MO_ACQ_REL. At the moment, the Atomic::dec
  // backend on PPC does not yet conform to these requirements. Therefore
  // the decrement is simulated with an Atomic::sub(1, &addr). Without
  // this MO_ACQ_REL Atomic::dec simulation, AsyncDeflateIdleMonitors is
  // not safe.
  Atomic::sub((jint)1, &_ref_count);
  guarantee(_ref_count >= 0, "sanity check: ref_count=%d", _ref_count);
}

inline void ObjectMonitor::inc_ref_count() {
  // The increment needs to be MO_SEQ_CST. At the moment, the Atomic::inc
  // backend on PPC does not yet conform to these requirements. Therefore
  // the increment is simulated with a load phi; cas phi + 1; loop.
  // Without this MO_SEQ_CST Atomic::inc simulation, AsyncDeflateIdleMonitors
  // is not safe.
  for (;;) {
    jint sample = OrderAccess::load_acquire(&_ref_count);
    guarantee(sample >= 0, "sanity check: sample=%d", (int)sample);
    if (Atomic::cmpxchg(sample + 1, &_ref_count, sample) == sample) {
      // Incremented _ref_count without interference.
      return;
    }
    // Implied else: Saw interference so loop and try again.
  }
}

inline jint ObjectMonitor::ref_count() const {
  return OrderAccess::load_acquire(&_ref_count);
}

#endif // SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP

inline volatile markOop* ObjectMonitor::header_addr() {
  assert((intptr_t)this == (intptr_t)&_header, "sync code expects this");
  return &_header;
}

inline void ObjectMonitor::set_header(markOop hdr) {
  _header = hdr;
}

inline jint ObjectMonitor::waiters() const {
  return _waiters;
}

// Returns NULL if DEFLATER_MARKER is observed.
inline void* ObjectMonitor::owner() const {
  void* owner = _owner;
  return owner != DEFLATER_MARKER ? owner : NULL;
}

inline void ObjectMonitor::clear() {
  assert(_header != NULL, "must be non-NULL");
  assert(_contentions == 0, "must be 0: contentions=%d", _contentions);
  assert(_owner == NULL, "must be NULL: owner=" INTPTR_FORMAT, p2i(_owner));

  _header = NULL;

  clear_using_JT();
}

inline void ObjectMonitor::clear_using_JT() {
  // Unlike other *_using_JT() functions, we cannot assert
  // AsyncDeflateIdleMonitors or Thread::current()->is_Java_thread()
  // because clear() calls this function for the rest of its checks.

  if (AsyncDeflateIdleMonitors) {
    // The async deflation protocol uses the _header, _contentions and _owner
    // fields. While the ObjectMonitor being deflated is on the global free
    // list, we leave those three fields alone; _owner == DEFLATER_MARKER
    // and _contentions < 0 will force any racing threads to retry. The
    // _header field is used by install_displaced_markword_in_object()
    // in the last part of the deflation protocol, so we cannot check
    // its value here.
    guarantee(_owner == NULL || _owner == DEFLATER_MARKER,
              "must be NULL or DEFLATER_MARKER: owner=" INTPTR_FORMAT,
              p2i(_owner));
    guarantee(_contentions <= 0, "must be <= 0: contentions=%d", _contentions);
  }
  assert(_waiters == 0, "must be 0: waiters=%d", _waiters);
  assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT, _recursions);
  assert(_object != NULL, "must be non-NULL");
  // Do not assert _ref_count == 0 here because a racing thread could
  // increment _ref_count, observe _owner == DEFLATER_MARKER and then
  // decrement _ref_count.

  set_allocation_state(Free);
  _object = NULL;
  // Do not clear _ref_count here because _ref_count is for indicating
  // that the ObjectMonitor* is in use, which is orthogonal to whether
  // the ObjectMonitor itself is in use for a locking operation.
}

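// A rough, hedged sketch of the deflater side of the protocol described in
// the comment in clear_using_JT() above. This is an illustration only: the
// real work lives outside this file, and the exact ordering, failure
// handling and the sentinel written into _contentions are assumptions here,
// not a statement of the actual implementation:
//
//   // Claim an unowned monitor so contending threads observe DEFLATER_MARKER:
//   if (Atomic::cmpxchg(DEFLATER_MARKER, &_owner, (void*)NULL) == NULL) {
//     // Drive _contentions negative so racing enter attempts retry:
//     if (Atomic::cmpxchg(some_negative_value, &_contentions, (jint)0) == 0) {
//       // The last step restores the object's displaced mark word; it
//       // consumes _header, which is why clear_using_JT() above cannot
//       // check that field:
//       install_displaced_markword_in_object(...);
//     }
//   }
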
inline void* ObjectMonitor::object() const {
  return _object;
}

inline void* ObjectMonitor::object_addr() {
  return (void *)(&_object);
}

inline void ObjectMonitor::set_object(void* obj) {
  _object = obj;
}

inline bool ObjectMonitor::check(TRAPS) {
  if (THREAD != _owner) {
  // ...
  return _allocation_state;
}

inline bool ObjectMonitor::is_free() const {
  return _allocation_state == Free;
}

inline bool ObjectMonitor::is_active() const {
  return !is_free();
}

inline bool ObjectMonitor::is_old() const {
  return _allocation_state == Old;
}

inline bool ObjectMonitor::is_new() const {
  return _allocation_state == New;
}

inline void ObjectMonitor::dec_ref_count() {
  // The decrement only needs to be MO_ACQ_REL since the reference
  // counter is volatile.
  Atomic::dec(&_ref_count);
  guarantee(_ref_count >= 0, "sanity check: ref_count=%d", _ref_count);
}

inline void ObjectMonitor::inc_ref_count() {
  // The increment needs to be MO_SEQ_CST so that the reference
  // counter update is seen as soon as possible in a race with the
  // async deflation protocol.
  Atomic::inc(&_ref_count);
  guarantee(_ref_count > 0, "sanity check: ref_count=%d", _ref_count);
}
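// A hedged sketch of the race that the memory-ordering comments above guard
// against. The deflater-side check of ref_count() is an assumption about
// code outside this file, shown only to illustrate why the increment must
// become visible as early as possible:
//
//   examining JavaThread:            async deflating thread:
//     om->inc_ref_count();             if (om->ref_count() != 0) {
//     // ... use *om ...                 // in use; skip this monitor
//     om->dec_ref_count();             }
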

inline jint ObjectMonitor::ref_count() const {
  return OrderAccess::load_acquire(&_ref_count);
}

#endif // SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP