32 return 0;
33 }
34
35 inline markOop ObjectMonitor::header() const {
36 return _header;
37 }
38
// Returns the address of the _header field. The assert documents that
// the sync code relies on _header being located at offset 0, i.e. the
// first field of ObjectMonitor.
inline volatile markOop* ObjectMonitor::header_addr() {
  assert((intptr_t)this == (intptr_t)&_header, "sync code expects this");
  return &_header;
}
43
// Stores hdr into the monitor's _header field.
inline void ObjectMonitor::set_header(markOop hdr) {
  _header = hdr;
}
47
48 inline jint ObjectMonitor::waiters() const {
49 return _waiters;
50 }
51
52 inline void* ObjectMonitor::owner() const {
53 return _owner;
54 }
55
// Resets this ObjectMonitor for reuse. The asserts document the expected
// quiescent state when a monitor is cleared: no contending threads, no
// waiters, no recursion and no owner. Only _header and _object are
// actually cleared here; the counters are expected to already be zero.
inline void ObjectMonitor::clear() {
  assert(_header != NULL, "must be non-NULL");
  assert(_contentions == 0, "must be 0: contentions=%d", _contentions);
  assert(_waiters == 0, "must be 0: waiters=%d", _waiters);
  assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT, _recursions);
  assert(_object != NULL, "must be non-NULL");
  assert(_owner == NULL, "must be NULL: owner=" INTPTR_FORMAT, p2i(_owner));

  _header = NULL;
  _object = NULL;
}
67
68 inline void* ObjectMonitor::object() const {
69 return _object;
70 }
71
72 inline void* ObjectMonitor::object_addr() {
73 return (void *)(&_object);
74 }
75
// Associates this monitor with obj by storing it into _object.
inline void ObjectMonitor::set_object(void* obj) {
  _object = obj;
}
79
80 inline bool ObjectMonitor::check(TRAPS) {
81 if (THREAD != _owner) {
82 if (THREAD->is_lock_owned((address) _owner)) {
83 _owner = THREAD; // regain ownership of inflated monitor
84 assert (_recursions == 0, "invariant") ;
85 } else {
86 check_slow(THREAD);
87 return false;
88 }
89 }
90 return true;
91 }
92
93 // return number of threads contending for this monitor
94 inline jint ObjectMonitor::contentions() const {
95 return _contentions;
96 }
97
// Do NOT set _contentions = 0. There is a race such that _contentions could
// be set while inflating prior to setting _owner
// Just use Atomic::inc/dec and assert 0 when monitor put on free list
inline void ObjectMonitor::set_owner(void* owner) {
  _owner = owner;
  // Reset the recursion count for the new owner.
  _recursions = 0;
}
105
106 #endif // SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP
|
32 return 0;
33 }
34
35 inline markOop ObjectMonitor::header() const {
36 return _header;
37 }
38
// Returns the address of the _header field. The assert documents that
// the sync code relies on _header being located at offset 0, i.e. the
// first field of ObjectMonitor.
inline volatile markOop* ObjectMonitor::header_addr() {
  assert((intptr_t)this == (intptr_t)&_header, "sync code expects this");
  return &_header;
}
43
// Stores hdr into the monitor's _header field.
inline void ObjectMonitor::set_header(markOop hdr) {
  _header = hdr;
}
47
48 inline jint ObjectMonitor::waiters() const {
49 return _waiters;
50 }
51
52 // Returns NULL if DEFLATER_MARKER is observed.
53 inline void* ObjectMonitor::owner() const {
54 void* owner = _owner;
55 return owner != DEFLATER_MARKER ? owner : NULL;
56 }
57
// Resets this ObjectMonitor for reuse. The asserts document the expected
// quiescent state: no contending threads and no owner. _header is cleared
// here; the remaining checks and clearing are delegated to clear_using_JT().
inline void ObjectMonitor::clear() {
  assert(_header != NULL, "must be non-NULL");
  assert(_contentions == 0, "must be 0: contentions=%d", _contentions);
  assert(_owner == NULL, "must be NULL: owner=" INTPTR_FORMAT, p2i(_owner));

  _header = NULL;

  clear_using_JT();
}
67
// Resets this ObjectMonitor as part of the async deflation protocol
// (also reached from clear() for the common checks). The checks are
// weaker than clear()'s because a monitor being async-deflated can
// legitimately have _owner == DEFLATER_MARKER and _contentions < 0.
inline void ObjectMonitor::clear_using_JT() {
  // Unlike other *_using_JT() functions, we cannot assert
  // AsyncDeflateIdleMonitors or Thread::current()->is_Java_thread()
  // because clear() calls this function for the rest of its checks.

  if (AsyncDeflateIdleMonitors) {
    // Async deflation protocol uses the _header, _contentions and _owner
    // fields. While the ObjectMonitor being deflated is on the global free
    // list, we leave those three fields alone; _owner == DEFLATER_MARKER
    // and _contentions < 0 will force any racing threads to retry. The
    // _header field is used by install_displaced_markword_in_object()
    // in the last part of the deflation protocol so we cannot check
    // its values here.
    guarantee(_owner == NULL || _owner == DEFLATER_MARKER,
              "must be NULL or DEFLATER_MARKER: owner=" INTPTR_FORMAT,
              p2i(_owner));
    guarantee(_contentions <= 0, "must be <= 0: contentions=%d", _contentions);
  }
  assert(_waiters == 0, "must be 0: waiters=%d", _waiters);
  assert(_recursions == 0, "must be 0: recursions=" INTPTR_FORMAT, _recursions);
  assert(_object != NULL, "must be non-NULL");
  // Do not assert _ref_count == 0 here because a racing thread could
  // increment _ref_count, observe _owner == DEFLATER_MARKER and then
  // decrement _ref_count.

  // Mark the monitor Free before severing the object link.
  set_allocation_state(Free);
  _object = NULL;
  // Do not clear _ref_count here because _ref_count is for indicating
  // that the ObjectMonitor* is in use which is orthogonal to whether
  // the ObjectMonitor itself is in use for a locking operation.
}
99
100 inline void* ObjectMonitor::object() const {
101 return _object;
102 }
103
104 inline void* ObjectMonitor::object_addr() {
105 return (void *)(&_object);
106 }
107
// Associates this monitor with obj by storing it into _object.
inline void ObjectMonitor::set_object(void* obj) {
  _object = obj;
}
111
112 inline bool ObjectMonitor::check(TRAPS) {
113 if (THREAD != _owner) {
114 if (THREAD->is_lock_owned((address) _owner)) {
115 _owner = THREAD; // regain ownership of inflated monitor
116 assert (_recursions == 0, "invariant") ;
117 } else {
118 check_slow(THREAD);
119 return false;
120 }
121 }
122 return true;
123 }
124
125 // return number of threads contending for this monitor
126 inline jint ObjectMonitor::contentions() const {
127 return _contentions;
128 }
129
// Do NOT set _contentions = 0. There is a race such that _contentions could
// be set while inflating prior to setting _owner
// Just use Atomic::inc/dec and assert 0 when monitor put on free list
inline void ObjectMonitor::set_owner(void* owner) {
  _owner = owner;
  // Reset the recursion count for the new owner.
  _recursions = 0;
}
137
// Records the monitor's allocation state (e.g. Free/New/Old).
inline void ObjectMonitor::set_allocation_state(ObjectMonitor::AllocationState s) {
  _allocation_state = s;
}
141
142 inline ObjectMonitor::AllocationState ObjectMonitor::allocation_state() const {
143 return _allocation_state;
144 }
145
146 inline bool ObjectMonitor::is_free() const {
147 return _allocation_state == Free;
148 }
149
150 inline bool ObjectMonitor::is_active() const {
151 return !is_free();
152 }
153
154 inline bool ObjectMonitor::is_old() const {
155 return _allocation_state == Old;
156 }
157
158 inline bool ObjectMonitor::is_new() const {
159 return _allocation_state == New;
160 }
161
// Atomically decrements the monitor's reference count.
inline void ObjectMonitor::dec_ref_count() {
  // The decrement only needs to be MO_ACQ_REL since the reference
  // counter is volatile.
  Atomic::dec(&_ref_count);
  // NOTE(review): the guarantee re-reads _ref_count separately from the
  // decrement, so a racing inc/dec may be reflected in the value checked
  // here — presumably acceptable for a sanity check; verify.
  guarantee(_ref_count >= 0, "sanity check: ref_count=%d", _ref_count);
}
168
// Atomically increments the monitor's reference count.
inline void ObjectMonitor::inc_ref_count() {
  // The increment needs to be MO_SEQ_CST so that the reference
  // counter update is seen as soon as possible in a race with the
  // async deflation protocol.
  Atomic::inc(&_ref_count);
  // NOTE(review): the guarantee re-reads _ref_count separately from the
  // increment, so a racing dec may be reflected here; verify acceptable.
  guarantee(_ref_count > 0, "sanity check: ref_count=%d", _ref_count);
}
176
177 inline jint ObjectMonitor::ref_count() const {
178 return OrderAccess::load_acquire(&_ref_count);
179 }
180
181 #endif // SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP
|