155 // That is, we find a notify() or notifyAll() call that immediately precedes
156 // the monitorexit operation. In that case the JIT could fuse the operations
157 // into a single notifyAndExit() runtime primitive.
158
159 bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
160 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
161 assert(self->is_Java_thread(), "invariant");
162 assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
163 NoSafepointVerifier nsv;
164 if (obj == NULL) return false; // slow-path for invalid obj
165 const markOop mark = obj->mark();
166
167 if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
168 // Degenerate notify
169 // stack-locked by caller so by definition the implied waitset is empty.
170 return true;
171 }
172
173 if (mark->has_monitor()) {
174 ObjectMonitor * const mon = mark->monitor();
175 assert(mon->object() == obj, "invariant");
176 if (mon->owner() != self) return false; // slow-path for IMS exception
177
178 if (mon->first_waiter() != NULL) {
179 // We have one or more waiters. Since this is an inflated monitor
180 // that we own, we can transfer one or more threads from the waitset
181 // to the entrylist here and now, avoiding the slow-path.
182 if (all) {
183 DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
184 } else {
185 DTRACE_MONITOR_PROBE(notify, mon, obj, self);
186 }
187 int tally = 0;
188 do {
189 mon->INotify(self);
190 ++tally;
191 } while (mon->first_waiter() != NULL && all);
192 OM_PERFDATA_OP(Notifications, inc(tally));
193 }
194 return true;
|
155 // That is, we find a notify() or notifyAll() call that immediately precedes
156 // the monitorexit operation. In that case the JIT could fuse the operations
157 // into a single notifyAndExit() runtime primitive.
158
159 bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
160 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
161 assert(self->is_Java_thread(), "invariant");
162 assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
163 NoSafepointVerifier nsv;
164 if (obj == NULL) return false; // slow-path for invalid obj
165 const markOop mark = obj->mark();
166
167 if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
168 // Degenerate notify
169 // stack-locked by caller so by definition the implied waitset is empty.
170 return true;
171 }
172
173 if (mark->has_monitor()) {
174 ObjectMonitor * const mon = mark->monitor();
175 assert(oopDesc::equals((oop) mon->object(), obj), "invariant");
176 if (mon->owner() != self) return false; // slow-path for IMS exception
177
178 if (mon->first_waiter() != NULL) {
179 // We have one or more waiters. Since this is an inflated monitor
180 // that we own, we can transfer one or more threads from the waitset
181 // to the entrylist here and now, avoiding the slow-path.
182 if (all) {
183 DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
184 } else {
185 DTRACE_MONITOR_PROBE(notify, mon, obj, self);
186 }
187 int tally = 0;
188 do {
189 mon->INotify(self);
190 ++tally;
191 } while (mon->first_waiter() != NULL && all);
192 OM_PERFDATA_OP(Notifications, inc(tally));
193 }
194 return true;
|
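The comment above quick_notify() (lines 155-157 in the listing) describes the bytecode shape this fast path targets: a notify() or notifyAll() that is the last action before the monitor is released. Below is a minimal Java sketch of that pattern; the BoundedBuffer class and its fields are invented purely for illustration, and the consumer's wait() shows where the waiters that quick_notify() transfers from the waitset to the entrylist come from.

// The notify() in put() is immediately followed by the monitorexit emitted
// for the synchronized block -- the shape a JIT could fuse into a single
// notifyAndExit()-style runtime call, as the comment above suggests.
class BoundedBuffer {
    private final java.util.ArrayDeque<Object> items = new java.util.ArrayDeque<>();

    void put(Object item) {
        synchronized (this) {
            items.add(item);
            notify();                    // notify() immediately precedes...
        }                                // ...the monitorexit for this block
    }

    Object take() throws InterruptedException {
        synchronized (this) {
            while (items.isEmpty()) {
                wait();                  // threads blocked here populate the waitset
            }
            return items.remove();
        }
    }
}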
199 }
200
201
202 // The LockNode emitted directly at the synchronization site would have
203 // been too big if it were to have included support for the cases of inflated
204 // recursive enter and exit, so they go here instead.
205 // Note that we can't safely call AsyncPrintJavaStack() from within
206 // quick_enter() as our thread state remains _in_Java.
207
208 bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
209 BasicLock * lock) {
210 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
211 assert(Self->is_Java_thread(), "invariant");
212 assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
213 NoSafepointVerifier nsv;
214 if (obj == NULL) return false; // Need to throw NPE
215 const markOop mark = obj->mark();
216
217 if (mark->has_monitor()) {
218 ObjectMonitor * const m = mark->monitor();
219 assert(m->object() == obj, "invariant");
220 Thread * const owner = (Thread *) m->_owner;
221
222 // Lock contention and Transactional Lock Elision (TLE) diagnostics
223 // and observability
224 // Case: light contention possibly amenable to TLE
225 // Case: TLE inimical operations such as nested/recursive synchronization
226
227 if (owner == Self) {
228 m->_recursions++;
229 return true;
230 }
231
232 // This Java Monitor is inflated so obj's header will never be
233 // displaced to this thread's BasicLock. Make the displaced header
234 // non-NULL so this BasicLock is not seen as recursive nor as
235 // being locked. We do this unconditionally so that this thread's
236 // BasicLock cannot be mis-interpreted by any stack walkers. For
237 // performance reasons, stack walkers generally first check for
238 // Biased Locking in the object's header, the second check is for
|
199 }
200
201
202 // The LockNode emitted directly at the synchronization site would have
203 // been too big if it were to have included support for the cases of inflated
204 // recursive enter and exit, so they go here instead.
205 // Note that we can't safely call AsyncPrintJavaStack() from within
206 // quick_enter() as our thread state remains _in_Java.
207
208 bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
209 BasicLock * lock) {
210 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
211 assert(Self->is_Java_thread(), "invariant");
212 assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
213 NoSafepointVerifier nsv;
214 if (obj == NULL) return false; // Need to throw NPE
215 const markOop mark = obj->mark();
216
217 if (mark->has_monitor()) {
218 ObjectMonitor * const m = mark->monitor();
219 assert(oopDesc::equals((oop) m->object(), obj), "invariant");
220 Thread * const owner = (Thread *) m->_owner;
221
222 // Lock contention and Transactional Lock Elision (TLE) diagnostics
223 // and observability
224 // Case: light contention possibly amenable to TLE
225 // Case: TLE inimical operations such as nested/recursive synchronization
226
227 if (owner == Self) {
228 m->_recursions++;
229 return true;
230 }
231
232 // This Java Monitor is inflated so obj's header will never be
233 // displaced to this thread's BasicLock. Make the displaced header
234 // non-NULL so this BasicLock is not seen as recursive nor as
235 // being locked. We do this unconditionally so that this thread's
236 // BasicLock cannot be mis-interpreted by any stack walkers. For
237 // performance reasons, stack walkers generally first check for
238 // Biased Locking in the object's header, the second check is for
|
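The owner == Self branch in quick_enter() (line 227) is the inflated recursive-enter case that the LockNode comment says was moved out of line. The following Java sketch shows the triggering pattern; the class name is invented, and it assumes the monitor for lock has already been inflated (for example by earlier contention), since otherwise the nested enter would be handled as an ordinary stack-lock.

class RecursiveLockExample {
    private final Object lock = new Object();

    void update() {
        synchronized (lock) {       // outer monitorenter: this thread becomes the owner
            synchronized (lock) {   // inner monitorenter on an inflated monitor:
                                    // owner == Self, so quick_enter() just bumps
                                    // _recursions and returns true
                // critical section
            }
        }
    }
}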
1386 assert(Universe::verify_in_progress() ||
1387 !SafepointSynchronize::is_at_safepoint(), "invariant");
1388
1389 EventJavaMonitorInflate event;
1390
1391 for (;;) {
1392 const markOop mark = object->mark();
1393 assert(!mark->has_bias_pattern(), "invariant");
1394
1395 // The mark can be in one of the following states:
1396 // * Inflated - just return
1397 // * Stack-locked - coerce it to inflated
1398 // * INFLATING - busy wait for conversion to complete
1399 // * Neutral - aggressively inflate the object.
1400 // * BIASED - Illegal. We should never see this
1401
1402 // CASE: inflated
1403 if (mark->has_monitor()) {
1404 ObjectMonitor * inf = mark->monitor();
1405 assert(inf->header()->is_neutral(), "invariant");
1406 assert(inf->object() == object, "invariant");
1407 assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1408 return inf;
1409 }
1410
1411 // CASE: inflation in progress - inflating over a stack-lock.
1412 // Some other thread is converting from stack-locked to inflated.
1413 // Only that thread can complete inflation -- other threads must wait.
1414 // The INFLATING value is transient.
1415 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1416 // We could always eliminate polling by parking the thread on some auxiliary list.
1417 if (mark == markOopDesc::INFLATING()) {
1418 TEVENT(Inflate: spin while INFLATING);
1419 ReadStableMark(object);
1420 continue;
1421 }
1422
1423 // CASE: stack-locked
1424 // Could be stack-locked either by this thread or by some other thread.
1425 //
|
1386 assert(Universe::verify_in_progress() ||
1387 !SafepointSynchronize::is_at_safepoint(), "invariant");
1388
1389 EventJavaMonitorInflate event;
1390
1391 for (;;) {
1392 const markOop mark = object->mark();
1393 assert(!mark->has_bias_pattern(), "invariant");
1394
1395 // The mark can be in one of the following states:
1396 // * Inflated - just return
1397 // * Stack-locked - coerce it to inflated
1398 // * INFLATING - busy wait for conversion to complete
1399 // * Neutral - aggressively inflate the object.
1400 // * BIASED - Illegal. We should never see this
1401
1402 // CASE: inflated
1403 if (mark->has_monitor()) {
1404 ObjectMonitor * inf = mark->monitor();
1405 assert(inf->header()->is_neutral(), "invariant");
1406 assert(oopDesc::equals((oop) inf->object(), object), "invariant");
1407 assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1408 return inf;
1409 }
1410
1411 // CASE: inflation in progress - inflating over a stack-lock.
1412 // Some other thread is converting from stack-locked to inflated.
1413 // Only that thread can complete inflation -- other threads must wait.
1414 // The INFLATING value is transient.
1415 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1416 // We could always eliminate polling by parking the thread on some auxiliary list.
1417 if (mark == markOopDesc::INFLATING()) {
1418 TEVENT(Inflate: spin while INFLATING);
1419 ReadStableMark(object);
1420 continue;
1421 }
1422
1423 // CASE: stack-locked
1424 // Could be stack-locked either by this thread or by some other thread.
1425 //
|
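The state list in the inflate() comment (lines 1395-1400) enumerates what the mark word can look like when inflation is requested. The Java sketch below shows two common Java-level triggers that reach this code; the class and method names are invented, and the exact inflation policy (when HotSpot chooses to inflate rather than spin or keep a stack-lock) is a VM implementation detail the example does not try to pin down.

class InflationExample {
    private final Object lock = new Object();

    void waitBriefly() throws InterruptedException {
        synchronized (lock) {   // lock typically starts out neutral or stack-locked
            lock.wait(10);      // wait() needs a full ObjectMonitor, so the mark is inflated
        }
    }

    void contend() {
        Runnable r = () -> {
            synchronized (lock) {
                // a second thread contending for `lock` is the other common
                // path that drives a stack-locked mark toward the inflated state
            }
        };
        new Thread(r).start();
        new Thread(r).start();
    }
}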