1 /*
2 * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "oops/klass.inline.hpp"
27 #include "oops/markOop.hpp"
28 #include "oops/oop.inline.hpp"
29 #include "runtime/atomic.inline.hpp"
30 #include "runtime/basicLock.hpp"
31 #include "runtime/biasedLocking.hpp"
32 #include "runtime/task.hpp"
33 #include "runtime/vframe.hpp"
34 #include "runtime/vmThread.hpp"
35 #include "runtime/vm_operations.hpp"
36
37 static bool _biased_locking_enabled = false;
38 BiasedLockingCounters BiasedLocking::_counters;
39
40 static GrowableArray<Handle>* _preserved_oop_stack = NULL;
41 static GrowableArray<markOop>* _preserved_mark_stack = NULL;
42
// Flip klass k's prototype mark word to the biased-locking pattern so that
// objects of k allocated from now on start out in the (anonymously) biasable
// state.  Used as the per-class callback for SystemDictionary::classes_do().
static void enable_biased_locking(Klass* k) {
  k->set_prototype_header(markOopDesc::biased_locking_prototype());
}
46
// VM operation that switches biased locking on globally: it walks the system
// dictionary installing the biased prototype header on every loaded class,
// then records that classes loaded in the future should do the same.
// When constructed as "cheap allocated" it runs as an asynchronous safepoint
// operation so the enqueuing thread (the watcher thread, see
// EnableBiasedLockingTask) does not block; the VM thread frees the storage.
class VM_EnableBiasedLocking: public VM_Operation {
 private:
  bool _is_cheap_allocated;  // true => run async; VM thread owns/frees this op
 public:
  VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
  VMOp_Type type() const { return VMOp_EnableBiasedLocking; }
  Mode evaluation_mode() const { return _is_cheap_allocated ? _async_safepoint : _safepoint; }
  bool is_cheap_allocated() const { return _is_cheap_allocated; }

  void doit() {
    // Iterate the system dictionary enabling biased locking for all
    // currently loaded classes
    SystemDictionary::classes_do(enable_biased_locking);
    // Indicate that future instances should enable it as well
    _biased_locking_enabled = true;

    if (TraceBiasedLocking) {
      tty->print_cr("Biased locking enabled");
    }
  }

  // Disallow nesting: no other VM operation should run while prototype
  // headers are being rewritten.
  bool allow_nested_vm_operations() const { return false; }
};
70
71
72 // One-shot PeriodicTask subclass for enabling biased locking
73 class EnableBiasedLockingTask : public PeriodicTask {
74 public:
75 EnableBiasedLockingTask(size_t interval_time) : PeriodicTask(interval_time) {}
76
77 virtual void task() {
78 // Use async VM operation to avoid blocking the Watcher thread.
79 // VM Thread will free C heap storage.
80 VM_EnableBiasedLocking *op = new VM_EnableBiasedLocking(true);
81 VMThread::execute(op);
82
83 // Reclaim our storage and disenroll ourself
84 delete this;
85 }
127 GrowableArray<MonitorInfo*> *monitors = vf->monitors();
128 if (monitors != NULL) {
129 int len = monitors->length();
130 // Walk monitors youngest to oldest
131 for (int i = len - 1; i >= 0; i--) {
132 MonitorInfo* mon_info = monitors->at(i);
133 if (mon_info->eliminated()) continue;
134 oop owner = mon_info->owner();
135 if (owner != NULL) {
136 info->append(mon_info);
137 }
138 }
139 }
140 }
141 }
142
143 thread->set_cached_monitor_info(info);
144 return info;
145 }
146
147
// Revoke the bias of a single object.
//
//   obj               - object whose mark word carries (or carried) the bias
//                       pattern
//   allow_rebias      - if true, leave the object in a re-biasable
//                       (anonymously biased) state instead of fully unbiased
//   is_bulk           - true when invoked from a bulk revocation; only
//                       affects trace-output verbosity
//   requesting_thread - thread asking for the revocation (may itself be the
//                       bias owner)
//
// Returns NOT_BIASED if the bias pattern is already gone, BIAS_REVOKED
// otherwise.  NOTE(review): callers are expected to ensure no concurrent
// mutation of obj's mark word (safepoint, or owner revoking on its own
// stack) -- this function itself takes no lock; confirm against callers.
static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread) {
  markOop mark = obj->mark();
  if (!mark->has_bias_pattern()) {
    // Bias already revoked (e.g. by an earlier bulk operation); nothing to do.
    if (TraceBiasedLocking) {
      ResourceMark rm;
      tty->print_cr(" (Skipping revocation of object of type %s because it's no longer biased)",
                    obj->klass()->external_name());
    }
    return BiasedLocking::NOT_BIASED;
  }

  // Replacement headers must preserve the object's GC age bits.
  uint age = mark->age();
  markOop biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
  markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);

  if (TraceBiasedLocking && (Verbose || !is_bulk)) {
    ResourceMark rm;
    tty->print_cr("Revoking bias of object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT " , allow rebias %d , requesting thread " INTPTR_FORMAT,
                  p2i((void *)obj), (intptr_t) mark, obj->klass()->external_name(), (intptr_t) obj->klass()->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread);
  }

  JavaThread* biased_thread = mark->biased_locker();
  if (biased_thread == NULL) {
    // Object is anonymously biased. We can get here if, for
    // example, we revoke the bias due to an identity hash code
    // being computed for an object.
    // No owner thread means no stack to fix up; installing the header is all
    // that is needed.
    if (!allow_rebias) {
      obj->set_mark(unbiased_prototype);
    }
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr(" Revoked bias of anonymously-biased object");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Handle case where the thread toward which the object was biased has exited
  bool thread_is_alive = false;
  if (requesting_thread == biased_thread) {
    thread_is_alive = true;
  } else {
    // Linear scan of the thread list to see whether the bias owner still
    // exists.  NOTE(review): presumably safe without Threads_lock because of
    // the calling context (see function header) -- confirm.
    for (JavaThread* cur_thread = Threads::first(); cur_thread != NULL; cur_thread = cur_thread->next()) {
      if (cur_thread == biased_thread) {
        thread_is_alive = true;
        break;
      }
    }
  }
  if (!thread_is_alive) {
    // Dead owner: it cannot hold the lock anymore, so simply install the
    // requested replacement header.
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      obj->set_mark(unbiased_prototype);
    }
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr(" Revoked bias of object biased toward dead thread");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Thread owning bias is alive.
  // Check to see whether it currently owns the lock and, if so,
  // write down the needed displaced headers to the thread's stack.
  // Otherwise, restore the object's header either to the unlocked
  // or unbiased state.
  GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
  BasicLock* highest_lock = NULL;
  for (int i = 0; i < cached_monitor_info->length(); i++) {
    MonitorInfo* mon_info = cached_monitor_info->at(i);
    if (mon_info->owner() == obj) {
      if (TraceBiasedLocking && Verbose) {
        tty->print_cr(" mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
                      p2i((void *) mon_info->owner()),
                      p2i((void *) obj));
      }
      // Assume recursive case and fix up highest lock later
      markOop mark = markOopDesc::encode((BasicLock*) NULL);
      highest_lock = mon_info->lock();
      highest_lock->set_displaced_header(mark);
    } else {
      if (TraceBiasedLocking && Verbose) {
        tty->print_cr(" mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
                      p2i((void *) mon_info->owner()),
                      p2i((void *) obj));
      }
    }
  }
  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark.
    // Must release storing the lock address for platforms without TSO
    // ordering (e.g. ppc).
    obj->release_set_mark(markOopDesc::encode(highest_lock));
    assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr(" Revoked bias of currently-locked object");
    }
  } else {
    // Owner is alive but does not currently hold the lock.
    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
      tty->print_cr(" Revoked bias of currently-unlocked object");
    }
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      // Store the unlocked value into the object's header.
      obj->set_mark(unbiased_prototype);
    }
  }

  return BiasedLocking::BIAS_REVOKED;
}
260
261
// Outcome of the per-class revocation heuristics: which kind of revocation
// (if any) the accumulated revocation counters call for.
enum HeuristicsResult {
  HR_NOT_BIASED    = 1,  // object is not biased; nothing to revoke
  HR_SINGLE_REVOKE = 2,  // revoke the bias of this one object only
  HR_BULK_REBIAS   = 3,  // rebias all instances of the class
  HR_BULK_REVOKE   = 4   // revoke biases for the whole class
};
268
309 }
310
311 if (revocation_count == BiasedLockingBulkRevokeThreshold) {
312 return HR_BULK_REVOKE;
313 }
314
315 if (revocation_count == BiasedLockingBulkRebiasThreshold) {
316 return HR_BULK_REBIAS;
317 }
318
319 return HR_SINGLE_REVOKE;
320 }
321
322
323 static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
324 bool bulk_rebias,
325 bool attempt_rebias_of_object,
326 JavaThread* requesting_thread) {
327 assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
328
329 if (TraceBiasedLocking) {
330 tty->print_cr("* Beginning bulk revocation (kind == %s) because of object "
331 INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
332 (bulk_rebias ? "rebias" : "revoke"),
333 p2i((void *) o), (intptr_t) o->mark(), o->klass()->external_name());
334 }
335
336 jlong cur_time = os::javaTimeMillis();
337 o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);
338
339
340 Klass* k_o = o->klass();
341 Klass* klass = k_o;
342
343 if (bulk_rebias) {
344 // Use the epoch in the klass of the object to implicitly revoke
345 // all biases of objects of this data type and force them to be
346 // reacquired. However, we also need to walk the stacks of all
347 // threads and update the headers of lightweight locked objects
348 // with biases to have the current epoch.
349
350 // If the prototype header doesn't have the bias pattern, don't
351 // try to update the epoch -- assume another VM operation came in
352 // and reset the header to the unbiased state, which will
353 // implicitly cause all existing biases to be revoked
354 if (klass->prototype_header()->has_bias_pattern()) {
360 // and locked objects of this data type we encounter
361 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
362 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
363 for (int i = 0; i < cached_monitor_info->length(); i++) {
364 MonitorInfo* mon_info = cached_monitor_info->at(i);
365 oop owner = mon_info->owner();
366 markOop mark = owner->mark();
367 if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
368 // We might have encountered this object already in the case of recursive locking
369 assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
370 owner->set_mark(mark->set_bias_epoch(cur_epoch));
371 }
372 }
373 }
374 }
375
376 // At this point we're done. All we have to do is potentially
377 // adjust the header of the given object to revoke its bias.
378 revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
379 } else {
380 if (TraceBiasedLocking) {
381 ResourceMark rm;
382 tty->print_cr("* Disabling biased locking for type %s", klass->external_name());
383 }
384
385 // Disable biased locking for this data type. Not only will this
386 // cause future instances to not be biased, but existing biased
387 // instances will notice that this implicitly caused their biases
388 // to be revoked.
389 klass->set_prototype_header(markOopDesc::prototype());
390
391 // Now walk all threads' stacks and forcibly revoke the biases of
392 // any locked and biased objects of this data type we encounter.
393 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
394 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
395 for (int i = 0; i < cached_monitor_info->length(); i++) {
396 MonitorInfo* mon_info = cached_monitor_info->at(i);
397 oop owner = mon_info->owner();
398 markOop mark = owner->mark();
399 if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
400 revoke_bias(owner, false, true, requesting_thread);
401 }
402 }
403 }
404
405 // Must force the bias of the passed object to be forcibly revoked
406 // as well to ensure guarantees to callers
407 revoke_bias(o, false, true, requesting_thread);
408 }
409
410 if (TraceBiasedLocking) {
411 tty->print_cr("* Ending bulk revocation");
412 }
413
414 BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;
415
416 if (attempt_rebias_of_object &&
417 o->mark()->has_bias_pattern() &&
418 klass->prototype_header()->has_bias_pattern()) {
419 markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
420 klass->prototype_header()->bias_epoch());
421 o->set_mark(new_mark);
422 status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
423 if (TraceBiasedLocking) {
424 tty->print_cr(" Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
425 }
426 }
427
428 assert(!o->mark()->has_bias_pattern() ||
429 (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
430 "bug in bulk bias revocation");
431
432 return status_code;
433 }
434
435
436 static void clean_up_cached_monitor_info() {
437 // Walk the thread list clearing out the cached monitors
438 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
439 thr->set_cached_monitor_info(NULL);
440 }
441 }
442
443
444 class VM_RevokeBias : public VM_Operation {
445 protected:
468 // give us locked object(s). If we don't find any biased objects
469 // there is nothing to do and we avoid a safepoint.
470 if (_obj != NULL) {
471 markOop mark = (*_obj)()->mark();
472 if (mark->has_bias_pattern()) {
473 return true;
474 }
475 } else {
476 for ( int i = 0 ; i < _objs->length(); i++ ) {
477 markOop mark = (_objs->at(i))()->mark();
478 if (mark->has_bias_pattern()) {
479 return true;
480 }
481 }
482 }
483 return false;
484 }
485
486 virtual void doit() {
487 if (_obj != NULL) {
488 if (TraceBiasedLocking) {
489 tty->print_cr("Revoking bias with potentially per-thread safepoint:");
490 }
491 _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread);
492 clean_up_cached_monitor_info();
493 return;
494 } else {
495 if (TraceBiasedLocking) {
496 tty->print_cr("Revoking bias with global safepoint:");
497 }
498 BiasedLocking::revoke_at_safepoint(_objs);
499 }
500 }
501
502 BiasedLocking::Condition status_code() const {
503 return _status_code;
504 }
505 };
506
507
508 class VM_BulkRevokeBias : public VM_RevokeBias {
509 private:
510 bool _bulk_rebias;
511 bool _attempt_rebias_of_object;
512
513 public:
514 VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
515 bool bulk_rebias,
516 bool attempt_rebias_of_object)
517 : VM_RevokeBias(obj, requesting_thread)
591 }
592
593 HeuristicsResult heuristics = update_heuristics(obj(), attempt_rebias);
594 if (heuristics == HR_NOT_BIASED) {
595 return NOT_BIASED;
596 } else if (heuristics == HR_SINGLE_REVOKE) {
597 Klass *k = obj->klass();
598 markOop prototype_header = k->prototype_header();
599 if (mark->biased_locker() == THREAD &&
600 prototype_header->bias_epoch() == mark->bias_epoch()) {
601 // A thread is trying to revoke the bias of an object biased
602 // toward it, again likely due to an identity hash code
603 // computation. We can again avoid a safepoint in this case
604 // since we are only going to walk our own stack. There are no
605 // races with revocations occurring in other threads because we
606 // reach no safepoints in the revocation path.
607 // Also check the epoch because even if threads match, another thread
608 // can come in with a CAS to steal the bias of an object that has a
609 // stale epoch.
610 ResourceMark rm;
611 if (TraceBiasedLocking) {
612 tty->print_cr("Revoking bias by walking my own stack:");
613 }
614 BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD);
615 ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
616 assert(cond == BIAS_REVOKED, "why not?");
617 return cond;
618 } else {
619 VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
620 VMThread::execute(&revoke);
621 return revoke.status_code();
622 }
623 }
624
625 assert((heuristics == HR_BULK_REVOKE) ||
626 (heuristics == HR_BULK_REBIAS), "?");
627 VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
628 (heuristics == HR_BULK_REBIAS),
629 attempt_rebias);
630 VMThread::execute(&bulk_revoke);
631 return bulk_revoke.status_code();
632 }
633
|
1 /*
2 * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "logging/log.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/klass.inline.hpp"
29 #include "oops/markOop.hpp"
30 #include "oops/oop.inline.hpp"
31 #include "runtime/atomic.inline.hpp"
32 #include "runtime/basicLock.hpp"
33 #include "runtime/biasedLocking.hpp"
34 #include "runtime/task.hpp"
35 #include "runtime/vframe.hpp"
36 #include "runtime/vmThread.hpp"
37 #include "runtime/vm_operations.hpp"
38
39 static bool _biased_locking_enabled = false;
40 BiasedLockingCounters BiasedLocking::_counters;
41
42 static GrowableArray<Handle>* _preserved_oop_stack = NULL;
43 static GrowableArray<markOop>* _preserved_mark_stack = NULL;
44
// Install the biased-locking prototype mark word on klass k so that objects
// of k allocated from now on start out biasable.  Used as the per-class
// callback for SystemDictionary::classes_do().
static void enable_biased_locking(Klass* k) {
  k->set_prototype_header(markOopDesc::biased_locking_prototype());
}
48
// VM operation that switches biased locking on globally: it walks the system
// dictionary installing the biased prototype header on every loaded class,
// then records that classes loaded in the future should do the same.
// When constructed as "cheap allocated" it runs as an asynchronous safepoint
// operation so the enqueuing thread (the watcher thread, see
// EnableBiasedLockingTask) does not block; the VM thread frees the storage.
class VM_EnableBiasedLocking: public VM_Operation {
 private:
  bool _is_cheap_allocated;  // true => run async; VM thread owns/frees this op
 public:
  VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
  VMOp_Type type() const { return VMOp_EnableBiasedLocking; }
  Mode evaluation_mode() const { return _is_cheap_allocated ? _async_safepoint : _safepoint; }
  bool is_cheap_allocated() const { return _is_cheap_allocated; }

  void doit() {
    // Iterate the system dictionary enabling biased locking for all
    // currently loaded classes
    SystemDictionary::classes_do(enable_biased_locking);
    // Indicate that future instances should enable it as well
    _biased_locking_enabled = true;

    log_info(biasedlocking)("Biased locking enabled");
  }

  // Disallow nesting: no other VM operation should run while prototype
  // headers are being rewritten.
  bool allow_nested_vm_operations() const { return false; }
};
70
71
72 // One-shot PeriodicTask subclass for enabling biased locking
73 class EnableBiasedLockingTask : public PeriodicTask {
74 public:
75 EnableBiasedLockingTask(size_t interval_time) : PeriodicTask(interval_time) {}
76
77 virtual void task() {
78 // Use async VM operation to avoid blocking the Watcher thread.
79 // VM Thread will free C heap storage.
80 VM_EnableBiasedLocking *op = new VM_EnableBiasedLocking(true);
81 VMThread::execute(op);
82
83 // Reclaim our storage and disenroll ourself
84 delete this;
85 }
127 GrowableArray<MonitorInfo*> *monitors = vf->monitors();
128 if (monitors != NULL) {
129 int len = monitors->length();
130 // Walk monitors youngest to oldest
131 for (int i = len - 1; i >= 0; i--) {
132 MonitorInfo* mon_info = monitors->at(i);
133 if (mon_info->eliminated()) continue;
134 oop owner = mon_info->owner();
135 if (owner != NULL) {
136 info->append(mon_info);
137 }
138 }
139 }
140 }
141 }
142
143 thread->set_cached_monitor_info(info);
144 return info;
145 }
146
// Revoke the bias of a single object.
//
//   obj               - object whose mark word carries (or carried) the bias
//                       pattern
//   allow_rebias      - if true, leave the object in a re-biasable
//                       (anonymously biased) state instead of fully unbiased
//   is_bulk           - true when invoked from a bulk revocation; single
//                       revocations log at "info", bulk ones at "trace"
//   requesting_thread - thread asking for the revocation (may itself be the
//                       bias owner)
//
// Returns NOT_BIASED if the bias pattern is already gone, BIAS_REVOKED
// otherwise.  NOTE(review): callers are expected to ensure no concurrent
// mutation of obj's mark word (safepoint, or owner revoking on its own
// stack) -- this function itself takes no lock; confirm against callers.
static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread) {
  markOop mark = obj->mark();
  if (!mark->has_bias_pattern()) {
    // Bias already revoked (e.g. by an earlier bulk operation); nothing to do.
    if (log_is_enabled(Info, biasedlocking)) {
      ResourceMark rm;
      log_info(biasedlocking)(" (Skipping revocation of object of type %s "
                              "because it's no longer biased)",
                              obj->klass()->external_name());
    }
    return BiasedLocking::NOT_BIASED;
  }

  // Replacement headers must preserve the object's GC age bits.
  uint age = mark->age();
  markOop biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
  markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);

  // Log at "info" level if not bulk, else "trace" level
  if (!is_bulk) {
    ResourceMark rm;
    log_info(biasedlocking)("Revoking bias of object " INTPTR_FORMAT " , mark "
                            INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT
                            " , allow rebias %d , requesting thread " INTPTR_FORMAT,
                            p2i((void *)obj),
                            (intptr_t) mark,
                            obj->klass()->external_name(),
                            (intptr_t) obj->klass()->prototype_header(),
                            (allow_rebias ? 1 : 0),
                            (intptr_t) requesting_thread);
  } else {
    ResourceMark rm;
    log_trace(biasedlocking)("Revoking bias of object " INTPTR_FORMAT " , mark "
                             INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT
                             " , allow rebias %d , requesting thread " INTPTR_FORMAT,
                             p2i((void *)obj),
                             (intptr_t) mark,
                             obj->klass()->external_name(),
                             (intptr_t) obj->klass()->prototype_header(),
                             (allow_rebias ? 1 : 0),
                             (intptr_t) requesting_thread);
  }

  JavaThread* biased_thread = mark->biased_locker();
  if (biased_thread == NULL) {
    // Object is anonymously biased. We can get here if, for
    // example, we revoke the bias due to an identity hash code
    // being computed for an object.
    // No owner thread means no stack to fix up; installing the header is all
    // that is needed.
    if (!allow_rebias) {
      obj->set_mark(unbiased_prototype);
    }
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of anonymously-biased object");
    } else {
      log_trace(biasedlocking)("  Revoked bias of anonymously-biased object");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Handle case where the thread toward which the object was biased has exited
  bool thread_is_alive = false;
  if (requesting_thread == biased_thread) {
    thread_is_alive = true;
  } else {
    // Linear scan of the thread list to see whether the bias owner still
    // exists.  NOTE(review): presumably safe without Threads_lock because of
    // the calling context (see function header) -- confirm.
    for (JavaThread* cur_thread = Threads::first(); cur_thread != NULL; cur_thread = cur_thread->next()) {
      if (cur_thread == biased_thread) {
        thread_is_alive = true;
        break;
      }
    }
  }
  if (!thread_is_alive) {
    // Dead owner: it cannot hold the lock anymore, so simply install the
    // requested replacement header.
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      obj->set_mark(unbiased_prototype);
    }
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of object biased toward dead thread");
    } else {
      log_trace(biasedlocking)("  Revoked bias of object biased toward dead thread");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Thread owning bias is alive.
  // Check to see whether it currently owns the lock and, if so,
  // write down the needed displaced headers to the thread's stack.
  // Otherwise, restore the object's header either to the unlocked
  // or unbiased state.
  GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
  BasicLock* highest_lock = NULL;
  for (int i = 0; i < cached_monitor_info->length(); i++) {
    MonitorInfo* mon_info = cached_monitor_info->at(i);
    if (mon_info->owner() == obj) {
      log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
                               p2i((void *) mon_info->owner()),
                               p2i((void *) obj));
      // Assume recursive case and fix up highest lock later
      markOop mark = markOopDesc::encode((BasicLock*) NULL);
      highest_lock = mon_info->lock();
      highest_lock->set_displaced_header(mark);
    } else {
      log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
                               p2i((void *) mon_info->owner()),
                               p2i((void *) obj));
    }
  }
  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark.
    // Must release storing the lock address for platforms without TSO
    // ordering (e.g. ppc).
    obj->release_set_mark(markOopDesc::encode(highest_lock));
    assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of currently-locked object");
    } else {
      log_trace(biasedlocking)("  Revoked bias of currently-locked object");
    }
  } else {
    // Owner is alive but does not currently hold the lock.
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of currently-unlocked object");
    } else {
      log_trace(biasedlocking)("  Revoked bias of currently-unlocked object");
    }
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      // Store the unlocked value into the object's header.
      obj->set_mark(unbiased_prototype);
    }
  }

  return BiasedLocking::BIAS_REVOKED;
}
286
287
// Outcome of the per-class revocation heuristics: which kind of revocation
// (if any) the accumulated revocation counters call for.
enum HeuristicsResult {
  HR_NOT_BIASED    = 1,  // object is not biased; nothing to revoke
  HR_SINGLE_REVOKE = 2,  // revoke the bias of this one object only
  HR_BULK_REBIAS   = 3,  // rebias all instances of the class
  HR_BULK_REVOKE   = 4   // revoke biases for the whole class
};
294
335 }
336
337 if (revocation_count == BiasedLockingBulkRevokeThreshold) {
338 return HR_BULK_REVOKE;
339 }
340
341 if (revocation_count == BiasedLockingBulkRebiasThreshold) {
342 return HR_BULK_REBIAS;
343 }
344
345 return HR_SINGLE_REVOKE;
346 }
347
348
349 static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
350 bool bulk_rebias,
351 bool attempt_rebias_of_object,
352 JavaThread* requesting_thread) {
353 assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
354
355 log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object "
356 INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
357 (bulk_rebias ? "rebias" : "revoke"),
358 p2i((void *) o),
359 (intptr_t) o->mark(),
360 o->klass()->external_name());
361
362 jlong cur_time = os::javaTimeMillis();
363 o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);
364
365
366 Klass* k_o = o->klass();
367 Klass* klass = k_o;
368
369 if (bulk_rebias) {
370 // Use the epoch in the klass of the object to implicitly revoke
371 // all biases of objects of this data type and force them to be
372 // reacquired. However, we also need to walk the stacks of all
373 // threads and update the headers of lightweight locked objects
374 // with biases to have the current epoch.
375
376 // If the prototype header doesn't have the bias pattern, don't
377 // try to update the epoch -- assume another VM operation came in
378 // and reset the header to the unbiased state, which will
379 // implicitly cause all existing biases to be revoked
380 if (klass->prototype_header()->has_bias_pattern()) {
386 // and locked objects of this data type we encounter
387 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
388 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
389 for (int i = 0; i < cached_monitor_info->length(); i++) {
390 MonitorInfo* mon_info = cached_monitor_info->at(i);
391 oop owner = mon_info->owner();
392 markOop mark = owner->mark();
393 if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
394 // We might have encountered this object already in the case of recursive locking
395 assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
396 owner->set_mark(mark->set_bias_epoch(cur_epoch));
397 }
398 }
399 }
400 }
401
402 // At this point we're done. All we have to do is potentially
403 // adjust the header of the given object to revoke its bias.
404 revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
405 } else {
406 if (log_is_enabled(Info, biasedlocking)) {
407 ResourceMark rm;
408 log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
409 }
410
411 // Disable biased locking for this data type. Not only will this
412 // cause future instances to not be biased, but existing biased
413 // instances will notice that this implicitly caused their biases
414 // to be revoked.
415 klass->set_prototype_header(markOopDesc::prototype());
416
417 // Now walk all threads' stacks and forcibly revoke the biases of
418 // any locked and biased objects of this data type we encounter.
419 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
420 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
421 for (int i = 0; i < cached_monitor_info->length(); i++) {
422 MonitorInfo* mon_info = cached_monitor_info->at(i);
423 oop owner = mon_info->owner();
424 markOop mark = owner->mark();
425 if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
426 revoke_bias(owner, false, true, requesting_thread);
427 }
428 }
429 }
430
431 // Must force the bias of the passed object to be forcibly revoked
432 // as well to ensure guarantees to callers
433 revoke_bias(o, false, true, requesting_thread);
434 }
435
436 log_info(biasedlocking)("* Ending bulk revocation");
437
438 BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;
439
440 if (attempt_rebias_of_object &&
441 o->mark()->has_bias_pattern() &&
442 klass->prototype_header()->has_bias_pattern()) {
443 markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
444 klass->prototype_header()->bias_epoch());
445 o->set_mark(new_mark);
446 status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
447 log_info(biasedlocking)(" Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
448 }
449
450 assert(!o->mark()->has_bias_pattern() ||
451 (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
452 "bug in bulk bias revocation");
453
454 return status_code;
455 }
456
457
458 static void clean_up_cached_monitor_info() {
459 // Walk the thread list clearing out the cached monitors
460 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
461 thr->set_cached_monitor_info(NULL);
462 }
463 }
464
465
466 class VM_RevokeBias : public VM_Operation {
467 protected:
490 // give us locked object(s). If we don't find any biased objects
491 // there is nothing to do and we avoid a safepoint.
492 if (_obj != NULL) {
493 markOop mark = (*_obj)()->mark();
494 if (mark->has_bias_pattern()) {
495 return true;
496 }
497 } else {
498 for ( int i = 0 ; i < _objs->length(); i++ ) {
499 markOop mark = (_objs->at(i))()->mark();
500 if (mark->has_bias_pattern()) {
501 return true;
502 }
503 }
504 }
505 return false;
506 }
507
508 virtual void doit() {
509 if (_obj != NULL) {
510 log_info(biasedlocking)("Revoking bias with potentially per-thread safepoint:");
511 _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread);
512 clean_up_cached_monitor_info();
513 return;
514 } else {
515 log_info(biasedlocking)("Revoking bias with global safepoint:");
516 BiasedLocking::revoke_at_safepoint(_objs);
517 }
518 }
519
520 BiasedLocking::Condition status_code() const {
521 return _status_code;
522 }
523 };
524
525
526 class VM_BulkRevokeBias : public VM_RevokeBias {
527 private:
528 bool _bulk_rebias;
529 bool _attempt_rebias_of_object;
530
531 public:
532 VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
533 bool bulk_rebias,
534 bool attempt_rebias_of_object)
535 : VM_RevokeBias(obj, requesting_thread)
609 }
610
611 HeuristicsResult heuristics = update_heuristics(obj(), attempt_rebias);
612 if (heuristics == HR_NOT_BIASED) {
613 return NOT_BIASED;
614 } else if (heuristics == HR_SINGLE_REVOKE) {
615 Klass *k = obj->klass();
616 markOop prototype_header = k->prototype_header();
617 if (mark->biased_locker() == THREAD &&
618 prototype_header->bias_epoch() == mark->bias_epoch()) {
619 // A thread is trying to revoke the bias of an object biased
620 // toward it, again likely due to an identity hash code
621 // computation. We can again avoid a safepoint in this case
622 // since we are only going to walk our own stack. There are no
623 // races with revocations occurring in other threads because we
624 // reach no safepoints in the revocation path.
625 // Also check the epoch because even if threads match, another thread
626 // can come in with a CAS to steal the bias of an object that has a
627 // stale epoch.
628 ResourceMark rm;
629 log_info(biasedlocking)("Revoking bias by walking my own stack:");
630 BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD);
631 ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
632 assert(cond == BIAS_REVOKED, "why not?");
633 return cond;
634 } else {
635 VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
636 VMThread::execute(&revoke);
637 return revoke.status_code();
638 }
639 }
640
641 assert((heuristics == HR_BULK_REVOKE) ||
642 (heuristics == HR_BULK_REBIAS), "?");
643 VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
644 (heuristics == HR_BULK_REBIAS),
645 attempt_rebias);
646 VMThread::execute(&bulk_revoke);
647 return bulk_revoke.status_code();
648 }
649
|