1 /*
2 * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "oops/klass.inline.hpp"
27 #include "oops/markOop.hpp"
28 #include "runtime/basicLock.hpp"
29 #include "runtime/biasedLocking.hpp"
30 #include "runtime/task.hpp"
31 #include "runtime/vframe.hpp"
32 #include "runtime/vmThread.hpp"
33 #include "runtime/vm_operations.hpp"
34
35 static bool _biased_locking_enabled = false;
36 BiasedLockingCounters BiasedLocking::_counters;
37
38 static GrowableArray<Handle>* _preserved_oop_stack = NULL;
39 static GrowableArray<markOop>* _preserved_mark_stack = NULL;
40
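// Installing the biased-locking prototype in the Klass makes newly
// allocated instances of k start out in the anonymously biased state.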
41 static void enable_biased_locking(Klass* k) {
42 k->set_prototype_header(markOopDesc::biased_locking_prototype());
43 }
44
45 class VM_EnableBiasedLocking: public VM_Operation {
46 private:
47 bool _is_cheap_allocated;
48 public:
49 VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
50 VMOp_Type type() const { return VMOp_EnableBiasedLocking; }
51 Mode evaluation_mode() const { return _is_cheap_allocated ? _async_safepoint : _safepoint; }
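  // A cheap-allocated operation is run as an asynchronous safepoint: the VM
  // thread deallocates it, so the enqueuing thread does not have to wait.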
52 bool is_cheap_allocated() const { return _is_cheap_allocated; }
53
126 if (monitors != NULL) {
127 int len = monitors->length();
128 // Walk monitors youngest to oldest
129 for (int i = len - 1; i >= 0; i--) {
130 MonitorInfo* mon_info = monitors->at(i);
131 if (mon_info->eliminated()) continue;
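      // Monitors eliminated by escape analysis have no real lock state to
      // revoke, so they are skipped here.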
132 oop owner = mon_info->owner();
133 if (owner != NULL) {
134 info->append(mon_info);
135 }
136 }
137 }
138 }
139 }
140
141 thread->set_cached_monitor_info(info);
142 return info;
143 }
144
145
146 static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread) {
147 markOop mark = obj->mark();
148 if (!mark->has_bias_pattern()) {
149 if (TraceBiasedLocking) {
150 ResourceMark rm;
151 tty->print_cr(" (Skipping revocation of object of type %s because it's no longer biased)",
152 obj->klass()->external_name());
153 }
154 return BiasedLocking::NOT_BIASED;
155 }
156
157 uint age = mark->age();
158 markOop biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
159 markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);
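  // Both replacement headers preserve the object's GC age: the biased
  // prototype leaves the object anonymously biased, the unbiased prototype
  // is a plain unlocked header.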
160
161 if (TraceBiasedLocking && (Verbose || !is_bulk)) {
162 ResourceMark rm;
163 tty->print_cr("Revoking bias of object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT " , allow rebias %d , requesting thread " INTPTR_FORMAT,
164 p2i((void *)obj), (intptr_t) mark, obj->klass()->external_name(), (intptr_t) obj->klass()->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread);
165 }
166
236 // Reset object header to point to displaced mark.
237 // Must use a release store when writing the lock address on platforms
238 // without TSO ordering (e.g. ppc).
239 obj->release_set_mark(markOopDesc::encode(highest_lock));
240 assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
241 if (TraceBiasedLocking && (Verbose || !is_bulk)) {
242 tty->print_cr(" Revoked bias of currently-locked object");
243 }
244 } else {
245 if (TraceBiasedLocking && (Verbose || !is_bulk)) {
246 tty->print_cr(" Revoked bias of currently-unlocked object");
247 }
248 if (allow_rebias) {
249 obj->set_mark(biased_prototype);
250 } else {
251 // Store the unlocked value into the object's header.
252 obj->set_mark(unbiased_prototype);
253 }
254 }
255
256 return BiasedLocking::BIAS_REVOKED;
257 }
258
259
260 enum HeuristicsResult {
261 HR_NOT_BIASED = 1,
262 HR_SINGLE_REVOKE = 2,
263 HR_BULK_REBIAS = 3,
264 HR_BULK_REVOKE = 4
265 };
266
267
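// update_heuristics() maps the per-Klass revocation counters onto one of the
// HeuristicsResult actions above; the thresholds involved are the
// BiasedLockingBulkRebiasThreshold and BiasedLockingBulkRevokeThreshold flags.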
268 static HeuristicsResult update_heuristics(oop o, bool allow_rebias) {
269 markOop mark = o->mark();
270 if (!mark->has_bias_pattern()) {
271 return HR_NOT_BIASED;
272 }
273
274 // Heuristics to attempt to throttle the number of revocations.
275 // Stages:
356
357 // Now walk all threads' stacks and adjust epochs of any biased
358 // and locked objects of this data type we encounter
359 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
360 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
361 for (int i = 0; i < cached_monitor_info->length(); i++) {
362 MonitorInfo* mon_info = cached_monitor_info->at(i);
363 oop owner = mon_info->owner();
364 markOop mark = owner->mark();
365 if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
366 // We might have encountered this object already in the case of recursive locking
367 assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
368 owner->set_mark(mark->set_bias_epoch(cur_epoch));
369 }
370 }
371 }
372 }
373
374 // At this point we're done. All we have to do is potentially
375 // adjust the header of the given object to revoke its bias.
376 revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
377 } else {
378 if (TraceBiasedLocking) {
379 ResourceMark rm;
380 tty->print_cr("* Disabling biased locking for type %s", klass->external_name());
381 }
382
383 // Disable biased locking for this data type. Not only will this
384 // cause future instances to not be biased, but existing biased
385 // instances will notice that this implicitly caused their biases
386 // to be revoked.
387 klass->set_prototype_header(markOopDesc::prototype());
388
389 // Now walk all threads' stacks and forcibly revoke the biases of
390 // any locked and biased objects of this data type we encounter.
391 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
392 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
393 for (int i = 0; i < cached_monitor_info->length(); i++) {
394 MonitorInfo* mon_info = cached_monitor_info->at(i);
395 oop owner = mon_info->owner();
396 markOop mark = owner->mark();
397 if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
398 revoke_bias(owner, false, true, requesting_thread);
399 }
400 }
401 }
402
403 // The bias of the passed object must also be revoked here so that the
404 // guarantees made to callers still hold.
405 revoke_bias(o, false, true, requesting_thread);
406 }
407
408 if (TraceBiasedLocking) {
409 tty->print_cr("* Ending bulk revocation");
410 }
411
412 BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;
413
414 if (attempt_rebias_of_object &&
415 o->mark()->has_bias_pattern() &&
416 klass->prototype_header()->has_bias_pattern()) {
417 markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
418 klass->prototype_header()->bias_epoch());
419 o->set_mark(new_mark);
420 status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
421 if (TraceBiasedLocking) {
422 tty->print_cr(" Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
423 }
424 }
425
428 "bug in bulk bias revocation");
429
430 return status_code;
431 }
432
433
434 static void clean_up_cached_monitor_info() {
435 // Walk the thread list clearing out the cached monitors
436 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
437 thr->set_cached_monitor_info(NULL);
438 }
439 }
440
441
442 class VM_RevokeBias : public VM_Operation {
443 protected:
444 Handle* _obj;
445 GrowableArray<Handle>* _objs;
446 JavaThread* _requesting_thread;
447 BiasedLocking::Condition _status_code;
448
449 public:
450 VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
451 : _obj(obj)
452 , _objs(NULL)
453 , _requesting_thread(requesting_thread)
454 , _status_code(BiasedLocking::NOT_BIASED) {}
455
456 VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
457 : _obj(NULL)
458 , _objs(objs)
459 , _requesting_thread(requesting_thread)
460 , _status_code(BiasedLocking::NOT_BIASED) {}
461
462 virtual VMOp_Type type() const { return VMOp_RevokeBias; }
463
464 virtual bool doit_prologue() {
465 // Verify that there is actual work to do since the callers just
466 // give us locked object(s). If we don't find any biased objects
467 // there is nothing to do and we avoid a safepoint.
468 if (_obj != NULL) {
469 markOop mark = (*_obj)()->mark();
470 if (mark->has_bias_pattern()) {
471 return true;
472 }
473 } else {
474 for (int i = 0; i < _objs->length(); i++) {
475 markOop mark = (_objs->at(i))()->mark();
476 if (mark->has_bias_pattern()) {
477 return true;
478 }
479 }
480 }
481 return false;
482 }
483
484 virtual void doit() {
485 if (_obj != NULL) {
486 if (TraceBiasedLocking) {
487 tty->print_cr("Revoking bias with potentially per-thread safepoint:");
488 }
489 _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread);
490 clean_up_cached_monitor_info();
491 return;
492 } else {
493 if (TraceBiasedLocking) {
494 tty->print_cr("Revoking bias with global safepoint:");
495 }
496 BiasedLocking::revoke_at_safepoint(_objs);
497 }
498 }
499
500 BiasedLocking::Condition status_code() const {
501 return _status_code;
502 }
503 };
504
505
506 class VM_BulkRevokeBias : public VM_RevokeBias {
507 private:
508 bool _bulk_rebias;
509 bool _attempt_rebias_of_object;
510
511 public:
512 VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
513 bool bulk_rebias,
514 bool attempt_rebias_of_object)
515 : VM_RevokeBias(obj, requesting_thread)
516 , _bulk_rebias(bulk_rebias)
517 , _attempt_rebias_of_object(attempt_rebias_of_object) {}
518
519 virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }
520 virtual bool doit_prologue() { return true; }
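  // Unlike VM_RevokeBias, a bulk operation always proceeds to a safepoint;
  // the heuristics that selected bulk revocation already established that
  // there is work to do.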
521
522 virtual void doit() {
592 if (heuristics == HR_NOT_BIASED) {
593 return NOT_BIASED;
594 } else if (heuristics == HR_SINGLE_REVOKE) {
595 Klass *k = obj->klass();
596 markOop prototype_header = k->prototype_header();
597 if (mark->biased_locker() == THREAD &&
598 prototype_header->bias_epoch() == mark->bias_epoch()) {
599 // A thread is trying to revoke the bias of an object biased
600 // toward it, again likely due to an identity hash code
601 // computation. We can again avoid a safepoint in this case
602 // since we are only going to walk our own stack. There are no
603 // races with revocations occurring in other threads because we
604 // reach no safepoints in the revocation path.
605 // Also check the epoch because even if threads match, another thread
606 // can come in with a CAS to steal the bias of an object that has a
607 // stale epoch.
608 ResourceMark rm;
609 if (TraceBiasedLocking) {
610 tty->print_cr("Revoking bias by walking my own stack:");
611 }
612 BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD);
613 ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
614 assert(cond == BIAS_REVOKED, "bias should have been revoked");
615 return cond;
616 } else {
617 VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
618 VMThread::execute(&revoke);
619 return revoke.status_code();
620 }
621 }
622
623 assert((heuristics == HR_BULK_REVOKE) ||
624 (heuristics == HR_BULK_REBIAS), "?");
625 VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
626 (heuristics == HR_BULK_REBIAS),
627 attempt_rebias);
628 VMThread::execute(&bulk_revoke);
629 return bulk_revoke.status_code();
630 }
631
632
633 void BiasedLocking::revoke(GrowableArray<Handle>* objs) {
634 assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
635 if (objs->length() == 0) {
636 return;
637 }
638 VM_RevokeBias revoke(objs, JavaThread::current());
639 VMThread::execute(&revoke);
640 }
641
642
643 void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
644 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
645 oop obj = h_obj();
646 HeuristicsResult heuristics = update_heuristics(obj, false);
647 if (heuristics == HR_SINGLE_REVOKE) {
648 revoke_bias(obj, false, false, NULL);
649 } else if ((heuristics == HR_BULK_REBIAS) ||
650 (heuristics == HR_BULK_REVOKE)) {
651 bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
652 }
653 clean_up_cached_monitor_info();
654 }
655
656
657 void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) {
658 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
659 int len = objs->length();
660 for (int i = 0; i < len; i++) {
661 oop obj = (objs->at(i))();
662 HeuristicsResult heuristics = update_heuristics(obj, false);
663 if (heuristics == HR_SINGLE_REVOKE) {
664 revoke_bias(obj, false, false, NULL);
665 } else if ((heuristics == HR_BULK_REBIAS) ||
666 (heuristics == HR_BULK_REVOKE)) {
667 bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
668 }
669 }
670 clean_up_cached_monitor_info();
671 }
672
673
674 void BiasedLocking::preserve_marks() {
675 if (!UseBiasedLocking)
676 return;
677
678 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
679
680 assert(_preserved_oop_stack == NULL, "double initialization");
681 assert(_preserved_mark_stack == NULL, "double initialization");
682
683 // In order to reduce the number of mark words preserved during GC
684 // due to the presence of biased locking, we reinitialize most mark
|
1 /*
2 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "oops/klass.inline.hpp"
27 #include "oops/markOop.hpp"
28 #include "runtime/basicLock.hpp"
29 #include "runtime/biasedLocking.hpp"
30 #include "runtime/task.hpp"
31 #include "runtime/vframe.hpp"
32 #include "runtime/vmThread.hpp"
33 #include "runtime/vm_operations.hpp"
34 #include "trace/tracing.hpp"
35
36 static bool _biased_locking_enabled = false;
37 BiasedLockingCounters BiasedLocking::_counters;
38
39 static GrowableArray<Handle>* _preserved_oop_stack = NULL;
40 static GrowableArray<markOop>* _preserved_mark_stack = NULL;
41
42 static void enable_biased_locking(Klass* k) {
43 k->set_prototype_header(markOopDesc::biased_locking_prototype());
44 }
45
46 class VM_EnableBiasedLocking: public VM_Operation {
47 private:
48 bool _is_cheap_allocated;
49 public:
50 VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
51 VMOp_Type type() const { return VMOp_EnableBiasedLocking; }
52 Mode evaluation_mode() const { return _is_cheap_allocated ? _async_safepoint : _safepoint; }
53 bool is_cheap_allocated() const { return _is_cheap_allocated; }
54
127 if (monitors != NULL) {
128 int len = monitors->length();
129 // Walk monitors youngest to oldest
130 for (int i = len - 1; i >= 0; i--) {
131 MonitorInfo* mon_info = monitors->at(i);
132 if (mon_info->eliminated()) continue;
133 oop owner = mon_info->owner();
134 if (owner != NULL) {
135 info->append(mon_info);
136 }
137 }
138 }
139 }
140 }
141
142 thread->set_cached_monitor_info(info);
143 return info;
144 }
145
146
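// In this version revoke_bias() additionally reports, through the optional
// biased_locker out parameter, which thread held the bias so that callers
// can record it in JFR revocation events.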
147 static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread, JavaThread** biased_locker) {
148 markOop mark = obj->mark();
149 if (!mark->has_bias_pattern()) {
150 if (TraceBiasedLocking) {
151 ResourceMark rm;
152 tty->print_cr(" (Skipping revocation of object of type %s because it's no longer biased)",
153 obj->klass()->external_name());
154 }
155 return BiasedLocking::NOT_BIASED;
156 }
157
158 uint age = mark->age();
159 markOop biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
160 markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);
161
162 if (TraceBiasedLocking && (Verbose || !is_bulk)) {
163 ResourceMark rm;
164 tty->print_cr("Revoking bias of object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT " , allow rebias %d , requesting thread " INTPTR_FORMAT,
165 p2i((void *)obj), (intptr_t) mark, obj->klass()->external_name(), (intptr_t) obj->klass()->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread);
166 }
167
237 // Reset object header to point to displaced mark.
238 // Must use a release store when writing the lock address on platforms
239 // without TSO ordering (e.g. ppc).
240 obj->release_set_mark(markOopDesc::encode(highest_lock));
241 assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
242 if (TraceBiasedLocking && (Verbose || !is_bulk)) {
243 tty->print_cr(" Revoked bias of currently-locked object");
244 }
245 } else {
246 if (TraceBiasedLocking && (Verbose || !is_bulk)) {
247 tty->print_cr(" Revoked bias of currently-unlocked object");
248 }
249 if (allow_rebias) {
250 obj->set_mark(biased_prototype);
251 } else {
252 // Store the unlocked value into the object's header.
253 obj->set_mark(unbiased_prototype);
254 }
255 }
256
257 // If requested, return information on which thread held the bias
258 if (EnableJFR && biased_locker != NULL) {
259 *biased_locker = biased_thread;
260 }
261
262 return BiasedLocking::BIAS_REVOKED;
263 }
264
265
266 enum HeuristicsResult {
267 HR_NOT_BIASED = 1,
268 HR_SINGLE_REVOKE = 2,
269 HR_BULK_REBIAS = 3,
270 HR_BULK_REVOKE = 4
271 };
272
273
274 static HeuristicsResult update_heuristics(oop o, bool allow_rebias) {
275 markOop mark = o->mark();
276 if (!mark->has_bias_pattern()) {
277 return HR_NOT_BIASED;
278 }
279
280 // Heuristics to attempt to throttle the number of revocations.
281 // Stages:
362
363 // Now walk all threads' stacks and adjust epochs of any biased
364 // and locked objects of this data type we encounter
365 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
366 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
367 for (int i = 0; i < cached_monitor_info->length(); i++) {
368 MonitorInfo* mon_info = cached_monitor_info->at(i);
369 oop owner = mon_info->owner();
370 markOop mark = owner->mark();
371 if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
372 // We might have encountered this object already in the case of recursive locking
373 assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
374 owner->set_mark(mark->set_bias_epoch(cur_epoch));
375 }
376 }
377 }
378 }
379
380 // At this point we're done. All we have to do is potentially
381 // adjust the header of the given object to revoke its bias.
382 revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL);
383 } else {
384 if (TraceBiasedLocking) {
385 ResourceMark rm;
386 tty->print_cr("* Disabling biased locking for type %s", klass->external_name());
387 }
388
389 // Disable biased locking for this data type. Not only will this
390 // cause future instances to not be biased, but existing biased
391 // instances will notice that this implicitly caused their biases
392 // to be revoked.
393 klass->set_prototype_header(markOopDesc::prototype());
394
395 // Now walk all threads' stacks and forcibly revoke the biases of
396 // any locked and biased objects of this data type we encounter.
397 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
398 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
399 for (int i = 0; i < cached_monitor_info->length(); i++) {
400 MonitorInfo* mon_info = cached_monitor_info->at(i);
401 oop owner = mon_info->owner();
402 markOop mark = owner->mark();
403 if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
404 revoke_bias(owner, false, true, requesting_thread, NULL);
405 }
406 }
407 }
408
409 // The bias of the passed object must also be revoked here so that the
410 // guarantees made to callers still hold.
411 revoke_bias(o, false, true, requesting_thread, NULL);
412 }
413
414 if (TraceBiasedLocking) {
415 tty->print_cr("* Ending bulk revocation");
416 }
417
418 BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;
419
420 if (attempt_rebias_of_object &&
421 o->mark()->has_bias_pattern() &&
422 klass->prototype_header()->has_bias_pattern()) {
423 markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
424 klass->prototype_header()->bias_epoch());
425 o->set_mark(new_mark);
426 status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
427 if (TraceBiasedLocking) {
428 tty->print_cr(" Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
429 }
430 }
431
434 "bug in bulk bias revocation");
435
436 return status_code;
437 }
438
439
440 static void clean_up_cached_monitor_info() {
441 // Walk the thread list clearing out the cached monitors
442 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
443 thr->set_cached_monitor_info(NULL);
444 }
445 }
446
447
448 class VM_RevokeBias : public VM_Operation {
449 protected:
450 Handle* _obj;
451 GrowableArray<Handle>* _objs;
452 JavaThread* _requesting_thread;
453 BiasedLocking::Condition _status_code;
454 traceid _biased_locker_id;
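  // JFR trace id of the thread that previously held the bias; filled in by
  // doit() and later attached to the BiasedLockRevocation event.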
455
456 public:
457 VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
458 : _obj(obj)
459 , _objs(NULL)
460 , _requesting_thread(requesting_thread)
461 , _status_code(BiasedLocking::NOT_BIASED)
462 , _biased_locker_id(0) {}
463
464 VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
465 : _obj(NULL)
466 , _objs(objs)
467 , _requesting_thread(requesting_thread)
468 , _status_code(BiasedLocking::NOT_BIASED)
469 , _biased_locker_id(0) {}
470
471 virtual VMOp_Type type() const { return VMOp_RevokeBias; }
472
473 virtual bool doit_prologue() {
474 // Verify that there is actual work to do since the callers just
475 // give us locked object(s). If we don't find any biased objects
476 // there is nothing to do and we avoid a safepoint.
477 if (_obj != NULL) {
478 markOop mark = (*_obj)()->mark();
479 if (mark->has_bias_pattern()) {
480 return true;
481 }
482 } else {
483 for (int i = 0; i < _objs->length(); i++) {
484 markOop mark = (_objs->at(i))()->mark();
485 if (mark->has_bias_pattern()) {
486 return true;
487 }
488 }
489 }
490 return false;
491 }
492
493 virtual void doit() {
494 if (_obj != NULL) {
495 if (TraceBiasedLocking) {
496 tty->print_cr("Revoking bias with potentially per-thread safepoint:");
497 }
498 JavaThread* biased_locker = NULL;
499 _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread, &biased_locker);
500 if (biased_locker != NULL) {
501 _biased_locker_id = THREAD_TRACE_ID(biased_locker);
502 }
503 clean_up_cached_monitor_info();
504 return;
505 } else {
506 if (TraceBiasedLocking) {
507 tty->print_cr("Revoking bias with global safepoint:");
508 }
509 BiasedLocking::revoke_at_safepoint(_objs);
510 }
511 }
512
513 BiasedLocking::Condition status_code() const {
514 return _status_code;
515 }
516
517 traceid biased_locker() const {
518 return _biased_locker_id;
519 }
520 };
521
522
523 class VM_BulkRevokeBias : public VM_RevokeBias {
524 private:
525 bool _bulk_rebias;
526 bool _attempt_rebias_of_object;
527
528 public:
529 VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
530 bool bulk_rebias,
531 bool attempt_rebias_of_object)
532 : VM_RevokeBias(obj, requesting_thread)
533 , _bulk_rebias(bulk_rebias)
534 , _attempt_rebias_of_object(attempt_rebias_of_object) {}
535
536 virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }
537 virtual bool doit_prologue() { return true; }
538
539 virtual void doit() {
609 if (heuristics == HR_NOT_BIASED) {
610 return NOT_BIASED;
611 } else if (heuristics == HR_SINGLE_REVOKE) {
612 Klass *k = obj->klass();
613 markOop prototype_header = k->prototype_header();
614 if (mark->biased_locker() == THREAD &&
615 prototype_header->bias_epoch() == mark->bias_epoch()) {
616 // A thread is trying to revoke the bias of an object biased
617 // toward it, again likely due to an identity hash code
618 // computation. We can again avoid a safepoint in this case
619 // since we are only going to walk our own stack. There are no
620 // races with revocations occurring in other threads because we
621 // reach no safepoints in the revocation path.
622 // Also check the epoch because even if threads match, another thread
623 // can come in with a CAS to steal the bias of an object that has a
624 // stale epoch.
625 ResourceMark rm;
626 if (TraceBiasedLocking) {
627 tty->print_cr("Revoking bias by walking my own stack:");
628 }
629 EventBiasedLockSelfRevocation event;
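        // Unlike the revocation paths below, no safepoint id is recorded for
        // this event because self-revocation does not take a safepoint.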
630 BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD, NULL);
631 ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
632 assert(cond == BIAS_REVOKED, "bias should have been revoked");
633 if (event.should_commit()) {
634 event.set_lockClass(k);
635 event.commit();
636 }
637 return cond;
638 } else {
639 EventBiasedLockRevocation event;
640 VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
641 VMThread::execute(&revoke);
642 if (event.should_commit() && (revoke.status_code() != NOT_BIASED)) {
643 event.set_lockClass(k);
644 // Subtract 1 to match the id of events committed inside the safepoint
645 event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
646 event.set_previousOwner(revoke.biased_locker());
647 event.commit();
648 }
649 return revoke.status_code();
650 }
651 }
652
653 assert((heuristics == HR_BULK_REVOKE) ||
654 (heuristics == HR_BULK_REBIAS), "?");
655 EventBiasedLockClassRevocation event;
656 VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
657 (heuristics == HR_BULK_REBIAS),
658 attempt_rebias);
659 VMThread::execute(&bulk_revoke);
660 if (event.should_commit()) {
661 event.set_revokedClass(obj->klass());
662 event.set_disableBiasing((heuristics != HR_BULK_REBIAS));
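      // HR_BULK_REVOKE disables biasing for the class entirely, whereas
      // HR_BULK_REBIAS keeps it enabled under a new epoch.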
663 // Subtract 1 to match the id of events committed inside the safepoint
664 event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
665 event.commit();
666 }
667 return bulk_revoke.status_code();
668 }
669
670
671 void BiasedLocking::revoke(GrowableArray<Handle>* objs) {
672 assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
673 if (objs->length() == 0) {
674 return;
675 }
676 VM_RevokeBias revoke(objs, JavaThread::current());
677 VMThread::execute(&revoke);
678 }
679
680
681 void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
682 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
683 oop obj = h_obj();
684 HeuristicsResult heuristics = update_heuristics(obj, false);
685 if (heuristics == HR_SINGLE_REVOKE) {
686 revoke_bias(obj, false, false, NULL, NULL);
687 } else if ((heuristics == HR_BULK_REBIAS) ||
688 (heuristics == HR_BULK_REVOKE)) {
689 bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
690 }
691 clean_up_cached_monitor_info();
692 }
693
694
695 void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) {
696 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
697 int len = objs->length();
698 for (int i = 0; i < len; i++) {
699 oop obj = (objs->at(i))();
700 HeuristicsResult heuristics = update_heuristics(obj, false);
701 if (heuristics == HR_SINGLE_REVOKE) {
702 revoke_bias(obj, false, false, NULL, NULL);
703 } else if ((heuristics == HR_BULK_REBIAS) ||
704 (heuristics == HR_BULK_REVOKE)) {
705 bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
706 }
707 }
708 clean_up_cached_monitor_info();
709 }
710
711
712 void BiasedLocking::preserve_marks() {
713 if (!UseBiasedLocking)
714 return;
715
716 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
717
718 assert(_preserved_oop_stack == NULL, "double initialization");
719 assert(_preserved_mark_stack == NULL, "double initialization");
720
721 // In order to reduce the number of mark words preserved during GC
722 // due to the presence of biased locking, we reinitialize most mark
|