18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "logging/log.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/klass.inline.hpp"
29 #include "oops/markOop.hpp"
30 #include "oops/oop.inline.hpp"
31 #include "runtime/atomic.hpp"
32 #include "runtime/basicLock.hpp"
33 #include "runtime/biasedLocking.hpp"
34 #include "runtime/task.hpp"
35 #include "runtime/vframe.hpp"
36 #include "runtime/vmThread.hpp"
37 #include "runtime/vm_operations.hpp"
38
// Whether biased locking has been switched on VM-wide. Starts out false;
// flipped once by enable_biased_locking() / the VM_EnableBiasedLocking
// operation declared below.
39 static bool _biased_locking_enabled = false;
// Global statistics counters for biased-locking revocations.
40 BiasedLockingCounters BiasedLocking::_counters;
41
// Side stacks of object handles and their saved mark words.
// NOTE(review): presumably used to preserve biased mark words across an
// operation that scribbles over object headers (e.g. GC) — the
// save/restore code is outside this view; confirm against the full file.
42 static GrowableArray<Handle>* _preserved_oop_stack = NULL;
43 static GrowableArray<markOop>* _preserved_mark_stack = NULL;
44
// Enable biased locking for instances of klass k: install the
// biased-locking prototype mark word on the klass, so that objects of this
// klass are treated as biasable from here on.
45 static void enable_biased_locking(InstanceKlass* k) {
46 k->set_prototype_header(markOopDesc::biased_locking_prototype());
47 }
48
49 class VM_EnableBiasedLocking: public VM_Operation {
50 private:
51 bool _is_cheap_allocated;
52 public:
53 VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
54 VMOp_Type type() const { return VMOp_EnableBiasedLocking; }
55 Mode evaluation_mode() const { return _is_cheap_allocated ? _async_safepoint : _safepoint; }
56 bool is_cheap_allocated() const { return _is_cheap_allocated; }
57
631 Klass *k = obj->klass();
632 markOop prototype_header = k->prototype_header();
633 if (mark->biased_locker() == THREAD &&
634 prototype_header->bias_epoch() == mark->bias_epoch()) {
635 // A thread is trying to revoke the bias of an object biased
636 // toward it, again likely due to an identity hash code
637 // computation. We can again avoid a safepoint in this case
638 // since we are only going to walk our own stack. There are no
639 // races with revocations occurring in other threads because we
640 // reach no safepoints in the revocation path.
641 // Also check the epoch because even if threads match, another thread
642 // can come in with a CAS to steal the bias of an object that has a
643 // stale epoch.
644 ResourceMark rm;
645 log_info(biasedlocking)("Revoking bias by walking my own stack:");
646 BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD);
647 ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
648 assert(cond == BIAS_REVOKED, "why not?");
649 return cond;
650 } else {
651 VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
652 VMThread::execute(&revoke);
653 return revoke.status_code();
654 }
655 }
656
657 assert((heuristics == HR_BULK_REVOKE) ||
658 (heuristics == HR_BULK_REBIAS), "?");
659 VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
660 (heuristics == HR_BULK_REBIAS),
661 attempt_rebias);
662 VMThread::execute(&bulk_revoke);
663 return bulk_revoke.status_code();
664 }
665
666
667 void BiasedLocking::revoke(GrowableArray<Handle>* objs) {
668 assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
669 if (objs->length() == 0) {
670 return;
671 }
672 VM_RevokeBias revoke(objs, JavaThread::current());
673 VMThread::execute(&revoke);
674 }
675
676
677 void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
678 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
679 oop obj = h_obj();
680 HeuristicsResult heuristics = update_heuristics(obj, false);
681 if (heuristics == HR_SINGLE_REVOKE) {
682 revoke_bias(obj, false, false, NULL);
|
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "logging/log.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/klass.inline.hpp"
29 #include "oops/markOop.hpp"
30 #include "oops/oop.inline.hpp"
31 #include "runtime/atomic.hpp"
32 #include "runtime/basicLock.hpp"
33 #include "runtime/biasedLocking.hpp"
34 #include "runtime/task.hpp"
35 #include "runtime/vframe.hpp"
36 #include "runtime/vmThread.hpp"
37 #include "runtime/vm_operations.hpp"
38 #include "trace/tracing.hpp"
39
// Whether biased locking has been switched on VM-wide. Starts out false;
// flipped once by enable_biased_locking() / the VM_EnableBiasedLocking
// operation declared below.
40 static bool _biased_locking_enabled = false;
// Global statistics counters for biased-locking revocations.
41 BiasedLockingCounters BiasedLocking::_counters;
42
// Side stacks of object handles and their saved mark words.
// NOTE(review): presumably used to preserve biased mark words across an
// operation that scribbles over object headers (e.g. GC) — the
// save/restore code is outside this view; confirm against the full file.
43 static GrowableArray<Handle>* _preserved_oop_stack = NULL;
44 static GrowableArray<markOop>* _preserved_mark_stack = NULL;
45
// Enable biased locking for instances of klass k: install the
// biased-locking prototype mark word on the klass, so that objects of this
// klass are treated as biasable from here on.
46 static void enable_biased_locking(InstanceKlass* k) {
47 k->set_prototype_header(markOopDesc::biased_locking_prototype());
48 }
49
50 class VM_EnableBiasedLocking: public VM_Operation {
51 private:
52 bool _is_cheap_allocated;
53 public:
54 VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
55 VMOp_Type type() const { return VMOp_EnableBiasedLocking; }
56 Mode evaluation_mode() const { return _is_cheap_allocated ? _async_safepoint : _safepoint; }
57 bool is_cheap_allocated() const { return _is_cheap_allocated; }
58
632 Klass *k = obj->klass();
633 markOop prototype_header = k->prototype_header();
634 if (mark->biased_locker() == THREAD &&
635 prototype_header->bias_epoch() == mark->bias_epoch()) {
636 // A thread is trying to revoke the bias of an object biased
637 // toward it, again likely due to an identity hash code
638 // computation. We can again avoid a safepoint in this case
639 // since we are only going to walk our own stack. There are no
640 // races with revocations occurring in other threads because we
641 // reach no safepoints in the revocation path.
642 // Also check the epoch because even if threads match, another thread
643 // can come in with a CAS to steal the bias of an object that has a
644 // stale epoch.
645 ResourceMark rm;
646 log_info(biasedlocking)("Revoking bias by walking my own stack:");
647 BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD);
648 ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
649 assert(cond == BIAS_REVOKED, "why not?");
650 return cond;
651 } else {
652 EventBiasedLockRevocation event;
653 VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
654 VMThread::execute(&revoke);
655 if (event.should_commit() && (revoke.status_code() != NOT_BIASED)) {
656 event.set_lockClass(k);
657 // Subtract 1 to match the id of events committed inside the safepoint
658 event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
659 event.commit();
660 }
661 return revoke.status_code();
662 }
663 }
664
665 assert((heuristics == HR_BULK_REVOKE) ||
666 (heuristics == HR_BULK_REBIAS), "?");
667 EventBiasedLockClassRevocation event;
668 VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
669 (heuristics == HR_BULK_REBIAS),
670 attempt_rebias);
671 VMThread::execute(&bulk_revoke);
672 if (event.should_commit()) {
673 event.set_revokedClass(obj->klass());
674 event.set_disableBiasing((heuristics != HR_BULK_REBIAS));
675 // Subtract 1 to match the id of events committed inside the safepoint
676 event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
677 event.commit();
678 }
679 return bulk_revoke.status_code();
680 }
681
682
683 void BiasedLocking::revoke(GrowableArray<Handle>* objs) {
684 assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
685 if (objs->length() == 0) {
686 return;
687 }
688 VM_RevokeBias revoke(objs, JavaThread::current());
689 VMThread::execute(&revoke);
690 }
691
692
693 void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
694 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
695 oop obj = h_obj();
696 HeuristicsResult heuristics = update_heuristics(obj, false);
697 if (heuristics == HR_SINGLE_REVOKE) {
698 revoke_bias(obj, false, false, NULL);
|