--- old/src/share/vm/runtime/biasedLocking.cpp	2020-01-16 16:56:32.935387057 +0300
+++ new/src/share/vm/runtime/biasedLocking.cpp	2020-01-16 16:56:32.887388607 +0300
@@ -31,8 +31,10 @@
 #include "runtime/vframe.hpp"
 #include "runtime/vmThread.hpp"
 #include "runtime/vm_operations.hpp"
+#if INCLUDE_JFR
 #include "jfr/support/jfrThreadId.hpp"
 #include "jfr/jfrEvents.hpp"
+#endif
 
 static bool _biased_locking_enabled = false;
 BiasedLockingCounters BiasedLocking::_counters;
@@ -453,7 +455,7 @@
   GrowableArray<Handle>* _objs;
   JavaThread* _requesting_thread;
   BiasedLocking::Condition _status_code;
-  traceid _biased_locker_id;
+  JFR_ONLY(traceid _biased_locker_id;)
 
  public:
   VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
@@ -461,14 +463,20 @@
     , _objs(NULL)
     , _requesting_thread(requesting_thread)
     , _status_code(BiasedLocking::NOT_BIASED)
-    , _biased_locker_id(0) {}
+#if INCLUDE_JFR
+    , _biased_locker_id(0)
+#endif
+    {}
 
   VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
     : _obj(NULL)
     , _objs(objs)
     , _requesting_thread(requesting_thread)
     , _status_code(BiasedLocking::NOT_BIASED)
-    , _biased_locker_id(0) {}
+#if INCLUDE_JFR
+    , _biased_locker_id(0)
+#endif
+    {}
 
   virtual VMOp_Type type() const { return VMOp_RevokeBias; }
 
@@ -499,9 +507,11 @@
       }
       JavaThread* biased_locker = NULL;
       _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread, &biased_locker);
+#if INCLUDE_JFR
       if (biased_locker != NULL) {
         _biased_locker_id = JFR_THREAD_ID(biased_locker);
       }
+#endif
       clean_up_cached_monitor_info();
       return;
     } else {
@@ -516,9 +526,11 @@
     return _status_code;
   }
 
+#if INCLUDE_JFR
   traceid biased_locker() const {
     return _biased_locker_id;
   }
+#endif
 };
 
@@ -628,19 +640,26 @@
       if (TraceBiasedLocking) {
         tty->print_cr("Revoking bias by walking my own stack:");
       }
+#if INCLUDE_JFR
       EventBiasedLockSelfRevocation event;
+#endif
       BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD, NULL);
       ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
       assert(cond == BIAS_REVOKED, "why not?");
+#if INCLUDE_JFR
       if (event.should_commit()) {
         event.set_lockClass(k);
         event.commit();
       }
+#endif
       return cond;
     } else {
+#if INCLUDE_JFR
       EventBiasedLockRevocation event;
+#endif
       VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
       VMThread::execute(&revoke);
+#if INCLUDE_JFR
       if (event.should_commit() && (revoke.status_code() != NOT_BIASED)) {
         event.set_lockClass(k);
         // Subtract 1 to match the id of events committed inside the safepoint
@@ -648,17 +667,21 @@
         event.set_previousOwner(revoke.biased_locker());
         event.commit();
       }
+#endif
       return revoke.status_code();
     }
   }
 
   assert((heuristics == HR_BULK_REVOKE) || (heuristics == HR_BULK_REBIAS), "?");
+#if INCLUDE_JFR
   EventBiasedLockClassRevocation event;
+#endif
   VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
                                 (heuristics == HR_BULK_REBIAS),
                                 attempt_rebias);
   VMThread::execute(&bulk_revoke);
+#if INCLUDE_JFR
   if (event.should_commit()) {
     event.set_revokedClass(obj->klass());
     event.set_disableBiasing((heuristics != HR_BULK_REBIAS));
@@ -666,6 +689,7 @@
     event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
     event.commit();
   }
+#endif
   return bulk_revoke.status_code();
 }