< prev index next >

src/share/vm/runtime/biasedLocking.cpp

Print this page

        

*** 29,40 ****
--- 29,42 ----
  #include "runtime/biasedLocking.hpp"
  #include "runtime/task.hpp"
  #include "runtime/vframe.hpp"
  #include "runtime/vmThread.hpp"
  #include "runtime/vm_operations.hpp"
+ #if INCLUDE_JFR
  #include "jfr/support/jfrThreadId.hpp"
  #include "jfr/jfrEvents.hpp"
+ #endif
  
  static bool _biased_locking_enabled = false;
  BiasedLockingCounters BiasedLocking::_counters;
  
  static GrowableArray<Handle>* _preserved_oop_stack = NULL;
*** 451,476 ****
   protected:
    Handle* _obj;
    GrowableArray<Handle>* _objs;
    JavaThread* _requesting_thread;
    BiasedLocking::Condition _status_code;
!   traceid _biased_locker_id;
  
   public:
    VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
      : _obj(obj)
      , _objs(NULL)
      , _requesting_thread(requesting_thread)
      , _status_code(BiasedLocking::NOT_BIASED)
!     , _biased_locker_id(0) {}
  
    VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
      : _obj(NULL)
      , _objs(objs)
      , _requesting_thread(requesting_thread)
      , _status_code(BiasedLocking::NOT_BIASED)
!     , _biased_locker_id(0) {}
  
    virtual VMOp_Type type() const { return VMOp_RevokeBias; }
  
    virtual bool doit_prologue() {
      // Verify that there is actual work to do since the callers just
--- 453,484 ----
   protected:
    Handle* _obj;
    GrowableArray<Handle>* _objs;
    JavaThread* _requesting_thread;
    BiasedLocking::Condition _status_code;
!   JFR_ONLY(traceid _biased_locker_id;)
  
   public:
    VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
      : _obj(obj)
      , _objs(NULL)
      , _requesting_thread(requesting_thread)
      , _status_code(BiasedLocking::NOT_BIASED)
! #if INCLUDE_JFR
!     , _biased_locker_id(0)
! #endif
!     {}
  
    VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
      : _obj(NULL)
      , _objs(objs)
      , _requesting_thread(requesting_thread)
      , _status_code(BiasedLocking::NOT_BIASED)
! #if INCLUDE_JFR
!     , _biased_locker_id(0)
! #endif
!     {}
  
    virtual VMOp_Type type() const { return VMOp_RevokeBias; }
  
    virtual bool doit_prologue() {
      // Verify that there is actual work to do since the callers just
*** 497,509 ****
--- 505,519 ----
        if (TraceBiasedLocking) {
          tty->print_cr("Revoking bias with potentially per-thread safepoint:");
        }
        JavaThread* biased_locker = NULL;
        _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread, &biased_locker);
+ #if INCLUDE_JFR
        if (biased_locker != NULL) {
          _biased_locker_id = JFR_THREAD_ID(biased_locker);
        }
+ #endif
        clean_up_cached_monitor_info();
        return;
      } else {
        if (TraceBiasedLocking) {
          tty->print_cr("Revoking bias with global safepoint:");
*** 514,526 ****
--- 524,538 ----
    BiasedLocking::Condition status_code() const {
      return _status_code;
    }
  
+ #if INCLUDE_JFR
    traceid biased_locker() const {
      return _biased_locker_id;
    }
+ #endif
  };
  
  
  class VM_BulkRevokeBias : public VM_RevokeBias {
   private:
*** 626,673 ****
--- 638,697 ----
        // stale epoch.
        ResourceMark rm;
        if (TraceBiasedLocking) {
          tty->print_cr("Revoking bias by walking my own stack:");
        }
+ #if INCLUDE_JFR
        EventBiasedLockSelfRevocation event;
+ #endif
        BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD, NULL);
        ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
        assert(cond == BIAS_REVOKED, "why not?");
+ #if INCLUDE_JFR
        if (event.should_commit()) {
          event.set_lockClass(k);
          event.commit();
        }
+ #endif
        return cond;
      } else {
+ #if INCLUDE_JFR
        EventBiasedLockRevocation event;
+ #endif
        VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
        VMThread::execute(&revoke);
+ #if INCLUDE_JFR
        if (event.should_commit() && (revoke.status_code() != NOT_BIASED)) {
          event.set_lockClass(k);
          // Subtract 1 to match the id of events committed inside the safepoint
          event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
          event.set_previousOwner(revoke.biased_locker());
          event.commit();
        }
+ #endif
        return revoke.status_code();
      }
    }
  
    assert((heuristics == HR_BULK_REVOKE) || (heuristics == HR_BULK_REBIAS), "?");
+ #if INCLUDE_JFR
    EventBiasedLockClassRevocation event;
+ #endif
    VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
                                  (heuristics == HR_BULK_REBIAS),
                                  attempt_rebias);
    VMThread::execute(&bulk_revoke);
+ #if INCLUDE_JFR
    if (event.should_commit()) {
      event.set_revokedClass(obj->klass());
      event.set_disableBiasing((heuristics != HR_BULK_REBIAS));
      // Subtract 1 to match the id of events committed inside the safepoint
      event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
      event.commit();
    }
+ #endif
    return bulk_revoke.status_code();
  }
  
  
  void BiasedLocking::revoke(GrowableArray<Handle>* objs) {
< prev index next >