< prev index next >
src/share/vm/runtime/synchronizer.cpp
Print this page
rev 13259 : 8184181: Use oopDesc::cas_set_mark() instead of raw CAS when accessing oop header
Reviewed-by: dcubed, kbarrett
*** 321,331 ****
if (mark == (markOop) lock) {
// If the object is stack-locked by the current thread, try to
// swing the displaced header from the BasicLock back to the mark.
assert(dhw->is_neutral(), "invariant");
! if ((markOop) Atomic::cmpxchg_ptr(dhw, object->mark_addr(), mark) == mark) {
TEVENT(fast_exit: release stack-lock);
return;
}
}
--- 321,331 ----
if (mark == (markOop) lock) {
// If the object is stack-locked by the current thread, try to
// swing the displaced header from the BasicLock back to the mark.
assert(dhw->is_neutral(), "invariant");
! if (object->cas_set_mark(dhw, mark) == mark) {
TEVENT(fast_exit: release stack-lock);
return;
}
}
*** 346,356 ****
if (mark->is_neutral()) {
// Anticipate successful CAS -- the ST of the displaced mark must
// be visible <= the ST performed by the CAS.
lock->set_displaced_header(mark);
! if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
TEVENT(slow_enter: release stacklock);
return;
}
// Fall through to inflate() ...
} else if (mark->has_locker() &&
--- 346,356 ----
if (mark->is_neutral()) {
// Anticipate successful CAS -- the ST of the displaced mark must
// be visible <= the ST performed by the CAS.
lock->set_displaced_header(mark);
! if (mark == obj()->cas_set_mark((markOop) lock, mark)) {
TEVENT(slow_enter: release stacklock);
return;
}
// Fall through to inflate() ...
} else if (mark->has_locker() &&
*** 756,766 ****
return hash;
}
hash = get_next_hash(Self, obj); // allocate a new hash code
temp = mark->copy_set_hash(hash); // merge the hash code into header
// use (machine word version) atomic operation to install the hash
! test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
if (test == mark) {
return hash;
}
// If atomic operation failed, we must inflate the header
// into heavy weight monitor. We could add more code here
--- 756,766 ----
return hash;
}
hash = get_next_hash(Self, obj); // allocate a new hash code
temp = mark->copy_set_hash(hash); // merge the hash code into header
// use (machine word version) atomic operation to install the hash
! test = obj->cas_set_mark(temp, mark);
if (test == mark) {
return hash;
}
// If atomic operation failed, we must inflate the header
// into heavy weight monitor. We could add more code here
*** 1450,1460 ****
m->Recycle();
m->_Responsible = NULL;
m->_recursions = 0;
m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // Consider: maintain by type/class
! markOop cmp = (markOop) Atomic::cmpxchg_ptr(markOopDesc::INFLATING(), object->mark_addr(), mark);
if (cmp != mark) {
omRelease(Self, m, true);
continue; // Interference -- just retry
}
--- 1450,1460 ----
m->Recycle();
m->_Responsible = NULL;
m->_recursions = 0;
m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // Consider: maintain by type/class
! markOop cmp = object->cas_set_mark(markOopDesc::INFLATING(), mark);
if (cmp != mark) {
omRelease(Self, m, true);
continue; // Interference -- just retry
}
*** 1545,1555 ****
m->set_object(object);
m->_recursions = 0;
m->_Responsible = NULL;
m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // consider: keep metastats by type/class
! if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
m->set_object(NULL);
m->set_owner(NULL);
m->Recycle();
omRelease(Self, m, true);
m = NULL;
--- 1545,1555 ----
m->set_object(object);
m->_recursions = 0;
m->_Responsible = NULL;
m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // consider: keep metastats by type/class
! if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
m->set_object(NULL);
m->set_owner(NULL);
m->Recycle();
omRelease(Self, m, true);
m = NULL;
< prev index next >