src/share/vm/runtime/biasedLocking.cpp

*** 144,153 ****
--- 144,154 ----
    return info;
  }
  
  
  static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread) {
+   assert(obj == oopDesc::bs()->read_barrier(obj), "expect to-space copy");
    markOop mark = obj->mark();
    if (!mark->has_bias_pattern()) {
      if (TraceBiasedLocking) {
        ResourceMark rm;
        tty->print_cr("  (Skipping revocation of object of type %s because it's no longer biased)",
*** 323,333 ****
  static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                     bool bulk_rebias,
                                                                     bool attempt_rebias_of_object,
                                                                     JavaThread* requesting_thread) {
    assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
! 
    if (TraceBiasedLocking) {
      tty->print_cr("* Beginning bulk revocation (kind == %s) because of object "
                    INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                    (bulk_rebias ? "rebias" : "revoke"),
                    p2i((void *) o), (intptr_t) o->mark(), o->klass()->external_name());
--- 324,334 ----
  static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                     bool bulk_rebias,
                                                                     bool attempt_rebias_of_object,
                                                                     JavaThread* requesting_thread) {
    assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
!   assert(o == oopDesc::bs()->read_barrier(o), "expect to-space copy");
    if (TraceBiasedLocking) {
      tty->print_cr("* Beginning bulk revocation (kind == %s) because of object "
                    INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                    (bulk_rebias ? "rebias" : "revoke"),
                    p2i((void *) o), (intptr_t) o->mark(), o->klass()->external_name());
*** 365,374 ****
--- 366,376 ----
            oop owner = mon_info->owner();
            markOop mark = owner->mark();
            if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
              // We might have encountered this object already in the case of recursive locking
              assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
+             assert(owner == oopDesc::bs()->read_barrier(owner), "expect to-space copy");
              owner->set_mark(mark->set_bias_epoch(cur_epoch));
            }
          }
        }
      }
*** 529,538 ****
--- 531,542 ----
  
  
  BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) {
    assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
  
+   assert(obj() == oopDesc::bs()->read_barrier(obj()), "must be to-space copy");
+ 
    // We can revoke the biases of anonymously-biased objects
    // efficiently enough that we should not cause these revocations to
    // update the heuristics because doing so may cause unwanted bulk
    // revocations (which are expensive) to occur.
    markOop mark = obj->mark();
*** 643,652 ****
--- 647,657 ----
  
  
  void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
    assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
    oop obj = h_obj();
+   assert(obj == oopDesc::bs()->read_barrier(obj), "expect to-space copy");
    HeuristicsResult heuristics = update_heuristics(obj, false);
    if (heuristics == HR_SINGLE_REVOKE) {
      revoke_bias(obj, false, false, NULL);
    } else if ((heuristics == HR_BULK_REBIAS) ||
               (heuristics == HR_BULK_REVOKE)) {
*** 706,715 ****
--- 711,721 ----
            // Walk monitors youngest to oldest
            for (int i = len - 1; i >= 0; i--) {
              MonitorInfo* mon_info = monitors->at(i);
              if (mon_info->owner_is_scalar_replaced()) continue;
              oop owner = mon_info->owner();
+             assert(owner == oopDesc::bs()->read_barrier(owner), "expect to-space copy");
              if (owner != NULL) {
                markOop mark = owner->mark();
                if (mark->has_bias_pattern()) {
                  _preserved_oop_stack->push(Handle(cur, owner));
                  _preserved_mark_stack->push(mark);
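
Note: every hunk above adds the same invariant check: biased-locking code should only read or rewrite the mark word of an object's to-space copy, which is what oopDesc::bs()->read_barrier() resolves under Shenandoah. The following is a minimal sketch of that assert-then-mutate pattern, not part of this change; it assumes the barrier-set API shown in the hunks, compiles only inside the HotSpot source tree, and the helper name set_mark_checked is hypothetical.

#include "oops/oop.inline.hpp"  // oop, oopDesc, markOop (HotSpot-internal header)

// Sketch only: check the to-space invariant before touching the object header.
static void set_mark_checked(oop obj, markOop new_mark) {
  // Under Shenandoah, read_barrier() returns the to-space copy of obj;
  // the assert verifies the caller already passed that copy in.
  assert(obj == oopDesc::bs()->read_barrier(obj), "expect to-space copy");
  obj->set_mark(new_mark);
}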