// src/hotspot/share/gc/shared/c1/barrierSetC1.cpp
// (reconstructed from a webrev diff view: BarrierSetC1_v2 -> BarrierSetC1_v3)
// PATCHED_ADDR is a placeholder displacement used when the real field
// offset is not known at code-emission time and must be patched into
// the instruction later (see C1_NEEDS_PATCHING below).
#ifndef PATCHED_ADDR
#define PATCHED_ADDR (max_jint)
#endif

// Shorthand for appending LIR instructions to the generator's list;
// debug builds record the emitting file/line for diagnostics.
// Requires a local 'gen' (LIRGenerator*) to be in scope.
#ifdef ASSERT
#define __ gen->lir(__FILE__, __LINE__)->
#else
#define __ gen->lir()->
#endif
-LIR_Opr BarrierSetC1::resolve_address(LIRGenerator *lir_generator, DecoratorSet decorators, BasicType type,
- LIRItem& base, LIR_Opr offset, bool resolve_in_register) {
+LIR_Opr BarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
+ DecoratorSet decorators = access.decorators();
bool on_array = (decorators & IN_HEAP_ARRAY) != 0;
bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
- LIR_Opr addr;
+
+ LIRItem& base = access.base().item();
+ LIR_Opr offset = access.offset().opr();
+ LIRGenerator *gen = access.gen();
+
+ LIR_Opr addr_opr;
if (on_array) {
- addr = LIR_OprFact::address(lir_generator->emit_array_address(base.result(), offset, type));
+ addr_opr = LIR_OprFact::address(gen->emit_array_address(base.result(), offset, access.type()));
} else if (needs_patching) {
// we need to patch the offset in the instruction so don't allow
// generate_address to try to be smart about emitting the -1.
// Otherwise the patching code won't know how to find the
// instruction to patch.
- addr = LIR_OprFact::address(new LIR_Address(base.result(), PATCHED_ADDR, type));
+ addr_opr = LIR_OprFact::address(new LIR_Address(base.result(), PATCHED_ADDR, access.type()));
} else {
- addr = LIR_OprFact::address(lir_generator->generate_address(base.result(), offset, 0, 0, type));
+ addr_opr = LIR_OprFact::address(gen->generate_address(base.result(), offset, 0, 0, access.type()));
}
if (resolve_in_register) {
- LIR_Opr resolved_addr = lir_generator->new_pointer_register();
- __ leal(addr, resolved_addr);
- resolved_addr = LIR_OprFact::address(new LIR_Address(resolved_addr, type));
+ LIR_Opr resolved_addr = gen->new_pointer_register();
+ __ leal(addr_opr, resolved_addr);
+ resolved_addr = LIR_OprFact::address(new LIR_Address(resolved_addr, access.type()));
return resolved_addr;
} else {
- return addr;
+ return addr_opr;
}
}
-void BarrierSetC1::store_at(LIRGenerator *lir_generator, DecoratorSet decorators, BasicType type,
- LIRItem& base, LIR_Opr offset, LIR_Opr value,
- CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
+void BarrierSetC1::store_at(LIRAccess& access,LIR_Opr value) {
+ DecoratorSet decorators = access.decorators();
bool in_heap = (decorators & IN_HEAP) != 0;
assert(in_heap, "not supported yet");
- LIR_Opr addr = resolve_address(lir_generator, decorators, type, base, offset, false);
- store_at_resolved(lir_generator, decorators, type, addr, base, offset, value, patch_info, store_emit_info);
+ LIR_Opr resolved = resolve_address(access, false);
+ access.set_resolved_addr(resolved);
+ store_at_resolved(access, value);
}
-void BarrierSetC1::load_at(LIRGenerator *lir_generator, DecoratorSet decorators, BasicType type,
- LIRItem& base, LIR_Opr offset, LIR_Opr result,
- CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
+void BarrierSetC1::load_at(LIRAccess& access, LIR_Opr result) {
+ DecoratorSet decorators = access.decorators();
bool in_heap = (decorators & IN_HEAP) != 0;
assert(in_heap, "not supported yet");
- LIR_Opr addr = resolve_address(lir_generator, decorators, type, base, offset, false);
- load_at_resolved(lir_generator, decorators, type, addr, base, offset, result, patch_info, load_emit_info);
+ LIR_Opr resolved = resolve_address(access, false);
+ access.set_resolved_addr(resolved);
+ load_at_resolved(access, result);
}
-LIR_Opr BarrierSetC1::atomic_cmpxchg_at(LIRGenerator *lir_generator, DecoratorSet decorators, BasicType type,
- LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
+LIR_Opr BarrierSetC1::atomic_cmpxchg_at(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
+ DecoratorSet decorators = access.decorators();
bool in_heap = (decorators & IN_HEAP) != 0;
assert(in_heap, "not supported yet");
- base.load_item();
- offset.load_nonconstant();
+ access.load_address();
- LIR_Opr addr = resolve_address(lir_generator, decorators, type, base, offset.result(), true);
- return atomic_cmpxchg_resolved(lir_generator, decorators, type, addr, base, offset, cmp_value, new_value);
+ LIR_Opr resolved = resolve_address(access, true);
+ access.set_resolved_addr(resolved);
+ return atomic_cmpxchg_resolved(access, cmp_value, new_value);
}
-LIR_Opr BarrierSetC1::atomic_xchg(LIRGenerator* lir_generator, DecoratorSet decorators, BasicType type,
- LIRItem& base, LIRItem& offset, LIRItem& value) {
+LIR_Opr BarrierSetC1::atomic_xchg(LIRAccess& access, LIRItem& value) {
+ DecoratorSet decorators = access.decorators();
bool in_heap = (decorators & IN_HEAP) != 0;
assert(in_heap, "not supported yet");
- base.load_item();
- offset.load_nonconstant();
+ access.load_address();
- LIR_Opr addr = resolve_address(lir_generator, decorators, type, base, offset.result(), true);
- return atomic_xchg_resolved(lir_generator, decorators, type, addr, base, offset, value);
+ LIR_Opr resolved = resolve_address(access, true);
+ access.set_resolved_addr(resolved);
+ return atomic_xchg_resolved(access, value);
}
-LIR_Opr BarrierSetC1::atomic_add_at(LIRGenerator* lir_generator, DecoratorSet decorators, BasicType type,
- LIRItem& base, LIRItem& offset, LIRItem& value) {
+LIR_Opr BarrierSetC1::atomic_add_at(LIRAccess& access, LIRItem& value) {
+ DecoratorSet decorators = access.decorators();
bool in_heap = (decorators & IN_HEAP) != 0;
assert(in_heap, "not supported yet");
- base.load_item();
- offset.load_nonconstant();
+ access.load_address();
- LIR_Opr addr = resolve_address(lir_generator, decorators, type, base, offset.result(), true);
- return atomic_add_at_resolved(lir_generator, decorators, type, addr, base, offset, value);
+ LIR_Opr resolved = resolve_address(access, true);
+ access.set_resolved_addr(resolved);
+ return atomic_add_at_resolved(access, value);
}
-void BarrierSetC1::store_at_resolved(LIRGenerator *lir_generator, DecoratorSet decorators, BasicType type,
- LIR_Opr addr, LIRItem& base, LIR_Opr offset, LIR_Opr value,
- CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
+void BarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
+ DecoratorSet decorators = access.decorators();
bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses) && os::is_MP();
bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0;
+ LIRGenerator* gen = access.gen();
if (mask_boolean) {
- value = lir_generator->mask_boolean(base.result(), value, store_emit_info);
+ value = gen->mask_boolean(access.base().opr(), value, access.access_emit_info());
}
if (is_volatile && os::is_MP()) {
__ membar_release();
}
LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
if (is_volatile && !needs_patching) {
- lir_generator->volatile_field_store(value, addr->as_address_ptr(), store_emit_info);
+ gen->volatile_field_store(value, access.resolved_addr()->as_address_ptr(), access.access_emit_info());
} else {
- __ store(value, addr->as_address_ptr(), store_emit_info, patch_code);
+ __ store(value, access.resolved_addr()->as_address_ptr(), access.access_emit_info(), patch_code);
}
if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ membar();
}
}
-void BarrierSetC1::load_at_resolved(LIRGenerator *lir_generator, DecoratorSet decorators, BasicType type,
- LIR_Opr addr, LIRItem& base, LIR_Opr offset, LIR_Opr result,
- CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
+void BarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
+ LIRGenerator *gen = access.gen();
+ DecoratorSet decorators = access.decorators();
bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses) && os::is_MP();
bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0;
if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile) {
__ membar();
}
LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
if (is_volatile && !needs_patching) {
- lir_generator->volatile_field_load(addr->as_address_ptr(), result, load_emit_info);
+ gen->volatile_field_load(access.resolved_addr()->as_address_ptr(), result, access.access_emit_info());
} else {
- __ load(addr->as_address_ptr(), result, load_emit_info, patch_code);
+ __ load(access.resolved_addr()->as_address_ptr(), result, access.access_emit_info(), patch_code);
}
if (is_volatile && os::is_MP()) {
__ membar_acquire();
}
__ move(LIR_OprFact::intConst(1), result);
__ branch_destination(equalZeroLabel->label());
}
}
-LIR_Opr BarrierSetC1::atomic_cmpxchg_resolved(LIRGenerator *lir_generator, DecoratorSet decorators, BasicType type,
- LIR_Opr addr, LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
- return lir_generator->atomic_cmpxchg(type, addr, cmp_value, new_value);
+LIR_Opr BarrierSetC1::atomic_cmpxchg_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
+ LIRGenerator *gen = access.gen();
+ return gen->atomic_cmpxchg(access.type(), access.resolved_addr(), cmp_value, new_value);
}
-LIR_Opr BarrierSetC1::atomic_xchg_resolved(LIRGenerator* lir_generator, DecoratorSet decorators, BasicType type,
- LIR_Opr addr, LIRItem& base, LIRItem& offset, LIRItem& value) {
- return lir_generator->atomic_xchg(type, addr, value);
+LIR_Opr BarrierSetC1::atomic_xchg_resolved(LIRAccess& access, LIRItem& value) {
+ LIRGenerator *gen = access.gen();
+ return gen->atomic_xchg(access.type(), access.resolved_addr(), value);
}
-LIR_Opr BarrierSetC1::atomic_add_at_resolved(LIRGenerator* lir_generator, DecoratorSet decorators, BasicType type,
- LIR_Opr addr, LIRItem& base, LIRItem& offset, LIRItem& value) {
- return lir_generator->atomic_add(type, addr, value);
+LIR_Opr BarrierSetC1::atomic_add_at_resolved(LIRAccess& access, LIRItem& value) {
+ LIRGenerator *gen = access.gen();
+ return gen->atomic_add(access.type(), access.resolved_addr(), value);
}
-void BarrierSetC1::generate_referent_check(LIRGenerator* lir_generator, LIRItem& base, LIR_Opr offset, LabelObj* cont) {
+void BarrierSetC1::generate_referent_check(LIRAccess& access, LabelObj* cont) {
// We might be reading the value of the referent field of a
// Reference object in order to attach it back to the live
// object graph. If G1 is enabled then we need to record
// the value that is being returned in an SATB log buffer.
//
bool gen_pre_barrier = true; // Assume we need to generate pre_barrier.
bool gen_offset_check = true; // Assume we need to generate the offset guard.
bool gen_source_check = true; // Assume we need to check the src object for null.
bool gen_type_check = true; // Assume we need to check the reference_type.
+ LIRGenerator *gen = access.gen();
+
+ LIRItem& base = access.base().item();
+ LIR_Opr offset = access.offset().opr();
+
if (offset->is_constant()) {
LIR_Const* constant = offset->as_constant_ptr();
jlong off_con = (constant->type() == T_INT ?
(jlong)constant->as_jint() :
constant->as_jlong());
if (gen_pre_barrier && !PatchALot) {
// Can the klass of object be statically determined to be
// a sub-class of Reference?
ciType* type = base.value()->declared_type();
if ((type != NULL) && type->is_loaded()) {
- if (type->is_subtype_of(lir_generator->compilation()->env()->Reference_klass())) {
+ if (type->is_subtype_of(gen->compilation()->env()->Reference_klass())) {
gen_type_check = false;
} else if (type->is_klass() &&
- !lir_generator->compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
+ !gen->compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
// Not Reference and not Object klass.
gen_pre_barrier = false;
}
}
}
if (offset->type() == T_INT) {
referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
} else {
assert(offset->type() == T_LONG, "what else?");
- referent_off = lir_generator->new_register(T_LONG);
+ referent_off = gen->new_register(T_LONG);
__ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
}
__ cmp(lir_cond_notEqual, offset, referent_off);
__ branch(lir_cond_notEqual, offset->type(), cont->label());
}
// offset is a const and equals referent offset
// if (source == null) -> continue
__ cmp(lir_cond_equal, base.result(), LIR_OprFact::oopConst(NULL));
__ branch(lir_cond_equal, T_OBJECT, cont->label());
}
- LIR_Opr src_klass = lir_generator->new_register(T_OBJECT);
+ LIR_Opr src_klass = gen->new_register(T_OBJECT);
if (gen_type_check) {
// We have determined that offset == referent_offset && src != null.
// if (src->_klass->_reference_type == REF_NONE) -> continue
__ move(new LIR_Address(base.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), src_klass);
LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
- LIR_Opr reference_type = lir_generator->new_register(T_INT);
+ LIR_Opr reference_type = gen->new_register(T_INT);
__ move(reference_type_addr, reference_type);
__ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
__ branch(lir_cond_equal, T_INT, cont->label());
}
}
// (end of webrev diff view)