# HG changeset patch
# User rkennke
# Date 1581705797 -3600
#      Fri Feb 14 19:43:17 2020 +0100
# Node ID 414c8cf4c22abb716be648388252d679d3f1be80
# Parent  9b4d873446c9ed605229b420b405312b35fd4b43
8239081: Shenandoah: Consolidate C1 LRB and native barriers

diff --git a/src/hotspot/cpu/aarch64/gc/shenandoah/c1/shenandoahBarrierSetC1_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shenandoah/c1/shenandoahBarrierSetC1_aarch64.cpp
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/c1/shenandoahBarrierSetC1_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/c1/shenandoahBarrierSetC1_aarch64.cpp
@@ -100,7 +100,7 @@
   __ xchg(access.resolved_addr(), value_opr, result, tmp);

   if (access.is_oop()) {
-    result = load_reference_barrier(access.gen(), result, LIR_OprFact::addressConst(0));
+    result = load_reference_barrier(access.gen(), result, LIR_OprFact::addressConst(0), false);
     LIR_Opr tmp = gen->new_register(type);
     __ move(result, tmp);
     result = tmp;
diff --git a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
@@ -581,7 +581,11 @@
   __ bind(slow_path);
   ce->store_parameter(res, 0);
   ce->store_parameter(addr, 1);
-  __ far_call(RuntimeAddress(bs->load_reference_barrier_rt_code_blob()->code_begin()));
+  if (stub->is_native()) {
+    __ far_call(RuntimeAddress(bs->load_reference_barrier_native_rt_code_blob()->code_begin()));
+  } else {
+    __ far_call(RuntimeAddress(bs->load_reference_barrier_rt_code_blob()->code_begin()));
+  }

   __ b(*stub->continuation());
 }
@@ -636,14 +640,16 @@
   __ epilogue();
 }

-void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm) {
+void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, bool native) {
   __ prologue("shenandoah_load_reference_barrier", false);
   // arg0 : object to be resolved

   __ push_call_clobbered_registers();
   __ load_parameter(0, r0);
   __ load_parameter(1, r1);
-  if (UseCompressedOops) {
+  if (native) {
+    __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native));
+  } else if (UseCompressedOops) {
     __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow));
   } else {
     __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier));
diff --git a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp
@@ -72,7 +72,7 @@
   void gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub);
   void gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub);
   void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
-  void generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm);
+  void generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, bool native);
 #endif

   virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
diff --git a/src/hotspot/cpu/x86/gc/shenandoah/c1/shenandoahBarrierSetC1_x86.cpp b/src/hotspot/cpu/x86/gc/shenandoah/c1/shenandoahBarrierSetC1_x86.cpp
--- a/src/hotspot/cpu/x86/gc/shenandoah/c1/shenandoahBarrierSetC1_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/shenandoah/c1/shenandoahBarrierSetC1_x86.cpp
@@ -111,7 +111,7 @@
   __ xchg(access.resolved_addr(), result, result, LIR_OprFact::illegalOpr);

   if (access.is_oop()) {
-    result = load_reference_barrier(access.gen(), result, LIR_OprFact::addressConst(0));
+    result = load_reference_barrier(access.gen(), result, LIR_OprFact::addressConst(0), false);
     LIR_Opr tmp = gen->new_register(type);
     __ move(result, tmp);
     result = tmp;
diff --git a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp
@@ -859,8 +859,11 @@
   __ bind(slow_path);
   ce->store_parameter(res, 0);
   ce->store_parameter(addr, 1);
-  __ call(RuntimeAddress(bs->load_reference_barrier_rt_code_blob()->code_begin()));
-
+  if (stub->is_native()) {
+    __ call(RuntimeAddress(bs->load_reference_barrier_native_rt_code_blob()->code_begin()));
+  } else {
+    __ call(RuntimeAddress(bs->load_reference_barrier_rt_code_blob()->code_begin()));
+  }
   __ jmp(*stub->continuation());
 }

@@ -924,7 +927,7 @@
   __ epilogue();
 }

-void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm) {
+void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, bool native) {
   __ prologue("shenandoah_load_reference_barrier", false);
   // arg0 : object to be resolved

@@ -933,7 +936,9 @@
 #ifdef _LP64
   __ load_parameter(0, c_rarg0);
   __ load_parameter(1, c_rarg1);
-  if (UseCompressedOops) {
+  if (native) {
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native), c_rarg0, c_rarg1);
+  } else if (UseCompressedOops) {
     __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow), c_rarg0, c_rarg1);
   } else {
     __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier), c_rarg0, c_rarg1);
@@ -941,7 +946,11 @@
 #else
   __ load_parameter(0, rax);
   __ load_parameter(1, rbx);
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier), rax, rbx);
+  if (native) {
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native), rax, rbx);
+  } else {
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier), rax, rbx);
+  }
 #endif

   __ restore_live_registers_except_rax(true);
diff --git a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp
@@ -70,7 +70,7 @@
   void gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub);
   void gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub);
   void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
-  void generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm);
+  void generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, bool native);
 #endif

   void load_reference_barrier(MacroAssembler* masm, Register dst, Address src);
diff --git a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
--- a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
+++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
@@ -107,15 +107,15 @@
   __ branch_destination(slow->continuation());
 }

-LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr) {
+LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr, bool native) {
   if (ShenandoahLoadRefBarrier) {
-    return load_reference_barrier_impl(gen, obj, addr);
+    return load_reference_barrier_impl(gen, obj, addr, native);
   } else {
     return obj;
   }
 }

-LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr) {
+LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr, bool native) {
   assert(ShenandoahLoadRefBarrier, "Should be enabled");

   obj = ensure_in_register(gen, obj, T_OBJECT);
@@ -150,7 +150,7 @@
   }
   __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

-  CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, addr, result, tmp1, tmp2);
+  CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, addr, result, tmp1, tmp2, native);
   __ branch(lir_cond_notEqual, T_INT, slow);
   __ branch_destination(slow->continuation());

@@ -211,26 +211,11 @@

   // 2: load a reference from src location and apply LRB if ShenandoahLoadRefBarrier is set
   if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
-    if (ShenandoahBarrierSet::use_load_reference_barrier_native(decorators, type)) {
-      BarrierSetC1::load_at_resolved(access, result);
-      LIR_OprList* args = new LIR_OprList();
-      LIR_Opr addr = access.resolved_addr();
-      addr = ensure_in_register(gen, addr, T_ADDRESS);
-      args->append(result);
-      args->append(addr);
-      BasicTypeList signature;
-      signature.append(T_OBJECT);
-      signature.append(T_ADDRESS);
-      LIR_Opr call_result = gen->call_runtime(&signature, args,
-                                              CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native),
-                                              objectType, NULL);
-      __ move(call_result, result);
-    } else {
-      LIR_Opr tmp = gen->new_register(T_OBJECT);
-      BarrierSetC1::load_at_resolved(access, tmp);
-      tmp = load_reference_barrier(gen, tmp, access.resolved_addr());
-      __ move(tmp, result);
-    }
+    LIR_Opr tmp = gen->new_register(T_OBJECT);
+    BarrierSetC1::load_at_resolved(access, tmp);
+    bool native = ShenandoahBarrierSet::use_load_reference_barrier_native(decorators, type);
+    tmp = load_reference_barrier(gen, tmp, access.resolved_addr(), native);
+    __ move(tmp, result);
   } else {
     BarrierSetC1::load_at_resolved(access, result);
   }
@@ -268,9 +253,15 @@
 };

 class C1ShenandoahLoadReferenceBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
+private:
+  bool _native;
+
+public:
+  C1ShenandoahLoadReferenceBarrierCodeGenClosure(bool native) : _native(native) {}
+
   virtual OopMapSet* generate_code(StubAssembler* sasm) {
     ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
-    bs->generate_c1_load_reference_barrier_runtime_stub(sasm);
+    bs->generate_c1_load_reference_barrier_runtime_stub(sasm, _native);
     return NULL;
   }
 };
@@ -281,16 +272,14 @@
                                                               "shenandoah_pre_barrier_slow",
                                                               false, &pre_code_gen_cl);
   if (ShenandoahLoadRefBarrier) {
-    C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_code_gen_cl;
+    C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_code_gen_cl(false);
     _load_reference_barrier_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                                                    "shenandoah_load_reference_barrier_slow",
                                                                    false, &lrb_code_gen_cl);
+
+    C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_native_code_gen_cl(true);
+    _load_reference_barrier_native_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
+                                                                          "shenandoah_load_reference_barrier_native_slow",
+                                                                          false, &lrb_native_code_gen_cl);
   }
 }
-
-const char* ShenandoahBarrierSetC1::rtcall_name_for_address(address entry) {
-  if (entry == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native)) {
-    return "ShenandoahRuntime::load_reference_barrier_native";
-  }
-  return NULL;
-}
diff --git a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp
--- a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp
+++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp
@@ -94,10 +94,10 @@
   LIR_Opr _result;
   LIR_Opr _tmp1;
   LIR_Opr _tmp2;
-
+  bool _native;
 public:
-  ShenandoahLoadReferenceBarrierStub(LIR_Opr obj, LIR_Opr addr, LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2) :
-    _obj(obj), _addr(addr), _result(result), _tmp1(tmp1), _tmp2(tmp2)
+  ShenandoahLoadReferenceBarrierStub(LIR_Opr obj, LIR_Opr addr, LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2, bool native) :
+    _obj(obj), _addr(addr), _result(result), _tmp1(tmp1), _tmp2(tmp2), _native(native)
   {
     assert(_obj->is_register(), "should be register");
     assert(_addr->is_register(), "should be register");
@@ -111,6 +111,7 @@
   LIR_Opr result() const { return _result; }
   LIR_Opr tmp1() const { return _tmp1; }
   LIR_Opr tmp2() const { return _tmp2; }
+  bool is_native() const { return _native; }

   virtual void emit_code(LIR_Assembler* e);
   virtual void visit(LIR_OpVisitState* visitor) {
@@ -190,13 +191,14 @@
 private:
   CodeBlob* _pre_barrier_c1_runtime_code_blob;
   CodeBlob* _load_reference_barrier_rt_code_blob;
+  CodeBlob* _load_reference_barrier_native_rt_code_blob;

   void pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val);

-  LIR_Opr load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr);
+  LIR_Opr load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr, bool native);
   LIR_Opr storeval_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, DecoratorSet decorators);

-  LIR_Opr load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr);
+  LIR_Opr load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr, bool native);

   LIR_Opr ensure_in_register(LIRGenerator* gen, LIR_Opr obj, BasicType type);

@@ -213,6 +215,10 @@
     return _load_reference_barrier_rt_code_blob;
   }

+  CodeBlob* load_reference_barrier_native_rt_code_blob() {
+    assert(_load_reference_barrier_native_rt_code_blob != NULL, "");
+    return _load_reference_barrier_native_rt_code_blob;
+  }
 protected:
   virtual void store_at_resolved(LIRAccess& access, LIR_Opr value);

@@ -226,7 +232,6 @@
 public:
   virtual void generate_c1_runtime_stubs(BufferBlob* buffer_blob);

-  virtual const char* rtcall_name_for_address(address entry);
 };

 #endif // SHARE_GC_SHENANDOAH_C1_SHENANDOAHBARRIERSETC1_HPP