
src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.cpp

Move G1 runtime barrier entries
 
 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1BarrierSetAssembler.hpp"
+#include "gc/g1/g1BarrierSetRuntime.hpp"
 #include "gc/g1/g1ThreadLocalData.hpp"
 #include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1ThreadLocalData.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "interpreter/interp_masm.hpp"

@@ -72,21 +73,21 @@
   if (addr != R0) {
     assert_different_registers(count, R0);
     __ mov(R0, addr);
   }
 #ifdef AARCH64
-  __ zero_extend(R1, count, 32); // G1BarrierSet::write_ref_array_pre_*_entry takes size_t
+  __ zero_extend(R1, count, 32); // G1BarrierSetRuntime::write_ref_array_pre_*_entry takes size_t
 #else
   if (count != R1) {
     __ mov(R1, count);
   }
 #endif // AARCH64
   if (UseCompressedOops) {
-    __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry));
+    __ call(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry));
   } else {
-    __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry));
+    __ call(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry));
   }
 #ifdef AARCH64
   for (int i = callee_saved_regs - 2; i >= 0; i -= 2) {
     __ raw_pop(as_Register(i), as_Register(i+1));
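This hunk only renames the callee: addr is still marshalled into R0 and count into R1 (zero-extended on AARCH64, since the entry takes a size_t), and the call still goes through CAST_FROM_FN_PTR. As a hedged sketch of what the pre-array entry presumably does on the runtime side (body assumed, not part of this changeset):

  // Sketch (assumed body): hand the about-to-be-overwritten oops to the
  // G1 barrier set's SATB pre-write logic.
  void G1BarrierSetRuntime::write_ref_array_pre_oop_entry(oop* dst, size_t length) {
    G1BarrierSet* bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
    bs->write_ref_array_pre(dst, length, false /* dest_uninitialized */);
  }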
@@ -104,11 +105,11 @@
   if (addr != R0) {
     assert_different_registers(count, R0);
     __ mov(R0, addr);
   }
 #ifdef AARCH64
-  __ zero_extend(R1, count, 32); // G1BarrierSet::write_ref_array_post_entry takes size_t
+  __ zero_extend(R1, count, 32); // G1BarrierSetRuntime::write_ref_array_post_entry takes size_t
 #else
   if (count != R1) {
     __ mov(R1, count);
   }
 #if R9_IS_SCRATCHED
@@ -118,11 +119,11 @@
   // is scratched. Note that the optimization might not be to
   // difficult for this particular call site.
   __ push(R9);
 #endif // !R9_IS_SCRATCHED
 #endif // !AARCH64
-  __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry));
+  __ call(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry));
 #ifndef AARCH64
 #if R9_IS_SCRATCHED
   __ pop(R9);
 #endif // !R9_IS_SCRATCHED
 #endif // !AARCH64
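The post-barrier call site keeps its R9 save/restore dance on 32-bit ARM; only the target symbol changes. A sketch of the post-array entry it now reaches (again an assumption about the runtime side, not shown in this webrev):

  // Sketch (assumed body): dirty the cards spanning [dst, dst + length).
  void G1BarrierSetRuntime::write_ref_array_post_entry(HeapWord* dst, size_t length) {
    G1BarrierSet* bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
    bs->write_ref_array(dst, length);
  }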
@@ -203,11 +204,11 @@
   if (pre_val != R0) {
     __ mov(R0, pre_val);
   }
   __ mov(R1, Rthread);
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), R0, R1);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), R0, R1);
 #ifdef AARCH64
   if (store_addr != noreg) {
     __ raw_pop(store_addr, new_val);
   } else {
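Here the old SharedRuntime::g1_wb_pre leaf call becomes G1BarrierSetRuntime::write_ref_field_pre_entry, with the same (pre_val, thread) argument pair in R0/R1. A sketch of the likely entry body, using the G1ThreadLocalData accessors this file already includes (assumed, not part of the diff):

  // Sketch (assumed body): record the previous field value on the
  // thread-local SATB mark queue; null previous values need no record.
  void G1BarrierSetRuntime::write_ref_field_pre_entry(oopDesc* orig, JavaThread* thread) {
    if (orig == NULL) {
      return;
    }
    G1ThreadLocalData::satb_mark_queue(thread).enqueue(orig);
  }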
@@ -294,11 +295,11 @@
   if (card_addr != R0) {
     __ mov(R0, card_addr);
   }
   __ mov(R1, Rthread);
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), R0, R1);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), R0, R1);

   __ bind(done);
 }

 void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
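Symmetrically, SharedRuntime::g1_wb_post becomes write_ref_field_post_entry, taking the card address and the current thread. A matching sketch (assumed body):

  // Sketch (assumed body): queue the dirtied card on the thread-local
  // dirty card queue for later refinement.
  void G1BarrierSetRuntime::write_ref_field_post_entry(void* card_addr, JavaThread* thread) {
    G1ThreadLocalData::dirty_card_queue(thread).enqueue(card_addr);
  }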
@@ -465,11 +466,11 @@
   __ save_live_registers();

   assert(r_pre_val_0 == c_rarg0, "pre_val should be in R0");
   __ mov(c_rarg1, Rthread);
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), c_rarg0, c_rarg1);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), c_rarg0, c_rarg1);

   __ restore_live_registers_without_return();

   __ b(done);
 }
@@ -572,11 +573,11 @@
   __ save_live_registers();

   assert(r_card_addr_0 == c_rarg0, "card_addr should be in R0");
   __ mov(c_rarg1, Rthread);
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), c_rarg0, c_rarg1);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), c_rarg0, c_rarg1);

   __ restore_live_registers_without_return();

   __ b(done);
 }
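Taken together, the last two hunks (the save_live_registers / restore_live_registers_without_return pattern suggests these are the barrier runtime stub slow paths) now reach the same two field entries as the call sites above, so every G1 write-barrier slow path in this file funnels through one runtime interface; only the argument registers (R0/R1 vs. c_rarg0/c_rarg1) differ per call site.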