26 #include "gc/shenandoah/shenandoahHeap.hpp"
27 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
28 #include "gc/shenandoah/shenandoahHeuristics.hpp"
29 #include "gc/shenandoah/shenandoahRuntime.hpp"
30 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
31 #include "interpreter/interpreter.hpp"
32 #include "interpreter/interp_masm.hpp"
33 #include "runtime/sharedRuntime.hpp"
34 #include "runtime/thread.hpp"
35 #ifdef COMPILER1
36 #include "c1/c1_LIRAssembler.hpp"
37 #include "c1/c1_MacroAssembler.hpp"
38 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
39 #endif
40
41 #define __ masm->
42
// Entry point of the shared write-barrier stub; generated later during stub
// initialization, NULL until then.
address ShenandoahBarrierSetAssembler::_shenandoah_wb = NULL;
44
45 void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
46 Register addr, Register count, RegSet saved_regs) {
47 if (is_oop) {
48 bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
49 if (!dest_uninitialized && !ShenandoahHeap::heap()->heuristics()->can_do_traversal_gc()) {
50 __ push(saved_regs, sp);
51 if (count == c_rarg0) {
52 if (addr == c_rarg1) {
53 // exactly backwards!!
54 __ mov(rscratch1, c_rarg0);
55 __ mov(c_rarg0, c_rarg1);
56 __ mov(c_rarg1, rscratch1);
57 } else {
58 __ mov(c_rarg1, count);
59 __ mov(c_rarg0, addr);
60 }
61 } else {
62 __ mov(c_rarg0, addr);
63 __ mov(c_rarg1, count);
64 }
65 if (UseCompressedOops) {
66 __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_narrow_oop_entry), 2);
67 } else {
68 __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_oop_entry), 2);
69 }
70 __ pop(saved_regs, sp);
71 }
72 }
73 }
74
75 void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
76 Register start, Register end, Register scratch, RegSet saved_regs) {
77 if (is_oop) {
78 __ push(saved_regs, sp);
79 // must compute element count unless barrier set interface is changed (other platforms supply count)
80 assert_different_registers(start, end, scratch);
81 __ lea(scratch, Address(end, BytesPerHeapOop));
82 __ sub(scratch, scratch, start); // subtract start to get #bytes
|
26 #include "gc/shenandoah/shenandoahHeap.hpp"
27 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
28 #include "gc/shenandoah/shenandoahHeuristics.hpp"
29 #include "gc/shenandoah/shenandoahRuntime.hpp"
30 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
31 #include "interpreter/interpreter.hpp"
32 #include "interpreter/interp_masm.hpp"
33 #include "runtime/sharedRuntime.hpp"
34 #include "runtime/thread.hpp"
35 #ifdef COMPILER1
36 #include "c1/c1_LIRAssembler.hpp"
37 #include "c1/c1_MacroAssembler.hpp"
38 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
39 #endif
40
41 #define __ masm->
42
// Entry point of the shared write-barrier stub; generated later during stub
// initialization, NULL until then.
address ShenandoahBarrierSetAssembler::_shenandoah_wb = NULL;
44
45 void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
46 Register src, Register dst, Register count, RegSet saved_regs) {
47 if (is_oop) {
48 bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
49 if (!dest_uninitialized && !ShenandoahHeap::heap()->heuristics()->can_do_traversal_gc()) {
50 __ push(saved_regs, sp);
51 if (count == c_rarg0) {
52 if (dst == c_rarg1) {
53 // exactly backwards!!
54 __ mov(rscratch1, c_rarg0);
55 __ mov(c_rarg0, c_rarg1);
56 __ mov(c_rarg1, rscratch1);
57 } else {
58 __ mov(c_rarg1, count);
59 __ mov(c_rarg0, dst);
60 }
61 } else {
62 __ mov(c_rarg0, dst);
63 __ mov(c_rarg1, count);
64 }
65 if (UseCompressedOops) {
66 __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_narrow_oop_entry), 2);
67 } else {
68 __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_oop_entry), 2);
69 }
70 __ pop(saved_regs, sp);
71 }
72 }
73 }
74
75 void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
76 Register start, Register end, Register scratch, RegSet saved_regs) {
77 if (is_oop) {
78 __ push(saved_regs, sp);
79 // must compute element count unless barrier set interface is changed (other platforms supply count)
80 assert_different_registers(start, end, scratch);
81 __ lea(scratch, Address(end, BytesPerHeapOop));
82 __ sub(scratch, scratch, start); // subtract start to get #bytes
|