8 * This code is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * version 2 for more details (a copy is included in the LICENSE file that
12 * accompanied this code).
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#endif
41
42 #define __ masm->
43
44 #ifdef PRODUCT
45 #define BLOCK_COMMENT(str) /* nothing */
46 #else
|
8 * This code is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * version 2 for more details (a copy is included in the LICENSE file that
12 * accompanied this code).
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #include "precompiled.hpp"
25 #include "asm/macroAssembler.inline.hpp"
26 #include "gc/g1/g1BarrierSet.hpp"
27 #include "gc/g1/g1BarrierSetAssembler.hpp"
28 #include "gc/g1/g1BarrierSetRuntime.hpp"
29 #include "gc/g1/g1ThreadLocalData.hpp"
30 #include "gc/g1/g1CardTable.hpp"
31 #include "gc/g1/g1ThreadLocalData.hpp"
32 #include "gc/g1/heapRegion.hpp"
33 #include "interpreter/interp_masm.hpp"
34 #include "runtime/sharedRuntime.hpp"
35 #include "runtime/thread.hpp"
36 #include "utilities/macros.hpp"
37 #ifdef COMPILER1
38 #include "c1/c1_LIRAssembler.hpp"
39 #include "c1/c1_MacroAssembler.hpp"
40 #include "gc/g1/c1/g1BarrierSetC1.hpp"
41 #endif
42
43 #define __ masm->
44
45 #ifdef PRODUCT
46 #define BLOCK_COMMENT(str) /* nothing */
47 #else
|
56 assert( addr->encoding() < callee_saved_regs, "addr must be saved");
57 assert(count->encoding() < callee_saved_regs, "count must be saved");
58
59 BLOCK_COMMENT("PreBarrier");
60
61 #ifdef AARCH64
62 callee_saved_regs = align_up(callee_saved_regs, 2);
63 for (int i = 0; i < callee_saved_regs; i += 2) {
64 __ raw_push(as_Register(i), as_Register(i+1));
65 }
66 #else
67 RegisterSet saved_regs = RegisterSet(R0, as_Register(callee_saved_regs-1));
68 __ push(saved_regs | R9ifScratched);
69 #endif // AARCH64
70
71 if (addr != R0) {
72 assert_different_registers(count, R0);
73 __ mov(R0, addr);
74 }
75 #ifdef AARCH64
76 __ zero_extend(R1, count, 32); // G1BarrierSet::write_ref_array_pre_*_entry takes size_t
77 #else
78 if (count != R1) {
79 __ mov(R1, count);
80 }
81 #endif // AARCH64
82
83 if (UseCompressedOops) {
84 __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry));
85 } else {
86 __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry));
87 }
88
89 #ifdef AARCH64
90 for (int i = callee_saved_regs - 2; i >= 0; i -= 2) {
91 __ raw_pop(as_Register(i), as_Register(i+1));
92 }
93 #else
94 __ pop(saved_regs | R9ifScratched);
95 #endif // AARCH64
96 }
97 }
98
99 void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
100 Register addr, Register count, Register tmp) {
101
102 BLOCK_COMMENT("G1PostBarrier");
103 if (addr != R0) {
104 assert_different_registers(count, R0);
105 __ mov(R0, addr);
106 }
107 #ifdef AARCH64
108 __ zero_extend(R1, count, 32); // G1BarrierSet::write_ref_array_post_entry takes size_t
109 #else
110 if (count != R1) {
111 __ mov(R1, count);
112 }
113 #if R9_IS_SCRATCHED
114 // Safer to save R9 here since callers may have been written
115 // assuming R9 survives. This is suboptimal but is not in
116 // general worth optimizing for the few platforms where R9
117 // is scratched. Note that the optimization might not be to
118 // difficult for this particular call site.
119 __ push(R9);
120 #endif // !R9_IS_SCRATCHED
121 #endif // !AARCH64
122 __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry));
123 #ifndef AARCH64
124 #if R9_IS_SCRATCHED
125 __ pop(R9);
126 #endif // !R9_IS_SCRATCHED
127 #endif // !AARCH64
128 }
129
130 // G1 pre-barrier.
131 // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
132 // If store_addr != noreg, then previous value is loaded from [store_addr];
133 // in such case store_addr and new_val registers are preserved;
134 // otherwise pre_val register is preserved.
135 void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
136 Register store_addr,
137 Register new_val,
138 Register pre_val,
139 Register tmp1,
140 Register tmp2) {
141 Label done;
|
57 assert( addr->encoding() < callee_saved_regs, "addr must be saved");
58 assert(count->encoding() < callee_saved_regs, "count must be saved");
59
60 BLOCK_COMMENT("PreBarrier");
61
62 #ifdef AARCH64
63 callee_saved_regs = align_up(callee_saved_regs, 2);
64 for (int i = 0; i < callee_saved_regs; i += 2) {
65 __ raw_push(as_Register(i), as_Register(i+1));
66 }
67 #else
68 RegisterSet saved_regs = RegisterSet(R0, as_Register(callee_saved_regs-1));
69 __ push(saved_regs | R9ifScratched);
70 #endif // AARCH64
71
72 if (addr != R0) {
73 assert_different_registers(count, R0);
74 __ mov(R0, addr);
75 }
76 #ifdef AARCH64
77 __ zero_extend(R1, count, 32); // G1BarrierSetRuntime::write_ref_array_pre_*_entry takes size_t
78 #else
79 if (count != R1) {
80 __ mov(R1, count);
81 }
82 #endif // AARCH64
83
84 if (UseCompressedOops) {
85 __ call(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry));
86 } else {
87 __ call(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry));
88 }
89
90 #ifdef AARCH64
91 for (int i = callee_saved_regs - 2; i >= 0; i -= 2) {
92 __ raw_pop(as_Register(i), as_Register(i+1));
93 }
94 #else
95 __ pop(saved_regs | R9ifScratched);
96 #endif // AARCH64
97 }
98 }
99
100 void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
101 Register addr, Register count, Register tmp) {
102
103 BLOCK_COMMENT("G1PostBarrier");
104 if (addr != R0) {
105 assert_different_registers(count, R0);
106 __ mov(R0, addr);
107 }
108 #ifdef AARCH64
109 __ zero_extend(R1, count, 32); // G1BarrierSetRuntime::write_ref_array_post_entry takes size_t
110 #else
111 if (count != R1) {
112 __ mov(R1, count);
113 }
114 #if R9_IS_SCRATCHED
115 // Safer to save R9 here since callers may have been written
116 // assuming R9 survives. This is suboptimal but is not in
117 // general worth optimizing for the few platforms where R9
118 // is scratched. Note that the optimization might not be to
119 // difficult for this particular call site.
120 __ push(R9);
121 #endif // !R9_IS_SCRATCHED
122 #endif // !AARCH64
123 __ call(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry));
124 #ifndef AARCH64
125 #if R9_IS_SCRATCHED
126 __ pop(R9);
127 #endif // !R9_IS_SCRATCHED
128 #endif // !AARCH64
129 }
130
131 // G1 pre-barrier.
132 // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
133 // If store_addr != noreg, then previous value is loaded from [store_addr];
134 // in such case store_addr and new_val registers are preserved;
135 // otherwise pre_val register is preserved.
136 void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
137 Register store_addr,
138 Register new_val,
139 Register pre_val,
140 Register tmp1,
141 Register tmp2) {
142 Label done;
|
187 #ifdef AARCH64
188 if (store_addr != noreg) {
189 __ raw_push(store_addr, new_val);
190 } else {
191 __ raw_push(pre_val, ZR);
192 }
193 #else
194 if (store_addr != noreg) {
195 // avoid raw_push to support any ordering of store_addr and new_val
196 __ push(RegisterSet(store_addr) | RegisterSet(new_val));
197 } else {
198 __ push(pre_val);
199 }
200 #endif // AARCH64
201
202 if (pre_val != R0) {
203 __ mov(R0, pre_val);
204 }
205 __ mov(R1, Rthread);
206
207 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), R0, R1);
208
209 #ifdef AARCH64
210 if (store_addr != noreg) {
211 __ raw_pop(store_addr, new_val);
212 } else {
213 __ raw_pop(pre_val, ZR);
214 }
215 #else
216 if (store_addr != noreg) {
217 __ pop(RegisterSet(store_addr) | RegisterSet(new_val));
218 } else {
219 __ pop(pre_val);
220 }
221 #endif // AARCH64
222
223 __ bind(done);
224 }
225
226 // G1 post-barrier.
|
188 #ifdef AARCH64
189 if (store_addr != noreg) {
190 __ raw_push(store_addr, new_val);
191 } else {
192 __ raw_push(pre_val, ZR);
193 }
194 #else
195 if (store_addr != noreg) {
196 // avoid raw_push to support any ordering of store_addr and new_val
197 __ push(RegisterSet(store_addr) | RegisterSet(new_val));
198 } else {
199 __ push(pre_val);
200 }
201 #endif // AARCH64
202
203 if (pre_val != R0) {
204 __ mov(R0, pre_val);
205 }
206 __ mov(R1, Rthread);
207
208 __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), R0, R1);
209
210 #ifdef AARCH64
211 if (store_addr != noreg) {
212 __ raw_pop(store_addr, new_val);
213 } else {
214 __ raw_pop(pre_val, ZR);
215 }
216 #else
217 if (store_addr != noreg) {
218 __ pop(RegisterSet(store_addr) | RegisterSet(new_val));
219 } else {
220 __ pop(pre_val);
221 }
222 #endif // AARCH64
223
224 __ bind(done);
225 }
226
227 // G1 post-barrier.
|
278
279 __ strb(__ zero_register(tmp2), Address(card_addr));
280
281 __ ldr(tmp2, queue_index);
282 __ ldr(tmp3, buffer);
283
284 __ subs(tmp2, tmp2, wordSize);
285 __ b(runtime, lt); // go to runtime if now negative
286
287 __ str(tmp2, queue_index);
288
289 __ str(card_addr, Address(tmp3, tmp2));
290 __ b(done);
291
292 __ bind(runtime);
293
294 if (card_addr != R0) {
295 __ mov(R0, card_addr);
296 }
297 __ mov(R1, Rthread);
298 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), R0, R1);
299
300 __ bind(done);
301 }
302
303 void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
304 Register dst, Address src, Register tmp1, Register tmp2, Register tmp3) {
305 bool on_oop = type == T_OBJECT || type == T_ARRAY;
306 bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
307 bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
308 bool on_reference = on_weak || on_phantom;
309
310 ModRefBarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2, tmp3);
311 if (on_oop && on_reference) {
312 // Generate the G1 pre-barrier code to log the value of
313 // the referent field in an SATB buffer.
314 g1_write_barrier_pre(masm, noreg, noreg, dst, tmp1, tmp2);
315 }
316 }
317
|
279
280 __ strb(__ zero_register(tmp2), Address(card_addr));
281
282 __ ldr(tmp2, queue_index);
283 __ ldr(tmp3, buffer);
284
285 __ subs(tmp2, tmp2, wordSize);
286 __ b(runtime, lt); // go to runtime if now negative
287
288 __ str(tmp2, queue_index);
289
290 __ str(card_addr, Address(tmp3, tmp2));
291 __ b(done);
292
293 __ bind(runtime);
294
295 if (card_addr != R0) {
296 __ mov(R0, card_addr);
297 }
298 __ mov(R1, Rthread);
299 __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), R0, R1);
300
301 __ bind(done);
302 }
303
304 void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
305 Register dst, Address src, Register tmp1, Register tmp2, Register tmp3) {
306 bool on_oop = type == T_OBJECT || type == T_ARRAY;
307 bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
308 bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
309 bool on_reference = on_weak || on_phantom;
310
311 ModRefBarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2, tmp3);
312 if (on_oop && on_reference) {
313 // Generate the G1 pre-barrier code to log the value of
314 // the referent field in an SATB buffer.
315 g1_write_barrier_pre(masm, noreg, noreg, dst, tmp1, tmp2);
316 }
317 }
318
|
449 __ str(r_index_1, queue_index);
450 __ str(r_pre_val_0, Address(r_buffer_2, r_index_1));
451
452 __ bind(done);
453
454 #ifdef AARCH64
455 __ raw_pop(R2, R3);
456 __ raw_pop(R0, R1);
457 #else // AARCH64
458 __ pop(saved_regs);
459 #endif // AARCH64
460
461 __ ret();
462
463 __ bind(runtime);
464
465 __ save_live_registers();
466
467 assert(r_pre_val_0 == c_rarg0, "pre_val should be in R0");
468 __ mov(c_rarg1, Rthread);
469 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), c_rarg0, c_rarg1);
470
471 __ restore_live_registers_without_return();
472
473 __ b(done);
474 }
475
476 void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
477 // Input:
478 // - store_addr, pushed on the stack
479
480 __ set_info("g1_post_barrier_slow_id", false);
481
482 Label done;
483 Label recheck;
484 Label runtime;
485
486 Address queue_index(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
487 Address buffer(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));
488
|
450 __ str(r_index_1, queue_index);
451 __ str(r_pre_val_0, Address(r_buffer_2, r_index_1));
452
453 __ bind(done);
454
455 #ifdef AARCH64
456 __ raw_pop(R2, R3);
457 __ raw_pop(R0, R1);
458 #else // AARCH64
459 __ pop(saved_regs);
460 #endif // AARCH64
461
462 __ ret();
463
464 __ bind(runtime);
465
466 __ save_live_registers();
467
468 assert(r_pre_val_0 == c_rarg0, "pre_val should be in R0");
469 __ mov(c_rarg1, Rthread);
470 __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), c_rarg0, c_rarg1);
471
472 __ restore_live_registers_without_return();
473
474 __ b(done);
475 }
476
477 void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
478 // Input:
479 // - store_addr, pushed on the stack
480
481 __ set_info("g1_post_barrier_slow_id", false);
482
483 Label done;
484 Label recheck;
485 Label runtime;
486
487 Address queue_index(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
488 Address buffer(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));
489
|
556 }
557
558 __ ldr(r_index_2, queue_index);
559 __ ldr(r_buffer_3, buffer);
560
561 __ subs(r_index_2, r_index_2, wordSize);
562 __ b(runtime, lt); // go to runtime if now negative
563
564 __ str(r_index_2, queue_index);
565
566 __ str(r_card_addr_0, Address(r_buffer_3, r_index_2));
567
568 __ b(done);
569
570 __ bind(runtime);
571
572 __ save_live_registers();
573
574 assert(r_card_addr_0 == c_rarg0, "card_addr should be in R0");
575 __ mov(c_rarg1, Rthread);
576 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), c_rarg0, c_rarg1);
577
578 __ restore_live_registers_without_return();
579
580 __ b(done);
581 }
582
583 #undef __
584
585 #endif // COMPILER1
|
557 }
558
559 __ ldr(r_index_2, queue_index);
560 __ ldr(r_buffer_3, buffer);
561
562 __ subs(r_index_2, r_index_2, wordSize);
563 __ b(runtime, lt); // go to runtime if now negative
564
565 __ str(r_index_2, queue_index);
566
567 __ str(r_card_addr_0, Address(r_buffer_3, r_index_2));
568
569 __ b(done);
570
571 __ bind(runtime);
572
573 __ save_live_registers();
574
575 assert(r_card_addr_0 == c_rarg0, "card_addr should be in R0");
576 __ mov(c_rarg1, Rthread);
577 __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), c_rarg0, c_rarg1);
578
579 __ restore_live_registers_without_return();
580
581 __ b(done);
582 }
583
584 #undef __
585
586 #endif // COMPILER1
|