
#define __ masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                            Register addr, Register count, int callee_saved_regs) {
  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
  if (!dest_uninitialized) {
    assert( addr->encoding() < callee_saved_regs, "addr must be saved");
    assert(count->encoding() < callee_saved_regs, "count must be saved");

    BLOCK_COMMENT("PreBarrier");

#ifdef AARCH64
    callee_saved_regs = align_up(callee_saved_regs, 2);
    for (int i = 0; i < callee_saved_regs; i += 2) {
      __ raw_push(as_Register(i), as_Register(i+1));
    }
#else
    RegisterSet saved_regs = RegisterSet(R0, as_Register(callee_saved_regs-1));
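    // R9ifScratched adds R9 to the set on targets where the ABI treats R9
    // as a scratch register (R9_IS_SCRATCHED), and is empty otherwise.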
    __ push(saved_regs | R9ifScratched);
#endif // AARCH64

    if (addr != R0) {
      assert_different_registers(count, R0);
      __ mov(R0, addr);
    }
#ifdef AARCH64
    __ zero_extend(R1, count, 32); // G1BarrierSetRuntime::write_ref_array_pre_*_entry takes size_t
#else
    if (count != R1) {
      __ mov(R1, count);
    }
#endif // AARCH64

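    // The runtime entry walks the destination range and SATB-enqueues each
    // old referent about to be overwritten; the narrow variant handles
    // compressed oops.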
    if (UseCompressedOops) {
      __ call(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry));
    } else {
      __ call(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry));
    }

#ifdef AARCH64
    for (int i = callee_saved_regs - 2; i >= 0; i -= 2) {
      __ raw_pop(as_Register(i), as_Register(i+1));
    }
#else
    __ pop(saved_regs | R9ifScratched);
#endif // AARCH64
  }
}

void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                             Register addr, Register count, Register tmp) {

  BLOCK_COMMENT("G1PostBarrier");
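  // The runtime entry dirties and enqueues every card covering
  // [addr, addr + count).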
  if (addr != R0) {
    assert_different_registers(count, R0);
    __ mov(R0, addr);
  }
#ifdef AARCH64
  __ zero_extend(R1, count, 32); // G1BarrierSetRuntime::write_ref_array_post_entry takes size_t
#else
  if (count != R1) {
    __ mov(R1, count);
  }
#if R9_IS_SCRATCHED
  // Safer to save R9 here since callers may have been written
  // assuming R9 survives. This is suboptimal but is not in
  // general worth optimizing for the few platforms where R9
  // is scratched. Note that the optimization might not be too
  // difficult for this particular call site.
  __ push(R9);
#endif // R9_IS_SCRATCHED
#endif // AARCH64
  __ call(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry));
#ifndef AARCH64
#if R9_IS_SCRATCHED
  __ pop(R9);
#endif // R9_IS_SCRATCHED
#endif // !AARCH64
}

// G1 pre-barrier.
// Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
// If store_addr != noreg, then previous value is loaded from [store_addr];
// in that case store_addr and new_val registers are preserved;
// otherwise pre_val register is preserved.
void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
                                                 Register store_addr,
                                                 Register new_val,
                                                 Register pre_val,
                                                 Register tmp1,
                                                 Register tmp2) {
  Label done;
  Label runtime;

  if (store_addr != noreg) {
    assert_different_registers(store_addr, new_val, pre_val, tmp1, tmp2, noreg);
  } else {
    assert (new_val == noreg, "should be");
    assert_different_registers(pre_val, tmp1, tmp2, noreg);
  }

  Address in_progress(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
  Address index(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));

  // Is marking active?
  assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "adjust this code");
  __ ldrb(tmp1, in_progress);
  __ cbz(tmp1, done);

  // Do we need to load the previous value?
  if (store_addr != noreg) {
    __ load_heap_oop(pre_val, Address(store_addr));
  }

  // Is the previous value null?
  __ cbz(pre_val, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  __ ldr(tmp1, index);           // tmp1 := *index_adr
  __ ldr(tmp2, buffer);

  __ subs(tmp1, tmp1, wordSize); // tmp1 := tmp1 - wordSize
  __ b(runtime, lt);             // If negative, goto runtime

  __ str(tmp1, index);           // *index_adr := tmp1

  // Record the previous value
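  // The entry goes at buffer + index: the queue fills from the end of the
  // buffer toward the start.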
  __ str(pre_val, Address(tmp2, tmp1));
  __ b(done);

  __ bind(runtime);

  // save the live input values
#ifdef AARCH64
  if (store_addr != noreg) {
    __ raw_push(store_addr, new_val);
  } else {
    __ raw_push(pre_val, ZR);
  }
#else
  if (store_addr != noreg) {
    // avoid raw_push to support any ordering of store_addr and new_val
    __ push(RegisterSet(store_addr) | RegisterSet(new_val));
  } else {
    __ push(pre_val);
  }
#endif // AARCH64

  if (pre_val != R0) {
    __ mov(R0, pre_val);
  }
  __ mov(R1, Rthread);

  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), R0, R1);

#ifdef AARCH64
  if (store_addr != noreg) {
    __ raw_pop(store_addr, new_val);
  } else {
    __ raw_pop(pre_val, ZR);
  }
#else
  if (store_addr != noreg) {
    __ pop(RegisterSet(store_addr) | RegisterSet(new_val));
  } else {
    __ pop(pre_val);
  }
#endif // AARCH64

  __ bind(done);
}

// G1 post-barrier.
// Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
                                                  Register store_addr,
                                                  Register new_val,
                                                  Register tmp1,
                                                  Register tmp2,
                                                  Register tmp3) {

  Address queue_index(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
  Address buffer(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));

  BarrierSet* bs = BarrierSet::barrier_set();
  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
  CardTable* ct = ctbs->card_table();
  Label done;
  Label runtime;

  // Does store cross heap regions?

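  // If store_addr and new_val lie in the same region, all address bits above
  // LogOfHRGrainBytes match, so the shifted XOR below is zero.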
  __ eor(tmp1, store_addr, new_val);
#ifdef AARCH64
  __ logical_shift_right(tmp1, tmp1, HeapRegion::LogOfHRGrainBytes);
  __ cbz(tmp1, done);
#else
  __ movs(tmp1, AsmOperand(tmp1, lsr, HeapRegion::LogOfHRGrainBytes));
  __ b(done, eq);
#endif

  // crosses regions, storing NULL?

  __ cbz(new_val, done);

  // storing region crossing non-NULL, is card already dirty?
  const Register card_addr = tmp1;
  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");

  __ mov_address(tmp2, (address)ct->byte_map_base(), symbolic_Relocation::card_table_reference);
  __ add(card_addr, tmp2, AsmOperand(store_addr, lsr, CardTable::card_shift));

  __ ldrb(tmp2, Address(card_addr));
  __ cmp(tmp2, (int)G1CardTable::g1_young_card_val());
  __ b(done, eq);

  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), tmp2);

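  // Reload the card after the StoreLoad barrier, which ensures the reference
  // store is visible before the card state is re-examined (the C1 runtime
  // stub below performs the same recheck).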
  assert(CardTable::dirty_card_val() == 0, "adjust this code");
  __ ldrb(tmp2, Address(card_addr));
  __ cbz(tmp2, done);

  // storing a region crossing, non-NULL oop, card is clean.
  // dirty card and log.

  __ strb(__ zero_register(tmp2), Address(card_addr));

  __ ldr(tmp2, queue_index);
  __ ldr(tmp3, buffer);

  __ subs(tmp2, tmp2, wordSize);
  __ b(runtime, lt); // go to runtime if negative
  __ str(tmp2, queue_index);
  __ str(card_addr, Address(tmp3, tmp2));
  __ b(done);

  __ bind(runtime);

  if (card_addr != R0) {
    __ mov(R0, card_addr);
  }
  __ mov(R1, Rthread);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), R0, R1);

  __ bind(done);
}

void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                    Register dst, Address src, Register tmp1, Register tmp2, Register tmp3) {
  bool on_oop = type == T_OBJECT || type == T_ARRAY;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool on_reference = on_weak || on_phantom;

  BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2, tmp3);
  if (on_oop && on_reference) {
    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    g1_write_barrier_pre(masm, noreg, noreg, dst, tmp1, tmp2);
  }
}


void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                         Address obj, Register new_val, Register tmp1, Register tmp2, Register tmp3, bool is_null) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool as_normal = (decorators & AS_NORMAL) != 0;
  assert((decorators & IS_DEST_UNINITIALIZED) == 0, "unsupported");

  bool needs_pre_barrier = as_normal;
  bool needs_post_barrier = (new_val != noreg) && in_heap;

  // flatten object address if needed
  assert (obj.mode() == basic_offset, "pre- or post-indexing is not supported here");

  const Register store_addr = obj.base();
  if (obj.index() != noreg) {
    assert (obj.disp() == 0, "index or displacement, not both");
#ifdef AARCH64
    __ add(store_addr, obj.base(), obj.index(), obj.extend(), obj.shift_imm());
#else
    assert(obj.offset_op() == add_offset, "addition is expected");
    __ add(store_addr, obj.base(), AsmOperand(obj.index(), obj.shift(), obj.shift_imm()));
#endif // AARCH64
  } else if (obj.disp() != 0) {
    __ add(store_addr, obj.base(), obj.disp());
  }

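  // With the address flattened into store_addr, the SATB pre-barrier can
  // load and log the value about to be overwritten at [store_addr].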
  if (needs_pre_barrier) {
    g1_write_barrier_pre(masm, store_addr, new_val, tmp1, tmp2, tmp3);
  }

  if (is_null) {
    BarrierSetAssembler::store_at(masm, decorators, type, Address(store_addr), new_val, tmp1, tmp2, tmp3, true);
  } else {
    // G1 barrier needs uncompressed oop for region cross check.
    Register val_to_store = new_val;
    if (UseCompressedOops) {
      val_to_store = tmp1;
      __ mov(val_to_store, new_val);
    }
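    // Work on the copy so that new_val stays uncompressed for the post
    // barrier's region-cross check against store_addr.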
    BarrierSetAssembler::store_at(masm, decorators, type, Address(store_addr), val_to_store, tmp1, tmp2, tmp3, false);
    if (needs_post_barrier) {
      g1_write_barrier_post(masm, store_addr, new_val, tmp1, tmp2, tmp3);
    }
  }
}

#undef __
#define __ ce->masm()->

void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());
  assert(stub->addr()->is_register(), "Precondition.");
  assert(stub->new_val()->is_register(), "Precondition.");
  Register new_val_reg = stub->new_val()->as_register();
  __ cbz(new_val_reg, *stub->continuation());
  ce->verify_reserved_argument_area_size(1);
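  // Pass the store address to the runtime stub through the reserved
  // outgoing-argument slot at SP (size checked above).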
  __ str(stub->addr()->as_pointer_register(), Address(SP));
  __ call(bs->post_barrier_c1_runtime_code_blob()->code_begin(), relocInfo::runtime_call_type);
  __ b(*stub->continuation());
}

#undef __
#define __ sasm->

void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  // Input:
  // - pre_val pushed on the stack

  __ set_info("g1_pre_barrier_slow_id", false);

  // save at least the registers that need saving if the runtime is called
#ifdef AARCH64
  __ raw_push(R0, R1);
  __ raw_push(R2, R3);
  const int nb_saved_regs = 4;
#else // AARCH64
  const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
  const int nb_saved_regs = 6;
  assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
  __ push(saved_regs);
#endif // AARCH64

  const Register r_pre_val_0 = R0; // must be R0, to be ready for the runtime call
  const Register r_index_1 = R1;
  const Register r_buffer_2 = R2;

  Address queue_active(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
  Address queue_index(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
  assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
  __ ldrb(R1, queue_active);
  __ cbz(R1, done);

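  // The caller pushed pre_val just before this stub saved its registers,
  // so it sits nb_saved_regs words above SP.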
  __ ldr(r_index_1, queue_index);
  __ ldr(r_pre_val_0, Address(SP, nb_saved_regs*wordSize));
  __ ldr(r_buffer_2, buffer);

  __ subs(r_index_1, r_index_1, wordSize);
  __ b(runtime, lt);

  __ str(r_index_1, queue_index);
  __ str(r_pre_val_0, Address(r_buffer_2, r_index_1));

  __ bind(done);

#ifdef AARCH64
  __ raw_pop(R2, R3);
  __ raw_pop(R0, R1);
#else // AARCH64
  __ pop(saved_regs);
#endif // AARCH64

  __ ret();

  __ bind(runtime);

  __ save_live_registers();

  assert(r_pre_val_0 == c_rarg0, "pre_val should be in R0");
  __ mov(c_rarg1, Rthread);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), c_rarg0, c_rarg1);

  __ restore_live_registers_without_return();

  __ b(done);
}

void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
  // Input:
  // - store_addr, pushed on the stack

  __ set_info("g1_post_barrier_slow_id", false);

  Label done;
  Label recheck;
  Label runtime;

  Address queue_index(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
  Address buffer(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));

  AddressLiteral cardtable(ci_card_table_address_as<address>(), relocInfo::none);

  // save at least the registers that need saving if the runtime is called
#ifdef AARCH64
  __ raw_push(R0, R1);
  __ raw_push(R2, R3);
  const int nb_saved_regs = 4;
#else // AARCH64
  const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
  const int nb_saved_regs = 6;
  assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
  __ push(saved_regs);
#endif // AARCH64

  const Register r_card_addr_0 = R0; // must be R0 for the slow case
  const Register r_obj_0 = R0;
  const Register r_card_base_1 = R1;
  const Register r_tmp2 = R2;
  const Register r_index_2 = R2;
  const Register r_buffer_3 = R3;
  const Register tmp1 = Rtemp;

  __ ldr(r_obj_0, Address(SP, nb_saved_regs*wordSize));
  // Note: there is a comment in x86 code about not using
  // ExternalAddress / lea, due to relocation not working
  // properly for that address. Should be OK for arm, where we
  // explicitly specify that 'cardtable' has a relocInfo::none
  // type.
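  // card address = table base + (obj >> card_shift): each card byte
  // covers 2^card_shift bytes of heap.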
  __ lea(r_card_base_1, cardtable);
  __ add(r_card_addr_0, r_card_base_1, AsmOperand(r_obj_0, lsr, CardTable::card_shift));

  // first quick check without barrier
  __ ldrb(r_tmp2, Address(r_card_addr_0));

  __ cmp(r_tmp2, (int)G1CardTable::g1_young_card_val());
  __ b(recheck, ne);

  __ bind(done);

#ifdef AARCH64
  __ raw_pop(R2, R3);
  __ raw_pop(R0, R1);
#else // AARCH64
  __ pop(saved_regs);
#endif // AARCH64

  __ ret();

  __ bind(recheck);

  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), tmp1);

  // reload card state after the barrier that ensures the stored oop was visible
  __ ldrb(r_tmp2, Address(r_card_addr_0));

  assert(CardTable::dirty_card_val() == 0, "adjust this code");
  __ cbz(r_tmp2, done);

  // storing region crossing non-NULL, card is clean.
  // dirty card and log.

  assert(0 == (int)CardTable::dirty_card_val(), "adjust this code");
  if ((ci_card_table_address_as<intptr_t>() & 0xff) == 0) {
    // Card table is aligned so the lowest byte of the table address base is zero.
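    // Storing the low byte of r_card_base_1 (known to be zero) writes
    // dirty_card_val() without materializing a separate zero register.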
    __ strb(r_card_base_1, Address(r_card_addr_0));