1 /*
  2  * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "asm/macroAssembler.inline.hpp"
 27 #include "gc/g1/g1BarrierSet.hpp"
 28 #include "gc/g1/g1BarrierSetAssembler.hpp"
 29 #include "gc/g1/g1CardTable.hpp"
 30 #include "gc/g1/heapRegion.hpp"
 31 #include "interpreter/interp_masm.hpp"
 32 #include "runtime/sharedRuntime.hpp"
 33 #include "runtime/thread.hpp"
 34 #include "utilities/macros.hpp"
 35 
// Shorthand used by all the emission code below: '__' forwards to the
// MacroAssembler pointer in scope, keeping instruction sequences readable.
#define __ masm->

// BLOCK_COMMENT annotates the generated code stream for debugging builds;
// it compiles away entirely in PRODUCT builds.
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

// Binds a label and emits a block comment naming it (non-PRODUCT only).
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
 45 
// G1 SATB pre-barrier for oop-array copies: before the copy overwrites the
// destination range, record the previous oops via the shared runtime entry
// points. Skipped entirely when the destination is known to be uninitialized
// (AS_DEST_NOT_INITIALIZED) — there are no previous values to record.
//
// addr  - start of the destination range; moved into R0 for the call
// count - number of elements; moved into R1 for the call
// callee_saved_regs - number of low registers (R0..) the caller needs to
//                     survive this barrier; they are saved/restored here.
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                            Register addr, Register count, int callee_saved_regs) {
  bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
  if (!dest_uninitialized) {
    // The runtime call clobbers addr/count, so they must be among the
    // registers preserved below.
    assert( addr->encoding() < callee_saved_regs, "addr must be saved");
    assert(count->encoding() < callee_saved_regs, "count must be saved");

    BLOCK_COMMENT("PreBarrier");

#ifdef AARCH64
    // raw_push pushes a register pair at a time; round up to an even count.
    callee_saved_regs = align_up(callee_saved_regs, 2);
    for (int i = 0; i < callee_saved_regs; i += 2) {
      __ raw_push(as_Register(i), as_Register(i+1));
    }
#else
    // 32-bit ARM: save R0..R(n-1), plus R9 on platforms where it is scratched.
    RegisterSet saved_regs = RegisterSet(R0, as_Register(callee_saved_regs-1));
    __ push(saved_regs | R9ifScratched);
#endif // AARCH64

    // Marshal arguments into the C calling-convention registers R0/R1.
    if (addr != R0) {
      assert_different_registers(count, R0);
      __ mov(R0, addr);
    }
#ifdef AARCH64
    __ zero_extend(R1, count, 32); // G1BarrierSet::write_ref_array_pre_*_entry takes size_t
#else
    if (count != R1) {
      __ mov(R1, count);
    }
#endif // AARCH64

    // Pick the entry matching the heap's oop encoding.
    if (UseCompressedOops) {
      __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry));
    } else {
      __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry));
    }

#ifdef AARCH64
    // Pop in the reverse order of the pushes above.
    for (int i = callee_saved_regs - 2; i >= 0; i -= 2) {
      __ raw_pop(as_Register(i), as_Register(i+1));
    }
#else
    __ pop(saved_regs | R9ifScratched);
#endif // AARCH64
  }
}
 92 
// G1 post-barrier for oop-array copies: after the copy, dirty the card
// table entries covering the destination range via the shared runtime entry.
//
// addr  - start of the destination range; moved into R0 for the call
// count - number of elements copied; moved into R1 for the call
// tmp   - unused in this implementation; kept for interface parity with
//         other platform backends
void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                             Register addr, Register count, Register tmp) {

  BLOCK_COMMENT("G1PostBarrier");
  // Marshal arguments into the C calling-convention registers R0/R1.
  if (addr != R0) {
    assert_different_registers(count, R0);
    __ mov(R0, addr);
  }
#ifdef AARCH64
  __ zero_extend(R1, count, 32); // G1BarrierSet::write_ref_array_post_entry takes size_t
#else
  if (count != R1) {
    __ mov(R1, count);
  }
#if R9_IS_SCRATCHED
  // Safer to save R9 here since callers may have been written
  // assuming R9 survives. This is suboptimal but is not in
  // general worth optimizing for the few platforms where R9
  // is scratched. Note that the optimization might not be too
  // difficult for this particular call site.
  __ push(R9);
#endif // R9_IS_SCRATCHED
#endif // AARCH64
  __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry));
#ifndef AARCH64
#if R9_IS_SCRATCHED
  __ pop(R9);
#endif // R9_IS_SCRATCHED
#endif // !AARCH64
}
123 
124 #undef __
125 #define __ ce->masm()->
126 
// Emits the C1 out-of-line slow path for the G1 SATB pre-barrier stub:
// optionally (re)loads the previous field value, skips null previous values,
// passes pre_val to the g1_pre_barrier_slow runtime stub through the
// reserved argument slot at SP, and resumes at the stub's continuation.
void G1BarrierSetAssembler::gen_g1_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());
  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
  }

  // A null previous value needs no SATB recording — go straight back.
  __ cbz(pre_val_reg, *stub->continuation());
  ce->verify_reserved_argument_area_size(1);
  // Pass pre_val to the runtime stub in the reserved stack slot.
  __ str(pre_val_reg, Address(SP));
  __ call(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id), relocInfo::runtime_call_type);

  __ b(*stub->continuation());
}
149 
// Emits the C1 out-of-line slow path for the G1 post-barrier stub:
// skips null new values (no cross-region reference to track), passes the
// store address to the g1_post_barrier_slow runtime stub through the
// reserved argument slot at SP, and resumes at the stub's continuation.
void G1BarrierSetAssembler::gen_g1_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
  __ bind(*stub->entry());
  assert(stub->addr()->is_register(), "Precondition.");
  assert(stub->new_val()->is_register(), "Precondition.");
  Register new_val_reg = stub->new_val()->as_register();
  // Storing null creates no old-to-young reference; nothing to mark dirty.
  __ cbz(new_val_reg, *stub->continuation());
  ce->verify_reserved_argument_area_size(1);
  // Pass the store address to the runtime stub in the reserved stack slot.
  __ str(stub->addr()->as_pointer_register(), Address(SP));
  __ call(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id), relocInfo::runtime_call_type);
  __ b(*stub->continuation());
}
161 
162 #undef __
163 #define __ sasm->
164 
165 void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
166   // Input:
167   // - pre_val pushed on the stack
168 
169   __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);
170 
171   BarrierSet* bs = BarrierSet::barrier_set();
172   if (bs->kind() != BarrierSet::G1BarrierSet) {
173     __ mov(R0, (int)id);
174     __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), R0);
175     __ should_not_reach_here();
176     break;
177   }
178 
179   // save at least the registers that need saving if the runtime is called
180 #ifdef AARCH64
181   __ raw_push(R0, R1);
182   __ raw_push(R2, R3);
183   const int nb_saved_regs = 4;
184 #else // AARCH64
185   const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
186   const int nb_saved_regs = 6;
187   assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
188   __ push(saved_regs);
189 #endif // AARCH64
190 
191   const Register r_pre_val_0  = R0; // must be R0, to be ready for the runtime call
192   const Register r_index_1    = R1;
193   const Register r_buffer_2   = R2;
194 
195   Address queue_active(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
196   Address queue_index(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
197   Address buffer(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));
198 
199   Label done;
200   Label runtime;
201 
202   // Is marking still active?
203   assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
204   __ ldrb(R1, queue_active);
205   __ cbz(R1, done);
206 
207   __ ldr(r_index_1, queue_index);
208   __ ldr(r_pre_val_0, Address(SP, nb_saved_regs*wordSize));
209   __ ldr(r_buffer_2, buffer);
210 
211   __ subs(r_index_1, r_index_1, wordSize);
212   __ b(runtime, lt);
213 
214   __ str(r_index_1, queue_index);
215   __ str(r_pre_val_0, Address(r_buffer_2, r_index_1));
216 
217   __ bind(done);
218 
219 #ifdef AARCH64
220   __ raw_pop(R2, R3);
221   __ raw_pop(R0, R1);
222 #else // AARCH64
223   __ pop(saved_regs);
224 #endif // AARCH64
225 
226   __ ret();
227 
228   __ bind(runtime);
229 
230   __ save_live_registers();
231 
232   assert(r_pre_val_0 == c_rarg0, "pre_val should be in R0");
233   __ mov(c_rarg1, Rthread);
234   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), c_rarg0, c_rarg1);
235 
236   __ restore_live_registers_without_return();
237 
238   __ b(done);
239 }
240 
241 void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
242   // Input:
243   // - store_addr, pushed on the stack
244 
245   __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);
246 
247   BarrierSet* bs = BarrierSet::barrier_set();
248   if (bs->kind() != BarrierSet::G1BarrierSet) {
249     __ mov(R0, (int)id);
250     __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), R0);
251     __ should_not_reach_here();
252     break;
253   }
254 
255   Label done;
256   Label recheck;
257   Label runtime;
258 
259   Address queue_index(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
260   Address buffer(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));
261 
262   AddressLiteral cardtable(ci_card_table_address_as<address>(), relocInfo::none);
263 
264   // save at least the registers that need saving if the runtime is called
265 #ifdef AARCH64
266   __ raw_push(R0, R1);
267   __ raw_push(R2, R3);
268   const int nb_saved_regs = 4;
269 #else // AARCH64
270   const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
271   const int nb_saved_regs = 6;
272   assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
273   __ push(saved_regs);
274 #endif // AARCH64
275 
276   const Register r_card_addr_0 = R0; // must be R0 for the slow case
277   const Register r_obj_0 = R0;
278   const Register r_card_base_1 = R1;
279   const Register r_tmp2 = R2;
280   const Register r_index_2 = R2;
281   const Register r_buffer_3 = R3;
282   const Register tmp1 = Rtemp;
283 
284   __ ldr(r_obj_0, Address(SP, nb_saved_regs*wordSize));
285   // Note: there is a comment in x86 code about not using
286   // ExternalAddress / lea, due to relocation not working
287   // properly for that address. Should be OK for arm, where we
288   // explicitly specify that 'cardtable' has a relocInfo::none
289   // type.
290   __ lea(r_card_base_1, cardtable);
291   __ add(r_card_addr_0, r_card_base_1, AsmOperand(r_obj_0, lsr, CardTable::card_shift));
292 
293   // first quick check without barrier
294   __ ldrb(r_tmp2, Address(r_card_addr_0));
295 
296   __ cmp(r_tmp2, (int)G1CardTable::g1_young_card_val());
297   __ b(recheck, ne);
298 
299   __ bind(done);
300 
301 #ifdef AARCH64
302   __ raw_pop(R2, R3);
303   __ raw_pop(R0, R1);
304 #else // AARCH64
305   __ pop(saved_regs);
306 #endif // AARCH64
307 
308   __ ret();
309 
310   __ bind(recheck);
311 
312   __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), tmp1);
313 
314   // reload card state after the barrier that ensures the stored oop was visible
315   __ ldrb(r_tmp2, Address(r_card_addr_0));
316 
317   assert(CardTable::dirty_card_val() == 0, "adjust this code");
318   __ cbz(r_tmp2, done);
319 
320   // storing region crossing non-NULL, card is clean.
321   // dirty card and log.
322 
323   assert(0 == (int)CardTable::dirty_card_val(), "adjust this code");
324   if ((ci_card_table_address_as<intptr_t>() & 0xff) == 0) {
325     // Card table is aligned so the lowest byte of the table address base is zero.
326     __ strb(r_card_base_1, Address(r_card_addr_0));
327   } else {
328     __ strb(__ zero_register(r_tmp2), Address(r_card_addr_0));
329   }
330 
331   __ ldr(r_index_2, queue_index);
332   __ ldr(r_buffer_3, buffer);
333 
334   __ subs(r_index_2, r_index_2, wordSize);
335   __ b(runtime, lt); // go to runtime if now negative
336 
337   __ str(r_index_2, queue_index);
338 
339   __ str(r_card_addr_0, Address(r_buffer_3, r_index_2));
340 
341   __ b(done);
342 
343   __ bind(runtime);
344 
345   __ save_live_registers();
346 
347   assert(r_card_addr_0 == c_rarg0, "card_addr should be in R0");
348   __ mov(c_rarg1, Rthread);
349   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), c_rarg0, c_rarg1);
350 
351   __ restore_live_registers_without_return();
352 
353   __ b(done);
354 }
355 
356 #undef __