1 /*
  2  * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
  3  * Copyright (c) 2018, SAP SE. All rights reserved.
  4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  5  *
  6  * This code is free software; you can redistribute it and/or modify it
  7  * under the terms of the GNU General Public License version 2 only, as
  8  * published by the Free Software Foundation.
  9  *
 10  * This code is distributed in the hope that it will be useful, but WITHOUT
 11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 #include "precompiled.hpp"
 27 #include "asm/macroAssembler.inline.hpp"
 28 #include "c1/c1_LIRAssembler.hpp"
 29 #include "c1/c1_MacroAssembler.hpp"
 30 #include "gc/g1/c1/g1BarrierSetC1.hpp"
 31 #include "gc/g1/g1BarrierSet.hpp"
 32 #include "gc/g1/g1BarrierSetAssembler.hpp"
 33 #include "gc/g1/g1CardTable.hpp"
 34 #include "gc/g1/g1ThreadLocalData.hpp"
 35 #include "gc/g1/heapRegion.hpp"
 36 #include "interpreter/interp_masm.hpp"
 37 #include "runtime/sharedRuntime.hpp"
 38 
 39 #define __ masm->
 40 
// Generate the G1 SATB pre-barrier for an oop-array copy: calls into the
// runtime to enqueue the previous values of the destination range
// [to, to + count). Elided statically when the destination is known to be
// uninitialized (there are no previous values) and dynamically when SATB
// marking is inactive.
//
// from, to and count are preserved across the runtime call; preserve1 and
// preserve2 optionally name further caller registers (noreg if unused)
// that must also survive the call.
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                            Register from, Register to, Register count,
                                                            Register preserve1, Register preserve2) {
  bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
  // With G1, don't generate the call if we statically know that the target is uninitialized.
  if (!dest_uninitialized) {
    // Spill slots for from, to and count, plus the optional preserve registers.
    int spill_slots = 3;
    if (preserve1 != noreg) { spill_slots++; }
    if (preserve2 != noreg) { spill_slots++; }
    const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
    Label filtered;

    // Is marking active? The flag's width depends on the build
    // (see SATBMarkQueue::byte_width_of_active()).
    if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
      __ lwz(R0, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()), R16_thread);
    } else {
      guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
      __ lbz(R0, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()), R16_thread);
    }
    __ cmpdi(CCR0, R0, 0);
    __ beq(CCR0, filtered); // Marking inactive: nothing to record.

    // Push an ABI-compliant frame and spill the live registers into its top slots.
    __ save_LR_CR(R0);
    __ push_frame(frame_size, R0);
    int slot_nr = 0;
    __ std(from,  frame_size - (++slot_nr) * wordSize, R1_SP);
    __ std(to,    frame_size - (++slot_nr) * wordSize, R1_SP);
    __ std(count, frame_size - (++slot_nr) * wordSize, R1_SP);
    if (preserve1 != noreg) { __ std(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
    if (preserve2 != noreg) { __ std(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }

    // The runtime entry differs for narrow vs. wide oop elements.
    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry), to, count);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry), to, count);
    }

    // Reload the spilled registers (same slot order as above) and tear down the frame.
    slot_nr = 0;
    __ ld(from,  frame_size - (++slot_nr) * wordSize, R1_SP);
    __ ld(to,    frame_size - (++slot_nr) * wordSize, R1_SP);
    __ ld(count, frame_size - (++slot_nr) * wordSize, R1_SP);
    if (preserve1 != noreg) { __ ld(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
    if (preserve2 != noreg) { __ ld(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }
    __ addi(R1_SP, R1_SP, frame_size); // pop_frame()
    __ restore_LR_CR(R0);

    __ bind(filtered);
  }
}
 90 
 91 void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
 92                                                              Register addr, Register count, Register preserve) {
 93   int spill_slots = (preserve != noreg) ? 1 : 0;
 94   const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
 95 
 96   __ save_LR_CR(R0);
 97   __ push_frame(frame_size, R0);
 98   if (preserve != noreg) { __ std(preserve, frame_size - 1 * wordSize, R1_SP); }
 99   __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry), addr, count);
100   if (preserve != noreg) { __ ld(preserve, frame_size - 1 * wordSize, R1_SP); }
101   __ addi(R1_SP, R1_SP, frame_size); // pop_frame();
102   __ restore_LR_CR(R0);
103 }
104 
// Generate the G1 SATB pre-barrier for a single oop store.
//
// If obj is a real register the previous value is loaded from
// [obj + ind_or_offs] into pre_val; if obj == noreg, pre_val must already
// hold it ("preloaded"). A non-null previous value is recorded in the
// thread-local SATB buffer; when the buffer is full (index == 0) the slow
// path calls SharedRuntime::g1_wb_pre.
//
// OOP_NOT_NULL (honored in the preloaded case) asserts the value cannot be
// null and elides the null filter. needs_frame: wrap the runtime call in
// save_LR_CR/push_frame, required when the current frame is not compatible
// with the C calling convention.
void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, DecoratorSet decorators, Register obj, RegisterOrConstant ind_or_offs, Register pre_val,
                                                 Register tmp1, Register tmp2, bool needs_frame) {
  bool not_null  = (decorators & OOP_NOT_NULL) != 0,
       preloaded = obj == noreg;
  // Non-volatile register used to keep pre_val alive across the C call, if needed.
  Register nv_save = noreg;

  if (preloaded) {
    // We are not loading the previous value so make
    // sure that we don't trash the value in pre_val
    // with the code below.
    assert_different_registers(pre_val, tmp1, tmp2);
    if (pre_val->is_volatile()) {
      nv_save = !tmp1->is_volatile() ? tmp1 : tmp2;
      assert(!nv_save->is_volatile(), "need one nv temp register if pre_val lives in volatile register");
    }
  }

  Label runtime, filtered;

  // Is marking active? The flag's width depends on the build.
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ lwz(tmp1, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()), R16_thread);
  } else {
    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ lbz(tmp1, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()), R16_thread);
  }
  __ cmpdi(CCR0, tmp1, 0);
  __ beq(CCR0, filtered);

  // Do we need to load the previous value?
  if (!preloaded) {
    // Load the previous value...
    if (UseCompressedOops) {
      __ lwz(pre_val, ind_or_offs, obj);
    } else {
      __ ld(pre_val, ind_or_offs, obj);
    }
    // Previous value has been loaded into Rpre_val.
  }
  assert(pre_val != noreg, "must have a real register");

  // Is the previous value null?
  if (preloaded && not_null) {
#ifdef ASSERT
    __ cmpdi(CCR0, pre_val, 0);
    __ asm_assert_ne("null oop not allowed (G1 pre)", 0x321); // Checked by caller.
#endif
  } else {
    __ cmpdi(CCR0, pre_val, 0);
    __ beq(CCR0, filtered); // Null previous value: nothing to record.
  }

  // A compressed oop loaded above must be decoded before being enqueued.
  if (!preloaded && UseCompressedOops) {
    __ decode_heap_oop_not_null(pre_val);
  }

  // Not filtered, so pre_val must be enqueued. Fast path: store it directly
  // into the thread's SATB buffer if there is still room (index != 0);
  // otherwise fall through to the runtime call.
  // (The index field is typed as size_t.)
  const Register Rbuffer = tmp1, Rindex = tmp2;

  __ ld(Rindex, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()), R16_thread);
  __ cmpdi(CCR0, Rindex, 0);
  __ beq(CCR0, runtime); // If index == 0, goto runtime.
  __ ld(Rbuffer, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()), R16_thread);

  __ addi(Rindex, Rindex, -wordSize); // Decrement index.
  __ std(Rindex, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()), R16_thread);

  // Record the previous value.
  __ stdx(pre_val, Rbuffer, Rindex);
  __ b(filtered); // Fast path done.

  __ bind(runtime);

  // May need to preserve LR. Also needed if current frame is not compatible with C calling convention.
  if (needs_frame) {
    __ save_LR_CR(tmp1);
    __ push_frame_reg_args(0, tmp2);
  }

  if (pre_val->is_volatile() && preloaded) { __ mr(nv_save, pre_val); } // Save pre_val across C call if it was preloaded.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, R16_thread);
  if (pre_val->is_volatile() && preloaded) { __ mr(pre_val, nv_save); } // restore

  if (needs_frame) {
    __ pop_frame();
    __ restore_LR_CR(tmp1);
  }

  __ bind(filtered);
}
202 
// Generate the G1 post-barrier (card marking) for storing new_val at
// store_addr. Card marking is skipped when: the store does not cross heap
// regions (with G1RSBarrierRegionFilter), the new value is null, the card
// is a young card, or the card is already dirty (rechecked after a
// StoreLoad membar). Otherwise the card is dirtied and its address is
// enqueued in the thread-local dirty card queue; the slow path calls
// SharedRuntime::g1_wb_post when the queue is full.
// Clobbers tmp1-tmp3; store_addr and new_val are not written.
void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, DecoratorSet decorators, Register store_addr, Register new_val,
                                                  Register tmp1, Register tmp2, Register tmp3) {
  bool not_null = (decorators & OOP_NOT_NULL) != 0;

  Label runtime, filtered;
  assert_different_registers(store_addr, new_val, tmp1, tmp2);

  CardTableBarrierSet* ct = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
  assert(sizeof(*ct->card_table()->byte_map_base()) == sizeof(jbyte), "adjust this code");

  // Does store cross heap regions? The xor, shifted right by the region
  // size, is zero iff both addresses lie in the same region.
  if (G1RSBarrierRegionFilter) {
    __ xorr(tmp1, store_addr, new_val);
    __ srdi_(tmp1, tmp1, HeapRegion::LogOfHRGrainBytes);
    __ beq(CCR0, filtered);
  }

  // Crosses regions, storing NULL?
  if (not_null) {
#ifdef ASSERT
    __ cmpdi(CCR0, new_val, 0);
    __ asm_assert_ne("null oop not allowed (G1 post)", 0x322); // Checked by caller.
#endif
  } else {
    __ cmpdi(CCR0, new_val, 0);
    __ beq(CCR0, filtered);
  }

  // Storing region crossing non-NULL, is card already dirty?
  const Register Rcard_addr = tmp1;
  Register Rbase = tmp2;
  __ load_const_optimized(Rbase, (address)(ct->card_table()->byte_map_base()), /*temp*/ tmp3);

  __ srdi(Rcard_addr, store_addr, CardTable::card_shift); // Card index of the store address.

  // Get the address of the card.
  __ lbzx(/*card value*/ tmp3, Rbase, Rcard_addr);
  __ cmpwi(CCR0, tmp3, (int)G1CardTable::g1_young_card_val());
  __ beq(CCR0, filtered); // Young cards need no marking.

  // Order the oop store before the card re-read, then check again for dirty.
  __ membar(Assembler::StoreLoad);
  __ lbzx(/*card value*/ tmp3, Rbase, Rcard_addr);  // Reload after membar.
  __ cmpwi(CCR0, tmp3 /* card value */, (int)G1CardTable::dirty_card_val());
  __ beq(CCR0, filtered);

  // Storing a region crossing, non-NULL oop, card is clean.
  // Dirty card and log.
  __ li(tmp3, (int)G1CardTable::dirty_card_val());
  //release(); // G1: oops are allowed to get visible after dirty marking.
  __ stbx(tmp3, Rbase, Rcard_addr);

  __ add(Rcard_addr, Rbase, Rcard_addr); // This is the address which needs to get enqueued.
  Rbase = noreg; // end of lifetime

  // Fast path: store the card address into the thread's dirty card queue
  // if there is still room (index != 0); otherwise call the runtime.
  const Register Rqueue_index = tmp2,
                 Rqueue_buf   = tmp3;
  __ ld(Rqueue_index, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()), R16_thread);
  __ cmpdi(CCR0, Rqueue_index, 0);
  __ beq(CCR0, runtime); // index == 0 then jump to runtime
  __ ld(Rqueue_buf, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()), R16_thread);

  __ addi(Rqueue_index, Rqueue_index, -wordSize); // decrement index
  __ std(Rqueue_index, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()), R16_thread);

  __ stdx(Rcard_addr, Rqueue_buf, Rqueue_index); // store card
  __ b(filtered);

  __ bind(runtime);

  // Save the live input values.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), Rcard_addr, R16_thread);

  __ bind(filtered);
}
277 
278 void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
279                                        Register base, RegisterOrConstant ind_or_offs, Register val,
280                                        Register tmp1, Register tmp2, Register tmp3, bool needs_frame) {
281   bool on_array = (decorators & IN_HEAP_ARRAY) != 0;
282   bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
283   bool precise = on_array || on_anonymous;
284   // Load and record the previous value.
285   g1_write_barrier_pre(masm, decorators, base, ind_or_offs,
286                        tmp1, tmp2, tmp3, needs_frame);
287 
288   BarrierSetAssembler::store_at(masm, decorators, type, base, ind_or_offs, val, tmp1, tmp2, tmp3, needs_frame);
289 
290   // No need for post barrier if storing NULL
291   if (val != noreg) {
292     if (precise) {
293       if (ind_or_offs.is_constant()) {
294         __ add_const_optimized(base, base, ind_or_offs.as_constant(), tmp1);
295       } else {
296         __ add(base, ind_or_offs.as_register(), base);
297       }
298     }
299     g1_write_barrier_post(masm, decorators, base, val, tmp1, tmp2, tmp3);
300   }
301 }
302 
303 void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
304                                     Register base, RegisterOrConstant ind_or_offs, Register dst,
305                                     Register tmp1, Register tmp2, bool needs_frame, Label *is_null) {
306   bool on_oop = type == T_OBJECT || type == T_ARRAY;
307   bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
308   bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
309   bool on_reference = on_weak || on_phantom;
310   Label done;
311   if (on_oop && on_reference && is_null == NULL) { is_null = &done; }
312   // Load the value of the referent field.
313   ModRefBarrierSetAssembler::load_at(masm, decorators, type, base, ind_or_offs, dst, tmp1, tmp2, needs_frame, is_null);
314   if (on_oop && on_reference) {
315     // Generate the G1 pre-barrier code to log the value of
316     // the referent field in an SATB buffer. Note with
317     // these parameters the pre-barrier does not generate
318     // the load of the previous value
319     // We only reach here if value is not null.
320     g1_write_barrier_pre(masm, decorators | OOP_NOT_NULL, noreg /* obj */, (intptr_t)0, dst /* pre_val */,
321                          tmp1, tmp2, needs_frame);
322   }
323   __ bind(done);
324 }
325 
// Resolve the jobject handle in 'value' to the oop it refers to (in place).
// NULL handles are passed through unchanged. Handles with the weak tag bit
// set are jweaks; for those a G1 pre-barrier logs the read referent
// (ON_PHANTOM_OOP_REF) in an SATB buffer.
void G1BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value, Register tmp1, Register tmp2, bool needs_frame) {
  Label done, not_weak;
  __ cmpdi(CCR0, value, 0);
  __ beq(CCR0, done);         // Use NULL as-is.

  __ clrrdi(tmp1, value, JNIHandles::weak_tag_size); // Strip the tag bits to get the handle address.
  __ andi_(tmp2, value, JNIHandles::weak_tag_mask);  // Sets CCR0; consumed by the beq below.
  __ ld(value, 0, tmp1);      // Resolve (untagged) jobject.

  __ beq(CCR0, not_weak);     // Test for jweak tag.
  __ verify_oop(value);
  // Weak handle: record the referent for the SATB invariant.
  g1_write_barrier_pre(masm, IN_ROOT | ON_PHANTOM_OOP_REF,
                       noreg, noreg, value,
                       tmp1, tmp2, needs_frame);
  __ bind(not_weak);
  __ verify_oop(value);
  __ bind(done);
}
344 
345 #ifdef COMPILER1
346 
347 #undef __
348 #define __ ce->masm()->
349 
// C1 slow path of the G1 pre-barrier, emitted out of line for a
// G1PreBarrierStub. Entered only when marking is active (the inline fast
// path checked the flag).
void G1BarrierSetAssembler::gen_g1_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());

  assert(stub->pre_val()->is_register(), "Precondition.");
  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
  }

  // Null previous value: nothing to record, skip back to the continuation.
  __ cmpdi(CCR0, pre_val_reg, 0);
  __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());

  // Call the shared runtime stub (generate_c1_pre_barrier_runtime_stub),
  // addressed TOC-relative; pre_val is passed on the stack at -8(SP).
  address c_code = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
  //__ load_const_optimized(R0, c_code);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(c_code));
  __ std(pre_val_reg, -8, R1_SP); // Pass pre_val on stack.
  __ mtctr(R0);
  __ bctrl();
  __ b(*stub->continuation());
}
377 
// C1 slow path of the G1 post-barrier, emitted out of line for a
// G1PostBarrierStub.
void G1BarrierSetAssembler::gen_g1_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  assert(stub->addr()->is_register(), "Precondition.");
  assert(stub->new_val()->is_register(), "Precondition.");
  Register addr_reg = stub->addr()->as_pointer_register();
  Register new_val_reg = stub->new_val()->as_register();

  // Storing null needs no card mark: skip back to the continuation.
  __ cmpdi(CCR0, new_val_reg, 0);
  __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());

  // Call the shared runtime stub (generate_c1_post_barrier_runtime_stub),
  // addressed TOC-relative. Note the ordering: R0 is moved to CTR before
  // being reused to pass the store address.
  address c_code = bs->post_barrier_c1_runtime_code_blob()->code_begin();
  //__ load_const_optimized(R0, c_code);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(c_code));
  __ mtctr(R0);
  __ mr(R0, addr_reg); // Pass addr in R0.
  __ bctrl();
  __ b(*stub->continuation());
}
398 
399 #undef __
400 #define __ sasm->
401 
// Shared out-of-line runtime stub for the C1 G1 pre-barrier. Expects the
// previous value at -8(SP), stored there by gen_g1_pre_barrier_stub.
// Rechecks that marking is still active, then enqueues the value in the
// thread-local SATB buffer; when the buffer is full it calls
// SATBMarkQueueSet::handle_zero_index_for_thread (saving all volatile
// GPRs around the C call) and retries.
void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  BarrierSet* bs = BarrierSet::barrier_set();

  __ set_info("g1_pre_barrier_slow_id", false);

  // Using stack slots: pre_val (pre-pushed), spill tmp, spill tmp2.
  const int stack_slots = 3;
  Register pre_val = R0; // previous value of memory
  Register tmp  = R14;
  Register tmp2 = R15;

  Label refill, restart, marking_not_active;
  int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
  int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
  int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());

  // Spill the non-volatile temps below SP (-8(SP) already holds pre_val).
  __ std(tmp, -16, R1_SP);
  __ std(tmp2, -24, R1_SP);

  // Is marking still active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ lwz(tmp, satb_q_active_byte_offset, R16_thread);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ lbz(tmp, satb_q_active_byte_offset, R16_thread);
  }
  __ cmpdi(CCR0, tmp, 0);
  __ beq(CCR0, marking_not_active);

  __ bind(restart);
  // Load the index into the SATB buffer. SATBMarkQueue::_index is a
  // size_t so ld_ptr is appropriate.
  __ ld(tmp, satb_q_index_byte_offset, R16_thread);

  // index == 0?
  __ cmpdi(CCR0, tmp, 0);
  __ beq(CCR0, refill); // Buffer full: refill, then come back to restart.

  __ ld(tmp2, satb_q_buf_byte_offset, R16_thread);
  __ ld(pre_val, -8, R1_SP); // Load from stack.
  __ addi(tmp, tmp, -oopSize); // Decrement index.

  __ std(tmp, satb_q_index_byte_offset, R16_thread);
  __ stdx(pre_val, tmp2, tmp); // [_buf + index] := <address_of_card>

  __ bind(marking_not_active);
  // Restore temp registers and return-from-leaf.
  __ ld(tmp2, -24, R1_SP);
  __ ld(tmp, -16, R1_SP);
  __ blr();

  // Slow path: save all volatile registers (and LR), call into the runtime
  // to swap in a fresh buffer, then retry the enqueue.
  __ bind(refill);
  const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
  __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
  __ mflr(R0);
  __ std(R0, _abi(lr), R1_SP);
  __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread), R16_thread);
  __ pop_frame();
  __ ld(R0, _abi(lr), R1_SP);
  __ mtlr(R0);
  __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
  __ b(restart);
}
467 
// Shared out-of-line runtime stub for the C1 G1 post-barrier. Expects the
// store address in R0 (set up by gen_g1_post_barrier_stub). Computes the
// card address, filters young and already-dirty cards, then dirties the
// card and enqueues its address in the thread-local dirty card queue;
// when the queue is full it calls
// DirtyCardQueueSet::handle_zero_index_for_thread (saving all volatile
// GPRs around the C call) and retries.
void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
  G1BarrierSet* bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());

  __ set_info("g1_post_barrier_slow_id", false);

  // Using stack slots: spill addr, spill tmp2
  const int stack_slots = 2;
  Register tmp = R0;
  Register addr = R14;
  Register tmp2 = R15;
  jbyte* byte_map_base = bs->card_table()->byte_map_base();

  Label restart, refill, ret;

  // Spill the non-volatile temps below SP.
  __ std(addr, -8, R1_SP);
  __ std(tmp2, -16, R1_SP);

  __ srdi(addr, R0, CardTable::card_shift); // Addr is passed in R0.
  __ load_const_optimized(/*cardtable*/ tmp2, byte_map_base, tmp);
  __ add(addr, tmp2, addr); // addr := card address in the card table.
  __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]

  // Return if young card.
  __ cmpwi(CCR0, tmp, G1CardTable::g1_young_card_val());
  __ beq(CCR0, ret);

  // Return if sequential consistent value is already dirty.
  __ membar(Assembler::StoreLoad);
  __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]

  __ cmpwi(CCR0, tmp, G1CardTable::dirty_card_val());
  __ beq(CCR0, ret);

  // Not dirty.

  // First, dirty it.
  __ li(tmp, G1CardTable::dirty_card_val());
  __ stb(tmp, 0, addr);

  int dirty_card_q_index_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
  int dirty_card_q_buf_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());

  __ bind(restart);

  // Get the index into the update buffer. DirtyCardQueue::_index is
  // a size_t so ld_ptr is appropriate here.
  __ ld(tmp2, dirty_card_q_index_byte_offset, R16_thread);

  // index == 0?
  __ cmpdi(CCR0, tmp2, 0);
  __ beq(CCR0, refill); // Queue full: refill, then come back to restart.

  __ ld(tmp, dirty_card_q_buf_byte_offset, R16_thread);
  __ addi(tmp2, tmp2, -oopSize); // Decrement index.

  __ std(tmp2, dirty_card_q_index_byte_offset, R16_thread);
  __ add(tmp2, tmp, tmp2);
  __ std(addr, 0, tmp2); // [_buf + index] := <address_of_card>

  // Restore temp registers and return-from-leaf.
  __ bind(ret);
  __ ld(tmp2, -16, R1_SP);
  __ ld(addr, -8, R1_SP);
  __ blr();

  // Slow path: save all volatile registers (and LR), call into the runtime
  // to swap in a fresh queue buffer, then retry the enqueue.
  __ bind(refill);
  const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
  __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
  __ mflr(R0);
  __ std(R0, _abi(lr), R1_SP);
  __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread), R16_thread);
  __ pop_frame();
  __ ld(R0, _abi(lr), R1_SP);
  __ mtlr(R0);
  __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
  __ b(restart);
}
547 
548 #undef __
549 
550 #endif // COMPILER1