1 /*
   2  * Copyright (c) 2018, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/shenandoah/brooksPointer.hpp"
  26 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
  27 #include "gc/shenandoah/shenandoahConnectionMatrix.hpp"
  28 #include "gc/shenandoah/shenandoahHeap.hpp"
  29 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  30 #include "gc/shenandoah/shenandoahRuntime.hpp"
  31 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
  32 #include "interpreter/interpreter.hpp"
  33 #include "interpreter/interp_masm.hpp"
  34 #include "runtime/sharedRuntime.hpp"
  35 #include "runtime/thread.hpp"
  36 #ifdef COMPILER1
  37 #include "c1/c1_LIRAssembler.hpp"
  38 #include "c1/c1_MacroAssembler.hpp"
  39 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
  40 #endif
  41 
  42 #define __ masm->
  43 
// Entry points of the write barrier stubs produced by generate_shenandoah_wb()
// (see barrier_stubs_init()). NULL until the stubs have been generated.
address ShenandoahBarrierSetAssembler::_shenandoah_wb = NULL;
address ShenandoahBarrierSetAssembler::_shenandoah_wb_C = NULL;
  46 
// Emits the pre-barrier for an oop arraycopy: calls into the SATB runtime
// with (addr, count) so the about-to-be-overwritten elements are recorded.
// Nothing is emitted for non-oop copies or for uninitialized destinations
// (no previous values exist there).
void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register addr, Register count, RegSet saved_regs) {
  if (is_oop) {
    bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
    if (!dest_uninitialized) {
      __ push(saved_regs, sp);
      // Shuffle (addr, count) into (c_rarg0, c_rarg1) without clobbering
      // either value; the fully-swapped case needs rscratch1 as a temp.
      if (count == c_rarg0) {
        if (addr == c_rarg1) {
          // exactly backwards!!
          __ mov(rscratch1, c_rarg0);
          __ mov(c_rarg0, c_rarg1);
          __ mov(c_rarg1, rscratch1);
        } else {
          __ mov(c_rarg1, count);
          __ mov(c_rarg0, addr);
        }
      } else {
        __ mov(c_rarg0, addr);
        __ mov(c_rarg1, count);
      }
      // The runtime entry differs by element width (narrowOop vs oop).
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_narrow_oop_entry), 2);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_oop_entry), 2);
      }
      __ pop(saved_regs, sp);
    }
  }
}
  76 
// Emits the post-barrier for an oop arraycopy: reconstructs the element
// count from the [start, end] bounds supplied by this platform's stubs
// and calls the write_ref_array post runtime entry with (start, count).
void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register start, Register end, Register scratch, RegSet saved_regs) {
  if (is_oop) {
    __ push(saved_regs, sp);
    // must compute element count unless barrier set interface is changed (other platforms supply count)
    assert_different_registers(start, end, scratch);
    __ lea(scratch, Address(end, BytesPerHeapOop)); // end is inclusive: one-past-end = end + oop size
    __ sub(scratch, scratch, start);               // subtract start to get #bytes
    __ lsr(scratch, scratch, LogBytesPerHeapOop);  // convert to element count
    __ mov(c_rarg0, start);
    __ mov(c_rarg1, scratch);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_post_entry), 2);
    __ pop(saved_regs, sp);
  }
}
  92 
  93 void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
  94                                                                  Register obj,
  95                                                                  Register pre_val,
  96                                                                  Register thread,
  97                                                                  Register tmp,
  98                                                                  bool tosca_live,
  99                                                                  bool expand_call) {
 100   if (ShenandoahSATBBarrier) {
 101     satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, tosca_live, expand_call);
 102   }
 103 }
 104 
// Emits the SATB (snapshot-at-the-beginning) pre-write barrier. If marking
// is active, the previous field value (loaded from [obj], or already present
// in pre_val when obj == noreg) is pushed onto the thread-local SATB queue;
// a full queue buffer falls back to the runtime slow path.
//
// obj:         address of the field being overwritten, or noreg when the
//              previous value is pre-loaded in pre_val
// pre_val:     register receiving/holding the previous oop value
// thread:      must be rthread
// tmp:         scratch register
// tosca_live:  r0 holds a live value and must be preserved across the slow path
// expand_call: expand call_VM_leaf directly (see comment below)
void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
                                                           Register obj,
                                                           Register pre_val,
                                                           Register thread,
                                                           Register tmp,
                                                           bool tosca_live,
                                                           bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

  assert(thread == rthread, "must be");

  Label done;
  Label runtime;

  assert_different_registers(obj, pre_val, tmp, rscratch1);
  assert(pre_val != noreg &&  tmp != noreg, "expecting a register");

  Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  // Is marking active?  Load width matches the size of the 'active' flag.
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ ldrw(tmp, in_progress);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ ldrb(tmp, in_progress);
  }
  __ cbzw(tmp, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    // AS_RAW: no barriers on this load -- we want the old value as-is.
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?  Nulls need not be recorded.
  __ cbz(pre_val, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  __ ldr(tmp, index);                      // tmp := *index_adr
  __ cbz(tmp, runtime);                    // tmp == 0?
                                        // If yes, goto runtime

  __ sub(tmp, tmp, wordSize);              // tmp := tmp - wordSize
  __ str(tmp, index);                      // *index_adr := tmp
  __ ldr(rscratch1, buffer);
  __ add(tmp, tmp, rscratch1);             // tmp := tmp + *buffer_adr

  // Record the previous value
  __ str(pre_val, Address(tmp, 0));
  __ b(done);

  __ bind(runtime);
  // save the live input values
  RegSet saved = RegSet::of(pre_val);
  if (tosca_live) saved += RegSet::of(r0);
  if (obj != noreg) saved += RegSet::of(obj);

  __ push(saved, sp);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(rfp+frame::interpreter_frame_last_sp) == NULL.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then ebp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.

  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  }

  __ pop(saved, sp);

  __ bind(done);
}
 193 
// Emits the connection-matrix post-write barrier: sets the byte at
// _matrix[to * stride + from], where 'to' is the region index of new_val
// and 'from' is the region index of store_addr, recording that the 'from'
// region now references the 'to' region. No-op unless UseShenandoahMatrix;
// null stores are skipped.
void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_post(MacroAssembler* masm,
                                                                  Register store_addr,
                                                                  Register new_val,
                                                                  Register thread,
                                                                  Register tmp,
                                                                  Register tmp2) {
  assert(thread == rthread, "must be");
  assert(UseShenandoahGC, "expect Shenandoah GC");

  if (! UseShenandoahMatrix) {
    // No need for that barrier if not using matrix.
    return;
  }

  assert_different_registers(store_addr, new_val, thread, tmp, tmp2, rscratch1);

  Label done;
  __ cbz(new_val, done);

  ShenandoahConnectionMatrix* matrix = ShenandoahHeap::heap()->connection_matrix();

  // Compute to-region index
  __ lsr(tmp, new_val, ShenandoahHeapRegion::region_size_bytes_shift_jint());

  // Compute from-region index
  __ lsr(tmp2, store_addr, ShenandoahHeapRegion::region_size_bytes_shift_jint());

  // Compute matrix index
  __ mov(rscratch1, matrix->stride_jint());
  // Address is _matrix[to * stride + from]
  __ madd(tmp, tmp, rscratch1, tmp2);
  __ mov(rscratch1, matrix->magic_offset());
  Address loc(tmp, rscratch1);

  __ ldrb(tmp2, loc);
  __ cbnz(tmp2, done);   // already recorded -- skip the redundant store
  __ mov(tmp2, 1);
  __ strb(tmp2, loc);
  __ bind(done);
}
 234 
 235 void ShenandoahBarrierSetAssembler::read_barrier(MacroAssembler* masm, Register dst) {
 236   if (ShenandoahReadBarrier) {
 237     read_barrier_impl(masm, dst);
 238   }
 239 }
 240 
 241 void ShenandoahBarrierSetAssembler::read_barrier_impl(MacroAssembler* masm, Register dst) {
 242   assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier), "should be enabled");
 243   Label is_null;
 244   __ cbz(dst, is_null);
 245   read_barrier_not_null_impl(masm, dst);
 246   __ bind(is_null);
 247 }
 248 
 249 void ShenandoahBarrierSetAssembler::read_barrier_not_null(MacroAssembler* masm, Register dst) {
 250   if (ShenandoahReadBarrier) {
 251     read_barrier_not_null_impl(masm, dst);
 252   }
 253 }
 254 
 255 
 256 void ShenandoahBarrierSetAssembler::read_barrier_not_null_impl(MacroAssembler* masm, Register dst) {
 257   assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier), "should be enabled");
 258   __ ldr(dst, Address(dst, BrooksPointer::byte_offset()));
 259 }
 260 
 261 void ShenandoahBarrierSetAssembler::write_barrier(MacroAssembler* masm, Register dst) {
 262   if (ShenandoahWriteBarrier) {
 263     write_barrier_impl(masm, dst);
 264   }
 265 }
 266 
// Emits the Shenandoah write barrier: resolves dst through its Brooks
// pointer and, when evacuation/traversal is in progress and dst lies in
// the collection set, calls into the runtime (write_barrier_IRT); the
// resulting oop is left in dst. dst must be non-null.
void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Register dst) {
  assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "should be enabled");
  assert(dst != rscratch1, "different regs");
  assert(dst != rscratch2, "Need rscratch2");

  Label done;

  // Read the per-thread gc-state byte first; the LoadLoad membar orders
  // this load against the forwarding-pointer load below.
  Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(rscratch1, gc_state);
  __ membar(Assembler::LoadLoad);

  // Now check if evacuation is in progress.
  read_barrier_not_null(masm, dst);

  __ mov(rscratch2, ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
  __ tst(rscratch1, rscratch2);
  __ br(Assembler::EQ, done);

  // Collection-set fast test: one byte per heap region, indexed by the
  // region number derived from the oop's address.
  __ lsr(rscratch1, dst, ShenandoahHeapRegion::region_size_bytes_shift_jint());
  __ mov(rscratch2,  ShenandoahHeap::in_cset_fast_test_addr());
  __ ldrb(rscratch2, Address(rscratch2, rscratch1));
  __ tst(rscratch2, 0x1);
  __ br(Assembler::EQ, done);

  // Save possibly live regs (r0-r4 except dst, plus v0).
  RegSet live_regs = RegSet::range(r0, r4) - dst;
  __ push(live_regs, sp);
  __ strd(v0, __ pre(sp, 2 * -wordSize));

  // Call into runtime
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_IRT), dst);

  // Move result into dst reg.
  __ mov(dst, r0);

  // Restore possibly live regs.
  __ ldrd(v0, __ post(sp, 2 * wordSize));
  __ pop(live_regs, sp);

  __ bind(done);
}
 308 
// Storeval barrier for a value (dst) about to be stored into the heap:
//  - ShenandoahStoreValEnqueueBarrier: write-barrier the non-null value,
//    then enqueue it via the SATB pre-barrier so marking keeps it alive.
//  - ShenandoahStoreValReadBarrier: resolve the value through its Brooks
//    pointer only.
// dst is updated in place; tmp is a scratch register for the pre-barrier.
void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
  if (ShenandoahStoreValEnqueueBarrier) {
    Label is_null;
    __ cbz(dst, is_null);
    write_barrier_impl(masm, dst);
    __ bind(is_null);
    // Save possibly live regs (r0-r4 except dst, plus v0).
    RegSet live_regs = RegSet::range(r0, r4) - dst;
    __ push(live_regs, sp);
    __ strd(v0, __ pre(sp, 2 * -wordSize));

    satb_write_barrier_pre(masm, noreg, dst, rthread, tmp, true, false);

    // Restore possibly live regs.
    __ ldrd(v0, __ post(sp, 2 * wordSize));
    __ pop(live_regs, sp);
  }
  if (ShenandoahStoreValReadBarrier) {
    read_barrier_impl(masm, dst);
  }
}
 330 
// Emits a heap load with Shenandoah barriers: read-barrier the base address
// for in-heap accesses, perform the raw load, and for weak/phantom
// reference loads (Reference.get() style) keep the loaded referent alive
// via the SATB pre-barrier.
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                            Register dst, Address src, Register tmp1, Register tmp_thread) {
  bool on_oop = type == T_OBJECT || type == T_ARRAY;
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool on_reference = on_weak || on_phantom;

  if (in_heap) {
    read_barrier_not_null(masm, src.base());
  }

  BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
  if (ShenandoahKeepAliveBarrier && on_oop && on_reference) {
    // enter()/leave() provide a frame for the expanded runtime call
    // (expand_call == true below).
    __ enter();
    satb_write_barrier_pre(masm /* masm */,
                           noreg /* obj */,
                           dst /* pre_val */,
                           rthread /* thread */,
                           tmp1 /* tmp */,
                           true /* tosca_live */,
                           true /* expand_call */);
    __ leave();
  }
}
 356 
// Emits a store with Shenandoah barriers:
//  - write-barrier the destination base (in-heap stores),
//  - for oop stores: SATB pre-barrier on the old field value, storeval
//    barrier on the new value, then the connection-matrix post-barrier.
// Non-oop stores get only the destination write barrier.
// Clobbers r3 (flattened field address) for oop stores.
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2) {
  bool on_oop = type == T_OBJECT || type == T_ARRAY;
  bool in_heap = (decorators & IN_HEAP) != 0;
  if (in_heap) {
    write_barrier(masm, dst.base());
  }
  if (!on_oop) {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2);
    return;
  }

  // flatten object address if needed: the pre/post barriers below want the
  // exact field address in a single register (r3)
  if (dst.index() == noreg && dst.offset() == 0) {
    if (dst.base() != r3) {
      __ mov(r3, dst.base());
    }
  } else {
    __ lea(r3, dst);
  }

  shenandoah_write_barrier_pre(masm,
                               r3 /* obj */,
                               tmp2 /* pre_val */,
                               rthread /* thread */,
                               tmp1  /* tmp */,
                               val != noreg /* tosca_live */,
                               false /* expand_call */);

  if (val == noreg) {
    // Storing null: no storeval barrier needed, and the post-barrier
    // skips null values anyway.
    BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), noreg, noreg, noreg);
  } else {
    storeval_barrier(masm, val, tmp1);
    // The matrix post-barrier needs the uncompressed oop for the region
    // cross check, so keep an uncompressed copy when oops are compressed.
    Register new_val = val;
    if (UseCompressedOops) {
      new_val = rscratch2;
      __ mov(new_val, val);
    }
    BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), val, noreg, noreg);
    shenandoah_write_barrier_post(masm,
                                  r3 /* store_adr */,
                                  new_val /* new_val */,
                                  rthread /* thread */,
                                  tmp1 /* tmp */,
                                  tmp2 /* tmp2 */);
  }

}
 406 
// Emits oop equality (acmp) with the Shenandoah barrier: when the raw
// compare fails, both operands are resolved through their read barriers
// and compared again, so from-space and to-space copies of the same object
// compare equal. Result is left in the condition flags.
// NB: op1/op2 may be updated in place to their resolved values.
void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm, DecoratorSet decorators, Register op1, Register op2) {
  __ cmp(op1, op2);
  if (ShenandoahAcmpBarrier) {
    Label done;
    __ br(Assembler::EQ, done);
    // The object may have been evacuated, but we won't see it without a
    // membar here.
    __ membar(Assembler::LoadStore| Assembler::LoadLoad);
    read_barrier(masm, op1);
    read_barrier(masm, op2);
    __ cmp(op1, op2);
    __ bind(done);
  }
}
 421 
 422 void ShenandoahBarrierSetAssembler::resolve_for_read(MacroAssembler* masm, DecoratorSet decorators, Register obj) {
 423   bool oop_not_null = (decorators & OOP_NOT_NULL) != 0;
 424   if (oop_not_null) {
 425     read_barrier_not_null(masm, obj);
 426   } else {
 427     read_barrier(masm, obj);
 428   }
 429 }
 430 
// Resolves obj for writing by emitting the write barrier; null handling
// and the evacuation check live in write_barrier()/write_barrier_impl().
void ShenandoahBarrierSetAssembler::resolve_for_write(MacroAssembler* masm, DecoratorSet decorators, Register obj) {
  write_barrier(masm, obj);
}
 434 
// Emits a CAS on an oop location via an LL/SC retry loop, with Shenandoah
// false-failure handling: when the raw compare fails, both the witnessed
// value and the expected value are resolved through their read barriers;
// if they denote the same object the CAS is retried with the witnessed
// value as the new expected value.
//
// result == noreg: compare-and-set; success is reported via the condition
//                  flags (EQ == success), rscratch1 serves as scratch result.
// result != noreg: compare-and-exchange; result receives the witnessed value
//                  (kept compressed when UseCompressedOops).
// encode:  apply the storeval barrier to new_val and, with compressed oops,
//          compress expected/new_val into tmp1/tmp2.
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val,
                                                bool acquire, bool release, bool weak, bool encode,
                                                Register tmp1, Register tmp2, Register tmp3,
                                                Register result) {

  if (encode) {
    storeval_barrier(masm, new_val, tmp3);
  }

  if (UseCompressedOops) {
    if (encode) {
      __ encode_heap_oop(tmp1, expected);
      expected = tmp1;
      __ encode_heap_oop(tmp2, new_val);
      new_val = tmp2;
    }
  }
  bool is_cae = (result != noreg);
  bool is_narrow = UseCompressedOops;
  Assembler::operand_size size = is_narrow ? Assembler::word : Assembler::xword;
  if (! is_cae) result = rscratch1;

  assert_different_registers(addr, expected, new_val, result, tmp3);

  Label retry, done, fail;

  // CAS, using LL/SC pair.
  __ bind(retry);
  __ load_exclusive(result, addr, size, acquire);
  if (is_narrow) {
    __ cmpw(result, expected);
  } else {
    __ cmp(result, expected);
  }
  __ br(Assembler::NE, fail);
  __ store_exclusive(tmp3, new_val, addr, size, release);
  if (weak) {
    __ cmpw(tmp3, 0u); // If the store fails, return NE to our caller
  } else {
    // Strong CAS: loop until the store-exclusive succeeds.
    __ cbnzw(tmp3, retry);
  }
  __ b(done);

 __  bind(fail);
  // Check if rb(expected)==rb(result)
  // Shuffle registers so that we have memory value ready for next expected.
  __ mov(tmp3, expected);
  __ mov(expected, result);
  if (is_narrow) {
    // The read barrier needs uncompressed oops.
    __ decode_heap_oop(result, result);
    __ decode_heap_oop(tmp3, tmp3);
  }
  __ resolve_for_read(0, result);
  __ resolve_for_read(0, tmp3);
  __ cmp(result, tmp3);
  // Retry with expected now being the value we just loaded from addr.
  __ br(Assembler::EQ, retry);
  if (is_narrow && is_cae) {
    // For cmp-and-exchange and narrow oops, we need to restore
    // the compressed old-value. We moved it to 'expected' a few lines up.
    __ mov(result, expected);
  }
  __ bind(done);

}
 500 
 501 #ifdef COMPILER1
 502 
 503 #undef __
 504 #define __ ce->masm()->
 505 
// C1 slow-path stub for the SATB pre-barrier. Bound at stub->entry():
// loads the previous value when requested, skips null pre-values, and
// otherwise hands pre_val to the shared pre-barrier runtime code blob
// before resuming at stub->continuation().
void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());

  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
  }
  // Null previous values need not be enqueued.
  __ cbz(pre_val_reg, *stub->continuation());
  // Pass pre_val as parameter 0 to the runtime stub.
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ b(*stub->continuation());
}
 527 
 528 #undef __
 529 
 530 #define __ sasm->
 531 
// Generates the shared C1 runtime stub for the SATB pre-barrier. The
// previous field value arrives as parameter 0. Fast path appends it to
// the thread-local SATB buffer; a full buffer falls back to the
// write_ref_field_pre_entry runtime call. Everything is skipped when
// neither MARKING nor TRAVERSAL is set in the gc-state byte.
void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);

  // arg0 : previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();

  const Register pre_val = r0;
  const Register thread = rthread;
  const Register tmp = rscratch1;

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(tmp, gc_state);
  __ mov(rscratch2, ShenandoahHeap::MARKING | ShenandoahHeap::TRAVERSAL);
  __ tst(tmp, rscratch2);
  __ br(Assembler::EQ, done);

  // Can we store original value in the thread's buffer?
  // index == 0 means the buffer is full.
  __ ldr(tmp, queue_index);
  __ cbz(tmp, runtime);

  // Bump the index down one word and store the pre-value at buffer + index.
  __ sub(tmp, tmp, wordSize);
  __ str(tmp, queue_index);
  __ ldr(rscratch2, buffer);
  __ add(tmp, tmp, rscratch2);
  __ load_parameter(0, rscratch2);
  __ str(rscratch2, Address(tmp, 0));
  __ b(done);

  __ bind(runtime);
  // Slow path: hand the pre-value to the shared runtime.
  __ push_call_clobbered_registers();
  __ load_parameter(0, pre_val);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  __ pop_call_clobbered_registers();
  __ bind(done);

  __ epilogue();
}
 577 
 578 #undef __
 579 
 580 #endif // COMPILER1
 581 
 582 address ShenandoahBarrierSetAssembler::shenandoah_wb() {
 583   assert(_shenandoah_wb != NULL, "need write barrier stub");
 584   return _shenandoah_wb;
 585 }
 586 
 587 address ShenandoahBarrierSetAssembler::shenandoah_wb_C() {
 588   assert(_shenandoah_wb_C != NULL, "need write barrier stub");
 589   return _shenandoah_wb_C;
 590 }
 591 
 592 #define __ cgen->assembler()->
 593 
// Shenandoah write barrier.
//
// Input:
//   r0: OOP to evacuate.  Not null.
//
// Output:
//   r0: Pointer to evacuated OOP.
//
// Trash rscratch1, rscratch2.  Preserve everything else.
//
// c_abi:        when true, only FP call-clobbered registers are saved around
//               the runtime call (GP registers presumed handled per the C
//               ABI by the caller); when false, all call-clobbered registers
//               are saved.
// do_cset_test: emit an inline collection-set membership test and return
//               immediately when the object is not in the cset.
address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator* cgen, bool c_abi, bool do_cset_test) {

  __ align(6);
  StubCodeMark mark(cgen, "StubRoutines", "shenandoah_wb");
  address start = __ pc();

  if (do_cset_test) {
    Label work;
    // Index the one-byte-per-region in-cset table by r0's region number.
    __ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
    __ lsr(rscratch1, r0, ShenandoahHeapRegion::region_size_bytes_shift_jint());
    __ ldrb(rscratch2, Address(rscratch2, rscratch1));
    __ tbnz(rscratch2, 0, work);
    __ ret(lr);
    __ bind(work);
  }

  Register obj = r0;

  __ enter(); // required for proper stackwalking of RuntimeStub frame

  if (!c_abi) {
    __ push_call_clobbered_registers();
  } else {
    __ push_call_clobbered_fp_registers();
  }

  // Call the runtime write barrier; the result is returned in r0.
  __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT));
  __ blrt(lr, 1, 0, MacroAssembler::ret_type_integral);
  if (!c_abi) {
    // Preserve the result across the register restore.
    __ mov(rscratch1, obj);
    __ pop_call_clobbered_registers();
    __ mov(obj, rscratch1);
  } else {
    __ pop_call_clobbered_fp_registers();
  }

  __ leave(); // required for proper stackwalking of RuntimeStub frame
  __ ret(lr);

  return start;
}
 644 
 645 #undef __
 646 
 647 void ShenandoahBarrierSetAssembler::barrier_stubs_init() {
 648   if (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier) {
 649     int stub_code_size = 2048;
 650     ResourceMark rm;
 651     BufferBlob* bb = BufferBlob::create("shenandoah_barrier_stubs", stub_code_size);
 652     CodeBuffer buf(bb);
 653     StubCodeGenerator cgen(&buf);
 654     _shenandoah_wb = generate_shenandoah_wb(&cgen, false, true);
 655     _shenandoah_wb_C = generate_shenandoah_wb(&cgen, true, !ShenandoahWriteBarrierCsetTestInIR);
 656   }
 657 }