/*
 * Copyright (c) 2018, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif

#define __ masm->

address ShenandoahBarrierSetAssembler::_shenandoah_wb = NULL;

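// Pre-barrier for oop arraycopy: marshal (addr, count) into c_rarg0/c_rarg1 and
// call ShenandoahRuntime::write_ref_array_pre_*_entry. Skipped when the
// destination is uninitialized or when the heuristics can do traversal GC.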
void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register addr, Register count, RegSet saved_regs) {
  if (is_oop) {
    bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
    if (!dest_uninitialized && !ShenandoahHeap::heap()->heuristics()->can_do_traversal_gc()) {
      __ push(saved_regs, sp);
      if (count == c_rarg0) {
        if (addr == c_rarg1) {
          // exactly backwards!!
          __ mov(rscratch1, c_rarg0);
          __ mov(c_rarg0, c_rarg1);
          __ mov(c_rarg1, rscratch1);
        } else {
          __ mov(c_rarg1, count);
          __ mov(c_rarg0, addr);
        }
      } else {
        __ mov(c_rarg0, addr);
        __ mov(c_rarg1, count);
      }
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_narrow_oop_entry), 2);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_oop_entry), 2);
      }
      __ pop(saved_regs, sp);
    }
  }
}

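// Post-barrier for oop arraycopy: convert the inclusive [start, end] range back
// into (start, element count) and call ShenandoahRuntime::write_ref_array_post_entry.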
void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register start, Register end, Register scratch, RegSet saved_regs) {
  if (is_oop) {
    __ push(saved_regs, sp);
    // must compute element count unless barrier set interface is changed (other platforms supply count)
    assert_different_registers(start, end, scratch);
    __ lea(scratch, Address(end, BytesPerHeapOop));
    __ sub(scratch, scratch, start);               // subtract start to get #bytes
    __ lsr(scratch, scratch, LogBytesPerHeapOop);  // convert to element count
    __ mov(c_rarg0, start);
    __ mov(c_rarg1, scratch);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_post_entry), 2);
    __ pop(saved_regs, sp);
  }
}

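// Pre-write barrier entry point; only emits the SATB pre-barrier when
// ShenandoahSATBBarrier is enabled.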
void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
                                                                 Register obj,
                                                                 Register pre_val,
                                                                 Register thread,
                                                                 Register tmp,
                                                                 bool tosca_live,
                                                                 bool expand_call) {
  if (ShenandoahSATBBarrier) {
    satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, tosca_live, expand_call);
  }
}

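// SATB pre-barrier: if marking is active, record the previous value of the field
// (pre_val) in the thread-local SATB queue, calling into the runtime when the
// queue is full. Loads the previous value itself when obj is a valid register.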
void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
                                                           Register obj,
                                                           Register pre_val,
                                                           Register thread,
                                                           Register tmp,
                                                           bool tosca_live,
                                                           bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

  assert(thread == rthread, "must be");

  Label done;
  Label runtime;

  assert_different_registers(obj, pre_val, tmp, rscratch1);
  assert(pre_val != noreg && tmp != noreg, "expecting a register");

  Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  // Is marking active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ ldrw(tmp, in_progress);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ ldrb(tmp, in_progress);
  }
  __ cbzw(tmp, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?
  __ cbz(pre_val, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  __ ldr(tmp, index);                      // tmp := *index_adr
  __ cbz(tmp, runtime);                    // tmp == 0? If yes, goto runtime

  __ sub(tmp, tmp, wordSize);              // tmp := tmp - wordSize
  __ str(tmp, index);                      // *index_adr := tmp
  __ ldr(rscratch1, buffer);
  __ add(tmp, tmp, rscratch1);             // tmp := tmp + *buffer_adr

  // Record the previous value
  __ str(pre_val, Address(tmp, 0));
  __ b(done);

  __ bind(runtime);
  // save the live input values
  RegSet saved = RegSet::of(pre_val);
  if (tosca_live) saved += RegSet::of(r0);
  if (obj != noreg) saved += RegSet::of(obj);

  __ push(saved, sp);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(rfp+frame::interpreter_frame_last_sp) == NULL.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then rfp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.

  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  }

  __ pop(saved, sp);

  __ bind(done);
}

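// Read barriers resolve an oop through its Brooks forwarding pointer. The
// *_not_null variants skip the null check.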
void ShenandoahBarrierSetAssembler::read_barrier(MacroAssembler* masm, Register dst) {
  if (ShenandoahReadBarrier) {
    read_barrier_impl(masm, dst);
  }
}

void ShenandoahBarrierSetAssembler::read_barrier_impl(MacroAssembler* masm, Register dst) {
  assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled");
  Label is_null;
  __ cbz(dst, is_null);
  read_barrier_not_null_impl(masm, dst);
  __ bind(is_null);
}

void ShenandoahBarrierSetAssembler::read_barrier_not_null(MacroAssembler* masm, Register dst) {
  if (ShenandoahReadBarrier) {
    read_barrier_not_null_impl(masm, dst);
  }
}

void ShenandoahBarrierSetAssembler::read_barrier_not_null_impl(MacroAssembler* masm, Register dst) {
  assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled");
  __ ldr(dst, Address(dst, ShenandoahBrooksPointer::byte_offset()));
}

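// Write barrier: when the heap is not stable, resolve dst through its forwarding
// pointer and, if evacuation or traversal is in progress, call the shenandoah_wb
// stub to get the to-space copy.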
void ShenandoahBarrierSetAssembler::write_barrier(MacroAssembler* masm, Register dst) {
  if (ShenandoahWriteBarrier) {
    write_barrier_impl(masm, dst);
  }
}

void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Register dst) {
  assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "Should be enabled");
  assert(dst != rscratch1, "need rscratch1");
  assert(dst != rscratch2, "need rscratch2");

  Label done;

  Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(rscratch1, gc_state);

  // Check for heap stability
  __ mov(rscratch2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
  __ tst(rscratch1, rscratch2);
  __ br(Assembler::EQ, done);

  // Heap is unstable, need to perform the read-barrier even if WB is inactive
  if (ShenandoahWriteBarrierRB) {
    __ ldr(dst, Address(dst, ShenandoahBrooksPointer::byte_offset()));
  }

  // Check for evacuation-in-progress and jump to WB slow-path if needed
  __ mov(rscratch2, ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
  __ tst(rscratch1, rscratch2);
  __ br(Assembler::EQ, done);

  RegSet to_save = RegSet::of(r0);
  if (dst != r0) {
    __ push(to_save, sp);
    __ mov(r0, dst);
  }

  __ far_call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_wb())));

  if (dst != r0) {
    __ mov(dst, r0);
    __ pop(to_save, sp);
  }

  __ bind(done);
}

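// Barrier for the value being stored: the enqueue flavor write-barriers the value
// and records it via the SATB pre-barrier; the read-barrier flavor just resolves it.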
void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
  if (ShenandoahStoreValEnqueueBarrier) {
    Label is_null;
    __ cbz(dst, is_null);
    write_barrier_impl(masm, dst);
    __ bind(is_null);
    // Save possibly live regs.
    RegSet live_regs = RegSet::range(r0, r4) - dst;
    __ push(live_regs, sp);
    __ strd(v0, __ pre(sp, 2 * -wordSize));

    satb_write_barrier_pre(masm, noreg, dst, rthread, tmp, true, false);

    // Restore possibly live regs.
    __ ldrd(v0, __ post(sp, 2 * wordSize));
    __ pop(live_regs, sp);
  }
  if (ShenandoahStoreValReadBarrier) {
    read_barrier_impl(masm, dst);
  }
}

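// Load access: read-barrier the base address, perform the plain load, and for
// weak/phantom reference loads keep the referent alive via the SATB pre-barrier.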
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                            Register dst, Address src, Register tmp1, Register tmp_thread) {
  bool on_oop = type == T_OBJECT || type == T_ARRAY;
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool on_reference = on_weak || on_phantom;

  if (in_heap) {
    read_barrier_not_null(masm, src.base());
  }

  BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
  if (ShenandoahKeepAliveBarrier && on_oop && on_reference) {
    __ enter();
    satb_write_barrier_pre(masm /* masm */,
                           noreg /* obj */,
                           dst /* pre_val */,
                           rthread /* thread */,
                           tmp1 /* tmp */,
                           true /* tosca_live */,
                           true /* expand_call */);
    __ leave();
  }
}

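// Store access: write-barrier the destination base, emit the SATB pre-barrier for
// the old value, apply the storeval barrier to the new value, then do the plain store.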
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2) {
  bool on_oop = type == T_OBJECT || type == T_ARRAY;
  bool in_heap = (decorators & IN_HEAP) != 0;
  if (in_heap) {
    write_barrier(masm, dst.base());
  }
  if (!on_oop) {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2);
    return;
  }

  // flatten object address if needed
  if (dst.index() == noreg && dst.offset() == 0) {
    if (dst.base() != r3) {
      __ mov(r3, dst.base());
    }
  } else {
    __ lea(r3, dst);
  }

  shenandoah_write_barrier_pre(masm,
                               r3 /* obj */,
                               tmp2 /* pre_val */,
                               rthread /* thread */,
                               tmp1 /* tmp */,
                               val != noreg /* tosca_live */,
                               false /* expand_call */);

  if (val == noreg) {
    BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), noreg, noreg, noreg);
  } else {
    storeval_barrier(masm, val, tmp1);
    // G1 barrier needs uncompressed oop for region cross check.
    Register new_val = val;
    if (UseCompressedOops) {
      new_val = rscratch2;
      __ mov(new_val, val);
    }
    BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), val, noreg, noreg);
  }
}

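// acmp barrier: if the raw comparison fails, either operand may be a from-space
// copy; resolve both through their forwarding pointers and compare again.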
void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm, Register op1, Register op2) {
  __ cmp(op1, op2);
  if (ShenandoahAcmpBarrier) {
    Label done;
    __ br(Assembler::EQ, done);
    // The object may have been evacuated, but we won't see it without a
    // membar here.
    __ membar(Assembler::LoadStore | Assembler::LoadLoad);
    read_barrier(masm, op1);
    read_barrier(masm, op2);
    __ cmp(op1, op2);
    __ bind(done);
  }
}

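// TLAB allocation: the requested size is padded by the Brooks pointer word and the
// new object's forwarding pointer is initialized to point to the object itself.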
void ShenandoahBarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
                                                  Register var_size_in_bytes,
                                                  int con_size_in_bytes,
                                                  Register t1,
                                                  Register t2,
                                                  Label& slow_case) {
  assert_different_registers(obj, t2);
  assert_different_registers(obj, var_size_in_bytes);
  Register end = t2;

  __ ldr(obj, Address(rthread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    __ lea(end, Address(obj, (int) (con_size_in_bytes + ShenandoahBrooksPointer::byte_size())));
  } else {
    __ add(var_size_in_bytes, var_size_in_bytes, ShenandoahBrooksPointer::byte_size());
    __ lea(end, Address(obj, var_size_in_bytes));
  }
  __ ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset()));
  __ cmp(end, rscratch1);
  __ br(Assembler::HI, slow_case);

  // update the tlab top pointer
  __ str(end, Address(rthread, JavaThread::tlab_top_offset()));

  __ add(obj, obj, ShenandoahBrooksPointer::byte_size());
  __ str(obj, Address(obj, ShenandoahBrooksPointer::byte_offset()));

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    __ sub(var_size_in_bytes, var_size_in_bytes, obj);
  }
}

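// Resolve an oop according to the access decorators: writes use the write barrier,
// reads use the read barrier; the null check is elided when IS_NOT_NULL is set.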
void ShenandoahBarrierSetAssembler::resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj) {
  bool oop_not_null = (decorators & IS_NOT_NULL) != 0;
  bool is_write = (decorators & ACCESS_WRITE) != 0;
  if (is_write) {
    if (oop_not_null) {
      write_barrier(masm, obj);
    } else {
      Label done;
      __ cbz(obj, done);
      write_barrier(masm, obj);
      __ bind(done);
    }
  } else {
    if (oop_not_null) {
      read_barrier_not_null(masm, obj);
    } else {
      read_barrier(masm, obj);
    }
  }
}

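// CAS on an oop field. With ShenandoahCASBarrier the LL/SC loop must tolerate false
// negatives where memory holds a from-space copy of the expected value: on failure
// both values are resolved through their forwarding pointers and the CAS retries if
// they refer to the same object.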
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val,
                                                bool acquire, bool release, bool weak, bool encode,
                                                Register tmp1, Register tmp2, Register tmp3,
                                                Register result) {

  if (!ShenandoahCASBarrier) {
    if (UseCompressedOops) {
      if (encode) {
        __ encode_heap_oop(tmp1, expected);
        expected = tmp1;
        __ encode_heap_oop(tmp3, new_val);
        new_val = tmp3;
      }
      __ cmpxchg(addr, expected, new_val, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
      __ membar(__ AnyAny);
    } else {
      __ cmpxchg(addr, expected, new_val, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
      __ membar(__ AnyAny);
    }
    return;
  }

  if (encode) {
    storeval_barrier(masm, new_val, tmp3);
  }

  if (UseCompressedOops) {
    if (encode) {
      __ encode_heap_oop(tmp1, expected);
      expected = tmp1;
      __ encode_heap_oop(tmp2, new_val);
      new_val = tmp2;
    }
  }
  bool is_cae = (result != noreg);
  bool is_narrow = UseCompressedOops;
  Assembler::operand_size size = is_narrow ? Assembler::word : Assembler::xword;
  if (!is_cae) result = rscratch1;

  assert_different_registers(addr, expected, new_val, result, tmp3);

  Label retry, done, fail;

  // CAS, using LL/SC pair.
  __ bind(retry);
  __ load_exclusive(result, addr, size, acquire);
  if (is_narrow) {
    __ cmpw(result, expected);
  } else {
    __ cmp(result, expected);
  }
  __ br(Assembler::NE, fail);
  __ store_exclusive(tmp3, new_val, addr, size, release);
  if (weak) {
    __ cmpw(tmp3, 0u); // If the store fails, return NE to our caller
  } else {
    __ cbnzw(tmp3, retry);
  }
  __ b(done);

  __ bind(fail);
  // Check if rb(expected)==rb(result)
  // Shuffle registers so that we have memory value ready for next expected.
  __ mov(tmp3, expected);
  __ mov(expected, result);
  if (is_narrow) {
    __ decode_heap_oop(result, result);
    __ decode_heap_oop(tmp3, tmp3);
  }
  read_barrier_impl(masm, result);
  read_barrier_impl(masm, tmp3);
  __ cmp(result, tmp3);
  // Retry with expected now being the value we just loaded from addr.
  __ br(Assembler::EQ, retry);
  if (is_narrow && is_cae) {
    // For cmp-and-exchange and narrow oops, we need to restore
    // the compressed old-value. We moved it to 'expected' a few lines up.
    __ mov(result, expected);
  }
  __ bind(done);
}

#ifdef COMPILER1

#undef __
#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());

  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
  }
  __ cbz(pre_val_reg, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ b(*stub->continuation());
}

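// C1 slow-path stub for the write barrier: copy obj into the result register,
// null-check it if requested, and apply the write barrier.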
void ShenandoahBarrierSetAssembler::gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub) {

  Register obj = stub->obj()->as_register();
  Register res = stub->result()->as_register();

  Label done;

  __ bind(*stub->entry());

  if (res != obj) {
    __ mov(res, obj);
  }
  // Check for null.
  if (stub->needs_null_check()) {
    __ cbz(res, done);
  }

  write_barrier(ce->masm(), res);

  __ bind(done);
  __ b(*stub->continuation());
}

#undef __

#define __ sasm->

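// C1 runtime stub for the SATB pre-barrier slow path: re-check that marking (or
// traversal) is still in progress, then enqueue the pre-value, calling the runtime
// if the thread-local queue is full.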
void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);

  // arg0 : previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();

  const Register pre_val = r0;
  const Register thread = rthread;
  const Register tmp = rscratch1;

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(tmp, gc_state);
  __ mov(rscratch2, ShenandoahHeap::MARKING | ShenandoahHeap::TRAVERSAL);
  __ tst(tmp, rscratch2);
  __ br(Assembler::EQ, done);

  // Can we store original value in the thread's buffer?
  __ ldr(tmp, queue_index);
  __ cbz(tmp, runtime);

  __ sub(tmp, tmp, wordSize);
  __ str(tmp, queue_index);
  __ ldr(rscratch2, buffer);
  __ add(tmp, tmp, rscratch2);
  __ load_parameter(0, rscratch2);
  __ str(rscratch2, Address(tmp, 0));
  __ b(done);

  __ bind(runtime);
  __ push_call_clobbered_registers();
  __ load_parameter(0, pre_val);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  __ pop_call_clobbered_registers();
  __ bind(done);

  __ epilogue();
}

#undef __

#endif // COMPILER1

address ShenandoahBarrierSetAssembler::shenandoah_wb() {
  assert(_shenandoah_wb != NULL, "need write barrier stub");
  return _shenandoah_wb;
}

#define __ cgen->assembler()->

// Shenandoah write barrier.
//
// Input:
//   r0: OOP to evacuate.  Not null.
//
// Output:
//   r0: Pointer to evacuated OOP.
//
// Trash rscratch1, rscratch2.  Preserve everything else.
address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator* cgen) {

  __ align(6);
  StubCodeMark mark(cgen, "StubRoutines", "shenandoah_wb");
  address start = __ pc();

  Label work;
  __ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
  __ lsr(rscratch1, r0, ShenandoahHeapRegion::region_size_bytes_shift_jint());
  __ ldrb(rscratch2, Address(rscratch2, rscratch1));
  __ tbnz(rscratch2, 0, work);
  __ ret(lr);
  __ bind(work);

  Register obj = r0;

  __ enter(); // required for proper stackwalking of RuntimeStub frame

  __ push_call_clobbered_registers();

  __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT));
  __ blrt(lr, 1, 0, MacroAssembler::ret_type_integral);
  __ mov(rscratch1, obj);
  __ pop_call_clobbered_registers();
  __ mov(obj, rscratch1);

  __ leave(); // required for proper stackwalking of RuntimeStub frame
  __ ret(lr);

  return start;
}

#undef __

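// Generate the shared write barrier stub at startup when a barrier that uses it is
// enabled.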
void ShenandoahBarrierSetAssembler::barrier_stubs_init() {
  if (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier) {
    int stub_code_size = 2048;
    ResourceMark rm;
    BufferBlob* bb = BufferBlob::create("shenandoah_barrier_stubs", stub_code_size);
    CodeBuffer buf(bb);
    StubCodeGenerator cgen(&buf);
    _shenandoah_wb = generate_shenandoah_wb(&cgen);
  }
}