/*
 * Copyright (c) 2018, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahConnectionMatrix.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif

#define __ masm->

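// Pre-barrier before an oop arraycopy: unless the destination is uninitialized,
// call the SATB runtime entry so the oops about to be overwritten in the
// destination range are enqueued for concurrent marking.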
void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register addr, Register count, RegSet saved_regs) {
  if (is_oop) {
    bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
    if (!dest_uninitialized) {
      __ push(saved_regs, sp);
      if (count == c_rarg0) {
        if (addr == c_rarg1) {
          // exactly backwards!!
          __ mov(rscratch1, c_rarg0);
          __ mov(c_rarg0, c_rarg1);
          __ mov(c_rarg1, rscratch1);
        } else {
          __ mov(c_rarg1, count);
          __ mov(c_rarg0, addr);
        }
      } else {
        __ mov(c_rarg0, addr);
        __ mov(c_rarg1, count);
      }
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_narrow_oop_entry), 2);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_oop_entry), 2);
      }
      __ pop(saved_regs, sp);
    }
  }
}

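// Post-barrier after an oop arraycopy: recompute the element count from the
// start/end addresses and call write_ref_array_post_entry for the copied slice.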
void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register start, Register end, Register scratch, RegSet saved_regs) {
  if (is_oop) {
    __ push(saved_regs, sp);
    // must compute element count unless barrier set interface is changed (other platforms supply count)
    assert_different_registers(start, end, scratch);
    __ lea(scratch, Address(end, BytesPerHeapOop));
    __ sub(scratch, scratch, start);               // subtract start to get #bytes
    __ lsr(scratch, scratch, LogBytesPerHeapOop);  // convert to element count
    __ mov(c_rarg0, start);
    __ mov(c_rarg1, scratch);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_post_entry), 2);
    __ pop(saved_regs, sp);
  }
}

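// SATB pre-barrier entry point: delegates to satb_write_barrier_pre when the
// SATB barrier is enabled.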
void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
                                                                 Register obj,
                                                                 Register pre_val,
                                                                 Register thread,
                                                                 Register tmp,
                                                                 bool tosca_live,
                                                                 bool expand_call) {
  if (ShenandoahSATBBarrier) {
    satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, tosca_live, expand_call);
  }
}

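// SATB pre-barrier: if marking is active and the previous value is non-null,
// record it in the thread-local SATB buffer; when the buffer is full, fall
// back to the write_ref_field_pre_entry runtime call.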
void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
                                                           Register obj,
                                                           Register pre_val,
                                                           Register thread,
                                                           Register tmp,
                                                           bool tosca_live,
                                                           bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

  assert(thread == rthread, "must be");

  Label done;
  Label runtime;

  assert_different_registers(obj, pre_val, tmp, rscratch1);
  assert(pre_val != noreg && tmp != noreg, "expecting a register");

  Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  // Is marking active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ ldrw(tmp, in_progress);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ ldrb(tmp, in_progress);
  }
  __ cbzw(tmp, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?
  __ cbz(pre_val, done);

  // Can we store original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)

  __ ldr(tmp, index);                      // tmp := *index_adr
  __ cbz(tmp, runtime);                    // tmp == 0? If yes, goto runtime

  __ sub(tmp, tmp, wordSize);              // tmp := tmp - wordSize
  __ str(tmp, index);                      // *index_adr := tmp
  __ ldr(rscratch1, buffer);
  __ add(tmp, tmp, rscratch1);             // tmp := tmp + *buffer_adr

  // Record the previous value
  __ str(pre_val, Address(tmp, 0));
  __ b(done);

  __ bind(runtime);
  // save the live input values
  RegSet saved = RegSet::of(pre_val);
  if (tosca_live) saved += RegSet::of(r0);
  if (obj != noreg) saved += RegSet::of(obj);

  __ push(saved, sp);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(rfp+frame::interpreter_frame_last_sp) == NULL.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then rfp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.

  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  }

  __ pop(saved, sp);

  __ bind(done);
}

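// Post-barrier: when the connection matrix is in use, set the matrix cell for
// the region containing the store address and the region containing the new
// value, recording that the former now holds a reference into the latter.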
void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_post(MacroAssembler* masm,
                                                                  Register store_addr,
                                                                  Register new_val,
                                                                  Register thread,
                                                                  Register tmp,
                                                                  Register tmp2) {
  assert(thread == rthread, "must be");
  assert(UseShenandoahGC, "expect Shenandoah GC");

  if (! UseShenandoahMatrix) {
    // No need for that barrier if not using matrix.
    return;
  }

  assert_different_registers(store_addr, new_val, thread, tmp, tmp2, rscratch1);

  Label done;
  __ cbz(new_val, done);

  ShenandoahConnectionMatrix* matrix = ShenandoahHeap::heap()->connection_matrix();

  // Compute to-region index
  __ lsr(tmp, new_val, ShenandoahHeapRegion::region_size_bytes_shift_jint());

  // Compute from-region index
  __ lsr(tmp2, store_addr, ShenandoahHeapRegion::region_size_bytes_shift_jint());

  // Compute matrix index
  __ mov(rscratch1, matrix->stride_jint());
  // Address is _matrix[to * stride + from]
  __ madd(tmp, tmp, rscratch1, tmp2);
  __ mov(rscratch1, matrix->magic_offset());
  Address loc(tmp, rscratch1);

  __ ldrb(tmp2, loc);
  __ cbnz(tmp2, done);
  __ mov(tmp2, 1);
  __ strb(tmp2, loc);
  __ bind(done);
}

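// Read barrier: resolve the object through its Brooks forwarding pointer
// (stored in the word before the object) so reads see the to-space copy.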
void ShenandoahBarrierSetAssembler::read_barrier(MacroAssembler* masm, Register dst) {
  if (ShenandoahReadBarrier) {
    read_barrier_impl(masm, dst);
  }
}

void ShenandoahBarrierSetAssembler::read_barrier_impl(MacroAssembler* masm, Register dst) {
  assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier), "should be enabled");
  Label is_null;
  __ cbz(dst, is_null);
  read_barrier_not_null_impl(masm, dst);
  __ bind(is_null);
}

void ShenandoahBarrierSetAssembler::read_barrier_not_null(MacroAssembler* masm, Register dst) {
  if (ShenandoahReadBarrier) {
    read_barrier_not_null_impl(masm, dst);
  }
}

void ShenandoahBarrierSetAssembler::read_barrier_not_null_impl(MacroAssembler* masm, Register dst) {
  assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier), "should be enabled");
  __ ldr(dst, Address(dst, BrooksPointer::byte_offset()));
}

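// Write barrier: resolve the object through its Brooks pointer and, if
// evacuation or traversal is in progress and the object is in the collection
// set, call the write_barrier_IRT runtime entry and replace dst with the
// returned (forwarded) object.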
void ShenandoahBarrierSetAssembler::write_barrier(MacroAssembler* masm, Register dst) {
  if (ShenandoahWriteBarrier) {
    write_barrier_impl(masm, dst);
  }
}

void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Register dst) {
  assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "should be enabled");
  assert(dst != rscratch1, "different regs");
  assert(dst != rscratch2, "Need rscratch2");

  Label done;

  Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(rscratch1, gc_state);
  __ membar(Assembler::LoadLoad);

  // Now check if evacuation is in progress.
  read_barrier_not_null(masm, dst);

  __ mov(rscratch2, ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL);
  __ tst(rscratch1, rscratch2);
  __ br(Assembler::EQ, done);

  __ lsr(rscratch1, dst, ShenandoahHeapRegion::region_size_bytes_shift_jint());
  __ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
  __ ldrb(rscratch2, Address(rscratch2, rscratch1));
  __ tst(rscratch2, 0x1);
  __ br(Assembler::EQ, done);

  // Save possibly live regs.
  RegSet live_regs = RegSet::range(r0, r4) - dst;
  __ push(live_regs, sp);
  __ strd(v0, __ pre(sp, 2 * -wordSize));

  // Call into runtime
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_IRT), dst);

  // Move result into dst reg.
  __ mov(dst, r0);

  // Restore possibly live regs.
  __ ldrd(v0, __ post(sp, 2 * wordSize));
  __ pop(live_regs, sp);

  __ bind(done);
}

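// Storeval barrier: for the enqueue variant, write-barrier the value and
// enqueue it via the SATB pre-barrier; for the read-barrier variant, simply
// resolve the value through its Brooks pointer.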
void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
  if (ShenandoahStoreValEnqueueBarrier) {
    Label is_null;
    __ cbz(dst, is_null);
    write_barrier_impl(masm, dst);
    __ bind(is_null);
    // Save possibly live regs.
    RegSet live_regs = RegSet::range(r0, r4) - dst;
    __ push(live_regs, sp);
    __ strd(v0, __ pre(sp, 2 * -wordSize));

    satb_write_barrier_pre(masm, noreg, dst, rthread, tmp, true, false);

    // Restore possibly live regs.
    __ ldrd(v0, __ post(sp, 2 * wordSize));
    __ pop(live_regs, sp);
  }
  if (ShenandoahStoreValReadBarrier) {
    read_barrier_impl(masm, dst);
  }
}

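// Load access: apply the read barrier to the base address, do the plain load,
// and for weak/phantom Reference loads keep the referent alive via the SATB
// pre-barrier.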
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                            Register dst, Address src, Register tmp1, Register tmp_thread) {
  bool on_oop = type == T_OBJECT || type == T_ARRAY;
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool on_reference = on_weak || on_phantom;

  if (in_heap) {
    read_barrier_not_null(masm, src.base());
  }

  BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
  if (ShenandoahKeepAliveBarrier && on_oop && on_reference) {
    __ enter();
    satb_write_barrier_pre(masm /* masm */,
                           noreg /* obj */,
                           dst /* pre_val */,
                           rthread /* thread */,
                           tmp1 /* tmp */,
                           true /* tosca_live */,
                           true /* expand_call */);
    __ leave();
  }
}

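// Store access: write-barrier the base object, then for oop stores emit the
// SATB pre-barrier, the storeval barrier on the new value, the store itself,
// and the matrix post-barrier.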
void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2) {
  bool on_oop = type == T_OBJECT || type == T_ARRAY;
  bool in_heap = (decorators & IN_HEAP) != 0;
  if (in_heap) {
    write_barrier(masm, dst.base());
  }
  if (!on_oop) {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2);
    return;
  }

  // flatten object address if needed
  if (dst.index() == noreg && dst.offset() == 0) {
    if (dst.base() != r3) {
      __ mov(r3, dst.base());
    }
  } else {
    __ lea(r3, dst);
  }

  shenandoah_write_barrier_pre(masm,
                               r3 /* obj */,
                               tmp2 /* pre_val */,
                               rthread /* thread */,
                               tmp1 /* tmp */,
                               val != noreg /* tosca_live */,
                               false /* expand_call */);

  if (val == noreg) {
    BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), noreg, noreg, noreg);
  } else {
    storeval_barrier(masm, val, tmp1);
    // The post barrier needs the uncompressed oop for the region cross check.
    Register new_val = val;
    if (UseCompressedOops) {
      new_val = rscratch2;
      __ mov(new_val, val);
    }
    BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), val, noreg, noreg);
    shenandoah_write_barrier_post(masm,
                                  r3 /* store_adr */,
                                  new_val /* new_val */,
                                  rthread /* thread */,
                                  tmp1 /* tmp */,
                                  tmp2 /* tmp2 */);
  }
}

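// acmp barrier: if the plain comparison fails, the operands may be different
// copies of the same object, so resolve both through their Brooks pointers
// and compare again.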
void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm, DecoratorSet decorators, Register op1, Register op2) {
  __ cmp(op1, op2);
  if (ShenandoahAcmpBarrier) {
    Label done;
    __ br(Assembler::EQ, done);
    // The object may have been evacuated, but we won't see it without a
    // membar here.
    __ membar(Assembler::LoadStore | Assembler::LoadLoad);
    read_barrier(masm, op1);
    read_barrier(masm, op2);
    __ cmp(op1, op2);
    __ bind(done);
  }
}

void ShenandoahBarrierSetAssembler::resolve_for_read(MacroAssembler* masm, DecoratorSet decorators, Register obj) {
  bool oop_not_null = (decorators & OOP_NOT_NULL) != 0;
  if (oop_not_null) {
    read_barrier_not_null(masm, obj);
  } else {
    read_barrier(masm, obj);
  }
}

void ShenandoahBarrierSetAssembler::resolve_for_write(MacroAssembler* masm, DecoratorSet decorators, Register obj) {
  write_barrier(masm, obj);
}

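// CAS on an oop field using an LL/SC loop. A plain failure may be a false
// negative when the expected and observed values are different copies of the
// same object, so on failure both values are resolved through their Brooks
// pointers and the CAS is retried if they turn out to be equal.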
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val,
                                                bool acquire, bool release, bool weak, bool encode,
                                                Register tmp1, Register tmp2, Register tmp3,
                                                Register result) {
  if (encode) {
    storeval_barrier(masm, new_val, tmp3);
  }

  if (UseCompressedOops) {
    if (encode) {
      __ encode_heap_oop(tmp1, expected);
      expected = tmp1;
      __ encode_heap_oop(tmp2, new_val);
      new_val = tmp2;
    }
  }
  bool is_cae = (result != noreg);
  bool is_narrow = UseCompressedOops;
  Assembler::operand_size size = is_narrow ? Assembler::word : Assembler::xword;
  if (! is_cae) result = rscratch1;

  assert_different_registers(addr, expected, new_val, result, tmp3);

  Label retry, done, fail;

  // CAS, using LL/SC pair.
  __ bind(retry);
  __ load_exclusive(result, addr, size, acquire);
  if (is_narrow) {
    __ cmpw(result, expected);
  } else {
    __ cmp(result, expected);
  }
  __ br(Assembler::NE, fail);
  __ store_exclusive(tmp3, new_val, addr, size, release);
  if (weak) {
    __ cmpw(tmp3, 0u); // If the store fails, return NE to our caller
  } else {
    __ cbnzw(tmp3, retry);
  }
  __ b(done);

  __ bind(fail);
  // Check if rb(expected)==rb(result)
  // Shuffle registers so that we have memory value ready for next expected.
  __ mov(tmp3, expected);
  __ mov(expected, result);
  if (is_narrow) {
    __ decode_heap_oop(result, result);
    __ decode_heap_oop(tmp3, tmp3);
  }
  __ resolve_for_read(0, result);
  __ resolve_for_read(0, tmp3);
  __ cmp(result, tmp3);
  // Retry with expected now being the value we just loaded from addr.
  __ br(Assembler::EQ, retry);
  if (is_narrow && is_cae) {
    // For cmp-and-exchange and narrow oops, we need to restore
    // the compressed old-value. We moved it to 'expected' a few lines up.
    __ mov(result, expected);
  }
  __ bind(done);
}

#ifdef COMPILER1

#undef __
#define __ ce->masm()->

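// C1 slow-path stub for the SATB pre-barrier: optionally load the previous
// value, skip if it is null, otherwise pass it to the runtime stub below.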
void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());

  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
  }
  __ cbz(pre_val_reg, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ b(*stub->continuation());
}

#undef __

#define __ sasm->

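// Runtime stub backing the C1 pre-barrier slow path: while marking (or
// traversal) is active, enqueue the previous value into the thread-local SATB
// buffer, falling back to write_ref_field_pre_entry when the buffer is full.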
void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);

  // arg0 : previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();

  const Register pre_val = r0;
  const Register thread = rthread;
  const Register tmp = rscratch1;

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(tmp, gc_state);
  __ mov(rscratch2, ShenandoahHeap::MARKING | ShenandoahHeap::TRAVERSAL);
  __ tst(tmp, rscratch2);
  __ br(Assembler::EQ, done);

  // Can we store original value in the thread's buffer?
  __ ldr(tmp, queue_index);
  __ cbz(tmp, runtime);

  __ sub(tmp, tmp, wordSize);
  __ str(tmp, queue_index);
  __ ldr(rscratch2, buffer);
  __ add(tmp, tmp, rscratch2);
  __ load_parameter(0, rscratch2);
  __ str(rscratch2, Address(tmp, 0));
  __ b(done);

  __ bind(runtime);
  __ push_call_clobbered_registers();
  __ load_parameter(0, pre_val);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  __ pop_call_clobbered_registers();
  __ bind(done);

  __ epilogue();
}

#undef __

#endif // COMPILER1