/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeBlob.hpp"
#include "code/vmreg.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
#include "gc/z/zBarrierSetRuntime.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/z/c1/zBarrierSetC1.hpp"
#endif // COMPILER1
#ifdef COMPILER2
#include "gc/z/c2/zBarrierSetC2.hpp"
#endif // COMPILER2

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#undef __
#define __ masm->

static void call_vm(MacroAssembler* masm,
                    address entry_point,
                    Register arg0,
                    Register arg1) {
  // Setup arguments
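  // Move arg0/arg1 into the C calling convention registers without clobbering
  // either value: if arg1 already sits in c_rarg0 it must be moved (or
  // exchanged) out of the way before arg0 is written there.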
  if (arg1 == c_rarg0) {
    if (arg0 == c_rarg1) {
      __ xchgptr(c_rarg1, c_rarg0);
    } else {
      __ movptr(c_rarg1, arg1);
      __ movptr(c_rarg0, arg0);
    }
  } else {
    if (arg0 != c_rarg0) {
      __ movptr(c_rarg0, arg0);
    }
    if (arg1 != c_rarg1) {
      __ movptr(c_rarg1, arg1);
    }
  }

  // Call VM
  __ MacroAssembler::call_VM_leaf_base(entry_point, 2);
}

void ZBarrierSetAssembler::load_at(MacroAssembler* masm,
                                   DecoratorSet decorators,
                                   BasicType type,
                                   Register dst,
                                   Address src,
                                   Register tmp1,
                                   Register tmp_thread) {
  if (!ZBarrierSet::barrier_needed(decorators, type)) {
    // Barrier not needed
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
    return;
  }

  BLOCK_COMMENT("ZBarrierSetAssembler::load_at {");

  // Allocate scratch register
  Register scratch = tmp1;
  if (tmp1 == noreg) {
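    // No scratch register was supplied, so borrow r12 and preserve the
    // caller's value across the barrier code.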
    scratch = r12;
    __ push(scratch);
  }

  assert_different_registers(dst, scratch);

  Label done;

  //
  // Fast path
  //

  // Load address
  __ lea(scratch, src);

  // Load oop at address
  __ movptr(dst, Address(scratch, 0));

  // Test address bad mask
  __ testptr(dst, address_bad_mask_from_thread(r15_thread));
  __ jcc(Assembler::zero, done);

  //
  // Slow path
  //

  // Save registers
  __ push(rax);
  __ push(rcx);
  __ push(rdx);
  __ push(rdi);
  __ push(rsi);
  __ push(r8);
  __ push(r9);
  __ push(r10);
  __ push(r11);

  // We may end up here from generate_native_wrapper, in which case the method
  // may have floats as arguments, and we must spill them before calling the VM
  // runtime leaf. From the interpreter all floats are passed on the stack.
  assert(Argument::n_float_register_parameters_j == 8, "Assumption");
  const int xmm_size = wordSize * 2;
  const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j;
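  // Each spill slot is 16 bytes (wordSize * 2): movdqu saves the full 128-bit
  // XMM register that may hold a Java float/double argument.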
  __ subptr(rsp, xmm_spill_size);
  __ movdqu(Address(rsp, xmm_size * 7), xmm7);
  __ movdqu(Address(rsp, xmm_size * 6), xmm6);
  __ movdqu(Address(rsp, xmm_size * 5), xmm5);
  __ movdqu(Address(rsp, xmm_size * 4), xmm4);
  __ movdqu(Address(rsp, xmm_size * 3), xmm3);
  __ movdqu(Address(rsp, xmm_size * 2), xmm2);
  __ movdqu(Address(rsp, xmm_size * 1), xmm1);
  __ movdqu(Address(rsp, xmm_size * 0), xmm0);

  // Call VM
  call_vm(masm, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), dst, scratch);

  // Restore registers
  __ movdqu(xmm0, Address(rsp, xmm_size * 0));
  __ movdqu(xmm1, Address(rsp, xmm_size * 1));
  __ movdqu(xmm2, Address(rsp, xmm_size * 2));
  __ movdqu(xmm3, Address(rsp, xmm_size * 3));
  __ movdqu(xmm4, Address(rsp, xmm_size * 4));
  __ movdqu(xmm5, Address(rsp, xmm_size * 5));
  __ movdqu(xmm6, Address(rsp, xmm_size * 6));
  __ movdqu(xmm7, Address(rsp, xmm_size * 7));
  __ addptr(rsp, xmm_spill_size);

  __ pop(r11);
  __ pop(r10);
  __ pop(r9);
  __ pop(r8);
  __ pop(rsi);
  __ pop(rdi);
  __ pop(rdx);
  __ pop(rcx);

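  // The runtime call returns the good oop in rax. If dst is rax the result is
  // already in place and the saved rax slot is simply discarded, otherwise the
  // result is copied into dst and the original rax is restored.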
  if (dst == rax) {
    __ addptr(rsp, wordSize);
  } else {
    __ movptr(dst, rax);
    __ pop(rax);
  }

  __ bind(done);

  // Restore scratch register
  if (tmp1 == noreg) {
    __ pop(scratch);
  }

  BLOCK_COMMENT("} ZBarrierSetAssembler::load_at");
}

#ifdef ASSERT

void ZBarrierSetAssembler::store_at(MacroAssembler* masm,
                                    DecoratorSet decorators,
                                    BasicType type,
                                    Address dst,
                                    Register src,
                                    Register tmp1,
                                    Register tmp2) {
  BLOCK_COMMENT("ZBarrierSetAssembler::store_at {");

  // Verify oop store
  if (is_reference_type(type)) {
    // Note that src could be noreg, which means we
    // are storing null and can skip verification.
    if (src != noreg) {
      Label done;
      __ testptr(src, address_bad_mask_from_thread(r15_thread));
      __ jcc(Assembler::zero, done);
      __ stop("Verify oop store failed");
      __ should_not_reach_here();
      __ bind(done);
    }
  }

  // Store value
  BarrierSetAssembler::store_at(masm, decorators, type, dst, src, tmp1, tmp2);

  BLOCK_COMMENT("} ZBarrierSetAssembler::store_at");
}

#endif // ASSERT

void ZBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm,
                                              DecoratorSet decorators,
                                              BasicType type,
                                              Register src,
                                              Register dst,
                                              Register count) {
  if (!ZBarrierSet::barrier_needed(decorators, type)) {
    // Barrier not needed
    return;
  }

  BLOCK_COMMENT("ZBarrierSetAssembler::arraycopy_prologue {");

  // Save registers
  __ pusha();

  // Call VM
  call_vm(masm, ZBarrierSetRuntime::load_barrier_on_oop_array_addr(), src, count);

  // Restore registers
  __ popa();

  BLOCK_COMMENT("} ZBarrierSetAssembler::arraycopy_prologue");
}

void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
                                                         Register jni_env,
                                                         Register obj,
                                                         Register tmp,
                                                         Label& slowpath) {
  BLOCK_COMMENT("ZBarrierSetAssembler::try_resolve_jobject_in_native {");

  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);

  // Test address bad mask
  __ testptr(obj, address_bad_mask_from_jni_env(jni_env));
  __ jcc(Assembler::notZero, slowpath);

  BLOCK_COMMENT("} ZBarrierSetAssembler::try_resolve_jobject_in_native");
}

#ifdef COMPILER1

#undef __
#define __ ce->masm()->

void ZBarrierSetAssembler::generate_c1_load_barrier_test(LIR_Assembler* ce,
                                                         LIR_Opr ref) const {
  __ testptr(ref->as_register(), address_bad_mask_from_thread(r15_thread));
}

void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
                                                         ZLoadBarrierStubC1* stub) const {
  // Stub entry
  __ bind(*stub->entry());

  Register ref = stub->ref()->as_register();
  Register ref_addr = noreg;
  Register tmp = noreg;

  if (stub->tmp()->is_valid()) {
    // Load address into tmp register
    ce->leal(stub->ref_addr(), stub->tmp());
    ref_addr = tmp = stub->tmp()->as_pointer_register();
  } else {
    // Address already in register
    ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
  }

  assert_different_registers(ref, ref_addr, noreg);

  // Save rax unless it is the result or tmp register
  if (ref != rax && tmp != rax) {
    __ push(rax);
  }

  // Setup arguments and call runtime stub
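  // Reserve stack space for the two outgoing stub parameters; store_parameter()
  // writes them relative to rsp and the runtime stub reads them back with
  // load_parameter().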
  __ subptr(rsp, 2 * BytesPerWord);
  ce->store_parameter(ref_addr, 1);
  ce->store_parameter(ref, 0);
  __ call(RuntimeAddress(stub->runtime_stub()));
  __ addptr(rsp, 2 * BytesPerWord);

  // Verify result
  __ verify_oop(rax, "Bad oop");

  // Move result into place
  if (ref != rax) {
    __ movptr(ref, rax);
  }

  // Restore rax unless it is the result or tmp register
  if (ref != rax && tmp != rax) {
    __ pop(rax);
  }

  // Stub exit
  __ jmp(*stub->continuation());
}

#undef __
#define __ sasm->

void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler* sasm,
                                                                 DecoratorSet decorators) const {
  // Enter and save registers
  __ enter();
  __ save_live_registers_no_oop_map(true /* save_fpu_registers */);

  // Setup arguments
  __ load_parameter(1, c_rarg1);
  __ load_parameter(0, c_rarg0);

  // Call VM
  __ call_VM_leaf(ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr(decorators), c_rarg0, c_rarg1);

  // Restore registers and return
  __ restore_live_registers_except_rax(true /* restore_fpu_registers */);
  __ leave();
  __ ret(0);
}

#endif // COMPILER1

#ifdef COMPILER2

OptoReg::Name ZBarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
  if (!OptoReg::is_reg(opto_reg)) {
    return OptoReg::Bad;
  }

  const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
  if (vm_reg->is_XMMRegister()) {
    opto_reg &= ~15;
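    // Record how much of the vector register is live in the low 4 bits:
    // 1 = 8 bytes, 2 = 16 bytes (VecX), 4 = 32 bytes (VecY), 8 = 64 bytes (VecZ).
    // ZSaveLiveRegisters decodes this again via xmm_slot_size().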
    switch (node->ideal_reg()) {
      case Op_VecX:
        opto_reg |= 2;
        break;
      case Op_VecY:
        opto_reg |= 4;
        break;
      case Op_VecZ:
        opto_reg |= 8;
        break;
      default:
        opto_reg |= 1;
        break;
    }
  }

  return opto_reg;
}

// We use the vec_spill_helper from the x86.ad file to avoid reinventing this wheel
extern int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
                            int stack_offset, int reg, uint ireg, outputStream* st);

#undef __
#define __ _masm->

class ZSaveLiveRegisters {
private:
  struct XMMRegisterData {
    XMMRegister _reg;
    int         _size;

    // Used by GrowableArray::find()
    bool operator == (const XMMRegisterData& other) {
      return _reg == other._reg;
    }
  };

  MacroAssembler* const          _masm;
  GrowableArray<Register>        _gp_registers;
  GrowableArray<XMMRegisterData> _xmm_registers;
  int                            _spill_size;
  int                            _spill_offset;

  static int xmm_compare_register_size(XMMRegisterData* left, XMMRegisterData* right) {
    if (left->_size == right->_size) {
      return 0;
    }

    return (left->_size < right->_size) ? -1 : 1;
  }

  static int xmm_slot_size(OptoReg::Name opto_reg) {
    // The low order 4 bits encode how much of the XMM register is live
    return (opto_reg & 15) << 3;
  }

  static uint xmm_ideal_reg_for_size(int reg_size) {
    switch (reg_size) {
    case 8:
      return Op_VecD;
    case 16:
      return Op_VecX;
    case 32:
      return Op_VecY;
    case 64:
      return Op_VecZ;
    default:
      fatal("Invalid register size %d", reg_size);
      return 0;
    }
  }

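  // The upper halves of YMM/ZMM registers are only live if a saved register is
  // wider than 16 bytes. When they are, vzeroupper is used to clear the dirty
  // upper state and avoid AVX-SSE transition penalties.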
  bool xmm_needs_vzeroupper() const {
    return _xmm_registers.is_nonempty() && _xmm_registers.at(0)._size > 16;
  }

  void xmm_register_save(const XMMRegisterData& reg_data) {
    const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
    const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
    _spill_offset -= reg_data._size;
    vec_spill_helper(__ code(), false /* do_size */, false /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
  }

  void xmm_register_restore(const XMMRegisterData& reg_data) {
    const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
    const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
    vec_spill_helper(__ code(), false /* do_size */, true /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
    _spill_offset += reg_data._size;
  }

  void gp_register_save(Register reg) {
    _spill_offset -= 8;
    __ movq(Address(rsp, _spill_offset), reg);
  }

  void gp_register_restore(Register reg) {
    __ movq(reg, Address(rsp, _spill_offset));
    _spill_offset += 8;
  }

  void initialize(ZLoadBarrierStubC2* stub) {
    // Create mask of caller saved registers that need to
    // be saved/restored if live
    RegMask caller_saved;
    caller_saved.Insert(OptoReg::as_OptoReg(rax->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(rcx->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(rdx->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(rsi->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(rdi->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(r8->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(r9->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(r10->as_VMReg()));
    caller_saved.Insert(OptoReg::as_OptoReg(r11->as_VMReg()));
    caller_saved.Remove(OptoReg::as_OptoReg(stub->ref()->as_VMReg()));
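    // The ref register is excluded from the mask: it receives the result of
    // the runtime call (moved there from rax), so its old value does not need
    // to be preserved.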

    // Create mask of live registers
    RegMask live = stub->live();
    if (stub->tmp() != noreg) {
      live.Insert(OptoReg::as_OptoReg(stub->tmp()->as_VMReg()));
    }

    int gp_spill_size = 0;
    int xmm_spill_size = 0;

    // Record registers that need to be saved/restored
    while (live.is_NotEmpty()) {
      const OptoReg::Name opto_reg = live.find_first_elem();
      const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);

      live.Remove(opto_reg);

      if (vm_reg->is_Register()) {
        if (caller_saved.Member(opto_reg)) {
          _gp_registers.append(vm_reg->as_Register());
          gp_spill_size += 8;
        }
      } else if (vm_reg->is_XMMRegister()) {
        // The low order 4 bits of the opto_reg encode how much of the register is live
        const VMReg vm_reg_base = OptoReg::as_VMReg(opto_reg & ~15);
        const int reg_size = xmm_slot_size(opto_reg);
        const XMMRegisterData reg_data = { vm_reg_base->as_XMMRegister(), reg_size };
        const int reg_index = _xmm_registers.find(reg_data);
        if (reg_index == -1) {
          // Not previously appended
          _xmm_registers.append(reg_data);
          xmm_spill_size += reg_size;
        } else {
          // Previously appended, update size
          const int reg_size_prev = _xmm_registers.at(reg_index)._size;
          if (reg_size > reg_size_prev) {
            _xmm_registers.at_put(reg_index, reg_data);
            xmm_spill_size += reg_size - reg_size_prev;
          }
        }
      } else {
        fatal("Unexpected register type");
      }
    }

    // Sort by size, largest first
    _xmm_registers.sort(xmm_compare_register_size);

    // Stack pointer must be 16 bytes aligned for the call
    _spill_offset = _spill_size = align_up(xmm_spill_size + gp_spill_size, 16);
  }

public:
  ZSaveLiveRegisters(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
      _masm(masm),
      _gp_registers(),
      _xmm_registers(),
      _spill_size(0),
      _spill_offset(0) {

    //
    // Stack layout after registers have been spilled:
    //
    // | ...            | original rsp, 16 bytes aligned
    // ------------------
    // | zmm0 high      |
    // | ...            |
    // | zmm0 low       | 16 bytes aligned
    // | ...            |
    // | ymm1 high      |
    // | ...            |
    // | ymm1 low       | 16 bytes aligned
    // | ...            |
    // | xmmN high      |
    // | ...            |
    // | xmmN low       | 8 bytes aligned
    // | reg0           | 8 bytes aligned
    // | reg1           |
    // | ...            |
    // | regN           | new rsp, if 16 bytes aligned
    // | <padding>      | else new rsp, 16 bytes aligned
    // ------------------
    //

    // Figure out what registers to save/restore
    initialize(stub);

    // Allocate stack space
    if (_spill_size > 0) {
      __ subptr(rsp, _spill_size);
    }

    // Save XMM/YMM/ZMM registers
    for (int i = 0; i < _xmm_registers.length(); i++) {
      xmm_register_save(_xmm_registers.at(i));
    }

    if (xmm_needs_vzeroupper()) {
      __ vzeroupper();
    }

    // Save general purpose registers
    for (int i = 0; i < _gp_registers.length(); i++) {
      gp_register_save(_gp_registers.at(i));
    }
  }

  ~ZSaveLiveRegisters() {
    // Restore general purpose registers
    for (int i = _gp_registers.length() - 1; i >= 0; i--) {
      gp_register_restore(_gp_registers.at(i));
    }

    __ vzeroupper();

    // Restore XMM/YMM/ZMM registers
    for (int i = _xmm_registers.length() - 1; i >= 0; i--) {
      xmm_register_restore(_xmm_registers.at(i));
    }

    // Free stack space
    if (_spill_size > 0) {
      __ addptr(rsp, _spill_size);
    }
  }
};

class ZSetupArguments {
private:
  MacroAssembler* const _masm;
  const Register        _ref;
  const Address         _ref_addr;

public:
  ZSetupArguments(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :
      _masm(masm),
      _ref(stub->ref()),
      _ref_addr(stub->ref_addr()) {

    // Setup arguments
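    // The runtime entry expects the reference in c_rarg0 and the address of
    // the field in c_rarg1 (null when there is no self healing). The cases
    // below shuffle the values into place without clobbering either one.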
    if (_ref_addr.base() == noreg) {
      // No self healing
      if (_ref != c_rarg0) {
        __ movq(c_rarg0, _ref);
      }
      __ xorptr(c_rarg1, c_rarg1);
    } else {
      // Self healing
      if (_ref == c_rarg0) {
        __ lea(c_rarg1, _ref_addr);
      } else if (_ref != c_rarg1) {
        __ lea(c_rarg1, _ref_addr);
        __ movq(c_rarg0, _ref);
      } else if (_ref_addr.base() != c_rarg0 && _ref_addr.index() != c_rarg0) {
        __ movq(c_rarg0, _ref);
        __ lea(c_rarg1, _ref_addr);
      } else {
        __ xchgq(c_rarg0, c_rarg1);
        if (_ref_addr.base() == c_rarg0) {
          __ lea(c_rarg1, Address(c_rarg1, _ref_addr.index(), _ref_addr.scale(), _ref_addr.disp()));
        } else if (_ref_addr.index() == c_rarg0) {
          __ lea(c_rarg1, Address(_ref_addr.base(), c_rarg1, _ref_addr.scale(), _ref_addr.disp()));
        } else {
          ShouldNotReachHere();
        }
      }
    }
  }

  ~ZSetupArguments() {
    // Transfer result
    if (_ref != rax) {
      __ movq(_ref, rax);
    }
  }
};

#undef __
#define __ masm->

void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const {
  BLOCK_COMMENT("ZLoadBarrierStubC2");

  // Stub entry
  __ bind(*stub->entry());

  {
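    // The helper constructors below emit the register spills and the argument
    // shuffling; their destructors, run at the end of this scope, emit the code
    // that moves the result into ref and restores the saved registers.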
    ZSaveLiveRegisters save_live_registers(masm, stub);
    ZSetupArguments setup_arguments(masm, stub);
    __ call(RuntimeAddress(stub->slow_path()));
  }

  // Stub exit
  __ jmp(*stub->continuation());
}

#undef __

#endif // COMPILER2