/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_ppc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#endif // INCLUDE_ALL_GCS

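// Emission shorthand: route the assembler calls in the stub emitters below
// through the LIR_Assembler's macro assembler.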
#define __ ce->masm()->


RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _throw_index_out_of_bounds_exception(false), _index(index), _array(array) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    // May be used by optimizations like LoopInvariantCodeMotion or RangeCheckEliminator.
    DEBUG_ONLY( __ untested("RangeCheckStub: predicate_failed_trap_id"); )
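    // Materialize the stub entry relative to the global TOC, which is
    // cheaper than the full 64-bit constant load (the disabled
    // load_const_optimized variant below), then dispatch through CTR.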
    //__ load_const_optimized(R0, a);
    __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
    __ mtctr(R0);
    __ bctrl();
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ illtrap());
    return;
  }

  address stub = _throw_index_out_of_bounds_exception ? Runtime1::entry_for(Runtime1::throw_index_exception_id)
                                                      : Runtime1::entry_for(Runtime1::throw_range_check_failed_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);

  Register index = R0; // pass in R0
  if (_index->is_register()) {
    __ extsw(index, _index->as_register());
  } else {
    __ load_const_optimized(index, _index->as_jint());
  }
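  // CTR already holds the stub entry, so pass the arguments through the
  // scratch slots just below SP: the array oop (if present) at -8(SP) and
  // the index at -16(SP), where the runtime stub picks them up.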
  if (_array) {
    __ std(_array->as_pointer_register(), -8, R1_SP);
  }
  __ std(R0, -16, R1_SP);

  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}


PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  //__ load_const_optimized(R0, a);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}


void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  // Parameter 1: bci
  __ load_const_optimized(R0, _bci);
  __ std(R0, -16, R1_SP);

  // Parameter 2: Method*
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  AddressLiteral md = __ constant_metadata_address(m); // Notify OOP recorder (don't need the relocation).
  __ load_const_optimized(R0, md.value());
  __ std(R0, -8, R1_SP);

  address a = Runtime1::entry_for(Runtime1::counter_overflow_id);
  //__ load_const_optimized(R0, a);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ b(_continuation);
}


void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  address stub = Runtime1::entry_for(Runtime1::throw_div0_exception_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  if (ImplicitNullChecks || TrapBasedNullChecks) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  //__ load_const_optimized(R0, a);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}


// Implementation of SimpleExceptionStub
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address stub = Runtime1::entry_for(_stub);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  if (_obj->is_valid()) { __ mr_if_needed(/*tmp1 in do_CheckCast*/ R4_ARG2, _obj->as_register()); }
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  debug_only( __ illtrap(); )
}


// Implementation of NewInstanceStub
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  address entry = Runtime1::entry_for(_stub_id);
  //__ load_const_optimized(R0, entry);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  address entry = Runtime1::entry_for(Runtime1::new_type_array_id);
  //__ load_const_optimized(R0, entry);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
  __ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub
NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  address entry = Runtime1::entry_for(Runtime1::new_object_array_id);
  //__ load_const_optimized(R0, entry);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
  __ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of MonitorAccessStubs
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? Runtime1::monitorenter_id : Runtime1::monitorenter_nofpu_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mr_if_needed(/*scratch_opr()->as_register()*/ R4_ARG2, _obj_reg->as_register());
  assert(_lock_reg->as_register() == R5_ARG3, "");
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
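  // The fast-path unlock attempt may have clobbered the lock register;
  // if so, recompute the monitor's address in the frame.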
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? Runtime1::monitorexit_id : Runtime1::monitorexit_nofpu_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  assert(_lock_reg->as_register() == R4_ARG2, "");
  __ mtctr(R0);
  __ bctrl();
  __ b(_continuation);
}


// Implementation of patching:
// - Copy the code at the given offset into an inlined buffer (first the bytes, then the number of bytes).
// - Replace the original code with a call to the stub.
// At runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing the class, restore the original code and re-execute the instruction

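// Distance from the pc following the patch call back to the end of the
// patch record: the call sequence (load_const32 = 2 instructions, plus add,
// mtctr and bctrl) spans 5 instructions; emit_code asserts this.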
int PatchingStub::_patch_info_offset = -(5 * BytesPerInstWord);

void PatchingStub::align_patch_site(MacroAssembler* ) {
  // Patch sites on ppc are always properly aligned.
}

#ifdef ASSERT
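// Check that the template code emitted for the being-initialized entry
// matches the original instructions at the patch site byte for byte.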
inline void compare_with_patch_site(address template_start, address pc_start, int bytes_to_copy) {
  address start = template_start;
  for (int i = 0; i < bytes_to_copy; i++) {
    address ptr = (address)(pc_start + i);
    int a_byte = (*ptr) & 0xFF;
    assert(a_byte == *start++, "should be the same code");
  }
}
#endif

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // copy original code here
  assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");

  Label call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // Produce a copy of the load klass instruction for use by the being initialized case.
    AddressLiteral addrlit((address)NULL, metadata_Relocation::spec(_index));
    __ load_const(_obj, addrlit, R0);
    DEBUG_ONLY( compare_with_patch_site(__ code_section()->start() + being_initialized_entry, _pc_start, _bytes_to_copy); )
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // Produce a copy of the load mirror instruction for use by the being initialized case.
    AddressLiteral addrlit((address)NULL, oop_Relocation::spec(_index));
    __ load_const(_obj, addrlit, R0);
    DEBUG_ONLY( compare_with_patch_site(__ code_section()->start() + being_initialized_entry, _pc_start, _bytes_to_copy); )
  } else {
    // Make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8 (a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    __ block_comment(" being_initialized check");

    // Static field accesses have special semantics while the class
    // initializer is being run so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
    __ mr(R0, _obj); // spill
    __ ld(_obj, java_lang_Class::klass_offset_in_bytes(), _obj);
    __ ld(_obj, in_bytes(InstanceKlass::init_thread_offset()), _obj);
    __ cmpd(CCR0, _obj, R16_thread);
    __ mr(_obj, R0); // restore
    __ bne(CCR0, call_patch);

    // Load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ b(_patch_site_continuation);

    // Make sure this extra code gets skipped.
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but it has to be
  // aligned as an instruction so emit 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // Emit the offsets needed to find the code to patch.
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record.  We need to emit a full word, so emit an extra empty byte.
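  // Record layout (emission order): a zero pad byte, the offset back to the
  // being_initialized entry, the number of bytes to skip, and the number of
  // bytes to copy.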
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
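  // Redirect the patch site to this stub right away: any thread executing
  // the nmethod from here on calls into the runtime until patching is done.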
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
                           reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
                           reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
                           reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  __ block_comment("patch entry point");
  //__ load_const(R0, target); + mtctr + bctrl must have size -_patch_info_offset
  __ load_const32(R0, MacroAssembler::offset_to_global_toc(target));
  __ add(R0, R29_TOC, R0);
  __ mtctr(R0);
  __ bctrl();
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ b(_patch_site_entry);
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address stub = Runtime1::entry_for(Runtime1::deoptimize_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);

  __ load_const_optimized(R0, _trap_request); // Pass trap request in R0.
  __ bctrl();
  ce->add_call_info_here(_info);
  debug_only(__ illtrap());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  __ mr(R3_ARG1, src()->as_register());
  __ extsw(R4_ARG2, src_pos()->as_register());
  __ mr(R5_ARG3, dst()->as_register());
  __ extsw(R6_ARG4, dst_pos()->as_register());
  __ extsw(R7_ARG5, length()->as_register());

  ce->emit_static_call_stub();

  bool success = ce->emit_trampoline_stub_for_call(SharedRuntime::get_resolve_static_call_stub());
  if (!success) { return; }

  __ relocate(relocInfo::static_call_type);
  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ code()->set_insts_mark();
  __ bl(__ pc());
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());

#ifndef PRODUCT
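  // Bump the shared arraycopy slow-case counter (non-product builds only).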
  const address counter = (address)&Runtime1::_arraycopy_slowcase_cnt;
  const Register tmp = R3, tmp2 = R4;
  int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
  __ lwz(tmp2, simm16_offs, tmp);
  __ addi(tmp2, tmp2, 1);
  __ stw(tmp2, simm16_offs, tmp);
#endif

  __ b(_continuation);
}


///////////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(_entry);

  assert(pre_val()->is_register(), "Precondition.");
  Register pre_val_reg = pre_val()->as_register();

  if (do_load()) {
    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
  }

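  // A null previous value need not be recorded, so take the fast exit.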
  __ cmpdi(CCR0, pre_val_reg, 0);
  __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), _continuation);

  address stub = Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ std(pre_val_reg, -8, R1_SP); // Pass pre_val on stack.
  __ mtctr(R0);
  __ bctrl();
  __ b(_continuation);
}

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register addr_reg = addr()->as_pointer_register();
  Register new_val_reg = new_val()->as_register();

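  // Storing a null reference cannot create a cross-region pointer, so the
  // post barrier has nothing to do.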
  __ cmpdi(CCR0, new_val_reg, 0);
  __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), _continuation);

  address stub = Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);
  __ mr(R0, addr_reg); // Pass addr in R0.
  __ bctrl();
  __ b(_continuation);
}

#endif // INCLUDE_ALL_GCS
///////////////////////////////////////////////////////////////////////////////////

#undef __