/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_ppc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"

#define __ ce->masm()->
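// Emit through the MacroAssembler of the LIR_Assembler passed to each emit_code().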


RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    // May be used by optimizations like LoopInvariantCodeMotion or RangeCheckEliminator.
    DEBUG_ONLY( __ untested("RangeCheckStub: predicate_failed_trap_id"); )
    //__ load_const_optimized(R0, a);
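    // Compute the stub address relative to the global TOC base kept in R29_TOC;
    // usually cheaper than materializing the full 64-bit address as above.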
    __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
    __ mtctr(R0);
    __ bctrl();
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ illtrap());
    return;
  }

  address stub = _throw_index_out_of_bounds_exception ? Runtime1::entry_for(Runtime1::throw_index_exception_id)
                                                      : Runtime1::entry_for(Runtime1::throw_range_check_failed_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);

  Register index = R0; // pass in R0
  if (_index->is_register()) {
    __ extsw(index, _index->as_register());
  } else {
    __ load_const_optimized(index, _index->as_jint());
  }

  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}


PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  //__ load_const_optimized(R0, a);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}


void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

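  // The two stub parameters are spilled to the scratch slots just below SP;
  // the counter_overflow runtime stub is assumed to read them back from there.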
  // Parameter 1: bci
  __ load_const_optimized(R0, _bci);
  __ std(R0, -16, R1_SP);

  // Parameter 2: Method*
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  AddressLiteral md = __ constant_metadata_address(m); // Notify OOP recorder (don't need the relocation).
  __ load_const_optimized(R0, md.value());
  __ std(R0, -8, R1_SP);

  address a = Runtime1::entry_for(Runtime1::counter_overflow_id);
  //__ load_const_optimized(R0, a);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ b(_continuation);
}


void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  address stub = Runtime1::entry_for(Runtime1::throw_div0_exception_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

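  // Record the faulting instruction's offset so the implicit null check
  // machinery (signal/trap handler) can dispatch to this stub.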
  if (ImplicitNullChecks || TrapBasedNullChecks) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  //__ load_const_optimized(R0, a);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}


// Implementation of SimpleExceptionStub
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address stub = Runtime1::entry_for(_stub);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  if (_obj->is_valid()) { __ mr_if_needed(/*tmp1 in do_CheckCast*/ R4_ARG2, _obj->as_register()); }
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  debug_only( __ illtrap(); )
}


// Implementation of NewInstanceStub
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  address entry = Runtime1::entry_for(_stub_id);
  //__ load_const_optimized(R0, entry);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  address entry = Runtime1::entry_for(Runtime1::new_type_array_id);
  //__ load_const_optimized(R0, entry);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
  __ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub
NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  address entry = Runtime1::entry_for(Runtime1::new_object_array_id);
  //__ load_const_optimized(R0, entry);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
  __ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of MonitorAccessStubs
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? Runtime1::monitorenter_id : Runtime1::monitorenter_nofpu_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mr_if_needed(/*scratch_opr()->as_register()*/ R4_ARG2, _obj_reg->as_register());
  assert(_lock_reg->as_register() == R5_ARG3, "");
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? Runtime1::monitorexit_id : Runtime1::monitorexit_nofpu_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  assert(_lock_reg->as_register() == R4_ARG2, "");
  __ mtctr(R0);
  __ bctrl();
  __ b(_continuation);
}


// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes).
// - Replace original code with a call to the stub.
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction
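//
// Patch record layout as emitted below (one word, four bytes):
//   byte 0: 0 (padding; the record must be instruction-aligned)
//   byte 1: offset from the end of the record back to the being_initialized entry
//   byte 2: bytes to skip (the record plus any being_initialized check code)
//   byte 3: number of bytes of original code copied into this stub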

int PatchingStub::_patch_info_offset = -(5 * BytesPerInstWord);
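// The five instructions are load_const32 (2 instructions) + add + mtctr + bctrl,
// i.e. the call sequence emitted at the patch entry point in emit_code(), where
// the assert on _patch_info_offset checks this size.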

void PatchingStub::align_patch_site(MacroAssembler* ) {
  // Patch sites on ppc are always properly aligned.
}

#ifdef ASSERT
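// Verify that the code copied into the stub still matches the code at the patch site.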
inline void compare_with_patch_site(address template_start, address pc_start, int bytes_to_copy) {
  address start = template_start;
  for (int i = 0; i < bytes_to_copy; i++) {
    address ptr = (address)(pc_start + i);
    int a_byte = (*ptr) & 0xFF;
    assert(a_byte == *start++, "should be the same code");
  }
}
#endif

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // copy original code here
  assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");

  Label call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // Produce a copy of the load klass instruction for use by the being initialized case.
    AddressLiteral addrlit((address)NULL, metadata_Relocation::spec(_index));
    __ load_const(_obj, addrlit, R0);
    DEBUG_ONLY( compare_with_patch_site(__ code_section()->start() + being_initialized_entry, _pc_start, _bytes_to_copy); )
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // Produce a copy of the load mirror instruction for use by the being initialized case.
    AddressLiteral addrlit((address)NULL, oop_Relocation::spec(_index));
    __ load_const(_obj, addrlit, R0);
    DEBUG_ONLY( compare_with_patch_site(__ code_section()->start() + being_initialized_entry, _pc_start, _bytes_to_copy); )
  } else {
    // Make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8 (a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    __ block_comment(" being_initialized check");

    // Static field accesses have special semantics while the class
    // initializer is being run so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
    __ mr(R0, _obj); // spill
    __ ld(_obj, java_lang_Class::klass_offset_in_bytes(), _obj);
    __ ld(_obj, in_bytes(InstanceKlass::init_thread_offset()), _obj);
    __ cmpd(CCR0, _obj, R16_thread);
    __ mr(_obj, R0); // restore
    __ bne(CCR0, call_patch);

    // Load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ b(_patch_site_continuation);

    // Make sure this extra code gets skipped.
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but it has to be
  // aligned as an instruction so emit 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // Emit the offsets needed to find the code to patch.
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record.  We need to emit a full word, so emit an extra empty byte.
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

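  // Route execution from the not-yet-patched site into this stub by overwriting
  // the start of the patch site with an unconditional jump to the code below.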
  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
                           reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
                           reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
                           reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  __ block_comment("patch entry point");
  // The call sequence emitted here (load_const32 + add + mtctr + bctrl) must
  // have size -_patch_info_offset, as checked by the assert after the bctrl.
  __ load_const32(R0, MacroAssembler::offset_to_global_toc(target));
  __ add(R0, R29_TOC, R0);
  __ mtctr(R0);
  __ bctrl();
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ b(_patch_site_entry);
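  // While the patch site holds the jump to this stub it contains no valid
  // oop/metadata, so change the site's relocation from reloc_type to none;
  // the runtime reinstates it when the site is patched.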
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address stub = Runtime1::entry_for(Runtime1::deoptimize_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);

  __ load_const_optimized(R0, _trap_request); // Pass trap request in R0.
  __ bctrl();
  ce->add_call_info_here(_info);
  debug_only(__ illtrap());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  __ mr(R3_ARG1, src()->as_register());
  __ extsw(R4_ARG2, src_pos()->as_register());
  __ mr(R5_ARG3, dst()->as_register());
  __ extsw(R6_ARG4, dst_pos()->as_register());
  __ extsw(R7_ARG5, length()->as_register());

  ce->emit_static_call_stub();

  bool success = ce->emit_trampoline_stub_for_call(SharedRuntime::get_resolve_static_call_stub());
  if (!success) { return; }

  __ relocate(relocInfo::static_call_type);
  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ code()->set_insts_mark();
  __ bl(__ pc());
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());

#ifndef PRODUCT
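  // Count slow-path arraycopy calls (diagnostic counter, non-product builds only).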
  const address counter = (address)&Runtime1::_arraycopy_slowcase_cnt;
  const Register tmp = R3, tmp2 = R4;
  int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
  __ lwz(tmp2, simm16_offs, tmp);
  __ addi(tmp2, tmp2, 1);
  __ stw(tmp2, simm16_offs, tmp);
#endif

  __ b(_continuation);
}

#undef __