/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_ppc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"

#define __ ce->masm()->

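// Most stubs below call into the Runtime1 blob the same way: the entry address
// is computed as an offset from the global TOC (R29_TOC), moved to CTR, and
// called via bctrl, so the return address identifies the call site for
// add_call_info_here() and verify_oop_map().
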
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    // May be used by optimizations like LoopInvariantCodeMotion or RangeCheckEliminator.
    DEBUG_ONLY( __ untested("RangeCheckStub: predicate_failed_trap_id"); )
    //__ load_const_optimized(R0, a);
    __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
    __ mtctr(R0);
    __ bctrl();
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ illtrap());
    return;
  }

  address stub = _throw_index_out_of_bounds_exception ? Runtime1::entry_for(Runtime1::throw_index_exception_id)
                                                      : Runtime1::entry_for(Runtime1::throw_range_check_failed_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);

  Register index = R0;
  if (_index->is_register()) {
    __ extsw(index, _index->as_register());
  } else {
    __ load_const_optimized(index, _index->as_jint());
  }
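  // Pass the array oop and the index to the runtime by spilling them just
  // below SP; the Runtime1 stub reads them back from these stack slots.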
  if (_array) {
    __ std(_array->as_pointer_register(), -8, R1_SP);
  }
  __ std(index, -16, R1_SP);

  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}


PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  //__ load_const_optimized(R0, a);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}


void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  // Parameter 1: bci
  __ load_const_optimized(R0, _bci);
  __ std(R0, -16, R1_SP);

  // Parameter 2: Method*
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  AddressLiteral md = __ constant_metadata_address(m); // Notify OOP recorder (don't need the relocation).
  __ load_const_optimized(R0, md.value());
  __ std(R0, -8, R1_SP);

  address a = Runtime1::entry_for(Runtime1::counter_overflow_id);
  //__ load_const_optimized(R0, a);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ b(_continuation);
}


void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  address stub = Runtime1::entry_for(Runtime1::throw_div0_exception_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize instead of throwing the exception: it is probably wrong to throw it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  if (ImplicitNullChecks || TrapBasedNullChecks) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  //__ load_const_optimized(R0, a);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}


// Implementation of SimpleExceptionStub
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address stub = Runtime1::entry_for(_stub);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  if (_obj->is_valid()) { __ mr_if_needed(/*tmp1 in do_CheckCast*/ R4_ARG2, _obj->as_register()); }
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  debug_only( __ illtrap(); )
}


// Implementation of NewInstanceStub
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  address entry = Runtime1::entry_for(_stub_id);
  //__ load_const_optimized(R0, entry);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  address entry = Runtime1::entry_for(Runtime1::new_type_array_id);
  //__ load_const_optimized(R0, entry);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
  __ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub
NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  address entry = Runtime1::entry_for(Runtime1::new_object_array_id);
  //__ load_const_optimized(R0, entry);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
  __ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of MonitorAccessStubs
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? Runtime1::monitorenter_id : Runtime1::monitorenter_nofpu_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mr_if_needed(/*scratch_opr()->as_register()*/ R4_ARG2, _obj_reg->as_register());
  assert(_lock_reg->as_register() == R5_ARG3, "");
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
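  // The lock register may have been destroyed by a fast unlocking attempt;
  // recompute the monitor address in that case.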
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? Runtime1::monitorexit_id : Runtime1::monitorexit_nofpu_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  assert(_lock_reg->as_register() == R4_ARG2, "");
  __ mtctr(R0);
  __ bctrl();
  __ b(_continuation);
}


// Implementation of patching:
// - Copy the code at the given offset into an inlined buffer (first the bytes, then the number of bytes).
// - Replace the original code with a call to the stub.
// At runtime:
// - Call the stub, which jumps into the runtime.
// - In the runtime: preserve all registers (especially object registers, i.e., source and destination objects).
// - In the runtime: after initializing the class, restore the original code and re-execute the instruction.

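// Patch record layout, as emitted by PatchingStub::emit_code() below (one word):
//   byte 0: 0 (padding, keeps the record instruction-aligned)
//   byte 1: offset from the being_initialized entry to the end of the record
//   byte 2: bytes_to_skip
//   byte 3: bytes_to_copy

// The patch record ends 5 instructions (load_const32 = 2, add, mtctr, bctrl)
// before the return address of the runtime call emitted at 'call_patch' below.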
int PatchingStub::_patch_info_offset = -(5 * BytesPerInstWord);

void PatchingStub::align_patch_site(MacroAssembler* ) {
  // Patch sites on ppc are always properly aligned.
}

#ifdef ASSERT
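// Verify that the code emitted for the being-initialized copy matches the
// code at the patch site byte for byte.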
inline void compare_with_patch_site(address template_start, address pc_start, int bytes_to_copy) {
  address start = template_start;
  for (int i = 0; i < bytes_to_copy; i++) {
    address ptr = (address)(pc_start + i);
    int a_byte = (*ptr) & 0xFF;
    assert(a_byte == *start++, "should be the same code");
  }
}
#endif

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // Copy the original code here.
  assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call, need %d", _bytes_to_copy);
  assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");

  Label call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // Produce a copy of the load klass instruction for use by the being initialized case.
    AddressLiteral addrlit((address)NULL, metadata_Relocation::spec(_index));
    __ load_const(_obj, addrlit, R0);
    DEBUG_ONLY( compare_with_patch_site(__ code_section()->start() + being_initialized_entry, _pc_start, _bytes_to_copy); )
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // Produce a copy of the load mirror instruction for use by the being initialized case.
    AddressLiteral addrlit((address)NULL, oop_Relocation::spec(_index));
    __ load_const(_obj, addrlit, R0);
    DEBUG_ONLY( compare_with_patch_site(__ code_section()->start() + being_initialized_entry, _pc_start, _bytes_to_copy); )
  } else {
    // Make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8 (a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    __ block_comment(" being_initialized check");

    // Static field accesses have special semantics while the class
    // initializer is being run, so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
    __ mr(R0, _obj); // spill
    __ ld(_obj, java_lang_Class::klass_offset_in_bytes(), _obj);
    __ ld(_obj, in_bytes(InstanceKlass::init_thread_offset()), _obj);
    __ cmpd(CCR0, _obj, R16_thread);
    __ mr(_obj, R0); // restore
    __ bne(CCR0, call_patch);

    // Load_klass patches may execute the patched code before it's
    // copied back into place, so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ b(_patch_site_continuation);

    // Make sure this extra code gets skipped.
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but it has to be
  // aligned as an instruction, so emit 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // Emit the offsets needed to find the code to patch.
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record.  We need to emit a full word, so emit an extra empty byte.
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
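  // Overwrite the beginning of the patch site with an unconditional jump
  // to this stub, so that threads executing the nmethod reach the patching
  // code below.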
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
                           reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
                           reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
                           reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  __ block_comment("patch entry point");
  // The call sequence below (load_const32 + add + mtctr + bctrl) must have size -_patch_info_offset.
  __ load_const32(R0, MacroAssembler::offset_to_global_toc(target));
  __ add(R0, R29_TOC, R0);
  __ mtctr(R0);
  __ bctrl();
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ b(_patch_site_entry);
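  // Switch the relocation at the patch site to the type matching the patched
  // data, so the embedded oop/metadata is visited once it has been patched in.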
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address stub = Runtime1::entry_for(Runtime1::deoptimize_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);

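  // CTR already holds the entry point, so R0 is free to carry the trap request.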
  __ load_const_optimized(R0, _trap_request); // Pass trap request in R0.
  __ bctrl();
  ce->add_call_info_here(_info);
  debug_only(__ illtrap());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
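  // Move the five arguments into the argument registers; the array positions
  // and the length are 32-bit ints and must be sign-extended.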
  __ mr(R3_ARG1, src()->as_register());
  __ extsw(R4_ARG2, src_pos()->as_register());
  __ mr(R5_ARG3, dst()->as_register());
  __ extsw(R6_ARG4, dst_pos()->as_register());
  __ extsw(R7_ARG5, length()->as_register());

  ce->emit_static_call_stub();

  bool success = ce->emit_trampoline_stub_for_call(SharedRuntime::get_resolve_static_call_stub());
  if (!success) { return; }

  __ relocate(relocInfo::static_call_type);
  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ code()->set_insts_mark();
  __ bl(__ pc());
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());

#ifndef PRODUCT
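  // Count the slow-case invocations (non-product builds only).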
  const address counter = (address)&Runtime1::_arraycopy_slowcase_cnt;
  const Register tmp = R3, tmp2 = R4;
  int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
  __ lwz(tmp2, simm16_offs, tmp);
  __ addi(tmp2, tmp2, 1);
  __ stw(tmp2, simm16_offs, tmp);
#endif

  __ b(_continuation);
}

#undef __