/*
 * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_s390.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_s390.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#endif // INCLUDE_ALL_GCS

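// `__` is the usual HotSpot shorthand for the macro assembler of the current
// LIR_Assembler. Every emit_call_c() below is followed by CHECK_BAILOUT():
// emitting a runtime call can bail out the compilation (e.g. when a code or
// constant section overflows), and once that has happened no further code
// must be emitted for this stub.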
#define __ ce->masm()->
#undef  CHECK_BAILOUT
#define CHECK_BAILOUT() { if (ce->compilation()->bailed_out()) return; }

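// Slow path for a failed array bounds check. Depending on how the stub was
// set up, it either deoptimizes via the predicate_failed trap or calls one of
// the two throwing runtime entries; the failing index is passed to the
// runtime in Z_R1_scratch.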
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception) :
  _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception),
  _index(index) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
    ce->emit_call_c(a);
    CHECK_BAILOUT();
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  // Pass the array index in Z_R1_scratch which is not managed by linear scan.
  if (_index->is_cpu_register()) {
    __ lgr_if_needed(Z_R1_scratch, _index->as_register());
  } else {
    __ load_const_optimized(Z_R1_scratch, _index->as_jint());
  }

  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  ce->emit_call_c(Runtime1::entry_for (stub_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

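// Invocation/backedge counter overflow: pass the method (in Z_R1_scratch) and
// the bci of the overflowing branch to the runtime, which may trigger a
// recompilation, then fall through to the continuation.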
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  bool success = __ set_metadata_constant(m, Z_R1_scratch);
  if (!success) {
    ce->compilation()->bailout("const section overflow");
    return;
  }
  ce->store_parameter(/*_method->as_register()*/ Z_R1_scratch, 1);
  ce->store_parameter(_bci, 0);
  ce->emit_call_c(Runtime1::entry_for (Runtime1::counter_overflow_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

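// Division by zero: the offset of the potentially faulting divide instruction
// (if any, i.e. _offset != -1) is recorded in the implicit exception table so
// that the signal handler can dispatch to this stub, which then throws the
// ArithmeticException.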
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  ce->emit_call_c(Runtime1::entry_for (Runtime1::throw_div0_exception_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

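// Implicit null check: the faulting memory access is recorded in the implicit
// exception table, so the SIGSEGV handler continues at this stub, which
// either deoptimizes or throws the NullPointerException.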
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for (Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

// Note: pass object in Z_R1_scratch
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_obj->is_valid()) {
    __ z_lgr(Z_R1_scratch, _obj->as_register()); // _obj contains the optional argument to the stub
  }
  address a = Runtime1::entry_for (_stub);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

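// Allocation slow paths. The Runtime1 allocation entries expect the klass in
// Z_R11 and, for the array stubs, the length in Z_R13; the new object is
// returned in Z_R2 (see the asserts below).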
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
  address a = Runtime1::entry_for (_stub_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2");
  __ z_brul(_continuation);
}

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
  __ lgr_if_needed(Z_R13, _length->as_register());
  address a = Runtime1::entry_for (Runtime1::new_type_array_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2");
  __ z_brul(_continuation);
}

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
  __ lgr_if_needed(Z_R13, _length->as_register());
  address a = Runtime1::entry_for (Runtime1::new_object_array_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2");
  __ z_brul(_continuation);
}

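// Monitor enter/exit slow paths. The _nofpu runtime variants are chosen when
// the method contains no FPU code; they avoid saving and restoring the
// floating point registers around the runtime call.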
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ lgr_if_needed(Z_R1_scratch, _obj_reg->as_register());
  __ lgr_if_needed(Z_R13, _lock_reg->as_register()); // See LIRGenerator::syncTempOpr().
  ce->emit_call_c(Runtime1::entry_for (enter_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // Move address of the BasicObjectLock into Z_R1_scratch.
  if (_compute_lock) {
    // Lock_reg was destroyed by fast unlocking attempt => recompute it.
    ce->monitor_address(_monitor_ix, FrameMap::as_opr(Z_R1_scratch));
  } else {
    __ lgr_if_needed(Z_R1_scratch, _lock_reg->as_register());
  }
  // Note: non-blocking leaf routine => no call info needed.
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  ce->emit_call_c(Runtime1::entry_for (exit_id));
  CHECK_BAILOUT();
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}


// Implementation of patching:
// - Copy the code at the given offset into an inlined buffer (first the bytes, then the number of bytes).
// - Replace the original code with a call to the stub.
// At runtime:
// - Call the stub, which jumps into the runtime.
// - In the runtime: preserve all registers (especially the source and destination objects).
// - In the runtime: after initializing the class, restore the original code and re-execute the instruction.

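// Negative distance from the return address of the patching call back to the
// start of the call sequence (a 12-byte load_const followed by a 2-byte
// BASR). The 4-byte patch record is located immediately before that sequence;
// emit_code() below asserts that this layout is preserved.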
int PatchingStub::_patch_info_offset = - (12 /* load const */ + 2 /*BASR*/);

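// The patch site is later overwritten with a NativeGeneralJump to the stub;
// align it so the jump can be stored as a single atomic update.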
void PatchingStub::align_patch_site(MacroAssembler* masm) {
#ifndef PRODUCT
  const char* bc;
  switch (_id) {
  case access_field_id: bc = "patch site (access_field)"; break;
  case load_klass_id: bc = "patch site (load_klass)"; break;
  case load_mirror_id: bc = "patch site (load_mirror)"; break;
  case load_appendix_id: bc = "patch site (load_appendix)"; break;
  default: bc = "patch site (unknown patch id)"; break;
  }
  masm->block_comment(bc);
#endif

  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // Copy original code here.
  assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");

  NearLabel call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // Produce a copy of the load klass instruction for use by the case being initialized.
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit((intptr_t)0, metadata_Relocation::spec(_index));
    __ load_const(_obj, addrlit);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // Produce a copy of the load mirror instruction for use by the case being initialized.
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit((intptr_t)0, oop_Relocation::spec(_index));
    __ load_const(_obj, addrlit);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // Make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8 (a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    // Static field accesses have special semantics while the class
    // initializer is being run, so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
    __ z_lg(Z_R1_scratch, java_lang_Class::klass_offset_in_bytes(), _obj);
    __ z_cg(Z_thread, Address(Z_R1_scratch, InstanceKlass::init_thread_offset()));
    __ branch_optimized(Assembler::bcondNotEqual, call_patch);

    // Load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ branch_optimized(Assembler::bcondAlways, _patch_site_continuation);

    // Make sure this extra code gets skipped.
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch. We only need 3 bytes, but to help the disassembler
  // we make the data look like the following add instruction:
  //   A R1, D2(X2, B2)
  // which requires 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // Emit the offsets needed to find the code to patch.
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record: opcode of the add followed by 3 bytes of patch record data.
  __ emit_int8((int8_t)(A_ZOPC>>24));
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for (Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for (Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for (Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for (Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  // Cannot use call_c_opt() because its size is not constant.
  __ load_const(Z_R1_scratch, target); // Must not optimize, to keep _patch_info_offset constant.
  __ z_basr(Z_R14, Z_R1_scratch);
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ z_brcl(Assembler::bcondAlways, _patch_site_entry);
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);
  }
}

void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ load_const_optimized(Z_R1_scratch, _trap_request); // Pass trap request in Z_R1_scratch.
  ce->emit_call_c(Runtime1::entry_for (Runtime1::deoptimize_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

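// Slow path for arraycopy: load the five arguments into the argument
// registers and emit a resolvable static call into the runtime. The call site
// must be aligned (note the nop preceding the BRASL) so that the call target
// can later be patched atomically on MP hardware.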
void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  // Slow case: call to native.
  __ bind(_entry);
  __ lgr_if_needed(Z_ARG1, src()->as_register());
  __ lgr_if_needed(Z_ARG2, src_pos()->as_register());
  __ lgr_if_needed(Z_ARG3, dst()->as_register());
  __ lgr_if_needed(Z_ARG4, dst_pos()->as_register());
  __ lgr_if_needed(Z_ARG5, length()->as_register());

  // Must align call sites, otherwise they can't be updated atomically on MP hardware.
  ce->align_call(lir_static_call);

  assert((__ offset() + NativeCall::call_far_pcrelative_displacement_offset) % NativeCall::call_far_pcrelative_displacement_alignment == 0,
         "must be aligned");

  ce->emit_static_call_stub();

  // Prepend each BRASL with a nop.
  __ relocate(relocInfo::static_call_type);
  __ z_nop();
  __ z_brasl(Z_R14, SharedRuntime::get_resolve_static_call_stub());
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());

#ifndef PRODUCT
  __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_slowcase_cnt);
  __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
#endif

  __ branch_optimized(Assembler::bcondAlways, _continuation);
}


///////////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.
  __ bind(_entry);
  ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
  assert(pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = pre_val()->as_register();

  if (do_load()) {
    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
  }

  __ z_ltgr(Z_R1_scratch, pre_val_reg); // Pass oop in Z_R1_scratch to Runtime1::g1_pre_barrier_slow_id.
  __ branch_optimized(Assembler::bcondZero, _continuation);
  ce->emit_call_c(Runtime1::entry_for (Runtime1::g1_pre_barrier_slow_id));
  CHECK_BAILOUT();
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

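// G1 post barrier slow path: storing a null new value can never create a
// cross-region reference, so such stores are filtered out before the store
// address is passed to the runtime in Z_R1_scratch.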
void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register new_val_reg = new_val()->as_register();
  __ z_ltgr(new_val_reg, new_val_reg);
  __ branch_optimized(Assembler::bcondZero, _continuation);
  __ z_lgr(Z_R1_scratch, addr()->as_pointer_register());
  ce->emit_call_c(Runtime1::entry_for (Runtime1::g1_post_barrier_slow_id));
  CHECK_BAILOUT();
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

#endif // INCLUDE_ALL_GCS

#undef __