/*
 * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_s390.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_s390.inline.hpp"

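// Emit code through the LIR_Assembler's MacroAssembler. CHECK_BAILOUT()
// aborts stub emission as soon as the compilation has bailed out.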
#define __ ce->masm()->
#undef  CHECK_BAILOUT
#define CHECK_BAILOUT() { if (ce->compilation()->bailed_out()) return; }

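// Two flavors of range check failure: with the array available, the runtime
// reports both index and array; without it, a plain IndexOutOfBoundsException
// carrying just the index is thrown.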
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _throw_index_out_of_bounds_exception(false), _index(index), _array(array) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
    ce->emit_call_c(a);
    CHECK_BAILOUT();
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  // Pass the array index in Z_R1_scratch which is not managed by linear scan.
  if (_index->is_cpu_register()) {
    __ lgr_if_needed(Z_R1_scratch, _index->as_register());
  } else {
    __ load_const_optimized(Z_R1_scratch, _index->as_jint());
  }

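  // Select the runtime entry: the range check variant additionally expects
  // the failing array in Z_R0_scratch.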
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
    __ lgr_if_needed(Z_R0_scratch, _array->as_pointer_register());
  }
  ce->emit_call_c(Runtime1::entry_for (stub_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  bool success = __ set_metadata_constant(m, Z_R1_scratch);
  if (!success) {
    ce->compilation()->bailout("const section overflow");
    return;
  }
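  // Pass the method (now in Z_R1_scratch) and the bci to the runtime as
  // stub parameters on the stack.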
  ce->store_parameter(/*_method->as_register()*/ Z_R1_scratch, 1);
  ce->store_parameter(_bci, 0);
  ce->emit_call_c(Runtime1::entry_for (Runtime1::counter_overflow_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
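  // If this division is an implicit exception site, map the offset of the
  // trapping instruction to this stub in the implicit exception table.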
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  ce->emit_call_c(Runtime1::entry_for (Runtime1::throw_div0_exception_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for (Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

// Note: pass the object in Z_R1_scratch.
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_obj->is_valid()) {
    __ z_lgr(Z_R1_scratch, _obj->as_register()); // _obj contains the optional argument to the stub
  }
  address a = Runtime1::entry_for (_stub);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
  address a = Runtime1::entry_for (_stub_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2");
  __ z_brul(_continuation);
}

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
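  // The runtime stub expects the array length in Z_R13.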
  __ lgr_if_needed(Z_R13, _length->as_register());
  address a = Runtime1::entry_for (Runtime1::new_type_array_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2");
  __ z_brul(_continuation);
}

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
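  // As in NewTypeArrayStub, the array length is passed in Z_R13.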
  __ lgr_if_needed(Z_R13, _length->as_register());
  address a = Runtime1::entry_for (Runtime1::new_object_array_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2");
  __ z_brul(_continuation);
}

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
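  // The _nofpu variant does not preserve FPU registers; it suffices when
  // the compiled method contains no FPU code.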
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ lgr_if_needed(Z_R1_scratch, _obj_reg->as_register());
  __ lgr_if_needed(Z_R13, _lock_reg->as_register()); // See LIRGenerator::syncTempOpr().
  ce->emit_call_c(Runtime1::entry_for (enter_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // Move address of the BasicObjectLock into Z_R1_scratch.
  if (_compute_lock) {
    // Lock_reg was destroyed by fast unlocking attempt => recompute it.
    ce->monitor_address(_monitor_ix, FrameMap::as_opr(Z_R1_scratch));
  } else {
    __ lgr_if_needed(Z_R1_scratch, _lock_reg->as_register());
  }
  // Note: non-blocking leaf routine => no call info needed.
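  // As in MonitorEnterStub::emit_code(), use the _nofpu variant when the
  // method contains no FPU code.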
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  ce->emit_call_c(Runtime1::entry_for (exit_id));
  CHECK_BAILOUT();
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes).
// - Replace original code with a call to the stub.
// At Runtime:
// - call to stub, jump to runtime.
// - in runtime: Preserve all registers (especially objects, i.e., source and destination object).
// - in runtime: After initializing class, restore original code, reexecute instruction.

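// Negative distance from the return address of the patching call back to
// the patch info: the 12-byte load_const and the 2-byte BASR lie in between.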
int PatchingStub::_patch_info_offset = - (12 /* load const */ + 2 /*BASR*/);

void PatchingStub::align_patch_site(MacroAssembler* masm) {
#ifndef PRODUCT
  const char* bc;
  switch (_id) {
  case access_field_id: bc = "patch site (access_field)"; break;
  case load_klass_id: bc = "patch site (load_klass)"; break;
  case load_mirror_id: bc = "patch site (load_mirror)"; break;
  case load_appendix_id: bc = "patch site (load_appendix)"; break;
  default: bc = "patch site (unknown patch id)"; break;
  }
  masm->block_comment(bc);
#endif

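  // Align the patch site so the patching jump can later be installed
  // atomically on MP hardware.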
  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // Copy original code here.
  assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");

  NearLabel call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // Produce a copy of the load klass instruction for use by the case being initialized.
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit((intptr_t)0, metadata_Relocation::spec(_index));
    __ load_const(_obj, addrlit);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // Produce a copy of the load mirror instruction for use by the case being initialized.
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit((intptr_t)0, oop_Relocation::spec(_index));
    __ load_const(_obj, addrlit);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // Make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8 (a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    // Static field accesses have special semantics while the class
    // initializer is being run, so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
    __ z_lg(Z_R1_scratch, java_lang_Class::klass_offset_in_bytes(), _obj);
    __ z_cg(Z_thread, Address(Z_R1_scratch, InstanceKlass::init_thread_offset()));
    __ branch_optimized(Assembler::bcondNotEqual, call_patch);

    // Load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ branch_optimized(Assembler::bcondAlways, _patch_site_continuation);

    // Make sure this extra code gets skipped.
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch. We only need 3 bytes, but to help the disassembler
  // we make the data look like the following add instruction:
  //   A R1, D2(X2, B2)
  // which requires 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // Emit the offsets needed to find the code to patch.
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record: opcode of the add followed by 3 bytes of patch record data.
  __ emit_int8((int8_t)(A_ZOPC>>24));
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

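  // Overwrite the start of the patch site with an unconditional jump into
  // this stub.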
  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for (Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for (Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for (Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for (Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  // Cannot use call_c_opt() because its size is not constant.
  __ load_const(Z_R1_scratch, target); // Must not optimize in order to keep _patch_info_offset constant.
  __ z_basr(Z_R14, Z_R1_scratch);
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
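  // Return to the patch site and re-execute the now-patched code.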
  __ z_brcl(Assembler::bcondAlways, _patch_site_entry);
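  // The patch site currently holds the patching jump rather than a valid
  // constant; drop its oop/metadata relocation so the stale value is not
  // processed before it is patched.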
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);
  }
}

void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ load_const_optimized(Z_R1_scratch, _trap_request); // Pass trap request in Z_R1_scratch.
  ce->emit_call_c(Runtime1::entry_for (Runtime1::deoptimize_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  // Slow case: call to native.
  __ bind(_entry);
  __ lgr_if_needed(Z_ARG1, src()->as_register());
  __ lgr_if_needed(Z_ARG2, src_pos()->as_register());
  __ lgr_if_needed(Z_ARG3, dst()->as_register());
  __ lgr_if_needed(Z_ARG4, dst_pos()->as_register());
  __ lgr_if_needed(Z_ARG5, length()->as_register());

  // Must align call sites, otherwise they can't be updated atomically on MP hardware.
  ce->align_call(lir_static_call);

  assert((__ offset() + NativeCall::call_far_pcrelative_displacement_offset) % NativeCall::call_far_pcrelative_displacement_alignment == 0,
         "must be aligned");

  ce->emit_static_call_stub();

  // Prepend each BRASL with a nop.
  __ relocate(relocInfo::static_call_type);
  __ z_nop();
  __ z_brasl(Z_R14, SharedRuntime::get_resolve_static_call_stub());
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());

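  // Bump the slow-case counter for diagnostics (non-product builds only).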
#ifndef PRODUCT
  __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_slowcase_cnt);
  __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
#endif

  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

#undef __