/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_arm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_arm.inline.hpp"

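// Shorthand used throughout this file: each `__ insn' statement below emits
// code through the LIR assembler's MacroAssembler.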
#define __ ce->masm()->

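// Slow path taken when an invocation/backedge counter overflows: pass the bci
// and method to the runtime, which may trigger a recompilation, then resume
// execution at _continuation.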
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_bci, 0);
  ce->store_parameter(_method->as_constant_ptr()->as_metadata(), 1);
  __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ b(_continuation);
}


// TODO: ARM - is it possible to inline these stubs into the main code stream?


RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
    __ call(Runtime1::entry_for(Runtime1::predicate_failed_trap_id), relocInfo::runtime_call_type);
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }
  // Pass the array index on the stack because all registers must be preserved
  ce->verify_reserved_argument_area_size(_throw_index_out_of_bounds_exception ? 1 : 2);
  if (_index->is_cpu_register()) {
    __ str_32(_index->as_register(), Address(SP));
  } else {
    __ mov_slow(Rtemp, _index->as_jint()); // Rtemp should be OK in C1
    __ str_32(Rtemp, Address(SP));
  }

  if (_throw_index_out_of_bounds_exception) {
    __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
  } else {
    __ str(_array->as_pointer_register(), Address(SP, BytesPerWord)); // array pointer goes in the second reserved stack slot
    __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
  }
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(STOP("RangeCheck");)
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::predicate_failed_trap_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
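  // An _offset of -1 means there is no implicit exception site; otherwise,
  // map the offset of the potentially faulting instruction to this stub in
  // the implicit exception table.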
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id),
          relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(STOP("DivByZero");)
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id   = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  assert(_length->as_register() == R2, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  assert(_length->as_register() == R2, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  const Register obj_reg = _obj_reg->as_pointer_register();
  const Register lock_reg = _lock_reg->as_pointer_register();

  ce->verify_reserved_argument_area_size(2);
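  // stmia stores its register set in ascending register-number order, so it
  // can only be used here when obj_reg is numbered below lock_reg; otherwise
  // store the two words individually.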
  if (obj_reg < lock_reg) {
    __ stmia(SP, RegisterSet(obj_reg) | RegisterSet(lock_reg));
  } else {
    __ str(obj_reg, Address(SP));
    __ str(lock_reg, Address(SP, BytesPerWord));
  }

  Runtime1::StubID enter_id = ce->compilation()->has_fpu_code() ?
                              Runtime1::monitorenter_id :
                              Runtime1::monitorenter_nofpu_id;
  __ call(Runtime1::entry_for(enter_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  const Register lock_reg = _lock_reg->as_pointer_register();

  ce->verify_reserved_argument_area_size(1);
  __ str(lock_reg, Address(SP));

  // Non-blocking leaf routine - no call info needed
  Runtime1::StubID exit_id = ce->compilation()->has_fpu_code() ?
                             Runtime1::monitorexit_id :
                             Runtime1::monitorexit_nofpu_id;
  __ call(Runtime1::entry_for(exit_id), relocInfo::runtime_call_type);
  __ b(_continuation);
}


// The call return address is directly after the patch word
int PatchingStub::_patch_info_offset = 0;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
#if 0
  // TODO: investigate whether this needs to be implemented
  ShouldNotReachHere();
#endif
}

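// The code emitted below consists of: a copy of the patchable instruction(s),
// an optional being_initialized check, a one-word patch record describing the
// patch site, and a trampoline to the runtime stub that performs the patching.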
void PatchingStub::emit_code(LIR_Assembler* ce) {
  const int patchable_instruction_offset = 0;

  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 3) == 0, "must copy a multiple of four bytes");
  Label call_patch;
  bool is_load = (_id == load_klass_id) || (_id == load_mirror_id) || (_id == load_appendix_id);


  if (is_load && !VM_Version::supports_movw()) {
    address start = __ pc();

    // The following sequence duplicates the code provided in
    // MacroAssembler::patchable_mov_oop() without creating a relocation info entry.

    assert((__ pc() - start) == patchable_instruction_offset, "should be");
    __ ldr(_obj, Address(PC));
    // Extra nop to handle the case of a large offset to the oop placeholder (see NativeMovConstReg::set_data).
    __ nop();

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      assert(((address)_pc_start)[i] == start[i], "should be the same code");
    }
#endif // ASSERT
  }

  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (is_load) {
    address start = __ pc();
    if (_id == load_mirror_id || _id == load_appendix_id) {
      __ patchable_mov_oop(_obj, (jobject)Universe::non_oop_word(), _index);
    } else {
      __ patchable_mov_metadata(_obj, (Metadata*)Universe::non_oop_word(), _index);
    }
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      assert(((address)_pc_start)[i] == start[i], "should be the same code");
    }
#endif // ASSERT
  } else {
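    // Not a load: replay the original instructions here, since the copy at
    // the patch site gets overwritten by the jump to this stub.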
    int* start = (int*)_pc_start;
    int* end = start + (_bytes_to_copy / BytesPerInt);
    while (start < end) {
      __ emit_int32(*start++);
    }
  }
  address end_of_patch = __ pc();

  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    assert(_obj != noreg, "must be a valid register");
    // Rtemp should be OK in C1
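    // Continue past the patch only while the klass is being initialized by
    // the current thread; any other thread branches to call_patch, where the
    // runtime blocks until initialization completes before patching the site.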
    __ ldr(Rtemp, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
    __ ldr(Rtemp, Address(Rtemp, InstanceKlass::init_thread_offset()));
    __ cmp(Rtemp, Rthread);
    __ b(call_patch, ne);
    __ b(_patch_site_continuation);

    bytes_to_skip += __ offset() - offset;
  }

  if (CommentedAssembly) {
    __ block_comment("patch data - 3 high bytes of the word");
  }
  const int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;
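  // Patch record layout (one 32-bit word):
  //   byte 0: 0xff marker
  //   byte 1: offset back to the being_initialized entry
  //   byte 2: bytes to skip between the end of the patch template and the
  //           call return point
  //   byte 3: number of bytes to copy back to the patch site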
  __ emit_int32(0xff | being_initialized_entry_offset << 8 | bytes_to_skip << 16 | _bytes_to_copy << 24);

  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  // runtime call will return here
  Label call_return;
  __ bind(call_return);
  ce->add_call_info_here(_info);
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  __ b(_patch_site_entry);

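  // Overwrite the first instruction at the patch site with a jump to this
  // stub, so every execution reaches the patching code until the site has
  // actually been patched.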
  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }

  // Arrange for the call to return just after the patch word
  __ adr(LR, call_return);
  __ jump(target, relocInfo::runtime_call_type, Rtemp);

  if (is_load) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, pc, reloc_type, relocInfo::none);
  }
}

void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ mov_slow(Rtemp, _trap_request);
  ce->verify_reserved_argument_area_size(1);
  __ str(Rtemp, Address(SP));
  __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize rather than throw the exception: throwing it here is
    // probably the wrong thing to do.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(a, relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(STOP("ImplicitNullCheck");)
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // Pass the object on the stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->verify_reserved_argument_area_size(1);
    __ str(_obj->as_pointer_register(), Address(SP));
  } else {
    assert(_obj->is_illegal(), "should be");
  }
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(STOP("SimpleException");)
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5, true);

  Register r[5];
  r[0] = src()->as_pointer_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_pointer_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

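  // Move each argument to where the Java calling convention expects it:
  // values assigned to stack slots are stored below; register arguments must
  // already be in the right registers (asserted in the loop).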
  for (int i = 0; i < 5; i++) {
    VMReg arg = args[i].first();
    if (arg->is_stack()) {
      __ str(r[i], Address(SP, arg->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      assert(r[i] == arg->as_Register(), "Calling conventions must match");
    }
  }

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
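  // patchable_call returns the offset of its return address, which must land
  // exactly at the current end of the code so that the debug info recorded by
  // add_call_info_here() matches the actual return PC.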
  int ret_addr_offset = __ patchable_call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
  assert(ret_addr_offset == __ offset(), "embedded return address not allowed");
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());
  __ b(_continuation);
}

#undef __