/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_arm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_arm.inline.hpp"

#define __ ce->masm()->

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_bci, 0);
  ce->store_parameter(_method->as_constant_ptr()->as_metadata(), 1);
  __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ b(_continuation);
}


// TODO: ARM - is it possible to inline these stubs into the main code stream?

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  _info = info == NULL ? NULL : new CodeEmitInfo(info);
}


void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
#ifdef AARCH64
    __ NOT_TESTED();
#endif
    __ call(Runtime1::entry_for(Runtime1::predicate_failed_trap_id), relocInfo::runtime_call_type);
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }
  // Pass the array index on the stack because all registers must be preserved
  ce->verify_reserved_argument_area_size(1);
  if (_index->is_cpu_register()) {
    __ str_32(_index->as_register(), Address(SP));
  } else {
    __ mov_slow(Rtemp, _index->as_jint()); // Rtemp should be OK in C1
    __ str_32(Rtemp, Address(SP));
  }

  if (_throw_index_out_of_bounds_exception) {
#ifdef AARCH64
    __ NOT_TESTED();
#endif
    __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
  }
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(STOP("RangeCheck");)
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::predicate_failed_trap_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id),
          relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(STOP("DivByZero");)
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id   = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  assert(_length->as_register() == R2, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  assert(_length->as_register() == R2, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  const Register obj_reg = _obj_reg->as_pointer_register();
  const Register lock_reg = _lock_reg->as_pointer_register();

  ce->verify_reserved_argument_area_size(2);
#ifdef AARCH64
  __ stp(obj_reg, lock_reg, Address(SP));
#else
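  // stmia stores the register set in ascending register-number order (the
  // lowest-numbered register goes to the lowest address), so it can only be
  // used when obj_reg is numbered below lock_reg.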
  if (obj_reg < lock_reg) {
    __ stmia(SP, RegisterSet(obj_reg) | RegisterSet(lock_reg));
  } else {
    __ str(obj_reg, Address(SP));
    __ str(lock_reg, Address(SP, BytesPerWord));
  }
#endif // AARCH64

  Runtime1::StubID enter_id = ce->compilation()->has_fpu_code() ?
                              Runtime1::monitorenter_id :
                              Runtime1::monitorenter_nofpu_id;
  __ call(Runtime1::entry_for(enter_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
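  // _lock_reg may have been destroyed by a fast unlocking attempt, so
  // recompute the monitor's stack address if requested.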
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  const Register lock_reg = _lock_reg->as_pointer_register();

  ce->verify_reserved_argument_area_size(1);
  __ str(lock_reg, Address(SP));

  // Non-blocking leaf routine - no call info needed
  Runtime1::StubID exit_id = ce->compilation()->has_fpu_code() ?
                             Runtime1::monitorexit_id :
                             Runtime1::monitorexit_nofpu_id;
  __ call(Runtime1::entry_for(exit_id), relocInfo::runtime_call_type);
  __ b(_continuation);
}


// The runtime call returns directly after the patch word
int PatchingStub::_patch_info_offset = 0;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
#if 0
  // TODO: investigate whether we need to implement this
  ShouldNotReachHere();
#endif
}

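// PatchingStub::emit_code emits a template copy of the code being patched,
// followed by a patch-record word describing it and the transfer to the
// patching runtime stub, which returns just after the patch-record word.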
void PatchingStub::emit_code(LIR_Assembler* ce) {
  const int patchable_instruction_offset = AARCH64_ONLY(NativeInstruction::instruction_size) NOT_AARCH64(0);

  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 3) == 0, "must copy a multiple of four bytes");
  Label call_patch;
  bool is_load = (_id == load_klass_id) || (_id == load_mirror_id) || (_id == load_appendix_id);

#ifdef AARCH64
  assert(nativeInstruction_at(_pc_start)->is_nop(), "required for MT safe patching");

  // Keep the same alignment for reg2mem code and PatchingStub code, required
  // so the copied bind_literal() code is properly aligned.
  __ align(wordSize);
#endif // AARCH64

  if (is_load NOT_AARCH64(&& !VM_Version::supports_movw())) {
    address start = __ pc();

    // The following sequence duplicates the code emitted by
    // MacroAssembler::patchable_mov_oop() without creating a relocation info entry.
#ifdef AARCH64
    // Extra nop for MT safe patching
    __ nop();
#endif // AARCH64

    assert((__ pc() - start) == patchable_instruction_offset, "should be");
#ifdef AARCH64
    __ ldr(_obj, __ pc());
#else
    __ ldr(_obj, Address(PC));
    // Extra nop to handle the case of a large offset to the oop placeholder (see NativeMovConstReg::set_data).
    __ nop();
#endif // AARCH64

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      assert(((address)_pc_start)[i] == start[i], "should be the same code");
    }
#endif // ASSERT
  }

  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (is_load) {
    address start = __ pc();
    if (_id == load_mirror_id || _id == load_appendix_id) {
      __ patchable_mov_oop(_obj, (jobject)Universe::non_oop_word(), _index);
    } else {
      __ patchable_mov_metadata(_obj, (Metadata*)Universe::non_oop_word(), _index);
    }
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      assert(((address)_pc_start)[i] == start[i], "should be the same code");
    }
#endif // ASSERT
  } else {
    int* start = (int*)_pc_start;
    int* end = start + (_bytes_to_copy / BytesPerInt);
    while (start < end) {
      __ emit_int32(*start++);
    }
  }
  address end_of_patch = __ pc();

  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    assert(_obj != noreg, "must be a valid register");
    // Rtemp should be OK in C1
    __ ldr(Rtemp, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
    __ ldr(Rtemp, Address(Rtemp, InstanceKlass::init_thread_offset()));
    __ cmp(Rtemp, Rthread);
    __ b(call_patch, ne);
    __ b(_patch_site_continuation);

    bytes_to_skip += __ offset() - offset;
  }

  if (CommentedAssembly) {
    __ block_comment("patch data - 3 high bytes of the word");
  }
  const int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;
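  // The patch-record word packs four byte fields, low byte to high: a 0xff
  // marker, the offset back to the being_initialized entry, bytes_to_skip,
  // and _bytes_to_copy.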
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;
  __ emit_int32(0xff | being_initialized_entry_offset << 8 | bytes_to_skip << 16 | _bytes_to_copy << 24);

  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  // runtime call will return here
  Label call_return;
  __ bind(call_return);
  ce->add_call_info_here(_info);
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  __ b(_patch_site_entry);

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }

  // Arrange for the call to return just after the patch word
  __ adr(LR, call_return);
  __ jump(target, relocInfo::runtime_call_type, Rtemp);

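  // The patch site still carries the placeholder's oop/metadata relocation;
  // downgrade it to relocInfo::none until patching installs the real constant.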
  if (is_load) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, pc, reloc_type, relocInfo::none);
  }
}

void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
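  // Pass the trap request on the stack because all registers must be preserved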
  __ mov_slow(Rtemp, _trap_request);
  ce->verify_reserved_argument_area_size(1);
  __ str(Rtemp, Address(SP));
  __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize instead of throwing the exception; it is probably wrong
    // to throw it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(a, relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(STOP("ImplicitNullCheck");)
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // Pass the object on the stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->verify_reserved_argument_area_size(1);
    __ str(_obj->as_pointer_register(), Address(SP));
  } else {
    assert(_obj->is_illegal(), "should be");
  }
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(STOP("SimpleException");)
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

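  // Set up the five arraycopy arguments (src, src_pos, dst, dst_pos, length)
  // according to the Java calling convention.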
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5, true);

  Register r[5];
  r[0] = src()->as_pointer_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_pointer_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  for (int i = 0; i < 5; i++) {
    VMReg arg = args[i].first();
    if (arg->is_stack()) {
      __ str(r[i], Address(SP, arg->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      assert(r[i] == arg->as_Register(), "Calling conventions must match");
    }
  }

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  int ret_addr_offset = __ patchable_call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
  assert(ret_addr_offset == __ offset(), "embedded return address not allowed");
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());
  __ b(_continuation);
}

#undef __