/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_arm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_arm.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#endif // INCLUDE_ALL_GCS

#define __ ce->masm()->

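// CounterOverflowStub: slow path taken when an invocation or backedge counter
// overflows. It passes the bci and the method to the runtime so the compilation
// policy can decide whether to recompile, then resumes at _continuation.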
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_bci, 0);
  ce->store_parameter(_method->as_constant_ptr()->as_metadata(), 1);
  __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ b(_continuation);
}


// TODO: ARM - is it possible to inline these stubs into the main code stream?

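// RangeCheckStub: when no array operand is supplied, the stub raises the generic
// IndexOutOfBoundsException; otherwise it reports an
// ArrayIndexOutOfBoundsException for the given array and index.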
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _throw_index_out_of_bounds_exception(array == NULL), _index(index), _array(array) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}


void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
#ifdef AARCH64
    __ NOT_TESTED();
#endif
    __ call(Runtime1::entry_for(Runtime1::predicate_failed_trap_id), relocInfo::runtime_call_type);
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }
  // Pass the array index (and the array, when available) on the stack because
  // all registers must be preserved.
  ce->verify_reserved_argument_area_size(_throw_index_out_of_bounds_exception ? 1 : 2);
  if (_index->is_cpu_register()) {
    __ str_32(_index->as_register(), Address(SP));
  } else {
    __ mov_slow(Rtemp, _index->as_jint()); // Rtemp should be OK in C1
    __ str_32(Rtemp, Address(SP));
  }

  if (_throw_index_out_of_bounds_exception) {
#ifdef AARCH64
    __ NOT_TESTED();
#endif
    __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
  } else {
    // _array is only valid on this path; store it in the second reserved slot.
    __ str(_array->as_pointer_register(), Address(SP, BytesPerWord));
    __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
  }
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(STOP("RangeCheck");)
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::predicate_failed_trap_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id),
          relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(STOP("DivByZero");)
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id   = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  assert(_length->as_register() == R2, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  assert(_length->as_register() == R2, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}


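// MonitorEnterStub: passes the object and the BasicLock address to the runtime
// through the reserved argument area (all registers must be preserved), using
// the no-FPU-save entry when the method contains no FPU code.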
void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  const Register obj_reg = _obj_reg->as_pointer_register();
  const Register lock_reg = _lock_reg->as_pointer_register();

  ce->verify_reserved_argument_area_size(2);
#ifdef AARCH64
  __ stp(obj_reg, lock_reg, Address(SP));
#else
  if (obj_reg < lock_reg) {
    __ stmia(SP, RegisterSet(obj_reg) | RegisterSet(lock_reg));
  } else {
    __ str(obj_reg, Address(SP));
    __ str(lock_reg, Address(SP, BytesPerWord));
  }
#endif // AARCH64

  Runtime1::StubID enter_id = ce->compilation()->has_fpu_code() ?
                              Runtime1::monitorenter_id :
                              Runtime1::monitorenter_nofpu_id;
  __ call(Runtime1::entry_for(enter_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  const Register lock_reg = _lock_reg->as_pointer_register();

  ce->verify_reserved_argument_area_size(1);
  __ str(lock_reg, Address(SP));

  // Non-blocking leaf routine - no call info needed
  Runtime1::StubID exit_id = ce->compilation()->has_fpu_code() ?
                             Runtime1::monitorexit_id :
                             Runtime1::monitorexit_nofpu_id;
  __ call(Runtime1::entry_for(exit_id), relocInfo::runtime_call_type);
  __ b(_continuation);
}


// Call return is directly after patch word
int PatchingStub::_patch_info_offset = 0;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
#if 0
  // TODO: investigate whether we need to implement this
    ShouldNotReachHere();
#endif
}

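// PatchingStub::emit_code emits, in order: a copy of the code being patched (or
// a template constant load for klass/mirror/appendix loads), an optional
// "being initialized" check for load_mirror, a 4-byte patch record that the
// runtime patching code parses, and a call into the patching runtime which
// returns just after the patch record.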
void PatchingStub::emit_code(LIR_Assembler* ce) {
  const int patchable_instruction_offset = AARCH64_ONLY(NativeInstruction::instruction_size) NOT_AARCH64(0);

  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 3) == 0, "must copy a multiple of four bytes");
  Label call_patch;
  bool is_load = (_id == load_klass_id) || (_id == load_mirror_id) || (_id == load_appendix_id);

#ifdef AARCH64
  assert(nativeInstruction_at(_pc_start)->is_nop(), "required for MT safe patching");

  // Same alignment of reg2mem code and PatchingStub code. Required to make copied bind_literal() code properly aligned.
  __ align(wordSize);
#endif // AARCH64

  if (is_load NOT_AARCH64(&& !VM_Version::supports_movw())) {
    address start = __ pc();

    // The following sequence duplicates code provided in MacroAssembler::patchable_mov_oop()
    // without creating relocation info entry.
#ifdef AARCH64
    // Extra nop for MT safe patching
    __ nop();
#endif // AARCH64

    assert((__ pc() - start) == patchable_instruction_offset, "should be");
#ifdef AARCH64
    __ ldr(_obj, __ pc());
#else
    __ ldr(_obj, Address(PC));
    // Extra nop to handle case of large offset of oop placeholder (see NativeMovConstReg::set_data).
    __ nop();
#endif // AARCH64

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      assert(((address)_pc_start)[i] == start[i], "should be the same code");
    }
#endif // ASSERT
  }

  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (is_load) {
    address start = __ pc();
    if (_id == load_mirror_id || _id == load_appendix_id) {
      __ patchable_mov_oop(_obj, (jobject)Universe::non_oop_word(), _index);
    } else {
      __ patchable_mov_metadata(_obj, (Metadata*)Universe::non_oop_word(), _index);
    }
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      assert(((address)_pc_start)[i] == start[i], "should be the same code");
    }
#endif // ASSERT
  } else {
    int* start = (int*)_pc_start;
    int* end = start + (_bytes_to_copy / BytesPerInt);
    while (start < end) {
      __ emit_int32(*start++);
    }
  }
  address end_of_patch = __ pc();

  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    assert(_obj != noreg, "must be a valid register");
    // Rtemp should be OK in C1
    __ ldr(Rtemp, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
    __ ldr(Rtemp, Address(Rtemp, InstanceKlass::init_thread_offset()));
    __ cmp(Rtemp, Rthread);
    __ b(call_patch, ne);
    __ b(_patch_site_continuation);

    bytes_to_skip += __ offset() - offset;
  }

  if (CommentedAssembly) {
    __ block_comment("patch data - 3 high bytes of the word");
  }
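  // The patch record is a single word: 0xff in the low byte marks the record,
  // byte 1 holds the offset back to being_initialized_entry, byte 2 the number
  // of bytes to skip past the record, and byte 3 the number of bytes copied
  // from the patch site (each value must fit in one byte).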
  const int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;
  __ emit_int32(0xff | being_initialized_entry_offset << 8 | bytes_to_skip << 16 | _bytes_to_copy << 24);

  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  // runtime call will return here
  Label call_return;
  __ bind(call_return);
  ce->add_call_info_here(_info);
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  __ b(_patch_site_entry);

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }

  // arrange for call to return just after patch word
  __ adr(LR, call_return);
  __ jump(target, relocInfo::runtime_call_type, Rtemp);

  if (is_load) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, pc, reloc_type, relocInfo::none);
  }
}

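// DeoptimizeStub: passes the encoded trap request to the runtime in the reserved
// stack slot; the runtime call deoptimizes the frame and never returns here.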
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ mov_slow(Rtemp, _trap_request);
  ce->verify_reserved_argument_area_size(1);
  __ str(Rtemp, Address(SP));
  __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is
    // probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(a, relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(STOP("ImplicitNullCheck");)
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // Pass the object on stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->verify_reserved_argument_area_size(1);
    __ str(_obj->as_pointer_register(), Address(SP));
  } else {
    assert(_obj->is_illegal(), "should be");
  }
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(STOP("SimpleException");)
}


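// ArrayCopyStub: slow path performed as a regular static Java call, resolved
// through the static call stub. Arguments are placed according to the Java
// calling convention: values whose assigned location is a stack slot are stored
// there; the rest must already sit in the expected registers.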
void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5, true);

  Register r[5];
  r[0] = src()->as_pointer_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_pointer_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  for (int i = 0; i < 5; i++) {
    VMReg arg = args[i].first();
    if (arg->is_stack()) {
      __ str(r[i], Address(SP, arg->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      assert(r[i] == arg->as_Register(), "Calling conventions must match");
    }
  }

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  int ret_addr_offset = __ patchable_call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
  assert(ret_addr_offset == __ offset(), "embedded return address not allowed");
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());
  __ b(_continuation);
}

/////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(_entry);
  assert(pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = pre_val()->as_register();

  if (do_load()) {
    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
  }

  __ cbz(pre_val_reg, _continuation);
  ce->verify_reserved_argument_area_size(1);
  __ str(pre_val_reg, Address(SP));
  __ call(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id), relocInfo::runtime_call_type);

  __ b(_continuation);
}

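// G1PostBarrierStub: if the stored value is null no post barrier is needed;
// otherwise the address of the updated field is passed to the runtime slow
// path, which dirties the card and enqueues it if necessary.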
void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register new_val_reg = new_val()->as_register();
  __ cbz(new_val_reg, _continuation);
  ce->verify_reserved_argument_area_size(1);
  __ str(addr()->as_pointer_register(), Address(SP));
  __ call(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id), relocInfo::runtime_call_type);
  __ b(_continuation);
}

#endif // INCLUDE_ALL_GCS
/////////////////////////////////////////////////////////////////////////////

#undef __