/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_arm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_arm.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#endif // INCLUDE_ALL_GCS

#define __ ce->masm()->

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_bci, 0);
  ce->store_parameter(_method->as_constant_ptr()->as_metadata(), 1);
  __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ b(_continuation);
}


// TODO: ARM - is it possible to inline these stubs into the main code stream?


RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _throw_index_out_of_bounds_exception(false), _index(index), _array(array) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
#ifdef AARCH64
    __ NOT_TESTED();
#endif
    __ call(Runtime1::entry_for(Runtime1::predicate_failed_trap_id), relocInfo::runtime_call_type);
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }
  // Pass the array index (and, for range checks, the array) on the stack
  // because all registers must be preserved.
  ce->verify_reserved_argument_area_size(_throw_index_out_of_bounds_exception ? 1 : 2);
  if (_index->is_cpu_register()) {
    __ str_32(_index->as_register(), Address(SP));
  } else {
    __ mov_slow(Rtemp, _index->as_jint()); // Rtemp should be OK in C1
    __ str_32(Rtemp, Address(SP));
  }
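  // The array oop is passed only on the range-check path below: the
  // one-argument constructor above sets _array to NULL when
  // _throw_index_out_of_bounds_exception is true, so it must not be
  // dereferenced on that path.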
  if (_throw_index_out_of_bounds_exception) {
#ifdef AARCH64
    __ NOT_TESTED();
#endif
    __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
  } else {
    __ str(_array->as_pointer_register(), Address(SP, BytesPerWord));
    __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
  }
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(STOP("RangeCheck");)
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::predicate_failed_trap_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id),
          relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(STOP("DivByZero");)
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  assert(_length->as_register() == R2, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(_result->as_register() == R0, "runtime call setup");
  assert(_klass_reg->as_register() == R1, "runtime call setup");
  assert(_length->as_register() == R2, "runtime call setup");
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
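  // Debug info is recorded at the return address of the runtime call so the
  // VM can find this frame's oop map and deoptimize it if necessary.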
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  const Register obj_reg = _obj_reg->as_pointer_register();
  const Register lock_reg = _lock_reg->as_pointer_register();

  ce->verify_reserved_argument_area_size(2);
#ifdef AARCH64
  __ stp(obj_reg, lock_reg, Address(SP));
#else
  if (obj_reg < lock_reg) {
    // stmia stores the set in ascending register order, so obj_reg
    // lands at [SP] and lock_reg at [SP + BytesPerWord]
    __ stmia(SP, RegisterSet(obj_reg) | RegisterSet(lock_reg));
  } else {
    __ str(obj_reg, Address(SP));
    __ str(lock_reg, Address(SP, BytesPerWord));
  }
#endif // AARCH64

  Runtime1::StubID enter_id = ce->compilation()->has_fpu_code() ?
                              Runtime1::monitorenter_id :
                              Runtime1::monitorenter_nofpu_id;
  __ call(Runtime1::entry_for(enter_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  const Register lock_reg = _lock_reg->as_pointer_register();

  ce->verify_reserved_argument_area_size(1);
  __ str(lock_reg, Address(SP));

  // Non-blocking leaf routine - no call info needed
  Runtime1::StubID exit_id = ce->compilation()->has_fpu_code() ?
                             Runtime1::monitorexit_id :
                             Runtime1::monitorexit_nofpu_id;
  __ call(Runtime1::entry_for(exit_id), relocInfo::runtime_call_type);
  __ b(_continuation);
}


// Call return is directly after patch word
int PatchingStub::_patch_info_offset = 0;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
#if 0
  // TODO: investigate whether this needs to be implemented
  ShouldNotReachHere();
#endif
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  const int patchable_instruction_offset = AARCH64_ONLY(NativeInstruction::instruction_size) NOT_AARCH64(0);

  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 3) == 0, "must copy a multiple of four bytes");
  Label call_patch;
  bool is_load = (_id == load_klass_id) || (_id == load_mirror_id) || (_id == load_appendix_id);

#ifdef AARCH64
  assert(nativeInstruction_at(_pc_start)->is_nop(), "required for MT safe patching");

  // Same alignment of reg2mem code and PatchingStub code. Required to make copied bind_literal() code properly aligned.
  __ align(wordSize);
#endif // AARCH64

  if (is_load NOT_AARCH64(&& !VM_Version::supports_movw())) {
    address start = __ pc();

    // The following sequence duplicates code provided in MacroAssembler::patchable_mov_oop()
    // without creating relocation info entry.
#ifdef AARCH64
    // Extra nop for MT safe patching
    __ nop();
#endif // AARCH64

    assert((__ pc() - start) == patchable_instruction_offset, "should be");
#ifdef AARCH64
    __ ldr(_obj, __ pc());
#else
    __ ldr(_obj, Address(PC));
    // Extra nop to handle case of large offset of oop placeholder (see NativeMovConstReg::set_data).
    __ nop();
#endif // AARCH64
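    // In debug builds, check that the sequence just emitted is
    // byte-identical to the code at _pc_start that it will replace.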
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      assert(((address)_pc_start)[i] == start[i], "should be the same code");
    }
#endif // ASSERT
  }

  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (is_load) {
    address start = __ pc();
    if (_id == load_mirror_id || _id == load_appendix_id) {
      __ patchable_mov_oop(_obj, (jobject)Universe::non_oop_word(), _index);
    } else {
      __ patchable_mov_metadata(_obj, (Metadata*)Universe::non_oop_word(), _index);
    }
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      assert(((address)_pc_start)[i] == start[i], "should be the same code");
    }
#endif // ASSERT
  } else {
    int* start = (int*)_pc_start;
    int* end = start + (_bytes_to_copy / BytesPerInt);
    while (start < end) {
      __ emit_int32(*start++);
    }
  }
  address end_of_patch = __ pc();

  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    assert(_obj != noreg, "must be a valid register");
    // Rtemp should be OK in C1
    __ ldr(Rtemp, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
    __ ldr(Rtemp, Address(Rtemp, InstanceKlass::init_thread_offset()));
    __ cmp(Rtemp, Rthread);
    __ b(call_patch, ne);
    __ b(_patch_site_continuation);

    bytes_to_skip += __ offset() - offset;
  }

  if (CommentedAssembly) {
    __ block_comment("patch data - 3 high bytes of the word");
  }
  const int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;
  // Patch record word: 0xff marker in the low byte, then (low to high) the
  // offset back to being_initialized_entry, the number of bytes to skip,
  // and the number of bytes to copy.
  __ emit_int32(0xff | being_initialized_entry_offset << 8 | bytes_to_skip << 16 | _bytes_to_copy << 24);

  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  // runtime call will return here
  Label call_return;
  __ bind(call_return);
  ce->add_call_info_here(_info);
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  __ b(_patch_site_entry);

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }

  // arrange for call to return just after patch word
  __ adr(LR, call_return);
  __ jump(target, relocInfo::runtime_call_type, Rtemp);

  if (is_load) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, pc, reloc_type, relocInfo::none);
  }
}

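
// DeoptimizeStub passes the trap request on the stack (all registers must
// be preserved) and calls Runtime1::deoptimize_id; the deoptimization
// runtime does not return here.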
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ mov_slow(Rtemp, _trap_request);
  ce->verify_reserved_argument_area_size(1);
  __ str(Rtemp, Address(SP));
  __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is
    // probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(a, relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  DEBUG_ONLY(STOP("ImplicitNullCheck");)
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // Pass the object on stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->verify_reserved_argument_area_size(1);
    __ str(_obj->as_pointer_register(), Address(SP));
  } else {
    assert(_obj->is_illegal(), "should be");
  }
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  DEBUG_ONLY(STOP("SimpleException");)
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5, true);

  Register r[5];
  r[0] = src()->as_pointer_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_pointer_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  for (int i = 0; i < 5; i++) {
    VMReg arg = args[i].first();
    if (arg->is_stack()) {
      __ str(r[i], Address(SP, arg->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      assert(r[i] == arg->as_Register(), "Calling conventions must match");
    }
  }

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  int ret_addr_offset = __ patchable_call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
  assert(ret_addr_offset == __ offset(), "embedded return address not allowed");
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());
  __ b(_continuation);
}

/////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.
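  // A null pre_val needs no SATB record; otherwise pre_val is passed on
  // the stack (all registers must be preserved) to the
  // g1_pre_barrier_slow_id runtime stub, which enqueues it for marking.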
  __ bind(_entry);
  assert(pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = pre_val()->as_register();

  if (do_load()) {
    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
  }

  __ cbz(pre_val_reg, _continuation);
  ce->verify_reserved_argument_area_size(1);
  __ str(pre_val_reg, Address(SP));
  __ call(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id), relocInfo::runtime_call_type);

  __ b(_continuation);
}

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register new_val_reg = new_val()->as_register();
  __ cbz(new_val_reg, _continuation);
  ce->verify_reserved_argument_area_size(1);
  __ str(addr()->as_pointer_register(), Address(SP));
  __ call(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id), relocInfo::runtime_call_type);
  __ b(_continuation);
}

#endif // INCLUDE_ALL_GCS
/////////////////////////////////////////////////////////////////////////////

#undef __