/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
// This file is a derivative work resulting from (and including) modifications
// made by Azul Systems, Inc. The dates of such changes are 2013-2016.
// Copyright 2013-2016 Azul Systems, Inc. All Rights Reserved.
//
// Please contact Azul Systems, 385 Moffett Park Drive, Suite 115, Sunnyvale,
// CA 94089 USA or visit www.azul.com if you need additional information or
// have any questions.

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_aarch32.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch32.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif

#define __ ce->masm()->

#define should_not_reach_here() should_not_reach_here_line(__FILE__, __LINE__)

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_method->as_register(), 1);
  ce->store_parameter(_bci, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ far_call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  if (_index->is_cpu_register()) {
    __ mov(rscratch1, _index->as_register());
  } else {
    __ mov(rscratch1, _index->as_jint());
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(stub_id)), NULL, rscratch2);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ far_call(Address(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ mov(r3, _klass_reg->as_register());
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == r6, "length must be in r6");
  assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == r6, "length must be in r6");
  assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}

// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by the fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ adr(lr, _continuation);
  __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
}


// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

int PatchingStub::_patch_info_offset = 0;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // NativeCall::instruction_size is calculated dynamically based on the CPU:
  // 3 instructions on ARMv7, 5 instructions on ARMv6. Initialize _patch_info_offset
  // here, once the CPU has been determined.
  if (!_patch_info_offset)
    _patch_info_offset = -NativeCall::instruction_size;
  assert(_patch_info_offset == -NativeCall::instruction_size, "must not change");
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");

  Label call_patch;

  // static field accesses have special semantics while the class
  // initializer is being run so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
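  //
  // Rough layout of the stub emitted below:
  //   <patch template>            copy of the code being patched, or a fresh
  //                               mov_metadata/movoop for the load_klass,
  //                               load_mirror and load_appendix cases
  //   <patching switch>           a branch to the next instruction, replaced
  //                               mt-safely via NativeGeneralJump::replace_mt_safe
  //   <being_initialized check>   load_mirror_id only; jumps back into the
  //                               nmethod when run by the initializing thread
  //   <4-byte patch record>       offsets the runtime uses to find the pieces
  //                               of the patch
  //   <patch entry point>         call into the Runtime1 patching stub,
  //                               followed by a jump back to the patch site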
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    Metadata* o = NULL;
    __ mov_metadata(_obj, o);
    __ nop(); // added to call site by LIR_Assembler::patching_epilog
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // produce a copy of the load mirror instruction for use by the being
    // initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    jobject o = NULL;
    __ movoop(_obj, o, true);
    __ nop(); // added to call site by LIR_Assembler::patching_epilog
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched.
    assert(_bytes_to_copy % BytesPerWord == 0, "all instructions are 4 bytes");
    assert(((unsigned long) _pc_start) % BytesPerWord == 0, "patch offset should be aligned");
    const int words_to_copy = _bytes_to_copy / BytesPerWord;
    for (int i = 0; i < words_to_copy; i++) {
      int *ptr = ((int *) _pc_start) + i;
      __ emit_int32(*ptr);
      *ptr = 0xe320f000; // make the site look like a nop
    }
  }

  int bytes_to_skip = _bytes_to_copy;

  // this switch will be patched by NativeGeneralJump::replace_mt_safe;
  // it is intended to distinguish entries via being_initialized_entry
  // from entries via the call site
  int switch_offset = __ offset();
  Label patching_switch;
  __ b(patching_switch);
  __ bind(patching_switch);
  bytes_to_skip += __ offset() - switch_offset;

  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != noreg, "must be a valid register");
    // Load without verification to keep code size small. We need it because
    // being_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
    __ ldr(rscratch1, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
    __ ldr(rscratch1, Address(rscratch1, InstanceKlass::init_thread_offset()));
    __ cmp(rthread, rscratch1);
    __ b(call_patch, Assembler::NE);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ b(_patch_site_continuation);
    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but it has to be
  // aligned as an instruction so emit 4 bytes.
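  // Byte layout of the record emitted below:
  //   [0] 0
  //   [1] being_initialized_entry_offset
  //   [2] bytes_to_skip
  //   [3] 0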
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(0);

  address patch_info_pc = __ pc();

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ mov(rscratch1, RuntimeAddress(target));
  __ bl(rscratch1);
  // pad with nops to globally known upper bound of patch site size
  while (patch_info_pc - __ pc() < _patch_info_offset)
    __ nop();
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change, required by shared code");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ b(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world.
  for (int j = __ offset(); j < jmp_off + NativeCall::instruction_size; j += NativeInstruction::arm_insn_sz) {
    __ nop();
  }

  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object in a scratch register because all other registers
  // must be preserved
  if (_obj->is_cpu_register()) {
    __ mov(rscratch1, _obj->as_register());
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub)), NULL, rscratch2);
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5, true);

  // push parameters
  // (src, src_pos, dst, dst_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // next registers will get stored on the stack
  for (int i = 0; i < 5; i++) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ str(r[i], Address(sp, st_off));
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  Address resolve(SharedRuntime::get_resolve_static_call_stub(),
                  relocInfo::static_call_type);
  __ trampoline_call(resolve);
  ce->add_call_info_here(info());

#ifndef PRODUCT
  __ lea(rscratch2, ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
  __ increment(Address(rscratch2));
#endif

  __ b(_continuation);
}


/////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.
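  // If pre_val is null there is nothing to record, so we branch straight to
  // _continuation; otherwise pre_val is passed as a stack parameter to the
  // g1_pre_barrier_slow runtime stub.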

  __ bind(_entry);
  assert(pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = pre_val()->as_register();

  if (do_load()) {
    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
  }
  __ cbz(pre_val_reg, _continuation);
  ce->store_parameter(pre_val()->as_register(), 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
  __ b(_continuation);
}

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register new_val_reg = new_val()->as_register();
  __ cbz(new_val_reg, _continuation);
  ce->store_parameter(addr()->as_pointer_register(), 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
  __ b(_continuation);
}

#endif // INCLUDE_ALL_GCS
/////////////////////////////////////////////////////////////////////////////

#undef __