/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_arm.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_arm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "vmreg_arm.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#endif

// Note: Rtemp usage in this file should not impact C2 and should be
// correct as long as it is not implicitly used in lower layers (the
// arm [macro]assembler) and used with care in the other C1 specific
// files.

// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  mov(R0, Rthread);

  int call_offset = set_last_Java_frame(SP, FP, false, Rtemp);

  call(entry);
  if (call_offset == -1) { // PC not saved
    call_offset = offset();
  }
  reset_last_Java_frame(Rtemp);

  assert(frame_size() != no_frame_size, "frame must be fixed");
  if (_stub_id != Runtime1::forward_exception_id) {
    ldr(R3, Address(Rthread, Thread::pending_exception_offset()));
  }

  if (oop_result1->is_valid()) {
    assert_different_registers(oop_result1, R3, Rtemp);
    get_vm_result(oop_result1, Rtemp);
  }
  if (metadata_result->is_valid()) {
    assert_different_registers(metadata_result, R3, Rtemp);
    get_vm_result_2(metadata_result, Rtemp);
  }

  // Check for pending exception
  // unpack_with_exception_in_tls path is taken through
  // Runtime1::exception_handler_for_pc
  if (_stub_id != Runtime1::forward_exception_id) {
    assert(frame_size() != no_frame_size, "cannot directly call forward_exception_id");
#ifdef AARCH64
    Label skip;
    cbz(R3, skip);
    jump(Runtime1::entry_for(Runtime1::forward_exception_id), relocInfo::runtime_call_type, Rtemp);
    bind(skip);
#else
    cmp(R3, 0);
    jump(Runtime1::entry_for(Runtime1::forward_exception_id), relocInfo::runtime_call_type, Rtemp, ne);
#endif // AARCH64
  } else {
#ifdef ASSERT
    // Should not have pending exception in forward_exception stub
    ldr(R3, Address(Rthread, Thread::pending_exception_offset()));
    cmp(R3, 0);
    breakpoint(ne);
#endif // ASSERT
  }
  return call_offset;
}
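
// A typical stub-side use of call_RT looks roughly like the sketch below
// (illustrative only; the real stubs later in this file vary in which
// results they keep, which argument registers they load, and how they
// restore the saved registers):
//
//   OopMap* map = save_live_registers(sasm);
//   int call_offset = __ call_RT(R0, noreg, CAST_FROM_FN_PTR(address, entry_point), R1);
//   oop_maps->add_gc_map(call_offset, map);
//   restore_live_registers(sasm);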

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  if (arg1 != R1) {
    mov(R1, arg1);
  }
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  assert(arg1 == R1 && arg2 == R2, "cannot handle otherwise");
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  assert(arg1 == R1 && arg2 == R2 && arg3 == R3, "cannot handle otherwise");
  return call_RT(oop_result1, metadata_result, entry, 3);
}


#define __ sasm->

// TODO: ARM - does this duplicate RegisterSaver in SharedRuntime?
#ifdef AARCH64

//
// On AArch64 the register save area has the following layout:
//
// |---------------------|
// | return address (LR) |
// | FP                  |
// |---------------------|
// | D31                 |
// | ...                 |
// | D0                  |
// |---------------------|
// | padding             |
// |---------------------|
// | R28                 |
// | ...                 |
// | R0                  |
// |---------------------| <-- SP
//

enum RegisterLayout {
  number_of_saved_gprs = 29,
  number_of_saved_fprs = FloatRegisterImpl::number_of_registers,

  R0_offset = 0,
  D0_offset = R0_offset + number_of_saved_gprs + 1,
  FP_offset = D0_offset + number_of_saved_fprs,
  LR_offset = FP_offset + 1,

  reg_save_size = LR_offset + 1,

  arg1_offset = reg_save_size * wordSize,
  arg2_offset = (reg_save_size + 1) * wordSize
};

#else

enum RegisterLayout {
  fpu_save_size = pd_nof_fpu_regs_reg_alloc,
#ifndef __SOFTFP__
  D0_offset = 0,
#endif
  R0_offset = fpu_save_size,
  R1_offset,
  R2_offset,
  R3_offset,
  R4_offset,
  R5_offset,
  R6_offset,
#if (FP_REG_NUM != 7)
  R7_offset,
#endif
  R8_offset,
  R9_offset,
  R10_offset,
#if (FP_REG_NUM != 11)
  R11_offset,
#endif
  R12_offset,
  FP_offset,
  LR_offset,
  reg_save_size,

  arg1_offset = reg_save_size * wordSize,
  arg2_offset = (reg_save_size + 1) * wordSize
};

#endif // AARCH64
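
// For reference, the 32-bit ARM save area implied by the enum above looks
// like this (word offsets grow upward from SP; which of R7/R11 is elided
// depends on FP_REG_NUM, since that register is saved in the FP slot):
//
// |---------------------|
// | LR                  |
// | FP                  |
// | R12 ... R0          |  (one slot elided for the FP register)
// |---------------------|
// | FPU save area       |  (fpu_save_size words, D0 at the bottom)
// |---------------------| <-- SP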

static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers = HaveVFP) {
  sasm->set_frame_size(reg_save_size /* in words */);

  // Record saved value locations in an OopMap.
  // Locations are offsets from sp after runtime call.
  OopMap* map = new OopMap(VMRegImpl::slots_per_word * reg_save_size, 0);

#ifdef AARCH64
  for (int i = 0; i < number_of_saved_gprs; i++) {
    map->set_callee_saved(VMRegImpl::stack2reg((R0_offset + i) * VMRegImpl::slots_per_word), as_Register(i)->as_VMReg());
  }
  map->set_callee_saved(VMRegImpl::stack2reg(FP_offset * VMRegImpl::slots_per_word), FP->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(LR_offset * VMRegImpl::slots_per_word), LR->as_VMReg());

  if (save_fpu_registers) {
    for (int i = 0; i < number_of_saved_fprs; i++) {
      map->set_callee_saved(VMRegImpl::stack2reg((D0_offset + i) * VMRegImpl::slots_per_word), as_FloatRegister(i)->as_VMReg());
    }
  }
#else
  int j = 0;
  for (int i = R0_offset; i < R10_offset; i++) {
    if (j == FP_REG_NUM) {
      // skip the FP register, saved below
      j++;
    }
    map->set_callee_saved(VMRegImpl::stack2reg(i), as_Register(j)->as_VMReg());
    j++;
  }
  assert(j == R10->encoding(), "must be");
#if (FP_REG_NUM != 11)
  // add R11, if not saved as FP
  map->set_callee_saved(VMRegImpl::stack2reg(R11_offset), R11->as_VMReg());
#endif
  map->set_callee_saved(VMRegImpl::stack2reg(FP_offset), FP->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(LR_offset), LR->as_VMReg());

  if (save_fpu_registers) {
    for (int i = 0; i < fpu_save_size; i++) {
      map->set_callee_saved(VMRegImpl::stack2reg(i), as_FloatRegister(i)->as_VMReg());
    }
  }
#endif // AARCH64

  return map;
}

static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = HaveVFP) {
  __ block_comment("save_live_registers");
  sasm->set_frame_size(reg_save_size /* in words */);

#ifdef AARCH64
  assert((reg_save_size * wordSize) % StackAlignmentInBytes == 0, "SP should be aligned");

  __ raw_push(FP, LR);

  __ sub(SP, SP, (reg_save_size - 2) * wordSize);

  for (int i = 0; i < align_down((int)number_of_saved_gprs, 2); i += 2) {
    __ stp(as_Register(i), as_Register(i+1), Address(SP, (R0_offset + i) * wordSize));
  }

  if (is_odd(number_of_saved_gprs)) {
    int i = number_of_saved_gprs - 1;
    __ str(as_Register(i), Address(SP, (R0_offset + i) * wordSize));
  }

  if (save_fpu_registers) {
    assert(is_even(number_of_saved_fprs), "adjust this code");
    for (int i = 0; i < number_of_saved_fprs; i += 2) {
      __ stp_d(as_FloatRegister(i), as_FloatRegister(i+1), Address(SP, (D0_offset + i) * wordSize));
    }
  }
#else
  __ push(RegisterSet(FP) | RegisterSet(LR));
  __ push(RegisterSet(R0, R6) | RegisterSet(R8, R10) | R12 | altFP_7_11);
  if (save_fpu_registers) {
    __ fstmdbd(SP, FloatRegisterSet(D0, fpu_save_size / 2), writeback);
  } else {
    __ sub(SP, SP, fpu_save_size * wordSize);
  }
#endif // AARCH64

  return generate_oop_map(sasm, save_fpu_registers);
}
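
// Note: the push/store order in save_live_registers above must stay in sync
// with the RegisterLayout enum: FP/LR are saved first (so they land at the
// highest offsets), then the GPRs, and the FPU area (or an equivalently
// sized gap, when FPU registers are not saved) is reserved last, at the
// lowest addresses next to SP.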

static void restore_live_registers(StubAssembler* sasm,
                                   bool restore_R0,
                                   bool restore_FP_LR,
                                   bool do_return,
                                   bool restore_fpu_registers = HaveVFP) {
  __ block_comment("restore_live_registers");

#ifdef AARCH64
  if (restore_R0) {
    __ ldr(R0, Address(SP, R0_offset * wordSize));
  }

  assert(is_odd(number_of_saved_gprs), "adjust this code");
  for (int i = 1; i < number_of_saved_gprs; i += 2) {
    __ ldp(as_Register(i), as_Register(i+1), Address(SP, (R0_offset + i) * wordSize));
  }

  if (restore_fpu_registers) {
    assert(is_even(number_of_saved_fprs), "adjust this code");
    for (int i = 0; i < number_of_saved_fprs; i += 2) {
      __ ldp_d(as_FloatRegister(i), as_FloatRegister(i+1), Address(SP, (D0_offset + i) * wordSize));
    }
  }

  __ add(SP, SP, (reg_save_size - 2) * wordSize);

  if (restore_FP_LR) {
    __ raw_pop(FP, LR);
    if (do_return) {
      __ ret();
    }
  } else {
    assert(!do_return, "return without restoring FP/LR");
  }
#else
  if (restore_fpu_registers) {
    __ fldmiad(SP, FloatRegisterSet(D0, fpu_save_size / 2), writeback);
    if (!restore_R0) {
      __ add(SP, SP, (R1_offset - fpu_save_size) * wordSize);
    }
  } else {
    __ add(SP, SP, (restore_R0 ? fpu_save_size : R1_offset) * wordSize);
  }
  __ pop(RegisterSet((restore_R0 ? R0 : R1), R6) | RegisterSet(R8, R10) | R12 | altFP_7_11);
  if (restore_FP_LR) {
    __ pop(RegisterSet(FP) | RegisterSet(do_return ? PC : LR));
  } else {
    assert(!do_return, "return without restoring FP/LR");
  }
#endif // AARCH64
}


static void restore_live_registers_except_R0(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  restore_live_registers(sasm, false, true, true, restore_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  restore_live_registers(sasm, true, true, true, restore_fpu_registers);
}

#ifndef AARCH64
static void restore_live_registers_except_FP_LR(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  restore_live_registers(sasm, true, false, false, restore_fpu_registers);
}
#endif // !AARCH64

static void restore_live_registers_without_return(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  restore_live_registers(sasm, true, true, false, restore_fpu_registers);
}


void Runtime1::initialize_pd() {
}


OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  OopMap* oop_map = save_live_registers(sasm);

  if (has_argument) {
    __ ldr(R1, Address(SP, arg1_offset));
  }

  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  DEBUG_ONLY(STOP("generate_exception_throw");)  // Should not reach here
  return oop_maps;
}


static void restore_sp_for_method_handle(StubAssembler* sasm) {
  // Restore SP from its saved register (FP) if the exception PC is at a MethodHandle call site.
  __ ldr_s32(Rtemp, Address(Rthread, JavaThread::is_method_handle_return_offset()));
#ifdef AARCH64
  Label skip;
  __ cbz(Rtemp, skip);
  __ mov(SP, Rmh_SP_save);
  __ bind(skip);
#else
  __ cmp(Rtemp, 0);
  __ mov(SP, Rmh_SP_save, ne);
#endif // AARCH64
}
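
// generate_handle_exception produces the common exception-dispatch sequence:
// save the live registers, publish the exception oop and pc in the thread,
// overwrite the saved return address with the throwing pc, call
// exception_handler_for_pc to locate a handler, patch the handler address
// back into the return slot, and finally restore registers and "return"
// straight into the handler.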

OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
  __ block_comment("generate_handle_exception");

  bool save_fpu_registers = false;

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;

  switch (id) {
  case forward_exception_id: {
    save_fpu_registers = HaveVFP;
    oop_map = generate_oop_map(sasm);
    __ ldr(Rexception_obj, Address(Rthread, Thread::pending_exception_offset()));
    __ ldr(Rexception_pc, Address(SP, LR_offset * wordSize));
    Register zero = __ zero_register(Rtemp);
    __ str(zero, Address(Rthread, Thread::pending_exception_offset()));
    break;
  }
  case handle_exception_id:
    save_fpu_registers = HaveVFP;
    // fall-through
  case handle_exception_nofpu_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, save_fpu_registers);
    break;
  case handle_exception_from_callee_id:
    // At this point all registers except the exception oop (R4/R19) and
    // the exception pc (R5/R20) are dead.
    oop_map = save_live_registers(sasm);  // TODO it's not required to save all registers
    break;
  default: ShouldNotReachHere();
  }

  __ str(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
  __ str(Rexception_pc, Address(Rthread, JavaThread::exception_pc_offset()));

  __ str(Rexception_pc, Address(SP, LR_offset * wordSize)); // patch throwing pc into return address

  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // Exception handler found
  __ str(R0, Address(SP, LR_offset * wordSize)); // patch the return address

  // Restore the registers that were saved at the beginning, remove
  // the frame and jump to the exception handler.
  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    restore_live_registers(sasm, save_fpu_registers);
    // Note: restore_live_registers includes the jump to LR (patched to R0)
    break;
  case handle_exception_from_callee_id:
    restore_live_registers_without_return(sasm); // must not jump immediately to the handler
    restore_sp_for_method_handle(sasm);
    __ ret();
    break;
  default: ShouldNotReachHere();
  }

  DEBUG_ONLY(STOP("generate_handle_exception");)  // Should not reach here

  return oop_maps;
}


void Runtime1::generate_unwind_exception(StubAssembler* sasm) {
  // FP is no longer used to find the frame start:
  // on entry, remove_frame() has already been called (restoring FP and LR)

  // search for the exception handler address of the caller (using the return address)
  __ mov(c_rarg0, Rthread);
  __ mov(Rexception_pc, LR);
  __ mov(c_rarg1, LR);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), c_rarg0, c_rarg1);

  // The exception oop should still be in Rexception_obj and the pc in Rexception_pc
  // Jump to handler
  __ verify_not_null_oop(Rexception_obj);

  // JSR292 extension
  restore_sp_for_method_handle(sasm);

  __ jump(R0);
}
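
// The patching stubs built on generate_patching share one shape: save
// everything, call the given runtime patching routine for the current
// nmethod, then either return normally or, if the nmethod was deoptimized
// while we were in the runtime, re-enter it through the deoptimization
// blob's reexecution entry.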

OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  OopMap* oop_map = save_live_registers(sasm);

  // call the runtime patching routine; it returns non-zero if the nmethod got deopted.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  __ cmp_32(R0, 0);

#ifdef AARCH64
  Label call_deopt;

  restore_live_registers_without_return(sasm);
  __ b(call_deopt, ne);
  __ ret();

  __ bind(call_deopt);
#else
  restore_live_registers_except_FP_LR(sasm);
  __ pop(RegisterSet(FP) | RegisterSet(PC), eq);

  // Deoptimization needed
  // TODO: ARM - no need to restore FP & LR because unpack_with_reexecution() stores them back
  __ pop(RegisterSet(FP) | RegisterSet(LR));
#endif // AARCH64

  __ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, Rtemp);

  DEBUG_ONLY(STOP("generate_patching");)  // Should not reach here
  return oop_maps;
}


OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  OopMapSet* oop_maps = NULL;
  bool save_fpu_registers = HaveVFP;

  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        // does not return on ARM
      }
      break;

#if INCLUDE_ALL_GCS
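    // G1 SATB pre-barrier slow path. While concurrent marking is active,
    // stores must log the value being overwritten (pre_val) into the
    // thread-local SATB queue. The queue index counts down in words; when
    // it underflows the queue is full and pre_val is handed to the runtime
    // (SharedRuntime::g1_wb_pre) instead.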
    case g1_pre_barrier_slow_id:
      {
        // Input:
        // - pre_val pushed on the stack

        __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);

        BarrierSet* bs = BarrierSet::barrier_set();
        if (bs->kind() != BarrierSet::G1BarrierSet) {
          __ mov(R0, (int)id);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), R0);
          __ should_not_reach_here();
          break;
        }

        // save at least the registers that need saving if the runtime is called
#ifdef AARCH64
        __ raw_push(R0, R1);
        __ raw_push(R2, R3);
        const int nb_saved_regs = 4;
#else // AARCH64
        const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
        const int nb_saved_regs = 6;
        assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
        __ push(saved_regs);
#endif // AARCH64

        const Register r_pre_val_0 = R0; // must be R0, to be ready for the runtime call
        const Register r_index_1   = R1;
        const Register r_buffer_2  = R2;

        Address queue_active(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
        Address queue_index(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
        Address buffer(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));

        Label done;
        Label runtime;

        // Is marking still active?
        assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
        __ ldrb(R1, queue_active);
        __ cbz(R1, done);

        __ ldr(r_index_1, queue_index);
        __ ldr(r_pre_val_0, Address(SP, nb_saved_regs*wordSize));
        __ ldr(r_buffer_2, buffer);

        __ subs(r_index_1, r_index_1, wordSize);
        __ b(runtime, lt);

        __ str(r_index_1, queue_index);
        __ str(r_pre_val_0, Address(r_buffer_2, r_index_1));

        __ bind(done);

#ifdef AARCH64
        __ raw_pop(R2, R3);
        __ raw_pop(R0, R1);
#else // AARCH64
        __ pop(saved_regs);
#endif // AARCH64

        __ ret();

        __ bind(runtime);

        save_live_registers(sasm);

        assert(r_pre_val_0 == c_rarg0, "pre_val should be in R0");
        __ mov(c_rarg1, Rthread);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), c_rarg0, c_rarg1);

        restore_live_registers_without_return(sasm);

        __ b(done);
      }
      break;
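
    // G1 post-barrier slow path. After a cross-region oop store, the card
    // covering the store address must be dirtied and enqueued on the
    // thread-local dirty card queue. Young-region cards are filtered out
    // up front; otherwise a StoreLoad barrier orders the oop store against
    // the card re-read, and a full queue falls back to the runtime
    // (SharedRuntime::g1_wb_post).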
    case g1_post_barrier_slow_id:
      {
        // Input:
        // - store_addr, pushed on the stack

        __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);

        BarrierSet* bs = BarrierSet::barrier_set();
        if (bs->kind() != BarrierSet::G1BarrierSet) {
          __ mov(R0, (int)id);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), R0);
          __ should_not_reach_here();
          break;
        }

        Label done;
        Label recheck;
        Label runtime;

        Address queue_index(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
        Address buffer(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));

        AddressLiteral cardtable(ci_card_table_address_as<address>(), relocInfo::none);

        // save at least the registers that need saving if the runtime is called
#ifdef AARCH64
        __ raw_push(R0, R1);
        __ raw_push(R2, R3);
        const int nb_saved_regs = 4;
#else // AARCH64
        const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
        const int nb_saved_regs = 6;
        assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
        __ push(saved_regs);
#endif // AARCH64

        const Register r_card_addr_0 = R0; // must be R0 for the slow case
        const Register r_obj_0       = R0;
        const Register r_card_base_1 = R1;
        const Register r_tmp2        = R2;
        const Register r_index_2     = R2;
        const Register r_buffer_3    = R3;
        const Register tmp1          = Rtemp;

        __ ldr(r_obj_0, Address(SP, nb_saved_regs*wordSize));
        // Note: there is a comment in the x86 code about not using
        // ExternalAddress / lea, due to relocation not working
        // properly for that address. Should be OK for arm, where we
        // explicitly specify that 'cardtable' has a relocInfo::none
        // type.
        __ lea(r_card_base_1, cardtable);
        __ add(r_card_addr_0, r_card_base_1, AsmOperand(r_obj_0, lsr, CardTable::card_shift));

        // first quick check without barrier
        __ ldrb(r_tmp2, Address(r_card_addr_0));

        __ cmp(r_tmp2, (int)G1CardTable::g1_young_card_val());
        __ b(recheck, ne);

        __ bind(done);

#ifdef AARCH64
        __ raw_pop(R2, R3);
        __ raw_pop(R0, R1);
#else // AARCH64
        __ pop(saved_regs);
#endif // AARCH64

        __ ret();

        __ bind(recheck);

        __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), tmp1);

        // reload the card state after the barrier that ensures the stored oop was visible
        __ ldrb(r_tmp2, Address(r_card_addr_0));

        assert(CardTable::dirty_card_val() == 0, "adjust this code");
        __ cbz(r_tmp2, done);

        // We are storing a region-crossing non-NULL oop and the card is clean:
        // dirty the card and log it.

        assert(0 == (int)CardTable::dirty_card_val(), "adjust this code");
        if ((ci_card_table_address_as<intptr_t>() & 0xff) == 0) {
          // Card table is aligned so the lowest byte of the table address base is zero.
          __ strb(r_card_base_1, Address(r_card_addr_0));
        } else {
          __ strb(__ zero_register(r_tmp2), Address(r_card_addr_0));
        }

        __ ldr(r_index_2, queue_index);
        __ ldr(r_buffer_3, buffer);

        __ subs(r_index_2, r_index_2, wordSize);
        __ b(runtime, lt); // go to runtime if now negative

        __ str(r_index_2, queue_index);

        __ str(r_card_addr_0, Address(r_buffer_3, r_index_2));

        __ b(done);

        __ bind(runtime);

        save_live_registers(sasm);

        assert(r_card_addr_0 == c_rarg0, "card_addr should be in R0");
        __ mov(c_rarg1, Rthread);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), c_rarg0, c_rarg1);

        restore_live_registers_without_return(sasm);

        __ b(done);
      }
      break;
#endif // INCLUDE_ALL_GCS
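
    // Instance allocation. These stubs are reached with the klass in R1
    // after a TLAB allocation has failed; when inline contiguous (eden)
    // allocation is enabled, the fast-allocation variants retry directly
    // in eden before falling back to the runtime.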
    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        const Register result = R0;
        const Register klass  = R1;

        if (UseTLAB && Universe::heap()->supports_inline_contig_alloc() && id != new_instance_id) {
          // We come here when TLAB allocation failed.
          // In this case we try to allocate directly from eden.
          Label slow_case, slow_case_no_pop;

          // Make sure the class is fully initialized
          if (id == fast_new_instance_init_check_id) {
            __ ldrb(result, Address(klass, InstanceKlass::init_state_offset()));
            __ cmp(result, InstanceKlass::fully_initialized);
            __ b(slow_case_no_pop, ne);
          }

          // Free some temporary registers
          const Register obj_size = R4;
          const Register tmp1     = R5;
          const Register tmp2     = LR;
          const Register obj_end  = Rtemp;

          __ raw_push(R4, R5, LR);

          __ ldr_u32(obj_size, Address(klass, Klass::layout_helper_offset()));
          __ eden_allocate(result, obj_end, tmp1, tmp2, obj_size, slow_case); // initializes result and obj_end
          __ incr_allocated_bytes(obj_size, tmp2);
          __ initialize_object(result, obj_end, klass, noreg /* len */, tmp1, tmp2,
                               instanceOopDesc::header_size() * HeapWordSize, -1,
                               /* is_tlab_allocated */ false);
          __ raw_pop_and_ret(R4, R5);

          __ bind(slow_case);
          __ raw_pop(R4, R5, LR);

          __ bind(slow_case_no_pop);
        }

        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        // MacroAssembler::StoreStore useless (included in the runtime exit path)

        restore_live_registers_except_R0(sasm);
      }
      break;

    case counter_overflow_id:
      {
        OopMap* oop_map = save_live_registers(sasm);
        __ ldr(R1, Address(SP, arg1_offset));
        __ ldr(R2, Address(SP, arg2_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), R1, R2);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
      }
      break;
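
    // Array allocation. The size computation below decodes Klass::layout_helper:
    // its low byte holds log2(element size), so the body size is
    // length << (layout_helper & 0xff) bytes (the register shift below uses
    // only the low byte), the header size in bytes sits in the bit field at
    // _lh_header_size_shift, and the total is rounded up to the object
    // alignment.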
    case new_type_array_id:
    case new_object_array_id:
      {
        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

        const Register result = R0;
        const Register klass  = R1;
        const Register length = R2;

        if (UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
          // We come here when TLAB allocation failed.
          // In this case we try to allocate directly from eden.
          Label slow_case, slow_case_no_pop;

#ifdef AARCH64
          __ mov_slow(Rtemp, C1_MacroAssembler::max_array_allocation_length);
          __ cmp_32(length, Rtemp);
#else
          __ cmp_32(length, C1_MacroAssembler::max_array_allocation_length);
#endif // AARCH64
          __ b(slow_case_no_pop, hs);

          // Free some temporary registers
          const Register arr_size = R4;
          const Register tmp1     = R5;
          const Register tmp2     = LR;
          const Register tmp3     = Rtemp;
          const Register obj_end  = tmp3;

          __ raw_push(R4, R5, LR);

          // Get the allocation size: round_up((length << (layout_helper & 0xff)) + header_size)
          __ ldr_u32(tmp1, Address(klass, Klass::layout_helper_offset()));
          __ mov(arr_size, MinObjAlignmentInBytesMask);
          __ and_32(tmp2, tmp1, (unsigned int)(Klass::_lh_header_size_mask << Klass::_lh_header_size_shift));

#ifdef AARCH64
          __ lslv_w(tmp3, length, tmp1);
          __ add(arr_size, arr_size, tmp3);
#else
          __ add(arr_size, arr_size, AsmOperand(length, lsl, tmp1));
#endif // AARCH64

          __ add(arr_size, arr_size, AsmOperand(tmp2, lsr, Klass::_lh_header_size_shift));
          __ align_reg(arr_size, arr_size, MinObjAlignmentInBytes);

          // eden_allocate destroys tmp2, so reload header_size after allocation
          // eden_allocate initializes result and obj_end
          __ eden_allocate(result, obj_end, tmp1, tmp2, arr_size, slow_case);
          __ incr_allocated_bytes(arr_size, tmp2);
          __ ldrb(tmp2, Address(klass, in_bytes(Klass::layout_helper_offset()) +
                                       Klass::_lh_header_size_shift / BitsPerByte));
          __ initialize_object(result, obj_end, klass, length, tmp1, tmp2, tmp2, -1, /* is_tlab_allocated */ false);
          __ raw_pop_and_ret(R4, R5);

          __ bind(slow_case);
          __ raw_pop(R4, R5, LR);
          __ bind(slow_case_no_pop);
        }

        OopMap* map = save_live_registers(sasm);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        // MacroAssembler::StoreStore useless (included in the runtime exit path)

        restore_live_registers_except_R0(sasm);
      }
      break;

    case new_multi_array_id:
      {
        __ set_info("new_multi_array", dont_gc_arguments);

        // R0: klass
        // R2: rank
        // SP: address of 1st dimension
        const Register result = R0;
        OopMap* map = save_live_registers(sasm);

        __ mov(R1, R0);
        __ add(R3, SP, arg1_offset);
        int call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_multi_array), R1, R2, R3);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        // MacroAssembler::StoreStore useless (included in the runtime exit path)

        restore_live_registers_except_R0(sasm);
      }
      break;
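
    // register_finalizer only needs the runtime when the receiver's klass
    // actually has a finalizer: the JVM_ACC_HAS_FINALIZER bit of the klass
    // access flags is tested first, so the common (no-finalizer) case
    // returns without building a frame.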
    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // Do not call the runtime if the JVM_ACC_HAS_FINALIZER flag is not set
        __ load_klass(Rtemp, R0);
        __ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));

#ifdef AARCH64
        Label L;
        __ tbnz(Rtemp, exact_log2(JVM_ACC_HAS_FINALIZER), L);
        __ ret();
        __ bind(L);
#else
        __ tst(Rtemp, JVM_ACC_HAS_FINALIZER);
        __ bx(LR, eq);
#endif // AARCH64

        // Call VM
        OopMap* map = save_live_registers(sasm);
        oop_maps = new OopMapSet();
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), R0);
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
      }
      break;

    case throw_range_check_failed_id:
      {
        __ set_info("range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      {
        __ set_info("index_range_check_failed", dont_gc_arguments);
#ifdef AARCH64
        __ NOT_TESTED();
#endif
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      {
        __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      {
        __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      {
        __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      {
        __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      {
        __ set_info("unwind_exception", dont_gc_arguments);
        generate_unwind_exception(sasm);
      }
      break;

    case throw_array_store_exception_id:
      {
        __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      {
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      {
        __ set_info("throw_incompatible_class_change_error", dont_gc_arguments);
#ifdef AARCH64
        __ NOT_TESTED();
#endif
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;
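
    // slow_subtype_check scans the secondary_supers array of the sub-klass
    // linearly for the super-klass; on a hit it also primes
    // secondary_super_cache so that the next check for the same pair can
    // take the fast path.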
    case slow_subtype_check_id:
      {
        // (in)  R0 - sub, destroyed,
        // (in)  R1 - super, not changed
        // (out) R0 - result: 1 if the check passed, 0 otherwise
        __ raw_push(R2, R3, LR);

        // Load the array of secondary_supers
        __ ldr(R2, Address(R0, Klass::secondary_supers_offset()));
        // Length goes to R3
        __ ldr_s32(R3, Address(R2, Array<Klass*>::length_offset_in_bytes()));
        __ add(R2, R2, Array<Klass*>::base_offset_in_bytes());

        Label loop, miss;
        __ bind(loop);
        __ cbz(R3, miss);
        __ ldr(LR, Address(R2, wordSize, post_indexed));
        __ sub(R3, R3, 1);
        __ cmp(LR, R1);
        __ b(loop, ne);

        // We get here if an equal cache entry is found
        __ str(R1, Address(R0, Klass::secondary_super_cache_offset()));
        __ mov(R0, 1);
        __ raw_pop_and_ret(R2, R3);

        // A cache entry was not found - return false
        __ bind(miss);
        __ mov(R0, 0);
        __ raw_pop_and_ret(R2, R3);
      }
      break;

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        __ set_info("monitorenter", dont_gc_arguments);
        const Register obj  = R1;
        const Register lock = R2;
        OopMap* map = save_live_registers(sasm, save_fpu_registers);
        __ ldr(obj, Address(SP, arg1_offset));
        __ ldr(lock, Address(SP, arg2_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), obj, lock);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        __ set_info("monitorexit", dont_gc_arguments);
        const Register lock = R1;
        OopMap* map = save_live_registers(sasm, save_fpu_registers);
        __ ldr(lock, Address(SP, arg1_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), lock);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case deoptimize_id:
      {
        __ set_info("deoptimize", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);
        const Register trap_request = R1;
        __ ldr(trap_request, Address(SP, arg1_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), trap_request);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers_without_return(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, AARCH64_ONLY(Rtemp) NOT_AARCH64(noreg));
      }
      break;

    case access_field_patching_id:
      {
        __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      {
        __ set_info("load_klass_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_appendix_patching_id:
      {
        __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case load_mirror_patching_id:
      {
        __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);

        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        restore_live_registers_without_return(sasm);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, Rtemp);
      }
      break;

    default:
      {
        __ set_info("unimplemented entry", dont_gc_arguments);
        STOP("unimplemented entry");
      }
      break;
  }
  return oop_maps;
}

#undef __
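
// pd_name_for_address maps the entry address of a runtime helper back to a
// printable name. On __SOFTFP__ builds the AEABI/SharedRuntime float helpers
// below can show up as call targets in C1-generated code, so each one gets a
// case; otherwise a generic placeholder string is returned.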
__ set_info("unimplemented entry", dont_gc_arguments); 1135 STOP("unimplemented entry"); 1136 } 1137 break; 1138 } 1139 return oop_maps; 1140 } 1141 1142 #undef __ 1143 1144 #ifdef __SOFTFP__ 1145 const char *Runtime1::pd_name_for_address(address entry) { 1146 1147 #define FUNCTION_CASE(a, f) \ 1148 if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f)) return #f 1149 1150 FUNCTION_CASE(entry, __aeabi_fadd_glibc); 1151 FUNCTION_CASE(entry, __aeabi_fmul); 1152 FUNCTION_CASE(entry, __aeabi_fsub_glibc); 1153 FUNCTION_CASE(entry, __aeabi_fdiv); 1154 1155 // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269. 1156 FUNCTION_CASE(entry, __aeabi_dadd_glibc); 1157 FUNCTION_CASE(entry, __aeabi_dmul); 1158 FUNCTION_CASE(entry, __aeabi_dsub_glibc); 1159 FUNCTION_CASE(entry, __aeabi_ddiv); 1160 1161 FUNCTION_CASE(entry, __aeabi_f2d); 1162 FUNCTION_CASE(entry, __aeabi_d2f); 1163 FUNCTION_CASE(entry, __aeabi_i2f); 1164 FUNCTION_CASE(entry, __aeabi_i2d); 1165 FUNCTION_CASE(entry, __aeabi_f2iz); 1166 1167 FUNCTION_CASE(entry, SharedRuntime::fcmpl); 1168 FUNCTION_CASE(entry, SharedRuntime::fcmpg); 1169 FUNCTION_CASE(entry, SharedRuntime::dcmpl); 1170 FUNCTION_CASE(entry, SharedRuntime::dcmpg); 1171 1172 FUNCTION_CASE(entry, SharedRuntime::unordered_fcmplt); 1173 FUNCTION_CASE(entry, SharedRuntime::unordered_dcmplt); 1174 FUNCTION_CASE(entry, SharedRuntime::unordered_fcmple); 1175 FUNCTION_CASE(entry, SharedRuntime::unordered_dcmple); 1176 FUNCTION_CASE(entry, SharedRuntime::unordered_fcmpge); 1177 FUNCTION_CASE(entry, SharedRuntime::unordered_dcmpge); 1178 FUNCTION_CASE(entry, SharedRuntime::unordered_fcmpgt); 1179 FUNCTION_CASE(entry, SharedRuntime::unordered_dcmpgt); 1180 1181 FUNCTION_CASE(entry, SharedRuntime::fneg); 1182 FUNCTION_CASE(entry, SharedRuntime::dneg); 1183 1184 FUNCTION_CASE(entry, __aeabi_fcmpeq); 1185 FUNCTION_CASE(entry, __aeabi_fcmplt); 1186 FUNCTION_CASE(entry, __aeabi_fcmple); 1187 FUNCTION_CASE(entry, __aeabi_fcmpge); 1188 FUNCTION_CASE(entry, __aeabi_fcmpgt); 1189 1190 FUNCTION_CASE(entry, __aeabi_dcmpeq); 1191 FUNCTION_CASE(entry, __aeabi_dcmplt); 1192 FUNCTION_CASE(entry, __aeabi_dcmple); 1193 FUNCTION_CASE(entry, __aeabi_dcmpge); 1194 FUNCTION_CASE(entry, __aeabi_dcmpgt); 1195 #undef FUNCTION_CASE 1196 return ""; 1197 } 1198 #else // __SOFTFP__ 1199 const char *Runtime1::pd_name_for_address(address entry) { 1200 return "<unknown function>"; 1201 } 1202 #endif // __SOFTFP__