/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * Copyright (c) 2015-2018, Azul Systems, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_aarch32.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_aarch32.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_aarch32.inline.hpp"

// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  // setup registers
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
  assert(oop_result1 != rthread && metadata_result != rthread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");

  mov(c_rarg0, rthread);
  set_num_rt_args(0); // Nothing on stack

  Label retaddr;
  set_last_Java_frame(sp, rfp, retaddr, rscratch1);

  // do the call
  lea(rscratch1, RuntimeAddress(entry));
  bl(rscratch1);
  bind(retaddr);
  int call_offset = offset();
  // verify callee-saved register
#ifdef ASSERT
  push(r0, sp);
  { Label L;
    get_thread(r0);
    cmp(rthread, r0);
    b(L, Assembler::EQ);
    stop("StubAssembler::call_RT: rthread not callee saved?");
    bind(L);
  }
  pop(r0, sp);
#endif
  reset_last_Java_frame(true);
  maybe_isb();

  // check for pending exceptions
  { Label L;
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
    cbz(rscratch1, L);
    mov(rscratch1, 0);
    // exception pending => remove activation and forward to exception handler
    // make sure that the vm_results are cleared
    if (oop_result1->is_valid()) {
      str(rscratch1, Address(rthread, JavaThread::vm_result_offset()));
    }
    if (metadata_result->is_valid()) {
      str(rscratch1, Address(rthread, JavaThread::vm_result_2_offset()));
    }
    if (frame_size() == no_frame_size) {
      leave();
      far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1, rthread);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result, rthread);
  }
  return call_offset;
}
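
// Illustrative usage sketch (some_entry is a hypothetical runtime entry, not
// a name from this file): the stub cases in generate_code_for below pair
// call_RT with an oop map recorded at the returned offset, which is the PC of
// the instruction after the call:
//
//   OopMap* map = save_live_registers(sasm);
//   int call_offset = __ call_RT(noreg, noreg,
//                                CAST_FROM_FN_PTR(address, some_entry));
//   oop_maps = new OopMapSet();
//   oop_maps->add_gc_map(call_offset, map);
//   restore_live_registers(sasm);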


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  mov(c_rarg1, arg1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  if (c_rarg1 == arg2) {
    if (c_rarg2 == arg1) {
      // the arguments are exactly crossed: swap them through rscratch1
      mov(rscratch1, arg1);
      mov(arg1, arg2);
      mov(arg2, rscratch1);
    } else {
      // move arg2 out of c_rarg1 before c_rarg1 is overwritten
      mov(c_rarg2, arg2);
      mov(c_rarg1, arg1);
    }
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
  }
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  // if there is any conflict use the stack
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
    push(arg2);
    push(arg3);
    push(arg1);
    pop(c_rarg1);
    pop(c_rarg3);
    pop(c_rarg2);
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
    mov(c_rarg3, arg3);
  }
  return call_RT(oop_result1, metadata_result, entry, 3);
}

// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
  void load_argument(int offset_in_words, Register reg);

  ~StubFrame();
};

void StubAssembler::prologue(const char* name, bool must_gc_arguments) {
  set_info(name, must_gc_arguments);
  enter();
}

void StubAssembler::epilogue() {
  leave();
  ret(lr);
}

#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
  _sasm = sasm;
  __ prologue(name, must_gc_arguments);
}

// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
  __ load_parameter(offset_in_words, reg);
}


StubFrame::~StubFrame() {
  __ epilogue();
}

#undef __
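
// Illustrative pairing (hypothetical operands): arguments for StubFrame-based
// stubs are not passed in C ABI registers. The call site writes them with
// LIR_Assembler::store_parameter and the stub reads them back with
// load_argument at the matching word offset, e.g. for monitorenter:
//
//   // call site:                    // stub:
//   // store_parameter(lock, 0);     f.load_argument(0, r1);  // lock address
//   // store_parameter(obj,  1);     f.load_argument(1, r0);  // object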


// Implementation of Runtime1

#define __ sasm->


// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization).
// Note that users of this frame may well have arguments to some runtime
// while these values are on the stack. These positions neglect those arguments
// but the code in save_live_registers will take the argument count into
// account.
//

enum reg_save_layout {
  reg_save_s0,
  reg_save_s31 = reg_save_s0 + FrameMap::nof_fpu_regs - 1,
  reg_save_pad, // to align to doubleword to simplify conformance to APCS
  reg_save_r0,
  reg_save_r1,
  reg_save_r2,
  reg_save_r3,
  reg_save_r4,
  reg_save_r5,
  reg_save_r6,
  reg_save_r7,
  reg_save_r8,
  reg_save_r9,
  reg_save_r10,
  reg_save_r11,
  reg_save_r12,
  reg_save_frame_size
  // remaining words pushed by enter
};

// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FP registers.  In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them.  The
// deopt blob is the only thing which needs to describe FPU registers.
// In all other cases it should be sufficient to simply save their
// current value.

static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
static int reg_save_size_in_words;
static int frame_size_in_bytes = -1;

static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  int frame_size_in_bytes = (reg_save_frame_size + frame::get_frame_size()) * BytesPerWord;
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r0),  r0->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r1),  r1->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r2),  r2->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r3),  r3->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r4),  r4->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r5),  r5->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r6),  r6->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r7),  r7->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r8),  r8->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r9),  r9->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r10), r10->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r11), r11->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r12), r12->as_VMReg());
  if (hasFPU()) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; ++i) {
      oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_s0 + i), as_FloatRegister(i)->as_VMReg());
    }
  }

  return oop_map;
}

static OopMap* save_live_registers(StubAssembler* sasm,
                                   bool save_fpu_registers = true) {
  __ block_comment("save_live_registers");

  __ push(RegSet::range(r0, r12), sp);  // integer registers except lr & sp
  __ sub(sp, sp, 4);                    // align to 8 bytes

  if (save_fpu_registers && hasFPU()) {
    // store d0..d15 (i.e. s0..s31); the mask has one bit per double register
    __ vstmdb_f64(sp, (1 << FrameMap::nof_fpu_regs / 2) - 1);
  } else {
    __ sub(sp, sp, FrameMap::nof_fpu_regs * 4);
  }

  return generate_oop_map(sasm, save_fpu_registers);
}
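
// Illustrative arithmetic, assuming FrameMap::nof_fpu_regs == 32 and a
// two-word enter() frame: reg_save_frame_size = 32 FPU slots + 1 pad slot
// + 13 GPR slots = 46 words, i.e. a 184-byte register-save area; the oop map
// then describes 46 + 2 = 48 words (192 bytes, doubleword aligned as APCS
// requires).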

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {

  if (restore_fpu_registers && hasFPU()) {
    __ vldmia_f64(sp, (1 << FrameMap::nof_fpu_regs / 2) - 1);
  } else {
    __ add(sp, sp, FrameMap::nof_fpu_regs * 4);
  }

  __ add(sp, sp, 4);  // skip the alignment pad
  __ pop(RegSet::range(r0, r12), sp);
}

static void restore_live_registers_except_r0(StubAssembler* sasm, bool restore_fpu_registers = true) {

  if (restore_fpu_registers && hasFPU()) {
    __ vldmia_f64(sp, (1 << FrameMap::nof_fpu_regs / 2) - 1);
  } else {
    __ add(sp, sp, FrameMap::nof_fpu_regs * 4);
  }

  __ add(sp, sp, 8);  // skip the alignment pad and the saved r0 slot
  __ pop(RegSet::range(r1, r12), sp);
}

void Runtime1::initialize_pd() {
}

// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs arguments (passed in rscratch1 and rscratch2)

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, rscratch1, rscratch2);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  // the throw entries never return to their caller
  __ should_not_reach_here();
  return oop_maps;
}
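
// Summary: generate_handle_exception below is reached in three states,
// matching its first switch. From forward_exception_id the registers are
// already saved in the standard places; from handle_exception(_nofpu)_id all
// registers may be live and are saved here; from
// handle_exception_from_callee_id only r0 (exception oop) and lr (exception
// pc) are live, so only a minimal frame is described. In every case it
// computes the handler address and patches it into this stub's return slot.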

OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters
  const Register exception_oop = r0;
  const Register exception_pc  = r3;
  // other registers used in this stub

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places.  Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found.  Otherwise unwind and dispatch to the callers
    // exception handler.
    oop_map = generate_oop_map(sasm, 1 /*thread*/);
    __ mov(rscratch1, 0);

    // load and clear pending exception oop into r0
    __ ldr(exception_oop, Address(rthread, Thread::pending_exception_offset()));
    __ str(rscratch1, Address(rthread, Thread::pending_exception_offset()));

    // load issuing PC (the return address for this stub) into r3
    __ ldr(exception_pc, Address(rfp, wordSize * frame::get_return_addr_offset()));

    // make sure that the vm_results are cleared (may be unnecessary)
    __ str(rscratch1, Address(rthread, JavaThread::vm_result_offset()));
    __ str(rscratch1, Address(rthread, JavaThread::vm_result_2_offset()));
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id: {
    // At this point all registers except exception oop (r0) and
    // exception pc (lr) are dead.
    const int frame_size = frame::get_frame_size() /*fp, return address, ...*/;
    assert(frame_size*wordSize % StackAlignmentInBytes == 0, "must be");
    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
    sasm->set_frame_size(frame_size);
    break;
  }
  default:
    __ should_not_reach_here();
    break;
  }

  // verify that only r0 and r3 are valid at this time
  __ invalidate_registers(false, true, false);
  // verify that r0 contains a valid exception
  __ verify_not_null_oop(exception_oop);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_oop_offset()));
  __ cbz(rscratch1, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));
  __ cbz(rscratch1, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ str(exception_oop, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(exception_pc, Address(rthread, JavaThread::exception_pc_offset()));

  // patch throwing pc into return address (has bci & oop map)
  __ str(exception_pc, Address(rfp, wordSize * frame::get_return_addr_offset()));

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // r0: handler address
  //     will be the deopt blob if the nmethod was deoptimized while we looked
  //     up the handler, regardless of whether a handler existed in the nmethod.

  // only r0 is valid at this time, all other registers have been destroyed by the runtime call
  __ invalidate_registers(false, true, true);

  // patch the return address, this stub will directly return to the exception handler
  __ str(r0, Address(rfp, wordSize * frame::get_return_addr_offset()));

  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // Restore the registers that were saved at the beginning.
    restore_live_registers(sasm, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id:
    // Pop the return address.
    __ leave();
    __ ret(lr);  // jump to exception handler
    break;
  default: ShouldNotReachHere();
  }

  return oop_maps;
}
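
// Summary: generate_unwind_exception below serves unwind_exception_id. It is
// deliberately frameless (the leaf call to
// SharedRuntime::exception_handler_for_return_address needs no stub frame)
// and ends by branching to the caller's handler with the exception oop in r0
// and the throwing pc in r3.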

void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = r0;
  // other registers used in this stub
  const Register exception_pc = r3;
  const Register handler_addr = r1;

  // verify that only r0 is valid at this time
  __ invalidate_registers(false, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  Label oop_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_oop_offset()));
  __ cbz(rscratch1, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));
  __ cbz(rscratch1, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // Save our return address because
  // exception_handler_for_return_address will destroy it.  We also
  // save exception_oop
  __ push(exception_oop);
  __ push(lr);

  // search the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, lr);
  // r0: exception handler address of the caller

  // Only r0 is valid at this time; all other registers have been
  // destroyed by the call.
  __ invalidate_registers(false, true, true);

  // move result of call into correct register
  __ mov(handler_addr, r0);

  // get throwing pc (= return address).
  // lr has been destroyed by the call
  __ pop(lr);
  __ pop(exception_oop);
  __ mov(r3, lr);

  __ verify_not_null_oop(exception_oop);

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // r0: exception oop
  // r3: throwing pc
  // r1: exception handler
  __ b(handler_addr);
}
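
// Summary of the patching protocol implemented below: save all registers,
// call the runtime patching routine with rthread as its only argument, then
// either (a) forward a pending exception (via the deopt blob if the nmethod
// was deoptimized in the meantime), (b) jump to the deopt blob's reexecute
// entry if the runtime returned true (deoptimized), or (c) return to the
// caller, whose code has now been patched.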

OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime arguments here because it is difficult
  // to distinguish each RT call.
  // Note: this number also affects the RT call in generate_handle_exception
  //       because the oop map is shared for all calls.
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm);

  __ mov(c_rarg0, rthread);
  Label retaddr;
  __ set_last_Java_frame(sp, rfp, retaddr, rscratch1);
  // do the call
  __ lea(rscratch1, RuntimeAddress(target));
  __ bl(rscratch1);
  __ bind(retaddr);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
#ifdef ASSERT
  { Label L;
    __ get_thread(rscratch1);
    __ cmp(rthread, rscratch1);
    __ b(L, Assembler::EQ);
    __ stop("StubAssembler::call_RT: rthread not callee saved?");
    __ bind(L);
  }
#endif
  __ reset_last_Java_frame(true);
  __ maybe_isb();

  // check for pending exceptions
  { Label L;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, L);
    // exception pending => remove activation and forward to exception handler

    { Label L1;
      __ cbnz(r0, L1);  // have we deoptimized?
      __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
      __ bind(L1);
    }

    // the deopt blob expects exceptions in the special fields of
    // JavaThread, so copy and clear pending exception.

    // load and clear pending exception
    __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
    __ mov(rscratch1, 0);
    __ str(rscratch1, Address(rthread, Thread::pending_exception_offset()));

    // check that there is really a valid exception
    __ verify_not_null_oop(r0);

    // load throwing pc: this is the return address of the stub
    __ ldr(r3, Address(rfp, wordSize * frame::get_return_addr_offset()));

#ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
    Label oop_empty;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);

    Label pc_empty;
    __ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));
    __ cbz(rscratch1, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif

    // store exception oop and throwing pc to JavaThread
    __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
    __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));

    restore_live_registers(sasm);

    __ leave();

    // Forward the exception directly to the deopt blob. We must not blow any
    // registers and must leave the throwing pc on the stack. A patch may have
    // values live in registers, so use the entry point that expects the
    // exception in TLS.
    __ far_jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));

    __ bind(L);
  }


  // Runtime will return true if the nmethod has been deoptimized during
  // the patching process.  In that case we must do a deopt reexecute instead.

  Label reexecuteEntry, cont;

  __ cbz(r0, cont);  // have we deoptimized?

  // Will reexecute.  The proper return address is already on the stack; we
  // just restore registers, pop all of our frame but the return address,
  // and jump to the deopt blob.
  restore_live_registers(sasm);
  __ leave();
  __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));

  __ bind(cont);
  restore_live_registers(sasm);
  __ leave();
  __ ret(lr);

  return oop_maps;
}

OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  const Register exception_oop = r0;
  const Register exception_pc  = r3;

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = NULL;
  OopMap* oop_map = NULL;
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ ret(lr);
      }
      break;

    case throw_div0_exception_id:
      { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;
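
    // Summary: the three instance-allocation ids below share one body.
    // new_instance_id always takes the runtime call; fast_new_instance_id
    // also tries an inline eden allocation when TLABs are off and the heap
    // supports it; fast_new_instance_init_check_id does the same but first
    // checks that the klass is fully initialized, branching to the slow path
    // otherwise.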
    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = r3; // Incoming
        Register obj   = r0; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        // If TLAB is disabled, see if there is support for inlining contiguous
        // allocations.
        // Otherwise, just go to the slow path.
        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            !UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
          Label slow_path;
          Register obj_size = r2;
          Register t1       = r5;
          Register t2       = r4;
          assert_different_registers(klass, obj, obj_size, t1, t2);

          __ push(t1);
          __ push(t2);
          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ ldrb(rscratch1, Address(klass, InstanceKlass::init_state_offset()));
            __ cmp(rscratch1, InstanceKlass::fully_initialized);
            __ b(slow_path, Assembler::NE);
          }

#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ ldr(obj_size, Address(klass, Klass::layout_helper_offset()));
            __ cmp(obj_size, 0u);
            __ b(not_ok, Assembler::LE); // make sure it's an instance (layout helper is positive)
            __ tst(obj_size, Klass::_lh_instance_slow_path_bit);
            __ b(ok, Assembler::EQ);
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          // get the instance size
          __ ldr(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ eden_allocate(obj, obj_size, 0, t1, slow_path);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false);
          __ verify_oop(obj);
          __ pop(t2);
          __ pop(t1);
          __ ret(lr);

          __ bind(slow_path);
          __ pop(t2);
          __ pop(t1);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(lr);

        // r0: new instance
      }

      break;

    case counter_overflow_id:
      {
        Register bci = r0, method = r1;
        __ enter();
        OopMap* map = save_live_registers(sasm);
        // Retrieve bci
        __ ldr(bci, Address(rfp, 1*BytesPerWord));
        // And a pointer to the Method*
        __ ldr(method, Address(rfp, 2*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(lr);
      }
      break;
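
    // Illustrative arithmetic for the array-size computation in the fast
    // path below (the concrete header size is hypothetical): layout_helper
    // keeps log2(element size) in its low five bits and the header size in
    // the _lh_header_size field. For an int array (element shift 2) of
    // length 10 with a 12-byte header:
    //   arr_size = 12 + (10 << 2) = 52,
    // rounded up to MinObjAlignmentInBytes (8 by default) gives 56 bytes.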
    case new_type_array_id:
    case new_object_array_id:
      {
        Register length = r6; // Incoming
        Register klass  = r3; // Incoming
        Register obj    = r0; // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
          __ ldr(t0, Address(klass, Klass::layout_helper_offset()));
          __ asr(t0, t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ mov(rscratch1, tag);
          __ cmp(t0, rscratch1);
          __ b(ok, Assembler::EQ);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        // If TLAB is disabled, see if there is support for inlining contiguous
        // allocations.
        // Otherwise, just go to the slow path.
        if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
          Register arr_size = r4;
          Register t1       = r2;
          Register t2       = r5;
          Label slow_path;
          assert_different_registers(length, klass, obj, arr_size, t1, t2);

          // check that array length is small enough for fast path.
          __ mov(rscratch1, C1_MacroAssembler::max_array_allocation_length);
          __ cmp(length, rscratch1);
          __ b(slow_path, Assembler::HI);

          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          __ ldr(t1, Address(klass, Klass::layout_helper_offset()));
          __ andr(rscratch1, t1, 0x1f);
          __ lsl(arr_size, length, rscratch1);
          __ extract_bits(t1, t1, Klass::_lh_header_size_shift,
                          exact_log2(Klass::_lh_header_size_mask + 1));
          __ add(arr_size, arr_size, t1);
          __ add(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up
          __ mov(rscratch1, ~MinObjAlignmentInBytesMask);
          __ andr(arr_size, arr_size, rscratch1);

          __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size

          __ initialize_header(obj, klass, length, t1, t2);
          // Assume Little-Endian
          __ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andr(t1, t1, Klass::_lh_header_size_mask);
          __ sub(arr_size, arr_size, t1);  // body length
          __ add(t1, t1, obj);             // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);

          __ ret(lr);

          __ bind(slow_path);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret(lr);

        // r0: new array
      }
      break;

    case new_multi_array_id:
      { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // r1: klass
        // r2: rank
        // r3: address of 1st dimension
        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, new_multi_array), r1, r2, r3);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);

        // r0: new multi array
        __ verify_oop(r0);
      }
      break;
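
    // Note on argument conventions for the stubs that follow: stubs reached
    // via call_runtime (e.g. register_finalizer) take their arguments in
    // C ABI registers; stubs reached via store_parameter (e.g. monitorenter,
    // monitorexit, deoptimize) read them from the caller's frame with
    // load_argument; slow_subtype_check uses explicitly pushed stack slots.
    // The per-stub comments state which convention applies.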
    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime so the arguments
        // will be placed in C ABI locations

        __ verify_oop(c_rarg0);

        // load the klass and check the has-finalizer flag
        Label register_finalizer;
        Register t = r5;
        __ load_klass(t, r0);
        __ ldr(t, Address(t, Klass::access_flags_offset()));
        __ tst(t, JVM_ACC_HAS_FINALIZER);
        __ b(register_finalizer, Assembler::NE);
        __ ret(lr);

        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), r0);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ ret(lr);
      }
      break;

    case throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // Typical calling sequence:
        // __ push(klass_RInfo);  // object klass or other subclass
        // __ push(sup_k_RInfo);  // array element klass or other superclass
        // __ bl(slow_subtype_check);
        // Note that the subclass is pushed first, and is therefore deepest.
        enum layout {
          r0_off,
          r2_off,
          r4_off,
          r5_off,
          sup_k_off,
          klass_off,
          framesize,
          result_off = sup_k_off  // the result overwrites the superclass slot
        };

        __ set_info("slow_subtype_check", dont_gc_arguments);
        __ push(RegSet::of(r0, r2, r4, r5), sp);

        // This is called by pushing args and not with C abi
        __ ldr(r4, Address(sp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
        __ ldr(r0, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass

        Label miss;
        __ check_klass_subtype_slow_path(r4, r0, r2, r5, NULL, &miss);

        // fallthrough on success:
        __ mov(rscratch1, 1);
        __ str(rscratch1, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result
        __ pop(RegSet::of(r0, r2, r4, r5), sp);
        __ ret(lr);

        __ bind(miss);
        __ mov(rscratch1, 0);
        __ str(rscratch1, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result
        __ pop(RegSet::of(r0, r2, r4, r5), sp);
        __ ret(lr);
      }
      break;
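
    // Note: the _nofpu variants below exist for callers compiled without any
    // FPU use; they clear save_fpu_registers and fall through to the common
    // case, so the VFP registers are neither saved nor restored around the
    // runtime call.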
    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(1, r0); // r0: object
        f.load_argument(0, r1); // r1: lock address

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), r0, r1);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(0, r0); // r0: lock address

        // note: really a leaf routine but must set up last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), r0);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case deoptimize_id:
      {
        StubFrame f(sasm, "deoptimize", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);
        f.load_argument(0, c_rarg1);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), c_rarg1);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ leave();
        __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    case throw_range_check_failed_id:
      { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // note: no stubframe since we are about to leave the current
        //       activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case access_field_patching_id:
      { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case throw_index_exception_id:
      { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case predicate_failed_trap_id:
      {
        StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);

        OopMap* map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");

        __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    default:
      { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
        __ mov(r0, (int)id);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}

#undef __

const char *Runtime1::pd_name_for_address(address entry) {
#ifdef __SOFTFP__
#define FUNCTION_CASE(a, f) \
  if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f

  FUNCTION_CASE(entry, SharedRuntime::i2f);
  FUNCTION_CASE(entry, SharedRuntime::i2d);
  FUNCTION_CASE(entry, SharedRuntime::f2d);
  FUNCTION_CASE(entry, SharedRuntime::fcmpg);
  FUNCTION_CASE(entry, SharedRuntime::fcmpl);
  FUNCTION_CASE(entry, SharedRuntime::dcmpg);
  FUNCTION_CASE(entry, SharedRuntime::dcmpl);
  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmple);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmple);
#undef FUNCTION_CASE
#endif

  return "Unknown_Func_Ptr";
}