/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_aarch64.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_aarch64.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#endif


// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  // setup registers
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
  assert(oop_result1 != rthread && metadata_result != rthread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");
  bool align_stack = false;

  mov(c_rarg0, rthread);
  set_num_rt_args(0); // Nothing on stack

  Label retaddr;
  set_last_Java_frame(sp, rfp, retaddr, rscratch1);

  // do the call
  lea(rscratch1, RuntimeAddress(entry));
  blrt(rscratch1, args_size + 1, 8, 1);
  bind(retaddr);
  int call_offset = offset();
  // verify callee-saved register
#ifdef ASSERT
  push(r0, sp);
  { Label L;
    get_thread(r0);
    cmp(rthread, r0);
    br(Assembler::EQ, L);
    stop("StubAssembler::call_RT: rthread not callee saved?");
    bind(L);
  }
  pop(r0, sp);
#endif
  reset_last_Java_frame(true);
  maybe_isb();

  // check for pending exceptions
  { Label L;
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
    cbz(rscratch1, L);
    // exception pending => remove activation and forward to exception handler
    // make sure that the vm_results are cleared
    if (oop_result1->is_valid()) {
      str(zr, Address(rthread, JavaThread::vm_result_offset()));
    }
    if (metadata_result->is_valid()) {
      str(zr, Address(rthread, JavaThread::vm_result_2_offset()));
    }
    if (frame_size() == no_frame_size) {
      leave();
      far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1, rthread);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result, rthread);
  }
  return call_offset;
}
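
// Usage sketch (illustrative, mirroring the stubs later in this file, not a
// new entry point): callers pair the returned offset with an oop map so the
// GC can walk the frame at the runtime call's return address, e.g.
//
//   OopMap* map = save_live_registers(sasm);
//   int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
//   oop_maps->add_gc_map(call_offset, map);
//
// call_offset is taken immediately after the blrt, i.e. it is the code
// offset of the return address recorded via set_last_Java_frame.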

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  mov(c_rarg1, arg1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  if (c_rarg1 == arg2) {
    if (c_rarg2 == arg1) {
      mov(rscratch1, arg1);
      mov(arg1, arg2);
      mov(arg2, rscratch1);
    } else {
      mov(c_rarg2, arg2);
      mov(c_rarg1, arg1);
    }
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
  }
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  // if there is any conflict use the stack
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
    stp(arg3, arg2, Address(pre(sp, -2 * wordSize)));
    stp(arg1, zr, Address(pre(sp, -2 * wordSize)));
    ldp(c_rarg1, zr, Address(post(sp, 2 * wordSize)));
    ldp(c_rarg3, c_rarg2, Address(post(sp, 2 * wordSize)));
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
    mov(c_rarg3, arg3);
  }
  return call_RT(oop_result1, metadata_result, entry, 3);
}

// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
  void load_argument(int offset_in_words, Register reg);

  ~StubFrame();
};


#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
  _sasm = sasm;
  __ set_info(name, must_gc_arguments);
  __ enter();
}

// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
  // rfp + 0: link
  //     + 1: return address
  //     + 2: argument with offset 0
  //     + 3: argument with offset 1
  //     + 4: ...
  __ ldr(reg, Address(rfp, (offset_in_words + 2) * BytesPerWord));
}
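
// Worked example (illustrative only): if compiled code did
// store_parameter(obj, 1) before calling the stub, then after StubFrame's
// enter() has pushed {rfp, lr} the stored value sits at
// rfp + (1 + 2) * BytesPerWord, which is exactly what load_argument(1, reg)
// reads back. This is why the two offset schemes must stay in sync.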


StubFrame::~StubFrame() {
  __ leave();
  __ ret(lr);
}

#undef __


// Implementation of Runtime1

#define __ sasm->

const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;

// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization).
// Note that users of this frame may well have arguments to some runtime
// while these values are on the stack. These positions neglect those arguments
// but the code in save_live_registers will take the argument count into
// account.
//

enum reg_save_layout {
  reg_save_frame_size = 32 /* float */ + 32 /* integer */
};

// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FP registers.  In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them.  The
// deopt blob is the only thing which needs to describe FPU registers.
// In all other cases it should be sufficient to simply save their
// current value.

static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
static int reg_save_size_in_words;
static int frame_size_in_bytes = -1;

static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  int frame_size_in_bytes = reg_save_frame_size * BytesPerWord;
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (i <= 18 && i != rscratch1->encoding() && i != rscratch2->encoding()) {
      int sp_offset = cpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                r->as_VMReg());
    }
  }

  if (save_fpu_registers) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      {
        int sp_offset = fpu_reg_save_offsets[i];
        oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                  r->as_VMReg());
      }
    }
  }
  return oop_map;
}

static OopMap* save_live_registers(StubAssembler* sasm,
                                   bool save_fpu_registers = true) {
  __ block_comment("save_live_registers");

  __ push(RegSet::range(r0, r29), sp);    // integer registers except lr & sp

  if (save_fpu_registers) {
    for (int i = 30; i >= 0; i -= 2)
      __ stpd(as_FloatRegister(i), as_FloatRegister(i+1),
              Address(__ pre(sp, -2 * wordSize)));
  } else {
    __ add(sp, sp, -32 * wordSize);
  }

  return generate_oop_map(sasm, save_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (restore_fpu_registers) {
    for (int i = 0; i < 32; i += 2)
      __ ldpd(as_FloatRegister(i), as_FloatRegister(i+1),
              Address(__ post(sp, 2 * wordSize)));
  } else {
    __ add(sp, sp, 32 * wordSize);
  }

  __ pop(RegSet::range(r0, r29), sp);
}
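
// Shape of the save area built above (a sketch; low addresses first):
//
//   sp       -> d0 .. d31   (32 double words, or just reserved stack space
//                            when save_fpu_registers is false)
//   sp + 256 -> r0 .. r29   (pushed pairwise; lr and sp are not saved)
//
// restore_live_registers_except_r0 below loads zr in place of r0 so that a
// stub can hand a result out in r0 without it being overwritten.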

static void restore_live_registers_except_r0(StubAssembler* sasm, bool restore_fpu_registers = true) {

  if (restore_fpu_registers) {
    for (int i = 0; i < 32; i += 2)
      __ ldpd(as_FloatRegister(i), as_FloatRegister(i+1),
              Address(__ post(sp, 2 * wordSize)));
  } else {
    __ add(sp, sp, 32 * wordSize);
  }

  __ ldp(zr, r1, Address(__ post(sp, 16)));
  __ pop(RegSet::range(r2, r29), sp);
}



void Runtime1::initialize_pd() {
  int i;
  int sp_offset = 0;

  // all float registers are saved explicitly
  assert(FrameMap::nof_fpu_regs == 32, "double registers not handled here");
  for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
    fpu_reg_save_offsets[i] = sp_offset;
    sp_offset += 2;   // SP offsets are in halfwords
  }

  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    cpu_reg_save_offsets[i] = sp_offset;
    sp_offset += 2;   // SP offsets are in halfwords
  }
}


// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs an argument (passed in rscratch1)

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, rscratch1);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}
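
// Offset bookkeeping note (explanatory, no new behavior): VMReg stack slots
// are 32-bit, so each 64-bit register saved above spans two slots. That is
// why initialize_pd() advances sp_offset by 2 per register and why
// generate_oop_map() can pass cpu_reg_save_offsets[i] straight to
// VMRegImpl::stack2reg().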


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters
  const Register exception_oop = r0;
  const Register exception_pc  = r3;
  // other registers used in this stub

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places.  Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found.  Otherwise unwind and dispatch to the caller's
    // exception handler.
    oop_map = generate_oop_map(sasm, 1 /*thread*/);

    // load and clear pending exception oop into r0
    __ ldr(exception_oop, Address(rthread, Thread::pending_exception_offset()));
    __ str(zr, Address(rthread, Thread::pending_exception_offset()));

    // load issuing PC (the return address for this stub) into r3
    __ ldr(exception_pc, Address(rfp, 1*BytesPerWord));

    // make sure that the vm_results are cleared (may be unnecessary)
    __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
    __ str(zr, Address(rthread, JavaThread::vm_result_2_offset()));
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id: {
    // At this point all registers except exception oop (r0) and
    // exception pc (lr) are dead.
    const int frame_size = 2 /*fp, return address*/;
    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
    sasm->set_frame_size(frame_size);
    break;
  }
  default:
    __ should_not_reach_here();
    break;
  }

  // verify that only r0 and r3 are valid at this time
  __ invalidate_registers(false, true, true, false, true, true);
  // verify that r0 contains a valid exception
  __ verify_not_null_oop(exception_oop);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_oop_offset()));
  __ cbz(rscratch1, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));
  __ cbz(rscratch1, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ str(exception_oop, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(exception_pc, Address(rthread, JavaThread::exception_pc_offset()));

  // patch throwing pc into return address (has bci & oop map)
  __ str(exception_pc, Address(rfp, 1*BytesPerWord));

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // r0: handler address
  //     will be the deopt blob if the nmethod was deoptimized while we looked
  //     up the handler, regardless of whether the handler existed in the nmethod.

  // only r0 is valid at this time, all other registers have been destroyed by the runtime call
  __ invalidate_registers(false, true, true, true, true, true);

  // patch the return address, this stub will directly return to the exception handler
  __ str(r0, Address(rfp, 1*BytesPerWord));

  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // Restore the registers that were saved at the beginning.
    restore_live_registers(sasm, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id:
    // Pop the return address.
    __ leave();
    __ ret(lr);  // jump to exception handler
    break;
  default:  ShouldNotReachHere();
  }

  return oop_maps;
}
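
// Recap of the return-address patching above (a sketch of the control flow,
// no new behavior): the saved return address at rfp + 1*BytesPerWord is
// overwritten twice. First with the throwing pc, so that the runtime call
// resolves the handler against the correct bci and oop map; then with the
// handler address itself, so the stub's epilogue "returns" straight into
// the exception handler.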


void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = r0;
  // callee-saved copy of exception_oop during runtime call
  const Register exception_oop_callee_saved = r19;
  // other registers used in this stub
  const Register exception_pc = r3;
  const Register handler_addr = r1;

  // verify that only r0 is valid at this time
  __ invalidate_registers(false, true, true, true, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  Label oop_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_oop_offset()));
  __ cbz(rscratch1, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));
  __ cbz(rscratch1, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // Save our return address because
  // exception_handler_for_return_address will destroy it.  We also
  // save exception_oop.
  __ stp(lr, exception_oop, Address(__ pre(sp, -2 * wordSize)));

  // search the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, lr);
  // r0: exception handler address of the caller

  // Only r0 is valid at this time; all other registers have been
  // destroyed by the call.
  __ invalidate_registers(false, true, true, true, false, true);

  // move result of call into correct register
  __ mov(handler_addr, r0);

  // get throwing pc (= return address).
  // lr has been destroyed by the call
  __ ldp(lr, exception_oop, Address(__ post(sp, 2 * wordSize)));
  __ mov(r3, lr);

  __ verify_not_null_oop(exception_oop);

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // r0: exception oop
  // r3: throwing pc
  // r1: exception handler
  __ br(handler_addr);
}



OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime-arguments here because it is difficult to
  // distinguish each RT-Call.
  // Note: This number also affects the RT-Call in generate_handle_exception because
  //       the oop-map is shared for all calls.
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm);

  __ mov(c_rarg0, rthread);
  Label retaddr;
  __ set_last_Java_frame(sp, rfp, retaddr, rscratch1);
  // do the call
  __ lea(rscratch1, RuntimeAddress(target));
  __ blrt(rscratch1, 1, 0, 1);
  __ bind(retaddr);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
#ifdef ASSERT
  { Label L;
    __ get_thread(rscratch1);
    __ cmp(rthread, rscratch1);
    __ br(Assembler::EQ, L);
    __ stop("StubAssembler::call_RT: rthread not callee saved?");
    __ bind(L);
  }
#endif
  __ reset_last_Java_frame(true);
  __ maybe_isb();

  // check for pending exceptions
  { Label L;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, L);
    // exception pending => remove activation and forward to exception handler

    { Label L1;
      __ cbnz(r0, L1);                                  // have we deoptimized?
      __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
      __ bind(L1);
    }

    // the deopt blob expects exceptions in the special fields of
    // JavaThread, so copy and clear pending exception.

    // load and clear pending exception
    __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
    __ str(zr, Address(rthread, Thread::pending_exception_offset()));

    // check that there is really a valid exception
    __ verify_not_null_oop(r0);

    // load throwing pc: this is the return address of the stub
    __ mov(r3, lr);

#ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
    Label oop_empty;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);

    Label pc_empty;
    __ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));
    __ cbz(rscratch1, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif

    // store exception oop and throwing pc to JavaThread
    __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
    __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));

    restore_live_registers(sasm);

    __ leave();

    // Forward the exception directly to deopt blob. We can blow no
    // registers and must leave throwing pc on the stack.  A patch may
    // have values live in registers, so we use the entry point with the
    // exception in tls.
    __ far_jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));

    __ bind(L);
  }


  // Runtime will return true if the nmethod has been deoptimized during
  // the patching process.  In that case we must do a deopt reexecute instead.

  Label reexecuteEntry, cont;

  __ cbz(r0, cont);                                 // have we deoptimized?

  // Will reexecute.  The proper return address is already on the stack;
  // we just restore registers, pop all of our frame but the return
  // address, and jump to the deopt blob.
  restore_live_registers(sasm);
  __ leave();
  __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));

  __ bind(cont);
  restore_live_registers(sasm);
  __ leave();
  __ ret(lr);

  return oop_maps;
}
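
// Exit protocol of generate_patching, restated (no new behavior): with a
// pending exception we leave through the deopt blob's exception-in-tls
// entry (or through forward_exception if the nmethod was not deoptimized);
// with r0 != 0 the nmethod was deoptimized during patching and we reexecute
// via the deopt blob; otherwise we return normally and the caller re-runs
// the now-patched code.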


OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  const Register exception_oop = r0;
  const Register exception_pc  = r3;

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = NULL;
  OopMap* oop_map = NULL;
  switch (id) {
    {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ ret(lr);
      }
      break;

    case throw_div0_exception_id:
      { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = r3; // Incoming
        Register obj   = r0; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
          Label slow_path;
          Register obj_size = r2;
          Register t1       = r19;
          Register t2       = r4;
          assert_different_registers(klass, obj, obj_size, t1, t2);

          __ stp(r19, zr, Address(__ pre(sp, -2 * wordSize)));

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ ldrb(rscratch1, Address(klass, InstanceKlass::init_state_offset()));
            __ cmpw(rscratch1, InstanceKlass::fully_initialized);
            __ br(Assembler::NE, slow_path);
          }

#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));
            __ cmp(obj_size, 0u);
            __ br(Assembler::LE, not_ok);  // make sure it's an instance (LH > 0)
            __ tstw(obj_size, Klass::_lh_instance_slow_path_bit);
            __ br(Assembler::EQ, ok);
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          // get the instance size (size is positive, so ldrw does the right thing on 64 bit)
          __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ eden_allocate(obj, obj_size, 0, t1, slow_path);
          __ incr_allocated_bytes(rthread, obj_size, 0, rscratch1);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false);
          __ verify_oop(obj);
          __ ldp(r19, zr, Address(__ post(sp, 2 * wordSize)));
          __ ret(lr);

          __ bind(slow_path);
          __ ldp(r19, zr, Address(__ post(sp, 2 * wordSize)));
        }

        __ enter();
        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(lr);

        // r0: new instance
      }

      break;
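
    // Note on the fast path above (hedged summary of the layout_helper
    // encoding): for instance klasses Klass::layout_helper() is positive
    // and holds the instance size in bytes, with the low bit
    // (_lh_instance_slow_path_bit) set when allocation must take the slow
    // path, e.g. for finalizable classes. The ASSERT block checks exactly
    // that shape (LH > 0, slow-path bit clear) before eden_allocate runs.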

    case counter_overflow_id:
      {
        Register bci = r0, method = r1;
        __ enter();
        OopMap* map = save_live_registers(sasm);
        // Retrieve bci
        __ ldrw(bci, Address(rfp, 2*BytesPerWord));
        // And a pointer to the Method*
        __ ldr(method, Address(rfp, 3*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(lr);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register length = r19; // Incoming
        Register klass  = r3;  // Incoming
        Register obj    = r0;  // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
          __ ldrw(t0, Address(klass, Klass::layout_helper_offset()));
          __ asrw(t0, t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ mov(rscratch1, tag);
          __ cmpw(t0, rscratch1);
          __ br(Assembler::EQ, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
          Register arr_size = r4;
          Register t1       = r2;
          Register t2       = r5;
          Label slow_path;
          assert_different_registers(length, klass, obj, arr_size, t1, t2);

          // check that array length is small enough for fast path.
          __ mov(rscratch1, C1_MacroAssembler::max_array_allocation_length);
          __ cmpw(length, rscratch1);
          __ br(Assembler::HI, slow_path);

          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive ldrw does the right thing on 64 bit
          __ ldrw(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive movw does the right thing on 64 bit
          __ movw(arr_size, length);
          __ lslvw(arr_size, length, t1);
          __ ubfx(t1, t1, Klass::_lh_header_size_shift,
                  exact_log2(Klass::_lh_header_size_mask + 1));
          __ add(arr_size, arr_size, t1);
          __ add(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up
          __ andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);

          __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
          __ incr_allocated_bytes(rthread, arr_size, 0, rscratch1);

          __ initialize_header(obj, klass, length, t1, t2);
          __ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andr(t1, t1, Klass::_lh_header_size_mask);
          __ sub(arr_size, arr_size, t1);  // body length
          __ add(t1, t1, obj);             // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);

          __ ret(lr);

          __ bind(slow_path);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret(lr);

        // r0: new array
      }
      break;
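
    // Worked size computation for the array fast path above (illustrative;
    // the exact constants depend on platform and flags): for an int[] with
    // compressed class pointers the layout helper is roughly 0xC0100A02.
    // The low 5 bits (2) scale length by 4 bytes via lslvw, ubfx pulls the
    // 16-byte header size out of bits [16..23], and the final add/andr pair
    // rounds the byte total up to MinObjAlignmentInBytes.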

    case new_multi_array_id:
      { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // r0: klass
        // r19: rank
        // r2: address of 1st dimension
        OopMap* map = save_live_registers(sasm);
        __ mov(c_rarg1, r0);
        __ mov(c_rarg3, r2);
        __ mov(c_rarg2, r19);
        int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, new_multi_array), r1, r2, r3);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);

        // r0: new multi array
        __ verify_oop(r0);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime, so the arguments
        // will be placed in C ABI locations

        __ verify_oop(c_rarg0);

        // load the klass and check the has-finalizer flag
        Label register_finalizer;
        Register t = r5;
        __ load_klass(t, r0);
        __ ldrw(t, Address(t, Klass::access_flags_offset()));
        __ tbnz(t, exact_log2(JVM_ACC_HAS_FINALIZER), register_finalizer);
        __ ret(lr);

        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), r0);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ ret(lr);
      }
      break;
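
    // Note on the finalizer test above (explanatory only): access_flags is
    // a bit mask and JVM_ACC_HAS_FINALIZER is a single bit, so tbnz can
    // branch on bit index exact_log2(JVM_ACC_HAS_FINALIZER) directly,
    // avoiding a separate tst/br pair.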

    case throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_change_error", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // Typical calling sequence:
        // __ push(klass_RInfo);  // object klass or other subclass
        // __ push(sup_k_RInfo);  // array element klass or other superclass
        // __ bl(slow_subtype_check);
        // Note that the subclass is pushed first, and is therefore deepest.
        enum layout {
          r0_off, r0_off_hi,
          r2_off, r2_off_hi,
          r4_off, r4_off_hi,
          r5_off, r5_off_hi,
          sup_k_off, sup_k_off_hi,
          klass_off, klass_off_hi,
          framesize,
          result_off = sup_k_off
        };

        __ set_info("slow_subtype_check", dont_gc_arguments);
        __ push(RegSet::of(r0, r2, r4, r5), sp);

        // This is called by pushing args and not with C abi
        // __ ldr(r4, Address(sp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
        // __ ldr(r0, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass

        __ ldp(r4, r0, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size));

        Label miss;
        __ check_klass_subtype_slow_path(r4, r0, r2, r5, NULL, &miss);

        // fallthrough on success:
        __ mov(rscratch1, 1);
        __ str(rscratch1, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result
        __ pop(RegSet::of(r0, r2, r4, r5), sp);
        __ ret(lr);

        __ bind(miss);
        __ str(zr, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result
        __ pop(RegSet::of(r0, r2, r4, r5), sp);
        __ ret(lr);
      }
      break;
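
    // Result protocol of slow_subtype_check, restated: the answer
    // overwrites the pushed superclass slot (result_off == sup_k_off), so
    // after the stub returns the caller finds 1 there on success and 0 on
    // a miss; r0, r2, r4 and r5 are saved and restored around the check.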

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(1, r0); // r0: object
        f.load_argument(0, r1); // r1: lock address

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), r0, r1);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(0, r0); // r0: lock address

        // note: really a leaf routine but must setup last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), r0);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case deoptimize_id:
      {
        StubFrame f(sasm, "deoptimize", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);
        f.load_argument(0, c_rarg1);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), c_rarg1);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ leave();
        __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    case throw_range_check_failed_id:
      { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // note: no stubframe since we are about to leave the current
        //       activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case access_field_patching_id:
      { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case throw_index_exception_id:
      { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

#if INCLUDE_ALL_GCS

    case g1_pre_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
        // arg0 : previous value of memory

        BarrierSet* bs = BarrierSet::barrier_set();
        if (bs->kind() != BarrierSet::G1BarrierSet && bs->kind() != BarrierSet::Shenandoah) {
          __ mov(r0, (int)id);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
          __ should_not_reach_here();
          break;
        }

        if (bs->kind() == BarrierSet::Shenandoah && !ShenandoahSATBBarrier &&
            !ShenandoahConditionalSATBBarrier) {
          break;
        }

        const Register pre_val = r0;
        const Register thread = rthread;
        const Register tmp = rscratch1;

        Address in_progress(thread, in_bytes(UseG1GC ? G1ThreadLocalData::satb_mark_queue_active_offset()
                                                     : ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
        Address queue_index(thread, in_bytes(UseG1GC ? G1ThreadLocalData::satb_mark_queue_index_offset()
                                                     : ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
        Address buffer(thread, in_bytes(UseG1GC ? G1ThreadLocalData::satb_mark_queue_buffer_offset()
                                                : ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

        Label done;
        Label runtime;

        if (UseShenandoahGC) {
          Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
          __ ldrb(tmp, gc_state);
          __ tbz(tmp, ShenandoahHeap::MARKING_BITPOS, done);
        } else {
          assert(UseG1GC, "Should be");
          // Is marking still active?
          if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
            __ ldrw(tmp, in_progress);
          } else {
            assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
            __ ldrb(tmp, in_progress);
          }
          __ cbzw(tmp, done);
        }

        // Can we store original value in the thread's buffer?
        __ ldr(tmp, queue_index);
        __ cbz(tmp, runtime);

        __ sub(tmp, tmp, wordSize);
        __ str(tmp, queue_index);
        __ ldr(rscratch2, buffer);
        __ add(tmp, tmp, rscratch2);
        f.load_argument(0, rscratch2);
        __ str(rscratch2, Address(tmp, 0));
        __ b(done);

        __ bind(runtime);
        __ push_call_clobbered_registers();
        f.load_argument(0, pre_val);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
        __ pop_call_clobbered_registers();
        __ bind(done);
      }
      break;
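
    // SATB queue mechanics used above (a sketch, no new behavior):
    // queue_index is a byte count that runs down from the buffer capacity;
    // zero means the thread-local buffer is full, so the stub calls
    // SharedRuntime::g1_wb_pre to flush it, otherwise it stores the
    // pre-value at buffer + index and continues without a runtime call.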
    case g1_post_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);

        BarrierSet* bs = BarrierSet::barrier_set();
        if (bs->kind() != BarrierSet::G1BarrierSet) {
          __ mov(r0, (int)id);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
          __ should_not_reach_here();
          break;
        }

        // arg0: store_address
        Address store_addr(rfp, 2*BytesPerWord);

        Label done;
        Label runtime;

        // At this point we know new_value is non-NULL and the new_value crosses regions.
        // Must check to see if card is already dirty

        const Register thread = rthread;

        Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
        Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));

        const Register card_offset = rscratch2;
        // LR is free here, so we can use it to hold the byte_map_base.
        const Register byte_map_base = lr;

        assert_different_registers(card_offset, byte_map_base, rscratch1);

        f.load_argument(0, card_offset);
        __ lsr(card_offset, card_offset, CardTable::card_shift);
        __ load_byte_map_base(byte_map_base);
        __ ldrb(rscratch1, Address(byte_map_base, card_offset));
        __ cmpw(rscratch1, (int)G1CardTable::g1_young_card_val());
        __ br(Assembler::EQ, done);

        assert((int)CardTable::dirty_card_val() == 0, "must be 0");

        __ membar(Assembler::StoreLoad);
        __ ldrb(rscratch1, Address(byte_map_base, card_offset));
        __ cbzw(rscratch1, done);

        // Storing a region-crossing non-NULL oop and the card is clean:
        // dirty the card and log it.
        __ strb(zr, Address(byte_map_base, card_offset));

        // Convert card offset into an address in card_addr
        Register card_addr = card_offset;
        __ add(card_addr, byte_map_base, card_addr);

        __ ldr(rscratch1, queue_index);
        __ cbz(rscratch1, runtime);
        __ sub(rscratch1, rscratch1, wordSize);
        __ str(rscratch1, queue_index);

        // Reuse LR to hold buffer_addr
        const Register buffer_addr = lr;

        __ ldr(buffer_addr, buffer);
        __ str(card_addr, Address(buffer_addr, rscratch1));
        __ b(done);

        __ bind(runtime);
        __ push_call_clobbered_registers();
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
        __ pop_call_clobbered_registers();
        __ bind(done);

      }
      break;
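
    // Card table arithmetic used above (illustrative): each card covers
    // 2^CardTable::card_shift (512) bytes of heap, so the store address
    // shifted right by card_shift indexes the card byte at
    // byte_map_base + offset; the same sum also serves as the card's
    // address when it is logged in the dirty card queue.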
#endif

    case predicate_failed_trap_id:
      {
        StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);

        OopMap* map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");

        __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;


    default:
      { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
        __ mov(r0, (int)id);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
        __ should_not_reach_here();
      }
      break;
    }
  }
  return oop_maps;
}

#undef __

const char *Runtime1::pd_name_for_address(address entry) { Unimplemented(); return 0; }