/*
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_aarch64.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_aarch64.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#endif


// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  // setup registers
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
  assert(oop_result1 != rthread && metadata_result != rthread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");
  bool align_stack = false;

  mov(c_rarg0, rthread);
  set_num_rt_args(0); // Nothing on stack

  Label retaddr;
  set_last_Java_frame(sp, rfp, retaddr, rscratch1);

  // do the call
  lea(rscratch1, RuntimeAddress(entry));
  blrt(rscratch1, args_size + 1, 8, 1);
  bind(retaddr);
  int call_offset = offset();
  // verify callee-saved register
#ifdef ASSERT
  push(r0, sp);
  { Label L;
    get_thread(r0);
    cmp(rthread, r0);
    br(Assembler::EQ, L);
    stop("StubAssembler::call_RT: rthread not callee saved?");
    bind(L);
  }
  pop(r0, sp);
#endif
  reset_last_Java_frame(true);
  maybe_isb();

  // check for pending exceptions
  { Label L;
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
    cbz(rscratch1, L);
    // exception pending => remove activation and forward to exception handler
    // make sure that the vm_results are cleared
    if (oop_result1->is_valid()) {
      str(zr, Address(rthread, JavaThread::vm_result_offset()));
    }
    if (metadata_result->is_valid()) {
      str(zr, Address(rthread, JavaThread::vm_result_2_offset()));
    }
    if (frame_size() == no_frame_size) {
      leave();
      far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1, rthread);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result, rthread);
  }
  return call_offset;
}

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  mov(c_rarg1, arg1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  if (c_rarg1 == arg2) {
    if (c_rarg2 == arg1) {
      mov(rscratch1, arg1);
      mov(arg1, arg2);
      mov(arg2, rscratch1);
    } else {
      mov(c_rarg2, arg2);
      mov(c_rarg1, arg1);
    }
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
  }
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  // if there is any conflict use the stack
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
    stp(arg3, arg2, Address(pre(sp, -2 * wordSize)));
    stp(arg1, zr, Address(pre(sp, -2 * wordSize)));
    ldp(c_rarg1, zr, Address(post(sp, 2 * wordSize)));
    ldp(c_rarg3, c_rarg2, Address(post(sp, 2 * wordSize)));
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
    mov(c_rarg3, arg3);
  }
  return call_RT(oop_result1, metadata_result, entry, 3);
}
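
// A minimal sketch of the conflict case above, with hypothetical register
// choices: call_RT(noreg, noreg, entry, c_rarg2, c_rarg1, r5) has its first
// two arguments sitting in each other's target registers, so any plain mov
// sequence would clobber one of them. Staging all three values on the stack
// with the two stp instructions and reloading them straight into
// c_rarg1..c_rarg3 sidesteps the ordering hazard entirely.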

// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
  void load_argument(int offset_in_words, Register reg);

  ~StubFrame();
};


#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
  _sasm = sasm;
  __ set_info(name, must_gc_arguments);
  __ enter();
}

// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
  // rfp + 0: link
  //     + 1: return address
  //     + 2: argument with offset 0
  //     + 3: argument with offset 1
  //     + 4: ...

  __ ldr(reg, Address(rfp, (offset_in_words + 2) * BytesPerWord));
}


StubFrame::~StubFrame() {
  __ leave();
  __ ret(lr);
}

#undef __
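
// Typical use, mirroring the cases in Runtime1::generate_code_for() below:
//
//   { StubFrame f(sasm, "monitorenter", dont_gc_arguments);
//     f.load_argument(1, r0);  // value stored by LIR_Assembler::store_parameter
//     ...
//   }  // ~StubFrame emits leave() and ret(lr)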


// Implementation of Runtime1

#define __ sasm->

const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;

// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization).
// Note that users of this frame may well have arguments to some runtime
// while these values are on the stack. These positions neglect those arguments
// but the code in save_live_registers will take the argument count into
// account.
//

enum reg_save_layout {
  reg_save_frame_size = 32 /* float */ + 32 /* integer */
};

// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FP registers.  In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them.  The
// deopt blob is the only thing which needs to describe FPU registers.
// In all other cases it should be sufficient to simply save their
// current value.

static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
static int reg_save_size_in_words;
static int frame_size_in_bytes = -1;

static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  int frame_size_in_bytes = reg_save_frame_size * BytesPerWord;
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (i <= 18 && i != rscratch1->encoding() && i != rscratch2->encoding()) {
      int sp_offset = cpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                r->as_VMReg());
    }
  }

  if (save_fpu_registers) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      {
        int sp_offset = fpu_reg_save_offsets[i];
        oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                  r->as_VMReg());
      }
    }
  }
  return oop_map;
}

static OopMap* save_live_registers(StubAssembler* sasm,
                                   bool save_fpu_registers = true) {
  __ block_comment("save_live_registers");

  __ push(RegSet::range(r0, r29), sp);    // integer registers except lr & sp

  if (save_fpu_registers) {
    for (int i = 30; i >= 0; i -= 2)
      __ stpd(as_FloatRegister(i), as_FloatRegister(i+1),
              Address(__ pre(sp, -2 * wordSize)));
  } else {
    __ add(sp, sp, -32 * wordSize);
  }

  return generate_oop_map(sasm, save_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (restore_fpu_registers) {
    for (int i = 0; i < 32; i += 2)
      __ ldpd(as_FloatRegister(i), as_FloatRegister(i+1),
              Address(__ post(sp, 2 * wordSize)));
  } else {
    __ add(sp, sp, 32 * wordSize);
  }

  __ pop(RegSet::range(r0, r29), sp);
}

static void restore_live_registers_except_r0(StubAssembler* sasm, bool restore_fpu_registers = true) {

  if (restore_fpu_registers) {
    for (int i = 0; i < 32; i += 2)
      __ ldpd(as_FloatRegister(i), as_FloatRegister(i+1),
              Address(__ post(sp, 2 * wordSize)));
  } else {
    __ add(sp, sp, 32 * wordSize);
  }

  __ ldp(zr, r1, Address(__ post(sp, 16)));
  __ pop(RegSet::range(r2, r29), sp);
}
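
// For reference, the save area laid down by save_live_registers (lowest
// address first), matching the slot offsets computed in
// Runtime1::initialize_pd() below:
//
//   sp +  0 * wordSize : v0..v31  (32 doubles, saved in pairs; replaced by a
//                                  bare sp adjustment when save_fpu_registers
//                                  is false)
//   sp + 32 * wordSize : r0..r29  (lr and sp are not saved)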



void Runtime1::initialize_pd() {
  int i;
  int sp_offset = 0;

  // all float registers are saved explicitly
  assert(FrameMap::nof_fpu_regs == 32, "double registers not handled here");
  for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
    fpu_reg_save_offsets[i] = sp_offset;
    sp_offset += 2;   // SP offsets are in halfwords
  }

  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    cpu_reg_save_offsets[i] = sp_offset;
    sp_offset += 2;   // SP offsets are in halfwords
  }
}


// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs an argument (passed in rscratch1)

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, rscratch1);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters
  const Register exception_oop = r0;
  const Register exception_pc  = r3;
  // other registers used in this stub

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places.  Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found.  Otherwise unwind and dispatch to the caller's
    // exception handler.
    oop_map = generate_oop_map(sasm, 1 /*thread*/);

    // load and clear pending exception oop into r0
    __ ldr(exception_oop, Address(rthread, Thread::pending_exception_offset()));
    __ str(zr, Address(rthread, Thread::pending_exception_offset()));

    // load issuing PC (the return address for this stub) into r3
    __ ldr(exception_pc, Address(rfp, 1*BytesPerWord));

    // make sure that the vm_results are cleared (may be unnecessary)
    __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
    __ str(zr, Address(rthread, JavaThread::vm_result_2_offset()));
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id: {
    // At this point all registers except exception oop (r0) and
    // exception pc (lr) are dead.
    const int frame_size = 2 /*fp, return address*/;
    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
    sasm->set_frame_size(frame_size);
    break;
  }
  default:
    __ should_not_reach_here();
    break;
  }

  // verify that only r0 and r3 are valid at this time
  __ invalidate_registers(false, true, true, false, true, true);
  // verify that r0 contains a valid exception
  __ verify_not_null_oop(exception_oop);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_oop_offset()));
  __ cbz(rscratch1, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));
  __ cbz(rscratch1, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ str(exception_oop, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(exception_pc, Address(rthread, JavaThread::exception_pc_offset()));

  // patch throwing pc into return address (has bci & oop map)
  __ str(exception_pc, Address(rfp, 1*BytesPerWord));

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // r0: handler address
  //      will be the deopt blob if nmethod was deoptimized while we looked up
  //      handler regardless of whether handler existed in the nmethod.

  // only r0 is valid at this time, all other registers have been destroyed by the runtime call
  __ invalidate_registers(false, true, true, true, true, true);

  // patch the return address, this stub will directly return to the exception handler
  __ str(r0, Address(rfp, 1*BytesPerWord));

  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // Restore the registers that were saved at the beginning.
    restore_live_registers(sasm, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id:
    // Pop the return address.
    __ leave();
    __ ret(lr);  // jump to exception handler
    break;
  default:  ShouldNotReachHere();
  }

  return oop_maps;
}
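
// Net effect of the stub above: the throwing pc is recorded both in
// JavaThread::exception_pc and in the return-address slot at
// rfp + BytesPerWord, the runtime computes the handler from those fields,
// and the stub then "returns" straight into the handler with the exception
// oop still available via JavaThread.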


void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = r0;
  // callee-saved copy of exception_oop during runtime call
  const Register exception_oop_callee_saved = r19;
  // other registers used in this stub
  const Register exception_pc = r3;
  const Register handler_addr = r1;

  // verify that only r0 is valid at this time
  __ invalidate_registers(false, true, true, true, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  Label oop_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_oop_offset()));
  __ cbz(rscratch1, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));
  __ cbz(rscratch1, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // Save our return address because
  // exception_handler_for_return_address will destroy it.  We also
  // save exception_oop
  __ stp(lr, exception_oop, Address(__ pre(sp, -2 * wordSize)));

  // search the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, lr);
  // r0: exception handler address of the caller

  // Only r0 is valid at this time; all other registers have been
  // destroyed by the call.
  __ invalidate_registers(false, true, true, true, false, true);

  // move result of call into correct register
  __ mov(handler_addr, r0);

  // get throwing pc (= return address).
  // lr has been destroyed by the call
  __ ldp(lr, exception_oop, Address(__ post(sp, 2 * wordSize)));
  __ mov(r3, lr);

  __ verify_not_null_oop(exception_oop);

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // r0: exception oop
  // r3: throwing pc
  // r1: exception handler
  __ br(handler_addr);
}



OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime-arguments here because it is difficult to
  // distinguish each RT-Call.
  // Note: This number also affects the RT-Call in generate_handle_exception because
  //       the oop-map is shared for all calls.
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm);

  __ mov(c_rarg0, rthread);
  Label retaddr;
  __ set_last_Java_frame(sp, rfp, retaddr, rscratch1);
  // do the call
  __ lea(rscratch1, RuntimeAddress(target));
  __ blrt(rscratch1, 1, 0, 1);
  __ bind(retaddr);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
#ifdef ASSERT
  { Label L;
    __ get_thread(rscratch1);
    __ cmp(rthread, rscratch1);
    __ br(Assembler::EQ, L);
    __ stop("StubAssembler::call_RT: rthread not callee saved?");
    __ bind(L);
  }
#endif
  __ reset_last_Java_frame(true);
  __ maybe_isb();

  // check for pending exceptions
  { Label L;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, L);
    // exception pending => remove activation and forward to exception handler

    { Label L1;
      __ cbnz(r0, L1);                                  // have we deoptimized?
      __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
      __ bind(L1);
    }

    // the deopt blob expects exceptions in the special fields of
    // JavaThread, so copy and clear pending exception.

    // load and clear pending exception
    __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
    __ str(zr, Address(rthread, Thread::pending_exception_offset()));

    // check that there is really a valid exception
    __ verify_not_null_oop(r0);

    // load throwing pc: this is the return address of the stub
    __ mov(r3, lr);

#ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
    Label oop_empty;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);

    Label pc_empty;
    __ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));
    __ cbz(rscratch1, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif

    // store exception oop and throwing pc to JavaThread
    __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
    __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));

    restore_live_registers(sasm);

    __ leave();

    // Forward the exception directly to deopt blob. We can blow no
    // registers and must leave throwing pc on the stack. A patch may
    // have values live in registers, so use the entry point with the
    // exception in tls.
    __ far_jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));

    __ bind(L);
  }


  // Runtime will return true if the nmethod has been deoptimized during
  // the patching process. In that case we must do a deopt reexecute instead.

  Label reexecuteEntry, cont;

  __ cbz(r0, cont);                                 // have we deoptimized?

  // Will reexecute. Proper return address is already on the stack; we just
  // restore registers, pop all of our frame but the return address, and jump
  // to the deopt blob.
  restore_live_registers(sasm);
  __ leave();
  __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));

  __ bind(cont);
  restore_live_registers(sasm);
  __ leave();
  __ ret(lr);

  return oop_maps;
}
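
// Three ways out of the patching stub above, for reference:
//   1. pending exception  -> deopt blob, unpack_with_exception_in_tls entry
//   2. runtime returned true (nmethod deoptimized while patching)
//                         -> deopt blob, unpack_with_reexecution entry
//   3. otherwise          -> plain return; the patched site is re-executed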


OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  const Register exception_oop = r0;
  const Register exception_pc  = r3;

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = NULL;
  OopMap* oop_map = NULL;
  switch (id) {
    {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ ret(lr);
      }
      break;

    case throw_div0_exception_id:
      { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = r3; // Incoming
        Register obj   = r0; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register obj_size = r2;
          Register t1       = r19;
          Register t2       = r4;
          assert_different_registers(klass, obj, obj_size, t1, t2);

          __ stp(r5, r19, Address(__ pre(sp, -2 * wordSize)));

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ ldrb(rscratch1, Address(klass, InstanceKlass::init_state_offset()));
            __ cmpw(rscratch1, InstanceKlass::fully_initialized);
            __ br(Assembler::NE, slow_path);
          }

#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));
            __ cmp(obj_size, 0u);
            __ br(Assembler::LE, not_ok);  // make sure it's an instance (LH > 0)
            __ tstw(obj_size, Klass::_lh_instance_slow_path_bit);
            __ br(Assembler::EQ, ok);
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy r3 (klass), returns r5

          __ bind(retry_tlab);

          // get the instance size (size is positive so ldrw does right thing on 64bit)
          __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ true);
          __ verify_oop(obj);
          __ ldp(r5, r19, Address(__ post(sp, 2 * wordSize)));
          __ ret(lr);

          __ bind(try_eden);
          // get the instance size (size is positive so ldrw does right thing on 64bit)
          __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ eden_allocate(obj, obj_size, 0, t1, slow_path);
          __ incr_allocated_bytes(rthread, obj_size, 0, rscratch1);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false);
          __ verify_oop(obj);
          __ ldp(r5, r19, Address(__ post(sp, 2 * wordSize)));
          __ ret(lr);

          __ bind(slow_path);
          __ ldp(r5, r19, Address(__ post(sp, 2 * wordSize)));
        }

        __ enter();
        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(lr);

        // r0: new instance
      }

      break;
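
    // Note on the fast path above: for an instance klass the layout helper is
    // simply the instance size in bytes (a positive value), with
    // _lh_instance_slow_path_bit set when allocation must take the slow path,
    // so obj_size can be fed straight to tlab_allocate/eden_allocate.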

    case counter_overflow_id:
      {
        Register bci = r0, method = r1;
        __ enter();
        OopMap* map = save_live_registers(sasm);
        // Retrieve bci
        __ ldrw(bci, Address(rfp, 2*BytesPerWord));
        // And a pointer to the Method*
        __ ldr(method, Address(rfp, 3*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(lr);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register length = r19; // Incoming
        Register klass  = r3;  // Incoming
        Register obj    = r0;  // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
          __ ldrw(t0, Address(klass, Klass::layout_helper_offset()));
          __ asrw(t0, t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ mov(rscratch1, tag);
          __ cmpw(t0, rscratch1);
          __ br(Assembler::EQ, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (UseTLAB && FastTLABRefill) {
          Register arr_size = r4;
          Register t1       = r2;
          Register t2       = r5;
          Label slow_path;
          assert_different_registers(length, klass, obj, arr_size, t1, t2);

          // check that array length is small enough for fast path.
          __ mov(rscratch1, C1_MacroAssembler::max_array_allocation_length);
          __ cmpw(length, rscratch1);
          __ br(Assembler::HI, slow_path);

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves r19 & r3, returns rthread

          __ bind(retry_tlab);

          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive ldrw does right thing on 64bit
          __ ldrw(t1, Address(klass, Klass::layout_helper_offset()));
          __ lslvw(arr_size, length, t1);
          __ ubfx(t1, t1, Klass::_lh_header_size_shift,
                  exact_log2(Klass::_lh_header_size_mask + 1));
          __ add(arr_size, arr_size, t1);
          __ add(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up
          __ andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);

          __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size

          __ initialize_header(obj, klass, length, t1, t2);
          __ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andr(t1, t1, Klass::_lh_header_size_mask);
          __ sub(arr_size, arr_size, t1);  // body length
          __ add(t1, t1, obj);             // body start
          if (!ZeroTLAB) {
            __ initialize_body(t1, arr_size, 0, t2);
          }
          __ verify_oop(obj);

          __ ret(lr);

          __ bind(try_eden);
          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive ldrw does right thing on 64bit
          __ ldrw(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive movw does right thing on 64bit
          __ movw(arr_size, length);
          __ lslvw(arr_size, length, t1);
          __ ubfx(t1, t1, Klass::_lh_header_size_shift,
                  exact_log2(Klass::_lh_header_size_mask + 1));
          __ add(arr_size, arr_size, t1);
          __ add(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up
          __ andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);

          __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
          __ incr_allocated_bytes(thread, arr_size, 0, rscratch1);

          __ initialize_header(obj, klass, length, t1, t2);
          __ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andr(t1, t1, Klass::_lh_header_size_mask);
          __ sub(arr_size, arr_size, t1);  // body length
          __ add(t1, t1, obj);             // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);

          __ ret(lr);

          __ bind(slow_path);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret(lr);

        // r0: new array
      }
      break;
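
    // Worked example of the size computation above, with hypothetical values:
    // for a type array whose layout helper encodes log2(element size) = 2 and
    // a 16-byte header, length = 10 gives arr_size = (10 << 2) + 16 = 56,
    // which is then rounded up to the object alignment.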

    case new_multi_array_id:
      { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // r0: klass
        // r19: rank
        // r2: address of 1st dimension
        OopMap* map = save_live_registers(sasm);
        __ mov(c_rarg1, r0);
        __ mov(c_rarg3, r2);
        __ mov(c_rarg2, r19);
        int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, new_multi_array), r1, r2, r3);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);

        // r0: new multi array
        __ verify_oop(r0);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime so the arguments
        // will be placed in C abi locations

        __ verify_oop(c_rarg0);

        // load the klass and check the has_finalizer flag
        Label register_finalizer;
        Register t = r5;
        __ load_klass(t, r0);
        __ ldrw(t, Address(t, Klass::access_flags_offset()));
        __ tbnz(t, exact_log2(JVM_ACC_HAS_FINALIZER), register_finalizer);
        __ ret(lr);

        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), r0);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ ret(lr);
      }
      break;

    case throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_change_error", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // Typical calling sequence:
        // __ push(klass_RInfo);  // object klass or other subclass
        // __ push(sup_k_RInfo);  // array element klass or other superclass
        // __ bl(slow_subtype_check);
        // Note that the subclass is pushed first, and is therefore deepest.
        enum layout {
          r0_off, r0_off_hi,
          r2_off, r2_off_hi,
          r4_off, r4_off_hi,
          r5_off, r5_off_hi,
          sup_k_off, sup_k_off_hi,
          klass_off, klass_off_hi,
          framesize,
          result_off = sup_k_off
        };

        __ set_info("slow_subtype_check", dont_gc_arguments);
        __ push(RegSet::of(r0, r2, r4, r5), sp);

        // This is called by pushing args and not with C abi
        // __ ldr(r4, Address(sp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
        // __ ldr(r0, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass

        __ ldp(r4, r0, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size));

        Label miss;
        __ check_klass_subtype_slow_path(r4, r0, r2, r5, NULL, &miss);

        // fallthrough on success:
        __ mov(rscratch1, 1);
        __ str(rscratch1, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result
        __ pop(RegSet::of(r0, r2, r4, r5), sp);
        __ ret(lr);

        __ bind(miss);
        __ str(zr, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result
        __ pop(RegSet::of(r0, r2, r4, r5), sp);
        __ ret(lr);
      }
      break;
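
    // On return from slow_subtype_check, the caller finds the 0/1 result in
    // the stack slot where it pushed the superclass (result_off aliases
    // sup_k_off above); the two words it pushed remain for it to pop.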

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(1, r0); // r0: object
        f.load_argument(0, r1); // r1: lock address

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), r0, r1);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(0, r0); // r0: lock address

        // note: really a leaf routine but must setup last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), r0);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;
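
    // The two monitor stubs above read their operands with
    // StubFrame::load_argument, which pairs with LIR_Assembler::store_parameter
    // in the caller: argument k is stored at rfp + (k + 2) * BytesPerWord, so
    // e.g. load_argument(1, r0) reads rfp + 24 on this 64-bit port.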

    case deoptimize_id:
      {
        StubFrame f(sasm, "deoptimize", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);
        f.load_argument(0, c_rarg1);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), c_rarg1);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ leave();
        __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    case throw_range_check_failed_id:
      { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // note: no stubframe since we are about to leave the current
        //       activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case access_field_patching_id:
      { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case throw_index_exception_id:
      { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

#if INCLUDE_ALL_GCS

    case g1_pre_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
        // arg0 : previous value of memory

        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1BarrierSet) {
          __ mov(r0, (int)id);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
          __ should_not_reach_here();
          break;
        }

        const Register pre_val = r0;
        const Register thread = rthread;
        const Register tmp = rscratch1;

        Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                             SATBMarkQueue::byte_offset_of_active()));

        Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                             SATBMarkQueue::byte_offset_of_index()));
        Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                        SATBMarkQueue::byte_offset_of_buf()));

        Label done;
        Label runtime;

        // Is marking still active?
        if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
          __ ldrw(tmp, in_progress);
        } else {
          assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
          __ ldrb(tmp, in_progress);
        }
        __ cbzw(tmp, done);

        // Can we store original value in the thread's buffer?
        __ ldr(tmp, queue_index);
        __ cbz(tmp, runtime);

        __ sub(tmp, tmp, wordSize);
        __ str(tmp, queue_index);
        __ ldr(rscratch2, buffer);
        __ add(tmp, tmp, rscratch2);
        f.load_argument(0, rscratch2);
        __ str(rscratch2, Address(tmp, 0));
        __ b(done);

        __ bind(runtime);
        __ push_call_clobbered_registers();
        f.load_argument(0, pre_val);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
        __ pop_call_clobbered_registers();
        __ bind(done);
      }
      break;
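
    // The fast path above is the standard SATB enqueue, roughly:
    //   if (!queue.active) return;              // marking not in progress
    //   if (queue.index == 0) call runtime;     // buffer is full
    //   else { queue.index -= wordSize; queue.buf[queue.index] = pre_val; }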

    case g1_post_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);

        // arg0: store_address
        Address store_addr(rfp, 2*BytesPerWord);

        BarrierSet* bs = Universe::heap()->barrier_set();
        CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
        CardTable* ct = ctbs->card_table();
        assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");

        Label done;
        Label runtime;

        // At this point we know new_value is non-NULL and the new_value crosses regions.
        // Must check to see if card is already dirty

        const Register thread = rthread;

        Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                             DirtyCardQueue::byte_offset_of_index()));
        Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                        DirtyCardQueue::byte_offset_of_buf()));

        const Register card_offset = rscratch2;
        // LR is free here, so we can use it to hold the byte_map_base.
        const Register byte_map_base = lr;

        assert_different_registers(card_offset, byte_map_base, rscratch1);

        f.load_argument(0, card_offset);
        __ lsr(card_offset, card_offset, CardTable::card_shift);
        __ load_byte_map_base(byte_map_base);
        __ ldrb(rscratch1, Address(byte_map_base, card_offset));
        __ cmpw(rscratch1, (int)G1CardTable::g1_young_card_val());
        __ br(Assembler::EQ, done);

        assert((int)CardTable::dirty_card_val() == 0, "must be 0");

        __ membar(Assembler::StoreLoad);
        __ ldrb(rscratch1, Address(byte_map_base, card_offset));
        __ cbzw(rscratch1, done);

        // storing region crossing non-NULL, card is clean.
        // dirty card and log.
        __ strb(zr, Address(byte_map_base, card_offset));

        // Convert card offset into an address in card_addr
        Register card_addr = card_offset;
        __ add(card_addr, byte_map_base, card_addr);

        __ ldr(rscratch1, queue_index);
        __ cbz(rscratch1, runtime);
        __ sub(rscratch1, rscratch1, wordSize);
        __ str(rscratch1, queue_index);

        // Reuse LR to hold buffer_addr
        const Register buffer_addr = lr;

        __ ldr(buffer_addr, buffer);
        __ str(card_addr, Address(buffer_addr, rscratch1));
        __ b(done);

        __ bind(runtime);
        __ push_call_clobbered_registers();
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
        __ pop_call_clobbered_registers();
        __ bind(done);

      }
      break;
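
    // Card-marking fast path above, in pseudo-code:
    //   card = byte_map_base + (store_addr >> card_shift);
    //   if (*card == g1_young_card_val) return;   // young regions need no mark
    //   StoreLoad barrier;
    //   if (*card == dirty_card_val) return;      // already dirtied
    //   *card = dirty_card_val; enqueue(card);    // buffer full -> runtime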
#endif

    case predicate_failed_trap_id:
      {
        StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);

        OopMap* map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");

        __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;


    default:
      { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
        __ mov(r0, (int)id);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
        __ should_not_reach_here();
      }
      break;
    }
  }
  return oop_maps;
}

#undef __

const char *Runtime1::pd_name_for_address(address entry) { Unimplemented(); return 0; }