/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_arm.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_arm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_arm.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif

// Note: Rtemp usage in this file should not impact C2 and should be
// correct as long as it is not implicitly used in lower layers (the
// arm [macro]assembler) and used with care in the other C1 specific
// files.
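
// Overview (added summary comment): StubAssembler::call_RT below is the
// common VM-entry path used by the stub generators in this file. It records
// the last Java frame, performs the runtime call, fetches any oop/metadata
// results from thread-local storage, and forwards a pending exception.
// A typical stub therefore follows this shape (illustrative sketch only;
// see the cases in Runtime1::generate_code_for for the real uses):
//
//   OopMap* map = save_live_registers(sasm);
//   int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, entry), R1);
//   oop_maps->add_gc_map(call_offset, map);
//   restore_live_registers(sasm);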

// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  mov(R0, Rthread);

  int call_offset = set_last_Java_frame(SP, FP, false, Rtemp);

  call(entry);
  if (call_offset == -1) { // PC not saved
    call_offset = offset();
  }
  reset_last_Java_frame(Rtemp);

  assert(frame_size() != no_frame_size, "frame must be fixed");
  if (_stub_id != Runtime1::forward_exception_id) {
    ldr(R3, Address(Rthread, Thread::pending_exception_offset()));
  }

  if (oop_result1->is_valid()) {
    assert_different_registers(oop_result1, R3, Rtemp);
    get_vm_result(oop_result1, Rtemp);
  }
  if (metadata_result->is_valid()) {
    assert_different_registers(metadata_result, R3, Rtemp);
    get_vm_result_2(metadata_result, Rtemp);
  }

  // Check for pending exception
  // unpack_with_exception_in_tls path is taken through
  // Runtime1::exception_handler_for_pc
  if (_stub_id != Runtime1::forward_exception_id) {
    assert(frame_size() != no_frame_size, "cannot directly call forward_exception_id");
#ifdef AARCH64
    Label skip;
    cbz(R3, skip);
    jump(Runtime1::entry_for(Runtime1::forward_exception_id), relocInfo::runtime_call_type, Rtemp);
    bind(skip);
#else
    cmp(R3, 0);
    jump(Runtime1::entry_for(Runtime1::forward_exception_id), relocInfo::runtime_call_type, Rtemp, ne);
#endif // AARCH64
  } else {
#ifdef ASSERT
    // Should not have a pending exception in the forward_exception stub
    ldr(R3, Address(Rthread, Thread::pending_exception_offset()));
    cmp(R3, 0);
    breakpoint(ne);
#endif // ASSERT
  }
  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  if (arg1 != R1) {
    mov(R1, arg1);
  }
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  assert(arg1 == R1 && arg2 == R2, "cannot handle otherwise");
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  assert(arg1 == R1 && arg2 == R2 && arg3 == R3, "cannot handle otherwise");
  return call_RT(oop_result1, metadata_result, entry, 3);
}


#define __ sasm->

// TODO: ARM - does this duplicate RegisterSaver in SharedRuntime?
#ifdef AARCH64

//
// On AArch64 the register save area has the following layout:
//
// |---------------------|
// | return address (LR) |
// | FP                  |
// |---------------------|
// | D31                 |
// | ...                 |
// | D0                  |
// |---------------------|
// | padding             |
// |---------------------|
// | R28                 |
// | ...                 |
// | R0                  |
// |---------------------| <-- SP
//

enum RegisterLayout {
  number_of_saved_gprs = 29,
  number_of_saved_fprs = FloatRegisterImpl::number_of_registers,

  R0_offset = 0,
  D0_offset = R0_offset + number_of_saved_gprs + 1,
  FP_offset = D0_offset + number_of_saved_fprs,
  LR_offset = FP_offset + 1,

  reg_save_size = LR_offset + 1,

  arg1_offset = reg_save_size * wordSize,
  arg2_offset = (reg_save_size + 1) * wordSize
};

#else

enum RegisterLayout {
  fpu_save_size = pd_nof_fpu_regs_reg_alloc,
#ifndef __SOFTFP__
  D0_offset = 0,
#endif
  R0_offset = fpu_save_size,
  R1_offset,
  R2_offset,
  R3_offset,
  R4_offset,
  R5_offset,
  R6_offset,
#if (FP_REG_NUM != 7)
  R7_offset,
#endif
  R8_offset,
  R9_offset,
  R10_offset,
#if (FP_REG_NUM != 11)
  R11_offset,
#endif
  R12_offset,
  FP_offset,
  LR_offset,
  reg_save_size,
  arg1_offset = reg_save_size * wordSize,
  arg2_offset = (reg_save_size + 1) * wordSize
};

#endif // AARCH64

static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers = HaveVFP) {
  sasm->set_frame_size(reg_save_size /* in words */);

  // Record saved value locations in an OopMap.
  // Locations are offsets from sp after runtime call.
  OopMap* map = new OopMap(VMRegImpl::slots_per_word * reg_save_size, 0);

#ifdef AARCH64
  for (int i = 0; i < number_of_saved_gprs; i++) {
    map->set_callee_saved(VMRegImpl::stack2reg((R0_offset + i) * VMRegImpl::slots_per_word), as_Register(i)->as_VMReg());
  }
  map->set_callee_saved(VMRegImpl::stack2reg(FP_offset * VMRegImpl::slots_per_word), FP->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(LR_offset * VMRegImpl::slots_per_word), LR->as_VMReg());

  if (save_fpu_registers) {
    for (int i = 0; i < number_of_saved_fprs; i++) {
      map->set_callee_saved(VMRegImpl::stack2reg((D0_offset + i) * VMRegImpl::slots_per_word), as_FloatRegister(i)->as_VMReg());
    }
  }
#else
  int j = 0;
  for (int i = R0_offset; i < R10_offset; i++) {
    if (j == FP_REG_NUM) {
      // skip the FP register, saved below
      j++;
    }
    map->set_callee_saved(VMRegImpl::stack2reg(i), as_Register(j)->as_VMReg());
    j++;
  }
  assert(j == R10->encoding(), "must be");
#if (FP_REG_NUM != 11)
  // add R11, if not saved as FP
  map->set_callee_saved(VMRegImpl::stack2reg(R11_offset), R11->as_VMReg());
#endif
  map->set_callee_saved(VMRegImpl::stack2reg(FP_offset), FP->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(LR_offset), LR->as_VMReg());

  if (save_fpu_registers) {
    for (int i = 0; i < fpu_save_size; i++) {
      map->set_callee_saved(VMRegImpl::stack2reg(i), as_FloatRegister(i)->as_VMReg());
    }
  }
#endif // AARCH64

  return map;
}
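
// Note (added summary comment): save_live_registers below and
// generate_oop_map above must agree on the frame layout described by the
// RegisterLayout enum: the OopMap records, for each saved register, the
// SP-relative slot it occupies after the pushes, so that the GC can find
// and update oops held in registers at the runtime-call safepoint.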
static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = HaveVFP) {
  __ block_comment("save_live_registers");
  sasm->set_frame_size(reg_save_size /* in words */);

#ifdef AARCH64
  assert((reg_save_size * wordSize) % StackAlignmentInBytes == 0, "SP should be aligned");

  __ raw_push(FP, LR);

  __ sub(SP, SP, (reg_save_size - 2) * wordSize);

  for (int i = 0; i < round_down(number_of_saved_gprs, 2); i += 2) {
    __ stp(as_Register(i), as_Register(i+1), Address(SP, (R0_offset + i) * wordSize));
  }

  if (is_odd(number_of_saved_gprs)) {
    int i = number_of_saved_gprs - 1;
    __ str(as_Register(i), Address(SP, (R0_offset + i) * wordSize));
  }

  if (save_fpu_registers) {
    assert (is_even(number_of_saved_fprs), "adjust this code");
    for (int i = 0; i < number_of_saved_fprs; i += 2) {
      __ stp_d(as_FloatRegister(i), as_FloatRegister(i+1), Address(SP, (D0_offset + i) * wordSize));
    }
  }
#else
  __ push(RegisterSet(FP) | RegisterSet(LR));
  __ push(RegisterSet(R0, R6) | RegisterSet(R8, R10) | R12 | altFP_7_11);
  if (save_fpu_registers) {
    __ fstmdbd(SP, FloatRegisterSet(D0, fpu_save_size / 2), writeback);
  } else {
    __ sub(SP, SP, fpu_save_size * wordSize);
  }
#endif // AARCH64

  return generate_oop_map(sasm, save_fpu_registers);
}


static void restore_live_registers(StubAssembler* sasm,
                                   bool restore_R0,
                                   bool restore_FP_LR,
                                   bool do_return,
                                   bool restore_fpu_registers = HaveVFP) {
  __ block_comment("restore_live_registers");

#ifdef AARCH64
  if (restore_R0) {
    __ ldr(R0, Address(SP, R0_offset * wordSize));
  }

  assert(is_odd(number_of_saved_gprs), "adjust this code");
  for (int i = 1; i < number_of_saved_gprs; i += 2) {
    __ ldp(as_Register(i), as_Register(i+1), Address(SP, (R0_offset + i) * wordSize));
  }

  if (restore_fpu_registers) {
    assert (is_even(number_of_saved_fprs), "adjust this code");
    for (int i = 0; i < number_of_saved_fprs; i += 2) {
      __ ldp_d(as_FloatRegister(i), as_FloatRegister(i+1), Address(SP, (D0_offset + i) * wordSize));
    }
  }

  __ add(SP, SP, (reg_save_size - 2) * wordSize);

  if (restore_FP_LR) {
    __ raw_pop(FP, LR);
    if (do_return) {
      __ ret();
    }
  } else {
    assert (!do_return, "return without restoring FP/LR");
  }
#else
  if (restore_fpu_registers) {
    __ fldmiad(SP, FloatRegisterSet(D0, fpu_save_size / 2), writeback);
    if (!restore_R0) {
      __ add(SP, SP, (R1_offset - fpu_save_size) * wordSize);
    }
  } else {
    __ add(SP, SP, (restore_R0 ? fpu_save_size : R1_offset) * wordSize);
  }
  __ pop(RegisterSet((restore_R0 ? R0 : R1), R6) | RegisterSet(R8, R10) | R12 | altFP_7_11);
  if (restore_FP_LR) {
    __ pop(RegisterSet(FP) | RegisterSet(do_return ? PC : LR));
  } else {
    assert (!do_return, "return without restoring FP/LR");
  }
#endif // AARCH64
}


static void restore_live_registers_except_R0(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  restore_live_registers(sasm, false, true, true, restore_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  restore_live_registers(sasm, true, true, true, restore_fpu_registers);
}

#ifndef AARCH64
static void restore_live_registers_except_FP_LR(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  restore_live_registers(sasm, true, false, false, restore_fpu_registers);
}
#endif // !AARCH64

static void restore_live_registers_without_return(StubAssembler* sasm, bool restore_fpu_registers = HaveVFP) {
  restore_live_registers(sasm, true, true, false, restore_fpu_registers);
}


void Runtime1::initialize_pd() {
  LIR_Assembler::exception_handler_size = AARCH64_ONLY(256) NOT_AARCH64(68);
#ifndef PRODUCT
  if (VerifyOops) LIR_Assembler::exception_handler_size += AARCH64_ONLY(216) NOT_AARCH64(60);
#endif // !PRODUCT
}


OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  OopMap* oop_map = save_live_registers(sasm);

  if (has_argument) {
    __ ldr(R1, Address(SP, arg1_offset));
  }

  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  DEBUG_ONLY(STOP("generate_exception_throw");)  // Should not reach here
  return oop_maps;
}


static void restore_sp_for_method_handle(StubAssembler* sasm) {
  // Restore SP from its saved reg (FP) if the exception PC is a MethodHandle call site.
  __ ldr_s32(Rtemp, Address(Rthread, JavaThread::is_method_handle_return_offset()));
#ifdef AARCH64
  Label skip;
  __ cbz(Rtemp, skip);
  __ mov(SP, Rmh_SP_save);
  __ bind(skip);
#else
  __ cmp(Rtemp, 0);
  __ mov(SP, Rmh_SP_save, ne);
#endif // AARCH64
}
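
// Note (added summary comment): generate_handle_exception below emits the
// bodies of the four exception-entry stubs. They differ only in which
// registers are still live on entry, and therefore in how much state must
// be saved; all of them patch the throwing pc into the frame's
// return-address slot and then continue at the handler returned by
// exception_handler_for_pc.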
OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
  __ block_comment("generate_handle_exception");

  bool save_fpu_registers = false;

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;

  switch (id) {
  case forward_exception_id: {
    save_fpu_registers = HaveVFP;
    oop_map = generate_oop_map(sasm);
    __ ldr(Rexception_obj, Address(Rthread, Thread::pending_exception_offset()));
    __ ldr(Rexception_pc, Address(SP, LR_offset * wordSize));
    Register zero = __ zero_register(Rtemp);
    __ str(zero, Address(Rthread, Thread::pending_exception_offset()));
    break;
  }
  case handle_exception_id:
    save_fpu_registers = HaveVFP;
    // fall-through
  case handle_exception_nofpu_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, save_fpu_registers);
    break;
  case handle_exception_from_callee_id:
    // At this point all registers except exception oop (R4/R19) and
    // exception pc (R5/R20) are dead.
    oop_map = save_live_registers(sasm);  // TODO it's not required to save all registers
    break;
  default:  ShouldNotReachHere();
  }

  __ str(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
  __ str(Rexception_pc, Address(Rthread, JavaThread::exception_pc_offset()));

  __ str(Rexception_pc, Address(SP, LR_offset * wordSize)); // patch throwing pc into return address

  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // Exception handler found
  __ str(R0, Address(SP, LR_offset * wordSize)); // patch the return address

  // Restore the registers that were saved at the beginning, remove
  // the frame and jump to the exception handler.
  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    restore_live_registers(sasm, save_fpu_registers);
    // Note: the register restore includes the jump to LR (patched to R0)
    break;
  case handle_exception_from_callee_id:
    restore_live_registers_without_return(sasm); // must not jump immediately to handler
    restore_sp_for_method_handle(sasm);
    __ ret();
    break;
  default:  ShouldNotReachHere();
  }

  DEBUG_ONLY(STOP("generate_handle_exception");)  // Should not reach here

  return oop_maps;
}


void Runtime1::generate_unwind_exception(StubAssembler* sasm) {
  // FP is no longer used to find the frame start;
  // on entry, remove_frame() has already been called (restoring FP and LR)

  // search for the exception handler address of the caller (using the return address)
  __ mov(c_rarg0, Rthread);
  __ mov(Rexception_pc, LR);
  __ mov(c_rarg1, LR);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), c_rarg0, c_rarg1);

  // The exception oop should still be in Rexception_obj and the pc in Rexception_pc
  // Jump to handler
  __ verify_not_null_oop(Rexception_obj);

  // JSR292 extension
  restore_sp_for_method_handle(sasm);

  __ jump(R0);
}
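
// Note (added summary comment): generate_patching below is the shared
// skeleton of the *_patching stubs: it saves all registers, calls the given
// runtime patching routine, and either returns normally or, if the nmethod
// was deoptimized while the runtime call was in progress (non-zero result
// in R0), tail-calls the deoptimization blob rather than returning into
// invalidated code.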
OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  OopMap* oop_map = save_live_registers(sasm);

  // call the runtime patching routine, returns non-zero if nmethod got deopted.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  __ cmp_32(R0, 0);

#ifdef AARCH64
  Label call_deopt;

  restore_live_registers_without_return(sasm);
  __ b(call_deopt, ne);
  __ ret();

  __ bind(call_deopt);
#else
  restore_live_registers_except_FP_LR(sasm);
  __ pop(RegisterSet(FP) | RegisterSet(PC), eq);

  // Deoptimization needed
  // TODO: ARM - no need to restore FP & LR because unpack_with_reexecution() stores them back
  __ pop(RegisterSet(FP) | RegisterSet(LR));
#endif // AARCH64

  __ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, Rtemp);

  DEBUG_ONLY(STOP("generate_patching");)  // Should not reach here
  return oop_maps;
}


OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  OopMapSet* oop_maps = NULL;
  bool save_fpu_registers = HaveVFP;

  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        // does not return on ARM
      }
      break;

#if INCLUDE_ALL_GCS
    case g1_pre_barrier_slow_id:
      {
        // Input:
        // - pre_val pushed on the stack

        __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);

        // save at least the registers that need saving if the runtime is called
#ifdef AARCH64
        __ raw_push(R0, R1);
        __ raw_push(R2, R3);
        const int nb_saved_regs = 4;
#else // AARCH64
        const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
        const int nb_saved_regs = 6;
        assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
        __ push(saved_regs);
#endif // AARCH64

        const Register r_pre_val_0 = R0; // must be R0, to be ready for the runtime call
        const Register r_index_1 = R1;
        const Register r_buffer_2 = R2;

        Address queue_index(Rthread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                              SATBMarkQueue::byte_offset_of_index()));
        Address buffer(Rthread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                         SATBMarkQueue::byte_offset_of_buf()));

        Label done;
        Label runtime;

        __ ldr(r_index_1, queue_index);
        __ ldr(r_pre_val_0, Address(SP, nb_saved_regs*wordSize));
        __ ldr(r_buffer_2, buffer);

        __ subs(r_index_1, r_index_1, wordSize);
        __ b(runtime, lt);

        __ str(r_index_1, queue_index);
        __ str(r_pre_val_0, Address(r_buffer_2, r_index_1));

        __ bind(done);

#ifdef AARCH64
        __ raw_pop(R2, R3);
        __ raw_pop(R0, R1);
#else // AARCH64
        __ pop(saved_regs);
#endif // AARCH64

        __ ret();

        __ bind(runtime);

        save_live_registers(sasm);

        assert(r_pre_val_0 == c_rarg0, "pre_val should be in R0");
        __ mov(c_rarg1, Rthread);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), c_rarg0, c_rarg1);

        restore_live_registers_without_return(sasm);

        __ b(done);
      }
      break;
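
    // Note (added summary comment): the post-barrier stub below filters in
    // stages before calling the runtime: a young-gen card needs no
    // remembered-set update; otherwise a StoreLoad membar orders the
    // preceding oop store against the card re-read, an already-dirty card
    // is skipped, and only a clean card is dirtied and enqueued on the
    // thread's DirtyCardQueue.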
    case g1_post_barrier_slow_id:
      {
        // Input:
        // - store_addr, pushed on the stack

        __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);

        BarrierSet* bs = Universe::heap()->barrier_set();
        CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
        Label done;
        Label recheck;
        Label runtime;

        Address queue_index(Rthread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                              DirtyCardQueue::byte_offset_of_index()));
        Address buffer(Rthread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                         DirtyCardQueue::byte_offset_of_buf()));

        AddressLiteral cardtable((address)ct->byte_map_base);
        assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

        // save at least the registers that need saving if the runtime is called
#ifdef AARCH64
        __ raw_push(R0, R1);
        __ raw_push(R2, R3);
        const int nb_saved_regs = 4;
#else // AARCH64
        const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
        const int nb_saved_regs = 6;
        assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
        __ push(saved_regs);
#endif // AARCH64

        const Register r_card_addr_0 = R0; // must be R0 for the slow case
        const Register r_obj_0 = R0;
        const Register r_card_base_1 = R1;
        const Register r_tmp2 = R2;
        const Register r_index_2 = R2;
        const Register r_buffer_3 = R3;
        const Register tmp1 = Rtemp;

        __ ldr(r_obj_0, Address(SP, nb_saved_regs*wordSize));
        // Note: there is a comment in x86 code about not using
        // ExternalAddress / lea, due to relocation not working
        // properly for that address. Should be OK for arm, where we
        // explicitly specify that 'cardtable' has a relocInfo::none
        // type.
        __ lea(r_card_base_1, cardtable);
        __ add(r_card_addr_0, r_card_base_1, AsmOperand(r_obj_0, lsr, CardTableModRefBS::card_shift));

        // first quick check without barrier
        __ ldrb(r_tmp2, Address(r_card_addr_0));

        __ cmp(r_tmp2, (int)G1SATBCardTableModRefBS::g1_young_card_val());
        __ b(recheck, ne);

        __ bind(done);

#ifdef AARCH64
        __ raw_pop(R2, R3);
        __ raw_pop(R0, R1);
#else // AARCH64
        __ pop(saved_regs);
#endif // AARCH64

        __ ret();

        __ bind(recheck);

        __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), tmp1);

        // reload card state after the barrier that ensures the stored oop was visible
        __ ldrb(r_tmp2, Address(r_card_addr_0));

        assert(CardTableModRefBS::dirty_card_val() == 0, "adjust this code");
        __ cbz(r_tmp2, done);

        // storing a region-crossing non-NULL oop and the card is clean:
        // dirty the card and log it.

        assert(0 == (int)CardTableModRefBS::dirty_card_val(), "adjust this code");
        if (((intptr_t)ct->byte_map_base & 0xff) == 0) {
          // Card table is aligned so the lowest byte of the table address base is zero.
          __ strb(r_card_base_1, Address(r_card_addr_0));
        } else {
          __ strb(__ zero_register(r_tmp2), Address(r_card_addr_0));
        }

        __ ldr(r_index_2, queue_index);
        __ ldr(r_buffer_3, buffer);

        __ subs(r_index_2, r_index_2, wordSize);
        __ b(runtime, lt); // go to runtime if now negative

        __ str(r_index_2, queue_index);

        __ str(r_card_addr_0, Address(r_buffer_3, r_index_2));

        __ b(done);

        __ bind(runtime);

        save_live_registers(sasm);

        assert(r_card_addr_0 == c_rarg0, "card_addr should be in R0");
        __ mov(c_rarg1, Rthread);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), c_rarg0, c_rarg1);

        restore_live_registers_without_return(sasm);

        __ b(done);
      }
      break;
#endif // INCLUDE_ALL_GCS
    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        const Register result = R0;
        const Register klass  = R1;

        if (UseTLAB && FastTLABRefill && id != new_instance_id) {
          // We come here when TLAB allocation failed.
          // In this case we either refill TLAB or allocate directly from eden.
          Label retry_tlab, try_eden, slow_case, slow_case_no_pop;

          // Make sure the class is fully initialized
          if (id == fast_new_instance_init_check_id) {
            __ ldrb(result, Address(klass, InstanceKlass::init_state_offset()));
            __ cmp(result, InstanceKlass::fully_initialized);
            __ b(slow_case_no_pop, ne);
          }

          // Free some temporary registers
          const Register obj_size = R4;
          const Register tmp1     = R5;
          const Register tmp2     = LR;
          const Register obj_end  = Rtemp;

          __ raw_push(R4, R5, LR);

          __ tlab_refill(result, obj_size, tmp1, tmp2, obj_end, try_eden, slow_case);

          __ bind(retry_tlab);
          __ ldr_u32(obj_size, Address(klass, Klass::layout_helper_offset()));
          __ tlab_allocate(result, obj_end, tmp1, obj_size, slow_case); // initializes result and obj_end
          __ initialize_object(result, obj_end, klass, noreg /* len */, tmp1, tmp2,
                               instanceOopDesc::header_size() * HeapWordSize, -1,
                               /* is_tlab_allocated */ true);
          __ raw_pop_and_ret(R4, R5);

          __ bind(try_eden);
          __ ldr_u32(obj_size, Address(klass, Klass::layout_helper_offset()));
          __ eden_allocate(result, obj_end, tmp1, tmp2, obj_size, slow_case); // initializes result and obj_end
          __ incr_allocated_bytes(obj_size, tmp2);
          __ initialize_object(result, obj_end, klass, noreg /* len */, tmp1, tmp2,
                               instanceOopDesc::header_size() * HeapWordSize, -1,
                               /* is_tlab_allocated */ false);
          __ raw_pop_and_ret(R4, R5);

          __ bind(slow_case);
          __ raw_pop(R4, R5, LR);

          __ bind(slow_case_no_pop);
        }

        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        // MacroAssembler::StoreStore useless (included in the runtime exit path)

        restore_live_registers_except_R0(sasm);
      }
      break;

    case counter_overflow_id:
      {
        OopMap* oop_map = save_live_registers(sasm);
        __ ldr(R1, Address(SP, arg1_offset));
        __ ldr(R2, Address(SP, arg2_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), R1, R2);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
      }
      break;
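
    // Note (added summary comment): the two array-allocation stubs below
    // share one body. When UseTLAB and FastTLABRefill allow it, they first
    // try a TLAB/eden fast path, computing the allocation size from the
    // klass layout helper, and fall back to the runtime allocators
    // new_type_array / new_object_array on any failure.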
    case new_type_array_id:
    case new_object_array_id:
      {
        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

        const Register result = R0;
        const Register klass  = R1;
        const Register length = R2;

        if (UseTLAB && FastTLABRefill) {
          // We come here when TLAB allocation failed.
          // In this case we either refill TLAB or allocate directly from eden.
          Label retry_tlab, try_eden, slow_case, slow_case_no_pop;

#ifdef AARCH64
          __ mov_slow(Rtemp, C1_MacroAssembler::max_array_allocation_length);
          __ cmp_32(length, Rtemp);
#else
          __ cmp_32(length, C1_MacroAssembler::max_array_allocation_length);
#endif // AARCH64
          __ b(slow_case_no_pop, hs);

          // Free some temporary registers
          const Register arr_size = R4;
          const Register tmp1     = R5;
          const Register tmp2     = LR;
          const Register tmp3     = Rtemp;
          const Register obj_end  = tmp3;

          __ raw_push(R4, R5, LR);

          __ tlab_refill(result, arr_size, tmp1, tmp2, tmp3, try_eden, slow_case);

          __ bind(retry_tlab);
          // Get the allocation size: round_up((length << (layout_helper & 0xff)) + header_size)
          __ ldr_u32(tmp1, Address(klass, Klass::layout_helper_offset()));
          __ mov(arr_size, MinObjAlignmentInBytesMask);
          __ and_32(tmp2, tmp1, (unsigned int)(Klass::_lh_header_size_mask << Klass::_lh_header_size_shift));

#ifdef AARCH64
          __ lslv_w(tmp3, length, tmp1);
          __ add(arr_size, arr_size, tmp3);
#else
          __ add(arr_size, arr_size, AsmOperand(length, lsl, tmp1));
#endif // AARCH64

          __ add(arr_size, arr_size, AsmOperand(tmp2, lsr, Klass::_lh_header_size_shift));
          __ align_reg(arr_size, arr_size, MinObjAlignmentInBytes);

          // tlab_allocate initializes result and obj_end, and preserves tmp2 which contains header_size
          __ tlab_allocate(result, obj_end, tmp1, arr_size, slow_case);

          assert_different_registers(result, obj_end, klass, length, tmp1, tmp2);
          __ initialize_header(result, klass, length, tmp1);

          __ add(tmp2, result, AsmOperand(tmp2, lsr, Klass::_lh_header_size_shift));
          if (!ZeroTLAB) {
            __ initialize_body(tmp2, obj_end, tmp1);
          }

          __ membar(MacroAssembler::StoreStore, tmp1);

          __ raw_pop_and_ret(R4, R5);

          __ bind(try_eden);
          // Get the allocation size: round_up((length << (layout_helper & 0xff)) + header_size)
          __ ldr_u32(tmp1, Address(klass, Klass::layout_helper_offset()));
          __ mov(arr_size, MinObjAlignmentInBytesMask);
          __ and_32(tmp2, tmp1, (unsigned int)(Klass::_lh_header_size_mask << Klass::_lh_header_size_shift));

#ifdef AARCH64
          __ lslv_w(tmp3, length, tmp1);
          __ add(arr_size, arr_size, tmp3);
#else
          __ add(arr_size, arr_size, AsmOperand(length, lsl, tmp1));
#endif // AARCH64

          __ add(arr_size, arr_size, AsmOperand(tmp2, lsr, Klass::_lh_header_size_shift));
          __ align_reg(arr_size, arr_size, MinObjAlignmentInBytes);

          // eden_allocate destroys tmp2, so reload header_size after allocation
          // eden_allocate initializes result and obj_end
          __ eden_allocate(result, obj_end, tmp1, tmp2, arr_size, slow_case);
          __ incr_allocated_bytes(arr_size, tmp2);
          __ ldrb(tmp2, Address(klass, in_bytes(Klass::layout_helper_offset()) +
                                Klass::_lh_header_size_shift / BitsPerByte));
          __ initialize_object(result, obj_end, klass, length, tmp1, tmp2, tmp2, -1, /* is_tlab_allocated */ false);
          __ raw_pop_and_ret(R4, R5);

          __ bind(slow_case);
          __ raw_pop(R4, R5, LR);
          __ bind(slow_case_no_pop);
        }
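
        // Slow path (added comment): every fast-path attempt above branched
        // here (or the fast path was not emitted at all); allocate through
        // the runtime with a GC map recorded at the call site.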
        OopMap* map = save_live_registers(sasm);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        // MacroAssembler::StoreStore useless (included in the runtime exit path)

        restore_live_registers_except_R0(sasm);
      }
      break;

    case new_multi_array_id:
      {
        __ set_info("new_multi_array", dont_gc_arguments);

        // R0: klass
        // R2: rank
        // SP: address of 1st dimension
        const Register result = R0;
        OopMap* map = save_live_registers(sasm);

        __ mov(R1, R0);
        __ add(R3, SP, arg1_offset);
        int call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_multi_array), R1, R2, R3);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        // MacroAssembler::StoreStore useless (included in the runtime exit path)

        restore_live_registers_except_R0(sasm);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // Do not call runtime if JVM_ACC_HAS_FINALIZER flag is not set
        __ load_klass(Rtemp, R0);
        __ ldr_u32(Rtemp, Address(Rtemp, Klass::access_flags_offset()));

#ifdef AARCH64
        Label L;
        __ tbnz(Rtemp, exact_log2(JVM_ACC_HAS_FINALIZER), L);
        __ ret();
        __ bind(L);
#else
        __ tst(Rtemp, JVM_ACC_HAS_FINALIZER);
        __ bx(LR, eq);
#endif // AARCH64

        // Call VM
        OopMap* map = save_live_registers(sasm);
        oop_maps = new OopMapSet();
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), R0);
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
      }
      break;

    case throw_range_check_failed_id:
      {
        __ set_info("range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      {
        __ set_info("index_range_check_failed", dont_gc_arguments);
#ifdef AARCH64
        __ NOT_TESTED();
#endif
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      {
        __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      {
        __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      {
        __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      {
        __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      {
        __ set_info("unwind_exception", dont_gc_arguments);
        generate_unwind_exception(sasm);
      }
      break;

    case throw_array_store_exception_id:
      {
        __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      {
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      {
        __ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments);
#ifdef AARCH64
        __ NOT_TESTED();
#endif
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // (in)  R0 - sub, destroyed
        // (in)  R1 - super, not changed
        // (out) R0 - result: 1 if check passed, 0 otherwise
        __ raw_push(R2, R3, LR);

        // Load the array of secondary_supers
        __ ldr(R2, Address(R0, Klass::secondary_supers_offset()));
        // Length goes to R3
        __ ldr_s32(R3, Address(R2, Array<Klass*>::length_offset_in_bytes()));
        __ add(R2, R2, Array<Klass*>::base_offset_in_bytes());

        Label loop, miss;
        __ bind(loop);
        __ cbz(R3, miss);
        __ ldr(LR, Address(R2, wordSize, post_indexed));
        __ sub(R3, R3, 1);
        __ cmp(LR, R1);
        __ b(loop, ne);

        // We get here if an equal cache entry is found
        __ str(R1, Address(R0, Klass::secondary_super_cache_offset()));
        __ mov(R0, 1);
        __ raw_pop_and_ret(R2, R3);

        // A cache entry was not found - return false
        __ bind(miss);
        __ mov(R0, 0);
        __ raw_pop_and_ret(R2, R3);
      }
      break;

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        __ set_info("monitorenter", dont_gc_arguments);
        const Register obj  = R1;
        const Register lock = R2;
        OopMap* map = save_live_registers(sasm, save_fpu_registers);
        __ ldr(obj, Address(SP, arg1_offset));
        __ ldr(lock, Address(SP, arg2_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), obj, lock);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        __ set_info("monitorexit", dont_gc_arguments);
        const Register lock = R1;
        OopMap* map = save_live_registers(sasm, save_fpu_registers);
        __ ldr(lock, Address(SP, arg1_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), lock);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case deoptimize_id:
      {
        __ set_info("deoptimize", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);
        const Register trap_request = R1;
        __ ldr(trap_request, Address(SP, arg1_offset));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), trap_request);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
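        // Note (added comment): do not return to the compiled frame here;
        // restore the saved registers without returning and jump to
        // unpack_with_reexecution(), which builds interpreter frames and
        // re-executes the bytecode that triggered the trap.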
        restore_live_registers_without_return(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, AARCH64_ONLY(Rtemp) NOT_AARCH64(noreg));
      }
      break;

    case access_field_patching_id:
      {
        __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      {
        __ set_info("load_klass_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_appendix_patching_id:
      {
        __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case load_mirror_patching_id:
      {
        __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);

        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        restore_live_registers_without_return(sasm);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ jump(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type, Rtemp);
      }
      break;

    default:
      {
        __ set_info("unimplemented entry", dont_gc_arguments);
        STOP("unimplemented entry");
      }
      break;
  }
  return oop_maps;
}

#undef __

#ifdef __SOFTFP__
const char *Runtime1::pd_name_for_address(address entry) {

#define FUNCTION_CASE(a, f) \
  if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f

  FUNCTION_CASE(entry, __aeabi_fadd_glibc);
  FUNCTION_CASE(entry, __aeabi_fmul);
  FUNCTION_CASE(entry, __aeabi_fsub_glibc);
  FUNCTION_CASE(entry, __aeabi_fdiv);

  // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269.
  FUNCTION_CASE(entry, __aeabi_dadd_glibc);
  FUNCTION_CASE(entry, __aeabi_dmul);
  FUNCTION_CASE(entry, __aeabi_dsub_glibc);
  FUNCTION_CASE(entry, __aeabi_ddiv);

  FUNCTION_CASE(entry, __aeabi_f2d);
  FUNCTION_CASE(entry, __aeabi_d2f);
  FUNCTION_CASE(entry, __aeabi_i2f);
  FUNCTION_CASE(entry, __aeabi_i2d);
  FUNCTION_CASE(entry, __aeabi_f2iz);

  FUNCTION_CASE(entry, SharedRuntime::fcmpl);
  FUNCTION_CASE(entry, SharedRuntime::fcmpg);
  FUNCTION_CASE(entry, SharedRuntime::dcmpl);
  FUNCTION_CASE(entry, SharedRuntime::dcmpg);

  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmplt);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmplt);
  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmple);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmple);
  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmpge);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmpge);
  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmpgt);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmpgt);

  FUNCTION_CASE(entry, SharedRuntime::fneg);
  FUNCTION_CASE(entry, SharedRuntime::dneg);

  FUNCTION_CASE(entry, __aeabi_fcmpeq);
  FUNCTION_CASE(entry, __aeabi_fcmplt);
  FUNCTION_CASE(entry, __aeabi_fcmple);
  FUNCTION_CASE(entry, __aeabi_fcmpge);
  FUNCTION_CASE(entry, __aeabi_fcmpgt);

  FUNCTION_CASE(entry, __aeabi_dcmpeq);
  FUNCTION_CASE(entry, __aeabi_dcmplt);
  FUNCTION_CASE(entry, __aeabi_dcmple);
  FUNCTION_CASE(entry, __aeabi_dcmpge);
  FUNCTION_CASE(entry, __aeabi_dcmpgt);
#undef FUNCTION_CASE
  return "";
}
#else  // __SOFTFP__
const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}
#endif // __SOFTFP__