/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_AARCH64_NATIVEINST_AARCH64_HPP
#define CPU_AARCH64_NATIVEINST_AARCH64_HPP

#include "asm/assembler.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"

// We have interfaces for the following instructions:
// - NativeInstruction
// - - NativePltCall
// - - NativeCall
// - - NativeMovConstReg
// - - NativeMovConstRegPatching
// - - NativeMovRegMem
// - - NativeMovRegMemPatching
// - - NativeLoadAddress
// - - NativeLoadGot
// - - NativeJump
// - - NativeGeneralJump
// - - NativeGotJump
// - - NativePopReg
// - - NativeIllegalInstruction
// - - NativeReturn
// - - NativeReturnX (return with argument)
// - - NativeTstRegMem
// - - NativeCallTrampolineStub
// - - NativeMembar
// - - NativeLdSt

// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this
// instruction.

class NativeCall;

class NativeInstruction {
  friend class Relocation;
  friend bool is_NativeCallTrampolineStub_at(address);
 public:
  enum {
    instruction_size = 4
  };

  juint encoding() const {
    return uint_at(0);
  }

  bool is_blr() const { return (encoding() & 0xff9ffc1f) == 0xd61f0000; } // blr(register) or br(register)
  bool is_adr_aligned() const { return (encoding() & 0xff000000) == 0x10000000; } // adr Xn, <label>, where label is aligned to 4 bytes (address of instruction).
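  // A note on the two masks above (a reading aid, derived from the ARMv8
  // encodings): BR Xn encodes as 0xd61f0000 | (Rn << 5) and BLR Xn as
  // 0xd63f0000 | (Rn << 5). The mask 0xff9ffc1f zeroes the Rn field
  // (bits 9:5) and the opc bits that distinguish BR from BLR, so is_blr()
  // accepts either form with any register (RET, which differs only in opc,
  // matches as well). For is_adr_aligned(), requiring bits 31:24 == 0x10
  // selects ADR (not ADRP) with immlo == 00, i.e. a 4-byte-aligned target.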

  inline bool is_nop();
  inline bool is_illegal();
  inline bool is_return();
  bool is_jump();
  bool is_general_jump();
  inline bool is_jump_or_nop();
  inline bool is_cond_jump();
  bool is_safepoint_poll();
  bool is_movz();
  bool is_movk();
  bool is_sigill_zombie_not_entrant();

 protected:
  address addr_at(int offset) const { return address(this) + offset; }

  s_char sbyte_at(int offset) const { return *(s_char*) addr_at(offset); }
  u_char ubyte_at(int offset) const { return *(u_char*) addr_at(offset); }

  jint int_at(int offset) const { return *(jint*) addr_at(offset); }
  juint uint_at(int offset) const { return *(juint*) addr_at(offset); }

  address ptr_at(int offset) const { return *(address*) addr_at(offset); }

  oop oop_at (int offset) const { return *(oop*) addr_at(offset); }


  void set_char_at(int offset, char c) { *addr_at(offset) = (u_char)c; }
  void set_int_at(int offset, jint i) { *(jint*)addr_at(offset) = i; }
  void set_uint_at(int offset, jint i) { *(juint*)addr_at(offset) = i; }
  void set_ptr_at (int offset, address ptr) { *(address*) addr_at(offset) = ptr; }
  void set_oop_at (int offset, oop o) { *(oop*) addr_at(offset) = o; }

  void wrote(int offset);

 public:

  // unit test stuff
  static void test() {} // override for testing

  inline friend NativeInstruction* nativeInstruction_at(address address);

  static bool is_adrp_at(address instr);

  static bool is_ldr_literal_at(address instr);

  bool is_ldr_literal() {
    return is_ldr_literal_at(addr_at(0));
  }

  static bool is_ldrw_to_zr(address instr);

  static bool is_call_at(address instr) {
    const uint32_t insn = (*(uint32_t*)instr);
    return (insn >> 26) == 0b100101; // opcode of the BL instruction
  }

  bool is_call() {
    return is_call_at(addr_at(0));
  }

  static bool maybe_cpool_ref(address instr) {
    return is_adrp_at(instr) || is_ldr_literal_at(instr);
  }

  // Matches DMB; the barrier kind lives in the CRm field (bits 11:8),
  // cf. NativeMembar below.
  bool is_Membar() {
    unsigned int insn = uint_at(0);
    return Instruction_aarch64::extract(insn, 31, 12) == 0b11010101000000110011 &&
           Instruction_aarch64::extract(insn, 7, 0) == 0b10111111;
  }

  bool is_Imm_LdSt() {
    unsigned int insn = uint_at(0);
    return Instruction_aarch64::extract(insn, 29, 27) == 0b111 &&
           Instruction_aarch64::extract(insn, 23, 23) == 0b0 &&
           Instruction_aarch64::extract(insn, 26, 25) == 0b00;
  }
};

inline NativeInstruction* nativeInstruction_at(address address) {
  return (NativeInstruction*)address;
}

// The natural type of an AArch64 instruction is uint32_t
inline NativeInstruction* nativeInstruction_at(uint32_t *address) {
  return (NativeInstruction*)address;
}

class NativePltCall: public NativeInstruction {
 public:
  enum Arm_specific_constants {
    instruction_size = 4,
    instruction_offset = 0,
    displacement_offset = 1,
    return_address_offset = 4
  };
  address instruction_address() const { return addr_at(instruction_offset); }
  address next_instruction_address() const { return addr_at(return_address_offset); }
  address displacement_address() const { return addr_at(displacement_offset); }
  int displacement() const { return (jint) int_at(displacement_offset); }
  address return_address() const { return addr_at(return_address_offset); }
  address destination() const;
  address plt_entry() const;
  address plt_jump() const;
  address plt_load_got() const;
  address plt_resolve_call() const;
  address plt_c2i_stub() const;
  void set_stub_to_clean();

  void reset_to_plt_resolve_call();
  void set_destination_mt_safe(address dest);

  void verify() const;
};

inline NativePltCall* nativePltCall_at(address address) {
  NativePltCall* call = (NativePltCall*) address;
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

inline NativePltCall* nativePltCall_before(address addr) {
  address at = addr - NativePltCall::instruction_size;
  return nativePltCall_at(at);
}

inline NativeCall* nativeCall_at(address address);
// The NativeCall is an abstraction for accessing/manipulating native
// call instructions (used to manipulate inline caches, primitive &
// DSO calls, etc.).

class NativeCall: public NativeInstruction {
 public:
  enum Aarch64_specific_constants {
    instruction_size = 4,
    instruction_offset = 0,
    displacement_offset = 0,
    return_address_offset = 4
  };

  address instruction_address() const { return addr_at(instruction_offset); }
  address next_instruction_address() const { return addr_at(return_address_offset); }
  // Sign-extend the 26-bit imm26 field of the BL instruction and scale it by
  // the 4-byte instruction size: << 6 moves the sign bit of imm26 into bit 31,
  // and the arithmetic >> 4 leaves a net shift left of 2.
  int displacement() const { return (int_at(displacement_offset) << 6) >> 4; }
  address displacement_address() const { return addr_at(displacement_offset); }
  address return_address() const { return addr_at(return_address_offset); }
  address destination() const;

  void set_destination(address dest) {
    int offset = dest - instruction_address();
    unsigned int insn = 0b100101 << 26; // opcode of the BL instruction
    assert((offset & 3) == 0, "should be");
    offset >>= 2;
    offset &= (1 << 26) - 1; // mask off insn part
    insn |= offset;
    set_int_at(displacement_offset, insn);
  }

  void verify_alignment() { ; }
  void verify();
  void print();

  // Creation
  inline friend NativeCall* nativeCall_at(address address);
  inline friend NativeCall* nativeCall_before(address return_address);

  static bool is_call_before(address return_address) {
    return is_call_at(return_address - NativeCall::return_address_offset);
  }

#if INCLUDE_AOT
  // Return true iff a call from instr to target is out of range.
  // Used for calls from JIT- to AOT-compiled code.
  static bool is_far_call(address instr, address target) {
    // On AArch64 we use trampolines which can reach anywhere in the
    // address space, so calls are never out of range.
    return false;
  }
#endif

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry);

  static void replace_mt_safe(address instr_addr, address code_buffer);

  // Similar to replace_mt_safe, but just changes the destination. The
  // important thing is that free-running threads are able to execute
  // this call instruction at all times. If the call is an immediate BL
  // instruction we can simply rely on atomicity of 32-bit writes to
  // make sure other threads will see no intermediate states.

  // We cannot rely on locks here, since the free-running threads must run at
  // full speed.
  //
  // Used in the runtime linkage of calls; see class CompiledIC.
  // (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)

  // The parameter assert_lock disables the assertion during code generation.
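  //
  // An illustrative (hypothetical) call site; the real callers live in the
  // CompiledIC machinery:
  //   NativeCall* call = nativeCall_before(return_pc);
  //   call->set_destination_mt_safe(resolved_entry);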
  void set_destination_mt_safe(address dest, bool assert_lock = true);

  address get_trampoline();
  address trampoline_jump(CodeBuffer &cbuf, address dest);
};

inline NativeCall* nativeCall_at(address address) {
  NativeCall* call = (NativeCall*)(address - NativeCall::instruction_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

inline NativeCall* nativeCall_before(address return_address) {
  NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

// An interface for accessing/manipulating native mov reg, imm instructions.
// (used to manipulate inlined 64-bit data calls, etc.)
class NativeMovConstReg: public NativeInstruction {
 public:
  enum Aarch64_specific_constants {
    instruction_size = 3 * 4, // movz, movk, movk. See movptr().
    instruction_offset = 0,
    displacement_offset = 0,
  };

  address instruction_address() const { return addr_at(instruction_offset); }
  address next_instruction_address() const {
    if (nativeInstruction_at(instruction_address())->is_movz())
      // Assume movz, movk, movk
      return addr_at(instruction_size);
    else if (is_adrp_at(instruction_address()))
      return addr_at(2*4);
    else if (is_ldr_literal_at(instruction_address()))
      return addr_at(4);
    assert(false, "Unknown instruction in NativeMovConstReg");
    return NULL;
  }

  intptr_t data() const;
  void set_data(intptr_t x);

  void flush() {
    if (! maybe_cpool_ref(instruction_address())) {
      ICache::invalidate_range(instruction_address(), instruction_size);
    }
  }

  void verify();
  void print();

  // unit test stuff
  static void test() {}

  // Creation
  inline friend NativeMovConstReg* nativeMovConstReg_at(address address);
  inline friend NativeMovConstReg* nativeMovConstReg_before(address address);
};

inline NativeMovConstReg* nativeMovConstReg_at(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

inline NativeMovConstReg* nativeMovConstReg_before(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

class NativeMovConstRegPatching: public NativeMovConstReg {
 private:
  friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_offset);
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }
};

// An interface for accessing/manipulating native moves of the form:
//      mov[b/w/l/q] [reg + offset], reg   (instruction_code_reg2mem)
//      mov[b/w/l/q] reg, [reg+offset]     (instruction_code_mem2reg)
//      mov[s/z]x[w/b/q] [reg + offset], reg
//      fld_s  [reg+offset]
//      fld_d  [reg+offset]
//      fstp_s [reg + offset]
//      fstp_d [reg + offset]
//      mov_literal64 scratch,<pointer> ; mov[b/w/l/q] 0(scratch),reg | mov[b/w/l/q] reg,0(scratch)
//
// Warning: These routines must be able to handle any instruction sequences
// that are generated as a result of the load/store byte,word,long
// macros.
// For example: the load_unsigned_byte macro generates
// an xor reg,reg inst prior to generating the movb instruction. This
// class must skip the xor instruction.

class NativeMovRegMem: public NativeInstruction {
  enum AArch64_specific_constants {
    instruction_size = 4,
    instruction_offset = 0,
    data_offset = 0,
    next_instruction_offset = 4
  };

 public:
  // helper
  int instruction_start() const { return instruction_offset; }

  address instruction_address() const { return addr_at(instruction_offset); }

  int num_bytes_to_end_of_patch() const { return instruction_offset + instruction_size; }

  int offset() const;

  void set_offset(int x);

  void add_offset_in_bytes(int add_offset) { set_offset(offset() + add_offset); }

  void verify();
  void print();

  // unit test stuff
  static void test() {}

 private:
  inline friend NativeMovRegMem* nativeMovRegMem_at(address address);
};

inline NativeMovRegMem* nativeMovRegMem_at(address address) {
  NativeMovRegMem* test = (NativeMovRegMem*)(address - NativeMovRegMem::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

class NativeMovRegMemPatching: public NativeMovRegMem {
 private:
  friend NativeMovRegMemPatching* nativeMovRegMemPatching_at(address address) { Unimplemented(); return 0; }
};

// An interface for accessing/manipulating native load-address instructions
// of the form:
//      leal reg, [reg + offset]

class NativeLoadAddress: public NativeInstruction {
  enum AArch64_specific_constants {
    instruction_size = 4,
    instruction_offset = 0,
    data_offset = 0,
    next_instruction_offset = 4
  };

 public:
  void verify();
  void print();

  // unit test stuff
  static void test() {}
};

//   adrp x16, #page
//   add  x16, x16, #offset
//   ldr  x16, [x16]
class NativeLoadGot: public NativeInstruction {
 public:
  enum AArch64_specific_constants {
    instruction_length = 4 * NativeInstruction::instruction_size,
    offset_offset = 0,
  };

  address instruction_address() const { return addr_at(0); }
  address return_address() const { return addr_at(instruction_length); }
  address got_address() const;
  address next_instruction_address() const { return return_address(); }
  intptr_t data() const;
  void set_data(intptr_t data) {
    intptr_t *addr = (intptr_t *) got_address();
    *addr = data;
  }

  void verify() const;
 private:
  void report_and_fail() const;
};

inline NativeLoadGot* nativeLoadGot_at(address addr) {
  NativeLoadGot* load = (NativeLoadGot*) addr;
#ifdef ASSERT
  load->verify();
#endif
  return load;
}

class NativeJump: public NativeInstruction {
 public:
  enum AArch64_specific_constants {
    instruction_size = 4,
    instruction_offset = 0,
    data_offset = 0,
    next_instruction_offset = 4
  };

  address instruction_address() const { return addr_at(instruction_offset); }
  address next_instruction_address() const { return addr_at(instruction_size); }
  address jump_destination() const;
  void set_jump_destination(address dest);

  // Creation
  inline friend NativeJump* nativeJump_at(address address);

  void verify();

  // Unit testing stuff
  static void test() {}

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry);
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};

inline NativeJump* nativeJump_at(address address) {
  NativeJump* jump = (NativeJump*)(address - NativeJump::instruction_offset);
#ifdef ASSERT
  jump->verify();
#endif
  return jump;
}

class NativeGeneralJump: public NativeJump {
 public:
  enum AArch64_specific_constants {
    instruction_size = 4 * 4,
    instruction_offset = 0,
    data_offset = 0,
    next_instruction_offset = 4 * 4
  };

  address jump_destination() const;
  void set_jump_destination(address dest);

  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);
  static void verify();
};

inline NativeGeneralJump* nativeGeneralJump_at(address address) {
  NativeGeneralJump* jump = (NativeGeneralJump*)(address);
  debug_only(jump->verify();)
  return jump;
}

class NativeGotJump: public NativeInstruction {
 public:
  enum AArch64_specific_constants {
    instruction_size = 4 * NativeInstruction::instruction_size,
  };

  void verify() const;
  address instruction_address() const { return addr_at(0); }
  address destination() const;
  address return_address() const { return addr_at(instruction_size); }
  address got_address() const;
  address next_instruction_address() const { return addr_at(instruction_size); }
  bool is_GotJump() const;

  void set_jump_destination(address dest) {
    address* got = (address *)got_address();
    *got = dest;
  }
};

inline NativeGotJump* nativeGotJump_at(address addr) {
  NativeGotJump* jump = (NativeGotJump*)(addr);
  return jump;
}

class NativePopReg : public NativeInstruction {
 public:
  // Insert a pop instruction
  static void insert(address code_pos, Register reg);
};


class NativeIllegalInstruction: public NativeInstruction {
 public:
  // Insert an illegal opcode at a specific address
  static void insert(address code_pos);
};

// return instruction that does not pop values off the stack
class NativeReturn: public NativeInstruction {
 public:
};

// return instruction that does pop values off the stack
class NativeReturnX: public NativeInstruction {
 public:
};

// Simple test vs memory
class NativeTstRegMem: public NativeInstruction {
 public:
};

inline bool NativeInstruction::is_nop() {
  uint32_t insn = *(uint32_t*)addr_at(0);
  return insn == 0xd503201f;
}

inline bool NativeInstruction::is_jump() {
  uint32_t insn = *(uint32_t*)addr_at(0);

  if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) {
    // Unconditional branch (immediate)
    return true;
  } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) {
    // Conditional branch (immediate)
    return true;
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) {
    // Compare & branch (immediate)
    return true;
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) {
    // Test & branch (immediate)
    return true;
  } else
    return false;
}

inline bool NativeInstruction::is_jump_or_nop() {
  return is_nop() || is_jump();
}

// Call trampoline stubs.
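//
// A trampoline stub has the following layout (summarized from the accessors
// below and the encoding check in is_NativeCallTrampolineStub_at):
//
//   ldr  xscratch1, L   // load the 64-bit destination stored at L
//   br   xscratch1
//   L:   .quad <destination>
//
// data_offset is the offset of L; because the destination is a full 64-bit
// literal, the stub can reach anywhere in the address space.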
class NativeCallTrampolineStub : public NativeInstruction {
 public:

  enum AArch64_specific_constants {
    instruction_size = 4 * 4,
    instruction_offset = 0,
    data_offset = 2 * 4,
    next_instruction_offset = 4 * 4
  };

  address destination(nmethod *nm = NULL) const;
  void set_destination(address new_destination);
  ptrdiff_t destination_offset() const;
};

inline bool is_NativeCallTrampolineStub_at(address addr) {
  // Ensure that the stub is exactly
  //      ldr   xscratch1, L
  //      br    xscratch1
  // L:
  uint32_t *i = (uint32_t *)addr;
  return i[0] == 0x58000048    // ldr x8, #8  (x8 is rscratch1)
      && i[1] == 0xd61f0100;   // br  x8
}

inline NativeCallTrampolineStub* nativeCallTrampolineStub_at(address addr) {
  assert(is_NativeCallTrampolineStub_at(addr), "no call trampoline found");
  return (NativeCallTrampolineStub*)addr;
}

class NativeMembar : public NativeInstruction {
 public:
  // The barrier kind is the CRm field (bits 11:8) of the DMB instruction.
  unsigned int get_kind() { return Instruction_aarch64::extract(uint_at(0), 11, 8); }
  void set_kind(int order_kind) { Instruction_aarch64::patch(addr_at(0), 11, 8, order_kind); }
};

inline NativeMembar *NativeMembar_at(address addr) {
  assert(nativeInstruction_at(addr)->is_Membar(), "no membar found");
  return (NativeMembar*)addr;
}

class NativeLdSt : public NativeInstruction {
 private:
  int32_t size() { return Instruction_aarch64::extract(uint_at(0), 31, 30); }
  // Check whether instruction is with unscaled offset.
  bool is_ldst_ur() {
    return (Instruction_aarch64::extract(uint_at(0), 29, 21) == 0b111000010 ||
            Instruction_aarch64::extract(uint_at(0), 29, 21) == 0b111000000) &&
           Instruction_aarch64::extract(uint_at(0), 11, 10) == 0b00;
  }
  bool is_ldst_unsigned_offset() {
    return Instruction_aarch64::extract(uint_at(0), 29, 22) == 0b11100101 ||
           Instruction_aarch64::extract(uint_at(0), 29, 22) == 0b11100100;
  }
 public:
  Register target() {
    uint32_t r = Instruction_aarch64::extract(uint_at(0), 4, 0);
    return r == 0x1f ? zr : as_Register(r);
  }
  Register base() {
    uint32_t b = Instruction_aarch64::extract(uint_at(0), 9, 5);
    return b == 0x1f ? sp : as_Register(b);
  }
  int64_t offset() {
    if (is_ldst_ur()) {
      return Instruction_aarch64::sextract(uint_at(0), 20, 12);
    } else if (is_ldst_unsigned_offset()) {
      return Instruction_aarch64::extract(uint_at(0), 21, 10) << size();
    } else {
      // others like: pre-index or post-index.
      ShouldNotReachHere();
      return 0;
    }
  }
  size_t size_in_bytes() { return 1 << size(); }
  bool is_not_pre_post_index() { return (is_ldst_ur() || is_ldst_unsigned_offset()); }
  bool is_load() {
    assert(Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b01 ||
           Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b00, "must be ldr or str");

    return Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b01;
  }
  bool is_store() {
    assert(Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b01 ||
           Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b00, "must be ldr or str");

    return Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b00;
  }
};

inline NativeLdSt *NativeLdSt_at(address addr) {
  assert(nativeInstruction_at(addr)->is_Imm_LdSt(), "no immediate load/store found");
  return (NativeLdSt*)addr;
}
#endif // CPU_AARCH64_NATIVEINST_AARCH64_HPP