/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP
#define CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP

#include "asm/assembler.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"

// We have interfaces for the following instructions:
// - NativeInstruction
// - - NativeCall
// - - NativeMovConstReg
// - - NativeMovConstRegPatching
// - - NativeMovRegMem
// - - NativeMovRegMemPatching
// - - NativeJump
// - - NativeIllegalOpCode
// - - NativeGeneralJump
// - - NativeReturn
// - - NativeReturnX (return with argument)
// - - NativePushConst
// - - NativeTstRegMem

// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this.
50 51 class NativeInstruction { 52 friend class Relocation; 53 friend bool is_NativeCallTrampolineStub_at(address); 54 public: 55 enum { 56 instruction_size = 4 57 }; 58 59 juint encoding() const { 60 return uint_at(0); 61 } 62 63 bool is_blr() const { return (encoding() & 0xff9ffc1f) == 0xd61f0000; } // blr(register) or br(register) 64 bool is_adr_aligned() const { return (encoding() & 0xff000000) == 0x10000000; } // adr Xn, <label>, where label is aligned to 4 bytes (address of instruction). 65 66 inline bool is_nop(); 67 inline bool is_illegal(); 68 inline bool is_return(); 69 bool is_jump(); 70 bool is_general_jump(); 71 inline bool is_jump_or_nop(); 72 inline bool is_cond_jump(); 73 bool is_safepoint_poll(); 74 bool is_movz(); 75 bool is_movk(); 76 bool is_sigill_zombie_not_entrant(); 77 78 protected: 79 address addr_at(int offset) const { return address(this) + offset; } 80 81 s_char sbyte_at(int offset) const { return *(s_char*) addr_at(offset); } 82 u_char ubyte_at(int offset) const { return *(u_char*) addr_at(offset); } 83 84 jint int_at(int offset) const { return *(jint*) addr_at(offset); } 85 juint uint_at(int offset) const { return *(juint*) addr_at(offset); } 86 87 address ptr_at(int offset) const { return *(address*) addr_at(offset); } 88 89 oop oop_at (int offset) const { return *(oop*) addr_at(offset); } 90 91 92 void set_char_at(int offset, char c) { *addr_at(offset) = (u_char)c; } 93 void set_int_at(int offset, jint i) { *(jint*)addr_at(offset) = i; } 94 void set_uint_at(int offset, jint i) { *(juint*)addr_at(offset) = i; } 95 void set_ptr_at (int offset, address ptr) { *(address*) addr_at(offset) = ptr; } 96 void set_oop_at (int offset, oop o) { *(oop*) addr_at(offset) = o; } 97 98 void wrote(int offset); 99 100 public: 101 102 // unit test stuff 103 static void test() {} // override for testing 104 105 inline friend NativeInstruction* nativeInstruction_at(address address); 106 107 static bool is_adrp_at(address instr); 108 109 static bool 
is_ldr_literal_at(address instr); 110 111 bool is_ldr_literal() { 112 return is_ldr_literal_at(addr_at(0)); 113 } 114 115 static bool is_ldrw_to_zr(address instr); 116 117 static bool is_call_at(address instr) { 118 const uint32_t insn = (*(uint32_t*)instr); 119 return (insn >> 26) == 0b100101; 120 } 121 122 bool is_call() { 123 return is_call_at(addr_at(0)); 124 } 125 126 static bool maybe_cpool_ref(address instr) { 127 return is_adrp_at(instr) || is_ldr_literal_at(instr); 128 } 129 130 bool is_Membar() { 131 unsigned int insn = uint_at(0); 132 return Instruction_aarch64::extract(insn, 31, 12) == 0b11010101000000110011 && 133 Instruction_aarch64::extract(insn, 7, 0) == 0b10111111; 134 } 135 136 bool is_Imm_LdSt() { 137 unsigned int insn = uint_at(0); 138 return Instruction_aarch64::extract(insn, 29, 27) == 0b111 && 139 Instruction_aarch64::extract(insn, 23, 23) == 0b0 && 140 Instruction_aarch64::extract(insn, 26, 25) == 0b00; 141 } 142 }; 143 144 inline NativeInstruction* nativeInstruction_at(address address) { 145 return (NativeInstruction*)address; 146 } 147 148 // The natural type of an AArch64 instruction is uint32_t 149 inline NativeInstruction* nativeInstruction_at(uint32_t *address) { 150 return (NativeInstruction*)address; 151 } 152 153 class NativePltCall: public NativeInstruction { 154 public: 155 enum Arm_specific_constants { 156 instruction_size = 4, 157 instruction_offset = 0, 158 displacement_offset = 1, 159 return_address_offset = 4 160 }; 161 address instruction_address() const { return addr_at(instruction_offset); } 162 address next_instruction_address() const { return addr_at(return_address_offset); } 163 address displacement_address() const { return addr_at(displacement_offset); } 164 int displacement() const { return (jint) int_at(displacement_offset); } 165 address return_address() const { return addr_at(return_address_offset); } 166 address destination() const; 167 address plt_entry() const; 168 address plt_jump() const; 169 address 
plt_load_got() const; 170 address plt_resolve_call() const; 171 address plt_c2i_stub() const; 172 void set_stub_to_clean(); 173 174 void reset_to_plt_resolve_call(); 175 void set_destination_mt_safe(address dest); 176 177 void verify() const; 178 }; 179 180 inline NativePltCall* nativePltCall_at(address address) { 181 NativePltCall* call = (NativePltCall*) address; 182 #ifdef ASSERT 183 call->verify(); 184 #endif 185 return call; 186 } 187 188 inline NativePltCall* nativePltCall_before(address addr) { 189 address at = addr - NativePltCall::instruction_size; 190 return nativePltCall_at(at); 191 } 192 193 inline NativeCall* nativeCall_at(address address); 194 // The NativeCall is an abstraction for accessing/manipulating native 195 // call instructions (used to manipulate inline caches, primitive & 196 // DSO calls, etc.). 197 198 class NativeCall: public NativeInstruction { 199 public: 200 enum Aarch64_specific_constants { 201 instruction_size = 4, 202 instruction_offset = 0, 203 displacement_offset = 0, 204 return_address_offset = 4 205 }; 206 207 address instruction_address() const { return addr_at(instruction_offset); } 208 address next_instruction_address() const { return addr_at(return_address_offset); } 209 int displacement() const { return (int_at(displacement_offset) << 6) >> 4; } 210 address displacement_address() const { return addr_at(displacement_offset); } 211 address return_address() const { return addr_at(return_address_offset); } 212 address destination() const; 213 214 void set_destination(address dest) { 215 int offset = dest - instruction_address(); 216 unsigned int insn = 0b100101 << 26; 217 assert((offset & 3) == 0, "should be"); 218 offset >>= 2; 219 offset &= (1 << 26) - 1; // mask off insn part 220 insn |= offset; 221 set_int_at(displacement_offset, insn); 222 } 223 224 void verify_alignment() { ; } 225 void verify(); 226 void print(); 227 228 // Creation 229 inline friend NativeCall* nativeCall_at(address address); 230 inline friend 
NativeCall* nativeCall_before(address return_address); 231 232 static bool is_call_before(address return_address) { 233 return is_call_at(return_address - NativeCall::return_address_offset); 234 } 235 236 #if INCLUDE_AOT 237 static bool is_far_call(address instr, address target) { 238 return !Assembler::reachable_from_branch_at(instr, target); 239 } 240 #endif 241 242 // MT-safe patching of a call instruction. 243 static void insert(address code_pos, address entry); 244 245 static void replace_mt_safe(address instr_addr, address code_buffer); 246 247 // Similar to replace_mt_safe, but just changes the destination. The 248 // important thing is that free-running threads are able to execute 249 // this call instruction at all times. If the call is an immediate BL 250 // instruction we can simply rely on atomicity of 32-bit writes to 251 // make sure other threads will see no intermediate states. 252 253 // We cannot rely on locks here, since the free-running threads must run at 254 // full speed. 255 // 256 // Used in the runtime linkage of calls; see class CompiledIC. 257 // (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.) 258 259 // The parameter assert_lock disables the assertion during code generation. 260 void set_destination_mt_safe(address dest, bool assert_lock = true); 261 262 address get_trampoline(); 263 address trampoline_jump(CodeBuffer &cbuf, address dest); 264 }; 265 266 inline NativeCall* nativeCall_at(address address) { 267 NativeCall* call = (NativeCall*)(address - NativeCall::instruction_offset); 268 #ifdef ASSERT 269 call->verify(); 270 #endif 271 return call; 272 } 273 274 inline NativeCall* nativeCall_before(address return_address) { 275 NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset); 276 #ifdef ASSERT 277 call->verify(); 278 #endif 279 return call; 280 } 281 282 // An interface for accessing/manipulating native mov reg, imm instructions. 
283 // (used to manipulate inlined 64-bit data calls, etc.) 284 class NativeMovConstReg: public NativeInstruction { 285 public: 286 enum Aarch64_specific_constants { 287 instruction_size = 3 * 4, // movz, movk, movk. See movptr(). 288 instruction_offset = 0, 289 displacement_offset = 0, 290 }; 291 292 address instruction_address() const { return addr_at(instruction_offset); } 293 address next_instruction_address() const { 294 if (nativeInstruction_at(instruction_address())->is_movz()) 295 // Assume movz, movk, movk 296 return addr_at(instruction_size); 297 else if (is_adrp_at(instruction_address())) 298 return addr_at(2*4); 299 else if (is_ldr_literal_at(instruction_address())) 300 return(addr_at(4)); 301 assert(false, "Unknown instruction in NativeMovConstReg"); 302 return NULL; 303 } 304 305 intptr_t data() const; 306 void set_data(intptr_t x); 307 308 void flush() { 309 if (! maybe_cpool_ref(instruction_address())) { 310 ICache::invalidate_range(instruction_address(), instruction_size); 311 } 312 } 313 314 void verify(); 315 void print(); 316 317 // unit test stuff 318 static void test() {} 319 320 // Creation 321 inline friend NativeMovConstReg* nativeMovConstReg_at(address address); 322 inline friend NativeMovConstReg* nativeMovConstReg_before(address address); 323 }; 324 325 inline NativeMovConstReg* nativeMovConstReg_at(address address) { 326 NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_offset); 327 #ifdef ASSERT 328 test->verify(); 329 #endif 330 return test; 331 } 332 333 inline NativeMovConstReg* nativeMovConstReg_before(address address) { 334 NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset); 335 #ifdef ASSERT 336 test->verify(); 337 #endif 338 return test; 339 } 340 341 class NativeMovConstRegPatching: public NativeMovConstReg { 342 private: 343 friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) { 
344 NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_offset); 345 #ifdef ASSERT 346 test->verify(); 347 #endif 348 return test; 349 } 350 }; 351 352 // An interface for accessing/manipulating native moves of the form: 353 // mov[b/w/l/q] [reg + offset], reg (instruction_code_reg2mem) 354 // mov[b/w/l/q] reg, [reg+offset] (instruction_code_mem2reg 355 // mov[s/z]x[w/b/q] [reg + offset], reg 356 // fld_s [reg+offset] 357 // fld_d [reg+offset] 358 // fstp_s [reg + offset] 359 // fstp_d [reg + offset] 360 // mov_literal64 scratch,<pointer> ; mov[b/w/l/q] 0(scratch),reg | mov[b/w/l/q] reg,0(scratch) 361 // 362 // Warning: These routines must be able to handle any instruction sequences 363 // that are generated as a result of the load/store byte,word,long 364 // macros. For example: The load_unsigned_byte instruction generates 365 // an xor reg,reg inst prior to generating the movb instruction. This 366 // class must skip the xor instruction. 367 368 class NativeMovRegMem: public NativeInstruction { 369 enum AArch64_specific_constants { 370 instruction_size = 4, 371 instruction_offset = 0, 372 data_offset = 0, 373 next_instruction_offset = 4 374 }; 375 376 public: 377 // helper 378 int instruction_start() const; 379 380 address instruction_address() const; 381 382 address next_instruction_address() const; 383 384 int offset() const; 385 386 void set_offset(int x); 387 388 void add_offset_in_bytes(int add_offset) { set_offset ( ( offset() + add_offset ) ); } 389 390 void verify(); 391 void print (); 392 393 // unit test stuff 394 static void test() {} 395 396 private: 397 inline friend NativeMovRegMem* nativeMovRegMem_at (address address); 398 }; 399 400 inline NativeMovRegMem* nativeMovRegMem_at (address address) { 401 NativeMovRegMem* test = (NativeMovRegMem*)(address - NativeMovRegMem::instruction_offset); 402 #ifdef ASSERT 403 test->verify(); 404 #endif 405 return test; 406 } 407 408 class NativeMovRegMemPatching: public 
NativeMovRegMem { 409 private: 410 friend NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address) {Unimplemented(); return 0; } 411 }; 412 413 // An interface for accessing/manipulating native leal instruction of form: 414 // leal reg, [reg + offset] 415 416 class NativeLoadAddress: public NativeInstruction { 417 enum AArch64_specific_constants { 418 instruction_size = 4, 419 instruction_offset = 0, 420 data_offset = 0, 421 next_instruction_offset = 4 422 }; 423 424 public: 425 void verify(); 426 void print (); 427 428 // unit test stuff 429 static void test() {} 430 }; 431 432 // adrp x16, #page 433 // add x16, x16, #offset 434 // ldr x16, [x16] 435 class NativeLoadGot: public NativeInstruction { 436 public: 437 enum AArch64_specific_constants { 438 instruction_length = 4 * NativeInstruction::instruction_size, 439 offset_offset = 0, 440 }; 441 442 address instruction_address() const { return addr_at(0); } 443 address return_address() const { return addr_at(instruction_length); } 444 address got_address() const; 445 address next_instruction_address() const { return return_address(); } 446 intptr_t data() const; 447 void set_data(intptr_t data) { 448 intptr_t *addr = (intptr_t *) got_address(); 449 *addr = data; 450 } 451 452 void verify() const; 453 private: 454 void report_and_fail() const; 455 }; 456 457 inline NativeLoadGot* nativeLoadGot_at(address addr) { 458 NativeLoadGot* load = (NativeLoadGot*) addr; 459 #ifdef ASSERT 460 load->verify(); 461 #endif 462 return load; 463 } 464 465 class NativeJump: public NativeInstruction { 466 public: 467 enum AArch64_specific_constants { 468 instruction_size = 4, 469 instruction_offset = 0, 470 data_offset = 0, 471 next_instruction_offset = 4 472 }; 473 474 address instruction_address() const { return addr_at(instruction_offset); } 475 address next_instruction_address() const { return addr_at(instruction_size); } 476 address jump_destination() const; 477 void set_jump_destination(address dest); 478 479 // 
Creation 480 inline friend NativeJump* nativeJump_at(address address); 481 482 void verify(); 483 484 // Unit testing stuff 485 static void test() {} 486 487 // Insertion of native jump instruction 488 static void insert(address code_pos, address entry); 489 // MT-safe insertion of native jump at verified method entry 490 static void check_verified_entry_alignment(address entry, address verified_entry); 491 static void patch_verified_entry(address entry, address verified_entry, address dest); 492 }; 493 494 inline NativeJump* nativeJump_at(address address) { 495 NativeJump* jump = (NativeJump*)(address - NativeJump::instruction_offset); 496 #ifdef ASSERT 497 jump->verify(); 498 #endif 499 return jump; 500 } 501 502 class NativeGeneralJump: public NativeJump { 503 public: 504 enum AArch64_specific_constants { 505 instruction_size = 4 * 4, 506 instruction_offset = 0, 507 data_offset = 0, 508 next_instruction_offset = 4 * 4 509 }; 510 511 address jump_destination() const; 512 void set_jump_destination(address dest); 513 514 static void insert_unconditional(address code_pos, address entry); 515 static void replace_mt_safe(address instr_addr, address code_buffer); 516 static void verify(); 517 }; 518 519 inline NativeGeneralJump* nativeGeneralJump_at(address address) { 520 NativeGeneralJump* jump = (NativeGeneralJump*)(address); 521 debug_only(jump->verify();) 522 return jump; 523 } 524 525 class NativeGotJump: public NativeInstruction { 526 public: 527 enum AArch64_specific_constants { 528 instruction_size = 4 * NativeInstruction::instruction_size, 529 }; 530 531 void verify() const; 532 address instruction_address() const { return addr_at(0); } 533 address destination() const; 534 address return_address() const { return addr_at(instruction_size); } 535 address got_address() const; 536 address next_instruction_address() const { return addr_at(instruction_size); } 537 bool is_GotJump() const; 538 539 void set_jump_destination(address dest) { 540 address* got = (address 
*)got_address(); 541 *got = dest; 542 } 543 }; 544 545 inline NativeGotJump* nativeGotJump_at(address addr) { 546 NativeGotJump* jump = (NativeGotJump*)(addr); 547 return jump; 548 } 549 550 class NativePopReg : public NativeInstruction { 551 public: 552 // Insert a pop instruction 553 static void insert(address code_pos, Register reg); 554 }; 555 556 557 class NativeIllegalInstruction: public NativeInstruction { 558 public: 559 // Insert illegal opcode as specific address 560 static void insert(address code_pos); 561 }; 562 563 // return instruction that does not pop values of the stack 564 class NativeReturn: public NativeInstruction { 565 public: 566 }; 567 568 // return instruction that does pop values of the stack 569 class NativeReturnX: public NativeInstruction { 570 public: 571 }; 572 573 // Simple test vs memory 574 class NativeTstRegMem: public NativeInstruction { 575 public: 576 }; 577 578 inline bool NativeInstruction::is_nop() { 579 uint32_t insn = *(uint32_t*)addr_at(0); 580 return insn == 0xd503201f; 581 } 582 583 inline bool NativeInstruction::is_jump() { 584 uint32_t insn = *(uint32_t*)addr_at(0); 585 586 if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) { 587 // Unconditional branch (immediate) 588 return true; 589 } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) { 590 // Conditional branch (immediate) 591 return true; 592 } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) { 593 // Compare & branch (immediate) 594 return true; 595 } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) { 596 // Test & branch (immediate) 597 return true; 598 } else 599 return false; 600 } 601 602 inline bool NativeInstruction::is_jump_or_nop() { 603 return is_nop() || is_jump(); 604 } 605 606 // Call trampoline stubs. 
607 class NativeCallTrampolineStub : public NativeInstruction { 608 public: 609 610 enum AArch64_specific_constants { 611 instruction_size = 4 * 4, 612 instruction_offset = 0, 613 data_offset = 2 * 4, 614 next_instruction_offset = 4 * 4 615 }; 616 617 address destination(nmethod *nm = NULL) const; 618 void set_destination(address new_destination); 619 ptrdiff_t destination_offset() const; 620 }; 621 622 inline bool is_NativeCallTrampolineStub_at(address addr) { 623 // Ensure that the stub is exactly 624 // ldr xscratch1, L 625 // br xscratch1 626 // L: 627 uint32_t *i = (uint32_t *)addr; 628 return i[0] == 0x58000048 && i[1] == 0xd61f0100; 629 } 630 631 inline NativeCallTrampolineStub* nativeCallTrampolineStub_at(address addr) { 632 assert(is_NativeCallTrampolineStub_at(addr), "no call trampoline found"); 633 return (NativeCallTrampolineStub*)addr; 634 } 635 636 class NativeMembar : public NativeInstruction { 637 public: 638 unsigned int get_kind() { return Instruction_aarch64::extract(uint_at(0), 11, 8); } 639 void set_kind(int order_kind) { Instruction_aarch64::patch(addr_at(0), 11, 8, order_kind); } 640 }; 641 642 inline NativeMembar *NativeMembar_at(address addr) { 643 assert(nativeInstruction_at(addr)->is_Membar(), "no membar found"); 644 return (NativeMembar*)addr; 645 } 646 647 class NativeLdSt : public NativeInstruction { 648 private: 649 int32_t size() { return Instruction_aarch64::extract(uint_at(0), 31, 30); } 650 // Check whether instruction is with unscaled offset. 
651 bool is_ldst_ur() { 652 return (Instruction_aarch64::extract(uint_at(0), 29, 21) == 0b111000010 || 653 Instruction_aarch64::extract(uint_at(0), 29, 21) == 0b111000000) && 654 Instruction_aarch64::extract(uint_at(0), 11, 10) == 0b00; 655 } 656 bool is_ldst_unsigned_offset() { 657 return Instruction_aarch64::extract(uint_at(0), 29, 22) == 0b11100101 || 658 Instruction_aarch64::extract(uint_at(0), 29, 22) == 0b11100100; 659 } 660 public: 661 Register target() { 662 uint32_t r = Instruction_aarch64::extract(uint_at(0), 4, 0); 663 return r == 0x1f ? zr : as_Register(r); 664 } 665 Register base() { 666 uint32_t b = Instruction_aarch64::extract(uint_at(0), 9, 5); 667 return b == 0x1f ? sp : as_Register(b); 668 } 669 int64_t offset() { 670 if (is_ldst_ur()) { 671 return Instruction_aarch64::sextract(uint_at(0), 20, 12); 672 } else if (is_ldst_unsigned_offset()) { 673 return Instruction_aarch64::extract(uint_at(0), 21, 10) << size(); 674 } else { 675 // others like: pre-index or post-index. 676 ShouldNotReachHere(); 677 return 0; 678 } 679 } 680 size_t size_in_bytes() { return 1 << size(); } 681 bool is_not_pre_post_index() { return (is_ldst_ur() || is_ldst_unsigned_offset()); } 682 bool is_load() { 683 assert(Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b01 || 684 Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b00, "must be ldr or str"); 685 686 return Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b01; 687 } 688 bool is_store() { 689 assert(Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b01 || 690 Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b00, "must be ldr or str"); 691 692 return Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b00; 693 } 694 }; 695 696 inline NativeLdSt *NativeLdSt_at(address addr) { 697 assert(nativeInstruction_at(addr)->is_Imm_LdSt(), "no immediate load/store found"); 698 return (NativeLdSt*)addr; 699 } 700 #endif // CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP