/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP
#define CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP

#include "asm/assembler.hpp"
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"

// We have interfaces for the following instructions:
// - NativeInstruction
// - - NativeCall
// - - NativeMovConstReg
// - - NativeMovConstRegPatching
// - - NativeMovRegMem
// - - NativeMovRegMemPatching
// - - NativeJump
// - - NativeIllegalOpCode
// - - NativeGeneralJump
// - - NativeReturn
// - - NativeReturnX (return with argument)
// - - NativePushConst
// - - NativeTstRegMem

// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this.
51 52 class NativeInstruction VALUE_OBJ_CLASS_SPEC { 53 friend class Relocation; 54 friend bool is_NativeCallTrampolineStub_at(address); 55 public: 56 enum { 57 instruction_size = 4 58 }; 59 60 juint encoding() const { 61 return uint_at(0); 62 } 63 64 bool is_blr() const { return (encoding() & 0xff9ffc1f) == 0xd61f0000; } // blr(register) or br(register) 65 bool is_adr_aligned() const { return (encoding() & 0xff000000) == 0x10000000; } // adr Xn, <label>, where label is aligned to 4 bytes (address of instruction). 66 67 inline bool is_nop(); 68 inline bool is_illegal(); 69 inline bool is_return(); 70 bool is_jump(); 71 bool is_general_jump(); 72 inline bool is_jump_or_nop(); 73 inline bool is_cond_jump(); 74 bool is_safepoint_poll(); 75 bool is_movz(); 76 bool is_movk(); 77 bool is_sigill_zombie_not_entrant(); 78 79 protected: 80 address addr_at(int offset) const { return address(this) + offset; } 81 82 s_char sbyte_at(int offset) const { return *(s_char*) addr_at(offset); } 83 u_char ubyte_at(int offset) const { return *(u_char*) addr_at(offset); } 84 85 jint int_at(int offset) const { return *(jint*) addr_at(offset); } 86 juint uint_at(int offset) const { return *(juint*) addr_at(offset); } 87 88 address ptr_at(int offset) const { return *(address*) addr_at(offset); } 89 90 oop oop_at (int offset) const { return *(oop*) addr_at(offset); } 91 92 93 void set_char_at(int offset, char c) { *addr_at(offset) = (u_char)c; } 94 void set_int_at(int offset, jint i) { *(jint*)addr_at(offset) = i; } 95 void set_uint_at(int offset, jint i) { *(juint*)addr_at(offset) = i; } 96 void set_ptr_at (int offset, address ptr) { *(address*) addr_at(offset) = ptr; } 97 void set_oop_at (int offset, oop o) { *(oop*) addr_at(offset) = o; } 98 99 void wrote(int offset); 100 101 public: 102 103 // unit test stuff 104 static void test() {} // override for testing 105 106 inline friend NativeInstruction* nativeInstruction_at(address address); 107 108 static bool is_adrp_at(address instr); 109 
110 static bool is_ldr_literal_at(address instr); 111 112 bool is_ldr_literal() { 113 return is_ldr_literal_at(addr_at(0)); 114 } 115 116 static bool is_ldrw_to_zr(address instr); 117 118 static bool is_call_at(address instr) { 119 const uint32_t insn = (*(uint32_t*)instr); 120 return (insn >> 26) == 0b100101; 121 } 122 123 bool is_call() { 124 return is_call_at(addr_at(0)); 125 } 126 127 static bool maybe_cpool_ref(address instr) { 128 return is_adrp_at(instr) || is_ldr_literal_at(instr); 129 } 130 131 bool is_Membar() { 132 unsigned int insn = uint_at(0); 133 return Instruction_aarch64::extract(insn, 31, 12) == 0b11010101000000110011 && 134 Instruction_aarch64::extract(insn, 7, 0) == 0b10111111; 135 } 136 }; 137 138 inline NativeInstruction* nativeInstruction_at(address address) { 139 return (NativeInstruction*)address; 140 } 141 142 // The natural type of an AArch64 instruction is uint32_t 143 inline NativeInstruction* nativeInstruction_at(uint32_t *address) { 144 return (NativeInstruction*)address; 145 } 146 147 class NativePltCall: public NativeInstruction { 148 public: 149 enum Intel_specific_constants { 150 instruction_size = 4, 151 instruction_offset = 0, 152 displacement_offset = 1, 153 return_address_offset = 4 154 }; 155 address instruction_address() const { return addr_at(instruction_offset); } 156 address next_instruction_address() const { return addr_at(return_address_offset); } 157 address displacement_address() const { return addr_at(displacement_offset); } 158 int displacement() const { return (jint) int_at(displacement_offset); } 159 address return_address() const { return addr_at(return_address_offset); } 160 address destination() const; 161 address plt_entry() const; 162 address plt_jump() const; 163 address plt_load_got() const; 164 address plt_resolve_call() const; 165 address plt_c2i_stub() const; 166 void set_stub_to_clean(); 167 168 void reset_to_plt_resolve_call(); 169 void set_destination_mt_safe(address dest); 170 171 void verify() 
const; 172 }; 173 174 inline NativePltCall* nativePltCall_at(address address) { 175 NativePltCall* call = (NativePltCall*) address; 176 #ifdef ASSERT 177 call->verify(); 178 #endif 179 return call; 180 } 181 182 inline NativePltCall* nativePltCall_before(address addr) { 183 address at = addr - NativePltCall::instruction_size; 184 return nativePltCall_at(at); 185 } 186 187 inline NativeCall* nativeCall_at(address address); 188 // The NativeCall is an abstraction for accessing/manipulating native 189 // call instructions (used to manipulate inline caches, primitive & 190 // DSO calls, etc.). 191 192 class NativeCall: public NativeInstruction { 193 public: 194 enum Aarch64_specific_constants { 195 instruction_size = 4, 196 instruction_offset = 0, 197 displacement_offset = 0, 198 return_address_offset = 4 199 }; 200 201 address instruction_address() const { return addr_at(instruction_offset); } 202 address next_instruction_address() const { return addr_at(return_address_offset); } 203 int displacement() const { return (int_at(displacement_offset) << 6) >> 4; } 204 address displacement_address() const { return addr_at(displacement_offset); } 205 address return_address() const { return addr_at(return_address_offset); } 206 address destination() const; 207 208 void set_destination(address dest) { 209 int offset = dest - instruction_address(); 210 unsigned int insn = 0b100101 << 26; 211 assert((offset & 3) == 0, "should be"); 212 offset >>= 2; 213 offset &= (1 << 26) - 1; // mask off insn part 214 insn |= offset; 215 set_int_at(displacement_offset, insn); 216 } 217 218 void verify_alignment() { ; } 219 void verify(); 220 void print(); 221 222 // Creation 223 inline friend NativeCall* nativeCall_at(address address); 224 inline friend NativeCall* nativeCall_before(address return_address); 225 226 static bool is_call_before(address return_address) { 227 return is_call_at(return_address - NativeCall::return_address_offset); 228 } 229 230 #if INCLUDE_AOT 231 static bool 
is_far_call(address instr, address target) { 232 return Assembler::reachable_from_branch_at(instr, target); 233 } 234 #endif 235 236 // MT-safe patching of a call instruction. 237 static void insert(address code_pos, address entry); 238 239 static void replace_mt_safe(address instr_addr, address code_buffer); 240 241 // Similar to replace_mt_safe, but just changes the destination. The 242 // important thing is that free-running threads are able to execute 243 // this call instruction at all times. If the call is an immediate BL 244 // instruction we can simply rely on atomicity of 32-bit writes to 245 // make sure other threads will see no intermediate states. 246 247 // We cannot rely on locks here, since the free-running threads must run at 248 // full speed. 249 // 250 // Used in the runtime linkage of calls; see class CompiledIC. 251 // (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.) 252 253 // The parameter assert_lock disables the assertion during code generation. 254 void set_destination_mt_safe(address dest, bool assert_lock = true); 255 256 address get_trampoline(); 257 address trampoline_jump(CodeBuffer &cbuf, address dest); 258 }; 259 260 inline NativeCall* nativeCall_at(address address) { 261 NativeCall* call = (NativeCall*)(address - NativeCall::instruction_offset); 262 #ifdef ASSERT 263 call->verify(); 264 #endif 265 return call; 266 } 267 268 inline NativeCall* nativeCall_before(address return_address) { 269 NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset); 270 #ifdef ASSERT 271 call->verify(); 272 #endif 273 return call; 274 } 275 276 // An interface for accessing/manipulating native mov reg, imm instructions. 277 // (used to manipulate inlined 64-bit data calls, etc.) 278 class NativeMovConstReg: public NativeInstruction { 279 public: 280 enum Aarch64_specific_constants { 281 instruction_size = 3 * 4, // movz, movk, movk. See movptr(). 
282 instruction_offset = 0, 283 displacement_offset = 0, 284 }; 285 286 address instruction_address() const { return addr_at(instruction_offset); } 287 address next_instruction_address() const { 288 if (nativeInstruction_at(instruction_address())->is_movz()) 289 // Assume movz, movk, movk 290 return addr_at(instruction_size); 291 else if (is_adrp_at(instruction_address())) 292 return addr_at(2*4); 293 else if (is_ldr_literal_at(instruction_address())) 294 return(addr_at(4)); 295 assert(false, "Unknown instruction in NativeMovConstReg"); 296 return NULL; 297 } 298 299 intptr_t data() const; 300 void set_data(intptr_t x); 301 302 void flush() { 303 if (! maybe_cpool_ref(instruction_address())) { 304 ICache::invalidate_range(instruction_address(), instruction_size); 305 } 306 } 307 308 void verify(); 309 void print(); 310 311 // unit test stuff 312 static void test() {} 313 314 // Creation 315 inline friend NativeMovConstReg* nativeMovConstReg_at(address address); 316 inline friend NativeMovConstReg* nativeMovConstReg_before(address address); 317 }; 318 319 inline NativeMovConstReg* nativeMovConstReg_at(address address) { 320 NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_offset); 321 #ifdef ASSERT 322 test->verify(); 323 #endif 324 return test; 325 } 326 327 inline NativeMovConstReg* nativeMovConstReg_before(address address) { 328 NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset); 329 #ifdef ASSERT 330 test->verify(); 331 #endif 332 return test; 333 } 334 335 class NativeMovConstRegPatching: public NativeMovConstReg { 336 private: 337 friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) { 338 NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_offset); 339 #ifdef ASSERT 340 test->verify(); 341 #endif 342 return test; 343 } 344 }; 345 346 // An interface for accessing/manipulating 
native moves of the form: 347 // mov[b/w/l/q] [reg + offset], reg (instruction_code_reg2mem) 348 // mov[b/w/l/q] reg, [reg+offset] (instruction_code_mem2reg 349 // mov[s/z]x[w/b/q] [reg + offset], reg 350 // fld_s [reg+offset] 351 // fld_d [reg+offset] 352 // fstp_s [reg + offset] 353 // fstp_d [reg + offset] 354 // mov_literal64 scratch,<pointer> ; mov[b/w/l/q] 0(scratch),reg | mov[b/w/l/q] reg,0(scratch) 355 // 356 // Warning: These routines must be able to handle any instruction sequences 357 // that are generated as a result of the load/store byte,word,long 358 // macros. For example: The load_unsigned_byte instruction generates 359 // an xor reg,reg inst prior to generating the movb instruction. This 360 // class must skip the xor instruction. 361 362 class NativeMovRegMem: public NativeInstruction { 363 enum AArch64_specific_constants { 364 instruction_size = 4, 365 instruction_offset = 0, 366 data_offset = 0, 367 next_instruction_offset = 4 368 }; 369 370 public: 371 // helper 372 int instruction_start() const; 373 374 address instruction_address() const; 375 376 address next_instruction_address() const; 377 378 int offset() const; 379 380 void set_offset(int x); 381 382 void add_offset_in_bytes(int add_offset) { set_offset ( ( offset() + add_offset ) ); } 383 384 void verify(); 385 void print (); 386 387 // unit test stuff 388 static void test() {} 389 390 private: 391 inline friend NativeMovRegMem* nativeMovRegMem_at (address address); 392 }; 393 394 inline NativeMovRegMem* nativeMovRegMem_at (address address) { 395 NativeMovRegMem* test = (NativeMovRegMem*)(address - NativeMovRegMem::instruction_offset); 396 #ifdef ASSERT 397 test->verify(); 398 #endif 399 return test; 400 } 401 402 class NativeMovRegMemPatching: public NativeMovRegMem { 403 private: 404 friend NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address) {Unimplemented(); return 0; } 405 }; 406 407 // An interface for accessing/manipulating native leal instruction of form: 408 // 
leal reg, [reg + offset] 409 410 class NativeLoadAddress: public NativeInstruction { 411 enum AArch64_specific_constants { 412 instruction_size = 4, 413 instruction_offset = 0, 414 data_offset = 0, 415 next_instruction_offset = 4 416 }; 417 418 public: 419 void verify(); 420 void print (); 421 422 // unit test stuff 423 static void test() {} 424 }; 425 426 // adrp x16, #page 427 // add x16, x16, #offset 428 // ldr x16, [x16] 429 class NativeLoadGot: public NativeInstruction { 430 public: 431 enum AArch64_specific_constants { 432 instruction_length = 4 * NativeInstruction::instruction_size, 433 offset_offset = 0, 434 }; 435 436 address instruction_address() const { return addr_at(0); } 437 address return_address() const { return addr_at(instruction_length); } 438 address got_address() const; 439 address next_instruction_address() const { return return_address(); } 440 intptr_t data() const; 441 void set_data(intptr_t data) { 442 intptr_t *addr = (intptr_t *) got_address(); 443 *addr = data; 444 } 445 446 void verify() const; 447 private: 448 void report_and_fail() const; 449 }; 450 451 inline NativeLoadGot* nativeLoadGot_at(address addr) { 452 NativeLoadGot* load = (NativeLoadGot*) addr; 453 #ifdef ASSERT 454 load->verify(); 455 #endif 456 return load; 457 } 458 459 class NativeJump: public NativeInstruction { 460 public: 461 enum AArch64_specific_constants { 462 instruction_size = 4, 463 instruction_offset = 0, 464 data_offset = 0, 465 next_instruction_offset = 4 466 }; 467 468 address instruction_address() const { return addr_at(instruction_offset); } 469 address next_instruction_address() const { return addr_at(instruction_size); } 470 address jump_destination() const; 471 void set_jump_destination(address dest); 472 473 // Creation 474 inline friend NativeJump* nativeJump_at(address address); 475 476 void verify(); 477 478 // Unit testing stuff 479 static void test() {} 480 481 // Insertion of native jump instruction 482 static void insert(address code_pos, 
address entry); 483 // MT-safe insertion of native jump at verified method entry 484 static void check_verified_entry_alignment(address entry, address verified_entry); 485 static void patch_verified_entry(address entry, address verified_entry, address dest); 486 }; 487 488 inline NativeJump* nativeJump_at(address address) { 489 NativeJump* jump = (NativeJump*)(address - NativeJump::instruction_offset); 490 #ifdef ASSERT 491 jump->verify(); 492 #endif 493 return jump; 494 } 495 496 class NativeGeneralJump: public NativeJump { 497 public: 498 enum AArch64_specific_constants { 499 instruction_size = 4 * 4, 500 instruction_offset = 0, 501 data_offset = 0, 502 next_instruction_offset = 4 * 4 503 }; 504 505 address jump_destination() const; 506 void set_jump_destination(address dest); 507 508 static void insert_unconditional(address code_pos, address entry); 509 static void replace_mt_safe(address instr_addr, address code_buffer); 510 static void verify(); 511 }; 512 513 inline NativeGeneralJump* nativeGeneralJump_at(address address) { 514 NativeGeneralJump* jump = (NativeGeneralJump*)(address); 515 debug_only(jump->verify();) 516 return jump; 517 } 518 519 class NativeGotJump: public NativeInstruction { 520 public: 521 enum AArch64_specific_constants { 522 instruction_size = 4 * NativeInstruction::instruction_size, 523 }; 524 525 void verify() const; 526 address instruction_address() const { return addr_at(0); } 527 address destination() const; 528 address return_address() const { return addr_at(instruction_size); } 529 address got_address() const; 530 address next_instruction_address() const { return addr_at(instruction_size); } 531 bool is_GotJump() const; 532 533 void set_jump_destination(address dest) { 534 address* got = (address *)got_address(); 535 *got = dest; 536 } 537 }; 538 539 inline NativeGotJump* nativeGotJump_at(address addr) { 540 NativeGotJump* jump = (NativeGotJump*)(addr); 541 return jump; 542 } 543 544 class NativePopReg : public NativeInstruction { 
545 public: 546 // Insert a pop instruction 547 static void insert(address code_pos, Register reg); 548 }; 549 550 551 class NativeIllegalInstruction: public NativeInstruction { 552 public: 553 // Insert illegal opcode as specific address 554 static void insert(address code_pos); 555 }; 556 557 // return instruction that does not pop values of the stack 558 class NativeReturn: public NativeInstruction { 559 public: 560 }; 561 562 // return instruction that does pop values of the stack 563 class NativeReturnX: public NativeInstruction { 564 public: 565 }; 566 567 // Simple test vs memory 568 class NativeTstRegMem: public NativeInstruction { 569 public: 570 }; 571 572 inline bool NativeInstruction::is_nop() { 573 uint32_t insn = *(uint32_t*)addr_at(0); 574 return insn == 0xd503201f; 575 } 576 577 inline bool NativeInstruction::is_jump() { 578 uint32_t insn = *(uint32_t*)addr_at(0); 579 580 if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) { 581 // Unconditional branch (immediate) 582 return true; 583 } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) { 584 // Conditional branch (immediate) 585 return true; 586 } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) { 587 // Compare & branch (immediate) 588 return true; 589 } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) { 590 // Test & branch (immediate) 591 return true; 592 } else 593 return false; 594 } 595 596 inline bool NativeInstruction::is_jump_or_nop() { 597 return is_nop() || is_jump(); 598 } 599 600 // Call trampoline stubs. 
601 class NativeCallTrampolineStub : public NativeInstruction { 602 public: 603 604 enum AArch64_specific_constants { 605 instruction_size = 4 * 4, 606 instruction_offset = 0, 607 data_offset = 2 * 4, 608 next_instruction_offset = 4 * 4 609 }; 610 611 address destination(nmethod *nm = NULL) const; 612 void set_destination(address new_destination); 613 ptrdiff_t destination_offset() const; 614 }; 615 616 inline bool is_NativeCallTrampolineStub_at(address addr) { 617 // Ensure that the stub is exactly 618 // ldr xscratch1, L 619 // br xscratch1 620 // L: 621 uint32_t *i = (uint32_t *)addr; 622 return i[0] == 0x58000048 && i[1] == 0xd61f0100; 623 } 624 625 inline NativeCallTrampolineStub* nativeCallTrampolineStub_at(address addr) { 626 assert(is_NativeCallTrampolineStub_at(addr), "no call trampoline found"); 627 return (NativeCallTrampolineStub*)addr; 628 } 629 630 class NativeMembar : public NativeInstruction { 631 public: 632 unsigned int get_kind() { return Instruction_aarch64::extract(uint_at(0), 11, 8); } 633 void set_kind(int order_kind) { Instruction_aarch64::patch(addr_at(0), 11, 8, order_kind); } 634 }; 635 636 inline NativeMembar *NativeMembar_at(address addr) { 637 assert(nativeInstruction_at(addr)->is_Membar(), "no membar found"); 638 return (NativeMembar*)addr; 639 } 640 641 #endif // CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP