/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP
#define CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP

#include "asm/assembler.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"

// We have interfaces for the following instructions:
// - NativeInstruction
// - - NativeCall
// - - NativeMovConstReg
// - - NativeMovConstRegPatching
// - - NativeMovRegMem
// - - NativeMovRegMemPatching
// - - NativeJump
// - - NativeIllegalOpCode
// - - NativeGeneralJump
// - - NativeReturn
// - - NativeReturnX (return with argument)
// - - NativePushConst
// - - NativeTstRegMem

// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this.

class NativeInstruction {
  friend class Relocation;
  friend bool is_NativeCallTrampolineStub_at(address);
 public:
  enum {
    instruction_size = 4
  };

  juint encoding() const {
    return uint_at(0);
  }

  bool is_blr() const { return (encoding() & 0xff9ffc1f) == 0xd61f0000; } // blr(register) or br(register)
  bool is_adr_aligned() const { return (encoding() & 0xff000000) == 0x10000000; } // adr Xn, <label>, where label is aligned to 4 bytes (address of instruction).

  inline bool is_nop();
  inline bool is_illegal();
  inline bool is_return();
  bool is_jump();
  bool is_general_jump();
  inline bool is_jump_or_nop();
  inline bool is_cond_jump();
  bool is_safepoint_poll();
  bool is_movz();
  bool is_movk();
  bool is_sigill_zombie_not_entrant();

 protected:
  address addr_at(int offset) const { return address(this) + offset; }

  s_char sbyte_at(int offset) const { return *(s_char*) addr_at(offset); }
  u_char ubyte_at(int offset) const { return *(u_char*) addr_at(offset); }

  jint int_at(int offset) const   { return *(jint*) addr_at(offset); }
  juint uint_at(int offset) const { return *(juint*) addr_at(offset); }

  address ptr_at(int offset) const { return *(address*) addr_at(offset); }

  oop oop_at(int offset) const { return *(oop*) addr_at(offset); }


  void set_char_at(int offset, char c)     { *addr_at(offset) = (u_char)c; }
  void set_int_at(int offset, jint i)      { *(jint*)addr_at(offset) = i; }
  void set_uint_at(int offset, jint i)     { *(juint*)addr_at(offset) = i; }
  void set_ptr_at(int offset, address ptr) { *(address*) addr_at(offset) = ptr; }
  void set_oop_at(int offset, oop o)       { *(oop*) addr_at(offset) = o; }

 public:

  // unit test stuff
  static void test() {} // override for testing

  inline friend NativeInstruction* nativeInstruction_at(address address);

  static bool is_adrp_at(address instr);

  static bool is_ldr_literal_at(address instr);

  bool is_ldr_literal() {
    return is_ldr_literal_at(addr_at(0));
  }

  static bool is_ldrw_to_zr(address instr);

  static bool is_call_at(address instr) {
    const uint32_t insn = (*(uint32_t*)instr);
    return (insn >> 26) == 0b100101;
  }

  bool is_call() {
    return is_call_at(addr_at(0));
  }

  static bool maybe_cpool_ref(address instr) {
    return is_adrp_at(instr) || is_ldr_literal_at(instr);
  }

  bool is_Membar() {
    unsigned int insn = uint_at(0);
    return Instruction_aarch64::extract(insn, 31, 12) == 0b11010101000000110011 &&
           Instruction_aarch64::extract(insn, 7, 0) == 0b10111111;
  }

  bool is_Imm_LdSt() {
    unsigned int insn = uint_at(0);
    return Instruction_aarch64::extract(insn, 29, 27) == 0b111 &&
           Instruction_aarch64::extract(insn, 23, 23) == 0b0 &&
           Instruction_aarch64::extract(insn, 26, 25) == 0b00;
  }
};

inline NativeInstruction* nativeInstruction_at(address address) {
  return (NativeInstruction*)address;
}

// The natural type of an AArch64 instruction is uint32_t
inline NativeInstruction* nativeInstruction_at(uint32_t *address) {
  return (NativeInstruction*)address;
}

inline NativeCall* nativeCall_at(address address);
// The NativeCall is an abstraction for accessing/manipulating native
// call instructions (used to manipulate inline caches, primitive &
// DSO calls, etc.).
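//
// An illustrative sketch (not part of the original header) of how these
// accessors are typically combined when a call site is re-targeted; the
// names 'call_site_pc' and 'new_target' below are placeholders:
//
//   NativeCall* call = nativeCall_at(call_site_pc);  // verify()s the BL encoding in debug builds
//   address old_target = call->destination();
//   call->set_destination_mt_safe(new_target);       // re-points the BL (or its trampoline)
//                                                    // while other threads keep executing it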

class NativeCall: public NativeInstruction {
 public:
  enum Aarch64_specific_constants {
    instruction_size      = 4,
    instruction_offset    = 0,
    displacement_offset   = 0,
    return_address_offset = 4
  };

  address instruction_address() const      { return addr_at(instruction_offset); }
  address next_instruction_address() const { return addr_at(return_address_offset); }
  int     displacement() const             { return (int_at(displacement_offset) << 6) >> 4; }
  address displacement_address() const     { return addr_at(displacement_offset); }
  address return_address() const           { return addr_at(return_address_offset); }
  address destination() const;

  void set_destination(address dest) {
    int offset = dest - instruction_address();
    unsigned int insn = 0b100101 << 26;
    assert((offset & 3) == 0, "should be");
    offset >>= 2;
    offset &= (1 << 26) - 1; // mask off insn part
    insn |= offset;
    set_int_at(displacement_offset, insn);
  }

  void verify_alignment() { ; }
  void verify();
  void print();

  // Creation
  inline friend NativeCall* nativeCall_at(address address);
  inline friend NativeCall* nativeCall_before(address return_address);

  static bool is_call_before(address return_address) {
    return is_call_at(return_address - NativeCall::return_address_offset);
  }

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry);

  static void replace_mt_safe(address instr_addr, address code_buffer);

  // Similar to replace_mt_safe, but just changes the destination. The
  // important thing is that free-running threads are able to execute
  // this call instruction at all times. If the call is an immediate BL
  // instruction we can simply rely on atomicity of 32-bit writes to
  // make sure other threads will see no intermediate states.

  // We cannot rely on locks here, since the free-running threads must run at
  // full speed.
  //
  // Used in the runtime linkage of calls; see class CompiledIC.
  // (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)

  // The parameter assert_lock disables the assertion during code generation.
  void set_destination_mt_safe(address dest, bool assert_lock = true);

  address get_trampoline();
  address trampoline_jump(CodeBuffer &cbuf, address dest);
};

inline NativeCall* nativeCall_at(address address) {
  NativeCall* call = (NativeCall*)(address - NativeCall::instruction_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

inline NativeCall* nativeCall_before(address return_address) {
  NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

// An interface for accessing/manipulating native mov reg, imm instructions.
// (used to manipulate inlined 64-bit data calls, etc.)
class NativeMovConstReg: public NativeInstruction {
 public:
  enum Aarch64_specific_constants {
    instruction_size = 3 * 4, // movz, movk, movk.  See movptr().
    instruction_offset = 0,
    displacement_offset = 0,
  };

  address instruction_address() const { return addr_at(instruction_offset); }
  address next_instruction_address() const {
    if (nativeInstruction_at(instruction_address())->is_movz())
      // Assume movz, movk, movk
      return addr_at(instruction_size);
    else if (is_adrp_at(instruction_address()))
      return addr_at(2*4);
    else if (is_ldr_literal_at(instruction_address()))
      return addr_at(4);
    assert(false, "Unknown instruction in NativeMovConstReg");
    return NULL;
  }

  intptr_t data() const;
  void set_data(intptr_t x);

  void flush() {
    if (! maybe_cpool_ref(instruction_address())) {
      ICache::invalidate_range(instruction_address(), instruction_size);
    }
  }

  void verify();
  void print();

  // unit test stuff
  static void test() {}

  // Creation
  inline friend NativeMovConstReg* nativeMovConstReg_at(address address);
  inline friend NativeMovConstReg* nativeMovConstReg_before(address address);
};

inline NativeMovConstReg* nativeMovConstReg_at(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

inline NativeMovConstReg* nativeMovConstReg_before(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

class NativeMovConstRegPatching: public NativeMovConstReg {
 private:
  friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_offset);
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }
};
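
// An illustrative sketch (not part of the original header): re-binding the
// constant materialized at a known site, assuming 'site_pc' points at the
// movz of a movz/movk/movk sequence or at an adrp/ldr-literal constant-pool
// reference (maybe_cpool_ref() distinguishes the two), and 'new_value' is a
// placeholder:
//
//   NativeMovConstReg* mov = nativeMovConstReg_at(site_pc);
//   mov->set_data((intptr_t)new_value);  // rewrites the immediates or the pool entry
//   mov->flush();                        // icache flush only if the instructions changed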

// An interface for accessing/manipulating native moves of the form:
//      mov[b/w/l/q] [reg + offset], reg   (instruction_code_reg2mem)
//      mov[b/w/l/q] reg, [reg+offset]     (instruction_code_mem2reg)
//      mov[s/z]x[w/b/q] [reg + offset], reg
//      fld_s  [reg+offset]
//      fld_d  [reg+offset]
//      fstp_s [reg + offset]
//      fstp_d [reg + offset]
//      mov_literal64  scratch,<pointer> ; mov[b/w/l/q] 0(scratch),reg | mov[b/w/l/q] reg,0(scratch)
//
// Warning: These routines must be able to handle any instruction sequences
// that are generated as a result of the load/store byte,word,long
// macros.  For example: The load_unsigned_byte instruction generates
// an xor reg,reg inst prior to generating the movb instruction.  This
// class must skip the xor instruction.

class NativeMovRegMem: public NativeInstruction {
  enum AArch64_specific_constants {
    instruction_size        = 4,
    instruction_offset      = 0,
    data_offset             = 0,
    next_instruction_offset = 4
  };

 public:
  // helper
  int instruction_start() const;

  address instruction_address() const;

  address next_instruction_address() const;

  int offset() const;

  void set_offset(int x);

  void add_offset_in_bytes(int add_offset) { set_offset(offset() + add_offset); }

  void verify();
  void print();

  // unit test stuff
  static void test() {}

 private:
  inline friend NativeMovRegMem* nativeMovRegMem_at(address address);
};

inline NativeMovRegMem* nativeMovRegMem_at(address address) {
  NativeMovRegMem* test = (NativeMovRegMem*)(address - NativeMovRegMem::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

class NativeMovRegMemPatching: public NativeMovRegMem {
 private:
  friend NativeMovRegMemPatching* nativeMovRegMemPatching_at(address address) { Unimplemented(); return 0; }
};

// An interface for accessing/manipulating native leal instruction of form:
//        leal reg, [reg + offset]

class NativeLoadAddress: public NativeInstruction {
  enum AArch64_specific_constants {
    instruction_size        = 4,
    instruction_offset      = 0,
    data_offset             = 0,
    next_instruction_offset = 4
  };

 public:
  void verify();
  void print();

  // unit test stuff
  static void test() {}
};

class NativeJump: public NativeInstruction {
 public:
  enum AArch64_specific_constants {
    instruction_size        = 4,
    instruction_offset      = 0,
    data_offset             = 0,
    next_instruction_offset = 4
  };

  address instruction_address() const      { return addr_at(instruction_offset); }
  address next_instruction_address() const { return addr_at(instruction_size); }
  address jump_destination() const;
  void set_jump_destination(address dest);

  // Creation
  inline friend NativeJump* nativeJump_at(address address);

  void verify();

  // Unit testing stuff
  static void test() {}

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry);
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};

inline NativeJump* nativeJump_at(address address) {
  NativeJump* jump = (NativeJump*)(address - NativeJump::instruction_offset);
#ifdef ASSERT
  jump->verify();
#endif
  return jump;
}

class NativeGeneralJump: public NativeJump {
 public:
  enum AArch64_specific_constants {
    instruction_size        = 4 * 4,
    instruction_offset      = 0,
    data_offset             = 0,
    next_instruction_offset = 4 * 4
  };

  address jump_destination() const;
  void set_jump_destination(address dest);

  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);
  static void verify();
};

inline NativeGeneralJump* nativeGeneralJump_at(address address) {
  NativeGeneralJump* jump = (NativeGeneralJump*)(address);
  debug_only(jump->verify();)
  return jump;
}

class NativePopReg : public NativeInstruction {
 public:
  // Insert a pop instruction
  static void insert(address code_pos, Register reg);
};


class NativeIllegalInstruction: public NativeInstruction {
 public:
  // Insert illegal opcode at specific address
  static void insert(address code_pos);
};

// return instruction that does not pop values off the stack
class NativeReturn: public NativeInstruction {
 public:
};

// return instruction that does pop values off the stack
class NativeReturnX: public NativeInstruction {
 public:
};

// Simple test vs memory
class NativeTstRegMem: public NativeInstruction {
 public:
};

inline bool NativeInstruction::is_nop() {
  uint32_t insn = *(uint32_t*)addr_at(0);
  return insn == 0xd503201f;
}

inline bool NativeInstruction::is_jump() {
  uint32_t insn = *(uint32_t*)addr_at(0);

  if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) {
    // Unconditional branch (immediate)
    return true;
  } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) {
    // Conditional branch (immediate)
    return true;
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) {
    // Compare & branch (immediate)
    return true;
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) {
    // Test & branch (immediate)
    return true;
  } else
    return false;
}

inline bool NativeInstruction::is_jump_or_nop() {
  return is_nop() || is_jump();
}

// Call trampoline stubs.
class NativeCallTrampolineStub : public NativeInstruction {
 public:

  enum AArch64_specific_constants {
    instruction_size        = 4 * 4,
    instruction_offset      = 0,
    data_offset             = 2 * 4,
    next_instruction_offset = 4 * 4
  };

  address destination(nmethod *nm = NULL) const;
  void set_destination(address new_destination);
  ptrdiff_t destination_offset() const;
};

inline bool is_NativeCallTrampolineStub_at(address addr) {
  // Ensure that the stub is exactly
  //      ldr   xscratch1, L
  //      br    xscratch1
  // L:
  uint32_t *i = (uint32_t *)addr;
  return i[0] == 0x58000048 && i[1] == 0xd61f0100;
}

inline NativeCallTrampolineStub* nativeCallTrampolineStub_at(address addr) {
  assert(is_NativeCallTrampolineStub_at(addr), "no call trampoline found");
  return (NativeCallTrampolineStub*)addr;
}

class NativeMembar : public NativeInstruction {
 public:
  unsigned int get_kind() { return Instruction_aarch64::extract(uint_at(0), 11, 8); }
  void set_kind(int order_kind) { Instruction_aarch64::patch(addr_at(0), 11, 8, order_kind); }
};

inline NativeMembar *NativeMembar_at(address addr) {
  assert(nativeInstruction_at(addr)->is_Membar(), "no membar found");
  return (NativeMembar*)addr;
}
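
// An illustrative sketch (not part of the original header): the four "kind"
// bits are the CRm option field of the underlying dmb, so patching them
// changes the barrier type in place.  The option values below come from the
// AArch64 ISA (0b1010 = ishst, 0b1011 = ish); 'membar_pc' is a placeholder:
//
//   NativeMembar* bar = NativeMembar_at(membar_pc);
//   if (bar->get_kind() == 0b1010) {      // dmb ishst
//     bar->set_kind(0b1011);              // widen to a full dmb ish
//     ICache::invalidate_word(membar_pc);
//   }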

class NativeLdSt : public NativeInstruction {
 private:
  int32_t size() { return Instruction_aarch64::extract(uint_at(0), 31, 30); }
  // Check whether the instruction uses an unscaled (ldur/stur-style) immediate offset.
  bool is_ldst_ur() {
    return (Instruction_aarch64::extract(uint_at(0), 29, 21) == 0b111000010 ||
            Instruction_aarch64::extract(uint_at(0), 29, 21) == 0b111000000) &&
           Instruction_aarch64::extract(uint_at(0), 11, 10) == 0b00;
  }
  bool is_ldst_unsigned_offset() {
    return Instruction_aarch64::extract(uint_at(0), 29, 22) == 0b11100101 ||
           Instruction_aarch64::extract(uint_at(0), 29, 22) == 0b11100100;
  }
 public:
  Register target() {
    uint32_t r = Instruction_aarch64::extract(uint_at(0), 4, 0);
    return r == 0x1f ? zr : as_Register(r);
  }
  Register base() {
    uint32_t b = Instruction_aarch64::extract(uint_at(0), 9, 5);
    return b == 0x1f ? sp : as_Register(b);
  }
  int64_t offset() {
    if (is_ldst_ur()) {
      return Instruction_aarch64::sextract(uint_at(0), 20, 12);
    } else if (is_ldst_unsigned_offset()) {
      return Instruction_aarch64::extract(uint_at(0), 21, 10) << size();
    } else {
      // Other addressing modes, e.g. pre-index or post-index.
      ShouldNotReachHere();
      return 0;
    }
  }
  size_t size_in_bytes() { return 1 << size(); }
  bool is_not_pre_post_index() { return (is_ldst_ur() || is_ldst_unsigned_offset()); }
  bool is_load() {
    assert(Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b01 ||
           Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b00, "must be ldr or str");

    return Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b01;
  }
  bool is_store() {
    assert(Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b01 ||
           Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b00, "must be ldr or str");

    return Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b00;
  }
};

inline NativeLdSt *NativeLdSt_at(address addr) {
  assert(nativeInstruction_at(addr)->is_Imm_LdSt(), "no immediate load/store found");
  return (NativeLdSt*)addr;
}
#endif // CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP