1 /* 2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2014, Red Hat Inc. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 * 24 */ 25 26 #ifndef CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP 27 #define CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP 28 29 #include "asm/assembler.hpp" 30 #include "memory/allocation.hpp" 31 #include "runtime/icache.hpp" 32 #include "runtime/os.hpp" 33 #include "utilities/top.hpp" 34 35 // We have interfaces for the following instructions: 36 // - NativeInstruction 37 // - - NativeCall 38 // - - NativeMovConstReg 39 // - - NativeMovConstRegPatching 40 // - - NativeMovRegMem 41 // - - NativeMovRegMemPatching 42 // - - NativeJump 43 // - - NativeIllegalOpCode 44 // - - NativeGeneralJump 45 // - - NativeReturn 46 // - - NativeReturnX (return with argument) 47 // - - NativePushConst 48 // - - NativeTstRegMem 49 50 // The base class for different kinds of native instruction abstractions. 
// Provides the primitive operations to manipulate code relative to this.

// Base class for all native (machine-level) instruction abstractions.
// Objects of this class are never constructed directly: a code address is
// cast to a NativeInstruction* (see nativeInstruction_at below), so 'this'
// is the address of the instruction being inspected or patched.
class NativeInstruction VALUE_OBJ_CLASS_SPEC {
  friend class Relocation;
  friend bool is_NativeCallTrampolineStub_at(address);
 public:
  enum { instruction_size = 4 };  // every AArch64 instruction is 32 bits wide

  // Instruction-kind predicates; each inspects the word(s) at 'this'.
  inline bool is_nop();
  inline bool is_illegal();
  inline bool is_return();
  bool is_jump();
  inline bool is_jump_or_nop();
  inline bool is_cond_jump();
  bool is_safepoint_poll();
  inline bool is_mov_literal64();
  bool is_movz();
  bool is_movk();
  bool is_sigill_zombie_not_entrant();

 protected:
  // Address of this instruction plus 'offset' bytes.
  address addr_at(int offset) const    { return address(this) + offset; }

  // Raw typed loads from the instruction stream.
  s_char sbyte_at(int offset) const    { return *(s_char*) addr_at(offset); }
  u_char ubyte_at(int offset) const    { return *(u_char*) addr_at(offset); }

  jint int_at(int offset) const        { return *(jint*) addr_at(offset); }
  juint uint_at(int offset) const      { return *(juint*) addr_at(offset); }

  address ptr_at(int offset) const     { return *(address*) addr_at(offset); }

  oop  oop_at (int offset) const       { return *(oop*) addr_at(offset); }


  // Raw typed stores into the instruction stream.  Callers are responsible
  // for any instruction-cache invalidation the patch requires (see e.g.
  // NativeMovConstReg::flush below).
  void set_char_at(int offset, char c)        { *addr_at(offset) = (u_char)c; }
  void set_int_at(int offset, jint  i)        { *(jint*)addr_at(offset) = i; }
  void set_uint_at(int offset, jint  i)       { *(juint*)addr_at(offset) = i; }
  void set_ptr_at (int offset, address  ptr)  { *(address*) addr_at(offset) = ptr; }
  void set_oop_at (int offset, oop  o)        { *(oop*) addr_at(offset) = o; }

 public:

  // unit test stuff
  static void test() {}                 // override for testing

  inline friend NativeInstruction* nativeInstruction_at(address address);

  static bool is_adrp_at(address instr);
  static bool is_ldr_literal_at(address instr);
  static bool is_ldrw_to_zr(address instr);

  // True if the instruction at 'instr' may be a constant-pool reference,
  // i.e. either an adrp or a pc-relative ldr literal.
  static bool maybe_cpool_ref(address instr) {
    return is_adrp_at(instr) || is_ldr_literal_at(instr);
  }
};

// View the code at 'address' as a NativeInstruction (no validity check).
inline NativeInstruction* nativeInstruction_at(address address) {
  return (NativeInstruction*)address;
}

// The natural type of an AArch64 instruction is uint32_t
inline NativeInstruction* nativeInstruction_at(uint32_t *address) {
  return (NativeInstruction*)address;
}

inline NativeCall* nativeCall_at(address address);
// The NativeCall is an abstraction for accessing/manipulating native call imm32/rel32off
// instructions (used to manipulate inline caches, primitive & dll calls, etc.).
// On AArch64 the call is a single BL instruction with a 26-bit word offset.

class NativeCall: public NativeInstruction {
 public:
  enum Aarch64_specific_constants {
    instruction_size            =    4,
    instruction_offset          =    0,
    displacement_offset         =    0,
    return_address_offset       =    4
  };

  enum { cache_line_size = BytesPerWord };  // conservative estimate!
  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(return_address_offset); }
  // Branch displacement in bytes: bits 25..0 of a BL hold imm26, a signed
  // word offset.  "<< 6" moves the sign bit of imm26 to bit 31; the
  // arithmetic ">> 4" then sign-extends and leaves the offset scaled by 4.
  int   displacement() const                { return (int_at(displacement_offset) << 6) >> 4; }
  address displacement_address() const      { return addr_at(displacement_offset); }
  address return_address() const            { return addr_at(return_address_offset); }
  address destination() const;

  // Re-encode this instruction as "BL dest".  'dest' must be word-aligned
  // and within the signed 26-bit word-offset range of the call site.
  void set_destination(address dest) {
    int offset = dest - instruction_address();
    unsigned int insn = 0b100101 << 26;   // BL opcode in bits 31..26
    assert((offset & 3) == 0, "should be");
    offset >>= 2;                         // imm26 counts words, not bytes
    offset &= (1 << 26) - 1;              // mask off insn part
    insn |= offset;
    set_int_at(displacement_offset, insn);
  }

  void  verify_alignment()                  { ; }  // single 32-bit word; always aligned
  void  verify();
  void  print();

  // Creation
  inline friend NativeCall* nativeCall_at(address address);
  inline friend NativeCall* nativeCall_before(address return_address);

  // True if the word at 'instr' has the BL opcode (0b100101 in bits 31..26).
  static bool is_call_at(address instr) {
    const uint32_t insn = (*(uint32_t*)instr);
    return (insn >> 26) == 0b100101;
  }

  static bool is_call_before(address return_address) {
    return is_call_at(return_address - NativeCall::return_address_offset);
  }

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry);

  static void replace_mt_safe(address instr_addr, address code_buffer);

  // Similar to replace_mt_safe, but just changes the destination. The
  // important thing is that free-running threads are able to execute
  // this call instruction at all times. If the call is an immediate BL
  // instruction we can simply rely on atomicity of 32-bit writes to
  // make sure other threads will see no intermediate states.

  // We cannot rely on locks here, since the free-running threads must run at
  // full speed.
  //
  // Used in the runtime linkage of calls; see class CompiledIC.
  // (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)

  // The parameter assert_lock disables the assertion during code generation.
  void set_destination_mt_safe(address dest, bool assert_lock = true);

  address get_trampoline();
};

// View the code at 'address' as a NativeCall (verified under ASSERT).
inline NativeCall* nativeCall_at(address address) {
  NativeCall* call = (NativeCall*)(address - NativeCall::instruction_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

// View the call whose return address is 'return_address'.
inline NativeCall* nativeCall_before(address return_address) {
  NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

// An interface for accessing/manipulating native mov reg, imm instructions.
// (used to manipulate inlined 64-bit data calls, etc.)
class NativeMovConstReg: public NativeInstruction {
 public:
  enum Aarch64_specific_constants {
    instruction_size            =    3 * 4, // movz, movk, movk.  See movptr().
    instruction_offset          =    0,
    displacement_offset         =    0,
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  // The constant may have been materialized by one of three sequences;
  // return the address just past whichever is present at this site.
  address next_instruction_address() const  {
    if (nativeInstruction_at(instruction_address())->is_movz())
      // Assume movz, movk, movk
      return addr_at(instruction_size);
    else if (is_adrp_at(instruction_address()))
      return addr_at(2*4);          // adrp-based sequence: two instructions
    else if (is_ldr_literal_at(instruction_address()))
      return(addr_at(4));           // single pc-relative ldr literal
    assert(false, "Unknown instruction in NativeMovConstReg");
    return NULL;
  }

  intptr_t data() const;
  void  set_data(intptr_t x);

  // Invalidate the icache for the patched range — but only when actual
  // instructions were rewritten; a constant-pool reference changes data,
  // not code, so no invalidation is needed there.
  void flush() {
    if (! maybe_cpool_ref(instruction_address())) {
      ICache::invalidate_range(instruction_address(), instruction_size);
    }
  }

  void  verify();
  void  print();

  // unit test stuff
  static void test() {}

  // Creation
  inline friend NativeMovConstReg* nativeMovConstReg_at(address address);
  inline friend NativeMovConstReg* nativeMovConstReg_before(address address);
};

// View the code at 'address' as a NativeMovConstReg (verified under ASSERT).
inline NativeMovConstReg* nativeMovConstReg_at(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

// View the NativeMovConstReg that ends at 'address'.
inline NativeMovConstReg* nativeMovConstReg_before(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

class NativeMovConstRegPatching: public NativeMovConstReg {
 private:
  friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_offset);
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }
};
// An interface for accessing/manipulating native moves of the form:
//      mov[b/w/l/q] [reg + offset], reg   (instruction_code_reg2mem)
//      mov[b/w/l/q] reg, [reg+offset]     (instruction_code_mem2reg
//      mov[s/z]x[w/b/q] [reg + offset], reg
//      fld_s  [reg+offset]
//      fld_d  [reg+offset]
//      fstp_s [reg + offset]
//      fstp_d [reg + offset]
//      mov_literal64  scratch,<pointer> ; mov[b/w/l/q] 0(scratch),reg | mov[b/w/l/q] reg,0(scratch)
//
// Warning: These routines must be able to handle any instruction sequences
// that are generated as a result of the load/store byte,word,long
// macros.  For example: The load_unsigned_byte instruction generates
// an xor reg,reg inst prior to generating the movb instruction.  This
// class must skip the xor instruction.
// NOTE(review): the mnemonics above are x86-style; carried over from the
// port this file was derived from — the AArch64 accessors are declared below.

class NativeMovRegMem: public NativeInstruction {
  enum AArch64_specific_constants {
    instruction_size            =    4,
    instruction_offset          =    0,
    data_offset                 =    0,
    next_instruction_offset     =    4
  };

 public:
  // helper
  int instruction_start() const;

  address instruction_address() const;

  address next_instruction_address() const;

  // Memory-operand displacement accessors (defined in the .cpp file).
  int   offset() const;

  void  set_offset(int x);

  // Adjust the memory-operand displacement by 'add_offset' bytes.
  void  add_offset_in_bytes(int add_offset) { set_offset ( ( offset() + add_offset ) ); }

  void verify();
  void print ();

  // unit test stuff
  static void test() {}

 private:
  inline friend NativeMovRegMem* nativeMovRegMem_at (address address);
};

// View the code at 'address' as a NativeMovRegMem (verified under ASSERT).
inline NativeMovRegMem* nativeMovRegMem_at (address address) {
  NativeMovRegMem* test = (NativeMovRegMem*)(address - NativeMovRegMem::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

class NativeMovRegMemPatching: public NativeMovRegMem {
 private:
  // Not supported on this platform.
  friend NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address) {Unimplemented(); return 0;  }
};

// An interface for accessing/manipulating native leal instruction of form:
//        leal reg, [reg + offset]
// NOTE(review): 'leal'/REX are x86 notions retained from the template port.

class NativeLoadAddress: public NativeMovRegMem {
  static const bool has_rex = true;
  static const int rex_size = 1;
 public:

  void verify();
  void print ();

  // unit test stuff
  static void test() {}
};

// A single unconditional branch (immediate) instruction.
class NativeJump: public NativeInstruction {
 public:
  enum AArch64_specific_constants {
    instruction_size            =    4,
    instruction_offset          =    0,
    data_offset                 =    0,
    next_instruction_offset     =    4
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(instruction_size); }
  address jump_destination() const;
  void set_jump_destination(address dest);

  // Creation
  inline friend NativeJump* nativeJump_at(address address);

  void verify();

  // Unit testing stuff
  static void test() {}

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry);
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};

// View the code at 'address' as a NativeJump (verified under ASSERT).
inline NativeJump* nativeJump_at(address address) {
  NativeJump* jump = (NativeJump*)(address - NativeJump::instruction_offset);
#ifdef ASSERT
  jump->verify();
#endif
  return jump;
}

// A four-instruction (16-byte) jump sequence with room for a far target.
class NativeGeneralJump: public NativeJump {
 public:
  enum AArch64_specific_constants {
    instruction_size            =    4 * 4,
    instruction_offset          =    0,
    data_offset                 =    0,
    next_instruction_offset     =    4 * 4
  };
  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);
  static void verify();
};

// View the code at 'address' as a NativeGeneralJump (verified in debug builds).
inline NativeGeneralJump* nativeGeneralJump_at(address address) {
  NativeGeneralJump* jump = (NativeGeneralJump*)(address);
  debug_only(jump->verify();)
  return jump;
}

class NativePopReg : public NativeInstruction {
 public:
  // Insert a pop instruction
  static void insert(address code_pos, Register reg);
};


class NativeIllegalInstruction: public NativeInstruction {
 public:
  // Insert illegal opcode as specific address
  static void insert(address code_pos);
};

// return instruction that does not pop values of the stack
class NativeReturn: public NativeInstruction {
 public:
};

// return instruction that does pop values of the stack
class NativeReturnX: public NativeInstruction {
 public:
};

// Simple test vs memory
class NativeTstRegMem: public NativeInstruction {
 public:
};

// True iff this word is the canonical AArch64 NOP encoding (0xd503201f).
inline bool NativeInstruction::is_nop() {
  uint32_t insn = *(uint32_t*)addr_at(0);
  return insn == 0xd503201f;
}

// True iff this word is one of the four AArch64 immediate branch encodings.
inline bool NativeInstruction::is_jump() {
  uint32_t insn = *(uint32_t*)addr_at(0);

  if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) {
    // Unconditional branch (immediate)
    return true;
  } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) {
    // Conditional branch (immediate)
    return true;
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) {
    // Compare & branch (immediate)
    return true;
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) {
    // Test & branch (immediate)
    return true;
  } else
    return false;
}

inline bool NativeInstruction::is_jump_or_nop() {
  return is_nop() || is_jump();
}

// Call trampoline stubs.
// A trampoline stub: an indirect far branch used when a BL at a call site
// cannot reach its destination directly.  Layout (16 bytes):
//   ldr  xscratch1, L   // load 64-bit target from the literal below
//   br   xscratch1
//   L: <64-bit destination address>
class NativeCallTrampolineStub : public NativeInstruction {
 public:

  enum AArch64_specific_constants {
    instruction_size            =    4 * 4,
    instruction_offset          =    0,
    data_offset                 =    2 * 4,  // the 64-bit target follows the two instructions
    next_instruction_offset     =    4 * 4
  };

  address destination(nmethod *nm = NULL) const;
  void set_destination(address new_destination);
  ptrdiff_t destination_offset() const;
};

// True iff 'addr' holds exactly the two-instruction trampoline pattern.
// 0x58000048 = ldr x8, #8 (pc-relative literal); 0xd61f0100 = br x8.
inline bool is_NativeCallTrampolineStub_at(address addr) {
  // Ensure that the stub is exactly
  //      ldr   xscratch1, L
  //      br    xscratch1
  // L:
  uint32_t *i = (uint32_t *)addr;
  return i[0] == 0x58000048 && i[1] == 0xd61f0100;
}

// View the code at 'addr' as a trampoline stub; asserts the exact pattern.
inline NativeCallTrampolineStub* nativeCallTrampolineStub_at(address addr) {
  assert(is_NativeCallTrampolineStub_at(addr), "no call trampoline found");
  return (NativeCallTrampolineStub*)addr;
}

#endif // CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP