1 /* 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef CPU_SPARC_VM_NATIVEINST_SPARC_HPP 26 #define CPU_SPARC_VM_NATIVEINST_SPARC_HPP 27 28 #include "asm/macroAssembler.hpp" 29 #include "memory/allocation.hpp" 30 #include "runtime/icache.hpp" 31 #include "runtime/os.hpp" 32 33 // We have interface for the following instructions: 34 // - NativeInstruction 35 // - - NativeCall 36 // - - NativeFarCall 37 // - - NativeMovConstReg 38 // - - NativeMovConstRegPatching 39 // - - NativeMovRegMem 40 // - - NativeJump 41 // - - NativeGeneralJump 42 // - - NativeIllegalInstruction 43 // The base class for different kinds of native instruction abstractions. 44 // Provides the primitive operations to manipulate code relative to this. 
// The base class for SPARC native-instruction abstractions.  An object of
// this type is never constructed: a NativeInstruction* is an overlay over
// the code address it was cast from (see nativeInstruction_at() below), and
// its methods decode or patch the 32-bit instruction word(s) at that address.
class NativeInstruction VALUE_OBJ_CLASS_SPEC {
  friend class Relocation;

 public:
  enum Sparc_specific_constants {
    nop_instruction_size = 4
  };

  // Predicates over the first instruction word at this address.
  bool is_nop()      { return long_at(0) == nop_instruction(); }
  bool is_call()     { return is_op(long_at(0), Assembler::call_op); }
  bool is_call_reg() { return is_op(long_at(0), Assembler::arith_op); }
  bool is_sethi()    { return (is_op2(long_at(0), Assembler::sethi_op2)
                          && inv_rd(long_at(0)) != G0); }

  bool sets_cc() {
    // Conservative: returns true for some instructions that do not set the
    // condition code, such as "save".
    // Does not return true for the deprecated tagged instructions, such as TADDcc.
    int x = long_at(0);
    return (is_op(x, Assembler::arith_op) &&
            (inv_op3(x) & Assembler::cc_bit_op3) == Assembler::cc_bit_op3);
  }
  bool is_illegal();
  // Zombie marker: "ldsw [%g0 + simm13], %o7" (rs1 == G0, rd == O7).
  bool is_zombie() {
    int x = long_at(0);
    return is_op3(x,
                  Assembler::ldsw_op3,
                  Assembler::ldst_op)
        && Assembler::inv_rs1(x) == G0
        && Assembler::inv_rd(x) == O7;
  }
  bool is_ic_miss_trap();       // Inline-cache uses a trap to detect a miss
  bool is_return() {
    // is it the output of MacroAssembler::ret or MacroAssembler::retl?
    int x = long_at(0);
    const int pc_return_offset = 8; // see frame_sparc.hpp
    return is_op3(x, Assembler::jmpl_op3, Assembler::arith_op)
        && (inv_rs1(x) == I7 || inv_rs1(x) == O7)
        && inv_immed(x) && inv_simm(x, 13) == pc_return_offset
        && inv_rd(x) == G0;
  }
  bool is_int_jump() {
    // is it the output of MacroAssembler::b?
    int x = long_at(0);
    return is_op2(x, Assembler::bp_op2) || is_op2(x, Assembler::br_op2);
  }
  bool is_float_jump() {
    // is it the output of MacroAssembler::fb?
    int x = long_at(0);
    return is_op2(x, Assembler::fbp_op2) || is_op2(x, Assembler::fb_op2);
  }
  bool is_jump() {
    return is_int_jump() || is_float_jump();
  }
  // Jump with a real condition (i.e. not an "always"/"f_always" branch).
  bool is_cond_jump() {
    int x = long_at(0);
    return (is_int_jump() && Assembler::inv_cond(x) != Assembler::always) ||
           (is_float_jump() && Assembler::inv_cond(x) != Assembler::f_always);
  }

  // Stack-overflow bang: "stw %g0, [%sp + %g3]".
  bool is_stack_bang() {
    int x = long_at(0);
    return is_op3(x, Assembler::stw_op3, Assembler::ldst_op) &&
      (inv_rd(x) == G0) && (inv_rs1(x) == SP) && (inv_rs2(x) == G3_scratch);
  }

  bool is_prefetch() {
    int x = long_at(0);
    return is_op3(x, Assembler::prefetch_op3, Assembler::ldst_op);
  }

  bool is_membar() {
    int x = long_at(0);
    return is_op3(x, Assembler::membar_op3, Assembler::arith_op) &&
      (inv_rd(x) == G0) && (inv_rs1(x) == O7);
  }

  // Safepoint poll: a load into %g0 (ldx on 64-bit, lduw on 32-bit) with a
  // zero immediate or a %g0 index register.
  bool is_safepoint_poll() {
    int x = long_at(0);
#ifdef _LP64
    return is_op3(x, Assembler::ldx_op3,  Assembler::ldst_op) &&
#else
    return is_op3(x, Assembler::lduw_op3, Assembler::ldst_op) &&
#endif
      (inv_rd(x) == G0) && (inv_immed(x) ? Assembler::inv_simm13(x) == 0 : inv_rs2(x) == G0);
  }

  bool is_zero_test(Register &reg);
  bool is_load_store_with_small_offset(Register reg);

 public:
  // Encoders: build instruction words from fields (do not write memory).
#ifdef ASSERT
  static int rdpc_instruction()        { return Assembler::op(Assembler::arith_op ) | Assembler::op3(Assembler::rdreg_op3) | Assembler::u_field(5, 18, 14) | Assembler::rd(O7); }
#else
  // Temporary fix: in optimized mode, u_field is a macro for efficiency reasons (see Assembler::u_field) - needs to be fixed
  static int rdpc_instruction()        { return Assembler::op(Assembler::arith_op ) | Assembler::op3(Assembler::rdreg_op3) | u_field(5, 18, 14) | Assembler::rd(O7); }
#endif
  static int nop_instruction()         { return Assembler::op(Assembler::branch_op) | Assembler::op2(Assembler::sethi_op2); }
  static int illegal_instruction();    // the output of __ breakpoint_trap()
  static int call_instruction(address destination, address pc) { return Assembler::op(Assembler::call_op) | Assembler::wdisp((intptr_t)destination, (intptr_t)pc, 30); }

  static int branch_instruction(Assembler::op2s op2val, Assembler::Condition c, bool a) {
    return Assembler::op(Assembler::branch_op) | Assembler::op2(op2val) | Assembler::annul(a) | Assembler::cond(c);
  }

  static int op3_instruction(Assembler::ops opval, Register rd, Assembler::op3s op3val, Register rs1, int simm13a) {
    return Assembler::op(opval) | Assembler::rd(rd) | Assembler::op3(op3val) | Assembler::rs1(rs1) | Assembler::immed(true) | Assembler::simm(simm13a, 13);
  }

  static int sethi_instruction(Register rd, int imm22a) {
    return Assembler::op(Assembler::branch_op) | Assembler::rd(rd) | Assembler::op2(Assembler::sethi_op2) | Assembler::hi22(imm22a);
  }

 protected:
  // Raw access to the code at this overlay's address.
  address  addr_at(int offset) const        { return address(this) + offset; }
  int      long_at(int offset) const        { return *(int*)addr_at(offset); }
  void set_long_at(int offset, int i);      /* deals with I-cache */
  void set_jlong_at(int offset, jlong i);   /* deals with I-cache */
  void set_addr_at(int offset, address x);  /* deals with I-cache */

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(BytesPerInstWord); }

  // Opcode-field matchers.
  static bool is_op( int x, Assembler::ops opval) {
    return Assembler::inv_op(x) == opval;
  }
  static bool is_op2(int x, Assembler::op2s op2val) {
    return Assembler::inv_op(x) == Assembler::branch_op && Assembler::inv_op2(x) == op2val;
  }
  static bool is_op3(int x, Assembler::op3s op3val, Assembler::ops opval) {
    return Assembler::inv_op(x) == opval && Assembler::inv_op3(x) == op3val;
  }

  // utilities to help subclasses decode:
  static Register inv_rd(  int x ) { return Assembler::inv_rd( x); }
  static Register inv_rs1( int x ) { return Assembler::inv_rs1(x); }
  static Register inv_rs2( int x ) { return Assembler::inv_rs2(x); }

  static bool inv_immed( int x ) { return Assembler::inv_immed(x); }
  static bool inv_annul( int x ) { return (Assembler::annul(true) & x) != 0; }
  static int  inv_cond(  int x ) { return Assembler::inv_cond(x); }

  static int inv_op(  int x ) { return Assembler::inv_op( x); }
  static int inv_op2( int x ) { return Assembler::inv_op2(x); }
  static int inv_op3( int x ) { return Assembler::inv_op3(x); }

  static int inv_simm(    int x, int nbits ) { return Assembler::inv_simm(x, nbits); }
  static intptr_t inv_wdisp(   int x, int nbits ) { return Assembler::inv_wdisp(  x, 0, nbits); }
  static intptr_t inv_wdisp16( int x )            { return Assembler::inv_wdisp16(x, 0); }
  static int branch_destination_offset(int x) { return MacroAssembler::branch_destination(x, 0); }
  static int patch_branch_destination_offset(int dest_offset, int x) {
    return MacroAssembler::patched_branch(dest_offset, x, 0);
  }

  // utility for checking if x is either of 2 small constants
  // (bit-mask trick; x, k1, k2 must all be < 32 bits of shift, which holds
  // for the 2-bit op fields this is used with)
  static bool is_either(int x, int k1, int k2) {
    // return x == k1 || x == k2;
    return (1 << x) & (1 << k1 | 1 << k2);
  }

  // utility for checking overflow of signed instruction fields
  static bool fits_in_simm(int x, int nbits) {
    // cf. Assembler::assert_signed_range()
    // return -(1 << nbits-1) <= x && x < ( 1 << nbits-1),
    return (unsigned)(x + (1 << nbits-1)) < (unsigned)(1 << nbits);
  }

  // set a signed immediate field (clears the old field, ORs in the new one)
  static int set_simm(int insn, int imm, int nbits) {
    return (insn &~ Assembler::simm(-1, nbits)) | Assembler::simm(imm, nbits);
  }

  // set a wdisp field (disp should be the difference of two addresses)
  static int set_wdisp(int insn, intptr_t disp, int nbits) {
    return (insn &~ Assembler::wdisp((intptr_t)-4, (intptr_t)0, nbits)) | Assembler::wdisp(disp, 0, nbits);
  }

  static int set_wdisp16(int insn, intptr_t disp) {
    return (insn &~ Assembler::wdisp16((intptr_t)-4, 0)) | Assembler::wdisp16(disp, 0);
  }

  // get a simm13 field from an arithmetic or memory instruction
  static int get_simm13(int insn) {
    assert(is_either(Assembler::inv_op(insn),
                     Assembler::arith_op, Assembler::ldst_op) &&
           (insn & Assembler::immed(true)), "must have a simm13 field");
    return Assembler::inv_simm(insn, 13);
  }

  // set the simm13 field of an arithmetic or memory instruction
  // NOTE(review): declared bool although set_simm() yields the patched
  // instruction word — verify against callers whether int was intended.
  static bool set_simm13(int insn, int imm) {
    get_simm13(insn);           // tickle the assertion check
    return set_simm(insn, imm, 13);
  }

  // combine the fields of a sethi stream (7 instructions) and an add, jmp or ld/st
  static intptr_t data64( address pc, int arith_insn ) {
    assert(is_op2(*(unsigned int *)pc, Assembler::sethi_op2), "must be sethi");
    intptr_t hi = (intptr_t)gethi( (unsigned int *)pc );
    intptr_t lo = (intptr_t)get_simm13(arith_insn);
    assert((unsigned)lo < (1 << 10), "offset field of set_metadata must be 10 bits");
    return hi | lo;
  }

  // Regenerate the instruction sequence that performs the 64 bit
  // sethi.  This only does the sethi.  The disp field (bottom 10 bits)
  // must be handled separately.
  static void set_data64_sethi(address instaddr, intptr_t x);
  static void verify_data64_sethi(address instaddr, intptr_t x);

  // combine the fields of a sethi/simm13 pair (simm13 = or, add, jmpl, ld/st)
  static int data32(int sethi_insn, int arith_insn) {
    assert(is_op2(sethi_insn, Assembler::sethi_op2), "must be sethi");
    int hi = Assembler::inv_hi22(sethi_insn);
    int lo = get_simm13(arith_insn);
    assert((unsigned)lo < (1 << 10), "offset field of set_metadata must be 10 bits");
    return hi | lo;
  }

  static int set_data32_sethi(int sethi_insn, int imm) {
    // note that Assembler::hi22 clips the low 10 bits for us
    assert(is_op2(sethi_insn, Assembler::sethi_op2), "must be sethi");
    return (sethi_insn &~ Assembler::hi22(-1)) | Assembler::hi22(imm);
  }

  static int set_data32_simm13(int arith_insn, int imm) {
    get_simm13(arith_insn);     // tickle the assertion check
    int imm10 = Assembler::low10(imm);
    return (arith_insn &~ Assembler::simm(-1, 13)) | Assembler::simm(imm10, 13);
  }

  static int low10(int imm) {
    return Assembler::low10(imm);
  }

  // Perform the inverse of the LP64 MacroAssembler::sethi
  // routine.  Extracts the 54 bits of address from the instruction
  // stream.  This routine must agree with the sethi routine in
  // assembler_inline_sparc.hpp
  static address gethi( unsigned int *pc ) {
    int i = 0;
    uintptr_t adr;
    // We first start out with the real sethi instruction
    assert(is_op2(*pc, Assembler::sethi_op2), "in gethi - must be sethi");
    adr = (unsigned int)Assembler::inv_hi22( *(pc++) );
    i++;
    // Walk up to six follow-on arith instructions (xor/sll/or), stopping
    // early at a nop; an xor terminates the sequence immediately.
    while ( i < 7 ) {
      // We're done if we hit a nop
      if ( (int)*pc == nop_instruction() ) break;
      assert ( Assembler::inv_op(*pc) == Assembler::arith_op, "in gethi - must be arith_op" );
      switch ( Assembler::inv_op3(*pc) ) {
        case Assembler::xor_op3:
          adr ^= (intptr_t)get_simm13( *pc );
          return ( (address)adr );
          break;
        case Assembler::sll_op3:
          adr <<= ( *pc & 0x3f );
          break;
        case Assembler::or_op3:
          adr |= (intptr_t)get_simm13( *pc );
          break;
        default:
          assert ( 0, "in gethi - Should not reach here" );
          break;
      }
      pc++;
      i++;
    }
    return ( (address)adr );
  }

 public:
  void  verify();
  void  print();

  // unit test stuff
  static void test() {}                 // override for testing

  inline friend NativeInstruction* nativeInstruction_at(address address);
};

// Overlay accessor: reinterpret a code address as a NativeInstruction,
// with sanity-checking in debug builds.
inline NativeInstruction* nativeInstruction_at(address address) {
  NativeInstruction* inst = (NativeInstruction*)address;
#ifdef ASSERT
  inst->verify();
#endif
  return inst;
}



//-----------------------------------------------------------------------------

// The NativeCall is an abstraction for accessing/manipulating native call imm32 instructions.
// (used to manipulate inline caches, primitive & dll calls, etc.)
inline NativeCall* nativeCall_at(address instr);
inline NativeCall* nativeCall_overwriting_at(address instr,
                                             address destination);
inline NativeCall* nativeCall_before(address return_address);
// A pc-relative "call" instruction plus its delay slot.
class NativeCall: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    instruction_size          = 8,   // call + delay slot
    return_address_offset     = 8,
    call_displacement_width   = 30,  // word displacement field of the call
    displacement_offset       = 0,
    instruction_offset        = 0
  };
  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }
  address return_address() const            { return addr_at(return_address_offset); }

  // The target is encoded as a 30-bit word displacement relative to the call.
  address destination() const               { return inv_wdisp(long_at(0), call_displacement_width) + instruction_address(); }
  address displacement_address() const      { return addr_at(displacement_offset); }
  void  set_destination(address dest)       { set_long_at(0, set_wdisp(long_at(0), dest - instruction_address(), call_displacement_width)); }
  void  set_destination_mt_safe(address dest);

  void  verify_alignment() {}  // do nothing on sparc
  void  verify();
  void  print();

  // unit test stuff
  static void  test();

  // Creation
  friend inline NativeCall* nativeCall_at(address instr);
  // Overwrite the two words at instr with "call destination; nop".
  friend NativeCall* nativeCall_overwriting_at(address instr, address destination = NULL) {
    // insert a "blank" call:
    NativeCall* call = (NativeCall*)instr;
    call->set_long_at(0 * BytesPerInstWord, call_instruction(destination, instr));
    call->set_long_at(1 * BytesPerInstWord, nop_instruction());
    assert(call->addr_at(2 * BytesPerInstWord) - instr == instruction_size, "instruction size");
    // check its structure now:
    assert(nativeCall_at(instr)->destination() == destination, "correct call destination");
    return call;
  }

  // View the call whose return address is return_address.
  friend inline NativeCall* nativeCall_before(address return_address) {
    NativeCall* call = (NativeCall*)(return_address - return_address_offset);
#ifdef ASSERT
    call->verify();
#endif
    return call;
  }

  static bool is_call_at(address instr) {
    return nativeInstruction_at(instr)->is_call();
  }

  static bool is_call_before(address instr) {
    return nativeInstruction_at(instr - return_address_offset)->is_call();
  }

  static bool is_call_to(address instr, address target) {
    return nativeInstruction_at(instr)->is_call() &&
      nativeCall_at(instr)->destination() == target;
  }

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry) {
    (void)nativeCall_overwriting_at(code_pos, entry);
  }

  static void replace_mt_safe(address instr_addr, address code_buffer);
};

// Overlay accessor with debug-build verification.
inline NativeCall* nativeCall_at(address instr) {
  NativeCall* call = (NativeCall*)instr;
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

// A call through a register (see is_call_reg()); spans the call
// instruction and its delay slot.
class NativeCallReg: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    instruction_size      = 8,
    return_address_offset = 8,
    instruction_offset    = 0
  };

  address next_instruction_address() const {
    return addr_at(instruction_size);
  }
};

// The NativeFarCall is an abstraction for accessing/manipulating native call-anywhere
// instructions in the sparcv9 vm.  Used to call native methods which may be loaded
// anywhere in the address space, possibly out of reach of a call instruction.

#ifndef _LP64

// On 32-bit systems, a far call is the same as a near one.
class NativeFarCall;
inline NativeFarCall* nativeFarCall_at(address instr);
class NativeFarCall : public NativeCall {
 public:
  // All far-call accessors simply delegate to the NativeCall equivalents.
  friend inline NativeFarCall* nativeFarCall_at(address instr) { return (NativeFarCall*)nativeCall_at(instr); }
  friend NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination = NULL)
    { return (NativeFarCall*)nativeCall_overwriting_at(instr, destination); }
  friend NativeFarCall* nativeFarCall_before(address return_address)
    { return (NativeFarCall*)nativeCall_before(return_address); }
};

#else

// The format of this extended-range call is:
//      jumpl_to addr, lreg
//      == sethi %hi54(addr), O7 ;  jumpl O7, %lo10(addr), O7 ;  <delay>
// That is, it is essentially the same as a NativeJump.
class NativeFarCall;
inline NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination);
inline NativeFarCall* nativeFarCall_at(address instr);
class NativeFarCall: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    // instruction_size includes the delay slot instruction.
    instruction_size      = 9 * BytesPerInstWord,
    return_address_offset = 9 * BytesPerInstWord,
    jmpl_offset           = 7 * BytesPerInstWord,  // jmpl follows the 7-word sethi stream
    displacement_offset   = 0,
    instruction_offset    = 0
  };
  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }
  address return_address() const            { return addr_at(return_address_offset); }

  // Reassemble the 64-bit target from the sethi stream plus the jmpl simm13.
  address destination() const {
    return (address) data64(addr_at(0), long_at(jmpl_offset));
  }
  address displacement_address() const      { return addr_at(displacement_offset); }
  void set_destination(address dest);

  bool destination_is_compiled_verified_entry_point();

  void  verify();
  void  print();

  // unit test stuff
  static void  test();

  // Creation
  friend inline NativeFarCall* nativeFarCall_at(address instr) {
    NativeFarCall* call = (NativeFarCall*)instr;
#ifdef ASSERT
    call->verify();
#endif
    return call;
  }

  friend inline NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination = NULL) {
    Unimplemented();
    NativeFarCall* call = (NativeFarCall*)instr;
    return call;
  }

  friend NativeFarCall* nativeFarCall_before(address return_address) {
    NativeFarCall* call = (NativeFarCall*)(return_address - return_address_offset);
#ifdef ASSERT
    call->verify();
#endif
    return call;
  }

  static bool is_call_at(address instr);

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry) {
    (void)nativeFarCall_overwriting_at(code_pos, entry);
  }
  static void replace_mt_safe(address instr_addr, address code_buffer);
};

#endif // _LP64

// An interface for accessing/manipulating 32 bit native set_metadata imm, reg instructions
// (used to manipulate inlined data references, etc.)
//      set_metadata imm, reg
//      == sethi %hi22(imm), reg ;  add reg, %lo10(imm), reg
class NativeMovConstReg32;
inline NativeMovConstReg32* nativeMovConstReg32_at(address address);
class NativeMovConstReg32: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    sethi_offset     = 0,
    add_offset       = 4,
    instruction_size = 8
  };

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }

  // (The [set_]data accessor respects oop_type relocs also.)
  intptr_t data() const;
  void set_data(intptr_t x);

  // report the destination register
  Register destination() { return inv_rd(long_at(sethi_offset)); }

  void  verify();
  void  print();

  // unit test stuff
  static void test();

  // Creation
  friend inline NativeMovConstReg32* nativeMovConstReg32_at(address address) {
    NativeMovConstReg32* test = (NativeMovConstReg32*)address;
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }
};

// An interface for accessing/manipulating native set_metadata imm, reg instructions.
// (used to manipulate inlined data references, etc.)
//      set_metadata imm, reg
//      == sethi %hi22(imm), reg ;  add reg, %lo10(imm), reg
// On LP64 the sethi expands to a 7-word stream (see data64()/gethi()).
class NativeMovConstReg;
inline NativeMovConstReg* nativeMovConstReg_at(address address);
class NativeMovConstReg: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    sethi_offset = 0,
#ifdef _LP64
    add_offset       = 7 * BytesPerInstWord,
    instruction_size = 8 * BytesPerInstWord
#else
    add_offset       = 4,
    instruction_size = 8
#endif
  };

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }

  // (The [set_]data accessor respects oop_type relocs also.)
  intptr_t data() const;
  void set_data(intptr_t x);

  // report the destination register
  Register destination() { return inv_rd(long_at(sethi_offset)); }

  void  verify();
  void  print();

  // unit test stuff
  static void test();

  // Creation
  friend inline NativeMovConstReg* nativeMovConstReg_at(address address) {
    NativeMovConstReg* test = (NativeMovConstReg*)address;
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }


  // View the instance that ends at (i.e. precedes) the given address.
  friend NativeMovConstReg* nativeMovConstReg_before(address address) {
    NativeMovConstReg* test = (NativeMovConstReg*)(address - instruction_size);
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }

};


// An interface for accessing/manipulating native set_metadata imm, reg instructions.
// (used to manipulate inlined data references, etc.)
//      set_metadata imm, reg
//      == sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg
//
// Note that it is identical to NativeMovConstReg with the exception of a nop between the
// sethi and the add.  The nop is required to be in the delay slot of the call instruction
// which overwrites the sethi during patching.
class NativeMovConstRegPatching;
inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address);
class NativeMovConstRegPatching: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    sethi_offset = 0,
#ifdef _LP64
    nop_offset = 7 * BytesPerInstWord,
#else
    nop_offset = sethi_offset + BytesPerInstWord,
#endif
    add_offset       = nop_offset + BytesPerInstWord,
    instruction_size = add_offset + BytesPerInstWord
  };

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }

  // (The [set_]data accessor respects oop_type relocs also.)
  int data() const;
  void set_data(int x);

  // report the destination register
  Register destination() { return inv_rd(long_at(sethi_offset)); }

  void  verify();
  void  print();

  // unit test stuff
  static void test();

  // Creation
  friend inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)address;
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }


  // View the instance that ends at (i.e. precedes) the given address.
  friend NativeMovConstRegPatching* nativeMovConstRegPatching_before(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_size);
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }

};


// An interface for accessing/manipulating native memory ops
//      ld* [reg + offset], reg
//      st* reg, [reg + offset]
//      sethi %hi(imm), reg; add reg, %lo(imm), reg; ld* [reg1 + reg], reg2
//      sethi %hi(imm), reg; add reg, %lo(imm), reg; st* reg2, [reg1 + reg]
// Ops covered: {lds,ldu,st}{w,b,h}, {ld,st}{d,x}
//
class NativeMovRegMem;
inline NativeMovRegMem* nativeMovRegMem_at (address address);
class NativeMovRegMem: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    // Bit masks over the op3 field, used to classify load/store kinds.
    op3_mask_ld = 1 << Assembler::lduw_op3 |
                  1 << Assembler::ldub_op3 |
                  1 << Assembler::lduh_op3 |
                  1 << Assembler::ldd_op3 |
                  1 << Assembler::ldsw_op3 |
                  1 << Assembler::ldsb_op3 |
                  1 << Assembler::ldsh_op3 |
                  1 << Assembler::ldx_op3,
    op3_mask_st = 1 << Assembler::stw_op3 |
                  1 << Assembler::stb_op3 |
                  1 << Assembler::sth_op3 |
                  1 << Assembler::std_op3 |
                  1 << Assembler::stx_op3,
    // Float op3 values exceed 32, so their masks are rebased at the
    // integer/float boundary (ldf_op3).
    op3_ldst_int_limit = Assembler::ldf_op3,
    op3_mask_ldf = 1 << (Assembler::ldf_op3  - op3_ldst_int_limit) |
                   1 << (Assembler::lddf_op3 - op3_ldst_int_limit),
    op3_mask_stf = 1 << (Assembler::stf_op3  - op3_ldst_int_limit) |
                   1 << (Assembler::stdf_op3 - op3_ldst_int_limit),

    offset_width = 13,
    sethi_offset = 0,
#ifdef _LP64
    add_offset = 7 * BytesPerInstWord,
#else
    add_offset = 4,
#endif
    ldst_offset = add_offset + BytesPerInstWord
  };
  bool is_immediate() const {
    // check if instruction is ld* [reg + offset], reg or st* reg, [reg + offset]
    int i0 = long_at(0);
    return (is_op(i0, Assembler::ldst_op));
  }

  address instruction_address() const { return addr_at(0); }
  address next_instruction_address() const {
#ifdef _LP64
    return addr_at(is_immediate() ? 4 : (7 * BytesPerInstWord));
#else
    return addr_at(is_immediate() ? 4 : 12);
#endif
  }
  // The offset is either the simm13 of the immediate form, or the constant
  // materialized by the leading set (NativeMovConstReg) of the long form.
  intptr_t offset() const {
    return is_immediate() ? inv_simm(long_at(0), offset_width) :
                            nativeMovConstReg_at(addr_at(0))->data();
  }
  void set_offset(intptr_t x) {
    if (is_immediate()) {
      guarantee(fits_in_simm(x, offset_width), "data block offset overflow");
      set_long_at(0, set_simm(long_at(0), x, offset_width));
    } else
      nativeMovConstReg_at(addr_at(0))->set_data(x);
  }

  void add_offset_in_bytes(intptr_t radd_offset) {
    set_offset(offset() + radd_offset);
  }

  void copy_instruction_to(address new_instruction_address);

  void verify();
  void print();

  // unit test stuff
  static void test();

 private:
  friend inline NativeMovRegMem* nativeMovRegMem_at (address address) {
    NativeMovRegMem* test = (NativeMovRegMem*)address;
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }
};


// An interface for accessing/manipulating native jumps
//      jump_to addr
//      == sethi %hi22(addr), temp ;  jumpl reg, %lo10(addr), G0 ;  <delay>
//      jumpl_to addr, lreg
//      == sethi %hi22(addr), temp ;  jumpl reg, %lo10(addr), lreg ;  <delay>
class NativeJump;
inline NativeJump* nativeJump_at(address address);
class NativeJump: public NativeInstruction {
 private:
  void guarantee_displacement(int disp, int width) {
    guarantee(fits_in_simm(disp, width + 2), "branch displacement overflow");
  }

 public:
  enum Sparc_specific_constants {
    sethi_offset = 0,
#ifdef _LP64
    jmpl_offset      = 7 * BytesPerInstWord,
    instruction_size = 9 * BytesPerInstWord  // includes delay slot
#else
    jmpl_offset      = 1 * BytesPerInstWord,
    instruction_size = 3 * BytesPerInstWord  // includes delay slot
#endif
  };

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }

#ifdef _LP64
  // Target is rebuilt from the 7-word sethi stream plus the jmpl simm13.
  address jump_destination() const {
    return (address) data64(instruction_address(), long_at(jmpl_offset));
  }
  void set_jump_destination(address dest) {
    set_data64_sethi(instruction_address(), (intptr_t)dest);
    set_long_at(jmpl_offset, set_data32_simm13(long_at(jmpl_offset), (intptr_t)dest));
  }
#else
  // Target is rebuilt from the single sethi plus the jmpl simm13.
  address jump_destination() const {
    return (address) data32(long_at(sethi_offset), long_at(jmpl_offset));
  }
  void set_jump_destination(address dest) {
    set_long_at(sethi_offset, set_data32_sethi( long_at(sethi_offset), (intptr_t)dest));
    set_long_at(jmpl_offset,  set_data32_simm13(long_at(jmpl_offset),  (intptr_t)dest));
  }
#endif

  // Creation
  friend inline NativeJump* nativeJump_at(address address) {
    NativeJump* jump = (NativeJump*)address;
#ifdef ASSERT
    jump->verify();
#endif
    return jump;
  }

  void verify();
  void print();

  // Unit testing stuff
  static void test();

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry) {
    // nothing to do for sparc.
  }
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};



// Despite the name, handles only simple branches.
class NativeGeneralJump;
inline NativeGeneralJump* nativeGeneralJump_at(address address);
class NativeGeneralJump: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    instruction_size = 8  // branch + delay slot
  };

  address instruction_address() const { return addr_at(0); }
  address jump_destination()    const { return addr_at(0) + branch_destination_offset(long_at(0)); }
  void set_jump_destination(address dest) {
    int patched_instr = patch_branch_destination_offset(dest - addr_at(0), long_at(0));
    set_long_at(0, patched_instr);
  }
  NativeInstruction *delay_slot_instr() { return nativeInstruction_at(addr_at(4)); }
  void fill_delay_slot(int instr)       { set_long_at(4, instr); }
  Assembler::Condition condition() {
    int x = long_at(0);
    return (Assembler::Condition) Assembler::inv_cond(x);
  }

  // Creation
  friend inline NativeGeneralJump* nativeGeneralJump_at(address address) {
    NativeGeneralJump* jump = (NativeGeneralJump*)(address);
#ifdef ASSERT
    jump->verify();
#endif
    return jump;
  }

  // Insertion of native general jump instruction
  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);

  void verify();
};


class NativeIllegalInstruction: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    instruction_size = 4
  };

  // Insert illegal opcode as specific address
  static void insert(address code_pos);
};

#endif // CPU_SPARC_VM_NATIVEINST_SPARC_HPP