1 /* 2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef CPU_SPARC_VM_NATIVEINST_SPARC_HPP 26 #define CPU_SPARC_VM_NATIVEINST_SPARC_HPP 27 28 #include "asm/assembler.hpp" 29 #include "memory/allocation.hpp" 30 #include "runtime/icache.hpp" 31 #include "runtime/os.hpp" 32 #include "utilities/top.hpp" 33 34 // We have interface for the following instructions: 35 // - NativeInstruction 36 // - - NativeCall 37 // - - NativeFarCall 38 // - - NativeMovConstReg 39 // - - NativeMovConstRegPatching 40 // - - NativeMovRegMem 41 // - - NativeMovRegMemPatching 42 // - - NativeJump 43 // - - NativeGeneralJump 44 // - - NativeIllegalInstruction 45 // The base class for different kinds of native instruction abstractions. 46 // Provides the primitive operations to manipulate code relative to this. 
// The base class for SPARC native-instruction abstractions.  An instance is
// overlaid directly on the instruction bytes in the code cache ("this" is the
// instruction's address); it holds no state of its own.  Provides predicates
// that classify the 4-byte instruction word at offset 0, and static helpers
// that assemble/disassemble individual instruction fields.
class NativeInstruction VALUE_OBJ_CLASS_SPEC {
  friend class Relocation;

 public:
  enum Sparc_specific_constants {
    nop_instruction_size = 4          // every SPARC instruction is one 32-bit word
  };

  bool is_dtrace_trap();
  // The sethi predicate excludes "sethi 0, %g0", which is the encoding of nop.
  bool is_nop()    { return long_at(0) == nop_instruction(); }
  bool is_call()   { return is_op(long_at(0), Assembler::call_op); }
  bool is_sethi()  { return (is_op2(long_at(0), Assembler::sethi_op2)
                             && inv_rd(long_at(0)) != G0); }

  bool sets_cc() {
    // Conservative: returns true for some instructions that do not set the
    // condition code (such as "save").
    // Does not return true for the deprecated tagged instructions, such as TADDcc.
    int x = long_at(0);
    return (is_op(x, Assembler::arith_op) &&
            (inv_op3(x) & Assembler::cc_bit_op3) == Assembler::cc_bit_op3);
  }
  bool is_illegal();
  // A zombie marker is a load through %g0 into %o7; the load width depends on
  // whether v9 instructions are available.
  bool is_zombie() {
    int x = long_at(0);
    return is_op3(x,
                  VM_Version::v9_instructions_work() ?
                    Assembler::ldsw_op3 : Assembler::lduw_op3,
                  Assembler::ldst_op)
        && Assembler::inv_rs1(x) == G0
        && Assembler::inv_rd(x) == O7;
  }
  bool is_ic_miss_trap();       // Inline-cache uses a trap to detect a miss
  bool is_return() {
    // is it the output of MacroAssembler::ret or MacroAssembler::retl?
    // i.e. jmpl [%i7+8] or [%o7+8], discarding the result (rd == %g0).
    int x = long_at(0);
    const int pc_return_offset = 8; // see frame_sparc.hpp
    return is_op3(x, Assembler::jmpl_op3, Assembler::arith_op)
        && (inv_rs1(x) == I7 || inv_rs1(x) == O7)
        && inv_immed(x) && inv_simm(x, 13) == pc_return_offset
        && inv_rd(x) == G0;
  }
  bool is_int_jump() {
    // is it the output of MacroAssembler::b?
    int x = long_at(0);
    return is_op2(x, Assembler::bp_op2) || is_op2(x, Assembler::br_op2);
  }
  bool is_float_jump() {
    // is it the output of MacroAssembler::fb?
    int x = long_at(0);
    return is_op2(x, Assembler::fbp_op2) || is_op2(x, Assembler::fb_op2);
  }
  bool is_jump() {
    return is_int_jump() || is_float_jump();
  }
  bool is_cond_jump() {
    // a jump whose condition field is not "always" (integer or float variant)
    int x = long_at(0);
    return (is_int_jump() && Assembler::inv_cond(x) != Assembler::always) ||
           (is_float_jump() && Assembler::inv_cond(x) != Assembler::f_always);
  }

  // A stack bang is "stw %g0, [%sp + %g3_scratch]".
  bool is_stack_bang() {
    int x = long_at(0);
    return is_op3(x, Assembler::stw_op3, Assembler::ldst_op) &&
      (inv_rd(x) == G0) && (inv_rs1(x) == SP) && (inv_rs2(x) == G3_scratch);
  }

  bool is_prefetch() {
    int x = long_at(0);
    return is_op3(x, Assembler::prefetch_op3, Assembler::ldst_op);
  }

  bool is_membar() {
    int x = long_at(0);
    return is_op3(x, Assembler::membar_op3, Assembler::arith_op) &&
      (inv_rd(x) == G0) && (inv_rs1(x) == O7);
  }

  // A safepoint poll is a load into %g0 from a polling-page address with a
  // zero immediate (or %g0 index); ldx on 64-bit, lduw on 32-bit.
  bool is_safepoint_poll() {
    int x = long_at(0);
#ifdef _LP64
    return is_op3(x, Assembler::ldx_op3,  Assembler::ldst_op) &&
#else
    return is_op3(x, Assembler::lduw_op3, Assembler::ldst_op) &&
#endif
      (inv_rd(x) == G0) && (inv_immed(x) ? Assembler::inv_simm13(x) == 0 : inv_rs2(x) == G0);
  }

  bool is_zero_test(Register &reg);
  bool is_load_store_with_small_offset(Register reg);

 public:
#ifdef ASSERT
  static int rdpc_instruction()        { return Assembler::op(Assembler::arith_op ) | Assembler::op3(Assembler::rdreg_op3) | Assembler::u_field(5, 18, 14) | Assembler::rd(O7); }
#else
  // Temporary fix: in optimized mode, u_field is a macro for efficiency reasons (see Assembler::u_field) - needs to be fixed
  static int rdpc_instruction()        { return Assembler::op(Assembler::arith_op ) | Assembler::op3(Assembler::rdreg_op3) | u_field(5, 18, 14) | Assembler::rd(O7); }
#endif
  // nop encodes as "sethi 0, %g0".
  static int nop_instruction()         { return Assembler::op(Assembler::branch_op) | Assembler::op2(Assembler::sethi_op2); }
  static int illegal_instruction();    // the output of __ breakpoint_trap()
  // Build a pc-relative call instruction (30-bit word displacement).
  static int call_instruction(address destination, address pc) { return Assembler::op(Assembler::call_op) | Assembler::wdisp((intptr_t)destination, (intptr_t)pc, 30); }

  static int branch_instruction(Assembler::op2s op2val, Assembler::Condition c, bool a) {
    return Assembler::op(Assembler::branch_op) | Assembler::op2(op2val) | Assembler::annul(a) | Assembler::cond(c);
  }

  static int op3_instruction(Assembler::ops opval, Register rd, Assembler::op3s op3val, Register rs1, int simm13a) {
    return Assembler::op(opval) | Assembler::rd(rd) | Assembler::op3(op3val) | Assembler::rs1(rs1) | Assembler::immed(true) | Assembler::simm(simm13a, 13);
  }

  static int sethi_instruction(Register rd, int imm22a) {
    return Assembler::op(Assembler::branch_op) | Assembler::rd(rd) | Assembler::op2(Assembler::sethi_op2) | Assembler::hi22(imm22a);
  }

 protected:
  // Raw access to the instruction stream.  The set_* variants are defined
  // out-of-line because they must also flush the I-cache.
  address  addr_at(int offset) const    { return address(this) + offset; }
  int      long_at(int offset) const    { return *(int*)addr_at(offset); }
  void set_long_at(int offset, int i);        /* deals with I-cache */
  void set_jlong_at(int offset, jlong i);     /* deals with I-cache */
  void set_addr_at(int offset, address x);    /* deals with I-cache */

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(BytesPerInstWord); }

  // Opcode-field predicates over a raw instruction word.
  static bool is_op( int x, Assembler::ops opval) {
    return Assembler::inv_op(x) == opval;
  }
  static bool is_op2(int x, Assembler::op2s op2val) {
    return Assembler::inv_op(x) == Assembler::branch_op && Assembler::inv_op2(x) == op2val;
  }
  static bool is_op3(int x, Assembler::op3s op3val, Assembler::ops opval) {
    return Assembler::inv_op(x) == opval && Assembler::inv_op3(x) == op3val;
  }

  // utilities to help subclasses decode:
  static Register inv_rd(  int x ) { return Assembler::inv_rd( x); }
  static Register inv_rs1( int x ) { return Assembler::inv_rs1(x); }
  static Register inv_rs2( int x ) { return Assembler::inv_rs2(x); }

  static bool inv_immed( int x ) { return Assembler::inv_immed(x); }
  static bool inv_annul( int x ) { return (Assembler::annul(true) & x) != 0; }
  static int  inv_cond(  int x ) { return Assembler::inv_cond(x); }

  static int inv_op(  int x ) { return Assembler::inv_op( x); }
  static int inv_op2( int x ) { return Assembler::inv_op2(x); }
  static int inv_op3( int x ) { return Assembler::inv_op3(x); }

  static int inv_simm(    int x, int nbits ) { return Assembler::inv_simm(x, nbits); }
  static intptr_t inv_wdisp(   int x, int nbits ) { return Assembler::inv_wdisp(  x, 0, nbits); }
  static intptr_t inv_wdisp16( int x )            { return Assembler::inv_wdisp16(x, 0); }
  static int branch_destination_offset(int x) { return Assembler::branch_destination(x, 0); }
  static int patch_branch_destination_offset(int dest_offset, int x) {
    return Assembler::patched_branch(dest_offset, x, 0);
  }
  void set_annul_bit() { set_long_at(0, long_at(0) | Assembler::annul(true)); }

  // utility for checking if x is either of 2 small constants
  // (compares via bit masks; assumes x, k1 and k2 are valid shift counts,
  //  i.e. smaller than the width of int — TODO confirm at call sites)
  static bool is_either(int x, int k1, int k2) {
    // return x == k1 || x == k2;
    return (1 << x) & (1 << k1 | 1 << k2);
  }

  // utility for checking overflow of signed instruction fields
  // (note: "1 << nbits-1" parses as "1 << (nbits-1)" — shift binds looser than minus)
  static bool fits_in_simm(int x, int nbits) {
    // cf. Assembler::assert_signed_range()
    // return -(1 << nbits-1) <= x  &&  x < ( 1 << nbits-1),
    return (unsigned)(x + (1 << nbits-1)) < (unsigned)(1 << nbits);
  }

  // set a signed immediate field
  static int set_simm(int insn, int imm, int nbits) {
    return (insn &~ Assembler::simm(-1, nbits)) | Assembler::simm(imm, nbits);
  }

  // set a wdisp field (disp should be the difference of two addresses)
  static int set_wdisp(int insn, intptr_t disp, int nbits) {
    return (insn &~ Assembler::wdisp((intptr_t)-4, (intptr_t)0, nbits)) | Assembler::wdisp(disp, 0, nbits);
  }

  static int set_wdisp16(int insn, intptr_t disp) {
    return (insn &~ Assembler::wdisp16((intptr_t)-4, 0)) | Assembler::wdisp16(disp, 0);
  }

  // get a simm13 field from an arithmetic or memory instruction
  static int get_simm13(int insn) {
    assert(is_either(Assembler::inv_op(insn),
                     Assembler::arith_op, Assembler::ldst_op) &&
           (insn & Assembler::immed(true)), "must have a simm13 field");
    return Assembler::inv_simm(insn, 13);
  }

  // set the simm13 field of an arithmetic or memory instruction
  static bool set_simm13(int insn, int imm) {
    get_simm13(insn);               // tickle the assertion check
    return set_simm(insn, imm, 13);
  }

  // combine the fields of a sethi stream (7 instructions ) and an add, jmp or ld/st
  static intptr_t data64( address pc, int arith_insn ) {
    assert(is_op2(*(unsigned int *)pc, Assembler::sethi_op2), "must be sethi");
    intptr_t hi = (intptr_t)gethi( (unsigned int *)pc );
    intptr_t lo = (intptr_t)get_simm13(arith_insn);
    assert((unsigned)lo < (1 << 10), "offset field of set_oop must be 10 bits");
    return hi | lo;
  }

  // Regenerate the instruction sequence that performs the 64 bit
  // sethi.  This only does the sethi.  The disp field (bottom 10 bits)
  // must be handled separately.
  static void set_data64_sethi(address instaddr, intptr_t x);

  // combine the fields of a sethi/simm13 pair (simm13 = or, add, jmpl, ld/st)
  static int data32(int sethi_insn, int arith_insn) {
    assert(is_op2(sethi_insn, Assembler::sethi_op2), "must be sethi");
    int hi = Assembler::inv_hi22(sethi_insn);
    int lo = get_simm13(arith_insn);
    assert((unsigned)lo < (1 << 10), "offset field of set_oop must be 10 bits");
    return hi | lo;
  }

  static int set_data32_sethi(int sethi_insn, int imm) {
    // note that Assembler::hi22 clips the low 10 bits for us
    assert(is_op2(sethi_insn, Assembler::sethi_op2), "must be sethi");
    return (sethi_insn &~ Assembler::hi22(-1)) | Assembler::hi22(imm);
  }

  static int set_data32_simm13(int arith_insn, int imm) {
    get_simm13(arith_insn);         // tickle the assertion check
    int imm10 = Assembler::low10(imm);
    return (arith_insn &~ Assembler::simm(-1, 13)) | Assembler::simm(imm10, 13);
  }

  static int low10(int imm) {
    return Assembler::low10(imm);
  }

  // Perform the inverse of the LP64 Macroassembler::sethi
  // routine.  Extracts the 54 bits of address from the instruction
  // stream.  This routine must agree with the sethi routine in
  // assembler_inline_sparc.hpp
  // Walks up to 7 instruction words: a leading sethi followed by
  // or/sll steps; stops early at a nop, or returns immediately after
  // applying an xor step.
  static address gethi( unsigned int *pc ) {
    int i = 0;
    uintptr_t adr;

    // We first start out with the real sethi instruction
    assert(is_op2(*pc, Assembler::sethi_op2), "in gethi - must be sethi");
    adr = (unsigned int)Assembler::inv_hi22( *(pc++) );
    i++;
    while ( i < 7 ) {
      // We're done if we hit a nop
      if ( (int)*pc == nop_instruction() ) break;
      assert ( Assembler::inv_op(*pc) == Assembler::arith_op, "in gethi - must be arith_op" );
      switch ( Assembler::inv_op3(*pc) ) {
        case Assembler::xor_op3:
          adr ^= (intptr_t)get_simm13( *pc );
          return ( (address)adr );
          break;
        case Assembler::sll_op3:
          adr <<= ( *pc & 0x3f );   // shift count is the low 6 bits of the insn
          break;
        case Assembler::or_op3:
          adr |= (intptr_t)get_simm13( *pc );
          break;
        default:
          assert ( 0, "in gethi - Should not reach here" );
          break;
      }
      pc++;
      i++;
    }
    return ( (address)adr );
  }

 public:
  void verify();
  void print();

  // unit test stuff
  static void test() {}             // override for testing

  inline friend NativeInstruction* nativeInstruction_at(address address);
};

// Overlay a NativeInstruction on the code at 'address' (verifying in debug builds).
inline NativeInstruction* nativeInstruction_at(address address) {
  NativeInstruction* inst = (NativeInstruction*)address;
#ifdef ASSERT
  inst->verify();
#endif
  return inst;
}



//-----------------------------------------------------------------------------

// The NativeCall is an abstraction for accessing/manipulating native call imm32 instructions.
// (used to manipulate inline caches, primitive & dll calls, etc.)
inline NativeCall* nativeCall_at(address instr);
inline NativeCall* nativeCall_overwriting_at(address instr,
                                             address destination);
inline NativeCall* nativeCall_before(address return_address);
// A pc-relative call (30-bit word displacement) followed by its delay slot;
// the pair is 8 bytes, so the return address is instruction start + 8.
class NativeCall: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    instruction_size          = 8,     // call word + delay-slot word
    return_address_offset     = 8,
    call_displacement_width   = 30,
    displacement_offset       = 0,
    instruction_offset        = 0
  };
  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }
  address return_address() const            { return addr_at(return_address_offset); }

  // Target = pc-relative displacement decoded from the call word, plus this pc.
  address destination() const               { return inv_wdisp(long_at(0), call_displacement_width) + instruction_address(); }
  address displacement_address() const      { return addr_at(displacement_offset); }
  void set_destination(address dest)        { set_long_at(0, set_wdisp(long_at(0), dest - instruction_address(), call_displacement_width)); }
  void set_destination_mt_safe(address dest);

  void verify_alignment() {}     // do nothing on sparc
  void verify();
  void print();

  // unit test stuff
  static void test();

  // Creation
  friend inline NativeCall* nativeCall_at(address instr);
  // Write a fresh call (call + nop delay slot) over the code at 'instr'.
  friend NativeCall* nativeCall_overwriting_at(address instr, address destination = NULL) {
    // insert a "blank" call:
    NativeCall* call = (NativeCall*)instr;
    call->set_long_at(0 * BytesPerInstWord, call_instruction(destination, instr));
    call->set_long_at(1 * BytesPerInstWord, nop_instruction());
    assert(call->addr_at(2 * BytesPerInstWord) - instr == instruction_size, "instruction size");
    // check its structure now:
    assert(nativeCall_at(instr)->destination() == destination, "correct call destination");
    return call;
  }

  // View the call whose return address is 'return_address'.
  friend inline NativeCall* nativeCall_before(address return_address) {
    NativeCall* call = (NativeCall*)(return_address - return_address_offset);
#ifdef ASSERT
    call->verify();
#endif
    return call;
  }

  static bool is_call_at(address instr) {
    return nativeInstruction_at(instr)->is_call();
  }

  static bool is_call_before(address instr) {
    return nativeInstruction_at(instr - return_address_offset)->is_call();
  }

  static bool is_call_to(address instr, address target) {
    return nativeInstruction_at(instr)->is_call() &&
      nativeCall_at(instr)->destination() == target;
  }

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry) {
    (void)nativeCall_overwriting_at(code_pos, entry);
  }

  static void replace_mt_safe(address instr_addr, address code_buffer);
};

// Overlay a NativeCall on the code at 'instr' (verifying in debug builds).
inline NativeCall* nativeCall_at(address instr) {
  NativeCall* call = (NativeCall*)instr;
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

// The NativeFarCall is an abstraction for accessing/manipulating native call-anywhere
// instructions in the sparcv9 vm.  Used to call native methods which may be loaded
// anywhere in the address space, possibly out of reach of a call instruction.

#ifndef _LP64

// On 32-bit systems, a far call is the same as a near one.
class NativeFarCall;
inline NativeFarCall* nativeFarCall_at(address instr);
// 32-bit variant: every address is reachable by a plain call, so the far-call
// creators simply delegate to the NativeCall ones and cast the result.
class NativeFarCall : public NativeCall {
 public:
  friend inline NativeFarCall* nativeFarCall_at(address instr) { return (NativeFarCall*)nativeCall_at(instr); }
  friend NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination = NULL)
                                                        { return (NativeFarCall*)nativeCall_overwriting_at(instr, destination); }
  friend NativeFarCall* nativeFarCall_before(address return_address)
                                                        { return (NativeFarCall*)nativeCall_before(return_address); }
};

#else

// The format of this extended-range call is:
//      jumpl_to addr, lreg
//      == sethi %hi54(addr), O7 ;  jumpl O7, %lo10(addr), O7 ;  <delay>
// That is, it is essentially the same as a NativeJump.
class NativeFarCall;
inline NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination);
inline NativeFarCall* nativeFarCall_at(address instr);
class NativeFarCall: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    // instruction_size includes the delay slot instruction.
    instruction_size        = 9 * BytesPerInstWord,
    return_address_offset   = 9 * BytesPerInstWord,
    jmpl_offset             = 7 * BytesPerInstWord,   // jmpl follows the 7-word sethi stream
    displacement_offset     = 0,
    instruction_offset      = 0
  };
  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }
  address return_address() const            { return addr_at(return_address_offset); }

  // Target = 54-bit sethi-stream value OR'ed with the jmpl's low-10-bit immediate.
  address destination() const {
    return (address) data64(addr_at(0), long_at(jmpl_offset));
  }
  address displacement_address() const      { return addr_at(displacement_offset); }
  void set_destination(address dest);

  bool destination_is_compiled_verified_entry_point();

  void verify();
  void print();

  // unit test stuff
  static void test();

  // Creation
  friend inline NativeFarCall* nativeFarCall_at(address instr) {
    NativeFarCall* call = (NativeFarCall*)instr;
#ifdef ASSERT
    call->verify();
#endif
    return call;
  }

  // Not implemented on 64-bit SPARC (calls Unimplemented()).
  friend inline NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination = NULL) {
    Unimplemented();
    NativeFarCall* call = (NativeFarCall*)instr;
    return call;
  }

  friend NativeFarCall* nativeFarCall_before(address return_address) {
    NativeFarCall* call = (NativeFarCall*)(return_address - return_address_offset);
#ifdef ASSERT
    call->verify();
#endif
    return call;
  }

  static bool is_call_at(address instr);

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry) {
    (void)nativeFarCall_overwriting_at(code_pos, entry);
  }
  static void replace_mt_safe(address instr_addr, address code_buffer);
};

#endif // _LP64

// An interface for accessing/manipulating native set_oop imm, reg instructions.
// (used to manipulate inlined data references, etc.)
512 // set_oop imm, reg 513 // == sethi %hi22(imm), reg ; add reg, %lo10(imm), reg 514 class NativeMovConstReg; 515 inline NativeMovConstReg* nativeMovConstReg_at(address address); 516 class NativeMovConstReg: public NativeInstruction { 517 public: 518 enum Sparc_specific_constants { 519 sethi_offset = 0, 520 #ifdef _LP64 521 add_offset = 7 * BytesPerInstWord, 522 instruction_size = 8 * BytesPerInstWord 523 #else 524 add_offset = 4, 525 instruction_size = 8 526 #endif 527 }; 528 529 address instruction_address() const { return addr_at(0); } 530 address next_instruction_address() const { return addr_at(instruction_size); } 531 532 // (The [set_]data accessor respects oop_type relocs also.) 533 intptr_t data() const; 534 void set_data(intptr_t x); 535 536 // report the destination register 537 Register destination() { return inv_rd(long_at(sethi_offset)); } 538 539 void verify(); 540 void print(); 541 542 // unit test stuff 543 static void test(); 544 545 // Creation 546 friend inline NativeMovConstReg* nativeMovConstReg_at(address address) { 547 NativeMovConstReg* test = (NativeMovConstReg*)address; 548 #ifdef ASSERT 549 test->verify(); 550 #endif 551 return test; 552 } 553 554 555 friend NativeMovConstReg* nativeMovConstReg_before(address address) { 556 NativeMovConstReg* test = (NativeMovConstReg*)(address - instruction_size); 557 #ifdef ASSERT 558 test->verify(); 559 #endif 560 return test; 561 } 562 563 }; 564 565 566 // An interface for accessing/manipulating native set_oop imm, reg instructions. 567 // (used to manipulate inlined data references, etc.) 568 // set_oop imm, reg 569 // == sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg 570 // 571 // Note that it is identical to NativeMovConstReg with the exception of a nop between the 572 // sethi and the add. The nop is required to be in the delay slot of the call instruction 573 // which overwrites the sethi during patching. 
class NativeMovConstRegPatching;
inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address);
class NativeMovConstRegPatching: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    sethi_offset       = 0,
#ifdef _LP64
    nop_offset         = 7 * BytesPerInstWord,           // nop follows the 7-word sethi stream
#else
    nop_offset         = sethi_offset + BytesPerInstWord,
#endif
    add_offset         = nop_offset + BytesPerInstWord,
    instruction_size   = add_offset + BytesPerInstWord
  };

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }

  // (The [set_]data accessor respects oop_type relocs also.)
  int data() const;
  void set_data(int x);

  // report the destination register (rd field of the leading sethi)
  Register destination() { return inv_rd(long_at(sethi_offset)); }

  void verify();
  void print();

  // unit test stuff
  static void test();

  // Creation
  friend inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)address;
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }


  // View the sequence that ends just before 'address'.
  friend NativeMovConstRegPatching* nativeMovConstRegPatching_before(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_size);
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }

};


// An interface for accessing/manipulating native memory ops
//      ld* [reg + offset], reg
//      st* reg, [reg + offset]
//      sethi %hi(imm), reg; add reg, %lo(imm), reg; ld* [reg1 + reg], reg2
//      sethi %hi(imm), reg; add reg, %lo(imm), reg; st* reg2, [reg1 + reg]
// Ops covered: {lds,ldu,st}{w,b,h}, {ld,st}{d,x}
//
class NativeMovRegMem;
inline NativeMovRegMem* nativeMovRegMem_at (address address);
class NativeMovRegMem: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    // Bit masks over op3 values, used to recognize the covered load/store ops.
    op3_mask_ld = 1 << Assembler::lduw_op3 |
                  1 << Assembler::ldub_op3 |
                  1 << Assembler::lduh_op3 |
                  1 << Assembler::ldd_op3 |
                  1 << Assembler::ldsw_op3 |
                  1 << Assembler::ldsb_op3 |
                  1 << Assembler::ldsh_op3 |
                  1 << Assembler::ldx_op3,
    op3_mask_st = 1 << Assembler::stw_op3 |
                  1 << Assembler::stb_op3 |
                  1 << Assembler::sth_op3 |
                  1 << Assembler::std_op3 |
                  1 << Assembler::stx_op3,
    // Float ops are rebased by ldf_op3 so they fit in an int-sized mask.
    op3_ldst_int_limit = Assembler::ldf_op3,
    op3_mask_ldf = 1 << (Assembler::ldf_op3  - op3_ldst_int_limit) |
                   1 << (Assembler::lddf_op3 - op3_ldst_int_limit),
    op3_mask_stf = 1 << (Assembler::stf_op3  - op3_ldst_int_limit) |
                   1 << (Assembler::stdf_op3 - op3_ldst_int_limit),

    offset_width  = 13,
    sethi_offset  = 0,
#ifdef _LP64
    add_offset    = 7 * BytesPerInstWord,
#else
    add_offset    = 4,
#endif
    ldst_offset   = add_offset + BytesPerInstWord
  };
  bool is_immediate() const {
    // check if instruction is ld* [reg + offset], reg or st* reg, [reg + offset]
    int i0 = long_at(0);
    return (is_op(i0, Assembler::ldst_op));
  }

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  {
#ifdef _LP64
    return addr_at(is_immediate() ? 4 : (7 * BytesPerInstWord));
#else
    return addr_at(is_immediate() ? 4 : 12);
#endif
  }
  // The memory offset: either the simm13 immediate, or the constant built by
  // the preceding set sequence (register-index form).
  intptr_t offset() const {
    return is_immediate()? inv_simm(long_at(0), offset_width) :
                           nativeMovConstReg_at(addr_at(0))->data();
  }
  void set_offset(intptr_t x) {
    if (is_immediate()) {
      guarantee(fits_in_simm(x, offset_width), "data block offset overflow");
      set_long_at(0, set_simm(long_at(0), x, offset_width));
    } else
      nativeMovConstReg_at(addr_at(0))->set_data(x);
  }

  void add_offset_in_bytes(intptr_t radd_offset) {
    set_offset (offset() + radd_offset);
  }

  void copy_instruction_to(address new_instruction_address);

  void verify();
  void print ();

  // unit test stuff
  static void test();

 private:
  friend inline NativeMovRegMem* nativeMovRegMem_at (address address) {
    NativeMovRegMem* test = (NativeMovRegMem*)address;
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }
};


// An interface for accessing/manipulating native memory ops
//      ld* [reg + offset], reg
//      st* reg, [reg + offset]
//      sethi %hi(imm), reg; nop; add reg, %lo(imm), reg; ld* [reg1 + reg], reg2
//      sethi %hi(imm), reg; nop; add reg, %lo(imm), reg; st* reg2, [reg1 + reg]
// Ops covered: {lds,ldu,st}{w,b,h}, {ld,st}{d,x}
//
// Note that it is identical to NativeMovRegMem with the exception of a nop between the
// sethi and the add.  The nop is required to be in the delay slot of the call instruction
// which overwrites the sethi during patching.
class NativeMovRegMemPatching;
inline NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address);
class NativeMovRegMemPatching: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    // Bit masks over op3 values, used to recognize the covered load/store ops.
    op3_mask_ld = 1 << Assembler::lduw_op3 |
                  1 << Assembler::ldub_op3 |
                  1 << Assembler::lduh_op3 |
                  1 << Assembler::ldd_op3 |
                  1 << Assembler::ldsw_op3 |
                  1 << Assembler::ldsb_op3 |
                  1 << Assembler::ldsh_op3 |
                  1 << Assembler::ldx_op3,
    op3_mask_st = 1 << Assembler::stw_op3 |
                  1 << Assembler::stb_op3 |
                  1 << Assembler::sth_op3 |
                  1 << Assembler::std_op3 |
                  1 << Assembler::stx_op3,
    // Float ops are rebased by ldf_op3 so they fit in an int-sized mask.
    op3_ldst_int_limit = Assembler::ldf_op3,
    op3_mask_ldf = 1 << (Assembler::ldf_op3  - op3_ldst_int_limit) |
                   1 << (Assembler::lddf_op3 - op3_ldst_int_limit),
    op3_mask_stf = 1 << (Assembler::stf_op3  - op3_ldst_int_limit) |
                   1 << (Assembler::stdf_op3 - op3_ldst_int_limit),

    offset_width  = 13,
    sethi_offset  = 0,
#ifdef _LP64
    nop_offset    = 7 * BytesPerInstWord,
#else
    nop_offset    = 4,
#endif
    add_offset    = nop_offset + BytesPerInstWord,
    ldst_offset   = add_offset + BytesPerInstWord
  };
  bool is_immediate() const {
    // check if instruction is ld* [reg + offset], reg or st* reg, [reg + offset]
    int i0 = long_at(0);
    return (is_op(i0, Assembler::ldst_op));
  }

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  {
    return addr_at(is_immediate()? 4 : 16);
  }
  // The memory offset: either the simm13 immediate, or the constant built by
  // the preceding patchable set sequence (register-index form).
  int offset() const {
    return is_immediate()? inv_simm(long_at(0), offset_width) :
                           nativeMovConstRegPatching_at(addr_at(0))->data();
  }
  void set_offset(int x) {
    if (is_immediate()) {
      guarantee(fits_in_simm(x, offset_width), "data block offset overflow");
      set_long_at(0, set_simm(long_at(0), x, offset_width));
    }
    else
      nativeMovConstRegPatching_at(addr_at(0))->set_data(x);
  }

  void add_offset_in_bytes(intptr_t radd_offset) {
    set_offset (offset() + radd_offset);
  }

  void copy_instruction_to(address new_instruction_address);

  void verify();
  void print ();

  // unit test stuff
  static void test();

 private:
  friend inline NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address) {
    NativeMovRegMemPatching* test = (NativeMovRegMemPatching*)address;
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }
};


// An interface for accessing/manipulating native jumps
//      jump_to addr
//      == sethi %hi22(addr), temp ;  jumpl reg, %lo10(addr), G0 ;  <delay>
//      jumpl_to addr, lreg
//      == sethi %hi22(addr), temp ;  jumpl reg, %lo10(addr), lreg ;  <delay>
class NativeJump;
inline NativeJump* nativeJump_at(address address);
class NativeJump: public NativeInstruction {
 private:
  void guarantee_displacement(int disp, int width) {
    guarantee(fits_in_simm(disp, width + 2), "branch displacement overflow");
  }

 public:
  enum Sparc_specific_constants {
    sethi_offset       = 0,
#ifdef _LP64
    jmpl_offset        = 7 * BytesPerInstWord,   // jmpl follows the 7-word sethi stream
    instruction_size   = 9 * BytesPerInstWord    // includes delay slot
#else
    jmpl_offset        = 1 * BytesPerInstWord,
    instruction_size   = 3 * BytesPerInstWord    // includes delay slot
#endif
  };

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }

#ifdef _LP64
  // 64-bit: destination combines the sethi stream with the jmpl's low-10 immediate.
  address jump_destination() const {
    return (address) data64(instruction_address(), long_at(jmpl_offset));
  }
  void set_jump_destination(address dest) {
    set_data64_sethi( instruction_address(), (intptr_t)dest);
    set_long_at(jmpl_offset,  set_data32_simm13( long_at(jmpl_offset),  (intptr_t)dest));
  }
#else
  // 32-bit: destination combines the single sethi with the jmpl's low-10 immediate.
  address jump_destination() const {
    return (address) data32(long_at(sethi_offset), long_at(jmpl_offset));
  }
  void set_jump_destination(address dest) {
    set_long_at(sethi_offset, set_data32_sethi(  long_at(sethi_offset), (intptr_t)dest));
    set_long_at(jmpl_offset,  set_data32_simm13( long_at(jmpl_offset),  (intptr_t)dest));
  }
#endif

  // Creation
  friend inline NativeJump* nativeJump_at(address address) {
    NativeJump* jump = (NativeJump*)address;
#ifdef ASSERT
    jump->verify();
#endif
    return jump;
  }

  void verify();
  void print();

  // Unit testing stuff
  static void test();

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry) {
    // nothing to do for sparc.
  }
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};



// Despite the name, handles only simple branches.
877 class NativeGeneralJump; 878 inline NativeGeneralJump* nativeGeneralJump_at(address address); 879 class NativeGeneralJump: public NativeInstruction { 880 public: 881 enum Sparc_specific_constants { 882 instruction_size = 8 883 }; 884 885 address instruction_address() const { return addr_at(0); } 886 address jump_destination() const { return addr_at(0) + branch_destination_offset(long_at(0)); } 887 void set_jump_destination(address dest) { 888 int patched_instr = patch_branch_destination_offset(dest - addr_at(0), long_at(0)); 889 set_long_at(0, patched_instr); 890 } 891 void set_annul() { set_annul_bit(); } 892 NativeInstruction *delay_slot_instr() { return nativeInstruction_at(addr_at(4));} 893 void fill_delay_slot(int instr) { set_long_at(4, instr);} 894 Assembler::Condition condition() { 895 int x = long_at(0); 896 return (Assembler::Condition) Assembler::inv_cond(x); 897 } 898 899 // Creation 900 friend inline NativeGeneralJump* nativeGeneralJump_at(address address) { 901 NativeGeneralJump* jump = (NativeGeneralJump*)(address); 902 #ifdef ASSERT 903 jump->verify(); 904 #endif 905 return jump; 906 } 907 908 // Insertion of native general jump instruction 909 static void insert_unconditional(address code_pos, address entry); 910 static void replace_mt_safe(address instr_addr, address code_buffer); 911 912 void verify(); 913 }; 914 915 916 class NativeIllegalInstruction: public NativeInstruction { 917 public: 918 enum Sparc_specific_constants { 919 instruction_size = 4 920 }; 921 922 // Insert illegal opcode as specific address 923 static void insert(address code_pos); 924 }; 925 926 #endif // CPU_SPARC_VM_NATIVEINST_SPARC_HPP