#ifdef USE_PRAGMA_IDENT_HDR
#pragma ident "@(#)nativeInst_sparc.hpp 1.89 07/05/05 17:04:31 JVM"
#endif
/*
 * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// We have interfaces for the following instructions:
// - NativeInstruction
// - - NativeCall
// - - NativeFarCall
// - - NativeMovConstReg
// - - NativeMovConstRegPatching
// - - NativeMovRegMem
// - - NativeMovRegMemPatching
// - - NativeJump
// - - NativeGeneralJump
// - - NativeIllegalInstruction
// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this.
class NativeInstruction VALUE_OBJ_CLASS_SPEC {
  friend class Relocation;

 public:
  enum Sparc_specific_constants {
    nop_instruction_size        =    4
  };

  bool is_dtrace_trap();
  bool is_nop()                        { return long_at(0) == nop_instruction(); }
  bool is_call()                       { return is_op(long_at(0), Assembler::call_op); }
  bool is_sethi()                      { return (is_op2(long_at(0), Assembler::sethi_op2)
                                          && inv_rd(long_at(0)) != G0); }

  bool sets_cc() {
    // Conservative: returns true for some instructions that do not set the
    // condition code, such as "save".
    // Does not return true for the deprecated tagged instructions, such as TADDcc.
    int x = long_at(0);
    return (is_op(x, Assembler::arith_op) &&
            (inv_op3(x) & Assembler::cc_bit_op3) == Assembler::cc_bit_op3);
  }
  bool is_illegal();
  bool is_zombie() {
    int x = long_at(0);
    return is_op3(x,
                  VM_Version::v9_instructions_work() ?
                    Assembler::ldsw_op3 : Assembler::lduw_op3,
                  Assembler::ldst_op)
        && Assembler::inv_rs1(x) == G0
        && Assembler::inv_rd(x) == O7;
  }
  bool is_ic_miss_trap();       // Inline-cache uses a trap to detect a miss
  bool is_return() {
    // is it the output of MacroAssembler::ret or MacroAssembler::retl?
    int x = long_at(0);
    const int pc_return_offset = 8; // see frame_sparc.hpp
    return is_op3(x, Assembler::jmpl_op3, Assembler::arith_op)
        && (inv_rs1(x) == I7 || inv_rs1(x) == O7)
        && inv_immed(x) && inv_simm(x, 13) == pc_return_offset
        && inv_rd(x) == G0;
  }
  bool is_int_jump() {
    // is it the output of MacroAssembler::b?
    int x = long_at(0);
    return is_op2(x, Assembler::bp_op2) || is_op2(x, Assembler::br_op2);
  }
  bool is_float_jump() {
    // is it the output of MacroAssembler::fb?
    int x = long_at(0);
    return is_op2(x, Assembler::fbp_op2) || is_op2(x, Assembler::fb_op2);
  }
  bool is_jump() {
    return is_int_jump() || is_float_jump();
  }
  bool is_cond_jump() {
    int x = long_at(0);
    return (is_int_jump() && Assembler::inv_cond(x) != Assembler::always) ||
           (is_float_jump() && Assembler::inv_cond(x) != Assembler::f_always);
  }

  bool is_stack_bang() {
    int x = long_at(0);
    return is_op3(x, Assembler::stw_op3, Assembler::ldst_op) &&
      (inv_rd(x) == G0) && (inv_rs1(x) == SP) && (inv_rs2(x) == G3_scratch);
  }

  bool is_prefetch() {
    int x = long_at(0);
    return is_op3(x, Assembler::prefetch_op3, Assembler::ldst_op);
  }

  bool is_membar() {
    int x = long_at(0);
    return is_op3(x, Assembler::membar_op3, Assembler::arith_op) &&
      (inv_rd(x) == G0) && (inv_rs1(x) == O7);
  }

  bool is_safepoint_poll() {
    int x = long_at(0);
#ifdef _LP64
    return is_op3(x, Assembler::ldx_op3,  Assembler::ldst_op) &&
#else
    return is_op3(x, Assembler::lduw_op3, Assembler::ldst_op) &&
#endif
      (inv_rd(x) == G0) && (inv_immed(x) ? Assembler::inv_simm13(x) == 0 : inv_rs2(x) == G0);
  }

  bool is_zero_test(Register &reg);
  bool is_load_store_with_small_offset(Register reg);

 public:
#ifdef ASSERT
  static int rdpc_instruction()        { return Assembler::op(Assembler::arith_op ) | Assembler::op3(Assembler::rdreg_op3) | Assembler::u_field(5, 18, 14) | Assembler::rd(O7); }
#else
  // Temporary fix: in optimized mode, u_field is a macro for efficiency reasons (see Assembler::u_field) - needs to be fixed
  static int rdpc_instruction()        { return Assembler::op(Assembler::arith_op ) | Assembler::op3(Assembler::rdreg_op3) |            u_field(5, 18, 14) | Assembler::rd(O7); }
#endif
  static int nop_instruction()         { return Assembler::op(Assembler::branch_op) | Assembler::op2(Assembler::sethi_op2); }
  static int illegal_instruction();    // the output of __ breakpoint_trap()
  static int call_instruction(address destination, address pc) { return Assembler::op(Assembler::call_op) | Assembler::wdisp((intptr_t)destination, (intptr_t)pc, 30); }

  static int branch_instruction(Assembler::op2s op2val, Assembler::Condition c, bool a) {
    return Assembler::op(Assembler::branch_op) | Assembler::op2(op2val) | Assembler::annul(a) | Assembler::cond(c);
  }

  static int op3_instruction(Assembler::ops opval, Register rd, Assembler::op3s op3val, Register rs1, int simm13a) {
    return Assembler::op(opval) | Assembler::rd(rd) | Assembler::op3(op3val) | Assembler::rs1(rs1) | Assembler::immed(true) | Assembler::simm(simm13a, 13);
  }

  static int sethi_instruction(Register rd, int imm22a) {
    return Assembler::op(Assembler::branch_op) | Assembler::rd(rd) | Assembler::op2(Assembler::sethi_op2) | Assembler::hi22(imm22a);
  }
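
  // Usage sketch (illustrative, not part of the original header): these static
  // encoders compose raw instruction words out of Assembler bit fields.  For
  // example, a sethi with rd = G0 and a zero immediate contributes no extra
  // bits, so it encodes to the same word as the canonical nop:
  //   assert(sethi_instruction(G0, 0) == nop_instruction(), "nop is sethi 0, %g0");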

 protected:
  address  addr_at(int offset) const    { return address(this) + offset; }
  int      long_at(int offset) const    { return *(int*)addr_at(offset); }
  void set_long_at(int offset, int i);      /* deals with I-cache */
  void set_jlong_at(int offset, jlong i);   /* deals with I-cache */
  void set_addr_at(int offset, address x);  /* deals with I-cache */

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(BytesPerInstWord); }

  static bool is_op( int x, Assembler::ops opval)  {
    return Assembler::inv_op(x) == opval;
  }
  static bool is_op2(int x, Assembler::op2s op2val) {
    return Assembler::inv_op(x) == Assembler::branch_op && Assembler::inv_op2(x) == op2val;
  }
  static bool is_op3(int x, Assembler::op3s op3val, Assembler::ops opval) {
    return Assembler::inv_op(x) == opval && Assembler::inv_op3(x) == op3val;
  }

  // utilities to help subclasses decode:
  static Register inv_rd(  int x ) { return Assembler::inv_rd( x); }
  static Register inv_rs1( int x ) { return Assembler::inv_rs1(x); }
  static Register inv_rs2( int x ) { return Assembler::inv_rs2(x); }

  static bool inv_immed( int x ) { return Assembler::inv_immed(x); }
  static bool inv_annul( int x ) { return (Assembler::annul(true) & x) != 0; }
  static int  inv_cond(  int x ) { return Assembler::inv_cond(x); }

  static int inv_op(  int x ) { return Assembler::inv_op( x); }
  static int inv_op2( int x ) { return Assembler::inv_op2(x); }
  static int inv_op3( int x ) { return Assembler::inv_op3(x); }

  static int inv_simm(    int x, int nbits ) { return Assembler::inv_simm(x, nbits); }
  static intptr_t inv_wdisp(   int x, int nbits ) { return Assembler::inv_wdisp(  x, 0, nbits); }
  static intptr_t inv_wdisp16( int x )            { return Assembler::inv_wdisp16(x, 0); }
  static int branch_destination_offset(int x) { return Assembler::branch_destination(x, 0); }
  static int patch_branch_destination_offset(int dest_offset, int x) {
    return Assembler::patched_branch(dest_offset, x, 0);
  }
  void set_annul_bit() { set_long_at(0, long_at(0) | Assembler::annul(true)); }

  // utility for checking if x is either of 2 small constants
  static bool is_either(int x, int k1, int k2) {
    // return x == k1 || x == k2;
    return (1 << x) & (1 << k1 | 1 << k2);
  }

  // utility for checking overflow of signed instruction fields
  static bool fits_in_simm(int x, int nbits) {
    // cf. Assembler::assert_signed_range()
    // return -(1 << nbits-1) <= x  &&  x < ( 1 << nbits-1)
    return (unsigned)(x + (1 << nbits-1)) < (unsigned)(1 << nbits);
  }
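
  // Example (illustrative only): for a 13-bit signed field the representable
  // range is [-4096, 4095], so fits_in_simm(4095, 13) and fits_in_simm(-4096, 13)
  // hold, while fits_in_simm(4096, 13) and fits_in_simm(-4097, 13) do not.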

  // set a signed immediate field
  static int set_simm(int insn, int imm, int nbits) {
    return (insn &~ Assembler::simm(-1, nbits)) | Assembler::simm(imm, nbits);
  }

  // set a wdisp field (disp should be the difference of two addresses)
  static int set_wdisp(int insn, intptr_t disp, int nbits) {
    return (insn &~ Assembler::wdisp((intptr_t)-4, (intptr_t)0, nbits)) | Assembler::wdisp(disp, 0, nbits);
  }

  static int set_wdisp16(int insn, intptr_t disp) {
    return (insn &~ Assembler::wdisp16((intptr_t)-4, 0)) | Assembler::wdisp16(disp, 0);
  }

  // get a simm13 field from an arithmetic or memory instruction
  static int get_simm13(int insn) {
    assert(is_either(Assembler::inv_op(insn),
                     Assembler::arith_op, Assembler::ldst_op) &&
            (insn & Assembler::immed(true)), "must have a simm13 field");
    return Assembler::inv_simm(insn, 13);
  }

  // set the simm13 field of an arithmetic or memory instruction
  // (returns the patched instruction word)
  static int set_simm13(int insn, int imm) {
    get_simm13(insn);           // tickle the assertion check
    return set_simm(insn, imm, 13);
  }
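
  // Usage sketch (illustrative, not part of the original header): retarget the
  // %lo() part of a set_oop sequence by rewriting the simm13 field of its
  // arithmetic instruction (add_offset and new_imm are hypothetical names):
  //   int add_insn = long_at(add_offset);
  //   set_long_at(add_offset, set_simm13(add_insn, Assembler::low10(new_imm)));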

  // combine the fields of a sethi stream (7 instructions) and an add, jmp or ld/st
  static intptr_t data64( address pc, int arith_insn ) {
    assert(is_op2(*(unsigned int *)pc, Assembler::sethi_op2), "must be sethi");
    intptr_t hi = (intptr_t)gethi( (unsigned int *)pc );
    intptr_t lo = (intptr_t)get_simm13(arith_insn);
    assert((unsigned)lo < (1 << 10), "offset field of set_oop must be 10 bits");
    return hi | lo;
  }

  // Regenerate the instruction sequence that performs the 64 bit
  // sethi.  This only does the sethi.  The disp field (bottom 10 bits)
  // must be handled separately.
  static void set_data64_sethi(address instaddr, intptr_t x);

  // combine the fields of a sethi/simm13 pair (simm13 = or, add, jmpl, ld/st)
  static int data32(int sethi_insn, int arith_insn) {
    assert(is_op2(sethi_insn, Assembler::sethi_op2), "must be sethi");
    int hi = Assembler::inv_hi22(sethi_insn);
    int lo = get_simm13(arith_insn);
    assert((unsigned)lo < (1 << 10), "offset field of set_oop must be 10 bits");
    return hi | lo;
  }
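
  // Worked example (illustrative only): for imm = 0x12345678 the sethi word
  // carries the upper 22 bits and the arithmetic word the low 10 bits, so
  // data32 reassembles 0x12345400 | 0x278 == 0x12345678.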

  static int set_data32_sethi(int sethi_insn, int imm) {
    // note that Assembler::hi22 clips the low 10 bits for us
    assert(is_op2(sethi_insn, Assembler::sethi_op2), "must be sethi");
    return (sethi_insn &~ Assembler::hi22(-1)) | Assembler::hi22(imm);
  }

  static int set_data32_simm13(int arith_insn, int imm) {
    get_simm13(arith_insn);             // tickle the assertion check
    int imm10 = Assembler::low10(imm);
    return (arith_insn &~ Assembler::simm(-1, 13)) | Assembler::simm(imm10, 13);
  }

  static int low10(int imm) {
    return Assembler::low10(imm);
  }

  // Perform the inverse of the LP64 MacroAssembler::sethi
  // routine.  Extracts the 54 bits of address from the instruction
  // stream. This routine must agree with the sethi routine in
  // assembler_inline_sparc.hpp
  static address gethi( unsigned int *pc ) {
    int i = 0;
    uintptr_t adr;
    // We first start out with the real sethi instruction
    assert(is_op2(*pc, Assembler::sethi_op2), "in gethi - must be sethi");
    adr = (unsigned int)Assembler::inv_hi22( *(pc++) );
    i++;
    while ( i < 7 ) {
       // We're done if we hit a nop
       if ( (int)*pc == nop_instruction() ) break;
       assert ( Assembler::inv_op(*pc) == Assembler::arith_op, "in gethi - must be arith_op" );
       switch  ( Assembler::inv_op3(*pc) ) {
         case Assembler::xor_op3:
           adr ^= (intptr_t)get_simm13( *pc );
           return ( (address)adr );
           break;
         case Assembler::sll_op3:
           adr <<= ( *pc & 0x3f );
           break;
         case Assembler::or_op3:
           adr |= (intptr_t)get_simm13( *pc );
           break;
         default:
           assert ( 0, "in gethi - Should not reach here" );
           break;
       }
       pc++;
       i++;
    }
    return ( (address)adr );
  }

 public:
  void  verify();
  void  print();

  // unit test stuff
  static void test() {}                 // override for testing

  inline friend NativeInstruction* nativeInstruction_at(address address);
};

inline NativeInstruction* nativeInstruction_at(address address) {
    NativeInstruction* inst = (NativeInstruction*)address;
#ifdef ASSERT
      inst->verify();
#endif
    return inst;
}
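
// Usage sketch (illustrative, not part of the original header): given a code
// address pc known to point at an instruction word (pc is a hypothetical
// name), wrap it and dispatch on its kind:
//   NativeInstruction* ni = nativeInstruction_at(pc);
//   if (ni->is_call()) { /* treat as a NativeCall site */ }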



//-----------------------------------------------------------------------------

// The NativeCall is an abstraction for accessing/manipulating native call imm32 instructions.
// (used to manipulate inline caches, primitive & dll calls, etc.)
inline NativeCall* nativeCall_at(address instr);
inline NativeCall* nativeCall_overwriting_at(address instr,
                                             address destination);
inline NativeCall* nativeCall_before(address return_address);
class NativeCall: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    instruction_size                   = 8,
    return_address_offset              = 8,
    call_displacement_width            = 30,
    displacement_offset                = 0,
    instruction_offset                 = 0
  };
  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }
  address return_address() const            { return addr_at(return_address_offset); }

  address destination() const               { return inv_wdisp(long_at(0), call_displacement_width) + instruction_address(); }
  address displacement_address() const      { return addr_at(displacement_offset); }
  void  set_destination(address dest)       { set_long_at(0, set_wdisp(long_at(0), dest - instruction_address(), call_displacement_width)); }
  void  set_destination_mt_safe(address dest);

  void  verify_alignment() {} // do nothing on sparc
  void  verify();
  void  print();

  // unit test stuff
  static void  test();

  // Creation
  friend inline NativeCall* nativeCall_at(address instr);
  friend NativeCall* nativeCall_overwriting_at(address instr, address destination = NULL) {
    // insert a "blank" call:
    NativeCall* call = (NativeCall*)instr;
    call->set_long_at(0 * BytesPerInstWord, call_instruction(destination, instr));
    call->set_long_at(1 * BytesPerInstWord, nop_instruction());
    assert(call->addr_at(2 * BytesPerInstWord) - instr == instruction_size, "instruction size");
    // check its structure now:
    assert(nativeCall_at(instr)->destination() == destination, "correct call destination");
    return call;
  }

  friend inline NativeCall* nativeCall_before(address return_address) {
    NativeCall* call = (NativeCall*)(return_address - return_address_offset);
    #ifdef ASSERT
      call->verify();
    #endif
    return call;
  }

  static bool is_call_at(address instr) {
    return nativeInstruction_at(instr)->is_call();
  }

  static bool is_call_before(address instr) {
    return nativeInstruction_at(instr - return_address_offset)->is_call();
  }

  static bool is_call_to(address instr, address target) {
    return nativeInstruction_at(instr)->is_call() &&
      nativeCall_at(instr)->destination() == target;
  }

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry) {
    (void)nativeCall_overwriting_at(code_pos, entry);
  }

  static void replace_mt_safe(address instr_addr, address code_buffer);
};
inline NativeCall* nativeCall_at(address instr) {
  NativeCall* call = (NativeCall*)instr;
#ifdef ASSERT
  call->verify();
#endif
  return call;
}
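
// Usage sketch (illustrative, not part of the original header): retargeting an
// existing call site at call_pc to a new entry point new_entry (both names are
// hypothetical):
//   NativeCall* call = nativeCall_at(call_pc);
//   address old_dest = call->destination();
//   call->set_destination_mt_safe(new_entry);  // patch while other threads may execute it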

// The NativeFarCall is an abstraction for accessing/manipulating native call-anywhere
// instructions in the sparcv9 vm.  Used to call native methods which may be loaded
// anywhere in the address space, possibly out of reach of a call instruction.

#ifndef _LP64

// On 32-bit systems, a far call is the same as a near one.
class NativeFarCall;
inline NativeFarCall* nativeFarCall_at(address instr);
class NativeFarCall : public NativeCall {
public:
  friend inline NativeFarCall* nativeFarCall_at(address instr) { return (NativeFarCall*)nativeCall_at(instr); }
  friend NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination = NULL)
                                                        { return (NativeFarCall*)nativeCall_overwriting_at(instr, destination); }
  friend NativeFarCall* nativeFarCall_before(address return_address)
                                                        { return (NativeFarCall*)nativeCall_before(return_address); }
};

#else

// The format of this extended-range call is:
//      jumpl_to addr, lreg
//      == sethi %hi54(addr), O7 ;  jumpl O7, %lo10(addr), O7 ;  <delay>
// That is, it is essentially the same as a NativeJump.
class NativeFarCall;
inline NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination);
inline NativeFarCall* nativeFarCall_at(address instr);
class NativeFarCall: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    // instruction_size includes the delay slot instruction.
    instruction_size                   = 9 * BytesPerInstWord,
    return_address_offset              = 9 * BytesPerInstWord,
    jmpl_offset                        = 7 * BytesPerInstWord,
    displacement_offset                = 0,
    instruction_offset                 = 0
  };
  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }
  address return_address() const            { return addr_at(return_address_offset); }

  address destination() const {
    return (address) data64(addr_at(0), long_at(jmpl_offset));
  }
  address displacement_address() const      { return addr_at(displacement_offset); }
  void set_destination(address dest);

  bool destination_is_compiled_verified_entry_point();

  void  verify();
  void  print();

  // unit test stuff
  static void  test();

  // Creation
  friend inline NativeFarCall* nativeFarCall_at(address instr) {
    NativeFarCall* call = (NativeFarCall*)instr;
    #ifdef ASSERT
      call->verify();
    #endif
    return call;
  }

  friend inline NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination = NULL) {
    Unimplemented();
    NativeFarCall* call = (NativeFarCall*)instr;
    return call;
  }

  friend NativeFarCall* nativeFarCall_before(address return_address) {
    NativeFarCall* call = (NativeFarCall*)(return_address - return_address_offset);
    #ifdef ASSERT
      call->verify();
    #endif
    return call;
  }

  static bool is_call_at(address instr);

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry) {
    (void)nativeFarCall_overwriting_at(code_pos, entry);
  }
  static void replace_mt_safe(address instr_addr, address code_buffer);
};

#endif // _LP64

// An interface for accessing/manipulating native set_oop imm, reg instructions.
// (used to manipulate inlined data references, etc.)
//      set_oop imm, reg
//      == sethi %hi22(imm), reg ;  add reg, %lo10(imm), reg
class NativeMovConstReg;
inline NativeMovConstReg* nativeMovConstReg_at(address address);
class NativeMovConstReg: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    sethi_offset           = 0,
#ifdef _LP64
    add_offset             = 7 * BytesPerInstWord,
    instruction_size       = 8 * BytesPerInstWord
#else
    add_offset             = 4,
    instruction_size       = 8
#endif
  };

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }

  // (The [set_]data accessor respects oop_type relocs also.)
  intptr_t data() const;
  void set_data(intptr_t x);

  // report the destination register
  Register destination() { return inv_rd(long_at(sethi_offset)); }

  void  verify();
  void  print();

  // unit test stuff
  static void test();

  // Creation
  friend inline NativeMovConstReg* nativeMovConstReg_at(address address) {
    NativeMovConstReg* test = (NativeMovConstReg*)address;
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }


  friend NativeMovConstReg* nativeMovConstReg_before(address address) {
    NativeMovConstReg* test = (NativeMovConstReg*)(address - instruction_size);
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }

};
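
// Usage sketch (illustrative, not part of the original header): rewriting the
// constant embedded in a set_oop sequence whose first instruction is at pc
// (pc and new_value are hypothetical names):
//   NativeMovConstReg* mcr = nativeMovConstReg_at(pc);
//   intptr_t old_value = mcr->data();
//   mcr->set_data(new_value);   // respects oop_type relocations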


// An interface for accessing/manipulating native set_oop imm, reg instructions.
// (used to manipulate inlined data references, etc.)
//      set_oop imm, reg
//      == sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg
//
// Note that it is identical to NativeMovConstReg with the exception of a nop between the
// sethi and the add.  The nop is required to be in the delay slot of the call instruction
// which overwrites the sethi during patching.
class NativeMovConstRegPatching;
inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address);
class NativeMovConstRegPatching: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    sethi_offset           = 0,
#ifdef _LP64
    nop_offset             = 7 * BytesPerInstWord,
#else
    nop_offset             = sethi_offset + BytesPerInstWord,
#endif
    add_offset             = nop_offset   + BytesPerInstWord,
    instruction_size       = add_offset   + BytesPerInstWord
  };

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }

  // (The [set_]data accessor respects oop_type relocs also.)
  int data() const;
  void  set_data(int x);

  // report the destination register
  Register destination() { return inv_rd(long_at(sethi_offset)); }

  void  verify();
  void  print();

  // unit test stuff
  static void test();

  // Creation
  friend inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)address;
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }


  friend NativeMovConstRegPatching* nativeMovConstRegPatching_before(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_size);
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }

};


// An interface for accessing/manipulating native memory ops
//      ld* [reg + offset], reg
//      st* reg, [reg + offset]
//      sethi %hi(imm), reg; add reg, %lo(imm), reg; ld* [reg1 + reg], reg2
//      sethi %hi(imm), reg; add reg, %lo(imm), reg; st* reg2, [reg1 + reg]
// Ops covered: {lds,ldu,st}{w,b,h}, {ld,st}{d,x}
//
class NativeMovRegMem;
inline NativeMovRegMem* nativeMovRegMem_at (address address);
class NativeMovRegMem: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    op3_mask_ld = 1 << Assembler::lduw_op3 |
                  1 << Assembler::ldub_op3 |
                  1 << Assembler::lduh_op3 |
                  1 << Assembler::ldd_op3 |
                  1 << Assembler::ldsw_op3 |
                  1 << Assembler::ldsb_op3 |
                  1 << Assembler::ldsh_op3 |
                  1 << Assembler::ldx_op3,
    op3_mask_st = 1 << Assembler::stw_op3 |
                  1 << Assembler::stb_op3 |
                  1 << Assembler::sth_op3 |
                  1 << Assembler::std_op3 |
                  1 << Assembler::stx_op3,
    op3_ldst_int_limit = Assembler::ldf_op3,
    op3_mask_ldf = 1 << (Assembler::ldf_op3  - op3_ldst_int_limit) |
                   1 << (Assembler::lddf_op3 - op3_ldst_int_limit),
    op3_mask_stf = 1 << (Assembler::stf_op3  - op3_ldst_int_limit) |
                   1 << (Assembler::stdf_op3 - op3_ldst_int_limit),

    offset_width    = 13,
    sethi_offset    = 0,
#ifdef _LP64
    add_offset      = 7 * BytesPerInstWord,
#else
    add_offset      = 4,
#endif
    ldst_offset     = add_offset + BytesPerInstWord
  };
  bool is_immediate() const {
    // check if instruction is ld* [reg + offset], reg or st* reg, [reg + offset]
    int i0 = long_at(0);
    return (is_op(i0, Assembler::ldst_op));
  }

  address instruction_address() const           { return addr_at(0); }
  address next_instruction_address() const      {
#ifdef _LP64
    return addr_at(is_immediate() ? 4 : (7 * BytesPerInstWord));
#else
    return addr_at(is_immediate() ? 4 : 12);
#endif
  }
  intptr_t   offset() const                             {
     return is_immediate()? inv_simm(long_at(0), offset_width) :
                            nativeMovConstReg_at(addr_at(0))->data();
  }
  void  set_offset(intptr_t x) {
    if (is_immediate()) {
      guarantee(fits_in_simm(x, offset_width), "data block offset overflow");
      set_long_at(0, set_simm(long_at(0), x, offset_width));
    } else
      nativeMovConstReg_at(addr_at(0))->set_data(x);
  }

  void  add_offset_in_bytes(intptr_t radd_offset)     {
      set_offset (offset() + radd_offset);
  }

  void  copy_instruction_to(address new_instruction_address);

  void verify();
  void print ();

  // unit test stuff
  static void test();

 private:
  friend inline NativeMovRegMem* nativeMovRegMem_at (address address) {
    NativeMovRegMem* test = (NativeMovRegMem*)address;
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }
};
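
// Usage sketch (illustrative, not part of the original header): bumping the
// displacement of a memory access after its data block moves by delta bytes
// (pc and delta are hypothetical names):
//   nativeMovRegMem_at(pc)->add_offset_in_bytes(delta);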


// An interface for accessing/manipulating native memory ops
//      ld* [reg + offset], reg
//      st* reg, [reg + offset]
//      sethi %hi(imm), reg; nop; add reg, %lo(imm), reg; ld* [reg1 + reg], reg2
//      sethi %hi(imm), reg; nop; add reg, %lo(imm), reg; st* reg2, [reg1 + reg]
// Ops covered: {lds,ldu,st}{w,b,h}, {ld,st}{d,x}
//
// Note that it is identical to NativeMovRegMem with the exception of a nop between the
// sethi and the add.  The nop is required to be in the delay slot of the call instruction
// which overwrites the sethi during patching.
class NativeMovRegMemPatching;
inline NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address);
class NativeMovRegMemPatching: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    op3_mask_ld = 1 << Assembler::lduw_op3 |
                  1 << Assembler::ldub_op3 |
                  1 << Assembler::lduh_op3 |
                  1 << Assembler::ldd_op3 |
                  1 << Assembler::ldsw_op3 |
                  1 << Assembler::ldsb_op3 |
                  1 << Assembler::ldsh_op3 |
                  1 << Assembler::ldx_op3,
    op3_mask_st = 1 << Assembler::stw_op3 |
                  1 << Assembler::stb_op3 |
                  1 << Assembler::sth_op3 |
                  1 << Assembler::std_op3 |
                  1 << Assembler::stx_op3,
    op3_ldst_int_limit = Assembler::ldf_op3,
    op3_mask_ldf = 1 << (Assembler::ldf_op3  - op3_ldst_int_limit) |
                   1 << (Assembler::lddf_op3 - op3_ldst_int_limit),
    op3_mask_stf = 1 << (Assembler::stf_op3  - op3_ldst_int_limit) |
                   1 << (Assembler::stdf_op3 - op3_ldst_int_limit),

    offset_width    = 13,
    sethi_offset    = 0,
#ifdef _LP64
    nop_offset      = 7 * BytesPerInstWord,
#else
    nop_offset      = 4,
#endif
    add_offset      = nop_offset + BytesPerInstWord,
    ldst_offset     = add_offset + BytesPerInstWord
  };
  bool is_immediate() const {
    // check if instruction is ld* [reg + offset], reg or st* reg, [reg + offset]
    int i0 = long_at(0);
    return (is_op(i0, Assembler::ldst_op));
  }

  address instruction_address() const           { return addr_at(0); }
  address next_instruction_address() const      {
    return addr_at(is_immediate()? 4 : 16);
  }
  int   offset() const                          {
     return is_immediate()? inv_simm(long_at(0), offset_width) :
                            nativeMovConstRegPatching_at(addr_at(0))->data();
  }
  void  set_offset(int x) {
    if (is_immediate()) {
      guarantee(fits_in_simm(x, offset_width), "data block offset overflow");
      set_long_at(0, set_simm(long_at(0), x, offset_width));
    }
    else
      nativeMovConstRegPatching_at(addr_at(0))->set_data(x);
  }

  void  add_offset_in_bytes(intptr_t radd_offset)     {
      set_offset (offset() + radd_offset);
  }

  void  copy_instruction_to(address new_instruction_address);

  void verify();
  void print ();

  // unit test stuff
  static void test();

 private:
  friend inline NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address) {
    NativeMovRegMemPatching* test = (NativeMovRegMemPatching*)address;
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }
};


// An interface for accessing/manipulating native jumps
//      jump_to addr
//      == sethi %hi22(addr), temp ;  jumpl reg, %lo10(addr), G0 ;  <delay>
//      jumpl_to addr, lreg
//      == sethi %hi22(addr), temp ;  jumpl reg, %lo10(addr), lreg ;  <delay>
class NativeJump;
inline NativeJump* nativeJump_at(address address);
class NativeJump: public NativeInstruction {
 private:
  void guarantee_displacement(int disp, int width) {
    guarantee(fits_in_simm(disp, width + 2), "branch displacement overflow");
  }

 public:
  enum Sparc_specific_constants {
    sethi_offset           = 0,
#ifdef _LP64
    jmpl_offset            = 7 * BytesPerInstWord,
    instruction_size       = 9 * BytesPerInstWord  // includes delay slot
#else
    jmpl_offset            = 1 * BytesPerInstWord,
    instruction_size       = 3 * BytesPerInstWord  // includes delay slot
#endif
  };

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }

#ifdef _LP64
  address jump_destination() const {
    return (address) data64(instruction_address(), long_at(jmpl_offset));
  }
  void set_jump_destination(address dest) {
    set_data64_sethi( instruction_address(), (intptr_t)dest);
    set_long_at(jmpl_offset,  set_data32_simm13( long_at(jmpl_offset),  (intptr_t)dest));
  }
#else
  address jump_destination() const {
    return (address) data32(long_at(sethi_offset), long_at(jmpl_offset));
  }
  void set_jump_destination(address dest) {
    set_long_at(sethi_offset, set_data32_sethi(  long_at(sethi_offset), (intptr_t)dest));
    set_long_at(jmpl_offset,  set_data32_simm13( long_at(jmpl_offset),  (intptr_t)dest));
  }
#endif

  // Creation
  friend inline NativeJump* nativeJump_at(address address) {
    NativeJump* jump = (NativeJump*)address;
    #ifdef ASSERT
      jump->verify();
    #endif
    return jump;
  }

  void verify();
  void print();

  // Unit testing stuff
  static void test();

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry) {
    // nothing to do for sparc.
  }
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};
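
// Usage sketch (illustrative, not part of the original header): redirecting an
// existing jump_to sequence at jump_pc (a hypothetical name) to a new target:
//   NativeJump* jump = nativeJump_at(jump_pc);
//   jump->set_jump_destination(new_target);
// Fresh jumps are planted with NativeJump::insert(code_pos, entry).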



// Despite the name, handles only simple branches.
class NativeGeneralJump;
inline NativeGeneralJump* nativeGeneralJump_at(address address);
class NativeGeneralJump: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    instruction_size                   = 8
  };

  address instruction_address() const       { return addr_at(0); }
  address jump_destination()    const       { return addr_at(0) + branch_destination_offset(long_at(0)); }
  void set_jump_destination(address dest) {
    int patched_instr = patch_branch_destination_offset(dest - addr_at(0), long_at(0));
    set_long_at(0, patched_instr);
  }
  void set_annul() { set_annul_bit(); }
  NativeInstruction *delay_slot_instr() { return nativeInstruction_at(addr_at(4));}
  void fill_delay_slot(int instr) { set_long_at(4, instr);}
  Assembler::Condition condition() {
    int x = long_at(0);
    return (Assembler::Condition) Assembler::inv_cond(x);
  }

  // Creation
  friend inline NativeGeneralJump* nativeGeneralJump_at(address address) {
    NativeGeneralJump* jump = (NativeGeneralJump*)(address);
#ifdef ASSERT
      jump->verify();
#endif
    return jump;
  }

  // Insertion of native general jump instruction
  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);

  void verify();
};
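
// Usage sketch (illustrative, not part of the original header): retargeting a
// simple branch at br_pc (a hypothetical name) during code patching:
//   NativeGeneralJump* br = nativeGeneralJump_at(br_pc);
//   br->set_jump_destination(new_target);
//   br->fill_delay_slot(NativeInstruction::nop_instruction());  // keep the delay slot harmless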


class NativeIllegalInstruction: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    instruction_size            =    4
  };

  // Insert illegal opcode at a specific address
  static void insert(address code_pos);
};