/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_SPARC_VM_NATIVEINST_SPARC_HPP
#define CPU_SPARC_VM_NATIVEINST_SPARC_HPP

#include "asm/macroAssembler.hpp"
#include "memory/allocation.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#include "utilities/top.hpp"

// We have interfaces for the following instructions:
// - NativeInstruction
// - - NativeCall
// - - NativeFarCall
// - - NativeMovConstReg
// - - NativeMovConstRegPatching
// - - NativeMovRegMem
// - - NativeMovRegMemPatching
// - - NativeJump
// - - NativeGeneralJump
// - - NativeIllegalInstruction
// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this.
class NativeInstruction VALUE_OBJ_CLASS_SPEC {
  friend class Relocation;

 public:
  enum Sparc_specific_constants {
    nop_instruction_size        =    4
  };

  bool is_dtrace_trap();
  bool is_nop()                        { return long_at(0) == nop_instruction(); }
  bool is_call()                       { return is_op(long_at(0), Assembler::call_op); }
  bool is_sethi()                      { return (is_op2(long_at(0), Assembler::sethi_op2)
                                          && inv_rd(long_at(0)) != G0); }

  bool sets_cc() {
    // Conservative: returns true for some instructions that do not set the
    // condition code, such as "save".
    // Does not return true for the deprecated tagged instructions, such as TADDcc.
    int x = long_at(0);
    return (is_op(x, Assembler::arith_op) &&
            (inv_op3(x) & Assembler::cc_bit_op3) == Assembler::cc_bit_op3);
  }
  bool is_illegal();
  bool is_zombie() {
    int x = long_at(0);
    return is_op3(x,
                  Assembler::ldsw_op3,
                  Assembler::ldst_op)
        && Assembler::inv_rs1(x) == G0
        && Assembler::inv_rd(x) == O7;
  }
  bool is_ic_miss_trap();       // Inline-cache uses a trap to detect a miss
  bool is_return() {
    // is it the output of MacroAssembler::ret or MacroAssembler::retl?
    int x = long_at(0);
    const int pc_return_offset = 8; // see frame_sparc.hpp
    return is_op3(x, Assembler::jmpl_op3, Assembler::arith_op)
        && (inv_rs1(x) == I7 || inv_rs1(x) == O7)
        && inv_immed(x) && inv_simm(x, 13) == pc_return_offset
        && inv_rd(x) == G0;
  }
  bool is_int_jump() {
    // is it the output of MacroAssembler::b?
    int x = long_at(0);
    return is_op2(x, Assembler::bp_op2) || is_op2(x, Assembler::br_op2);
  }
  bool is_float_jump() {
    // is it the output of MacroAssembler::fb?
    int x = long_at(0);
    return is_op2(x, Assembler::fbp_op2) || is_op2(x, Assembler::fb_op2);
  }
  bool is_jump() {
    return is_int_jump() || is_float_jump();
  }
  bool is_cond_jump() {
    int x = long_at(0);
    return (is_int_jump() && Assembler::inv_cond(x) != Assembler::always) ||
           (is_float_jump() && Assembler::inv_cond(x) != Assembler::f_always);
  }

  bool is_stack_bang() {
    int x = long_at(0);
    return is_op3(x, Assembler::stw_op3, Assembler::ldst_op) &&
      (inv_rd(x) == G0) && (inv_rs1(x) == SP) && (inv_rs2(x) == G3_scratch);
  }

  bool is_prefetch() {
    int x = long_at(0);
    return is_op3(x, Assembler::prefetch_op3, Assembler::ldst_op);
  }

  bool is_membar() {
    int x = long_at(0);
    return is_op3(x, Assembler::membar_op3, Assembler::arith_op) &&
      (inv_rd(x) == G0) && (inv_rs1(x) == O7);
  }

  bool is_safepoint_poll() {
    int x = long_at(0);
#ifdef _LP64
    return is_op3(x, Assembler::ldx_op3,  Assembler::ldst_op) &&
#else
    return is_op3(x, Assembler::lduw_op3, Assembler::ldst_op) &&
#endif
      (inv_rd(x) == G0) && (inv_immed(x) ? Assembler::inv_simm13(x) == 0 : inv_rs2(x) == G0);
  }

  bool is_zero_test(Register &reg);
  bool is_load_store_with_small_offset(Register reg);

 public:
#ifdef ASSERT
  static int rdpc_instruction()        { return Assembler::op(Assembler::arith_op ) | Assembler::op3(Assembler::rdreg_op3) | Assembler::u_field(5, 18, 14) | Assembler::rd(O7); }
#else
  // Temporary fix: in optimized mode, u_field is a macro for efficiency reasons (see Assembler::u_field) - needs to be fixed
  static int rdpc_instruction()        { return Assembler::op(Assembler::arith_op ) | Assembler::op3(Assembler::rdreg_op3) |            u_field(5, 18, 14) | Assembler::rd(O7); }
#endif
  static int nop_instruction()         { return Assembler::op(Assembler::branch_op) | Assembler::op2(Assembler::sethi_op2); }
  static int illegal_instruction();    // the output of __ breakpoint_trap()
  static int call_instruction(address destination, address pc) { return Assembler::op(Assembler::call_op) | Assembler::wdisp((intptr_t)destination, (intptr_t)pc, 30); }

  static int branch_instruction(Assembler::op2s op2val, Assembler::Condition c, bool a) {
    return Assembler::op(Assembler::branch_op) | Assembler::op2(op2val) | Assembler::annul(a) | Assembler::cond(c);
  }

  static int op3_instruction(Assembler::ops opval, Register rd, Assembler::op3s op3val, Register rs1, int simm13a) {
    return Assembler::op(opval) | Assembler::rd(rd) | Assembler::op3(op3val) | Assembler::rs1(rs1) | Assembler::immed(true) | Assembler::simm(simm13a, 13);
  }

  static int sethi_instruction(Register rd, int imm22a) {
    return Assembler::op(Assembler::branch_op) | Assembler::rd(rd) | Assembler::op2(Assembler::sethi_op2) | Assembler::hi22(imm22a);
  }
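
  // Illustrative sketch (not part of the original header): the static
  // encoders above compose raw instruction words that can later be planted
  // with set_long_at().  For example, a word of the shape that
  // is_safepoint_poll() recognizes on 32-bit builds ("lduw [SP + 0], G0")
  // could be built as
  //
  //   int poll_word = op3_instruction(Assembler::ldst_op, G0,
  //                                   Assembler::lduw_op3, SP, 0);
  //
  // The choice of SP as the base register is an assumption made purely for
  // illustration; the real polling load uses whatever base register the
  // code emitter chose.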

 protected:
  address  addr_at(int offset) const    { return address(this) + offset; }
  int      long_at(int offset) const    { return *(int*)addr_at(offset); }
  void set_long_at(int offset, int i);      /* deals with I-cache */
  void set_jlong_at(int offset, jlong i);   /* deals with I-cache */
  void set_addr_at(int offset, address x);  /* deals with I-cache */

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(BytesPerInstWord); }

  static bool is_op( int x, Assembler::ops opval)  {
    return Assembler::inv_op(x) == opval;
  }
  static bool is_op2(int x, Assembler::op2s op2val) {
    return Assembler::inv_op(x) == Assembler::branch_op && Assembler::inv_op2(x) == op2val;
  }
  static bool is_op3(int x, Assembler::op3s op3val, Assembler::ops opval) {
    return Assembler::inv_op(x) == opval && Assembler::inv_op3(x) == op3val;
  }

  // utilities to help subclasses decode:
  static Register inv_rd(  int x ) { return Assembler::inv_rd( x); }
  static Register inv_rs1( int x ) { return Assembler::inv_rs1(x); }
  static Register inv_rs2( int x ) { return Assembler::inv_rs2(x); }

  static bool inv_immed( int x ) { return Assembler::inv_immed(x); }
  static bool inv_annul( int x ) { return (Assembler::annul(true) & x) != 0; }
  static int  inv_cond(  int x ) { return Assembler::inv_cond(x); }

  static int inv_op(  int x ) { return Assembler::inv_op( x); }
  static int inv_op2( int x ) { return Assembler::inv_op2(x); }
  static int inv_op3( int x ) { return Assembler::inv_op3(x); }

  static int inv_simm(    int x, int nbits ) { return Assembler::inv_simm(x, nbits); }
  static intptr_t inv_wdisp(   int x, int nbits ) { return Assembler::inv_wdisp(  x, 0, nbits); }
  static intptr_t inv_wdisp16( int x )            { return Assembler::inv_wdisp16(x, 0); }
  static int branch_destination_offset(int x) { return MacroAssembler::branch_destination(x, 0); }
  static int patch_branch_destination_offset(int dest_offset, int x) {
    return MacroAssembler::patched_branch(dest_offset, x, 0);
  }

  // utility for checking if x is either of 2 small constants
  static bool is_either(int x, int k1, int k2) {
    // return x == k1 || x == k2;
    return (1 << x) & (1 << k1 | 1 << k2);
  }

  // utility for checking overflow of signed instruction fields
  static bool fits_in_simm(int x, int nbits) {
    // cf. Assembler::assert_signed_range()
    // return -(1 << nbits-1) <= x  &&  x < ( 1 << nbits-1);
    return (unsigned)(x + (1 << nbits-1)) < (unsigned)(1 << nbits);
  }
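
  // Worked example (illustrative): with nbits == 13 the accepted range is
  // [-4096, 4095], i.e. -(1 << 12) <= x < (1 << 12).  Adding (1 << 12)
  // shifts that range onto [0, 8191], so the single unsigned comparison
  // against (1 << 13) above checks both bounds at once.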

  // set a signed immediate field
  static int set_simm(int insn, int imm, int nbits) {
    return (insn &~ Assembler::simm(-1, nbits)) | Assembler::simm(imm, nbits);
  }

  // set a wdisp field (disp should be the difference of two addresses)
  static int set_wdisp(int insn, intptr_t disp, int nbits) {
    return (insn &~ Assembler::wdisp((intptr_t)-4, (intptr_t)0, nbits)) | Assembler::wdisp(disp, 0, nbits);
  }

  static int set_wdisp16(int insn, intptr_t disp) {
    return (insn &~ Assembler::wdisp16((intptr_t)-4, 0)) | Assembler::wdisp16(disp, 0);
  }

  // get a simm13 field from an arithmetic or memory instruction
  static int get_simm13(int insn) {
    assert(is_either(Assembler::inv_op(insn),
                     Assembler::arith_op, Assembler::ldst_op) &&
            (insn & Assembler::immed(true)), "must have a simm13 field");
    return Assembler::inv_simm(insn, 13);
  }

  // set the simm13 field of an arithmetic or memory instruction
  static bool set_simm13(int insn, int imm) {
    get_simm13(insn);           // tickle the assertion check
    return set_simm(insn, imm, 13);
  }

  // combine the fields of a sethi stream (7 instructions) and an add, jmp or ld/st
  static intptr_t data64( address pc, int arith_insn ) {
    assert(is_op2(*(unsigned int *)pc, Assembler::sethi_op2), "must be sethi");
    intptr_t hi = (intptr_t)gethi( (unsigned int *)pc );
    intptr_t lo = (intptr_t)get_simm13(arith_insn);
    assert((unsigned)lo < (1 << 10), "offset field of set_metadata must be 10 bits");
    return hi | lo;
  }

  // Regenerate the instruction sequence that performs the 64 bit
  // sethi.  This only does the sethi.  The disp field (bottom 10 bits)
  // must be handled separately.
  static void set_data64_sethi(address instaddr, intptr_t x);
  static void verify_data64_sethi(address instaddr, intptr_t x);

  // combine the fields of a sethi/simm13 pair (simm13 = or, add, jmpl, ld/st)
  static int data32(int sethi_insn, int arith_insn) {
    assert(is_op2(sethi_insn, Assembler::sethi_op2), "must be sethi");
    int hi = Assembler::inv_hi22(sethi_insn);
    int lo = get_simm13(arith_insn);
    assert((unsigned)lo < (1 << 10), "offset field of set_metadata must be 10 bits");
    return hi | lo;
  }
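
  // Illustrative note: in the pair above, inv_hi22() recovers the upper 22
  // bits of the constant (low 10 bits zero) while get_simm13() carries the
  // low 10 bits, so for a 32-bit value such as 0x12345678 the result is
  // (0x12345678 & ~0x3ff) | (0x12345678 & 0x3ff), i.e. hi | lo reassembles
  // the original constant.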

  static int set_data32_sethi(int sethi_insn, int imm) {
    // note that Assembler::hi22 clips the low 10 bits for us
    assert(is_op2(sethi_insn, Assembler::sethi_op2), "must be sethi");
    return (sethi_insn &~ Assembler::hi22(-1)) | Assembler::hi22(imm);
  }

  static int set_data32_simm13(int arith_insn, int imm) {
    get_simm13(arith_insn);             // tickle the assertion check
    int imm10 = Assembler::low10(imm);
    return (arith_insn &~ Assembler::simm(-1, 13)) | Assembler::simm(imm10, 13);
  }

  static int low10(int imm) {
    return Assembler::low10(imm);
  }

  // Perform the inverse of the LP64 MacroAssembler::sethi
  // routine.  Extracts the 54 bits of address from the instruction
  // stream. This routine must agree with the sethi routine in
  // assembler_inline_sparc.hpp
  static address gethi( unsigned int *pc ) {
    int i = 0;
    uintptr_t adr;
    // We first start out with the real sethi instruction
    assert(is_op2(*pc, Assembler::sethi_op2), "in gethi - must be sethi");
    adr = (unsigned int)Assembler::inv_hi22( *(pc++) );
    i++;
    while ( i < 7 ) {
       // We're done if we hit a nop
       if ( (int)*pc == nop_instruction() ) break;
       assert ( Assembler::inv_op(*pc) == Assembler::arith_op, "in gethi - must be arith_op" );
       switch  ( Assembler::inv_op3(*pc) ) {
         case Assembler::xor_op3:
           adr ^= (intptr_t)get_simm13( *pc );
           return ( (address)adr );
           break;
         case Assembler::sll_op3:
           adr <<= ( *pc & 0x3f );
           break;
         case Assembler::or_op3:
           adr |= (intptr_t)get_simm13( *pc );
           break;
         default:
           assert ( 0, "in gethi - Should not reach here" );
           break;
       }
       pc++;
       i++;
    }
    return ( (address)adr );
  }

 public:
  void  verify();
  void  print();

  // unit test stuff
  static void test() {}                 // override for testing

  inline friend NativeInstruction* nativeInstruction_at(address address);
};

inline NativeInstruction* nativeInstruction_at(address address) {
    NativeInstruction* inst = (NativeInstruction*)address;
#ifdef ASSERT
      inst->verify();
#endif
    return inst;
}
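
// Illustrative usage sketch (not part of the original header): code that
// inspects an arbitrary instruction word typically goes through the accessor
// above and then the predicates on NativeInstruction; `pc' is assumed to be
// a valid code address.
//
//   NativeInstruction* ni = nativeInstruction_at(pc);
//   if (ni->is_call()) {
//     // treat the site as a NativeCall, see below
//   }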



//-----------------------------------------------------------------------------

// The NativeCall is an abstraction for accessing/manipulating native call instructions
// (a 30-bit word-displacement call plus its delay slot); used to manipulate inline
// caches, primitive & dll calls, etc.
inline NativeCall* nativeCall_at(address instr);
inline NativeCall* nativeCall_overwriting_at(address instr,
                                             address destination);
inline NativeCall* nativeCall_before(address return_address);
class NativeCall: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    instruction_size                   = 8,
    return_address_offset              = 8,
    call_displacement_width            = 30,
    displacement_offset                = 0,
    instruction_offset                 = 0
  };
  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }
  address return_address() const            { return addr_at(return_address_offset); }

  address destination() const               { return inv_wdisp(long_at(0), call_displacement_width) + instruction_address(); }
  address displacement_address() const      { return addr_at(displacement_offset); }
  void  set_destination(address dest)       { set_long_at(0, set_wdisp(long_at(0), dest - instruction_address(), call_displacement_width)); }
  void  set_destination_mt_safe(address dest);

  void  verify_alignment() {} // do nothing on sparc
  void  verify();
  void  print();

  // unit test stuff
  static void  test();

  // Creation
  friend inline NativeCall* nativeCall_at(address instr);
  friend NativeCall* nativeCall_overwriting_at(address instr, address destination = NULL) {
    // insert a "blank" call:
    NativeCall* call = (NativeCall*)instr;
    call->set_long_at(0 * BytesPerInstWord, call_instruction(destination, instr));
    call->set_long_at(1 * BytesPerInstWord, nop_instruction());
    assert(call->addr_at(2 * BytesPerInstWord) - instr == instruction_size, "instruction size");
    // check its structure now:
    assert(nativeCall_at(instr)->destination() == destination, "correct call destination");
    return call;
  }

  friend inline NativeCall* nativeCall_before(address return_address) {
    NativeCall* call = (NativeCall*)(return_address - return_address_offset);
    #ifdef ASSERT
      call->verify();
    #endif
    return call;
  }

  static bool is_call_at(address instr) {
    return nativeInstruction_at(instr)->is_call();
  }

  static bool is_call_before(address instr) {
    return nativeInstruction_at(instr - return_address_offset)->is_call();
  }

  static bool is_call_to(address instr, address target) {
    return nativeInstruction_at(instr)->is_call() &&
      nativeCall_at(instr)->destination() == target;
  }

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry) {
    (void)nativeCall_overwriting_at(code_pos, entry);
  }

  static void replace_mt_safe(address instr_addr, address code_buffer);
};
inline NativeCall* nativeCall_at(address instr) {
  NativeCall* call = (NativeCall*)instr;
#ifdef ASSERT
  call->verify();
#endif
  return call;
}
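
// Illustrative usage sketch (not part of the original header): retargeting an
// existing call site; `call_site' and `new_target' are assumed valid code
// addresses, and the non-MT-safe setter is shown only for brevity.
//
//   NativeCall* call = nativeCall_at(call_site);
//   if (call->destination() != new_target) {
//     call->set_destination(new_target);     // or set_destination_mt_safe()
//   }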

// The NativeFarCall is an abstraction for accessing/manipulating native call-anywhere
// instructions in the sparcv9 vm.  Used to call native methods which may be loaded
// anywhere in the address space, possibly out of reach of a call instruction.

#ifndef _LP64

// On 32-bit systems, a far call is the same as a near one.
class NativeFarCall;
inline NativeFarCall* nativeFarCall_at(address instr);
class NativeFarCall : public NativeCall {
public:
  friend inline NativeFarCall* nativeFarCall_at(address instr) { return (NativeFarCall*)nativeCall_at(instr); }
  friend NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination = NULL)
                                                        { return (NativeFarCall*)nativeCall_overwriting_at(instr, destination); }
  friend NativeFarCall* nativeFarCall_before(address return_address)
                                                        { return (NativeFarCall*)nativeCall_before(return_address); }
};

#else

// The format of this extended-range call is:
//      jumpl_to addr, lreg
//      == sethi %hi54(addr), O7 ;  jumpl O7, %lo10(addr), O7 ;  <delay>
// That is, it is essentially the same as a NativeJump.
class NativeFarCall;
inline NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination);
inline NativeFarCall* nativeFarCall_at(address instr);
class NativeFarCall: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    // instruction_size includes the delay slot instruction.
    instruction_size                   = 9 * BytesPerInstWord,
    return_address_offset              = 9 * BytesPerInstWord,
    jmpl_offset                        = 7 * BytesPerInstWord,
    displacement_offset                = 0,
    instruction_offset                 = 0
  };
  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }
  address return_address() const            { return addr_at(return_address_offset); }

  address destination() const {
    return (address) data64(addr_at(0), long_at(jmpl_offset));
  }
  address displacement_address() const      { return addr_at(displacement_offset); }
  void set_destination(address dest);

  bool destination_is_compiled_verified_entry_point();

  void  verify();
  void  print();

  // unit test stuff
  static void  test();

  // Creation
  friend inline NativeFarCall* nativeFarCall_at(address instr) {
    NativeFarCall* call = (NativeFarCall*)instr;
    #ifdef ASSERT
      call->verify();
    #endif
    return call;
  }

  friend inline NativeFarCall* nativeFarCall_overwriting_at(address instr, address destination = NULL) {
    Unimplemented();
    NativeFarCall* call = (NativeFarCall*)instr;
    return call;
  }

  friend NativeFarCall* nativeFarCall_before(address return_address) {
    NativeFarCall* call = (NativeFarCall*)(return_address - return_address_offset);
    #ifdef ASSERT
      call->verify();
    #endif
    return call;
  }

  static bool is_call_at(address instr);

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry) {
    (void)nativeFarCall_overwriting_at(code_pos, entry);
  }
  static void replace_mt_safe(address instr_addr, address code_buffer);
};

#endif // _LP64

// An interface for accessing/manipulating native set_metadata imm, reg instructions.
// (used to manipulate inlined data references, etc.)
//      set_metadata imm, reg
//      == sethi %hi22(imm), reg ;  add reg, %lo10(imm), reg
class NativeMovConstReg;
inline NativeMovConstReg* nativeMovConstReg_at(address address);
class NativeMovConstReg: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    sethi_offset           = 0,
#ifdef _LP64
    add_offset             = 7 * BytesPerInstWord,
    instruction_size       = 8 * BytesPerInstWord
#else
    add_offset             = 4,
    instruction_size       = 8
#endif
  };

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }

  // (The [set_]data accessor respects oop_type relocs also.)
  intptr_t data() const;
  void set_data(intptr_t x);

  // report the destination register
  Register destination() { return inv_rd(long_at(sethi_offset)); }

  void  verify();
  void  print();

  // unit test stuff
  static void test();

  // Creation
  friend inline NativeMovConstReg* nativeMovConstReg_at(address address) {
    NativeMovConstReg* test = (NativeMovConstReg*)address;
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }


  friend NativeMovConstReg* nativeMovConstReg_before(address address) {
    NativeMovConstReg* test = (NativeMovConstReg*)(address - instruction_size);
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }

};
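
// Illustrative usage sketch (not part of the original header): patching the
// constant loaded by a sethi/add pair; `site' is assumed to point at the
// sethi and `new_value' is the replacement constant.
//
//   NativeMovConstReg* mov = nativeMovConstReg_at(site);
//   intptr_t old_value = mov->data();
//   mov->set_data(new_value);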


// An interface for accessing/manipulating native set_metadata imm, reg instructions.
// (used to manipulate inlined data references, etc.)
//      set_metadata imm, reg
//      == sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg
//
// Note that it is identical to NativeMovConstReg with the exception of a nop between the
// sethi and the add.  The nop is required to be in the delay slot of the call instruction
// which overwrites the sethi during patching.
class NativeMovConstRegPatching;
inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address);
class NativeMovConstRegPatching: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    sethi_offset           = 0,
#ifdef _LP64
    nop_offset             = 7 * BytesPerInstWord,
#else
    nop_offset             = sethi_offset + BytesPerInstWord,
#endif
    add_offset             = nop_offset   + BytesPerInstWord,
    instruction_size       = add_offset   + BytesPerInstWord
  };

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }

  // (The [set_]data accessor respects oop_type relocs also.)
  int data() const;
  void  set_data(int x);

  // report the destination register
  Register destination() { return inv_rd(long_at(sethi_offset)); }

  void  verify();
  void  print();

  // unit test stuff
  static void test();

  // Creation
  friend inline NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)address;
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }


  friend NativeMovConstRegPatching* nativeMovConstRegPatching_before(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_size);
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }

};


// An interface for accessing/manipulating native memory ops
//      ld* [reg + offset], reg
//      st* reg, [reg + offset]
//      sethi %hi(imm), reg; add reg, %lo(imm), reg; ld* [reg1 + reg], reg2
//      sethi %hi(imm), reg; add reg, %lo(imm), reg; st* reg2, [reg1 + reg]
// Ops covered: {lds,ldu,st}{w,b,h}, {ld,st}{d,x}
//
class NativeMovRegMem;
inline NativeMovRegMem* nativeMovRegMem_at (address address);
class NativeMovRegMem: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    op3_mask_ld = 1 << Assembler::lduw_op3 |
                  1 << Assembler::ldub_op3 |
                  1 << Assembler::lduh_op3 |
                  1 << Assembler::ldd_op3 |
                  1 << Assembler::ldsw_op3 |
                  1 << Assembler::ldsb_op3 |
                  1 << Assembler::ldsh_op3 |
                  1 << Assembler::ldx_op3,
    op3_mask_st = 1 << Assembler::stw_op3 |
                  1 << Assembler::stb_op3 |
                  1 << Assembler::sth_op3 |
                  1 << Assembler::std_op3 |
                  1 << Assembler::stx_op3,
    op3_ldst_int_limit = Assembler::ldf_op3,
    op3_mask_ldf = 1 << (Assembler::ldf_op3  - op3_ldst_int_limit) |
                   1 << (Assembler::lddf_op3 - op3_ldst_int_limit),
    op3_mask_stf = 1 << (Assembler::stf_op3  - op3_ldst_int_limit) |
                   1 << (Assembler::stdf_op3 - op3_ldst_int_limit),

    offset_width    = 13,
    sethi_offset    = 0,
#ifdef _LP64
    add_offset      = 7 * BytesPerInstWord,
#else
    add_offset      = 4,
#endif
    ldst_offset     = add_offset + BytesPerInstWord
  };
  bool is_immediate() const {
    // check if instruction is ld* [reg + offset], reg or st* reg, [reg + offset]
    int i0 = long_at(0);
    return (is_op(i0, Assembler::ldst_op));
  }

  address instruction_address() const           { return addr_at(0); }
  address next_instruction_address() const      {
#ifdef _LP64
    return addr_at(is_immediate() ? 4 : (7 * BytesPerInstWord));
#else
    return addr_at(is_immediate() ? 4 : 12);
#endif
  }
  intptr_t   offset() const                             {
     return is_immediate()? inv_simm(long_at(0), offset_width) :
                            nativeMovConstReg_at(addr_at(0))->data();
  }
  void  set_offset(intptr_t x) {
    if (is_immediate()) {
      guarantee(fits_in_simm(x, offset_width), "data block offset overflow");
      set_long_at(0, set_simm(long_at(0), x, offset_width));
    } else
      nativeMovConstReg_at(addr_at(0))->set_data(x);
  }

  void  add_offset_in_bytes(intptr_t radd_offset)     {
      set_offset (offset() + radd_offset);
  }

  void  copy_instruction_to(address new_instruction_address);

  void verify();
  void print ();

  // unit test stuff
  static void test();

 private:
  friend inline NativeMovRegMem* nativeMovRegMem_at (address address) {
    NativeMovRegMem* test = (NativeMovRegMem*)address;
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }
};


// An interface for accessing/manipulating native memory ops
//      ld* [reg + offset], reg
//      st* reg, [reg + offset]
//      sethi %hi(imm), reg; nop; add reg, %lo(imm), reg; ld* [reg1 + reg], reg2
//      sethi %hi(imm), reg; nop; add reg, %lo(imm), reg; st* reg2, [reg1 + reg]
// Ops covered: {lds,ldu,st}{w,b,h}, {ld,st}{d,x}
//
// Note that it is identical to NativeMovRegMem with the exception of a nop between the
// sethi and the add.  The nop is required to be in the delay slot of the call instruction
// which overwrites the sethi during patching.
class NativeMovRegMemPatching;
inline NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address);
class NativeMovRegMemPatching: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    op3_mask_ld = 1 << Assembler::lduw_op3 |
                  1 << Assembler::ldub_op3 |
                  1 << Assembler::lduh_op3 |
                  1 << Assembler::ldd_op3 |
                  1 << Assembler::ldsw_op3 |
                  1 << Assembler::ldsb_op3 |
                  1 << Assembler::ldsh_op3 |
                  1 << Assembler::ldx_op3,
    op3_mask_st = 1 << Assembler::stw_op3 |
                  1 << Assembler::stb_op3 |
                  1 << Assembler::sth_op3 |
                  1 << Assembler::std_op3 |
                  1 << Assembler::stx_op3,
    op3_ldst_int_limit = Assembler::ldf_op3,
    op3_mask_ldf = 1 << (Assembler::ldf_op3  - op3_ldst_int_limit) |
                   1 << (Assembler::lddf_op3 - op3_ldst_int_limit),
    op3_mask_stf = 1 << (Assembler::stf_op3  - op3_ldst_int_limit) |
                   1 << (Assembler::stdf_op3 - op3_ldst_int_limit),

    offset_width    = 13,
    sethi_offset    = 0,
#ifdef _LP64
    nop_offset      = 7 * BytesPerInstWord,
#else
    nop_offset      = 4,
#endif
    add_offset      = nop_offset + BytesPerInstWord,
    ldst_offset     = add_offset + BytesPerInstWord
  };
  bool is_immediate() const {
    // check if instruction is ld* [reg + offset], reg or st* reg, [reg + offset]
    int i0 = long_at(0);
    return (is_op(i0, Assembler::ldst_op));
  }

  address instruction_address() const           { return addr_at(0); }
  address next_instruction_address() const      {
    return addr_at(is_immediate()? 4 : 16);
  }
  int   offset() const                          {
     return is_immediate()? inv_simm(long_at(0), offset_width) :
                            nativeMovConstRegPatching_at(addr_at(0))->data();
  }
  void  set_offset(int x) {
    if (is_immediate()) {
      guarantee(fits_in_simm(x, offset_width), "data block offset overflow");
      set_long_at(0, set_simm(long_at(0), x, offset_width));
    }
    else
      nativeMovConstRegPatching_at(addr_at(0))->set_data(x);
  }

  void  add_offset_in_bytes(intptr_t radd_offset)     {
      set_offset (offset() + radd_offset);
  }

  void  copy_instruction_to(address new_instruction_address);

  void verify();
  void print ();

  // unit test stuff
  static void test();

 private:
  friend inline NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address) {
    NativeMovRegMemPatching* test = (NativeMovRegMemPatching*)address;
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }
};


// An interface for accessing/manipulating native jumps
//      jump_to addr
//      == sethi %hi22(addr), temp ;  jumpl reg, %lo10(addr), G0 ;  <delay>
//      jumpl_to addr, lreg
//      == sethi %hi22(addr), temp ;  jumpl reg, %lo10(addr), lreg ;  <delay>
class NativeJump;
inline NativeJump* nativeJump_at(address address);
class NativeJump: public NativeInstruction {
 private:
  void guarantee_displacement(int disp, int width) {
    guarantee(fits_in_simm(disp, width + 2), "branch displacement overflow");
  }

 public:
  enum Sparc_specific_constants {
    sethi_offset           = 0,
#ifdef _LP64
    jmpl_offset            = 7 * BytesPerInstWord,
    instruction_size       = 9 * BytesPerInstWord  // includes delay slot
#else
    jmpl_offset            = 1 * BytesPerInstWord,
    instruction_size       = 3 * BytesPerInstWord  // includes delay slot
#endif
  };

  address instruction_address() const       { return addr_at(0); }
  address next_instruction_address() const  { return addr_at(instruction_size); }

#ifdef _LP64
  address jump_destination() const {
    return (address) data64(instruction_address(), long_at(jmpl_offset));
  }
  void set_jump_destination(address dest) {
    set_data64_sethi( instruction_address(), (intptr_t)dest);
    set_long_at(jmpl_offset,  set_data32_simm13( long_at(jmpl_offset),  (intptr_t)dest));
  }
#else
  address jump_destination() const {
    return (address) data32(long_at(sethi_offset), long_at(jmpl_offset));
  }
  void set_jump_destination(address dest) {
    set_long_at(sethi_offset, set_data32_sethi(  long_at(sethi_offset), (intptr_t)dest));
    set_long_at(jmpl_offset,  set_data32_simm13( long_at(jmpl_offset),  (intptr_t)dest));
  }
#endif

  // Creation
  friend inline NativeJump* nativeJump_at(address address) {
    NativeJump* jump = (NativeJump*)address;
    #ifdef ASSERT
      jump->verify();
    #endif
    return jump;
  }

  void verify();
  void print();

  // Unit testing stuff
  static void test();

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry) {
    // nothing to do for sparc.
  }
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};
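
// Illustrative usage sketch (not part of the original header): `code_pos',
// `entry' and `new_entry' are assumed valid code addresses; insert() plants
// a jump sequence and set_jump_destination() retargets an existing one.
//
//   NativeJump::insert(code_pos, entry);
//   nativeJump_at(code_pos)->set_jump_destination(new_entry);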



// Despite the name, handles only simple branches.
class NativeGeneralJump;
inline NativeGeneralJump* nativeGeneralJump_at(address address);
class NativeGeneralJump: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    instruction_size                   = 8
  };

  address instruction_address() const       { return addr_at(0); }
  address jump_destination()    const       { return addr_at(0) + branch_destination_offset(long_at(0)); }
  void set_jump_destination(address dest) {
    int patched_instr = patch_branch_destination_offset(dest - addr_at(0), long_at(0));
    set_long_at(0, patched_instr);
  }
  NativeInstruction *delay_slot_instr() { return nativeInstruction_at(addr_at(4));}
  void fill_delay_slot(int instr) { set_long_at(4, instr);}
  Assembler::Condition condition() {
    int x = long_at(0);
    return (Assembler::Condition) Assembler::inv_cond(x);
  }

  // Creation
  friend inline NativeGeneralJump* nativeGeneralJump_at(address address) {
    NativeGeneralJump* jump = (NativeGeneralJump*)(address);
#ifdef ASSERT
      jump->verify();
#endif
    return jump;
  }

  // Insertion of native general jump instruction
  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);

  void verify();
};
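
// Illustrative usage sketch (not part of the original header): `branch_pc'
// and `new_target' are assumed to be valid code addresses of a simple branch
// and its new destination.
//
//   NativeGeneralJump* jump = nativeGeneralJump_at(branch_pc);
//   if (jump->condition() == Assembler::always) {
//     jump->set_jump_destination(new_target);
//   }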


class NativeIllegalInstruction: public NativeInstruction {
 public:
  enum Sparc_specific_constants {
    instruction_size            =    4
  };

  // Insert illegal opcode at specific address
  static void insert(address code_pos);
};

#endif // CPU_SPARC_VM_NATIVEINST_SPARC_HPP