1 /*
   2  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef CPU_SPARC_VM_ASSEMBLER_SPARC_HPP
  26 #define CPU_SPARC_VM_ASSEMBLER_SPARC_HPP
  27 
  28 class BiasedLockingCounters;
  29 
  30 // <sys/trap.h> promises that the system will not use traps 16-31
  31 #define ST_RESERVED_FOR_USER_0 0x10
  32 
  33 /* Written: David Ungar 4/19/97 */
  34 
  35 // Contains all the definitions needed for sparc assembly code generation.
  36 
  37 // Register aliases for parts of the system:
  38 
  39 // 64 bit values can be kept in g1-g5, o1-o5 and o7 and all 64 bits are safe
  40 // across context switches in V8+ ABI.  Of course, there are no 64 bit regs
  41 // in V8 ABI. All 64 bits are preserved in V9 ABI for all registers.
  42 
  43 // g2-g4 are scratch registers called "application globals".  Their
  44 // meaning is reserved to the "compilation system"--which means us!
  45  * They are not supposed to be touched by ordinary C code, although
  46 // highly-optimized C code might steal them for temps.  They are safe
  47 // across thread switches, and the ABI requires that they be safe
  48 // across function calls.
  49 //
  50 // g1 and g3 are touched by more modules.  V8 allows g1 to be clobbered
  51 // across func calls, and V8+ also allows g5 to be clobbered across
  52 // func calls.  Also, g1 and g5 can get touched while doing shared
  53 // library loading.
  54 //
  55 // We must not touch g7 (it is the thread-self register) and g6 is
  56 // reserved for certain tools.  g0, of course, is always zero.
  57 //
  58 // (Sources:  SunSoft Compilers Group, thread library engineers.)
  59 
  60 // %%%% The interpreter should be revisited to reduce global scratch regs.
  61 
  62 // This global always holds the current JavaThread pointer:
  63 
  64 REGISTER_DECLARATION(Register, G2_thread , G2);
  65 REGISTER_DECLARATION(Register, G6_heapbase , G6);
  66 
  67 // The following globals are part of the Java calling convention:
  68 
  69 REGISTER_DECLARATION(Register, G5_method             , G5);
  70 REGISTER_DECLARATION(Register, G5_megamorphic_method , G5_method);
  71 REGISTER_DECLARATION(Register, G5_inline_cache_reg   , G5_method);
  72 
  73 // The following globals are used for the new C1 & interpreter calling convention:
  74 REGISTER_DECLARATION(Register, Gargs        , G4); // pointing to the last argument
  75 
  76 // This local is used to preserve G2_thread in the interpreter and in stubs:
  77 REGISTER_DECLARATION(Register, L7_thread_cache , L7);
  78 
  79 // These globals are used as scratch registers in the interpreter:
  80 
  81 REGISTER_DECLARATION(Register, Gframe_size   , G1); // SAME REG as G1_scratch
  82 REGISTER_DECLARATION(Register, G1_scratch    , G1); // also SAME
  83 REGISTER_DECLARATION(Register, G3_scratch    , G3);
  84 REGISTER_DECLARATION(Register, G4_scratch    , G4);
  85 
  86 // These globals are used as short-lived scratch registers in the compiler:
  87 
  88 REGISTER_DECLARATION(Register, Gtemp  , G5);
  89 
  90 // JSR 292 fixed register usages:
  91 REGISTER_DECLARATION(Register, G5_method_type        , G5);
  92 REGISTER_DECLARATION(Register, G3_method_handle      , G3);
  93 REGISTER_DECLARATION(Register, L7_mh_SP_save         , L7);
  94 
  95 // The compiler requires that G5_megamorphic_method is G5_inline_cache_reg,
  96 // because a single patchable "set" instruction (NativeMovConstReg,
  97 // or NativeMovConstPatching for compiler1)
  98 // serves to set up either quantity, depending on whether the compiled
  99 // call site is an inline cache or is megamorphic.  See the function
 100 // CompiledIC::set_to_megamorphic.
 101 //
 102 // If an inline cache targets an interpreted method, then the
 103 // G5 register will be used twice during the call.  First,
 104 // the call site will be patched to load a compiledICHolder
 105 // into G5. (This is an ordered pair of ic_klass, method.)
 106 // The c2i adapter will first check the ic_klass, then load
 107 // G5_method with the method part of the pair just before
 108 // jumping into the interpreter.
 109 //
 110 // Note that G5_method is only the method-self for the interpreter,
 111 // and is logically unrelated to G5_megamorphic_method.
 112 //
 113 // Invariants on G2_thread (the JavaThread pointer):
 114 //  - it should not be used for any other purpose anywhere
 115 //  - it must be re-initialized by StubRoutines::call_stub()
 116 //  - it must be preserved around every use of call_VM
 117 
 118 // We can consider using g2/g3/g4 to cache more values than the
 119 // JavaThread, such as the card-marking base or perhaps pointers into
 120 // Eden.  It's something of a waste to use them as scratch temporaries,
 121 // since they are not supposed to be volatile.  (Of course, if we find
 122 // that Java doesn't benefit from application globals, then we can just
 123 // use them as ordinary temporaries.)
 124 //
 125 // Since g1 and g5 (and/or g6) are the volatile (caller-save) registers,
 126 // it makes sense to use them routinely for procedure linkage,
 127 // whenever the On registers are not applicable.  Examples:  G5_method,
 128 // G5_inline_cache_reg, and a double handful of miscellaneous compiler
 129 // stubs.  This means that compiler stubs, etc., should be kept to a
 130 // maximum of two or three G-register arguments.
 131 
 132 
 133 // stub frames
 134 
 135 REGISTER_DECLARATION(Register, Lentry_args      , L0); // pointer to args passed to callee (interpreter) not stub itself
 136 
 137 // Interpreter frames
 138 
 139 #ifdef CC_INTERP
 140 REGISTER_DECLARATION(Register, Lstate           , L0); // interpreter state object pointer
 141 REGISTER_DECLARATION(Register, L1_scratch       , L1); // scratch
 142 REGISTER_DECLARATION(Register, Lmirror          , L1); // mirror (for native methods only)
 143 REGISTER_DECLARATION(Register, L2_scratch       , L2);
 144 REGISTER_DECLARATION(Register, L3_scratch       , L3);
 145 REGISTER_DECLARATION(Register, L4_scratch       , L4);
 146 REGISTER_DECLARATION(Register, Lscratch         , L5); // C1 uses
 147 REGISTER_DECLARATION(Register, Lscratch2        , L6); // C1 uses
 148 REGISTER_DECLARATION(Register, L7_scratch       , L7); // constant pool cache
 149 REGISTER_DECLARATION(Register, O5_savedSP       , O5);
 150 REGISTER_DECLARATION(Register, I5_savedSP       , I5); // Saved SP before bumping for locals.  This is simply
 151                                                        // a copy of SP, so in 64-bit it's a biased value.  The bias
 152                                                        // is added and removed as needed in the frame code.
 153 // Interface to signature handler
 154 REGISTER_DECLARATION(Register, Llocals          , L7); // pointer to locals for signature handler
 155 REGISTER_DECLARATION(Register, Lmethod          , L6); // methodOop when calling signature handler
 156 
 157 #else
 158 REGISTER_DECLARATION(Register, Lesp             , L0); // expression stack pointer
 159 REGISTER_DECLARATION(Register, Lbcp             , L1); // pointer to next bytecode
 160 REGISTER_DECLARATION(Register, Lmethod          , L2);
 161 REGISTER_DECLARATION(Register, Llocals          , L3);
 162 REGISTER_DECLARATION(Register, Largs            , L3); // pointer to locals for signature handler
 163                                                        // must match Llocals in asm interpreter
 164 REGISTER_DECLARATION(Register, Lmonitors        , L4);
 165 REGISTER_DECLARATION(Register, Lbyte_code       , L5);
 166 // When calling out from the interpreter we record SP so that we can remove any extra stack
 167 // space allocated during adapter transitions. This register is only live from the point
 168 // of the call until we return.
 169 REGISTER_DECLARATION(Register, Llast_SP         , L5);
 170 REGISTER_DECLARATION(Register, Lscratch         , L5);
 171 REGISTER_DECLARATION(Register, Lscratch2        , L6);
 172 REGISTER_DECLARATION(Register, LcpoolCache      , L6); // constant pool cache
 173 
 174 REGISTER_DECLARATION(Register, O5_savedSP       , O5);
 175 REGISTER_DECLARATION(Register, I5_savedSP       , I5); // Saved SP before bumping for locals.  This is simply
 176                                                        // a copy of SP, so in 64-bit it's a biased value.  The bias
 177                                                        // is added and removed as needed in the frame code.
 178 REGISTER_DECLARATION(Register, IdispatchTables  , I4); // Base address of the bytecode dispatch tables
 179 REGISTER_DECLARATION(Register, IdispatchAddress , I3); // Register which saves the dispatch address for each bytecode
 180 REGISTER_DECLARATION(Register, ImethodDataPtr   , I2); // Pointer to the current method data
 181 #endif /* CC_INTERP */
 182 
 183 // NOTE: Lscratch2 and LcpoolCache point to the same registers in
 184 //       the interpreter code. If Lscratch2 needs to be used for some
 185 //       purpose, then LcpoolCache must be restored afterwards for
 186 //       the interpreter to work correctly.
 187 // (These assignments must be compatible with L7_thread_cache; see above.)
 188 
 189 // Since Lbcp points into the middle of the method object,
 190 // it is temporarily converted into a "bcx" during GC.
 191 
 192 // Exception processing
 193 // These registers are passed into exception handlers.
 194 // All exception handlers require the exception object being thrown.
 195 // In addition, an nmethod's exception handler must be passed
 196 // the address of the call site within the nmethod, to allow
 197 // proper selection of the applicable catch block.
 198 // (Interpreter frames use their own bcp() for this purpose.)
 199 //
 200 // The Oissuing_pc value is not always needed.  When jumping to a
 201 // handler that is known to be interpreted, the Oissuing_pc value can be
 202 // omitted.  An actual catch block in compiled code receives (from its
 203 // nmethod's exception handler) the thrown exception in the Oexception,
 204 // but it doesn't need the Oissuing_pc.
 205 //
 206 // If an exception handler (either interpreted or compiled)
 207 // discovers there is no applicable catch block, it updates
 208 // the Oissuing_pc to the continuation PC of its own caller,
 209 // pops back to that caller's stack frame, and executes that
 210 // caller's exception handler.  Obviously, this process will
 211 // iterate until the control stack is popped back to a method
 212 // containing an applicable catch block.  A key invariant is
 213 // that the Oissuing_pc value is always a value local to
 214 // the method whose exception handler is currently executing.
 215 //
 216 // Note:  The issuing PC value is __not__ a raw return address (I7 value).
 217 // It is a "return pc", the address __following__ the call.
 218 // Raw return addresses are converted to issuing PCs by frame::pc(),
 219 // or by stubs.  Issuing PCs can be used directly with PC range tables.
 220 //
 221 REGISTER_DECLARATION(Register, Oexception  , O0); // exception being thrown
 222 REGISTER_DECLARATION(Register, Oissuing_pc , O1); // where the exception is coming from
 223 
 224 
 225 // These must occur after the declarations above
 226 #ifndef DONT_USE_REGISTER_DEFINES
 227 
 228 #define Gthread             AS_REGISTER(Register, Gthread)
 229 #define Gmethod             AS_REGISTER(Register, Gmethod)
 230 #define Gmegamorphic_method AS_REGISTER(Register, Gmegamorphic_method)
 231 #define Ginline_cache_reg   AS_REGISTER(Register, Ginline_cache_reg)
 232 #define Gargs               AS_REGISTER(Register, Gargs)
 233 #define Lthread_cache       AS_REGISTER(Register, Lthread_cache)
 234 #define Gframe_size         AS_REGISTER(Register, Gframe_size)
 235 #define Gtemp               AS_REGISTER(Register, Gtemp)
 236 
 237 #ifdef CC_INTERP
 238 #define Lstate              AS_REGISTER(Register, Lstate)
 239 #define Lesp                AS_REGISTER(Register, Lesp)
 240 #define L1_scratch          AS_REGISTER(Register, L1_scratch)
 241 #define Lmirror             AS_REGISTER(Register, Lmirror)
 242 #define L2_scratch          AS_REGISTER(Register, L2_scratch)
 243 #define L3_scratch          AS_REGISTER(Register, L3_scratch)
 244 #define L4_scratch          AS_REGISTER(Register, L4_scratch)
 245 #define Lscratch            AS_REGISTER(Register, Lscratch)
 246 #define Lscratch2           AS_REGISTER(Register, Lscratch2)
 247 #define L7_scratch          AS_REGISTER(Register, L7_scratch)
 248 #define Ostate              AS_REGISTER(Register, Ostate)
 249 #else
 250 #define Lesp                AS_REGISTER(Register, Lesp)
 251 #define Lbcp                AS_REGISTER(Register, Lbcp)
 252 #define Lmethod             AS_REGISTER(Register, Lmethod)
 253 #define Llocals             AS_REGISTER(Register, Llocals)
 254 #define Lmonitors           AS_REGISTER(Register, Lmonitors)
 255 #define Lbyte_code          AS_REGISTER(Register, Lbyte_code)
 256 #define Lscratch            AS_REGISTER(Register, Lscratch)
 257 #define Lscratch2           AS_REGISTER(Register, Lscratch2)
 258 #define LcpoolCache         AS_REGISTER(Register, LcpoolCache)
 259 #endif /* ! CC_INTERP */
 260 
 261 #define Lentry_args         AS_REGISTER(Register, Lentry_args)
 262 #define I5_savedSP          AS_REGISTER(Register, I5_savedSP)
 263 #define O5_savedSP          AS_REGISTER(Register, O5_savedSP)
 264 #define IdispatchAddress    AS_REGISTER(Register, IdispatchAddress)
 265 #define ImethodDataPtr      AS_REGISTER(Register, ImethodDataPtr)
 266 #define IdispatchTables     AS_REGISTER(Register, IdispatchTables)
 267 
 268 #define Oexception          AS_REGISTER(Register, Oexception)
 269 #define Oissuing_pc         AS_REGISTER(Register, Oissuing_pc)
 270 
 271 
 272 #endif
 273 
 274 // Address is an abstraction used to represent a memory location.
 275 //
 276 // Note: A register location is represented via a Register, not
 277 //       via an address for efficiency & simplicity reasons.
 278 
 279 class Address VALUE_OBJ_CLASS_SPEC {
 280  private:
 281   Register           _base;           // Base register.
 282   RegisterOrConstant _index_or_disp;  // Index register or constant displacement.
 283   RelocationHolder   _rspec;
 284 
 285  public:
 286   Address() : _base(noreg), _index_or_disp(noreg) {}
 287 
 288   Address(Register base, RegisterOrConstant index_or_disp)
 289     : _base(base),
 290       _index_or_disp(index_or_disp) {
 291   }
 292 
 293   Address(Register base, Register index)
 294     : _base(base),
 295       _index_or_disp(index) {
 296   }
 297 
 298   Address(Register base, int disp)
 299     : _base(base),
 300       _index_or_disp(disp) {
 301   }
 302 
 303 #ifdef ASSERT
 304   // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
 305   Address(Register base, ByteSize disp)
 306     : _base(base),
 307       _index_or_disp(in_bytes(disp)) {
 308   }
 309 #endif
 310 
 311   // accessors
 312   Register base()      const { return _base; }
 313   Register index()     const { return _index_or_disp.as_register(); }
 314   int      disp()      const { return _index_or_disp.as_constant(); }
 315 
 316   bool     has_index() const { return _index_or_disp.is_register(); }
 317   bool     has_disp()  const { return _index_or_disp.is_constant(); }
 318 
 319   const relocInfo::relocType rtype() { return _rspec.type(); }
 320   const RelocationHolder&    rspec() { return _rspec; }
 321 
 322   RelocationHolder rspec(int offset) const {
 323     return offset == 0 ? _rspec : _rspec.plus(offset);
 324   }
 325 
 326   inline bool is_simm13(int offset = 0);  // check disp+offset for overflow
 327 
 328   Address plus_disp(int plusdisp) const {     // bump disp by a small amount
 329     assert(_index_or_disp.is_constant(), "must have a displacement");
 330     Address a(base(), disp() + plusdisp);
 331     return a;
 332   }
 333 
 334   Address after_save() const {
 335     Address a = (*this);
 336     a._base = a._base->after_save();
 337     return a;
 338   }
 339 
 340   Address after_restore() const {
 341     Address a = (*this);
 342     a._base = a._base->after_restore();
 343     return a;
 344   }
 345 
 346   // Convert the raw encoding form into the form expected by the
 347   // constructor for Address.
 348   static Address make_raw(int base, int index, int scale, int disp, bool disp_is_oop);
 349 
 350   friend class Assembler;
 351 };
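// Usage sketch (illustrative, not part of the original header): an Address can
// be formed as base+displacement or base+index; the register choices and the
// offsets below are arbitrary examples.
//
//   Address field_addr(G3_scratch, 8);              // [%g3 + 8]
//   Address elem_addr (O0, O1);                     // [%o0 + %o1]
//   Address next_field = field_addr.plus_disp(4);   // [%g3 + 12]
//   // field_addr.has_disp() is true, elem_addr.has_index() is true.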
 352 
 353 
 354 class AddressLiteral VALUE_OBJ_CLASS_SPEC {
 355  private:
 356   address          _address;
 357   RelocationHolder _rspec;
 358 
 359   RelocationHolder rspec_from_rtype(relocInfo::relocType rtype, address addr) {
 360     switch (rtype) {
 361     case relocInfo::external_word_type:
 362       return external_word_Relocation::spec(addr);
 363     case relocInfo::internal_word_type:
 364       return internal_word_Relocation::spec(addr);
 365 #ifdef _LP64
 366     case relocInfo::opt_virtual_call_type:
 367       return opt_virtual_call_Relocation::spec();
 368     case relocInfo::static_call_type:
 369       return static_call_Relocation::spec();
 370     case relocInfo::runtime_call_type:
 371       return runtime_call_Relocation::spec();
 372 #endif
 373     case relocInfo::none:
 374       return RelocationHolder();
 375     default:
 376       ShouldNotReachHere();
 377       return RelocationHolder();
 378     }
 379   }
 380 
 381  protected:
 382   // creation
 383   AddressLiteral() : _address(NULL), _rspec(NULL) {}
 384 
 385  public:
 386   AddressLiteral(address addr, RelocationHolder const& rspec)
 387     : _address(addr),
 388       _rspec(rspec) {}
 389 
 390   // Some constructors to avoid casting at the call site.
 391   AddressLiteral(jobject obj, RelocationHolder const& rspec)
 392     : _address((address) obj),
 393       _rspec(rspec) {}
 394 
 395   AddressLiteral(intptr_t value, RelocationHolder const& rspec)
 396     : _address((address) value),
 397       _rspec(rspec) {}
 398 
 399   AddressLiteral(address addr, relocInfo::relocType rtype = relocInfo::none)
 400     : _address((address) addr),
 401     _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 402 
 403   // Some constructors to avoid casting at the call site.
 404   AddressLiteral(address* addr, relocInfo::relocType rtype = relocInfo::none)
 405     : _address((address) addr),
 406     _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 407 
 408   AddressLiteral(bool* addr, relocInfo::relocType rtype = relocInfo::none)
 409     : _address((address) addr),
 410       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 411 
 412   AddressLiteral(const bool* addr, relocInfo::relocType rtype = relocInfo::none)
 413     : _address((address) addr),
 414       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 415 
 416   AddressLiteral(signed char* addr, relocInfo::relocType rtype = relocInfo::none)
 417     : _address((address) addr),
 418       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 419 
 420   AddressLiteral(int* addr, relocInfo::relocType rtype = relocInfo::none)
 421     : _address((address) addr),
 422       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 423 
 424   AddressLiteral(intptr_t addr, relocInfo::relocType rtype = relocInfo::none)
 425     : _address((address) addr),
 426       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 427 
 428 #ifdef _LP64
 429   // The 32-bit build complains about a multiple declaration for int*.
 430   AddressLiteral(intptr_t* addr, relocInfo::relocType rtype = relocInfo::none)
 431     : _address((address) addr),
 432       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 433 #endif
 434 
 435   AddressLiteral(oop addr, relocInfo::relocType rtype = relocInfo::none)
 436     : _address((address) addr),
 437       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 438 
 439   AddressLiteral(float* addr, relocInfo::relocType rtype = relocInfo::none)
 440     : _address((address) addr),
 441       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 442 
 443   AddressLiteral(double* addr, relocInfo::relocType rtype = relocInfo::none)
 444     : _address((address) addr),
 445       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 446 
 447   intptr_t value() const { return (intptr_t) _address; }
 448   int      low10() const;
 449 
 450   const relocInfo::relocType rtype() const { return _rspec.type(); }
 451   const RelocationHolder&    rspec() const { return _rspec; }
 452 
 453   RelocationHolder rspec(int offset) const {
 454     return offset == 0 ? _rspec : _rspec.plus(offset);
 455   }
 456 };
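// Usage sketch (illustrative): an AddressLiteral pairs an absolute address
// with its relocation specification.  'SomeGlobalFlag' below is a hypothetical
// C++ global.
//
//   AddressLiteral flag_addr((address)&SomeGlobalFlag,
//                            relocInfo::external_word_type);
//   // flag_addr.value() gives the raw bits; flag_addr.low10() gives the low
//   // 10 bits, as consumed by a sethi/or ("set") sequence.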
 457 
 458 
 459 inline Address RegisterImpl::address_in_saved_window() const {
 460    return (Address(SP, (sp_offset_in_saved_window() * wordSize) + STACK_BIAS));
 461 }
 462 
 463 
 464 
 465 // Argument is an abstraction used to represent an outgoing
 466 // actual argument or an incoming formal parameter, whether
 467 // it resides in memory or in a register, in a manner consistent
 468 // with the SPARC Application Binary Interface, or ABI.  This is
 469 // often referred to as the native or C calling convention.
 470 
 471 class Argument VALUE_OBJ_CLASS_SPEC {
 472  private:
 473   int _number;
 474   bool _is_in;
 475 
 476  public:
 477 #ifdef _LP64
 478   enum {
 479     n_register_parameters = 6,          // only 6 registers may contain integer parameters
 480     n_float_register_parameters = 16    // Can have up to 16 floating registers
 481   };
 482 #else
 483   enum {
 484     n_register_parameters = 6           // only 6 registers may contain integer parameters
 485   };
 486 #endif
 487 
 488   // creation
 489   Argument(int number, bool is_in) : _number(number), _is_in(is_in) {}
 490 
 491   int  number() const  { return _number;  }
 492   bool is_in()  const  { return _is_in;   }
 493   bool is_out() const  { return !is_in(); }
 494 
 495   Argument successor() const  { return Argument(number() + 1, is_in()); }
 496   Argument as_in()     const  { return Argument(number(), true ); }
 497   Argument as_out()    const  { return Argument(number(), false); }
 498 
 499   // locating register-based arguments:
 500   bool is_register() const { return _number < n_register_parameters; }
 501 
 502 #ifdef _LP64
 503   // locating Floating Point register-based arguments:
 504   bool is_float_register() const { return _number < n_float_register_parameters; }
 505 
 506   FloatRegister as_float_register() const {
 507     assert(is_float_register(), "must be a register argument");
 508     return as_FloatRegister(( number() *2 ) + 1);
 509   }
 510   FloatRegister as_double_register() const {
 511     assert(is_float_register(), "must be a register argument");
 512     return as_FloatRegister(( number() *2 ));
 513   }
 514 #endif
 515 
 516   Register as_register() const {
 517     assert(is_register(), "must be a register argument");
 518     return is_in() ? as_iRegister(number()) : as_oRegister(number());
 519   }
 520 
 521   // locating memory-based arguments
 522   Address as_address() const {
 523     assert(!is_register(), "must be a memory argument");
 524     return address_in_frame();
 525   }
 526 
 527   // When applied to a register-based argument, give the corresponding address
 528   // into the 6-word area "into which callee may store register arguments"
 529   // (This is a different place than the corresponding register-save area location.)
 530   Address address_in_frame() const;
 531 
 532   // debugging
 533   const char* name() const;
 534 
 535   friend class Assembler;
 536 };
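// Usage sketch (illustrative): iterating over outgoing C arguments, using
// registers while they last and frame slots afterwards.  'total_args' is a
// hypothetical count.
//
//   Argument arg(0, false);                         // first outgoing argument
//   for (int i = 0; i < total_args; i++, arg = arg.successor()) {
//     if (arg.is_register()) {
//       // pass the value in arg.as_register() (an O register when outgoing)
//     } else {
//       // store the value to arg.as_address() in the caller's frame
//     }
//   }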
 537 
 538 
 539 // The SPARC Assembler: a pure assembler doing NO optimizations at the
 540 // instruction level; i.e., what you write is what you get.
 541 // The Assembler generates code into a CodeBuffer.
 542 
 543 class Assembler : public AbstractAssembler  {
 544  protected:
 545 
 546   static void print_instruction(int inst);
 547   static int  patched_branch(int dest_pos, int inst, int inst_pos);
 548   static int  branch_destination(int inst, int pos);
 549 
 550 
 551   friend class AbstractAssembler;
 552   friend class AddressLiteral;
 553 
 554   // code patchers need various routines like inv_wdisp()
 555   friend class NativeInstruction;
 556   friend class NativeGeneralJump;
 557   friend class Relocation;
 558   friend class Label;
 559 
 560  public:
 561   // op carries format info; see page 62 & 267
 562 
 563   enum ops {
 564     call_op   = 1, // fmt 1
 565     branch_op = 0, // also sethi (fmt2)
 566     arith_op  = 2, // fmt 3, arith & misc
 567     ldst_op   = 3  // fmt 3, load/store
 568   };
 569 
 570   enum op2s {
 571     bpr_op2   = 3,
 572     fb_op2    = 6,
 573     fbp_op2   = 5,
 574     br_op2    = 2,
 575     bp_op2    = 1,
 576     cb_op2    = 7, // V8
 577     sethi_op2 = 4
 578   };
 579 
 580   enum op3s {
 581     // selected op3s
 582     add_op3      = 0x00,
 583     and_op3      = 0x01,
 584     or_op3       = 0x02,
 585     xor_op3      = 0x03,
 586     sub_op3      = 0x04,
 587     andn_op3     = 0x05,
 588     orn_op3      = 0x06,
 589     xnor_op3     = 0x07,
 590     addc_op3     = 0x08,
 591     mulx_op3     = 0x09,
 592     umul_op3     = 0x0a,
 593     smul_op3     = 0x0b,
 594     subc_op3     = 0x0c,
 595     udivx_op3    = 0x0d,
 596     udiv_op3     = 0x0e,
 597     sdiv_op3     = 0x0f,
 598 
 599     addcc_op3    = 0x10,
 600     andcc_op3    = 0x11,
 601     orcc_op3     = 0x12,
 602     xorcc_op3    = 0x13,
 603     subcc_op3    = 0x14,
 604     andncc_op3   = 0x15,
 605     orncc_op3    = 0x16,
 606     xnorcc_op3   = 0x17,
 607     addccc_op3   = 0x18,
 608     umulcc_op3   = 0x1a,
 609     smulcc_op3   = 0x1b,
 610     subccc_op3   = 0x1c,
 611     udivcc_op3   = 0x1e,
 612     sdivcc_op3   = 0x1f,
 613 
 614     taddcc_op3   = 0x20,
 615     tsubcc_op3   = 0x21,
 616     taddcctv_op3 = 0x22,
 617     tsubcctv_op3 = 0x23,
 618     mulscc_op3   = 0x24,
 619     sll_op3      = 0x25,
 620     sllx_op3     = 0x25,
 621     srl_op3      = 0x26,
 622     srlx_op3     = 0x26,
 623     sra_op3      = 0x27,
 624     srax_op3     = 0x27,
 625     rdreg_op3    = 0x28,
 626     membar_op3   = 0x28,
 627 
 628     flushw_op3   = 0x2b,
 629     movcc_op3    = 0x2c,
 630     sdivx_op3    = 0x2d,
 631     popc_op3     = 0x2e,
 632     movr_op3     = 0x2f,
 633 
 634     sir_op3      = 0x30,
 635     wrreg_op3    = 0x30,
 636     saved_op3    = 0x31,
 637 
 638     fpop1_op3    = 0x34,
 639     fpop2_op3    = 0x35,
 640     impdep1_op3  = 0x36,
 641     impdep2_op3  = 0x37,
 642     jmpl_op3     = 0x38,
 643     rett_op3     = 0x39,
 644     trap_op3     = 0x3a,
 645     flush_op3    = 0x3b,
 646     save_op3     = 0x3c,
 647     restore_op3  = 0x3d,
 648     done_op3     = 0x3e,
 649     retry_op3    = 0x3e,
 650 
 651     lduw_op3     = 0x00,
 652     ldub_op3     = 0x01,
 653     lduh_op3     = 0x02,
 654     ldd_op3      = 0x03,
 655     stw_op3      = 0x04,
 656     stb_op3      = 0x05,
 657     sth_op3      = 0x06,
 658     std_op3      = 0x07,
 659     ldsw_op3     = 0x08,
 660     ldsb_op3     = 0x09,
 661     ldsh_op3     = 0x0a,
 662     ldx_op3      = 0x0b,
 663 
 664     ldstub_op3   = 0x0d,
 665     stx_op3      = 0x0e,
 666     swap_op3     = 0x0f,
 667 
 668     stwa_op3     = 0x14,
 669     stxa_op3     = 0x1e,
 670 
 671     ldf_op3      = 0x20,
 672     ldfsr_op3    = 0x21,
 673     ldqf_op3     = 0x22,
 674     lddf_op3     = 0x23,
 675     stf_op3      = 0x24,
 676     stfsr_op3    = 0x25,
 677     stqf_op3     = 0x26,
 678     stdf_op3     = 0x27,
 679 
 680     prefetch_op3 = 0x2d,
 681 
 682 
 683     ldc_op3      = 0x30,
 684     ldcsr_op3    = 0x31,
 685     lddc_op3     = 0x33,
 686     stc_op3      = 0x34,
 687     stcsr_op3    = 0x35,
 688     stdcq_op3    = 0x36,
 689     stdc_op3     = 0x37,
 690 
 691     casa_op3     = 0x3c,
 692     casxa_op3    = 0x3e,
 693 
 694     alt_bit_op3  = 0x10,
 695      cc_bit_op3  = 0x10
 696   };
 697 
 698   enum opfs {
 699     // selected opfs
 700     fmovs_opf   = 0x01,
 701     fmovd_opf   = 0x02,
 702 
 703     fnegs_opf   = 0x05,
 704     fnegd_opf   = 0x06,
 705 
 706     fadds_opf   = 0x41,
 707     faddd_opf   = 0x42,
 708     fsubs_opf   = 0x45,
 709     fsubd_opf   = 0x46,
 710 
 711     fmuls_opf   = 0x49,
 712     fmuld_opf   = 0x4a,
 713     fdivs_opf   = 0x4d,
 714     fdivd_opf   = 0x4e,
 715 
 716     fcmps_opf   = 0x51,
 717     fcmpd_opf   = 0x52,
 718 
 719     fstox_opf   = 0x81,
 720     fdtox_opf   = 0x82,
 721     fxtos_opf   = 0x84,
 722     fxtod_opf   = 0x88,
 723     fitos_opf   = 0xc4,
 724     fdtos_opf   = 0xc6,
 725     fitod_opf   = 0xc8,
 726     fstod_opf   = 0xc9,
 727     fstoi_opf   = 0xd1,
 728     fdtoi_opf   = 0xd2
 729   };
 730 
 731   enum RCondition {  rc_z = 1,  rc_lez = 2,  rc_lz = 3, rc_nz = 5, rc_gz = 6, rc_gez = 7  };
 732 
 733   enum Condition {
 734      // for FBfcc & FBPfcc instruction
 735     f_never                     = 0,
 736     f_notEqual                  = 1,
 737     f_notZero                   = 1,
 738     f_lessOrGreater             = 2,
 739     f_unorderedOrLess           = 3,
 740     f_less                      = 4,
 741     f_unorderedOrGreater        = 5,
 742     f_greater                   = 6,
 743     f_unordered                 = 7,
 744     f_always                    = 8,
 745     f_equal                     = 9,
 746     f_zero                      = 9,
 747     f_unorderedOrEqual          = 10,
 748     f_greaterOrEqual            = 11,
 749     f_unorderedOrGreaterOrEqual = 12,
 750     f_lessOrEqual               = 13,
 751     f_unorderedOrLessOrEqual    = 14,
 752     f_ordered                   = 15,
 753 
 754     // V8 coproc, pp 123 v8 manual
 755 
 756     cp_always  = 8,
 757     cp_never   = 0,
 758     cp_3       = 7,
 759     cp_2       = 6,
 760     cp_2or3    = 5,
 761     cp_1       = 4,
 762     cp_1or3    = 3,
 763     cp_1or2    = 2,
 764     cp_1or2or3 = 1,
 765     cp_0       = 9,
 766     cp_0or3    = 10,
 767     cp_0or2    = 11,
 768     cp_0or2or3 = 12,
 769     cp_0or1    = 13,
 770     cp_0or1or3 = 14,
 771     cp_0or1or2 = 15,
 772 
 773 
 774     // for integers
 775 
 776     never                 =  0,
 777     equal                 =  1,
 778     zero                  =  1,
 779     lessEqual             =  2,
 780     less                  =  3,
 781     lessEqualUnsigned     =  4,
 782     lessUnsigned          =  5,
 783     carrySet              =  5,
 784     negative              =  6,
 785     overflowSet           =  7,
 786     always                =  8,
 787     notEqual              =  9,
 788     notZero               =  9,
 789     greater               =  10,
 790     greaterEqual          =  11,
 791     greaterUnsigned       =  12,
 792     greaterEqualUnsigned  =  13,
 793     carryClear            =  13,
 794     positive              =  14,
 795     overflowClear         =  15
 796   };
 797 
 798   enum CC {
 799     icc  = 0,  xcc  = 2,
 800     // ptr_cc is the correct condition code for a pointer or intptr_t:
 801     ptr_cc = NOT_LP64(icc) LP64_ONLY(xcc),
 802     fcc0 = 0,  fcc1 = 1, fcc2 = 2, fcc3 = 3
 803   };
 804 
 805   enum PrefetchFcn {
 806     severalReads = 0,  oneRead = 1,  severalWritesAndPossiblyReads = 2, oneWrite = 3, page = 4
 807   };
 808 
 809  public:
 810   // Helper functions for groups of instructions
 811 
 812   enum Predict { pt = 1, pn = 0 }; // pt = predict taken
 813 
 814   enum Membar_mask_bits { // page 184, v9
 815     StoreStore = 1 << 3,
 816     LoadStore  = 1 << 2,
 817     StoreLoad  = 1 << 1,
 818     LoadLoad   = 1 << 0,
 819 
 820     Sync       = 1 << 6,
 821     MemIssue   = 1 << 5,
 822     Lookaside  = 1 << 4
 823   };
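  // Illustrative note (not in the original header): the mask bits above
  // combine with bitwise OR.  For example, a full four-way ordering barrier
  // corresponds to the mask
  //   (StoreStore | LoadStore | StoreLoad | LoadLoad)  ==  0xf
  // which would be handed to the membar emitter declared later in this class.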
 824 
 825   // test if x is within signed immediate range for nbits
 826   static bool is_simm(int x, int nbits) { return -( 1 << nbits-1 )  <= x   &&   x  <  ( 1 << nbits-1 ); }
 827 
 828   // test if -4096 <= x <= 4095
 829   static bool is_simm13(int x) { return is_simm(x, 13); }
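  // Worked example (illustrative): is_simm(x, 13) accepts exactly the 13-bit
  // two's-complement range, so
  //   is_simm13(-4096) and is_simm13(4095) are true,
  //   is_simm13(-4097) and is_simm13(4096) are false.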
 830 
 831   enum ASIs { // page 72, v9
 832     ASI_PRIMARY        = 0x80,
 833     ASI_PRIMARY_LITTLE = 0x88
 834     // add more from book as needed
 835   };
 836 
 837  protected:
 838   // helpers
 839 
 840   // x is supposed to fit in a field "nbits" wide
 841   // and be sign-extended. Check the range.
 842 
 843   static void assert_signed_range(intptr_t x, int nbits) {
 844     assert( nbits == 32
 845         ||  -(1 << nbits-1) <= x  &&  x < ( 1 << nbits-1),
 846       "value out of range");
 847   }
 848 
 849   static void assert_signed_word_disp_range(intptr_t x, int nbits) {
 850     assert( (x & 3) == 0, "not word aligned");
 851     assert_signed_range(x, nbits + 2);
 852   }
 853 
 854   static void assert_unsigned_const(int x, int nbits) {
 855     assert( juint(x)  <  juint(1 << nbits), "unsigned constant out of range");
 856   }
 857 
 858   // fields: note bits numbered from LSB = 0,
 859   //  fields known by inclusive bit range
 860 
 861   static int fmask(juint hi_bit, juint lo_bit) {
 862     assert( hi_bit >= lo_bit  &&  0 <= lo_bit  &&  hi_bit < 32, "bad bits");
 863     return (1 << ( hi_bit-lo_bit + 1 )) - 1;
 864   }
 865 
 866   // inverse of u_field
 867 
 868   static int inv_u_field(int x, int hi_bit, int lo_bit) {
 869     juint r = juint(x) >> lo_bit;
 870     r &= fmask( hi_bit, lo_bit);
 871     return int(r);
 872   }
 873 
 874 
 875   // signed version: extract from field and sign-extend
 876 
 877   static int inv_s_field(int x, int hi_bit, int lo_bit) {
 878     int sign_shift = 31 - hi_bit;
 879     return inv_u_field( ((x << sign_shift) >> sign_shift), hi_bit, lo_bit);
 880   }
 881 
 882   // given a field that ranges from hi_bit to lo_bit (inclusive,
 883   // LSB = 0), and an unsigned value for the field,
 884   // shift it into the field
 885 
 886 #ifdef ASSERT
 887   static int u_field(int x, int hi_bit, int lo_bit) {
 888     assert( ( x & ~fmask(hi_bit, lo_bit))  == 0,
 889             "value out of range");
 890     int r = x << lo_bit;
 891     assert( inv_u_field(r, hi_bit, lo_bit) == x, "just checking");
 892     return r;
 893   }
 894 #else
 895   // make sure this is inlined as it will reduce code size significantly
 896   #define u_field(x, hi_bit, lo_bit)   ((x) << (lo_bit))
 897 #endif
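  // Worked example (illustrative): for the 5-bit rd field in bits 29..25,
  //   fmask(29, 25)                 == 0x1f
  //   u_field(7, 29, 25)            == 7 << 25
  //   inv_u_field(7 << 25, 29, 25)  == 7
  // i.e. u_field shifts a right-justified value into its field and
  // inv_u_field extracts it again.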
 898 
 899   static int inv_op(  int x ) { return inv_u_field(x, 31, 30); }
 900   static int inv_op2( int x ) { return inv_u_field(x, 24, 22); }
 901   static int inv_op3( int x ) { return inv_u_field(x, 24, 19); }
 902   static int inv_cond( int x ){ return inv_u_field(x, 28, 25); }
 903 
 904   static bool inv_immed( int x ) { return (x & Assembler::immed(true)) != 0; }
 905 
 906   static Register inv_rd(  int x ) { return as_Register(inv_u_field(x, 29, 25)); }
 907   static Register inv_rs1( int x ) { return as_Register(inv_u_field(x, 18, 14)); }
 908   static Register inv_rs2( int x ) { return as_Register(inv_u_field(x,  4,  0)); }
 909 
 910   static int op(       int         x)  { return  u_field(x,             31, 30); }
 911   static int rd(       Register    r)  { return  u_field(r->encoding(), 29, 25); }
 912   static int fcn(      int         x)  { return  u_field(x,             29, 25); }
 913   static int op3(      int         x)  { return  u_field(x,             24, 19); }
 914   static int rs1(      Register    r)  { return  u_field(r->encoding(), 18, 14); }
 915   static int rs2(      Register    r)  { return  u_field(r->encoding(),  4,  0); }
 916   static int annul(    bool        a)  { return  u_field(a ? 1 : 0,     29, 29); }
 917   static int cond(     int         x)  { return  u_field(x,             28, 25); }
 918   static int cond_mov( int         x)  { return  u_field(x,             17, 14); }
 919   static int rcond(    RCondition  x)  { return  u_field(x,             12, 10); }
 920   static int op2(      int         x)  { return  u_field(x,             24, 22); }
 921   static int predict(  bool        p)  { return  u_field(p ? 1 : 0,     19, 19); }
 922   static int branchcc( CC       fcca)  { return  u_field(fcca,          21, 20); }
 923   static int cmpcc(    CC       fcca)  { return  u_field(fcca,          26, 25); }
 924   static int imm_asi(  int         x)  { return  u_field(x,             12,  5); }
 925   static int immed(    bool        i)  { return  u_field(i ? 1 : 0,     13, 13); }
 926   static int opf_low6( int         w)  { return  u_field(w,             10,  5); }
 927   static int opf_low5( int         w)  { return  u_field(w,              9,  5); }
 928   static int trapcc(   CC         cc)  { return  u_field(cc,            12, 11); }
 929   static int sx(       int         i)  { return  u_field(i,             12, 12); } // shift x=1 means 64-bit
 930   static int opf(      int         x)  { return  u_field(x,             13,  5); }
 931 
 932   static int opf_cc(   CC          c, bool useFloat ) { return u_field((useFloat ? 0 : 4) + c, 13, 11); }
 933   static int mov_cc(   CC          c, bool useFloat ) { return u_field(useFloat ? 0 : 1,  18, 18) | u_field(c, 12, 11); }
 934 
 935   static int fd( FloatRegister r,  FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 29, 25); };
 936   static int fs1(FloatRegister r,  FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 18, 14); };
 937   static int fs2(FloatRegister r,  FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa),  4,  0); };
 938 
 939   // some float instructions use this encoding on the op3 field
 940   static int alt_op3(int op, FloatRegisterImpl::Width w) {
 941     int r;
 942     switch(w) {
 943      case FloatRegisterImpl::S: r = op + 0;  break;
 944      case FloatRegisterImpl::D: r = op + 3;  break;
 945      case FloatRegisterImpl::Q: r = op + 2;  break;
 946      default: ShouldNotReachHere(); break;
 947     }
 948     return op3(r);
 949   }
 950 
 951 
 952   // compute inverse of simm
 953   static int inv_simm(int x, int nbits) {
 954     return (int)(x << (32 - nbits)) >> (32 - nbits);
 955   }
 956 
 957   static int inv_simm13( int x ) { return inv_simm(x, 13); }
 958 
 959   // signed immediate, in low bits, nbits long
 960   static int simm(int x, int nbits) {
 961     assert_signed_range(x, nbits);
 962     return x  &  (( 1 << nbits ) - 1);
 963   }
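  // Worked example (illustrative): simm() masks a signed value into the low
  // nbits and inv_simm() sign-extends it back, e.g.
  //   simm(-1, 13)         == 0x1fff
  //   inv_simm(0x1fff, 13) == -1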
 964 
 965   // compute inverse of wdisp16
 966   static intptr_t inv_wdisp16(int x, intptr_t pos) {
 967     int lo = x & (( 1 << 14 ) - 1);
 968     int hi = (x >> 20) & 3;
 969     if (hi >= 2) hi |= ~1;
 970     return (((hi << 14) | lo) << 2) + pos;
 971   }
 972 
 973   // word offset, 14 bits at the LS end, 2 bits at B21, B20
 974   static int wdisp16(intptr_t x, intptr_t off) {
 975     intptr_t xx = x - off;
 976     assert_signed_word_disp_range(xx, 16);
 977     int r =  (xx >> 2) & ((1 << 14) - 1)
 978            |  (  ( (xx>>(2+14)) & 3 )  <<  20 );
 979     assert( inv_wdisp16(r, off) == x,  "inverse is not inverse");
 980     return r;
 981   }
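  // Worked example (illustrative): for a target 8 bytes past 'off',
  //   wdisp16(off + 8, off) == 2          (8 >> 2, upper two bits zero)
  //   inv_wdisp16(2, off)   == off + 8
  // matching the round-trip assert above.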
 982 
 983 
 984   // word displacement in low-order nbits bits
 985 
 986   static intptr_t inv_wdisp( int x, intptr_t pos, int nbits ) {
 987     int pre_sign_extend = x & (( 1 << nbits ) - 1);
 988     int r =  pre_sign_extend >= ( 1 << (nbits-1) )
 989        ?   pre_sign_extend | ~(( 1 << nbits ) - 1)
 990        :   pre_sign_extend;
 991     return (r << 2) + pos;
 992   }
 993 
 994   static int wdisp( intptr_t x, intptr_t off, int nbits ) {
 995     intptr_t xx = x - off;
 996     assert_signed_word_disp_range(xx, nbits);
 997     int r =  (xx >> 2) & (( 1 << nbits ) - 1);
 998     assert( inv_wdisp( r, off, nbits )  ==  x, "inverse not inverse");
 999     return r;
1000   }
1001 
1002 
1003   // Extract the top 32 bits of a 64-bit word
1004   static int32_t hi32( int64_t x ) {
1005     int32_t r = int32_t( (uint64_t)x >> 32 );
1006     return r;
1007   }
1008 
1009   // given a sethi instruction, extract the constant, left-justified
1010   static int inv_hi22( int x ) {
1011     return x << 10;
1012   }
1013 
1014   // create an imm22 field, given a 32-bit left-justified constant
1015   static int hi22( int x ) {
1016     int r = int( juint(x) >> 10 );
1017     assert( (r & ~((1 << 22) - 1))  ==  0, "just checkin'");
1018     return r;
1019   }
1020 
1021   // create a low10 __value__ (not a field) for a given 32-bit constant
1022   static int low10( int x ) {
1023     return x & ((1 << 10) - 1);
1024   }
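  // Worked example (illustrative): the sethi/or ("set") idiom splits a 32-bit
  // constant into hi22() and low10().  For x = 0x12345678:
  //   hi22(x)  == 0x48d15             (upper 22 bits, right-justified)
  //   low10(x) == 0x278               (low 10 bits)
  //   (hi22(x) << 10) | low10(x) == x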
1025 
1026   // instruction only in v9
1027   static void v9_only() { assert( VM_Version::v9_instructions_work(), "This instruction only works on SPARC V9"); }
1028 
1029   // instruction only in v8
1030   static void v8_only() { assert( VM_Version::v8_instructions_work(), "This instruction only works on SPARC V8"); }
1031 
1032   // instruction deprecated in v9
1033   static void v9_dep()  { } // do nothing for now
1034 
1035   // some float instructions only exist for single prec. on v8
1036   static void v8_s_only(FloatRegisterImpl::Width w)  { if (w != FloatRegisterImpl::S)  v9_only(); }
1037 
1038   // v8 has no CC field
1039   static void v8_no_cc(CC cc)  { if (cc)  v9_only(); }
1040 
1041  protected:
1042   // Simple delay-slot scheme:
1043   // In order to check the programmer, the assembler keeps track of delay slots.
1044   // It forbids CTIs in delay slots (conservative, but should be OK).
1045   // Also, when putting an instruction into a delay slot, you must say
1046   // asm->delayed()->add(...), in order to check that you don't omit
1047   // delay-slot instructions.
1048   // To implement this, we use a simple FSA
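  // Example (illustrative sketch): emitting a branch and explicitly filling
  // its delay slot.  '__' stands for an assembler pointer and 'done' is a
  // hypothetical Label.
  //
  //   __ bp(Assembler::always, false, Assembler::icc, Assembler::pt, done);
  //   __ delayed()->add(O0, 1, O0);     // the delay-slot instruction
  //
  // Omitting the delayed() call, or putting another CTI in the slot, trips
  // the CHECK_DELAY asserts below.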
1049 
1050 #ifdef ASSERT
1051   #define CHECK_DELAY
1052 #endif
1053 #ifdef CHECK_DELAY
1054   enum Delay_state { no_delay, at_delay_slot, filling_delay_slot } delay_state;
1055 #endif
1056 
1057  public:
1058   // Tells the assembler that the next instruction must NOT be in a delay slot.
1059   // Use at the start of multi-instruction macros.
1060   void assert_not_delayed() {
1061     // This is a separate overloading to avoid creation of string constants
1062     // in non-asserted code--with some compilers this pollutes the object code.
1063 #ifdef CHECK_DELAY
1064     assert_not_delayed("next instruction should not be a delay slot");
1065 #endif
1066   }
1067   void assert_not_delayed(const char* msg) {
1068 #ifdef CHECK_DELAY
1069     assert(delay_state == no_delay, msg);
1070 #endif
1071   }
1072 
1073  protected:
1074   // Delay slot helpers
1075   // cti is called when emitting control-transfer instruction,
1076   // BEFORE doing the emitting.
1077   // Only effective when assertion-checking is enabled.
1078   void cti() {
1079 #ifdef CHECK_DELAY
1080     assert_not_delayed("cti should not be in delay slot");
1081 #endif
1082   }
1083 
1084   // called when emitting cti with a delay slot, AFTER emitting
1085   void has_delay_slot() {
1086 #ifdef CHECK_DELAY
1087     assert_not_delayed("just checking");
1088     delay_state = at_delay_slot;
1089 #endif
1090   }
1091 
1092 public:
1093   // Tells the assembler that you know the next instruction is delayed
1094   Assembler* delayed() {
1095 #ifdef CHECK_DELAY
1096     assert ( delay_state == at_delay_slot, "delayed instruction is not in delay slot");
1097     delay_state = filling_delay_slot;
1098 #endif
1099     return this;
1100   }
1101 
1102   void flush() {
1103 #ifdef CHECK_DELAY
1104     assert ( delay_state == no_delay, "ending code with a delay slot");
1105 #endif
1106     AbstractAssembler::flush();
1107   }
1108 
1109   inline void emit_long(int);  // shadows AbstractAssembler::emit_long
1110   inline void emit_data(int x) { emit_long(x); }
1111   inline void emit_data(int, RelocationHolder const&);
1112   inline void emit_data(int, relocInfo::relocType rtype);
1113   // helper for above fcns
1114   inline void check_delay();
1115 
1116 
1117  public:
1118   // instructions, refer to page numbers in the SPARC Architecture Manual, V9
1119 
1120   // pp 135 (addc was addx in v8)
1121 
1122   inline void add(Register s1, Register s2, Register d );
1123   inline void add(Register s1, int simm13a, Register d, relocInfo::relocType rtype = relocInfo::none);
1124   inline void add(Register s1, int simm13a, Register d, RelocationHolder const& rspec);
1125   inline void add(Register s1, RegisterOrConstant s2, Register d, int offset = 0);
1126   inline void add(const Address& a, Register d, int offset = 0) { add( a.base(), a.disp() + offset, d, a.rspec(offset)); }
1127 
1128   void addcc(  Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3  | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
1129   void addcc(  Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3  | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1130   void addc(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(addc_op3             ) | rs1(s1) | rs2(s2) ); }
1131   void addc(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(addc_op3             ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1132   void addccc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(addc_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
1133   void addccc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(addc_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1134 
1135   // pp 136
1136 
1137   inline void bpr( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt = relocInfo::none );
1138   inline void bpr( RCondition c, bool a, Predict p, Register s1, Label& L);
1139 
1140  protected: // use MacroAssembler::br instead
1141 
1142   // pp 138
1143 
1144   inline void fb( Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none );
1145   inline void fb( Condition c, bool a, Label& L );
1146 
1147   // pp 141
1148 
1149   inline void fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
1150   inline void fbp( Condition c, bool a, CC cc, Predict p, Label& L );
1151 
1152  public:
1153 
1154   // pp 144
1155 
1156   inline void br( Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none );
1157   inline void br( Condition c, bool a, Label& L );
1158 
1159   // pp 146
1160 
1161   inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
1162   inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );
1163 
1164   // pp 121 (V8)
1165 
1166   inline void cb( Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none );
1167   inline void cb( Condition c, bool a, Label& L );
1168 
1169   // pp 149
1170 
1171   inline void call( address d,  relocInfo::relocType rt = relocInfo::runtime_call_type );
1172   inline void call( Label& L,   relocInfo::relocType rt = relocInfo::runtime_call_type );
1173 
1174   // pp 150
1175 
1176   // These instructions compare the contents of s2 with the contents of
1177   // memory at the address in s1. If the values are equal, the contents of
1178   // memory at address s1 are swapped with the data in d. If the values are
1179   // not equal, the contents of memory at s1 are loaded into d, without the swap.
1180 
1181   void casa(  Register s1, Register s2, Register d, int ia = -1 ) { v9_only();  emit_long( op(ldst_op) | rd(d) | op3(casa_op3 ) | rs1(s1) | (ia == -1  ? immed(true) : imm_asi(ia)) | rs2(s2)); }
1182   void casxa( Register s1, Register s2, Register d, int ia = -1 ) { v9_only();  emit_long( op(ldst_op) | rd(d) | op3(casxa_op3) | rs1(s1) | (ia == -1  ? immed(true) : imm_asi(ia)) | rs2(s2)); }
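  // Usage sketch (illustrative): with O0 holding the address, O1 the expected
  // value and O2 the new value,
  //
  //   __ casxa(O0, O1, O2);   // 64-bit compare-and-swap, default asi argument
  //
  // leaves the previous memory contents in O2; O2 == O1 indicates success.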
1183 
1184   // pp 152
1185 
1186   void udiv(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(udiv_op3             ) | rs1(s1) | rs2(s2)); }
1187   void udiv(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(udiv_op3             ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1188   void sdiv(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sdiv_op3             ) | rs1(s1) | rs2(s2)); }
1189   void sdiv(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sdiv_op3             ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1190   void udivcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(udiv_op3 | cc_bit_op3) | rs1(s1) | rs2(s2)); }
1191   void udivcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(udiv_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1192   void sdivcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sdiv_op3 | cc_bit_op3) | rs1(s1) | rs2(s2)); }
1193   void sdivcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sdiv_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1194 
1195   // pp 155
1196 
1197   void done()  { v9_only();  cti();  emit_long( op(arith_op) | fcn(0) | op3(done_op3) ); }
1198   void retry() { v9_only();  cti();  emit_long( op(arith_op) | fcn(1) | op3(retry_op3) ); }
1199 
1200   // pp 156
1201 
1202   void fadd( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x40 + w) | fs2(s2, w)); }
1203   void fsub( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | fs1(s1, w) | opf(0x44 + w) | fs2(s2, w)); }
1204 
1205   // pp 157
1206 
1207   void fcmp(  FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc);  emit_long( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); }
1208   void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc);  emit_long( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); }
1209 
1210   // pp 159
1211 
1212   void ftox( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only();  emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x80 + w) | fs2(s, w)); }
1213   void ftoi( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) {             emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0xd0 + w) | fs2(s, w)); }
1214 
1215   // pp 160
1216 
1217   void ftof( FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw, FloatRegister s, FloatRegister d ) { emit_long( op(arith_op) | fd(d, dw) | op3(fpop1_op3) | opf(0xc0 + sw + dw*4) | fs2(s, sw)); }
1218 
1219   // pp 161
1220 
1221   void fxtof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v9_only();  emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x80 + w*4) | fs2(s, w)); }
1222   void fitof( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) {             emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0xc0 + w*4) | fs2(s, w)); }
1223 
1224   // pp 162
1225 
1226   void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w);  emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w)); }
1227 
1228   void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w);  emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); }
1229 
1230   // page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fnegs is the only instruction available
1231   // on v8 to do negation of single, double and quad precision floats.
1232 
1233   void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_long( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(sd, w)); else emit_long( op(arith_op) | fd(sd, w) | op3(fpop1_op3) |  opf(0x05) | fs2(sd, w)); }
1234 
1235   void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w);  emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); }
1236 
1237   // page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fabss is the only instruction available
1238   // on v8 to do abs operation on single/double/quad precision floats.
1239 
1240   void fabs( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_long( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(sd, w)); else emit_long( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x09) | fs2(sd, w)); }
1241 
1242   // pp 163
1243 
1244   void fmul( FloatRegisterImpl::Width w,                            FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w)  | op3(fpop1_op3) | fs1(s1, w)  | opf(0x48 + w)         | fs2(s2, w)); }
1245   void fmul( FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw,  FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_long( op(arith_op) | fd(d, dw) | op3(fpop1_op3) | fs1(s1, sw) | opf(0x60 + sw + dw*4) | fs2(s2, sw)); }
1246   void fdiv( FloatRegisterImpl::Width w,                            FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w)  | op3(fpop1_op3) | fs1(s1, w)  | opf(0x4c + w)         | fs2(s2, w)); }
1247 
1248   // pp 164
1249 
1250   void fsqrt( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_long( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x28 + w) | fs2(s, w)); }
1251 
1252   // pp 165
1253 
1254   inline void flush( Register s1, Register s2 );
1255   inline void flush( Register s1, int simm13a);
1256 
1257   // pp 167
1258 
1259   void flushw() { v9_only();  emit_long( op(arith_op) | op3(flushw_op3) ); }
1260 
1261   // pp 168
1262 
1263   void illtrap( int const22a) { if (const22a != 0) v9_only();  emit_long( op(branch_op) | u_field(const22a, 21, 0) ); }
1264   // v8 unimp == illtrap(0)
1265 
1266   // pp 169
1267 
1268   void impdep1( int id1, int const19a ) { v9_only();  emit_long( op(arith_op) | fcn(id1) | op3(impdep1_op3) | u_field(const19a, 18, 0)); }
1269   void impdep2( int id1, int const19a ) { v9_only();  emit_long( op(arith_op) | fcn(id1) | op3(impdep2_op3) | u_field(const19a, 18, 0)); }
1270 
1271   // pp 149 (v8)
1272 
1273   void cpop1( int opc, int cr1, int cr2, int crd ) { v8_only();  emit_long( op(arith_op) | fcn(crd) | op3(impdep1_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); }
1274   void cpop2( int opc, int cr1, int cr2, int crd ) { v8_only();  emit_long( op(arith_op) | fcn(crd) | op3(impdep2_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); }
1275 
1276   // pp 170
1277 
1278   void jmpl( Register s1, Register s2, Register d );
1279   void jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec = RelocationHolder() );
1280 
1281   // 171
1282 
1283   inline void ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d);
1284   inline void ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d);
1285   inline void ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec = RelocationHolder());
1286 
1287   inline void ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset = 0);
1288 
1289 
1290   inline void ldfsr(  Register s1, Register s2 );
1291   inline void ldfsr(  Register s1, int simm13a);
1292   inline void ldxfsr( Register s1, Register s2 );
1293   inline void ldxfsr( Register s1, int simm13a);
1294 
1295   // pp 94 (v8)
1296 
1297   inline void ldc(   Register s1, Register s2, int crd );
1298   inline void ldc(   Register s1, int simm13a, int crd);
1299   inline void lddc(  Register s1, Register s2, int crd );
1300   inline void lddc(  Register s1, int simm13a, int crd);
1301   inline void ldcsr( Register s1, Register s2, int crd );
1302   inline void ldcsr( Register s1, int simm13a, int crd);
1303 
1304 
1305   // pp 173
1306 
1307   void ldfa(  FloatRegisterImpl::Width w, Register s1, Register s2, int ia, FloatRegister d ) { v9_only();  emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1308   void ldfa(  FloatRegisterImpl::Width w, Register s1, int simm13a,         FloatRegister d ) { v9_only();  emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1309 
1310   // pp 175, lduw is ld on v8
1311 
1312   inline void ldsb(  Register s1, Register s2, Register d );
1313   inline void ldsb(  Register s1, int simm13a, Register d);
1314   inline void ldsh(  Register s1, Register s2, Register d );
1315   inline void ldsh(  Register s1, int simm13a, Register d);
1316   inline void ldsw(  Register s1, Register s2, Register d );
1317   inline void ldsw(  Register s1, int simm13a, Register d);
1318   inline void ldub(  Register s1, Register s2, Register d );
1319   inline void ldub(  Register s1, int simm13a, Register d);
1320   inline void lduh(  Register s1, Register s2, Register d );
1321   inline void lduh(  Register s1, int simm13a, Register d);
1322   inline void lduw(  Register s1, Register s2, Register d );
1323   inline void lduw(  Register s1, int simm13a, Register d);
1324   inline void ldx(   Register s1, Register s2, Register d );
1325   inline void ldx(   Register s1, int simm13a, Register d);
1326   inline void ld(    Register s1, Register s2, Register d );
1327   inline void ld(    Register s1, int simm13a, Register d);
1328   inline void ldd(   Register s1, Register s2, Register d );
1329   inline void ldd(   Register s1, int simm13a, Register d);
1330 
1331 #ifdef ASSERT
1332   // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
1333   inline void ld(    Register s1, ByteSize simm13a, Register d);
1334 #endif
1335 
1336   inline void ldsb(const Address& a, Register d, int offset = 0);
1337   inline void ldsh(const Address& a, Register d, int offset = 0);
1338   inline void ldsw(const Address& a, Register d, int offset = 0);
1339   inline void ldub(const Address& a, Register d, int offset = 0);
1340   inline void lduh(const Address& a, Register d, int offset = 0);
1341   inline void lduw(const Address& a, Register d, int offset = 0);
1342   inline void ldx( const Address& a, Register d, int offset = 0);
1343   inline void ld(  const Address& a, Register d, int offset = 0);
1344   inline void ldd( const Address& a, Register d, int offset = 0);
1345 
1346   inline void ldub(  Register s1, RegisterOrConstant s2, Register d );
1347   inline void ldsb(  Register s1, RegisterOrConstant s2, Register d );
1348   inline void lduh(  Register s1, RegisterOrConstant s2, Register d );
1349   inline void ldsh(  Register s1, RegisterOrConstant s2, Register d );
1350   inline void lduw(  Register s1, RegisterOrConstant s2, Register d );
1351   inline void ldsw(  Register s1, RegisterOrConstant s2, Register d );
1352   inline void ldx(   Register s1, RegisterOrConstant s2, Register d );
1353   inline void ld(    Register s1, RegisterOrConstant s2, Register d );
1354   inline void ldd(   Register s1, RegisterOrConstant s2, Register d );
1355 
1356   // pp 177
1357 
1358   void ldsba(  Register s1, Register s2, int ia, Register d ) {             emit_long( op(ldst_op) | rd(d) | op3(ldsb_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1359   void ldsba(  Register s1, int simm13a,         Register d ) {             emit_long( op(ldst_op) | rd(d) | op3(ldsb_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1360   void ldsha(  Register s1, Register s2, int ia, Register d ) {             emit_long( op(ldst_op) | rd(d) | op3(ldsh_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1361   void ldsha(  Register s1, int simm13a,         Register d ) {             emit_long( op(ldst_op) | rd(d) | op3(ldsh_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1362   void ldswa(  Register s1, Register s2, int ia, Register d ) { v9_only();  emit_long( op(ldst_op) | rd(d) | op3(ldsw_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1363   void ldswa(  Register s1, int simm13a,         Register d ) { v9_only();  emit_long( op(ldst_op) | rd(d) | op3(ldsw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1364   void lduba(  Register s1, Register s2, int ia, Register d ) {             emit_long( op(ldst_op) | rd(d) | op3(ldub_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1365   void lduba(  Register s1, int simm13a,         Register d ) {             emit_long( op(ldst_op) | rd(d) | op3(ldub_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1366   void lduha(  Register s1, Register s2, int ia, Register d ) {             emit_long( op(ldst_op) | rd(d) | op3(lduh_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1367   void lduha(  Register s1, int simm13a,         Register d ) {             emit_long( op(ldst_op) | rd(d) | op3(lduh_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1368   void lduwa(  Register s1, Register s2, int ia, Register d ) {             emit_long( op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1369   void lduwa(  Register s1, int simm13a,         Register d ) {             emit_long( op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1370   void ldxa(   Register s1, Register s2, int ia, Register d ) { v9_only();  emit_long( op(ldst_op) | rd(d) | op3(ldx_op3  | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1371   void ldxa(   Register s1, int simm13a,         Register d ) { v9_only();  emit_long( op(ldst_op) | rd(d) | op3(ldx_op3  | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1372   void ldda(   Register s1, Register s2, int ia, Register d ) { v9_dep();   emit_long( op(ldst_op) | rd(d) | op3(ldd_op3  | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1373   void ldda(   Register s1, int simm13a,         Register d ) { v9_dep();   emit_long( op(ldst_op) | rd(d) | op3(ldd_op3  | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1374 
1375   // pp 179
1376 
1377   inline void ldstub(  Register s1, Register s2, Register d );
1378   inline void ldstub(  Register s1, int simm13a, Register d);
1379 
1380   // pp 180
1381 
1382   void ldstuba( Register s1, Register s2, int ia, Register d ) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1383   void ldstuba( Register s1, int simm13a,         Register d ) { emit_long( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1384 
1385   // pp 181
1386 
1387   void and3(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3              ) | rs1(s1) | rs2(s2) ); }
1388   void and3(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3              ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1389   void andcc(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3  | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
1390   void andcc(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(and_op3  | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1391   void andn(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3             ) | rs1(s1) | rs2(s2) ); }
1392   void andn(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3             ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1393   void andn(    Register s1, RegisterOrConstant s2, Register d);
1394   void andncc(  Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
1395   void andncc(  Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(andn_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1396   void or3(     Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3               ) | rs1(s1) | rs2(s2) ); }
1397   void or3(     Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3               ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1398   void orcc(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3   | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
1399   void orcc(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(or_op3   | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1400   void orn(     Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | rs2(s2) ); }
1401   void orn(     Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1402   void orncc(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3  | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
1403   void orncc(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(orn_op3  | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1404   void xor3(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3              ) | rs1(s1) | rs2(s2) ); }
1405   void xor3(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3              ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1406   void xorcc(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3  | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
1407   void xorcc(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xor_op3  | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1408   void xnor(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xnor_op3             ) | rs1(s1) | rs2(s2) ); }
1409   void xnor(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xnor_op3             ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1410   void xnorcc(  Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xnor_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
1411   void xnorcc(  Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(xnor_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1412 
1413   // pp 183
1414 
1415   void membar( Membar_mask_bits const7a ) { v9_only(); emit_long( op(arith_op) | op3(membar_op3) | rs1(O7) | immed(true) | u_field( int(const7a), 6, 0)); }
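       // Illustrative sketch, not from the original source: a combined barrier
       // would typically be requested as, e.g.,
       //   membar( Membar_mask_bits(StoreLoad | LoadStore) );
       // assuming StoreLoad and LoadStore are among the Membar_mask_bits values
       // declared earlier in this class.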
1416 
1417   // pp 185
1418 
1419   void fmov( FloatRegisterImpl::Width w, Condition c,  bool floatCC, CC cca, FloatRegister s2, FloatRegister d ) { v9_only();  emit_long( op(arith_op) | fd(d, w) | op3(fpop2_op3) | cond_mov(c) | opf_cc(cca, floatCC) | opf_low6(w) | fs2(s2, w)); }
1420 
1421   // pp 189
1422 
1423   void fmov( FloatRegisterImpl::Width w, RCondition c, Register s1,  FloatRegister s2, FloatRegister d ) { v9_only();  emit_long( op(arith_op) | fd(d, w) | op3(fpop2_op3) | rs1(s1) | rcond(c) | opf_low5(4 + w) | fs2(s2, w)); }
1424 
1425   // pp 191
1426 
1427   void movcc( Condition c, bool floatCC, CC cca, Register s2, Register d ) { v9_only();  emit_long( op(arith_op) | rd(d) | op3(movcc_op3) | mov_cc(cca, floatCC) | cond_mov(c) | rs2(s2) ); }
1428   void movcc( Condition c, bool floatCC, CC cca, int simm11a, Register d ) { v9_only();  emit_long( op(arith_op) | rd(d) | op3(movcc_op3) | mov_cc(cca, floatCC) | cond_mov(c) | immed(true) | simm(simm11a, 11) ); }
1429 
1430   // pp 195
1431 
1432   void movr( RCondition c, Register s1, Register s2,  Register d ) { v9_only();  emit_long( op(arith_op) | rd(d) | op3(movr_op3) | rs1(s1) | rcond(c) | rs2(s2) ); }
1433   void movr( RCondition c, Register s1, int simm10a,  Register d ) { v9_only();  emit_long( op(arith_op) | rd(d) | op3(movr_op3) | rs1(s1) | rcond(c) | immed(true) | simm(simm10a, 10) ); }
1434 
1435   // pp 196
1436 
1437   void mulx(  Register s1, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(mulx_op3 ) | rs1(s1) | rs2(s2) ); }
1438   void mulx(  Register s1, int simm13a, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(mulx_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1439   void sdivx( Register s1, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(sdivx_op3) | rs1(s1) | rs2(s2) ); }
1440   void sdivx( Register s1, int simm13a, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(sdivx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1441   void udivx( Register s1, Register s2, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(udivx_op3) | rs1(s1) | rs2(s2) ); }
1442   void udivx( Register s1, int simm13a, Register d ) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(udivx_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1443 
1444   // pp 197
1445 
1446   void umul(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(umul_op3             ) | rs1(s1) | rs2(s2) ); }
1447   void umul(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(umul_op3             ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1448   void smul(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(smul_op3             ) | rs1(s1) | rs2(s2) ); }
1449   void smul(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(smul_op3             ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1450   void umulcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(umul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
1451   void umulcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(umul_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1452   void smulcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
1453   void smulcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1454 
1455   // pp 199
1456 
1457   void mulscc(   Register s1, Register s2, Register d ) { v9_dep();  emit_long( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | rs2(s2) ); }
1458   void mulscc(   Register s1, int simm13a, Register d ) { v9_dep();  emit_long( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1459 
1460   // pp 201
1461 
1462   void nop() { emit_long( op(branch_op) | op2(sethi_op2) ); }
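       // Note added for clarity: this is the standard SPARC nop encoding,
       // "sethi 0, %g0" (all remaining fields zero), valid on both V8 and V9.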
1463 
1464 
1465   // pp 202
1466 
1467   void popc( Register s,  Register d) { v9_only();  emit_long( op(arith_op) | rd(d) | op3(popc_op3) | rs2(s)); }
1468   void popc( int simm13a, Register d) { v9_only();  emit_long( op(arith_op) | rd(d) | op3(popc_op3) | immed(true) | simm(simm13a, 13)); }
1469 
1470   // pp 203
1471 
1472   void prefetch(   Register s1, Register s2,         PrefetchFcn f);
1473   void prefetch(   Register s1, int simm13a,         PrefetchFcn f);
1474   void prefetcha(  Register s1, Register s2, int ia, PrefetchFcn f ) { v9_only();  emit_long( op(ldst_op) | fcn(f) | op3(prefetch_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1475   void prefetcha(  Register s1, int simm13a,         PrefetchFcn f ) { v9_only();  emit_long( op(ldst_op) | fcn(f) | op3(prefetch_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1476 
1477   inline void prefetch(const Address& a, PrefetchFcn F, int offset = 0);
1478 
1479   // pp 208
1480 
1481   // not implementing read privileged register
1482 
1483   inline void rdy(    Register d) { v9_dep();  emit_long( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(0, 18, 14)); }
1484   inline void rdccr(  Register d) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(2, 18, 14)); }
1485   inline void rdasi(  Register d) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(3, 18, 14)); }
1486   inline void rdtick( Register d) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(4, 18, 14)); } // Spoon!
1487   inline void rdpc(   Register d) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(5, 18, 14)); }
1488   inline void rdfprs( Register d) { v9_only(); emit_long( op(arith_op) | rd(d) | op3(rdreg_op3) | u_field(6, 18, 14)); }
1489 
1490   // pp 213
1491 
1492   inline void rett( Register s1, Register s2);
1493   inline void rett( Register s1, int simm13a, relocInfo::relocType rt = relocInfo::none);
1494 
1495   // pp 214
1496 
1497   void save(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(save_op3) | rs1(s1) | rs2(s2) ); }
1498   void save(    Register s1, int simm13a, Register d ) {
1499     // make sure frame is at least large enough for the register save area
1500     assert(-simm13a >= 16 * wordSize, "frame too small");
1501     emit_long( op(arith_op) | rd(d) | op3(save_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) );
1502   }
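       // Illustrative sketch, assuming SP is the usual stack-pointer register alias:
       // pushing a new register window for a frame of frame_size bytes would be
       //   save(SP, -frame_size, SP);
       // where frame_size is large enough to satisfy the assert above.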
1503 
1504   void restore( Register s1 = G0,  Register s2 = G0, Register d = G0 ) { emit_long( op(arith_op) | rd(d) | op3(restore_op3) | rs1(s1) | rs2(s2) ); }
1505   void restore( Register s1,       int simm13a,      Register d      ) { emit_long( op(arith_op) | rd(d) | op3(restore_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1506 
1507   // pp 216
1508 
1509   void saved()    { v9_only();  emit_long( op(arith_op) | fcn(0) | op3(saved_op3)); }
1510   void restored() { v9_only();  emit_long( op(arith_op) | fcn(1) | op3(saved_op3)); }
1511 
1512   // pp 217
1513 
1514   inline void sethi( int imm22a, Register d, RelocationHolder const& rspec = RelocationHolder() );
1515   // pp 218
1516 
1517   void sll(  Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(0) | rs2(s2) ); }
1518   void sll(  Register s1, int imm5a,   Register d ) { emit_long( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(0) | immed(true) | u_field(imm5a, 4, 0) ); }
1519   void srl(  Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(0) | rs2(s2) ); }
1520   void srl(  Register s1, int imm5a,   Register d ) { emit_long( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(0) | immed(true) | u_field(imm5a, 4, 0) ); }
1521   void sra(  Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(0) | rs2(s2) ); }
1522   void sra(  Register s1, int imm5a,   Register d ) { emit_long( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(0) | immed(true) | u_field(imm5a, 4, 0) ); }
1523 
1524   void sllx( Register s1, Register s2, Register d ) { v9_only();  emit_long( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(1) | rs2(s2) ); }
1525   void sllx( Register s1, int imm6a,   Register d ) { v9_only();  emit_long( op(arith_op) | rd(d) | op3(sll_op3) | rs1(s1) | sx(1) | immed(true) | u_field(imm6a, 5, 0) ); }
1526   void srlx( Register s1, Register s2, Register d ) { v9_only();  emit_long( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(1) | rs2(s2) ); }
1527   void srlx( Register s1, int imm6a,   Register d ) { v9_only();  emit_long( op(arith_op) | rd(d) | op3(srl_op3) | rs1(s1) | sx(1) | immed(true) | u_field(imm6a, 5, 0) ); }
1528   void srax( Register s1, Register s2, Register d ) { v9_only();  emit_long( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(1) | rs2(s2) ); }
1529   void srax( Register s1, int imm6a,   Register d ) { v9_only();  emit_long( op(arith_op) | rd(d) | op3(sra_op3) | rs1(s1) | sx(1) | immed(true) | u_field(imm6a, 5, 0) ); }
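       // Illustrative sketch, not from the original source: scaling an index
       // register by 8 before forming an address could be written as
       //   sllx(Rindex, 3, Rindex);   // V9 only; Rindex is a hypothetical register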
1530 
1531   // pp 220
1532 
1533   void sir( int simm13a ) { emit_long( op(arith_op) | fcn(15) | op3(sir_op3) | immed(true) | simm(simm13a, 13)); }
1534 
1535   // pp 221
1536 
1537   void stbar() { emit_long( op(arith_op) | op3(membar_op3) | u_field(15, 18, 14)); }
1538 
1539   // pp 222
1540 
1541   inline void stf(    FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2);
1542   inline void stf(    FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2);
1543   inline void stf(    FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a);
1544   inline void stf(    FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset = 0);
1545 
1546   inline void stfsr(  Register s1, Register s2 );
1547   inline void stfsr(  Register s1, int simm13a);
1548   inline void stxfsr( Register s1, Register s2 );
1549   inline void stxfsr( Register s1, int simm13a);
1550 
1551   //  pp 224
1552 
1553   void stfa(  FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2, int ia ) { v9_only();  emit_long( op(ldst_op) | fd(d, w) | alt_op3(stf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1554   void stfa(  FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a         ) { v9_only();  emit_long( op(ldst_op) | fd(d, w) | alt_op3(stf_op3 | alt_bit_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1555 
1556   // pp 226
1557 
1558   inline void stb(  Register d, Register s1, Register s2 );
1559   inline void stb(  Register d, Register s1, int simm13a);
1560   inline void sth(  Register d, Register s1, Register s2 );
1561   inline void sth(  Register d, Register s1, int simm13a);
1562   inline void stw(  Register d, Register s1, Register s2 );
1563   inline void stw(  Register d, Register s1, int simm13a);
1564   inline void st(   Register d, Register s1, Register s2 );
1565   inline void st(   Register d, Register s1, int simm13a);
1566   inline void stx(  Register d, Register s1, Register s2 );
1567   inline void stx(  Register d, Register s1, int simm13a);
1568   inline void std(  Register d, Register s1, Register s2 );
1569   inline void std(  Register d, Register s1, int simm13a);
1570 
1571 #ifdef ASSERT
1572   // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
1573   inline void st(   Register d, Register s1, ByteSize simm13a);
1574 #endif
1575 
1576   inline void stb(  Register d, const Address& a, int offset = 0 );
1577   inline void sth(  Register d, const Address& a, int offset = 0 );
1578   inline void stw(  Register d, const Address& a, int offset = 0 );
1579   inline void stx(  Register d, const Address& a, int offset = 0 );
1580   inline void st(   Register d, const Address& a, int offset = 0 );
1581   inline void std(  Register d, const Address& a, int offset = 0 );
1582 
1583   inline void stb(  Register d, Register s1, RegisterOrConstant s2 );
1584   inline void sth(  Register d, Register s1, RegisterOrConstant s2 );
1585   inline void stw(  Register d, Register s1, RegisterOrConstant s2 );
1586   inline void stx(  Register d, Register s1, RegisterOrConstant s2 );
1587   inline void std(  Register d, Register s1, RegisterOrConstant s2 );
1588   inline void st(   Register d, Register s1, RegisterOrConstant s2 );
1589 
1590   // pp 177
1591 
1592   void stba(  Register d, Register s1, Register s2, int ia ) {             emit_long( op(ldst_op) | rd(d) | op3(stb_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1593   void stba(  Register d, Register s1, int simm13a         ) {             emit_long( op(ldst_op) | rd(d) | op3(stb_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1594   void stha(  Register d, Register s1, Register s2, int ia ) {             emit_long( op(ldst_op) | rd(d) | op3(sth_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1595   void stha(  Register d, Register s1, int simm13a         ) {             emit_long( op(ldst_op) | rd(d) | op3(sth_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1596   void stwa(  Register d, Register s1, Register s2, int ia ) {             emit_long( op(ldst_op) | rd(d) | op3(stw_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1597   void stwa(  Register d, Register s1, int simm13a         ) {             emit_long( op(ldst_op) | rd(d) | op3(stw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1598   void stxa(  Register d, Register s1, Register s2, int ia ) { v9_only();  emit_long( op(ldst_op) | rd(d) | op3(stx_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1599   void stxa(  Register d, Register s1, int simm13a         ) { v9_only();  emit_long( op(ldst_op) | rd(d) | op3(stx_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1600   void stda(  Register d, Register s1, Register s2, int ia ) {             emit_long( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1601   void stda(  Register d, Register s1, int simm13a         ) {             emit_long( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1602 
1603   // pp 97 (v8)
1604 
1605   inline void stc(   int crd, Register s1, Register s2 );
1606   inline void stc(   int crd, Register s1, int simm13a);
1607   inline void stdc(  int crd, Register s1, Register s2 );
1608   inline void stdc(  int crd, Register s1, int simm13a);
1609   inline void stcsr( int crd, Register s1, Register s2 );
1610   inline void stcsr( int crd, Register s1, int simm13a);
1611   inline void stdcq( int crd, Register s1, Register s2 );
1612   inline void stdcq( int crd, Register s1, int simm13a);
1613 
1614   // pp 230
1615 
1616   void sub(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3              ) | rs1(s1) | rs2(s2) ); }
1617   void sub(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3              ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1618   void subcc(  Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3 | cc_bit_op3 ) | rs1(s1) | rs2(s2) ); }
1619   void subcc(  Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3 | cc_bit_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1620   void subc(   Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(subc_op3             ) | rs1(s1) | rs2(s2) ); }
1621   void subc(   Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(subc_op3             ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1622   void subccc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(subc_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
1623   void subccc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(subc_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1624 
1625   // pp 231
1626 
1627   inline void swap( Register s1, Register s2, Register d );
1628   inline void swap( Register s1, int simm13a, Register d);
1629   inline void swap( Address& a,               Register d, int offset = 0 );
1630 
1631   // pp 232
1632 
1633   void swapa(   Register s1, Register s2, int ia, Register d ) { v9_dep();  emit_long( op(ldst_op) | rd(d) | op3(swap_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
1634   void swapa(   Register s1, int simm13a,         Register d ) { v9_dep();  emit_long( op(ldst_op) | rd(d) | op3(swap_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1635 
1636   // pp 234, note op in book is wrong, see pp 268
1637 
1638   void taddcc(    Register s1, Register s2, Register d ) {            emit_long( op(arith_op) | rd(d) | op3(taddcc_op3  ) | rs1(s1) | rs2(s2) ); }
1639   void taddcc(    Register s1, int simm13a, Register d ) {            emit_long( op(arith_op) | rd(d) | op3(taddcc_op3  ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1640   void taddcctv(  Register s1, Register s2, Register d ) { v9_dep();  emit_long( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | rs2(s2) ); }
1641   void taddcctv(  Register s1, int simm13a, Register d ) { v9_dep();  emit_long( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1642 
1643   // pp 235
1644 
1645   void tsubcc(    Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(tsubcc_op3  ) | rs1(s1) | rs2(s2) ); }
1646   void tsubcc(    Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(tsubcc_op3  ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1647   void tsubcctv(  Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | rs2(s2) ); }
1648   void tsubcctv(  Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
1649 
1650   // pp 237
1651 
1652   void trap( Condition c, CC cc, Register s1, Register s2 ) { v8_no_cc(cc);  emit_long( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); }
1653   void trap( Condition c, CC cc, Register s1, int trapa   ) { v8_no_cc(cc);  emit_long( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); }
1654   // simple uncond. trap
1655   void trap( int trapa ) { trap( always, icc, G0, trapa ); }
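       // Note added for clarity: with c == always and cc == icc this forms the
       // synthetic "ta" (trap always) instruction; the trap number occupies the
       // low 7 bits of the immediate field.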
1656 
1657   // pp 239 omit write priv register for now
1658 
1659   inline void wry(    Register d) { v9_dep();  emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(0, 29, 25)); }
1660   inline void wrccr(Register s) { v9_only(); emit_long( op(arith_op) | rs1(s) | op3(wrreg_op3) | u_field(2, 29, 25)); }
1661   inline void wrccr(Register s, int simm13a) { v9_only(); emit_long( op(arith_op) |
1662                                                                            rs1(s) |
1663                                                                            op3(wrreg_op3) |
1664                                                                            u_field(2, 29, 25) |
1665                                                                            u_field(1, 13, 13) |
1666                                                                            simm(simm13a, 13)); }
1667   inline void wrasi(  Register d) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(3, 29, 25)); }
1668   inline void wrfprs( Register d) { v9_only(); emit_long( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(6, 29, 25)); }
1669 
1670   // For a given register condition, return the appropriate condition code
1671   // Condition (the one you would use to get the same effect after a "tst"
1672   // on the target register).
1673   Assembler::Condition reg_cond_to_cc_cond(RCondition in);
1674 
1675 
1676   // Creation
1677   Assembler(CodeBuffer* code) : AbstractAssembler(code) {
1678 #ifdef CHECK_DELAY
1679     delay_state = no_delay;
1680 #endif
1681   }
1682 
1683   // Testing
1684 #ifndef PRODUCT
1685   void test_v9();
1686   void test_v8_onlys();
1687 #endif
1688 };
1689 
1690 
1691 class RegistersForDebugging : public StackObj {
1692  public:
1693   intptr_t i[8], l[8], o[8], g[8];
1694   float    f[32];
1695   double   d[32];
1696 
1697   void print(outputStream* s);
1698 
1699   static int i_offset(int j) { return offset_of(RegistersForDebugging, i[j]); }
1700   static int l_offset(int j) { return offset_of(RegistersForDebugging, l[j]); }
1701   static int o_offset(int j) { return offset_of(RegistersForDebugging, o[j]); }
1702   static int g_offset(int j) { return offset_of(RegistersForDebugging, g[j]); }
1703   static int f_offset(int j) { return offset_of(RegistersForDebugging, f[j]); }
1704   static int d_offset(int j) { return offset_of(RegistersForDebugging, d[j / 2]); }
1705 
1706   // gen asm code to save regs
1707   static void save_registers(MacroAssembler* a);
1708 
1709   // restore global registers in case C code disturbed them
1710   static void restore_registers(MacroAssembler* a, Register r);
1711 
1712 
1713 };
1714 
1715 
1716 // MacroAssembler extends Assembler by a few frequently used macros.
1717 //
1718 // Most of the standard SPARC synthetic ops are defined here.
1719 // Instructions for which a 'better' code sequence exists depending
1720 // on arguments should also go in here.
1721 
1722 #define JMP2(r1, r2) jmp(r1, r2, __FILE__, __LINE__)
1723 #define JMP(r1, off) jmp(r1, off, __FILE__, __LINE__)
1724 #define JUMP(a, temp, off)     jump(a, temp, off, __FILE__, __LINE__)
1725 #define JUMPL(a, temp, d, off) jumpl(a, temp, d, off, __FILE__, __LINE__)
1726 
1727 
1728 class MacroAssembler: public Assembler {
1729  protected:
1730   // Support for VM calls
1731   // This is the base routine called by the different versions of call_VM_leaf. The interpreter
1732   // may customize this version by overriding it for its purposes (e.g., to save/restore
1733   // additional registers when doing a VM call).
1734 #ifdef CC_INTERP
1735   #define VIRTUAL
1736 #else
1737   #define VIRTUAL virtual
1738 #endif
1739 
1740   VIRTUAL void call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments);
1741 
1742   //
1743   // It is imperative that all calls into the VM are handled via the call_VM macros.
1744   // They make sure that the stack linkage is setup correctly. call_VM's correspond
1745   // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
1746   //
1747   // This is the base routine called by the different versions of call_VM. The interpreter
1748   // may customize this version by overriding it for its purposes (e.g., to save/restore
1749   // additional registers when doing a VM call).
1750   //
1751   // A non-volatile java_thread_cache register should be specified so
1752   // that the G2_thread value can be preserved across the call.
1753   // (If java_thread_cache is noreg, then a slow get_thread call
1754   // will re-initialize the G2_thread.) call_VM_base returns the register that contains the
1755   // thread.
1756   //
1757   // If no last_java_sp is specified (noreg) then SP will be used instead.
1758 
1759   virtual void call_VM_base(
1760     Register        oop_result,             // where an oop-result ends up if any; use noreg otherwise
1761     Register        java_thread_cache,      // the thread if computed before     ; use noreg otherwise
1762     Register        last_java_sp,           // to set up last_Java_frame in stubs; use noreg otherwise
1763     address         entry_point,            // the entry point
1764     int             number_of_arguments,    // the number of arguments (w/o thread) to pop after call
1765     bool            check_exception=true    // flag which indicates if exception should be checked
1766   );
1767 
1768   // This routine should emit JVMTI PopFrame and ForceEarlyReturn handling code.
1769   // The implementation is only non-empty for the InterpreterMacroAssembler,
1770   // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
1771   virtual void check_and_handle_popframe(Register scratch_reg);
1772   virtual void check_and_handle_earlyret(Register scratch_reg);
1773 
1774  public:
1775   MacroAssembler(CodeBuffer* code) : Assembler(code) {}
1776 
1777   // Support for NULL-checks
1778   //
1779   // Generates code that causes a NULL OS exception if the content of reg is NULL.
1780   // If the accessed location is M[reg + offset] and the offset is known, provide the
1781   // offset.  No explicit code generation is needed if the offset is within a certain
1782   // range (0 <= offset <= page_size).
1783   //
1784   // %%%%%% Currently not done for SPARC
1785 
1786   void null_check(Register reg, int offset = -1);
1787   static bool needs_explicit_null_check(intptr_t offset);
1788 
1789   // support for delayed instructions
1790   MacroAssembler* delayed() { Assembler::delayed();  return this; }
1791 
1792   // branches that use right instruction for v8 vs. v9
1793   inline void br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
1794   inline void br( Condition c, bool a, Predict p, Label& L );
1795   inline void fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
1796   inline void fb( Condition c, bool a, Predict p, Label& L );
1797 
1798   // compares register with zero and branches (V9 and V8 instructions)
1799   void br_zero( Condition c, bool a, Predict p, Register s1, Label& L);
1800   // Compares a pointer register with zero and branches on (not)null.
1801   // Does a test & branch on 32-bit systems and a register-branch on 64-bit.
1802   void br_null   ( Register s1, bool a, Predict p, Label& L );
1803   void br_notnull( Register s1, bool a, Predict p, Label& L );
1804 
1805   // These versions will do the most efficient thing on v8 and v9.  Perhaps
1806   // this is what the routine above was meant to do, but it didn't (and
1807   // didn't cover both target address kinds.)
1808   void br_on_reg_cond( RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt = relocInfo::none );
1809   void br_on_reg_cond( RCondition c, bool a, Predict p, Register s1, Label& L);
1810 
1811   inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
1812   inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );
1813 
1814   // Branch that tests xcc in LP64 and icc in !LP64
1815   inline void brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
1816   inline void brx( Condition c, bool a, Predict p, Label& L );
1817 
1818   // unconditional short branch
1819   inline void ba( bool a, Label& L );
1820 
1821   // Branch that tests fp condition codes
1822   inline void fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
1823   inline void fbp( Condition c, bool a, CC cc, Predict p, Label& L );
1824 
1825   // get PC the best way
1826   inline int get_pc( Register d );
1827 
1828   // Sparc shorthands (pp 85, V8 manual; pp 289, V9 manual)
1829   inline void cmp(  Register s1, Register s2 ) { subcc( s1, s2, G0 ); }
1830   inline void cmp(  Register s1, int simm13a ) { subcc( s1, simm13a, G0 ); }
1831 
1832   inline void jmp( Register s1, Register s2 );
1833   inline void jmp( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
1834 
1835   inline void call( address d,  relocInfo::relocType rt = relocInfo::runtime_call_type );
1836   inline void call( Label& L,   relocInfo::relocType rt = relocInfo::runtime_call_type );
1837   inline void callr( Register s1, Register s2 );
1838   inline void callr( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
1839 
1840   // Emits nothing on V8
1841   inline void iprefetch( address d, relocInfo::relocType rt = relocInfo::none );
1842   inline void iprefetch( Label& L);
1843 
1844   inline void tst( Register s ) { orcc( G0, s, G0 ); }
1845 
1846 #ifdef PRODUCT
1847   inline void ret(  bool trace = TraceJumps )   { if (trace) {
1848                                                     mov(I7, O7); // traceable register
1849                                                     JMP(O7, 2 * BytesPerInstWord);
1850                                                   } else {
1851                                                     jmpl( I7, 2 * BytesPerInstWord, G0 );
1852                                                   }
1853                                                 }
1854 
1855   inline void retl( bool trace = TraceJumps )  { if (trace) JMP(O7, 2 * BytesPerInstWord);
1856                                                  else jmpl( O7, 2 * BytesPerInstWord, G0 ); }
1857 #else
1858   void ret(  bool trace = TraceJumps );
1859   void retl( bool trace = TraceJumps );
1860 #endif /* PRODUCT */
1861 
1862   // Required platform-specific helpers for Label::patch_instructions.
1863   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
1864   void pd_patch_instruction(address branch, address target);
1865 #ifndef PRODUCT
1866   static void pd_print_patched_instruction(address branch);
1867 #endif
1868 
1869   // sethi Macro handles optimizations and relocations
1870 private:
1871   void internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable);
1872 public:
1873   void sethi(const AddressLiteral& addrlit, Register d);
1874   void patchable_sethi(const AddressLiteral& addrlit, Register d);
1875 
1876   // compute the size of a sethi/set
1877   static int  size_of_sethi( address a, bool worst_case = false );
1878   static int  worst_case_size_of_set();
1879 
1880   // set may be either setsw or setuw (high 32 bits may be zero or sign)
1881 private:
1882   void internal_set(const AddressLiteral& al, Register d, bool ForceRelocatable);
1883 public:
1884   void set(const AddressLiteral& addrlit, Register d);
1885   void set(intptr_t value, Register d);
1886   void set(address addr, Register d, RelocationHolder const& rspec);
1887   void patchable_set(const AddressLiteral& addrlit, Register d);
1888   void patchable_set(intptr_t value, Register d);
1889   void set64(jlong value, Register d, Register tmp);
1890 
1891   // sign-extend 32 to 64
1892   inline void signx( Register s, Register d ) { sra( s, G0, d); }
1893   inline void signx( Register d )             { sra( d, G0, d); }
1894 
1895   inline void not1( Register s, Register d ) { xnor( s, G0, d ); }
1896   inline void not1( Register d )             { xnor( d, G0, d ); }
1897 
1898   inline void neg( Register s, Register d ) { sub( G0, s, d ); }
1899   inline void neg( Register d )             { sub( G0, d, d ); }
1900 
1901   inline void cas(  Register s1, Register s2, Register d) { casa( s1, s2, d, ASI_PRIMARY); }
1902   inline void casx( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY); }
1903   // Functions for isolating 64 bit atomic swaps for LP64
1904   // cas_ptr will perform cas for 32 bit VM's and casx for 64 bit VM's
1905   inline void cas_ptr(  Register s1, Register s2, Register d) {
1906 #ifdef _LP64
1907     casx( s1, s2, d );
1908 #else
1909     cas( s1, s2, d );
1910 #endif
1911   }
1912 
1913   // Functions for isolating 64 bit shifts for LP64
1914   inline void sll_ptr( Register s1, Register s2, Register d );
1915   inline void sll_ptr( Register s1, int imm6a,   Register d );
1916   inline void sll_ptr( Register s1, RegisterOrConstant s2, Register d );
1917   inline void srl_ptr( Register s1, Register s2, Register d );
1918   inline void srl_ptr( Register s1, int imm6a,   Register d );
1919 
1920   // little-endian
1921   inline void casl(  Register s1, Register s2, Register d) { casa( s1, s2, d, ASI_PRIMARY_LITTLE); }
1922   inline void casxl( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY_LITTLE); }
1923 
1924   inline void inc(   Register d,  int const13 = 1 ) { add(   d, const13, d); }
1925   inline void inccc( Register d,  int const13 = 1 ) { addcc( d, const13, d); }
1926 
1927   inline void dec(   Register d,  int const13 = 1 ) { sub(   d, const13, d); }
1928   inline void deccc( Register d,  int const13 = 1 ) { subcc( d, const13, d); }
1929 
1930   inline void btst( Register s1,  Register s2 ) { andcc( s1, s2, G0 ); }
1931   inline void btst( int simm13a,  Register s )  { andcc( s,  simm13a, G0 ); }
1932 
1933   inline void bset( Register s1,  Register s2 ) { or3( s1, s2, s2 ); }
1934   inline void bset( int simm13a,  Register s )  { or3( s,  simm13a, s ); }
1935 
1936   inline void bclr( Register s1,  Register s2 ) { andn( s1, s2, s2 ); }
1937   inline void bclr( int simm13a,  Register s )  { andn( s,  simm13a, s ); }
1938 
1939   inline void btog( Register s1,  Register s2 ) { xor3( s1, s2, s2 ); }
1940   inline void btog( int simm13a,  Register s )  { xor3( s,  simm13a, s ); }
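       // Note added for clarity: in the two-register forms of bset/bclr/btog above,
       // the second register is both an operand and the destination, e.g.
       // bset(Rmask, Rval) computes Rval |= Rmask (Rmask/Rval are hypothetical names).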
1941 
1942   inline void clr( Register d ) { or3( G0, G0, d ); }
1943 
1944   inline void clrb( Register s1, Register s2);
1945   inline void clrh( Register s1, Register s2);
1946   inline void clr(  Register s1, Register s2);
1947   inline void clrx( Register s1, Register s2);
1948 
1949   inline void clrb( Register s1, int simm13a);
1950   inline void clrh( Register s1, int simm13a);
1951   inline void clr(  Register s1, int simm13a);
1952   inline void clrx( Register s1, int simm13a);
1953 
1954   // copy & clear upper word
1955   inline void clruw( Register s, Register d ) { srl( s, G0, d); }
1956   // clear upper word
1957   inline void clruwu( Register d ) { srl( d, G0, d); }
1958 
1959   // membar pseudo instruction; takes the target memory model into account.
1960   inline void membar( Assembler::Membar_mask_bits const7a );
1961 
1962   // returns whether the membar would generate any instructions.
1963   inline bool membar_has_effect( Assembler::Membar_mask_bits const7a );
1964 
1965   // mov pseudo instructions
1966   inline void mov( Register s,  Register d) {
1967     if ( s != d )    or3( G0, s, d);
1968     else             assert_not_delayed();  // Put something useful in the delay slot!
1969   }
1970 
1971   inline void mov_or_nop( Register s,  Register d) {
1972     if ( s != d )    or3( G0, s, d);
1973     else             nop();
1974   }
1975 
1976   inline void mov( int simm13a, Register d) { or3( G0, simm13a, d); }
1977 
1978   // address pseudos: make these names unlike instruction names to avoid confusion
1979   inline intptr_t load_pc_address( Register reg, int bytes_to_skip );
1980   inline void load_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
1981   inline void load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
1982   inline void store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset = 0);
1983   inline void store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset = 0);
1984   inline void jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset = 0);
1985   inline void jump_to(const AddressLiteral& addrlit, Register temp, int offset = 0);
1986   inline void jump_indirect_to(Address& a, Register temp, int ld_offset = 0, int jmp_offset = 0);
1987 
1988   // ring buffer traceable jumps
1989 
1990   void jmp2( Register r1, Register r2, const char* file, int line );
1991   void jmp ( Register r1, int offset,  const char* file, int line );
1992 
1993   void jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line);
1994   void jump (const AddressLiteral& addrlit, Register temp,             int offset, const char* file, int line);
1995 
1996 
1997   // argument pseudos:
1998 
1999   inline void load_argument( Argument& a, Register  d );
2000   inline void store_argument( Register s, Argument& a );
2001   inline void store_ptr_argument( Register s, Argument& a );
2002   inline void store_float_argument( FloatRegister s, Argument& a );
2003   inline void store_double_argument( FloatRegister s, Argument& a );
2004   inline void store_long_argument( Register s, Argument& a );
2005 
2006   // handy macros:
2007 
2008   inline void round_to( Register r, int modulus ) {
2009     assert_not_delayed();
2010     inc( r, modulus - 1 );
2011     and3( r, -modulus, r );
2012   }
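       // Note added for clarity: round_to rounds r up to a multiple of modulus and
       // relies on modulus being a power of two (the and3 with -modulus clears the
       // low bits); e.g. round_to(r, 8) aligns a byte count to 8.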
2013 
2014   // --------------------------------------------------
2015 
2016   // Functions for isolating 64 bit loads for LP64
2017   // ld_ptr will perform ld for 32 bit VM's and ldx for 64 bit VM's
2018   // st_ptr will perform st for 32 bit VM's and stx for 64 bit VM's
2019   inline void ld_ptr(Register s1, Register s2, Register d);
2020   inline void ld_ptr(Register s1, int simm13a, Register d);
2021   inline void ld_ptr(Register s1, RegisterOrConstant s2, Register d);
2022   inline void ld_ptr(const Address& a, Register d, int offset = 0);
2023   inline void st_ptr(Register d, Register s1, Register s2);
2024   inline void st_ptr(Register d, Register s1, int simm13a);
2025   inline void st_ptr(Register d, Register s1, RegisterOrConstant s2);
2026   inline void st_ptr(Register d, const Address& a, int offset = 0);
2027 
2028 #ifdef ASSERT
2029   // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
2030   inline void ld_ptr(Register s1, ByteSize simm13a, Register d);
2031   inline void st_ptr(Register d, Register s1, ByteSize simm13a);
2032 #endif
2033 
2034   // ld_long will perform ldd for 32 bit VM's and ldx for 64 bit VM's
2035   // st_long will perform std for 32 bit VM's and stx for 64 bit VM's
2036   inline void ld_long(Register s1, Register s2, Register d);
2037   inline void ld_long(Register s1, int simm13a, Register d);
2038   inline void ld_long(Register s1, RegisterOrConstant s2, Register d);
2039   inline void ld_long(const Address& a, Register d, int offset = 0);
2040   inline void st_long(Register d, Register s1, Register s2);
2041   inline void st_long(Register d, Register s1, int simm13a);
2042   inline void st_long(Register d, Register s1, RegisterOrConstant s2);
2043   inline void st_long(Register d, const Address& a, int offset = 0);
2044 
2045   // Helpers for address formation.
2046   // - They emit only a move if s2 is a constant zero.
2047   // - If dest is a constant and either s1 or s2 is a register, the temp argument is required and becomes the result.
2048   // - If dest is a register and either s1 or s2 is a non-simm13 constant, the temp argument is required and used to materialize the constant.
2049   RegisterOrConstant regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
2050   RegisterOrConstant regcon_inc_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
2051   RegisterOrConstant regcon_sll_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
2052 
2053   RegisterOrConstant ensure_simm13_or_reg(RegisterOrConstant src, Register temp) {
2054     if (is_simm13(src.constant_or_zero()))
2055       return src;               // register or short constant
2056     guarantee(temp != noreg, "constant offset overflow");
2057     set(src.as_constant(), temp);
2058     return temp;
2059   }
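       // Illustrative sketch, not from the original source: a caller needing
       // "s1 + src" as an address operand could first normalize the offset with
       //   RegisterOrConstant off = ensure_simm13_or_reg(src, Rtemp);
       // after which off is either a register or a constant that fits in a simm13
       // field (Rtemp is a caller-chosen scratch register, hypothetical here).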
2060 
2061   // --------------------------------------------------
2062 
2063  public:
2064   // traps as per trap.h (SPARC ABI?)
2065 
2066   void breakpoint_trap();
2067   void breakpoint_trap(Condition c, CC cc = icc);
2068   void flush_windows_trap();
2069   void clean_windows_trap();
2070   void get_psr_trap();
2071   void set_psr_trap();
2072 
2073   // V8/V9 flush_windows
2074   void flush_windows();
2075 
2076   // Support for serializing memory accesses between threads
2077   void serialize_memory(Register thread, Register tmp1, Register tmp2);
2078 
2079   // Stack frame creation/removal
2080   void enter();
2081   void leave();
2082 
2083   // V8/V9 integer multiply
2084   void mult(Register s1, Register s2, Register d);
2085   void mult(Register s1, int simm13a, Register d);
2086 
2087   // V8/V9 read and write of condition codes.
2088   void read_ccr(Register d);
2089   void write_ccr(Register s);
2090 
2091   // Manipulation of C++ bools
2092   // These are idioms to flag the need for care when accessing bools; on
2093   // this platform we assume byte size.
2094 
2095   inline void stbool(Register d, const Address& a) { stb(d, a); }
2096   inline void ldbool(const Address& a, Register d) { ldsb(a, d); }
2097   inline void tstbool( Register s ) { tst(s); }
2098   inline void movbool( bool boolconst, Register d) { mov( (int) boolconst, d); }
2099 
2100   // klass oop manipulations if compressed
2101   void load_klass(Register src_oop, Register klass);
2102   void store_klass(Register klass, Register dst_oop);
2103   void store_klass_gap(Register s, Register dst_oop);
2104 
2105    // oop manipulations
2106   void load_heap_oop(const Address& s, Register d);
2107   void load_heap_oop(Register s1, Register s2, Register d);
2108   void load_heap_oop(Register s1, int simm13a, Register d);
2109   void store_heap_oop(Register d, Register s1, Register s2);
2110   void store_heap_oop(Register d, Register s1, int simm13a);
2111   void store_heap_oop(Register d, const Address& a, int offset = 0);
2112 
2113   void encode_heap_oop(Register src, Register dst);
2114   void encode_heap_oop(Register r) {
2115     encode_heap_oop(r, r);
2116   }
2117   void decode_heap_oop(Register src, Register dst);
2118   void decode_heap_oop(Register r) {
2119     decode_heap_oop(r, r);
2120   }
2121   void encode_heap_oop_not_null(Register r);
2122   void decode_heap_oop_not_null(Register r);
2123   void encode_heap_oop_not_null(Register src, Register dst);
2124   void decode_heap_oop_not_null(Register src, Register dst);
2125 
2126   // Support for managing the JavaThread pointer (i.e., the reference to
2127   // thread-local information).
2128   void get_thread();                                // load G2_thread
2129   void verify_thread();                             // verify G2_thread contents
2130   void save_thread   (const Register thread_cache); // save to cache
2131   void restore_thread(const Register thread_cache); // restore from cache
2132 
2133   // Support for last Java frame (but use call_VM instead where possible)
2134   void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
2135   void reset_last_Java_frame(void);
2136 
2137   // Call into the VM.
2138   // Passes the thread pointer (in O0) as a prepended argument.
2139   // Makes sure oop return values are visible to the GC.
2140   void call_VM(Register oop_result, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
2141   void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
2142   void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
2143   void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
2144 
2145   // these overloadings are not presently used on SPARC:
2146   void call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
2147   void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
2148   void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
2149   void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
2150 
2151   void call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments = 0);
2152   void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1);
2153   void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2);
2154   void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3);
2155 
2156   void get_vm_result  (Register oop_result);
2157   void get_vm_result_2(Register oop_result);
2158 
2159   // vm result is currently getting hijacked for oop preservation
2160   void set_vm_result(Register oop_result);
2161 
2162   // if call_VM_base was called with check_exceptions=false, then call
2163   // check_and_forward_exception to handle exceptions when it is safe
2164   void check_and_forward_exception(Register scratch_reg);
2165 
2166  private:
2167   // For V8
2168   void read_ccr_trap(Register ccr_save);
2169   void write_ccr_trap(Register ccr_save1, Register scratch1, Register scratch2);
2170 
2171 #ifdef ASSERT
2172   // For V8 debugging.  Uses V8 instruction sequence and checks
2173   // result with V9 instructions rdccr and wrccr.
2174   // Uses Gscatch and Gscatch2
2175   void read_ccr_v8_assert(Register ccr_save);
2176   void write_ccr_v8_assert(Register ccr_save);
2177 #endif // ASSERT
2178 
2179  public:
2180 
2181   // Write to the card table - a register is destroyed afterwards.
2182   void card_table_write(jbyte* byte_map_base, Register tmp, Register obj);
2183 
2184   void card_write_barrier_post(Register store_addr, Register new_val, Register tmp);
2185 
2186 #ifndef SERIALGC
2187   // G1 pre-barrier for a store to obj + index (register) or obj + offset (constant)
2188   void g1_write_barrier_pre(Register obj, Register index, int offset, Register tmp, bool preserve_o_regs);
2189 
2190   void g1_write_barrier_post(Register store_addr, Register new_val, Register tmp);
2191 
2192   // May do filtering, depending on the boolean arguments.
2193   void g1_card_table_write(jbyte* byte_map_base,
2194                            Register tmp, Register obj, Register new_val,
2195                            bool region_filter, bool null_filter);
2196 #endif // SERIALGC
2197 
2198   // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
2199   void push_fTOS();
2200 
2201   // pops double TOS element from CPU stack and pushes on FPU stack
2202   void pop_fTOS();
2203 
2204   void empty_FPU_stack();
2205 
2206   void push_IU_state();
2207   void pop_IU_state();
2208 
2209   void push_FPU_state();
2210   void pop_FPU_state();
2211 
2212   void push_CPU_state();
2213   void pop_CPU_state();
2214 
2215   // if the heap base register is in use, reinitialize it with the correct value
2216   void reinit_heapbase();
2217 
2218   // Debugging
2219   void _verify_oop(Register reg, const char * msg, const char * file, int line);
2220   void _verify_oop_addr(Address addr, const char * msg, const char * file, int line);
2221 
2222 #define verify_oop(reg) _verify_oop(reg, "broken oop " #reg, __FILE__, __LINE__)
2223 #define verify_oop_addr(addr) _verify_oop_addr(addr, "broken oop addr ", __FILE__, __LINE__)
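       // Typical use (a sketch; the register and address are illustrative):
       //   __ verify_oop(O0);                  // expands to _verify_oop(O0, "broken oop O0", __FILE__, __LINE__)
       //   __ verify_oop_addr(Address(SP, 0)); // same check for an oop stored in memory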
2224 
2225   // The verify_oop macros above are active only if +VerifyOops.
2226   // verify_FPU below is active only if +VerifyFPU.
2227   void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
2228   void stop(const char* msg);                          // prints msg, dumps registers and stops execution
2229   void warn(const char* msg);                          // prints msg but doesn't stop execution
2230   void untested(const char* what = "");
2231   void unimplemented(const char* what = "")              { char* b = new char[1024];  sprintf(b, "unimplemented: %s", what);  stop(b); }
2232   void should_not_reach_here()                   { stop("should not reach here"); }
2233   void print_CPU_state();
2234 
2235   // oops in code
2236   AddressLiteral allocate_oop_address(jobject obj);                          // allocate_index
2237   AddressLiteral constant_oop_address(jobject obj);                          // find_index
2238   inline void    set_oop             (jobject obj, Register d);              // uses allocate_oop_address
2239   inline void    set_oop_constant    (jobject obj, Register d);              // uses constant_oop_address
2240   inline void    set_oop             (const AddressLiteral& obj_addr, Register d); // same as load_address
2241 
2242   void set_narrow_oop( jobject obj, Register d );
2243 
2244   // nop padding
2245   void align(int modulus);
2246 
2247   // declare a safepoint
2248   void safepoint();
2249 
2250   // factor out part of stop into subroutine to save space
2251   void stop_subroutine();
2252   // factor out part of verify_oop into subroutine to save space
2253   void verify_oop_subroutine();
2254 
2255   // side-door communication with signalHandler in os_solaris.cpp
2256   static address _verify_oop_implicit_branch[3];
2257 
2258 #ifndef PRODUCT
2259   static void test();
2260 #endif
2261 
2262   // convert an incoming arglist to varargs format; put the pointer in d
2263   void set_varargs( Argument a, Register d );
2264 
2265   int total_frame_size_in_bytes(int extraWords);
2266 
2267   // used when extraWords is known statically
2268   void save_frame(int extraWords);
2269   void save_frame_c1(int size_in_bytes);
2270   // make a frame, and simultaneously pass up one or two register values
2271   // into the new register window
2272   void save_frame_and_mov(int extraWords, Register s1, Register d1, Register s2 = Register(), Register d2 = Register());
2273 
2274   // given the number of (outgoing) params, calculate the number of words needed on the frame
2275   void calc_mem_param_words(Register Rparam_words, Register Rresult);
2276 
2277   // used to calculate frame size dynamically
2278   // result is in bytes and must be negated for save inst
2279   void calc_frame_size(Register extraWords, Register resultReg);
2280 
2281   // calc and also save
2282   void calc_frame_size_and_save(Register extraWords, Register resultReg);
2283 
2284   static void debug(char* msg, RegistersForDebugging* outWindow);
2285 
2286   // implementations of bytecodes used by both interpreter and compiler
2287 
2288   void lcmp( Register Ra_hi, Register Ra_low,
2289              Register Rb_hi, Register Rb_low,
2290              Register Rresult);
2291 
2292   void lneg( Register Rhi, Register Rlow );
2293 
2294   void lshl(  Register Rin_high,  Register Rin_low,  Register Rcount,
2295               Register Rout_high, Register Rout_low, Register Rtemp );
2296 
2297   void lshr(  Register Rin_high,  Register Rin_low,  Register Rcount,
2298               Register Rout_high, Register Rout_low, Register Rtemp );
2299 
2300   void lushr( Register Rin_high,  Register Rin_low,  Register Rcount,
2301               Register Rout_high, Register Rout_low, Register Rtemp );
2302 
2303 #ifdef _LP64
2304   void lcmp( Register Ra, Register Rb, Register Rresult);
2305 #endif
2306 
2307   // Loading values by size and signed-ness
2308   void load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed);
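       //
       // Sketch: load a 16-bit signed field into a register with sign extension
       // (the base register and offset are illustrative):
       //   __ load_sized_value(Address(O0, 0), G3_scratch, 2, true);  // 2 bytes, signed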
2309 
2310   void float_cmp( bool is_float, int unordered_result,
2311                   FloatRegister Fa, FloatRegister Fb,
2312                   Register Rresult);
2313 
2314   void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
2315   void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { Assembler::fneg(w, sd); }
2316   void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
2317   void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
2318 
2319   void save_all_globals_into_locals();
2320   void restore_globals_from_locals();
2321 
2322   void casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
2323     address lock_addr=0, bool use_call_vm=false);
2324   void cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
2325     address lock_addr=0, bool use_call_vm=false);
2326   void casn (Register addr_reg, Register cmp_reg, Register set_reg) ;
2327 
2328   // These set the icc condition code to equal if the lock succeeded,
2329   // and to notEqual if it failed and a slow case is required.
2330   void compiler_lock_object(Register Roop, Register Rmark, Register Rbox,
2331                             Register Rscratch,
2332                             BiasedLockingCounters* counters = NULL,
2333                             bool try_bias = UseBiasedLocking);
2334   void compiler_unlock_object(Register Roop, Register Rmark, Register Rbox,
2335                               Register Rscratch,
2336                               bool try_bias = UseBiasedLocking);
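       //
       // Sketch of the control flow around the fast lock (label and register
       // choices are illustrative):
       //   __ compiler_lock_object(Roop, Rmark, Rbox, Rscratch);
       //   __ br(Assembler::notEqual, false, Assembler::pn, slow_case);  // failed => slow path
       //   __ delayed()->nop();
       //   // equal: lock acquired, fall through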
2337 
2338   // Biased locking support
2339   // Upon entry, lock_reg must point to the lock record on the stack,
2340   // obj_reg must contain the target object, and mark_reg must contain
2341   // the target object's header.
2342   // Destroys mark_reg if an attempt is made to bias an anonymously
2343   // biased lock. In this case a failure will go either to the slow
2344   // case or fall through with the notEqual condition code set with
2345   // the expectation that the slow case in the runtime will be called.
2346   // In the fall-through case where the CAS-based lock is done,
2347   // mark_reg is not destroyed.
2348   void biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg,
2349                             Label& done, Label* slow_case = NULL,
2350                             BiasedLockingCounters* counters = NULL);
2351   // Upon entry, the base register of mark_addr must contain the oop.
2352   // Destroys temp_reg.
2353 
2354   // If allow_delay_slot_filling is set to true, the next instruction
2355   // emitted after this one will go in an annulled delay slot if the
2356   // biased locking exit case failed.
2357   void biased_locking_exit(Address mark_addr, Register temp_reg, Label& done, bool allow_delay_slot_filling = false);
2358 
2359   // allocation
2360   void eden_allocate(
2361     Register obj,                      // result: pointer to object after successful allocation
2362     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
2363     int      con_size_in_bytes,        // object size in bytes if   known at compile time
2364     Register t1,                       // temp register
2365     Register t2,                       // temp register
2366     Label&   slow_case                 // continuation point if fast allocation fails
2367   );
2368   void tlab_allocate(
2369     Register obj,                      // result: pointer to object after successful allocation
2370     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
2371     int      con_size_in_bytes,        // object size in bytes if   known at compile time
2372     Register t1,                       // temp register
2373     Label&   slow_case                 // continuation point if fast allocation fails
2374   );
2375   void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case);
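       //
       // Sketch of a fast-path TLAB allocation of a fixed-size object (the size
       // and registers are illustrative):
       //   Label slow;
       //   __ tlab_allocate(O0, noreg, 16, G3_scratch, slow);  // 16-byte object; O0 := new object
       //   ... initialize header and fields, then branch over the slow path ...
       //   __ bind(slow);                                      // refill/allocate via the runtime here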
2376 
2377   // interface method calling
2378   void lookup_interface_method(Register recv_klass,
2379                                Register intf_klass,
2380                                RegisterOrConstant itable_index,
2381                                Register method_result,
2382                                Register temp_reg, Register temp2_reg,
2383                                Label& no_such_interface);
2384 
2385   // Test sub_klass against super_klass, with fast and slow paths.
2386 
2387   // The fast path produces a tri-state answer: yes / no / maybe-slow.
2388   // One of the three labels can be NULL, meaning take the fall-through.
2389   // If super_check_offset is -1, the value is loaded up from super_klass.
2390   // No registers are killed, except temp_reg and temp2_reg.
2391   // If super_check_offset is not -1, temp2_reg is not used and can be noreg.
2392   void check_klass_subtype_fast_path(Register sub_klass,
2393                                      Register super_klass,
2394                                      Register temp_reg,
2395                                      Register temp2_reg,
2396                                      Label* L_success,
2397                                      Label* L_failure,
2398                                      Label* L_slow_path,
2399                 RegisterOrConstant super_check_offset = RegisterOrConstant(-1),
2400                 Register instanceof_hack = noreg);
2401 
2402   // The rest of the type check; must be wired to a corresponding fast path.
2403   // It does not repeat the fast path logic, so don't use it standalone.
2404   // The temp_reg can be noreg, if no temps are available.
2405   // It can also be sub_klass or super_klass, meaning it's OK to kill that one.
2406   // Updates the sub's secondary super cache as necessary.
2407   void check_klass_subtype_slow_path(Register sub_klass,
2408                                      Register super_klass,
2409                                      Register temp_reg,
2410                                      Register temp2_reg,
2411                                      Register temp3_reg,
2412                                      Register temp4_reg,
2413                                      Label* L_success,
2414                                      Label* L_failure);
2415 
2416   // Simplified, combined version, good for typical uses.
2417   // Falls through on failure.
2418   void check_klass_subtype(Register sub_klass,
2419                            Register super_klass,
2420                            Register temp_reg,
2421                            Register temp2_reg,
2422                            Label& L_success);
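       //
       // Sketch of the combined check: branch to L_ok on success, fall through on
       // failure (register choices are illustrative):
       //   Label L_ok;
       //   __ check_klass_subtype(O1, O2, G3_scratch, O3, L_ok);
       //   __ stop("unexpected klass");  // failure falls through to here
       //   __ bind(L_ok);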
2423 
2424   // method handles (JSR 292)
2425   void check_method_handle_type(Register mtype_reg, Register mh_reg,
2426                                 Register temp_reg,
2427                                 Label& wrong_method_type);
2428   void load_method_handle_vmslots(Register vmslots_reg, Register mh_reg,
2429                                   Register temp_reg);
2430   void jump_to_method_handle_entry(Register mh_reg, Register temp_reg, bool emit_delayed_nop = true);
2431   // offset relative to Gargs of argument at tos[arg_slot].
2432   // (arg_slot == 0 means the last argument, not the first).
2433   RegisterOrConstant argument_offset(RegisterOrConstant arg_slot,
2434                                      int extra_slot_offset = 0);
2435   // Address formed from Gargs plus argument_offset.
2436   Address            argument_address(RegisterOrConstant arg_slot,
2437                                       int extra_slot_offset = 0);
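       //
       // Sketch: load the next-to-last argument (slot 1) from the caller's
       // argument area (the destination register is arbitrary):
       //   __ ld_ptr(__ argument_address(RegisterOrConstant(1)), O1);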
2438 
2439   // Stack overflow checking
2440 
2441   // Note: this clobbers G3_scratch
2442   void bang_stack_with_offset(int offset) {
2443     // stack grows down, caller passes positive offset
2444     assert(offset > 0, "must bang with positive offset (it is negated below)");
2445     set((-offset)+STACK_BIAS, G3_scratch);
2446     st(G0, SP, G3_scratch);
2447   }
2448 
2449   // Writes to successive stack pages until the given offset is reached, to
2450   // check for stack overflow plus shadow pages.  Clobbers Rtsp and Rscratch.
2451   void bang_stack_size(Register Rsize, Register Rtsp, Register Rscratch);
2452 
2453   virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset);
2454 
2455   void verify_tlab();
2456 
2457   Condition negate_condition(Condition cond);
2458 
2459   // Helper functions for statistics gathering.
2460   // Conditionally (non-atomically) increments passed counter address, preserving condition codes.
2461   void cond_inc(Condition cond, address counter_addr, Register Rtemp1, Register Rtemp2);
2462   // Unconditional increment.
2463   void inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2);
2464   void inc_counter(int*    counter_addr, Register Rtmp1, Register Rtmp2);
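       //
       // Sketch: bump profiling counters (the counter symbols are hypothetical;
       // both temps get clobbered):
       //   __ cond_inc(Assembler::equal, (address) &hit_counter, G3_scratch, O3);
       //   __ inc_counter(&total_counter, G3_scratch, O3);   // the int* overload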
2465 
2466   // Compare char[] arrays aligned to 4 bytes.
2467   void char_arrays_equals(Register ary1, Register ary2,
2468                           Register limit, Register result,
2469                           Register chr1, Register chr2, Label& Ldone);
2470 
2471 #undef VIRTUAL
2472 
2473 };
2474 
2475 /**
2476  * class SkipIfEqual:
2477  *
2478  * Instantiating this class will result in assembly code being output that will
2479  * jump around any code emitted between the creation of the instance and its
2480  * automatic destruction at the end of a scope block, depending on the value of
2481  * the flag passed to the constructor, which will be checked at run-time.
2482  */
2483 class SkipIfEqual : public StackObj {
2484  private:
2485   MacroAssembler* _masm;
2486   Label _label;
2487 
2488  public:
2489    // 'temp' is a temp register that this object can use (and trash)
2490    SkipIfEqual(MacroAssembler*, Register temp,
2491                const bool* flag_addr, Assembler::Condition condition);
2492    ~SkipIfEqual();
2493 };
2494 
2495 #ifdef ASSERT
2496 // On RISC, there's no benefit to verifying instruction boundaries.
2497 inline bool AbstractAssembler::pd_check_instruction_mark() { return false; }
2498 #endif
2499 
2500 #endif // CPU_SPARC_VM_ASSEMBLER_SPARC_HPP