1 /*
   2  * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef CPU_SPARC_VM_MACROASSEMBLER_SPARC_HPP
  26 #define CPU_SPARC_VM_MACROASSEMBLER_SPARC_HPP
  27 
  28 #include "asm/assembler.hpp"
  29 #include "utilities/macros.hpp"
  30 
  31 // <sys/trap.h> promises that the system will not use traps 16-31
  32 #define ST_RESERVED_FOR_USER_0 0x10
  33 
  34 class BiasedLockingCounters;
  35 
  36 
  37 // Register aliases for parts of the system:
  38 
  39 // 64 bit values can be kept in g1-g5, o1-o5 and o7 and all 64 bits are safe
  40 // across context switches in V8+ ABI.  Of course, there are no 64 bit regs
  41 // in V8 ABI. All 64 bits are preserved in V9 ABI for all registers.
  42 
  43 // g2-g4 are scratch registers called "application globals".  Their
  44 // meaning is reserved to the "compilation system"--which means us!
// They are not supposed to be touched by ordinary C code, although
  46 // highly-optimized C code might steal them for temps.  They are safe
  47 // across thread switches, and the ABI requires that they be safe
  48 // across function calls.
  49 //
  50 // g1 and g3 are touched by more modules.  V8 allows g1 to be clobbered
  51 // across func calls, and V8+ also allows g5 to be clobbered across
  52 // func calls.  Also, g1 and g5 can get touched while doing shared
  53 // library loading.
  54 //
  55 // We must not touch g7 (it is the thread-self register) and g6 is
  56 // reserved for certain tools.  g0, of course, is always zero.
  57 //
  58 // (Sources:  SunSoft Compilers Group, thread library engineers.)
  59 
  60 // %%%% The interpreter should be revisited to reduce global scratch regs.
  61 
  62 // This global always holds the current JavaThread pointer:
  63 
  64 REGISTER_DECLARATION(Register, G2_thread , G2);
  65 REGISTER_DECLARATION(Register, G6_heapbase , G6);
  66 
  67 // The following globals are part of the Java calling convention:
  68 
  69 REGISTER_DECLARATION(Register, G5_method             , G5);
  70 REGISTER_DECLARATION(Register, G5_megamorphic_method , G5_method);
  71 REGISTER_DECLARATION(Register, G5_inline_cache_reg   , G5_method);
  72 
  73 // The following globals are used for the new C1 & interpreter calling convention:
  74 REGISTER_DECLARATION(Register, Gargs        , G4); // pointing to the last argument
  75 
  76 // This local is used to preserve G2_thread in the interpreter and in stubs:
  77 REGISTER_DECLARATION(Register, L7_thread_cache , L7);
  78 
  79 // These globals are used as scratch registers in the interpreter:
  80 
  81 REGISTER_DECLARATION(Register, Gframe_size   , G1); // SAME REG as G1_scratch
  82 REGISTER_DECLARATION(Register, G1_scratch    , G1); // also SAME
  83 REGISTER_DECLARATION(Register, G3_scratch    , G3);
  84 REGISTER_DECLARATION(Register, G4_scratch    , G4);
  85 
  86 // These globals are used as short-lived scratch registers in the compiler:
  87 
  88 REGISTER_DECLARATION(Register, Gtemp  , G5);
  89 
  90 // JSR 292 fixed register usages:
  91 REGISTER_DECLARATION(Register, G5_method_type        , G5);
  92 REGISTER_DECLARATION(Register, G3_method_handle      , G3);
  93 REGISTER_DECLARATION(Register, L7_mh_SP_save         , L7);
  94 
// The compiler requires that G5_megamorphic_method is G5_inline_cache_reg,
// because a single patchable "set" instruction (NativeMovConstReg,
// or NativeMovConstPatching for compiler1) serves to set up either
// quantity, depending on whether the compiled call site is an inline
// cache or is megamorphic.  See the function CompiledIC::set_to_megamorphic.
 101 //
// If an inline cache targets an interpreted method, then the
 103 // G5 register will be used twice during the call.  First,
 104 // the call site will be patched to load a compiledICHolder
 105 // into G5. (This is an ordered pair of ic_klass, method.)
 106 // The c2i adapter will first check the ic_klass, then load
 107 // G5_method with the method part of the pair just before
 108 // jumping into the interpreter.
 109 //
 110 // Note that G5_method is only the method-self for the interpreter,
 111 // and is logically unrelated to G5_megamorphic_method.
 112 //
 113 // Invariants on G2_thread (the JavaThread pointer):
 114 //  - it should not be used for any other purpose anywhere
 115 //  - it must be re-initialized by StubRoutines::call_stub()
 116 //  - it must be preserved around every use of call_VM
 117 
 118 // We can consider using g2/g3/g4 to cache more values than the
 119 // JavaThread, such as the card-marking base or perhaps pointers into
 120 // Eden.  It's something of a waste to use them as scratch temporaries,
 121 // since they are not supposed to be volatile.  (Of course, if we find
 122 // that Java doesn't benefit from application globals, then we can just
 123 // use them as ordinary temporaries.)
 124 //
 125 // Since g1 and g5 (and/or g6) are the volatile (caller-save) registers,
 126 // it makes sense to use them routinely for procedure linkage,
 127 // whenever the On registers are not applicable.  Examples:  G5_method,
// G5_inline_cache_reg, and a double handful of miscellaneous compiler
 129 // stubs.  This means that compiler stubs, etc., should be kept to a
 130 // maximum of two or three G-register arguments.
 131 
 132 
 133 // stub frames
 134 
REGISTER_DECLARATION(Register, Lentry_args      , L0); // pointer to args passed to the callee (interpreter), not to the stub itself
 136 
 137 // Interpreter frames
 138 
 139 #ifdef CC_INTERP
 140 REGISTER_DECLARATION(Register, Lstate           , L0); // interpreter state object pointer
 141 REGISTER_DECLARATION(Register, L1_scratch       , L1); // scratch
 142 REGISTER_DECLARATION(Register, Lmirror          , L1); // mirror (for native methods only)
 143 REGISTER_DECLARATION(Register, L2_scratch       , L2);
 144 REGISTER_DECLARATION(Register, L3_scratch       , L3);
 145 REGISTER_DECLARATION(Register, L4_scratch       , L4);
 146 REGISTER_DECLARATION(Register, Lscratch         , L5); // C1 uses
 147 REGISTER_DECLARATION(Register, Lscratch2        , L6); // C1 uses
 148 REGISTER_DECLARATION(Register, L7_scratch       , L7); // constant pool cache
 149 REGISTER_DECLARATION(Register, O5_savedSP       , O5);
 150 REGISTER_DECLARATION(Register, I5_savedSP       , I5); // Saved SP before bumping for locals.  This is simply
                                                       // a copy of SP, so in 64-bit it's a biased value.  The bias
 152                                                        // is added and removed as needed in the frame code.
 153 // Interface to signature handler
 154 REGISTER_DECLARATION(Register, Llocals          , L7); // pointer to locals for signature handler
 155 REGISTER_DECLARATION(Register, Lmethod          , L6); // Method* when calling signature handler
 156 
 157 #else
 158 REGISTER_DECLARATION(Register, Lesp             , L0); // expression stack pointer
 159 REGISTER_DECLARATION(Register, Lbcp             , L1); // pointer to next bytecode
 160 REGISTER_DECLARATION(Register, Lmethod          , L2);
 161 REGISTER_DECLARATION(Register, Llocals          , L3);
 162 REGISTER_DECLARATION(Register, Largs            , L3); // pointer to locals for signature handler
 163                                                        // must match Llocals in asm interpreter
 164 REGISTER_DECLARATION(Register, Lmonitors        , L4);
 165 REGISTER_DECLARATION(Register, Lbyte_code       , L5);
 166 // When calling out from the interpreter we record SP so that we can remove any extra stack
 167 // space allocated during adapter transitions. This register is only live from the point
 168 // of the call until we return.
 169 REGISTER_DECLARATION(Register, Llast_SP         , L5);
 170 REGISTER_DECLARATION(Register, Lscratch         , L5);
 171 REGISTER_DECLARATION(Register, Lscratch2        , L6);
 172 REGISTER_DECLARATION(Register, LcpoolCache      , L6); // constant pool cache
 173 
 174 REGISTER_DECLARATION(Register, O5_savedSP       , O5);
 175 REGISTER_DECLARATION(Register, I5_savedSP       , I5); // Saved SP before bumping for locals.  This is simply
                                                       // a copy of SP, so in 64-bit it's a biased value.  The bias
 177                                                        // is added and removed as needed in the frame code.
 178 REGISTER_DECLARATION(Register, IdispatchTables  , I4); // Base address of the bytecode dispatch tables
 179 REGISTER_DECLARATION(Register, IdispatchAddress , I3); // Register which saves the dispatch address for each bytecode
 180 REGISTER_DECLARATION(Register, ImethodDataPtr   , I2); // Pointer to the current method data
 181 #endif /* CC_INTERP */
 182 
// NOTE: Lscratch2 and LcpoolCache name the same register in the
//       interpreter code.  If Lscratch2 is used for some other purpose,
//       LcpoolCache must be restored afterwards for the interpreter
//       to work right.
// (These assignments must be compatible with L7_thread_cache; see above.)
 188 
 189 // Since Lbcp points into the middle of the method object,
 190 // it is temporarily converted into a "bcx" during GC.
 191 
 192 // Exception processing
 193 // These registers are passed into exception handlers.
 194 // All exception handlers require the exception object being thrown.
 195 // In addition, an nmethod's exception handler must be passed
 196 // the address of the call site within the nmethod, to allow
 197 // proper selection of the applicable catch block.
 198 // (Interpreter frames use their own bcp() for this purpose.)
 199 //
 200 // The Oissuing_pc value is not always needed.  When jumping to a
 201 // handler that is known to be interpreted, the Oissuing_pc value can be
 202 // omitted.  An actual catch block in compiled code receives (from its
// nmethod's exception handler) the thrown exception in Oexception,
// but it doesn't need Oissuing_pc.
 205 //
 206 // If an exception handler (either interpreted or compiled)
 207 // discovers there is no applicable catch block, it updates
 208 // the Oissuing_pc to the continuation PC of its own caller,
 209 // pops back to that caller's stack frame, and executes that
 210 // caller's exception handler.  Obviously, this process will
 211 // iterate until the control stack is popped back to a method
 212 // containing an applicable catch block.  A key invariant is
 213 // that the Oissuing_pc value is always a value local to
 214 // the method whose exception handler is currently executing.
 215 //
 216 // Note:  The issuing PC value is __not__ a raw return address (I7 value).
 217 // It is a "return pc", the address __following__ the call.
 218 // Raw return addresses are converted to issuing PCs by frame::pc(),
 219 // or by stubs.  Issuing PCs can be used directly with PC range tables.
 220 //
 221 REGISTER_DECLARATION(Register, Oexception  , O0); // exception being thrown
 222 REGISTER_DECLARATION(Register, Oissuing_pc , O1); // where the exception is coming from
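
// Illustrative sketch (not part of the build): code that transfers control to
// an exception handler is expected to establish the contract above roughly as
// follows (using the usual "__" shorthand for the MacroAssembler; the source
// registers named here are hypothetical).
//
//   __ mov(O_exc_oop, Oexception);     // the oop being thrown
//   __ mov(O_ret_pc,  Oissuing_pc);    // "return pc": the address following the call
//   __ jmp(handler_entry_reg, 0);      // enter the handler
//   __ delayed()->nop();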
 223 
 224 
 225 // These must occur after the declarations above
 226 #ifndef DONT_USE_REGISTER_DEFINES
 227 
 228 #define Gthread             AS_REGISTER(Register, Gthread)
 229 #define Gmethod             AS_REGISTER(Register, Gmethod)
 230 #define Gmegamorphic_method AS_REGISTER(Register, Gmegamorphic_method)
 231 #define Ginline_cache_reg   AS_REGISTER(Register, Ginline_cache_reg)
 232 #define Gargs               AS_REGISTER(Register, Gargs)
 233 #define Lthread_cache       AS_REGISTER(Register, Lthread_cache)
 234 #define Gframe_size         AS_REGISTER(Register, Gframe_size)
 235 #define Gtemp               AS_REGISTER(Register, Gtemp)
 236 
 237 #ifdef CC_INTERP
 238 #define Lstate              AS_REGISTER(Register, Lstate)
 239 #define Lesp                AS_REGISTER(Register, Lesp)
 240 #define L1_scratch          AS_REGISTER(Register, L1_scratch)
 241 #define Lmirror             AS_REGISTER(Register, Lmirror)
 242 #define L2_scratch          AS_REGISTER(Register, L2_scratch)
 243 #define L3_scratch          AS_REGISTER(Register, L3_scratch)
 244 #define L4_scratch          AS_REGISTER(Register, L4_scratch)
 245 #define Lscratch            AS_REGISTER(Register, Lscratch)
 246 #define Lscratch2           AS_REGISTER(Register, Lscratch2)
 247 #define L7_scratch          AS_REGISTER(Register, L7_scratch)
 248 #define Ostate              AS_REGISTER(Register, Ostate)
 249 #else
 250 #define Lesp                AS_REGISTER(Register, Lesp)
 251 #define Lbcp                AS_REGISTER(Register, Lbcp)
 252 #define Lmethod             AS_REGISTER(Register, Lmethod)
 253 #define Llocals             AS_REGISTER(Register, Llocals)
 254 #define Lmonitors           AS_REGISTER(Register, Lmonitors)
 255 #define Lbyte_code          AS_REGISTER(Register, Lbyte_code)
 256 #define Lscratch            AS_REGISTER(Register, Lscratch)
 257 #define Lscratch2           AS_REGISTER(Register, Lscratch2)
 258 #define LcpoolCache         AS_REGISTER(Register, LcpoolCache)
 259 #endif /* ! CC_INTERP */
 260 
 261 #define Lentry_args         AS_REGISTER(Register, Lentry_args)
 262 #define I5_savedSP          AS_REGISTER(Register, I5_savedSP)
 263 #define O5_savedSP          AS_REGISTER(Register, O5_savedSP)
 264 #define IdispatchAddress    AS_REGISTER(Register, IdispatchAddress)
 265 #define ImethodDataPtr      AS_REGISTER(Register, ImethodDataPtr)
 266 #define IdispatchTables     AS_REGISTER(Register, IdispatchTables)
 267 
 268 #define Oexception          AS_REGISTER(Register, Oexception)
 269 #define Oissuing_pc         AS_REGISTER(Register, Oissuing_pc)
 270 
 271 #endif
 272 
 273 
 274 // Address is an abstraction used to represent a memory location.
 275 //
 276 // Note: A register location is represented via a Register, not
 277 //       via an address for efficiency & simplicity reasons.
 278 
 279 class Address VALUE_OBJ_CLASS_SPEC {
 280  private:
 281   Register           _base;           // Base register.
 282   RegisterOrConstant _index_or_disp;  // Index register or constant displacement.
 283   RelocationHolder   _rspec;
 284 
 285  public:
 286   Address() : _base(noreg), _index_or_disp(noreg) {}
 287 
 288   Address(Register base, RegisterOrConstant index_or_disp)
 289     : _base(base),
 290       _index_or_disp(index_or_disp) {
 291   }
 292 
 293   Address(Register base, Register index)
 294     : _base(base),
 295       _index_or_disp(index) {
 296   }
 297 
 298   Address(Register base, int disp)
 299     : _base(base),
 300       _index_or_disp(disp) {
 301   }
 302 
 303 #ifdef ASSERT
 304   // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
 305   Address(Register base, ByteSize disp)
 306     : _base(base),
 307       _index_or_disp(in_bytes(disp)) {
 308   }
 309 #endif
 310 
 311   // accessors
 312   Register base()             const { return _base; }
 313   Register index()            const { return _index_or_disp.as_register(); }
 314   int      disp()             const { return _index_or_disp.as_constant(); }
 315 
 316   bool     has_index()        const { return _index_or_disp.is_register(); }
 317   bool     has_disp()         const { return _index_or_disp.is_constant(); }
 318 
 319   bool     uses(Register reg) const { return base() == reg || (has_index() && index() == reg); }
 320 
 321   const relocInfo::relocType rtype() { return _rspec.type(); }
 322   const RelocationHolder&    rspec() { return _rspec; }
 323 
 324   RelocationHolder rspec(int offset) const {
 325     return offset == 0 ? _rspec : _rspec.plus(offset);
 326   }
 327 
 328   inline bool is_simm13(int offset = 0);  // check disp+offset for overflow
 329 
 330   Address plus_disp(int plusdisp) const {     // bump disp by a small amount
 331     assert(_index_or_disp.is_constant(), "must have a displacement");
 332     Address a(base(), disp() + plusdisp);
 333     return a;
 334   }
 335   bool is_same_address(Address a) const {
 336     // disregard _rspec
 337     return base() == a.base() && (has_index() ? index() == a.index() : disp() == a.disp());
 338   }
 339 
 340   Address after_save() const {
 341     Address a = (*this);
 342     a._base = a._base->after_save();
 343     return a;
 344   }
 345 
 346   Address after_restore() const {
 347     Address a = (*this);
 348     a._base = a._base->after_restore();
 349     return a;
 350   }
 351 
 352   // Convert the raw encoding form into the form expected by the
 353   // constructor for Address.
 354   static Address make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc);
 355 
 356   friend class Assembler;
 357 };
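
// Illustrative sketch (not part of the build): an Address is either
// base + constant displacement or base + index register, and plus_disp()
// derives a related location.  The offsets below are made up.
//
//   Address field(G3, 8);                       // M[G3 + 8]
//   Address elem (G3, O2);                      // M[G3 + O2]
//   Address next = field.plus_disp(wordSize);   // M[G3 + 8 + wordSize]
//   // field.has_disp() and elem.has_index() hold; the two forms are exclusive.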
 358 
 359 
 360 class AddressLiteral VALUE_OBJ_CLASS_SPEC {
 361  private:
 362   address          _address;
 363   RelocationHolder _rspec;
 364 
 365   RelocationHolder rspec_from_rtype(relocInfo::relocType rtype, address addr) {
 366     switch (rtype) {
 367     case relocInfo::external_word_type:
 368       return external_word_Relocation::spec(addr);
 369     case relocInfo::internal_word_type:
 370       return internal_word_Relocation::spec(addr);
 371 #ifdef _LP64
 372     case relocInfo::opt_virtual_call_type:
 373       return opt_virtual_call_Relocation::spec();
 374     case relocInfo::static_call_type:
 375       return static_call_Relocation::spec();
 376     case relocInfo::runtime_call_type:
 377       return runtime_call_Relocation::spec();
 378 #endif
 379     case relocInfo::none:
 380       return RelocationHolder();
 381     default:
 382       ShouldNotReachHere();
 383       return RelocationHolder();
 384     }
 385   }
 386 
 387  protected:
 388   // creation
 389   AddressLiteral() : _address(NULL), _rspec(NULL) {}
 390 
 391  public:
 392   AddressLiteral(address addr, RelocationHolder const& rspec)
 393     : _address(addr),
 394       _rspec(rspec) {}
 395 
 396   // Some constructors to avoid casting at the call site.
 397   AddressLiteral(jobject obj, RelocationHolder const& rspec)
 398     : _address((address) obj),
 399       _rspec(rspec) {}
 400 
 401   AddressLiteral(intptr_t value, RelocationHolder const& rspec)
 402     : _address((address) value),
 403       _rspec(rspec) {}
 404 
 405   AddressLiteral(address addr, relocInfo::relocType rtype = relocInfo::none)
 406     : _address((address) addr),
 407     _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 408 
 409   // Some constructors to avoid casting at the call site.
 410   AddressLiteral(address* addr, relocInfo::relocType rtype = relocInfo::none)
 411     : _address((address) addr),
 412     _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 413 
 414   AddressLiteral(bool* addr, relocInfo::relocType rtype = relocInfo::none)
 415     : _address((address) addr),
 416       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 417 
 418   AddressLiteral(const bool* addr, relocInfo::relocType rtype = relocInfo::none)
 419     : _address((address) addr),
 420       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 421 
 422   AddressLiteral(signed char* addr, relocInfo::relocType rtype = relocInfo::none)
 423     : _address((address) addr),
 424       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 425 
 426   AddressLiteral(int* addr, relocInfo::relocType rtype = relocInfo::none)
 427     : _address((address) addr),
 428       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 429 
 430   AddressLiteral(intptr_t addr, relocInfo::relocType rtype = relocInfo::none)
 431     : _address((address) addr),
 432       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 433 
 434 #ifdef _LP64
 435   // 32-bit complains about a multiple declaration for int*.
 436   AddressLiteral(intptr_t* addr, relocInfo::relocType rtype = relocInfo::none)
 437     : _address((address) addr),
 438       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 439 #endif
 440 
 441   AddressLiteral(Metadata* addr, relocInfo::relocType rtype = relocInfo::none)
 442     : _address((address) addr),
 443       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 444 
 445   AddressLiteral(Metadata** addr, relocInfo::relocType rtype = relocInfo::none)
 446     : _address((address) addr),
 447       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 448 
 449   AddressLiteral(float* addr, relocInfo::relocType rtype = relocInfo::none)
 450     : _address((address) addr),
 451       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 452 
 453   AddressLiteral(double* addr, relocInfo::relocType rtype = relocInfo::none)
 454     : _address((address) addr),
 455       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 456 
 457   intptr_t value() const { return (intptr_t) _address; }
 458   int      low10() const;
 459 
 460   const relocInfo::relocType rtype() const { return _rspec.type(); }
 461   const RelocationHolder&    rspec() const { return _rspec; }
 462 
 463   RelocationHolder rspec(int offset) const {
 464     return offset == 0 ? _rspec : _rspec.plus(offset);
 465   }
 466 };
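
// Illustrative sketch (not part of the build): an AddressLiteral pairs an
// absolute address with relocation info and is typically materialized into a
// register with MacroAssembler::set() or sethi().  The global named below is
// hypothetical.
//
//   AddressLiteral flag_addr((address) &SomeGlobalFlag, relocInfo::external_word_type);
//   __ set(flag_addr, G1_scratch);        // G1_scratch := &SomeGlobalFlag (relocated)
//   __ ldub(G1_scratch, 0, G1_scratch);   // load the byte-sized flag value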
 467 
 468 // Convenience classes
 469 class ExternalAddress: public AddressLiteral {
 470  private:
 471   static relocInfo::relocType reloc_for_target(address target) {
 472     // Sometimes ExternalAddress is used for values which aren't
 473     // exactly addresses, like the card table base.
 474     // external_word_type can't be used for values in the first page
 475     // so just skip the reloc in that case.
 476     return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
 477   }
 478 
 479  public:
 480   ExternalAddress(address target) : AddressLiteral(target, reloc_for_target(          target)) {}
 481   ExternalAddress(Metadata** target) : AddressLiteral(target, reloc_for_target((address) target)) {}
 482 };
 483 
 484 inline Address RegisterImpl::address_in_saved_window() const {
 485    return (Address(SP, (sp_offset_in_saved_window() * wordSize) + STACK_BIAS));
 486 }
 487 
 488 
 489 
 490 // Argument is an abstraction used to represent an outgoing
 491 // actual argument or an incoming formal parameter, whether
 492 // it resides in memory or in a register, in a manner consistent
 493 // with the SPARC Application Binary Interface, or ABI.  This is
 494 // often referred to as the native or C calling convention.
 495 
 496 class Argument VALUE_OBJ_CLASS_SPEC {
 497  private:
 498   int _number;
 499   bool _is_in;
 500 
 501  public:
 502 #ifdef _LP64
 503   enum {
 504     n_register_parameters = 6,          // only 6 registers may contain integer parameters
 505     n_float_register_parameters = 16    // Can have up to 16 floating registers
 506   };
 507 #else
 508   enum {
 509     n_register_parameters = 6           // only 6 registers may contain integer parameters
 510   };
 511 #endif
 512 
 513   // creation
 514   Argument(int number, bool is_in) : _number(number), _is_in(is_in) {}
 515 
 516   int  number() const  { return _number;  }
 517   bool is_in()  const  { return _is_in;   }
 518   bool is_out() const  { return !is_in(); }
 519 
 520   Argument successor() const  { return Argument(number() + 1, is_in()); }
 521   Argument as_in()     const  { return Argument(number(), true ); }
 522   Argument as_out()    const  { return Argument(number(), false); }
 523 
 524   // locating register-based arguments:
 525   bool is_register() const { return _number < n_register_parameters; }
 526 
 527 #ifdef _LP64
 528   // locating Floating Point register-based arguments:
 529   bool is_float_register() const { return _number < n_float_register_parameters; }
 530 
 531   FloatRegister as_float_register() const {
 532     assert(is_float_register(), "must be a register argument");
 533     return as_FloatRegister(( number() *2 ) + 1);
 534   }
 535   FloatRegister as_double_register() const {
 536     assert(is_float_register(), "must be a register argument");
 537     return as_FloatRegister(( number() *2 ));
 538   }
 539 #endif
 540 
 541   Register as_register() const {
 542     assert(is_register(), "must be a register argument");
 543     return is_in() ? as_iRegister(number()) : as_oRegister(number());
 544   }
 545 
 546   // locating memory-based arguments
 547   Address as_address() const {
 548     assert(!is_register(), "must be a memory argument");
 549     return address_in_frame();
 550   }
 551 
 552   // When applied to a register-based argument, give the corresponding address
 553   // into the 6-word area "into which callee may store register arguments"
 554   // (This is a different place than the corresponding register-save area location.)
 555   Address address_in_frame() const;
 556 
 557   // debugging
 558   const char* name() const;
 559 
 560   friend class Assembler;
 561 };
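
// Illustrative sketch (not part of the build): walking an outgoing C-ABI
// argument list.  The first n_register_parameters arguments are passed in
// registers (out registers from the caller's side); the rest are reached
// through address_in_frame().  total_args and src[] are hypothetical.
//
//   Argument arg(0, false);                      // first outgoing argument
//   for (int i = 0; i < total_args; i++, arg = arg.successor()) {
//     if (arg.is_register()) __ mov(src[i], arg.as_register());
//     else                   __ st_ptr(src[i], arg.as_address());
//   }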
 562 
 563 
 564 class RegistersForDebugging : public StackObj {
 565  public:
 566   intptr_t i[8], l[8], o[8], g[8];
 567   float    f[32];
 568   double   d[32];
 569 
 570   void print(outputStream* s);
 571 
 572   static int i_offset(int j) { return offset_of(RegistersForDebugging, i[j]); }
 573   static int l_offset(int j) { return offset_of(RegistersForDebugging, l[j]); }
 574   static int o_offset(int j) { return offset_of(RegistersForDebugging, o[j]); }
 575   static int g_offset(int j) { return offset_of(RegistersForDebugging, g[j]); }
 576   static int f_offset(int j) { return offset_of(RegistersForDebugging, f[j]); }
 577   static int d_offset(int j) { return offset_of(RegistersForDebugging, d[j / 2]); }
 578 
 579   // gen asm code to save regs
 580   static void save_registers(MacroAssembler* a);
 581 
 582   // restore global registers in case C code disturbed them
 583   static void restore_registers(MacroAssembler* a, Register r);
 584 };
 585 
 586 
 587 // MacroAssembler extends Assembler by a few frequently used macros.
 588 //
 589 // Most of the standard SPARC synthetic ops are defined here.
 590 // Instructions for which a 'better' code sequence exists depending
 591 // on arguments should also go in here.
 592 
 593 #define JMP2(r1, r2) jmp(r1, r2, __FILE__, __LINE__)
 594 #define JMP(r1, off) jmp(r1, off, __FILE__, __LINE__)
 595 #define JUMP(a, temp, off)     jump(a, temp, off, __FILE__, __LINE__)
 596 #define JUMPL(a, temp, d, off) jumpl(a, temp, d, off, __FILE__, __LINE__)
 597 
 598 
 599 class MacroAssembler : public Assembler {
 600   // code patchers need various routines like inv_wdisp()
 601   friend class NativeInstruction;
 602   friend class NativeGeneralJump;
 603   friend class Relocation;
 604   friend class Label;
 605 
 606  protected:
 607   static int  patched_branch(int dest_pos, int inst, int inst_pos);
 608   static int  branch_destination(int inst, int pos);
 609 
 610   // Support for VM calls
 611   // This is the base routine called by the different versions of call_VM_leaf. The interpreter
 612   // may customize this version by overriding it for its purposes (e.g., to save/restore
 613   // additional registers when doing a VM call).
 614 #ifdef CC_INTERP
 615   #define VIRTUAL
 616 #else
 617   #define VIRTUAL virtual
 618 #endif
 619 
 620   VIRTUAL void call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments);
 621 
 622   //
 623   // It is imperative that all calls into the VM are handled via the call_VM macros.
 624   // They make sure that the stack linkage is setup correctly. call_VM's correspond
 625   // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
 626   //
 627   // This is the base routine called by the different versions of call_VM. The interpreter
 628   // may customize this version by overriding it for its purposes (e.g., to save/restore
 629   // additional registers when doing a VM call).
 630   //
 631   // A non-volatile java_thread_cache register should be specified so
 632   // that the G2_thread value can be preserved across the call.
 633   // (If java_thread_cache is noreg, then a slow get_thread call
 634   // will re-initialize the G2_thread.) call_VM_base returns the register that contains the
 635   // thread.
 636   //
  // If no last_java_sp is specified (noreg), then SP will be used instead.
 638 
 639   virtual void call_VM_base(
 640     Register        oop_result,             // where an oop-result ends up if any; use noreg otherwise
 641     Register        java_thread_cache,      // the thread if computed before     ; use noreg otherwise
 642     Register        last_java_sp,           // to set up last_Java_frame in stubs; use noreg otherwise
 643     address         entry_point,            // the entry point
 644     int             number_of_arguments,    // the number of arguments (w/o thread) to pop after call
    bool            check_exceptions=true   // whether to check for a pending exception after the call
 646   );
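
  // Illustrative sketch (not part of the build): VM calls normally go through
  // the public call_VM wrappers declared further below rather than through
  // call_VM_base directly.  The runtime entry named here is hypothetical.
  //
  //   __ call_VM(noreg,
  //              CAST_FROM_FN_PTR(address, SomeRuntime::hypothetical_entry),
  //              O1 /* arg_1 */);
  //   // G2_thread is preserved and a pending exception check is performed.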
 647 
 648   // This routine should emit JVMTI PopFrame and ForceEarlyReturn handling code.
 649   // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
 651   virtual void check_and_handle_popframe(Register scratch_reg);
 652   virtual void check_and_handle_earlyret(Register scratch_reg);
 653 
 654  public:
 655   MacroAssembler(CodeBuffer* code) : Assembler(code) {}
 656 
 657   // Support for NULL-checks
 658   //
 659   // Generates code that causes a NULL OS exception if the content of reg is NULL.
 660   // If the accessed location is M[reg + offset] and the offset is known, provide the
 661   // offset.  No explicit code generation is needed if the offset is within a certain
 662   // range (0 <= offset <= page_size).
 663   //
 664   // %%%%%% Currently not done for SPARC
 665 
 666   void null_check(Register reg, int offset = -1);
 667   static bool needs_explicit_null_check(intptr_t offset);
 668 
 669   // support for delayed instructions
 670   MacroAssembler* delayed() { Assembler::delayed();  return this; }
 671 
 672   // branches that use right instruction for v8 vs. v9
 673   inline void br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
 674   inline void br( Condition c, bool a, Predict p, Label& L );
 675 
 676   inline void fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
 677   inline void fb( Condition c, bool a, Predict p, Label& L );
 678 
 679   // compares register with zero (32 bit) and branches (V9 and V8 instructions)
 680   void cmp_zero_and_br( Condition c, Register s1, Label& L, bool a = false, Predict p = pn );
 681   // Compares a pointer register with zero and branches on (not)null.
 682   // Does a test & branch on 32-bit systems and a register-branch on 64-bit.
 683   void br_null   ( Register s1, bool a, Predict p, Label& L );
 684   void br_notnull( Register s1, bool a, Predict p, Label& L );
 685 
 686   //
 687   // Compare registers and branch with nop in delay slot or cbcond without delay slot.
 688   //
 689   // ATTENTION: use these instructions with caution because cbcond instruction
 690   //            has very short distance: 512 instructions (2Kbyte).
 691 
 692   // Compare integer (32 bit) values (icc only).
 693   void cmp_and_br_short(Register s1, Register s2, Condition c, Predict p, Label& L);
 694   void cmp_and_br_short(Register s1, int simm13a, Condition c, Predict p, Label& L);
 695   // Platform depending version for pointer compare (icc on !LP64 and xcc on LP64).
 696   void cmp_and_brx_short(Register s1, Register s2, Condition c, Predict p, Label& L);
 697   void cmp_and_brx_short(Register s1, int simm13a, Condition c, Predict p, Label& L);
 698 
  // Short branch versions for comparing a pointer with zero.
 700   void br_null_short   ( Register s1, Predict p, Label& L );
 701   void br_notnull_short( Register s1, Predict p, Label& L );
 702 
 703   // unconditional short branch
 704   void ba_short(Label& L);
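
  // Illustrative sketch (not part of the build): the *_short forms emit either
  // a cbcond (no delay slot) or a compare plus branch with a nop in the delay
  // slot, so the caller does not fill a delay slot itself.  Only use them when
  // the target is within the short cbcond range noted above.
  //
  //   Label L_done;
  //   __ cmp_and_br_short(O0, 0, Assembler::equal, Assembler::pt, L_done);
  //   __ inc(O1);                    // skipped when O0 == 0
  //   __ bind(L_done);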
 705 
 706   inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
 707   inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );
 708 
 709   // Branch that tests xcc in LP64 and icc in !LP64
 710   inline void brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
 711   inline void brx( Condition c, bool a, Predict p, Label& L );
 712 
 713   // unconditional branch
 714   inline void ba( Label& L );
 715 
 716   // Branch that tests fp condition codes
 717   inline void fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
 718   inline void fbp( Condition c, bool a, CC cc, Predict p, Label& L );
 719 
 720   // get PC the best way
 721   inline int get_pc( Register d );
 722 
  // Sparc shorthands (pp 85, V8 manual; pp 289, V9 manual)
 724   inline void cmp(  Register s1, Register s2 ) { subcc( s1, s2, G0 ); }
 725   inline void cmp(  Register s1, int simm13a ) { subcc( s1, simm13a, G0 ); }
 726 
 727   inline void jmp( Register s1, Register s2 );
 728   inline void jmp( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
 729 
 730   // Check if the call target is out of wdisp30 range (relative to the code cache)
 731   static inline bool is_far_target(address d);
 732   inline void call( address d,  relocInfo::relocType rt = relocInfo::runtime_call_type );
 733   inline void call( Label& L,   relocInfo::relocType rt = relocInfo::runtime_call_type );
 734   inline void callr( Register s1, Register s2 );
 735   inline void callr( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
 736 
 737   // Emits nothing on V8
 738   inline void iprefetch( address d, relocInfo::relocType rt = relocInfo::none );
 739   inline void iprefetch( Label& L);
 740 
 741   inline void tst( Register s ) { orcc( G0, s, G0 ); }
 742 
 743 #ifdef PRODUCT
 744   inline void ret(  bool trace = TraceJumps )   { if (trace) {
 745                                                     mov(I7, O7); // traceable register
 746                                                     JMP(O7, 2 * BytesPerInstWord);
 747                                                   } else {
 748                                                     jmpl( I7, 2 * BytesPerInstWord, G0 );
 749                                                   }
 750                                                 }
 751 
 752   inline void retl( bool trace = TraceJumps )  { if (trace) JMP(O7, 2 * BytesPerInstWord);
 753                                                  else jmpl( O7, 2 * BytesPerInstWord, G0 ); }
 754 #else
 755   void ret(  bool trace = TraceJumps );
 756   void retl( bool trace = TraceJumps );
 757 #endif /* PRODUCT */
 758 
 759   // Required platform-specific helpers for Label::patch_instructions.
 760   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
 761   void pd_patch_instruction(address branch, address target);
 762 
 763   // sethi Macro handles optimizations and relocations
 764 private:
 765   void internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable);
 766 public:
 767   void sethi(const AddressLiteral& addrlit, Register d);
 768   void patchable_sethi(const AddressLiteral& addrlit, Register d);
 769 
 770   // compute the number of instructions for a sethi/set
 771   static int  insts_for_sethi( address a, bool worst_case = false );
 772   static int  worst_case_insts_for_set();
 773 
 774   // set may be either setsw or setuw (high 32 bits may be zero or sign)
 775 private:
 776   void internal_set(const AddressLiteral& al, Register d, bool ForceRelocatable);
 777   static int insts_for_internal_set(intptr_t value);
 778 public:
 779   void set(const AddressLiteral& addrlit, Register d);
 780   void set(intptr_t value, Register d);
 781   void set(address addr, Register d, RelocationHolder const& rspec);
 782   static int insts_for_set(intptr_t value) { return insts_for_internal_set(value); }
 783 
 784   void patchable_set(const AddressLiteral& addrlit, Register d);
 785   void patchable_set(intptr_t value, Register d);
 786   void set64(jlong value, Register d, Register tmp);
 787   static int insts_for_set64(jlong value);
 788 
 789   // sign-extend 32 to 64
 790   inline void signx( Register s, Register d ) { sra( s, G0, d); }
 791   inline void signx( Register d )             { sra( d, G0, d); }
 792 
 793   inline void not1( Register s, Register d ) { xnor( s, G0, d ); }
 794   inline void not1( Register d )             { xnor( d, G0, d ); }
 795 
 796   inline void neg( Register s, Register d ) { sub( G0, s, d ); }
 797   inline void neg( Register d )             { sub( G0, d, d ); }
 798 
 799   inline void cas(  Register s1, Register s2, Register d) { casa( s1, s2, d, ASI_PRIMARY); }
 800   inline void casx( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY); }
 801   // Functions for isolating 64 bit atomic swaps for LP64
 802   // cas_ptr will perform cas for 32 bit VM's and casx for 64 bit VM's
 803   inline void cas_ptr(  Register s1, Register s2, Register d) {
 804 #ifdef _LP64
 805     casx( s1, s2, d );
 806 #else
 807     cas( s1, s2, d );
 808 #endif
 809   }
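
  // Illustrative sketch (not part of the build): a pointer-sized compare-and-
  // swap via cas_ptr.  On SPARC, cas/casx compare the memory word at [s1] with
  // s2 and, if equal, store d there; d always receives the old memory value,
  // so success is tested by comparing d with s2 afterwards.  The register
  // roles below are hypothetical.
  //
  //   __ mov(new_value_reg, O3);     // value to install
  //   __ cas_ptr(O1, O2, O3);        // [O1]: expect O2, install O3; O3 := old value
  //   __ cmp(O2, O3);                // equal => the swap succeeded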
 810 
 811   // Functions for isolating 64 bit shifts for LP64
 812   inline void sll_ptr( Register s1, Register s2, Register d );
 813   inline void sll_ptr( Register s1, int imm6a,   Register d );
 814   inline void sll_ptr( Register s1, RegisterOrConstant s2, Register d );
 815   inline void srl_ptr( Register s1, Register s2, Register d );
 816   inline void srl_ptr( Register s1, int imm6a,   Register d );
 817 
 818   // little-endian
 819   inline void casl(  Register s1, Register s2, Register d) { casa( s1, s2, d, ASI_PRIMARY_LITTLE); }
 820   inline void casxl( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY_LITTLE); }
 821 
 822   inline void inc(   Register d,  int const13 = 1 ) { add(   d, const13, d); }
 823   inline void inccc( Register d,  int const13 = 1 ) { addcc( d, const13, d); }
 824 
 825   inline void dec(   Register d,  int const13 = 1 ) { sub(   d, const13, d); }
 826   inline void deccc( Register d,  int const13 = 1 ) { subcc( d, const13, d); }
 827 
 828   using Assembler::add;
 829   inline void add(Register s1, int simm13a, Register d, relocInfo::relocType rtype);
 830   inline void add(Register s1, int simm13a, Register d, RelocationHolder const& rspec);
 831   inline void add(Register s1, RegisterOrConstant s2, Register d, int offset = 0);
 832   inline void add(const Address& a, Register d, int offset = 0);
 833 
 834   using Assembler::andn;
 835   inline void andn(  Register s1, RegisterOrConstant s2, Register d);
 836 
 837   inline void btst( Register s1,  Register s2 ) { andcc( s1, s2, G0 ); }
 838   inline void btst( int simm13a,  Register s )  { andcc( s,  simm13a, G0 ); }
 839 
 840   inline void bset( Register s1,  Register s2 ) { or3( s1, s2, s2 ); }
 841   inline void bset( int simm13a,  Register s )  { or3( s,  simm13a, s ); }
 842 
 843   inline void bclr( Register s1,  Register s2 ) { andn( s1, s2, s2 ); }
 844   inline void bclr( int simm13a,  Register s )  { andn( s,  simm13a, s ); }
 845 
 846   inline void btog( Register s1,  Register s2 ) { xor3( s1, s2, s2 ); }
 847   inline void btog( int simm13a,  Register s )  { xor3( s,  simm13a, s ); }
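
  // For example (illustrative): btst(0x3, O0) sets the condition codes from
  // O0 & 0x3, a common alignment check, while bset(0x3, O0) ors the mask into
  // O0 in place and bclr(0x3, O0) clears those two bits.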
 848 
 849   inline void clr( Register d ) { or3( G0, G0, d ); }
 850 
 851   inline void clrb( Register s1, Register s2);
 852   inline void clrh( Register s1, Register s2);
 853   inline void clr(  Register s1, Register s2);
 854   inline void clrx( Register s1, Register s2);
 855 
 856   inline void clrb( Register s1, int simm13a);
 857   inline void clrh( Register s1, int simm13a);
 858   inline void clr(  Register s1, int simm13a);
 859   inline void clrx( Register s1, int simm13a);
 860 
 861   // copy & clear upper word
 862   inline void clruw( Register s, Register d ) { srl( s, G0, d); }
 863   // clear upper word
 864   inline void clruwu( Register d ) { srl( d, G0, d); }
 865 
 866   using Assembler::ldsb;
 867   using Assembler::ldsh;
 868   using Assembler::ldsw;
 869   using Assembler::ldub;
 870   using Assembler::lduh;
 871   using Assembler::lduw;
 872   using Assembler::ldx;
 873   using Assembler::ldd;
 874 
 875 #ifdef ASSERT
 876   // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
 877   inline void ld(Register s1, ByteSize simm13a, Register d);
 878 #endif
 879 
 880   inline void ld(Register s1, Register s2, Register d);
 881   inline void ld(Register s1, int simm13a, Register d);
 882 
 883   inline void ldsb(const Address& a, Register d, int offset = 0);
 884   inline void ldsh(const Address& a, Register d, int offset = 0);
 885   inline void ldsw(const Address& a, Register d, int offset = 0);
 886   inline void ldub(const Address& a, Register d, int offset = 0);
 887   inline void lduh(const Address& a, Register d, int offset = 0);
 888   inline void lduw(const Address& a, Register d, int offset = 0);
 889   inline void ldx( const Address& a, Register d, int offset = 0);
 890   inline void ld(  const Address& a, Register d, int offset = 0);
 891   inline void ldd( const Address& a, Register d, int offset = 0);
 892 
 893   inline void ldub(Register s1, RegisterOrConstant s2, Register d );
 894   inline void ldsb(Register s1, RegisterOrConstant s2, Register d );
 895   inline void lduh(Register s1, RegisterOrConstant s2, Register d );
 896   inline void ldsh(Register s1, RegisterOrConstant s2, Register d );
 897   inline void lduw(Register s1, RegisterOrConstant s2, Register d );
 898   inline void ldsw(Register s1, RegisterOrConstant s2, Register d );
 899   inline void ldx( Register s1, RegisterOrConstant s2, Register d );
 900   inline void ld(  Register s1, RegisterOrConstant s2, Register d );
 901   inline void ldd( Register s1, RegisterOrConstant s2, Register d );
 902 
 903   using Assembler::ldf;
 904   inline void ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d);
 905   inline void ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset = 0);
 906 
  // membar pseudo instruction.  Takes the target memory model into account.
 908   inline void membar( Assembler::Membar_mask_bits const7a );
 909 
  // Returns whether membar generates anything.
 911   inline bool membar_has_effect( Assembler::Membar_mask_bits const7a );
 912 
 913   // mov pseudo instructions
 914   inline void mov( Register s,  Register d) {
 915     if ( s != d )    or3( G0, s, d);
 916     else             assert_not_delayed();  // Put something useful in the delay slot!
 917   }
 918 
 919   inline void mov_or_nop( Register s,  Register d) {
 920     if ( s != d )    or3( G0, s, d);
 921     else             nop();
 922   }
 923 
 924   inline void mov( int simm13a, Register d) { or3( G0, simm13a, d); }
 925 
 926   using Assembler::prefetch;
 927   inline void prefetch(const Address& a, PrefetchFcn F, int offset = 0);
 928 
 929   using Assembler::stb;
 930   using Assembler::sth;
 931   using Assembler::stw;
 932   using Assembler::stx;
 933   using Assembler::std;
 934 
 935 #ifdef ASSERT
 936   // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
 937   inline void st(Register d, Register s1, ByteSize simm13a);
 938 #endif
 939 
 940   inline void st(Register d, Register s1, Register s2);
 941   inline void st(Register d, Register s1, int simm13a);
 942 
 943   inline void stb(Register d, const Address& a, int offset = 0 );
 944   inline void sth(Register d, const Address& a, int offset = 0 );
 945   inline void stw(Register d, const Address& a, int offset = 0 );
 946   inline void stx(Register d, const Address& a, int offset = 0 );
 947   inline void st( Register d, const Address& a, int offset = 0 );
 948   inline void std(Register d, const Address& a, int offset = 0 );
 949 
 950   inline void stb(Register d, Register s1, RegisterOrConstant s2 );
 951   inline void sth(Register d, Register s1, RegisterOrConstant s2 );
 952   inline void stw(Register d, Register s1, RegisterOrConstant s2 );
 953   inline void stx(Register d, Register s1, RegisterOrConstant s2 );
 954   inline void std(Register d, Register s1, RegisterOrConstant s2 );
 955   inline void st( Register d, Register s1, RegisterOrConstant s2 );
 956 
 957   using Assembler::stf;
 958   inline void stf(FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2);
 959   inline void stf(FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset = 0);
 960 
 961   // Note: offset is added to s2.
 962   using Assembler::sub;
 963   inline void sub(Register s1, RegisterOrConstant s2, Register d, int offset = 0);
 964 
 965   using Assembler::swap;
 966   inline void swap(Address& a, Register d, int offset = 0);
 967 
 968   // address pseudos: make these names unlike instruction names to avoid confusion
 969   inline intptr_t load_pc_address( Register reg, int bytes_to_skip );
 970   inline void load_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
 971   inline void load_bool_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
 972   inline void load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
 973   inline void store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset = 0);
 974   inline void store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset = 0);
 975   inline void jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset = 0);
 976   inline void jump_to(const AddressLiteral& addrlit, Register temp, int offset = 0);
 977   inline void jump_indirect_to(Address& a, Register temp, int ld_offset = 0, int jmp_offset = 0);
 978 
 979   // ring buffer traceable jumps
 980 
 981   void jmp2( Register r1, Register r2, const char* file, int line );
 982   void jmp ( Register r1, int offset,  const char* file, int line );
 983 
 984   void jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line);
 985   void jump (const AddressLiteral& addrlit, Register temp,             int offset, const char* file, int line);
 986 
 987 
 988   // argument pseudos:
 989 
 990   inline void load_argument( Argument& a, Register  d );
 991   inline void store_argument( Register s, Argument& a );
 992   inline void store_ptr_argument( Register s, Argument& a );
 993   inline void store_float_argument( FloatRegister s, Argument& a );
 994   inline void store_double_argument( FloatRegister s, Argument& a );
 995   inline void store_long_argument( Register s, Argument& a );
 996 
 997   // handy macros:
 998 
 999   inline void round_to( Register r, int modulus ) {
1000     assert_not_delayed();
1001     inc( r, modulus - 1 );
1002     and3( r, -modulus, r );
1003   }
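
  // For example (illustrative): round_to(r, 8) with r == 13 first adds 7
  // (r == 20) and then ands with -8, leaving r == 16, the smallest multiple
  // of 8 that is >= 13.  The modulus must be a power of two for the mask
  // trick to work.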
1004 
1005   // --------------------------------------------------
1006 
1007   // Functions for isolating 64 bit loads for LP64
1008   // ld_ptr will perform ld for 32 bit VM's and ldx for 64 bit VM's
1009   // st_ptr will perform st for 32 bit VM's and stx for 64 bit VM's
1010   inline void ld_ptr(Register s1, Register s2, Register d);
1011   inline void ld_ptr(Register s1, int simm13a, Register d);
1012   inline void ld_ptr(Register s1, RegisterOrConstant s2, Register d);
1013   inline void ld_ptr(const Address& a, Register d, int offset = 0);
1014   inline void st_ptr(Register d, Register s1, Register s2);
1015   inline void st_ptr(Register d, Register s1, int simm13a);
1016   inline void st_ptr(Register d, Register s1, RegisterOrConstant s2);
1017   inline void st_ptr(Register d, const Address& a, int offset = 0);
1018 
1019 #ifdef ASSERT
1020   // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
1021   inline void ld_ptr(Register s1, ByteSize simm13a, Register d);
1022   inline void st_ptr(Register d, Register s1, ByteSize simm13a);
1023 #endif
1024 
1025   // ld_long will perform ldd for 32 bit VM's and ldx for 64 bit VM's
1026   // st_long will perform std for 32 bit VM's and stx for 64 bit VM's
1027   inline void ld_long(Register s1, Register s2, Register d);
1028   inline void ld_long(Register s1, int simm13a, Register d);
1029   inline void ld_long(Register s1, RegisterOrConstant s2, Register d);
1030   inline void ld_long(const Address& a, Register d, int offset = 0);
1031   inline void st_long(Register d, Register s1, Register s2);
1032   inline void st_long(Register d, Register s1, int simm13a);
1033   inline void st_long(Register d, Register s1, RegisterOrConstant s2);
1034   inline void st_long(Register d, const Address& a, int offset = 0);
1035 
1036   // Helpers for address formation.
1037   // - They emit only a move if s2 is a constant zero.
1038   // - If dest is a constant and either s1 or s2 is a register, the temp argument is required and becomes the result.
1039   // - If dest is a register and either s1 or s2 is a non-simm13 constant, the temp argument is required and used to materialize the constant.
1040   RegisterOrConstant regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
1041   RegisterOrConstant regcon_inc_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
1042   RegisterOrConstant regcon_sll_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
1043 
1044   RegisterOrConstant ensure_simm13_or_reg(RegisterOrConstant src, Register temp) {
1045     if (is_simm13(src.constant_or_zero()))
1046       return src;               // register or short constant
1047     guarantee(temp != noreg, "constant offset overflow");
1048     set(src.as_constant(), temp);
1049     return temp;
1050   }
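
  // Illustrative sketch (not part of the build): ensure_simm13_or_reg lets
  // address-forming code accept either a small constant or a register; a
  // displacement that does not fit in 13 signed bits is materialized into the
  // supplied temp register.  The constant below is made up.
  //
  //   RegisterOrConstant d = ensure_simm13_or_reg(RegisterOrConstant(0x12345), G3_scratch);
  //   // d is now G3_scratch holding 0x12345; a simm13 constant would pass through unchanged.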
1051 
1052   // --------------------------------------------------
1053 
1054  public:
1055   // traps as per trap.h (SPARC ABI?)
1056 
1057   void breakpoint_trap();
1058   void breakpoint_trap(Condition c, CC cc);
1059 
1060   // Support for serializing memory accesses between threads
1061   void serialize_memory(Register thread, Register tmp1, Register tmp2);
1062 
1063   // Stack frame creation/removal
1064   void enter();
1065   void leave();
1066 
1067   // V8/V9 integer multiply
1068   void mult(Register s1, Register s2, Register d);
1069   void mult(Register s1, int simm13a, Register d);
1070 
1071   // Manipulation of C++ bools
  // These are idioms to flag the need for care when accessing bools; on
  // this platform we assume they are byte-sized.
1074 
1075   inline void stbool(Register d, const Address& a) { stb(d, a); }
1076   inline void ldbool(const Address& a, Register d) { ldub(a, d); }
1077   inline void movbool( bool boolconst, Register d) { mov( (int) boolconst, d); }
1078 
1079   // klass oop manipulations if compressed
1080   void load_klass(Register src_oop, Register klass);
1081   void store_klass(Register klass, Register dst_oop);
1082   void store_klass_gap(Register s, Register dst_oop);
1083 
1084    // oop manipulations
1085   void load_heap_oop(const Address& s, Register d);
1086   void load_heap_oop(Register s1, Register s2, Register d);
1087   void load_heap_oop(Register s1, int simm13a, Register d);
1088   void load_heap_oop(Register s1, RegisterOrConstant s2, Register d);
1089   void store_heap_oop(Register d, Register s1, Register s2);
1090   void store_heap_oop(Register d, Register s1, int simm13a);
1091   void store_heap_oop(Register d, const Address& a, int offset = 0);
1092 
1093   void encode_heap_oop(Register src, Register dst);
1094   void encode_heap_oop(Register r) {
1095     encode_heap_oop(r, r);
1096   }
1097   void decode_heap_oop(Register src, Register dst);
1098   void decode_heap_oop(Register r) {
1099     decode_heap_oop(r, r);
1100   }
1101   void encode_heap_oop_not_null(Register r);
1102   void decode_heap_oop_not_null(Register r);
1103   void encode_heap_oop_not_null(Register src, Register dst);
1104   void decode_heap_oop_not_null(Register src, Register dst);
1105 
1106   void encode_klass_not_null(Register r);
1107   void decode_klass_not_null(Register r);
1108   void encode_klass_not_null(Register src, Register dst);
1109   void decode_klass_not_null(Register src, Register dst);
1110 
  // Support for managing the JavaThread pointer (i.e., the reference to
1112   // thread-local information).
1113   void get_thread();                                // load G2_thread
1114   void verify_thread();                             // verify G2_thread contents
  void save_thread   (const Register thread_cache); // save to cache
1116   void restore_thread(const Register thread_cache); // restore from cache
1117 
1118   // Support for last Java frame (but use call_VM instead where possible)
1119   void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
1120   void reset_last_Java_frame(void);
1121 
1122   // Call into the VM.
1123   // Passes the thread pointer (in O0) as a prepended argument.
1124   // Makes sure oop return values are visible to the GC.
1125   void call_VM(Register oop_result, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
1126   void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
1127   void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
1128   void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
1129 
1130   // these overloadings are not presently used on SPARC:
1131   void call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
1132   void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
1133   void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
1134   void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
1135 
1136   void call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments = 0);
1137   void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1);
1138   void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2);
1139   void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3);
1140 
1141   void get_vm_result  (Register oop_result);
1142   void get_vm_result_2(Register metadata_result);
1143 
  // vm result is currently getting hijacked for oop preservation
1145   void set_vm_result(Register oop_result);
1146 
1147   // Emit the CompiledIC call idiom
1148   void ic_call(address entry, bool emit_delay = true);
1149 
1150   // if call_VM_base was called with check_exceptions=false, then call
1151   // check_and_forward_exception to handle exceptions when it is safe
1152   void check_and_forward_exception(Register scratch_reg);
1153 
  // Write to the card table for obj - the register is destroyed afterwards.
1155   void card_table_write(jbyte* byte_map_base, Register tmp, Register obj);
1156 
1157   void card_write_barrier_post(Register store_addr, Register new_val, Register tmp);
1158 
1159 #if INCLUDE_ALL_GCS
1160   // General G1 pre-barrier generator.
1161   void g1_write_barrier_pre(Register obj, Register index, int offset, Register pre_val, Register tmp, bool preserve_o_regs);
1162 
1163   // General G1 post-barrier generator
1164   void g1_write_barrier_post(Register store_addr, Register new_val, Register tmp);
1165 #endif // INCLUDE_ALL_GCS
1166 
1167   // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
1168   void push_fTOS();
1169 
1170   // pops double TOS element from CPU stack and pushes on FPU stack
1171   void pop_fTOS();
1172 
1173   void empty_FPU_stack();
1174 
1175   void push_IU_state();
1176   void pop_IU_state();
1177 
1178   void push_FPU_state();
1179   void pop_FPU_state();
1180 
1181   void push_CPU_state();
1182   void pop_CPU_state();
1183 
1184   // if the heap base register is used, reinit it with the correct value
1185   void reinit_heapbase();
1186 
1187   // Debugging
1188   void _verify_oop(Register reg, const char * msg, const char * file, int line);
1189   void _verify_oop_addr(Address addr, const char * msg, const char * file, int line);
1190 
1191   // TODO: verify_method and klass metadata (compare against vptr?)
1192   void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
1193   void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}
1194 
1195 #define verify_oop(reg) _verify_oop(reg, "broken oop " #reg, __FILE__, __LINE__)
1196 #define verify_oop_addr(addr) _verify_oop_addr(addr, "broken oop addr ", __FILE__, __LINE__)
1197 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
1198 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
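       // Typical use (sketch): after materializing an oop in a register,
       //   __ verify_oop(O0);
       // which expands to _verify_oop with the register name, __FILE__ and __LINE__ baked in.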
1199 
1200   // (the verify_oop macros above are active only with +VerifyOops)
1201   void verify_FPU(int stack_depth, const char* s = "illegal FPU state"); // only with +VerifyFPU
1202 
1203   void stop(const char* msg);                          // prints msg, dumps registers and stops execution
1204   void warn(const char* msg);                          // prints msg, but does not stop
1205   void untested(const char* what = "");
1206   void unimplemented(const char* what = "")      { char* b = new char[1024];  jio_snprintf(b, 1024, "unimplemented: %s", what);  stop(b); }
1207   void should_not_reach_here()                   { stop("should not reach here"); }
1208   void print_CPU_state();
1209 
1210   // oops in code
1211   AddressLiteral allocate_oop_address(jobject obj);                          // allocate_index
1212   AddressLiteral constant_oop_address(jobject obj);                          // find_index
1213   inline void    set_oop             (jobject obj, Register d);              // uses allocate_oop_address
1214   inline void    set_oop_constant    (jobject obj, Register d);              // uses constant_oop_address
1215   inline void    set_oop             (const AddressLiteral& obj_addr, Register d); // same as load_address
1216 
1217   // metadata in code that we have to keep track of
1218   AddressLiteral allocate_metadata_address(Metadata* obj); // allocate_index
1219   AddressLiteral constant_metadata_address(Metadata* obj); // find_index
1220   inline void    set_metadata             (Metadata* obj, Register d);              // uses allocate_metadata_address
1221   inline void    set_metadata_constant    (Metadata* obj, Register d);              // uses constant_metadata_address
1222   inline void    set_metadata             (const AddressLiteral& obj_addr, Register d); // same as load_address
1223 
1224   void set_narrow_oop( jobject obj, Register d );
1225   void set_narrow_klass( Klass* k, Register d );
1226 
1227   // nop padding
1228   void align(int modulus);
1229 
1230   // declare a safepoint
1231   void safepoint();
1232 
1233   // factor out part of stop into subroutine to save space
1234   void stop_subroutine();
1235   // factor out part of verify_oop into subroutine to save space
1236   void verify_oop_subroutine();
1237 
1238   // side-door communication with signalHandler in os_solaris.cpp
1239   static address _verify_oop_implicit_branch[3];
1240 
1241   int total_frame_size_in_bytes(int extraWords);
1242 
1243   // used when extraWords is known statically
1244   void save_frame(int extraWords = 0);
1245   void save_frame_c1(int size_in_bytes);
1246   // make a frame, and simultaneously pass up one or two register values
1247   // into the new register window
1248   void save_frame_and_mov(int extraWords, Register s1, Register d1, Register s2 = Register(), Register d2 = Register());
1249 
1250   // given the number of (outgoing) params, calculate the number of words needed on the frame
1251   void calc_mem_param_words(Register Rparam_words, Register Rresult);
1252 
1253   // used to calculate frame size dynamically
1254   // result is in bytes and must be negated for the save instruction
1255   void calc_frame_size(Register extraWords, Register resultReg);
1256 
1257   // calc and also save
1258   void calc_frame_size_and_save(Register extraWords, Register resultReg);
1259 
1260   static void debug(char* msg, RegistersForDebugging* outWindow);
1261 
1262   // implementations of bytecodes used by both interpreter and compiler
1263 
1264   void lcmp( Register Ra_hi, Register Ra_low,
1265              Register Rb_hi, Register Rb_low,
1266              Register Rresult);
1267 
1268   void lneg( Register Rhi, Register Rlow );
1269 
1270   void lshl(  Register Rin_high,  Register Rin_low,  Register Rcount,
1271               Register Rout_high, Register Rout_low, Register Rtemp );
1272 
1273   void lshr(  Register Rin_high,  Register Rin_low,  Register Rcount,
1274               Register Rout_high, Register Rout_low, Register Rtemp );
1275 
1276   void lushr( Register Rin_high,  Register Rin_low,  Register Rcount,
1277               Register Rout_high, Register Rout_low, Register Rtemp );
1278 
1279 #ifdef _LP64
1280   void lcmp( Register Ra, Register Rb, Register Rresult);
1281 #endif
1282 
1283   // Load and store values by size and signed-ness
1284   void load_sized_value( Address src, Register dst, size_t size_in_bytes, bool is_signed);
1285   void store_sized_value(Register src, Address dst, size_t size_in_bytes);
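       // Illustrative sketch (the address and registers are placeholders): load a
       // signed 16-bit field and store it back:
       //   __ load_sized_value (Address(O1, 0), O0, sizeof(jshort), /*is_signed*/ true);
       //   __ store_sized_value(O0, Address(O1, 0), sizeof(jshort));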
1286 
1287   void float_cmp( bool is_float, int unordered_result,
1288                   FloatRegister Fa, FloatRegister Fb,
1289                   Register Rresult);
1290 
1291   void save_all_globals_into_locals();
1292   void restore_globals_from_locals();
1293 
1294   // These set the icc condition code to equal if the lock succeeded
1295   // and notEqual if it failed and requires a slow case
1296   void compiler_lock_object(Register Roop, Register Rmark, Register Rbox,
1297                             Register Rscratch,
1298                             BiasedLockingCounters* counters = NULL,
1299                             bool try_bias = UseBiasedLocking);
1300   void compiler_unlock_object(Register Roop, Register Rmark, Register Rbox,
1301                               Register Rscratch,
1302                               bool try_bias = UseBiasedLocking);
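       // Illustrative sketch (registers and label are placeholders); icc is tested
       // right after the inline lock attempt, per the contract above:
       //   Label slow_path;
       //   __ compiler_lock_object(O0 /*oop*/, O1 /*mark*/, O2 /*box*/, O3 /*scratch*/);
       //   __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
       //   __ delayed()->nop();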
1303 
1304   // Biased locking support
1305   // Upon entry, lock_reg must point to the lock record on the stack,
1306   // obj_reg must contain the target object, and mark_reg must contain
1307   // the target object's header.
1308   // Destroys mark_reg if an attempt is made to bias an anonymously
1309   // biased lock. In this case a failure will go either to the slow
1310   // case or fall through with the notEqual condition code set with
1311   // the expectation that the slow case in the runtime will be called.
1312   // In the fall-through case where the CAS-based lock is done,
1313   // mark_reg is not destroyed.
1314   void biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg,
1315                             Label& done, Label* slow_case = NULL,
1316                             BiasedLockingCounters* counters = NULL);
1317   // Upon entry, the base register of mark_addr must contain the oop.
1318   // Destroys temp_reg.
1319 
1320   // If allow_delay_slot_filling is set to true, the next instruction
1321   // emitted after this one will go in an annulled delay slot if the
1322   // biased locking exit case failed.
1323   void biased_locking_exit(Address mark_addr, Register temp_reg, Label& done, bool allow_delay_slot_filling = false);
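       // Illustrative sketch of the exit path (registers and the done label are
       // placeholders); per the contract above, the base register of mark_addr
       // holds the oop itself:
       //   __ biased_locking_exit(Address(O0, oopDesc::mark_offset_in_bytes()), G3_scratch, done);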
1324 
1325   // allocation
1326   void eden_allocate(
1327     Register obj,                      // result: pointer to object after successful allocation
1328     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
1329     int      con_size_in_bytes,        // object size in bytes if   known at compile time
1330     Register t1,                       // temp register
1331     Register t2,                       // temp register
1332     Label&   slow_case                 // continuation point if fast allocation fails
1333   );
1334   void tlab_allocate(
1335     Register obj,                      // result: pointer to object after successful allocation
1336     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
1337     int      con_size_in_bytes,        // object size in bytes if   known at compile time
1338     Register t1,                       // temp register
1339     Label&   slow_case                 // continuation point if fast allocation fails
1340   );
1341   void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case);
1342   void incr_allocated_bytes(RegisterOrConstant size_in_bytes,
1343                             Register t1, Register t2);
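       // Illustrative fast-path sketch (registers, the 16-byte size and the label are
       // placeholders; passing noreg for var_size_in_bytes when the size is a
       // compile-time constant is an assumption of this sketch):
       //   Label slow;
       //   __ tlab_allocate(O0, noreg, 16, G3_scratch, slow);
       //   // on fall-through O0 points to the newly allocated, uninitialized object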
1344 
1345   // interface method calling
1346   void lookup_interface_method(Register recv_klass,
1347                                Register intf_klass,
1348                                RegisterOrConstant itable_index,
1349                                Register method_result,
1350                                Register temp_reg, Register temp2_reg,
1351                                Label& no_such_interface);
1352 
1353   // virtual method calling
1354   void lookup_virtual_method(Register recv_klass,
1355                              RegisterOrConstant vtable_index,
1356                              Register method_result);
1357 
1358   // Test sub_klass against super_klass, with fast and slow paths.
1359 
1360   // The fast path produces a tri-state answer: yes / no / maybe-slow.
1361   // One of the three labels can be NULL, meaning take the fall-through.
1362   // If super_check_offset is -1, the value is loaded up from super_klass.
1363   // No registers are killed, except temp_reg and temp2_reg.
1364   // If super_check_offset is not -1, temp2_reg is not used and can be noreg.
1365   void check_klass_subtype_fast_path(Register sub_klass,
1366                                      Register super_klass,
1367                                      Register temp_reg,
1368                                      Register temp2_reg,
1369                                      Label* L_success,
1370                                      Label* L_failure,
1371                                      Label* L_slow_path,
1372                 RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
1373 
1374   // The rest of the type check; must be wired to a corresponding fast path.
1375   // It does not repeat the fast path logic, so don't use it standalone.
1376   // The temp_reg can be noreg, if no temps are available.
1377   // It can also be sub_klass or super_klass, meaning it's OK to kill that one.
1378   // Updates the sub's secondary super cache as necessary.
1379   void check_klass_subtype_slow_path(Register sub_klass,
1380                                      Register super_klass,
1381                                      Register temp_reg,
1382                                      Register temp2_reg,
1383                                      Register temp3_reg,
1384                                      Register temp4_reg,
1385                                      Label* L_success,
1386                                      Label* L_failure);
1387 
1388   // Simplified, combined version, good for typical uses.
1389   // Falls through on failure.
1390   void check_klass_subtype(Register sub_klass,
1391                            Register super_klass,
1392                            Register temp_reg,
1393                            Register temp2_reg,
1394                            Label& L_success);
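       // Illustrative sketch of the combined check (registers and label are placeholders):
       //   Label L_ok;
       //   __ check_klass_subtype(O1 /*sub*/, O2 /*super*/, O3, O4, L_ok);
       //   // fall-through here means failure, e.g. branch to a throwing stub
       //   __ bind(L_ok);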
1395 
1396   // method handles (JSR 292)
1397   // offset relative to Gargs of argument at tos[arg_slot].
1398   // (arg_slot == 0 means the last argument, not the first).
1399   RegisterOrConstant argument_offset(RegisterOrConstant arg_slot,
1400                                      Register temp_reg,
1401                                      int extra_slot_offset = 0);
1402   // Address formed from Gargs plus argument_offset.
1403   Address            argument_address(RegisterOrConstant arg_slot,
1404                                       Register temp_reg = noreg,
1405                                       int extra_slot_offset = 0);
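       // Illustrative sketch (the slot and temp register are placeholders): load the
       // last pushed argument, i.e. arg_slot == 0:
       //   int arg_slot = 0;
       //   __ ld_ptr(__ argument_address(arg_slot, G3_scratch), O0);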
1406 
1407   // Stack overflow checking
1408 
1409   // Note: this clobbers G3_scratch
1410   void bang_stack_with_offset(int offset) {
1411     // stack grows down, caller passes positive offset
1412     assert(offset > 0, "must bang with positive offset");
1413     set((-offset)+STACK_BIAS, G3_scratch);
1414     st(G0, SP, G3_scratch);
1415   }
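       // Illustrative sketch: banging each shadow page in turn (the loop shape is an
       // assumption of this sketch, not part of this interface):
       //   for (int pages = 1; pages <= StackShadowPages; pages++) {
       //     bang_stack_with_offset(pages * os::vm_page_size());
       //   }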
1416 
1417   // Writes to successive stack pages, until the given offset is reached, to check
1418   // for stack overflow and the shadow pages.  Clobbers the tsp and scratch registers.
1419   void bang_stack_size(Register Rsize, Register Rtsp, Register Rscratch);
1420 
1421   virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset);
1422 
1423   void verify_tlab();
1424 
1425   Condition negate_condition(Condition cond);
1426 
1427   // Helper functions for statistics gathering.
1428   // Conditionally (non-atomically) increments passed counter address, preserving condition codes.
1429   void cond_inc(Condition cond, address counter_addr, Register Rtemp1, Register Rtemp2);
1430   // Unconditional increment.
1431   void inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2);
1432   void inc_counter(int*    counter_addr, Register Rtmp1, Register Rtmp2);
1433 
1434   // Compare char[] arrays aligned to 4 bytes.
1435   void char_arrays_equals(Register ary1, Register ary2,
1436                           Register limit, Register result,
1437                           Register chr1, Register chr2, Label& Ldone);
1438   // Use BIS for zeroing
1439   void bis_zeroing(Register to, Register count, Register temp, Label& Ldone);
1440 
1441 #undef VIRTUAL
1442 };
1443 
1444 /**
1445  * class SkipIfEqual:
1446  *
1447  * Instantiating this class emits assembly code that jumps around any code
1448  * emitted between the creation of the instance and its automatic destruction
1449  * at the end of the enclosing scope block, depending on the value of the flag
1450  * passed to the constructor, which is checked at run-time.
1451  */
1452 class SkipIfEqual : public StackObj {
1453  private:
1454   MacroAssembler* _masm;
1455   Label _label;
1456 
1457  public:
1458    // 'temp' is a temp register that this object can use (and trash)
1459    SkipIfEqual(MacroAssembler*, Register temp,
1460                const bool* flag_addr, Assembler::Condition condition);
1461    ~SkipIfEqual();
1462 };
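     // Illustrative use (masm, the flag, register and condition are placeholders):
     //   {
     //     SkipIfEqual skip(masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
     //     // code emitted here is jumped over at run-time while the flag is zero
     //   }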
1463 
1464 #endif // CPU_SPARC_VM_MACROASSEMBLER_SPARC_HPP