/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP
#define CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP

#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"

inline bool Address::is_simm13(int offset) { return Assembler::is_simm13(disp() + offset); }


inline int AddressLiteral::low10() const {
  return Assembler::low10(value());
}


inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
  jint& stub_inst = *(jint*) branch;
  stub_inst = patched_branch(target - branch, stub_inst, 0);
}
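// (Rewrites the branch instruction word at `branch` in place, recomputing its
// displacement field so the branch reaches `target`.)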

// Use the right loads/stores for the platform
inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
  Assembler::ldx(s1, s2, d);
}

inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
  Assembler::ldx(s1, simm13a, d);
}

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d ) {
  ld_ptr(s1, in_bytes(simm13a), d);
}
#endif

inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
  ldx(s1, s2, d);
}

inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
  ldx(a, d, offset);
}

inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
  Assembler::stx(d, s1, s2);
}

inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
  Assembler::stx(d, s1, simm13a);
}

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a ) {
  st_ptr(d, s1, in_bytes(simm13a));
}
#endif

inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
  stx(d, s1, s2);
}

inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
  stx(d, a, offset);
}

// Use the right loads/stores for the platform
inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) {
  Assembler::ldx(s1, s2, d);
}

inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) {
  Assembler::ldx(s1, simm13a, d);
}

inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) {
  ldx(s1, s2, d);
}

inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
  ldx(a, d, offset);
}

inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) {
  Assembler::stx(d, s1, s2);
}

inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) {
  Assembler::stx(d, s1, simm13a);
}

inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) {
  stx(d, s1, s2);
}

inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) {
  stx(d, a, offset);
}

inline void MacroAssembler::stbool(Register d, const Address& a) { stb(d, a); }
inline void MacroAssembler::ldbool(const Address& a, Register d) { ldub(a, d); }
inline void MacroAssembler::movbool( bool boolconst, Register d) { mov( (int) boolconst, d); }


inline void MacroAssembler::signx( Register s, Register d ) { sra( s, G0, d); }
inline void MacroAssembler::signx( Register d )             { sra( d, G0, d); }
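// (sra with shift count %g0, i.e. zero, sign-extends the low 32 bits of the source
// into the full 64-bit destination register.)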

inline void MacroAssembler::not1( Register s, Register d ) { xnor( s, G0, d ); }
inline void MacroAssembler::not1( Register d )             { xnor( d, G0, d ); }

inline void MacroAssembler::neg( Register s, Register d ) { sub( G0, s, d ); }
inline void MacroAssembler::neg( Register d )             { sub( G0, d, d ); }

inline void MacroAssembler::cas(  Register s1, Register s2, Register d) { casa( s1, s2, d, ASI_PRIMARY); }
inline void MacroAssembler::casx( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY); }

// Functions for isolating 64 bit atomic swaps for LP64
// cas_ptr will perform cas for 32 bit VMs and casx for 64 bit VMs
inline void MacroAssembler::cas_ptr(  Register s1, Register s2, Register d) {
  casx( s1, s2, d );
}
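// (casx semantics: the 64-bit value at [s1] is compared with s2; if they match, d is
// stored to [s1]. In either case d receives the previous memory value, so callers can
// compare d with s2 afterwards to see whether the swap succeeded.)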

// Functions for isolating 64 bit shifts for LP64

inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
  Assembler::sllx(s1, s2, d);
}

inline void MacroAssembler::sll_ptr( Register s1, int imm6a,   Register d ) {
  Assembler::sllx(s1, imm6a, d);
}

inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
  Assembler::srlx(s1, s2, d);
}

inline void MacroAssembler::srl_ptr( Register s1, int imm6a,   Register d ) {
  Assembler::srlx(s1, imm6a, d);
}

inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
  if (s2.is_register())  sll_ptr(s1, s2.as_register(), d);
  else                   sll_ptr(s1, s2.as_constant(), d);
}

inline void MacroAssembler::casl(  Register s1, Register s2, Register d) { casa( s1, s2, d, ASI_PRIMARY_LITTLE); }
inline void MacroAssembler::casxl( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY_LITTLE); }

inline void MacroAssembler::inc(   Register d,  int const13 ) { add(   d, const13, d); }
inline void MacroAssembler::inccc( Register d,  int const13 ) { addcc( d, const13, d); }

inline void MacroAssembler::dec(   Register d,  int const13 ) { sub(   d, const13, d); }
inline void MacroAssembler::deccc( Register d,  int const13 ) { subcc( d, const13, d); }

// Use the right branch for the platform

inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::bp(c, a, icc, p, d, rt);
}

inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
  insert_nop_after_cbcond();
  br(c, a, p, target(L));
}
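// (insert_nop_after_cbcond() pads with a nop when the previously emitted instruction was a
// CBCOND compare-and-branch, so that the branch emitted here does not immediately follow it.)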


// Branch that tests either xcc or icc depending on the
// architecture compiled (LP64 or not)
inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::bp(c, a, xcc, p, d, rt);
}

inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
  insert_nop_after_cbcond();
  brx(c, a, p, target(L));
}

inline void MacroAssembler::ba( Label& L ) {
  br(always, false, pt, L);
}

// Warning: V9 only functions
inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::bp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::bp(c, a, cc, p, L);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  fbp(c, a, fcc0, p, d, rt);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
  insert_nop_after_cbcond();
  fb(c, a, p, target(L));
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::fbp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::fbp(c, a, cc, p, L);
}

inline void MacroAssembler::jmp( Register s1, Register s2 ) { jmpl( s1, s2, G0 ); }
inline void MacroAssembler::jmp( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, G0, rspec); }

inline bool MacroAssembler::is_far_target(address d) {
  if (ForceUnreachable) {
    // References outside the code cache should be treated as far
    return d < CodeCache::low_bound() || d > CodeCache::high_bound();
  }
  return !is_in_wdisp30_range(d, CodeCache::low_bound()) || !is_in_wdisp30_range(d, CodeCache::high_bound());
}

// Call with a check to see if we need to deal with the added
// expense of relocation and if we overflow the displacement
// of the quick call instruction.
inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
  MacroAssembler::call(d, Relocation::spec_simple(rt));
}

inline void MacroAssembler::call( address d, RelocationHolder const& rspec ) {
  intptr_t disp;
  // NULL is ok because it will be relocated later.
  // Must change NULL to a reachable address in order to
  // pass asserts here and in wdisp.
  if ( d == NULL )
    d = pc();

  // Is this address within range of the call instruction?
  // If not, use the expensive instruction sequence
  if (is_far_target(d)) {
    relocate(rspec);
    AddressLiteral dest(d);
    jumpl_to(dest, O7, O7);
  } else {
    Assembler::call(d, rspec);
  }
}
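// (In the far case above, the target is materialized into O7 with a patchable sethi-style
// sequence and reached via a jmpl that also writes its own address into O7, so the callee
// sees the same return linkage as with a plain near call.)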

inline void MacroAssembler::call( Label& L,   relocInfo::relocType rt ) {
  insert_nop_after_cbcond();
  MacroAssembler::call( target(L), rt);
}



inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); }
inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); }

// prefetch instruction
inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
  Assembler::bp( never, true, xcc, pt, d, rt );
}
inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }

inline void MacroAssembler::tst( Register s ) { orcc( G0, s, G0 ); }

inline void MacroAssembler::ret( bool trace ) {
  if (trace) {
    mov(I7, O7); // traceable register
    JMP(O7, 2 * BytesPerInstWord);
  } else {
    jmpl( I7, 2 * BytesPerInstWord, G0 );
  }
}

inline void MacroAssembler::retl( bool trace ) {
  if (trace) {
    JMP(O7, 2 * BytesPerInstWord);
  } else {
    jmpl( O7, 2 * BytesPerInstWord, G0 );
  }
}
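// (Both return sequences jump to the saved return address plus 2 instruction words, i.e. 8
// bytes, skipping the original call and its delay slot; ret returns from a full frame via I7,
// retl from a leaf routine via O7.)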

// clobbers o7 on V8!!
// returns delta from gotten pc to addr after
inline int MacroAssembler::get_pc( Register d ) {
  int x = offset();
  rdpc(d);
  return offset() - x;
}

inline void MacroAssembler::cmp(  Register s1, Register s2 ) { subcc( s1, s2, G0 ); }
inline void MacroAssembler::cmp(  Register s1, int simm13a ) { subcc( s1, simm13a, G0 ); }

// Note:  All MacroAssembler::set_foo functions are defined out-of-line.


// Loads the current PC of the following instruction as an immediate value in
// 2 instructions.  All PCs in the CodeCache are within 2 Gig of each other.
inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
  intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
  Unimplemented();
  return thepc;
}


inline void MacroAssembler::load_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, d);
  } else {
    sethi(addrlit, d);
  }
  ld(d, addrlit.low10() + offset, d);
}
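// (The sequence above is the usual absolute load, roughly:
//    sethi %hi(addrlit), d
//    ld    [d + %lo(addrlit) + offset], d
//  with a fixed-length patchable_sethi substituted when ForceUnreachable is set; the
//  load_*/store_* helpers that follow share the same shape.)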


inline void MacroAssembler::load_bool_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, d);
  } else {
    sethi(addrlit, d);
  }
  ldub(d, addrlit.low10() + offset, d);
}


inline void MacroAssembler::load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, d);
  } else {
    sethi(addrlit, d);
  }
  ld_ptr(d, addrlit.low10() + offset, d);
}


inline void MacroAssembler::store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, temp);
  } else {
    sethi(addrlit, temp);
  }
  st(s, temp, addrlit.low10() + offset);
}


inline void MacroAssembler::store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, temp);
  } else {
    sethi(addrlit, temp);
  }
  st_ptr(s, temp, addrlit.low10() + offset);
}


// This code sequence is relocatable to any address, even on LP64.
inline void MacroAssembler::jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  jmpl(temp, addrlit.low10() + offset, d);
}


inline void MacroAssembler::jump_to(const AddressLiteral& addrlit, Register temp, int offset) {
  jumpl_to(addrlit, temp, G0, offset);
}


inline void MacroAssembler::jump_indirect_to(Address& a, Register temp,
                                             int ld_offset, int jmp_offset) {
  assert_not_delayed();
  //sethi(al);                   // sethi is caller responsibility for this one
  ld_ptr(a, temp, ld_offset);
  jmp(temp, jmp_offset);
}


inline void MacroAssembler::set_metadata(Metadata* obj, Register d) {
  set_metadata(allocate_metadata_address(obj), d);
}

inline void MacroAssembler::set_metadata_constant(Metadata* obj, Register d) {
  set_metadata(constant_metadata_address(obj), d);
}

inline void MacroAssembler::set_metadata(const AddressLiteral& obj_addr, Register d) {
  assert(obj_addr.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  set(obj_addr, d);
}

inline void MacroAssembler::set_oop(jobject obj, Register d) {
  set_oop(allocate_oop_address(obj), d);
}


inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
  set_oop(constant_oop_address(obj), d);
}


inline void MacroAssembler::set_oop(const AddressLiteral& obj_addr, Register d) {
  assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  set(obj_addr, d);
}


inline void MacroAssembler::load_argument( Argument& a, Register  d ) {
  if (a.is_register())
    mov(a.as_register(), d);
  else
    ld (a.as_address(),  d);
}

inline void MacroAssembler::store_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    st_ptr (s, a.as_address());         // ABI says everything is right justified.
}

inline void MacroAssembler::store_ptr_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    st_ptr (s, a.as_address());
}


inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) {
  if (a.is_float_register())
    // The V9 ABI passes float arguments in F1, F3, F5 instead of O0, O1, O2
    fmov(FloatRegisterImpl::S, s, a.as_float_register() );
  else
    // Floats are stored in the high half of the stack entry;
    // the low half is undefined per the ABI.
    stf(FloatRegisterImpl::S, s, a.as_address(), sizeof(jfloat));
}

inline void MacroAssembler::store_double_argument( FloatRegister s, Argument& a ) {
  if (a.is_float_register())
    // The V9 ABI passes double arguments in D0, D2, D4 instead of O0, O1, O2
    fmov(FloatRegisterImpl::D, s, a.as_double_register() );
  else
    stf(FloatRegisterImpl::D, s, a.as_address());
}

inline void MacroAssembler::store_long_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    stx(s, a.as_address());
}

inline void MacroAssembler::round_to( Register r, int modulus ) {
  assert_not_delayed();
  inc( r, modulus - 1 );
  and3( r, -modulus, r );
}
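// (Example: round_to(r, 8) adds 7 and then masks with -8, rounding r up to the next
// multiple of 8. The mask trick assumes modulus is a power of two.)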

inline void MacroAssembler::add(Register s1, int simm13a, Register d, relocInfo::relocType rtype) {
  relocate(rtype);
  add(s1, simm13a, d);
}
inline void MacroAssembler::add(Register s1, int simm13a, Register d, RelocationHolder const& rspec) {
  relocate(rspec);
  add(s1, simm13a, d);
}

// form effective addresses this way:
inline void MacroAssembler::add(const Address& a, Register d, int offset) {
  if (a.has_index())   add(a.base(), a.index(),         d);
  else               { add(a.base(), a.disp() + offset, d, a.rspec(offset)); offset = 0; }
  if (offset != 0)     add(d,        offset,            d);
}
inline void MacroAssembler::add(Register s1, RegisterOrConstant s2, Register d, int offset) {
  if (s2.is_register())  add(s1, s2.as_register(),          d);
  else                 { add(s1, s2.as_constant() + offset, d); offset = 0; }
  if (offset != 0)       add(d,  offset,                    d);
}

inline void MacroAssembler::andn(Register s1, RegisterOrConstant s2, Register d) {
  if (s2.is_register())  andn(s1, s2.as_register(), d);
  else                   andn(s1, s2.as_constant(), d);
}

inline void MacroAssembler::btst( Register s1,  Register s2 ) { andcc( s1, s2, G0 ); }
inline void MacroAssembler::btst( int simm13a,  Register s )  { andcc( s,  simm13a, G0 ); }

inline void MacroAssembler::bset( Register s1,  Register s2 ) { or3( s1, s2, s2 ); }
inline void MacroAssembler::bset( int simm13a,  Register s )  { or3( s,  simm13a, s ); }

inline void MacroAssembler::bclr( Register s1,  Register s2 ) { andn( s1, s2, s2 ); }
inline void MacroAssembler::bclr( int simm13a,  Register s )  { andn( s,  simm13a, s ); }

inline void MacroAssembler::btog( Register s1,  Register s2 ) { xor3( s1, s2, s2 ); }
inline void MacroAssembler::btog( int simm13a,  Register s )  { xor3( s,  simm13a, s ); }

inline void MacroAssembler::clr( Register d ) { or3( G0, G0, d ); }

inline void MacroAssembler::clrb( Register s1, Register s2) { stb( G0, s1, s2 ); }
inline void MacroAssembler::clrh( Register s1, Register s2) { sth( G0, s1, s2 ); }
inline void MacroAssembler::clr(  Register s1, Register s2) { stw( G0, s1, s2 ); }
inline void MacroAssembler::clrx( Register s1, Register s2) { stx( G0, s1, s2 ); }

inline void MacroAssembler::clrb( Register s1, int simm13a) { stb( G0, s1, simm13a); }
inline void MacroAssembler::clrh( Register s1, int simm13a) { sth( G0, s1, simm13a); }
inline void MacroAssembler::clr(  Register s1, int simm13a) { stw( G0, s1, simm13a); }
inline void MacroAssembler::clrx( Register s1, int simm13a) { stx( G0, s1, simm13a); }

inline void MacroAssembler::clruw( Register s, Register d ) { srl( s, G0, d); }
inline void MacroAssembler::clruwu( Register d ) { srl( d, G0, d); }

// Make all 32 bit loads signed so 64 bit registers maintain proper sign
inline void MacroAssembler::ld(  Register s1, Register s2, Register d)      { ldsw( s1, s2, d); }
inline void MacroAssembler::ld(  Register s1, int simm13a, Register d)      { ldsw( s1, simm13a, d); }

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
#endif

inline void MacroAssembler::ld(  const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ld(  a.base(), a.index(),         d); }
  else               {                          ld(  a.base(), a.disp() + offset, d); }
}

inline void MacroAssembler::ldsb(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsb(a.base(), a.index(),         d); }
  else               {                          ldsb(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldsh(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsh(a.base(), a.index(),         d); }
  else               {                          ldsh(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldsw(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsw(a.base(), a.index(),         d); }
  else               {                          ldsw(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldub(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldub(a.base(), a.index(),         d); }
  else               {                          ldub(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::lduh(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); lduh(a.base(), a.index(),         d); }
  else               {                          lduh(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::lduw(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); lduw(a.base(), a.index(),         d); }
  else               {                          lduw(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldd( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldd( a.base(), a.index(),         d); }
  else               {                          ldd( a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldx( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldx( a.base(), a.index(),         d); }
  else               {                          ldx( a.base(), a.disp() + offset, d); }
}

inline void MacroAssembler::ldub(Register s1, RegisterOrConstant s2, Register d) { ldub(Address(s1, s2), d); }
inline void MacroAssembler::ldsb(Register s1, RegisterOrConstant s2, Register d) { ldsb(Address(s1, s2), d); }
inline void MacroAssembler::lduh(Register s1, RegisterOrConstant s2, Register d) { lduh(Address(s1, s2), d); }
inline void MacroAssembler::ldsh(Register s1, RegisterOrConstant s2, Register d) { ldsh(Address(s1, s2), d); }
inline void MacroAssembler::lduw(Register s1, RegisterOrConstant s2, Register d) { lduw(Address(s1, s2), d); }
inline void MacroAssembler::ldsw(Register s1, RegisterOrConstant s2, Register d) { ldsw(Address(s1, s2), d); }
inline void MacroAssembler::ldx( Register s1, RegisterOrConstant s2, Register d) { ldx( Address(s1, s2), d); }
inline void MacroAssembler::ld(  Register s1, RegisterOrConstant s2, Register d) { ld(  Address(s1, s2), d); }
inline void MacroAssembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }

inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d) {
  if (s2.is_register())  ldf(w, s1, s2.as_register(), d);
  else                   ldf(w, s1, s2.as_constant(), d);
}

inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) {
  relocate(a.rspec(offset));
  if (a.has_index()) {
    assert(offset == 0, "");
    ldf(w, a.base(), a.index(), d);
  } else {
    ldf(w, a.base(), a.disp() + offset, d);
  }
}

inline void MacroAssembler::lduwl(Register s1, Register s2, Register d) { lduwa(s1, s2, ASI_PRIMARY_LITTLE, d); }
inline void MacroAssembler::ldswl(Register s1, Register s2, Register d) { ldswa(s1, s2, ASI_PRIMARY_LITTLE, d);}
inline void MacroAssembler::ldxl( Register s1, Register s2, Register d) { ldxa(s1, s2, ASI_PRIMARY_LITTLE, d); }
inline void MacroAssembler::ldfl(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { ldfa(w, s1, s2, ASI_PRIMARY_LITTLE, d); }

// Returns whether membar would generate any instruction; this code should
// mirror membar below.
inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
  if (!os::is_MP())
    return false;  // Not needed on single CPU
  const Membar_mask_bits effective_mask =
      Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
  return (effective_mask != 0);
}

inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
  // Uniprocessors do not need memory barriers
  if (!os::is_MP())
    return;
  // Weakened for current Sparcs and TSO.  See the v9 manual, sections 8.4.3,
  // 8.4.4.3, a.31 and a.50.
  // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
  // of the mmask subfield of const7a that does anything that isn't done
  // implicitly is StoreLoad.
  const Membar_mask_bits effective_mask =
      Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
  if (effective_mask != 0) {
    Assembler::membar(effective_mask);
  }
}
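// (Example: under TSO only StoreLoad ordering needs an explicit fence, so membar(StoreLoad)
// emits a membar #StoreLoad instruction while membar(LoadLoad) emits nothing at all.)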

inline void MacroAssembler::mov(Register s, Register d) {
  if (s != d) {
    or3(G0, s, d);
  } else {
    assert_not_delayed();  // Put something useful in the delay slot!
  }
}

inline void MacroAssembler::mov_or_nop(Register s, Register d) {
  if (s != d) {
    or3(G0, s, d);
  } else {
    nop();
  }
}

inline void MacroAssembler::mov( int simm13a, Register d) { or3( G0, simm13a, d); }

inline void MacroAssembler::prefetch(const Address& a, PrefetchFcn f, int offset) {
  relocate(a.rspec(offset));
  assert(!a.has_index(), "");
  prefetch(a.base(), a.disp() + offset, f);
}

inline void MacroAssembler::st(Register d, Register s1, Register s2)      { stw(d, s1, s2); }
inline void MacroAssembler::st(Register d, Register s1, int simm13a)      { stw(d, s1, simm13a); }

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st(Register d, Register s1, ByteSize simm13a) { stw(d, s1, in_bytes(simm13a)); }
#endif

inline void MacroAssembler::st(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); st( d, a.base(), a.index()        ); }
  else               {                          st( d, a.base(), a.disp() + offset); }
}

inline void MacroAssembler::stb(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stb(d, a.base(), a.index()        ); }
  else               {                          stb(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::sth(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); sth(d, a.base(), a.index()        ); }
  else               {                          sth(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::stw(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stw(d, a.base(), a.index()        ); }
  else               {                          stw(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::std(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); std(d, a.base(), a.index()        ); }
  else               {                          std(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::stx(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stx(d, a.base(), a.index()        ); }
  else               {                          stx(d, a.base(), a.disp() + offset); }
}

inline void MacroAssembler::stb(Register d, Register s1, RegisterOrConstant s2) { stb(d, Address(s1, s2)); }
inline void MacroAssembler::sth(Register d, Register s1, RegisterOrConstant s2) { sth(d, Address(s1, s2)); }
inline void MacroAssembler::stw(Register d, Register s1, RegisterOrConstant s2) { stw(d, Address(s1, s2)); }
inline void MacroAssembler::stx(Register d, Register s1, RegisterOrConstant s2) { stx(d, Address(s1, s2)); }
inline void MacroAssembler::std(Register d, Register s1, RegisterOrConstant s2) { std(d, Address(s1, s2)); }
inline void MacroAssembler::st( Register d, Register s1, RegisterOrConstant s2) { st( d, Address(s1, s2)); }

inline void MacroAssembler::stf(FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2) {
  if (s2.is_register())  stf(w, d, s1, s2.as_register());
  else                   stf(w, d, s1, s2.as_constant());
}

inline void MacroAssembler::stf(FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset) {
  relocate(a.rspec(offset));
  if (a.has_index()) { assert(offset == 0, ""); stf(w, d, a.base(), a.index()        ); }
  else               {                          stf(w, d, a.base(), a.disp() + offset); }
}

inline void MacroAssembler::sub(Register s1, RegisterOrConstant s2, Register d, int offset) {
  if (s2.is_register())  sub(s1, s2.as_register(),          d);
  else                 { sub(s1, s2.as_constant() + offset, d); offset = 0; }
  if (offset != 0)       sub(d,  offset,                    d);
}

inline void MacroAssembler::swap(const Address& a, Register d, int offset) {
  relocate(a.rspec(offset));
  if (a.has_index()) { assert(offset == 0, ""); swap(a.base(), a.index(), d        ); }
  else               {                          swap(a.base(), a.disp() + offset, d); }
}

inline void MacroAssembler::bang_stack_with_offset(int offset) {
  // stack grows down, caller passes positive offset
  assert(offset > 0, "must bang with negative offset");
  set((-offset)+STACK_BIAS, G3_scratch);
  st(G0, SP, G3_scratch);
}
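// (Example: bang_stack_with_offset(page_size) stores G0 at (unbiased SP) - page_size, i.e.
// one page below the current stack pointer, which is how stack-overflow probing is done.)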

#endif // CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP