src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp

  Assembler::srl( s1, s2, d);
#endif
}

inline void MacroAssembler::srl_ptr( Register s1, int imm6a,   Register d ) {
#ifdef _LP64
  Assembler::srlx(s1, imm6a, d);
#else
  Assembler::srl( s1, imm6a, d);
#endif
}

inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
  if (s2.is_register())  sll_ptr(s1, s2.as_register(), d);
  else                   sll_ptr(s1, s2.as_constant(), d);
}

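// Illustrative note (not from the original source): RegisterOrConstant lets
// one macro cover both the register-register and register-immediate
// encodings; the is_register() dispatch above is the standard pattern.
// Hypothetical calls (operand choices are arbitrary):
//
//   sll_ptr(O0, RegisterOrConstant(LogBytesPerWord), O1);  // immediate shift count
//   sll_ptr(O0, RegisterOrConstant(O2), O1);               // register shift count
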
// Use the right branch for the platform

inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  if (VM_Version::v9_instructions_work())
    Assembler::bp(c, a, icc, p, d, rt);
  else
    Assembler::br(c, a, d, rt);
}

inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
  br(c, a, p, target(L));
}


// Branch that tests either xcc or icc depending on the
// architecture compiled (LP64 or not)
inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
#ifdef _LP64
    Assembler::bp(c, a, xcc, p, d, rt);
#else
    MacroAssembler::br(c, a, p, d, rt);
#endif
}

inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
  brx(c, a, p, target(L));
}

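// Illustrative example (not from the original source): a typical pointer
// compare-and-branch goes through brx so the same code tests xcc on LP64
// and icc on 32-bit builds.  Register and label names here are arbitrary:
//
//   cmp(O0, O1);                    // sets both icc and xcc
//   brx(equal, false, pt, L_done);  // taken iff O0 == O1 at pointer width
//   delayed()->nop();               // the branch delay slot must be filled
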
inline void MacroAssembler::ba( Label& L ) {
  br(always, false, pt, L);
}

// Warning: V9-only functions
inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::bp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::bp(c, a, cc, p, L);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  if (VM_Version::v9_instructions_work())
    fbp(c, a, fcc0, p, d, rt);
  else
    Assembler::fb(c, a, d, rt);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
  fb(c, a, p, target(L));
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::fbp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::fbp(c, a, cc, p, L);
}

inline void MacroAssembler::jmp( Register s1, Register s2 ) { jmpl( s1, s2, G0 ); }
inline void MacroAssembler::jmp( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, G0, rspec); }

inline bool MacroAssembler::is_far_target(address d) {
  if (ForceUnreachable) {
    // References outside the code cache should be treated as far


    jumpl_to(dest, O7, O7);
  } else {
    Assembler::call(d, rt);
  }
#else
  Assembler::call( d, rt );
#endif
}

inline void MacroAssembler::call( Label& L,   relocInfo::relocType rt ) {
  MacroAssembler::call( target(L), rt);
}



inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); }
inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); }

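// Note (editorial, not in the original source): jmp and callr are both jmpl
// underneath; the only difference is the link register.  jmp discards the
// return address by linking into G0, while callr follows the SPARC calling
// convention and saves it in O7.
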
// prefetch instruction
inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
  if (VM_Version::v9_instructions_work())
    Assembler::bp( never, true, xcc, pt, d, rt );
}
inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }

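// Note (editorial, not in the original source): "bp(never, true, ...)" looks
// odd but is the V9 idiom for instruction prefetch.  A branch-never with the
// annul bit set never transfers control; the architecture treats it as a hint
// that the instructions at the target will be needed soon.
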
// Clobbers O7 on V8!
// Returns the delta from the pc that was read to the address just after this sequence.
inline int MacroAssembler::get_pc( Register d ) {
  int x = offset();
  if (VM_Version::v9_instructions_work())
    rdpc(d);
  else {
    Label lbl;
    Assembler::call(lbl, relocInfo::none);  // No relocation as this is call to pc+0x8
    if (d == O7)  delayed()->nop();
    else          delayed()->mov(O7, d);
    bind(lbl);
  }
  return offset() - x;
}

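// Note (editorial, not in the original source): on V9 the pc is read directly
// with rdpc; on V8 it is synthesized by calling the very next instruction,
// which deposits the pc of the call in O7 (hence the clobber warning above).
// In both cases the address just after the sequence is (value in d) + delta.
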

// Note:  All MacroAssembler::set_foo functions are defined out-of-line.


// Loads the current PC of the following instruction as an immediate value in
// 2 instructions.  All PCs in the CodeCache are within 2 Gig of each other.
inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
  intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
#ifdef _LP64
  Unimplemented();
#else
  Assembler::sethi(   thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
             add(reg, thepc &  0x3ff, reg, internal_word_Relocation::spec((address)thepc));
#endif
  return thepc;
}

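// Worked example for the sethi/add split above (editorial; the value is
// arbitrary): for thepc == 0x12345678, sethi materializes the upper 22 bits
// (0x12345678 & ~0x3ff == 0x12345400) and add fills in the low 10 bits
// (0x12345678 &  0x3ff == 0x278); 0x12345400 + 0x278 == 0x12345678.
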


inline void MacroAssembler::ldsh(Register s1, RegisterOrConstant s2, Register d) { ldsh(Address(s1, s2), d); }
inline void MacroAssembler::lduw(Register s1, RegisterOrConstant s2, Register d) { lduw(Address(s1, s2), d); }
inline void MacroAssembler::ldsw(Register s1, RegisterOrConstant s2, Register d) { ldsw(Address(s1, s2), d); }
inline void MacroAssembler::ldx( Register s1, RegisterOrConstant s2, Register d) { ldx( Address(s1, s2), d); }
inline void MacroAssembler::ld(  Register s1, RegisterOrConstant s2, Register d) { ld(  Address(s1, s2), d); }
inline void MacroAssembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }

inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d) {
  if (s2.is_register())  ldf(w, s1, s2.as_register(), d);
  else                   ldf(w, s1, s2.as_constant(), d);
}

inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) {
  relocate(a.rspec(offset));
  ldf(w, a.base(), a.disp() + offset, d);
}

// Returns whether membar generates any code; this predicate must mirror
// membar() below.
inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
  if( !os::is_MP() ) return false;  // Not needed on single CPU
  if( VM_Version::v9_instructions_work() ) {
    const Membar_mask_bits effective_mask =
        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
    return (effective_mask != 0);
  } else {
    return true;
  }
}

inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
  // Uniprocessors do not need memory barriers
  if (!os::is_MP()) return;

  // Weakened for current Sparcs and TSO.  See the v9 manual, sections 8.4.3,
  // 8.4.4.3, a.31 and a.50.
  if( VM_Version::v9_instructions_work() ) {
    // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
    // of the mmask subfield of const7a that does anything that isn't done
    // implicitly is StoreLoad.
    const Membar_mask_bits effective_mask =
        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
    if ( effective_mask != 0 ) {
      Assembler::membar( effective_mask );
    }
  } else {
    // stbar is the closest there is on v8.  Equivalent to membar(StoreStore).  We
    // do not issue the stbar because to my knowledge all v8 machines implement TSO,
    // which guarantees that all stores behave as if an stbar were issued just after
    // each one of them.  On these machines, stbar ought to be a nop.  There doesn't
    // appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it,
    // it can't be specified by stbar, nor have I come up with a way to simulate it.
    //
    // Addendum.  Dave says that ldstub guarantees a write buffer flush to coherent
    // space.  Put one here to be on the safe side.
    Assembler::ldstub(SP, 0, G0);
  }
}

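// Illustrative example (not in the original source): under TSO only StoreLoad
// needs an explicit fence, so a sequentially consistent store is a plain store
// followed by membar(StoreLoad); on V9 this emits "membar #StoreLoad", on V8
// it falls back to the ldstub flush above.  Register choices are arbitrary:
//
//   st_ptr(G3, O0, 0);     // publish the value
//   membar(StoreLoad);     // order the store before subsequent loads
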
inline void MacroAssembler::prefetch(const Address& a, PrefetchFcn f, int offset) {
  relocate(a.rspec(offset));
  assert(!a.has_index(), "");
  prefetch(a.base(), a.disp() + offset, f);
}

inline void MacroAssembler::st(Register d, Register s1, Register s2)      { stw(d, s1, s2); }
inline void MacroAssembler::st(Register d, Register s1, int simm13a)      { stw(d, s1, simm13a); }

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st(Register d, Register s1, ByteSize simm13a) { stw(d, s1, in_bytes(simm13a)); }
#endif

inline void MacroAssembler::st(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); st( d, a.base(), a.index()        ); }
  else               {                          st( d, a.base(), a.disp() + offset); }


// ---- Updated version of the same section: the VM_Version::v9_instructions_work()
// ---- checks and their V8 fallback paths have been removed. ----

  Assembler::srl( s1, s2, d);
#endif
}

inline void MacroAssembler::srl_ptr( Register s1, int imm6a,   Register d ) {
#ifdef _LP64
  Assembler::srlx(s1, imm6a, d);
#else
  Assembler::srl( s1, imm6a, d);
#endif
}

inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
  if (s2.is_register())  sll_ptr(s1, s2.as_register(), d);
  else                   sll_ptr(s1, s2.as_constant(), d);
}

// Use the right branch for the platform

inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::bp(c, a, icc, p, d, rt);
}

inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
  br(c, a, p, target(L));
}


// Branch that tests either xcc or icc depending on the
// architecture compiled (LP64 or not)
inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
#ifdef _LP64
    Assembler::bp(c, a, xcc, p, d, rt);
#else
    MacroAssembler::br(c, a, p, d, rt);
#endif
}

inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
  brx(c, a, p, target(L));
}

inline void MacroAssembler::ba( Label& L ) {
  br(always, false, pt, L);
}

// Warning: V9-only functions
inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::bp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::bp(c, a, cc, p, L);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  fbp(c, a, fcc0, p, d, rt);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
  fb(c, a, p, target(L));
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::fbp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::fbp(c, a, cc, p, L);
}

inline void MacroAssembler::jmp( Register s1, Register s2 ) { jmpl( s1, s2, G0 ); }
inline void MacroAssembler::jmp( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, G0, rspec); }

inline bool MacroAssembler::is_far_target(address d) {
  if (ForceUnreachable) {
    // References outside the code cache should be treated as far


    jumpl_to(dest, O7, O7);
  } else {
    Assembler::call(d, rt);
  }
#else
  Assembler::call( d, rt );
#endif
}

inline void MacroAssembler::call( Label& L,   relocInfo::relocType rt ) {
  MacroAssembler::call( target(L), rt);
}



inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); }
inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); }

// prefetch instruction
inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
  Assembler::bp( never, true, xcc, pt, d, rt );
}
inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }


// Clobbers O7 on V8!
// Returns the delta from the pc that was read to the address just after this sequence.
inline int MacroAssembler::get_pc( Register d ) {
  int x = offset();
  rdpc(d);
  return offset() - x;
}


// Note:  All MacroAssembler::set_foo functions are defined out-of-line.


// Loads the current PC of the following instruction as an immediate value in
// 2 instructions.  All PCs in the CodeCache are within 2 Gig of each other.
inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
  intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
#ifdef _LP64
  Unimplemented();
#else
  Assembler::sethi(   thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
             add(reg, thepc &  0x3ff, reg, internal_word_Relocation::spec((address)thepc));
#endif
  return thepc;
}



inline void MacroAssembler::ldsh(Register s1, RegisterOrConstant s2, Register d) { ldsh(Address(s1, s2), d); }
inline void MacroAssembler::lduw(Register s1, RegisterOrConstant s2, Register d) { lduw(Address(s1, s2), d); }
inline void MacroAssembler::ldsw(Register s1, RegisterOrConstant s2, Register d) { ldsw(Address(s1, s2), d); }
inline void MacroAssembler::ldx( Register s1, RegisterOrConstant s2, Register d) { ldx( Address(s1, s2), d); }
inline void MacroAssembler::ld(  Register s1, RegisterOrConstant s2, Register d) { ld(  Address(s1, s2), d); }
inline void MacroAssembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }

inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d) {
  if (s2.is_register())  ldf(w, s1, s2.as_register(), d);
  else                   ldf(w, s1, s2.as_constant(), d);
}

inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) {
  relocate(a.rspec(offset));
  ldf(w, a.base(), a.disp() + offset, d);
}

// Returns whether membar generates any code; this predicate must mirror
// membar() below.
inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
  if (!os::is_MP())
    return false;  // Not needed on single CPU
  const Membar_mask_bits effective_mask =
      Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
  return (effective_mask != 0);
}

inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
  // Uniprocessors do not need memory barriers
  if (!os::is_MP())
    return;
  // Weakened for current Sparcs and TSO.  See the v9 manual, sections 8.4.3,
  // 8.4.4.3, a.31 and a.50.
  // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
  // of the mmask subfield of const7a that does anything that isn't done
  // implicitly is StoreLoad.
  const Membar_mask_bits effective_mask =
      Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
  if (effective_mask != 0) {
    Assembler::membar(effective_mask);
  }
}

inline void MacroAssembler::prefetch(const Address& a, PrefetchFcn f, int offset) {
  relocate(a.rspec(offset));
  assert(!a.has_index(), "");
  prefetch(a.base(), a.disp() + offset, f);
}

inline void MacroAssembler::st(Register d, Register s1, Register s2)      { stw(d, s1, s2); }
inline void MacroAssembler::st(Register d, Register s1, int simm13a)      { stw(d, s1, simm13a); }

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st(Register d, Register s1, ByteSize simm13a) { stw(d, s1, in_bytes(simm13a)); }
#endif

inline void MacroAssembler::st(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); st( d, a.base(), a.index()        ); }
  else               {                          st( d, a.base(), a.disp() + offset); }