src/cpu/sparc/vm/assembler_sparc.inline.hpp

Print this page
rev 3419 : 7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
Summary: use shorter instruction sequences for atomic add and atomic exchange when possible.
Reviewed-by:


 // SPARC V8-only coprocessor store instructions (hence the v8_only() guard):
 //   stc   - store coprocessor register            (stc_op3)
 //   stdc  - store double coprocessor register     (stdc_op3)
 //   stcsr - store coprocessor state register      (stcsr_op3)
 //   stdcq - store double coprocessor queue        (stdcq_op3)
 // Each has a [s1 + s2] register-register form (emit_long) and a
 // [s1 + simm13a] form with a signed 13-bit immediate (emit_data).
 330 inline void Assembler::stc(    int crd, Register s1, Register s2) { v8_only();  emit_long( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | rs2(s2) ); }
 331 inline void Assembler::stc(    int crd, Register s1, int simm13a) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 332 inline void Assembler::stdc(   int crd, Register s1, Register s2) { v8_only();  emit_long( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | rs2(s2) ); }
 333 inline void Assembler::stdc(   int crd, Register s1, int simm13a) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 334 inline void Assembler::stcsr(  int crd, Register s1, Register s2) { v8_only();  emit_long( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | rs2(s2) ); }
 335 inline void Assembler::stcsr(  int crd, Register s1, int simm13a) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 336 inline void Assembler::stdcq(  int crd, Register s1, Register s2) { v8_only();  emit_long( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | rs2(s2) ); }
 337 inline void Assembler::stdcq(  int crd, Register s1, int simm13a) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 338 
 // Subtract a register-or-constant s2 (plus an extra constant byte offset)
 // from s1 into d.  When s2 is a constant, the offset is folded into the
 // single sub immediate; when s2 is a register, a second sub is emitted
 // only if a nonzero offset remains.
 339 inline void Assembler::sub(Register s1, RegisterOrConstant s2, Register d, int offset) {
 340   if (s2.is_register())  sub(s1, s2.as_register(),          d);
 341   else                 { sub(s1, s2.as_constant() + offset, d); offset = 0; }
 342   if (offset != 0)       sub(d,  offset,                    d);
 343 }
 344 
 345 // pp 231
 346 
 // SWAP: atomically exchange register d with the word at [s1 + s2] or
 // [s1 + simm13a].  v9_dep(): SWAP is a V9-deprecated instruction
 // (see "pp 231" manual reference above).
 347 inline void Assembler::swap(    Register s1, Register s2, Register d) { v9_dep();  emit_long( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | rs2(s2) ); }
 348 inline void Assembler::swap(    Register s1, int simm13a, Register d) { v9_dep();  emit_data( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 349 
 // Swap register d with the memory word addressed by 'a' plus an extra
 // byte offset.
 // Fix: the previous one-liner unconditionally emitted [base + disp],
 // silently dropping the index register of an indexed Address
 // ([base + index]).  Handle the indexed form explicitly; an indexed
 // address has no immediate field, so the extra offset must be zero there.
 350 inline void Assembler::swap(    Address& a, Register d, int offset ) {
 351   relocate(a.rspec(offset));
 352   if (a.has_index()) { assert(offset == 0, ""); swap( a.base(), a.index(), d         ); }
 353   else               {                          swap( a.base(), a.disp() + offset, d ); }
 354 }




 351 
 352 
 353 // Use the right loads/stores for the platform
 // Load a pointer-sized word from [s1 + s2] into d: 64-bit ldx under
 // _LP64, 32-bit ld otherwise.
 354 inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
 355 #ifdef _LP64
 356   Assembler::ldx(s1, s2, d);
 357 #else
 358   Assembler::ld( s1, s2, d);
 359 #endif
 360 }
 361 
 // Load a pointer-sized word from [s1 + simm13a] into d: 64-bit ldx under
 // _LP64, 32-bit ld otherwise.
 362 inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
 363 #ifdef _LP64
 364   Assembler::ldx(s1, simm13a, d);
 365 #else
 366   Assembler::ld( s1, simm13a, d);
 367 #endif
 368 }
 369 
 370 #ifdef ASSERT




 // Coprocessor store instructions, valid only on SPARC V8 (v8_only()):
 // stc/stdc store a single/double coprocessor register, stcsr the
 // coprocessor state register, stdcq the double coprocessor queue.
 // Register-register addressing uses emit_long; the 13-bit signed
 // immediate form uses emit_data.
 330 inline void Assembler::stc(    int crd, Register s1, Register s2) { v8_only();  emit_long( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | rs2(s2) ); }
 331 inline void Assembler::stc(    int crd, Register s1, int simm13a) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 332 inline void Assembler::stdc(   int crd, Register s1, Register s2) { v8_only();  emit_long( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | rs2(s2) ); }
 333 inline void Assembler::stdc(   int crd, Register s1, int simm13a) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 334 inline void Assembler::stcsr(  int crd, Register s1, Register s2) { v8_only();  emit_long( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | rs2(s2) ); }
 335 inline void Assembler::stcsr(  int crd, Register s1, int simm13a) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 336 inline void Assembler::stdcq(  int crd, Register s1, Register s2) { v8_only();  emit_long( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | rs2(s2) ); }
 337 inline void Assembler::stdcq(  int crd, Register s1, int simm13a) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 338 
 // d = s1 - s2 - offset, where s2 may be a register or a constant.
 // A constant s2 absorbs the offset into one immediate sub; a register s2
 // needs a trailing sub for any nonzero offset.
 339 inline void Assembler::sub(Register s1, RegisterOrConstant s2, Register d, int offset) {
 340   if (s2.is_register())  sub(s1, s2.as_register(),          d);
 341   else                 { sub(s1, s2.as_constant() + offset, d); offset = 0; }
 342   if (offset != 0)       sub(d,  offset,                    d);
 343 }
 344 
 345 // pp 231
 346 
 // SWAP: atomic exchange of d with the word at [s1 + s2] / [s1 + simm13a].
 // Deprecated in SPARC V9, hence v9_dep().
 347 inline void Assembler::swap(    Register s1, Register s2, Register d) { v9_dep();  emit_long( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | rs2(s2) ); }
 348 inline void Assembler::swap(    Register s1, int simm13a, Register d) { v9_dep();  emit_data( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 349 
 // Swap register d with the memory word addressed by 'a' plus an extra
 // byte offset.  An indexed address ([base + index]) has no immediate
 // field, so the extra offset must be zero in that case (asserted);
 // otherwise the offset is folded into the displacement.
 350 inline void Assembler::swap(    Address& a, Register d, int offset ) { 
 351   relocate(a.rspec(offset)); 
 352   if (a.has_index()) { assert(offset == 0, ""); swap( a.base(), a.index(), d         ); }
 353   else               {                          swap( a.base(), a.disp() + offset, d ); }
 354 }
 355 
 356 
 357 // Use the right loads/stores for the platform
 // Pointer-sized load from [s1 + s2] into d; ldx on 64-bit (_LP64) builds,
 // plain ld on 32-bit builds.
 358 inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
 359 #ifdef _LP64
 360   Assembler::ldx(s1, s2, d);
 361 #else
 362   Assembler::ld( s1, s2, d);
 363 #endif
 364 }
 365 
 // Pointer-sized load from [s1 + simm13a] into d; ldx on 64-bit (_LP64)
 // builds, plain ld on 32-bit builds.
 366 inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
 367 #ifdef _LP64
 368   Assembler::ldx(s1, simm13a, d);
 369 #else
 370   Assembler::ld( s1, simm13a, d);
 371 #endif
 372 }
 373 
 374 #ifdef ASSERT