The change removes the os::is_MP() uniprocessor short-circuits from membar_has_effect() and membar(); every other line in the hunk is unchanged context:

@@ -597,51 +597,46 @@
 }
 
 inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) {
   relocate(a.rspec(offset));
   if (a.has_index()) {
     assert(offset == 0, "");
     ldf(w, a.base(), a.index(), d);
   } else {
     ldf(w, a.base(), a.disp() + offset, d);
   }
 }
 
 inline void MacroAssembler::lduwl(Register s1, Register s2, Register d) { lduwa(s1, s2, ASI_PRIMARY_LITTLE, d); }
 inline void MacroAssembler::ldswl(Register s1, Register s2, Register d) { ldswa(s1, s2, ASI_PRIMARY_LITTLE, d); }
 inline void MacroAssembler::ldxl( Register s1, Register s2, Register d) { ldxa(s1, s2, ASI_PRIMARY_LITTLE, d); }
 inline void MacroAssembler::ldfl(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { ldfa(w, s1, s2, ASI_PRIMARY_LITTLE, d); }
 
 // returns if membar generates anything, obviously this code should mirror
 // membar below.
 inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
-  if (!os::is_MP())
-    return false;  // Not needed on single CPU
   const Membar_mask_bits effective_mask =
       Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
   return (effective_mask != 0);
 }
 
 inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
-  // Uniprocessors do not need memory barriers
-  if (!os::is_MP())
-    return;
   // Weakened for current Sparcs and TSO. See the v9 manual, sections 8.4.3,
   // 8.4.4.3, a.31 and a.50.
   // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
   // of the mmask subfield of const7a that does anything that isn't done
   // implicitly is StoreLoad.
   const Membar_mask_bits effective_mask =
       Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
   if (effective_mask != 0) {
     Assembler::membar(effective_mask);
   }
 }
 
 inline void MacroAssembler::mov(Register s, Register d) {
   if (s != d) {
     or3(G0, s, d);
   } else {
     assert_not_delayed();  // Put something useful in the delay slot!
   }
 }
 
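The weakening logic in membar() is easy to check in isolation. Below is a minimal, self-contained C++ sketch (not HotSpot code); the enum values are assumptions chosen to match the SPARC V9 mmask encoding the comment refers to (bit 3 = StoreStore, bit 2 = LoadStore, bit 1 = StoreLoad, bit 0 = LoadLoad). It shows that once the three TSO-implicit orderings are stripped, only a StoreLoad request can make membar() emit anything, which is exactly what membar_has_effect() reports.

// Standalone sketch of the TSO mask weakening (assumed bit layout mirroring
// the SPARC V9 mmask encoding; not the HotSpot definitions).
#include <cstdio>

enum Membar_mask_bits {
  StoreStore = 1 << 3,  // implicit under TSO
  LoadStore  = 1 << 2,  // implicit under TSO
  StoreLoad  = 1 << 1,  // the one ordering TSO does not give for free
  LoadLoad   = 1 << 0   // implicit under TSO
};

// Same expression as membar()/membar_has_effect() above: drop the three
// TSO-implicit orderings and keep whatever remains.
static int effective_mask(int const7a) {
  return const7a & ~(LoadLoad | LoadStore | StoreStore);
}

int main() {
  // A full fence degenerates to a pure StoreLoad barrier...
  printf("full fence -> %d (StoreLoad is %d)\n",
         effective_mask(LoadLoad | LoadStore | StoreLoad | StoreStore),
         (int)StoreLoad);
  // ...while acquire- and release-style masks need no instruction at all.
  printf("acquire    -> %d\n", effective_mask(LoadLoad | LoadStore));
  printf("release    -> %d\n", effective_mask(LoadStore | StoreStore));
  return 0;
}

Under the assumed encoding this prints 2, then 0, then 0: the full fence keeps only the StoreLoad bit, and the acquire/release-style masks reduce to no-ops, matching the comment's claim that StoreLoad is the only mmask component with an observable effect under TSO.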
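The *l load variants route the ordinary loads through the alternate-space forms with ASI_PRIMARY_LITTLE, the ASI under which the (big-endian) SPARC core performs the access little-endian. As an illustration of the resulting value, here is a portable C++ model of lduw versus lduwl (the *_model names are hypothetical, written only for this sketch):

#include <cstdio>
#include <cstdint>

// What a normal big-endian 32-bit load (lduw) returns for the bytes at p.
static uint32_t lduw_model(const uint8_t* p) {
  return (uint32_t(p[0]) << 24) | (uint32_t(p[1]) << 16) |
         (uint32_t(p[2]) <<  8) |  uint32_t(p[3]);
}

// What the same load through ASI_PRIMARY_LITTLE (lduwl) returns: the four
// bytes assembled in the opposite order.
static uint32_t lduwl_model(const uint8_t* p) {
  return (uint32_t(p[3]) << 24) | (uint32_t(p[2]) << 16) |
         (uint32_t(p[1]) <<  8) |  uint32_t(p[0]);
}

int main() {
  const uint8_t bytes[4] = { 0x12, 0x34, 0x56, 0x78 };
  printf("lduw : 0x%08x\n", (unsigned)lduw_model(bytes));   // 0x12345678
  printf("lduwl: 0x%08x\n", (unsigned)lduwl_model(bytes));  // 0x78563412
  return 0;
}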
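mov() is synthesized from or3() because SPARC has no dedicated register-to-register move: G0 is hardwired to read as zero, so or %g0, s, d deposits s into d unchanged. A one-line sketch of the identity it relies on:

#include <cassert>
#include <cstdint>

int main() {
  // G0 always reads as zero, so or-ing it with s yields s; this is why
  // mov(s, d) can be encoded as or3(G0, s, d).
  uint64_t g0 = 0, s = 0xDEADBEEFCAFEULL;
  uint64_t d  = g0 | s;
  assert(d == s);
  return 0;
}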