
src/cpu/ppc/vm/assembler_ppc.inline.hpp

// ra0 version
inline void Assembler::stfs( FloatRegister s, int si16)   { emit_int32( STFS_OPCODE  | frs(s) | simm(si16, 16)); }
inline void Assembler::stfsx(FloatRegister s, Register b) { emit_int32( STFSX_OPCODE | frs(s) | rb(b)); }
inline void Assembler::stfd( FloatRegister s, int si16)   { emit_int32( STFD_OPCODE  | frs(s) | simm(si16, 16)); }
inline void Assembler::stfdx(FloatRegister s, Register b) { emit_int32( STFDX_OPCODE | frs(s) | rb(b)); }

// ra0 version
inline void Assembler::lvebx( VectorRegister d, Register s2) { emit_int32( LVEBX_OPCODE  | vrt(d) | rb(s2)); }
inline void Assembler::lvehx( VectorRegister d, Register s2) { emit_int32( LVEHX_OPCODE  | vrt(d) | rb(s2)); }
inline void Assembler::lvewx( VectorRegister d, Register s2) { emit_int32( LVEWX_OPCODE  | vrt(d) | rb(s2)); }
inline void Assembler::lvx(   VectorRegister d, Register s2) { emit_int32( LVX_OPCODE    | vrt(d) | rb(s2)); }
inline void Assembler::lvxl(  VectorRegister d, Register s2) { emit_int32( LVXL_OPCODE   | vrt(d) | rb(s2)); }
inline void Assembler::stvebx(VectorRegister d, Register s2) { emit_int32( STVEBX_OPCODE | vrt(d) | rb(s2)); }
inline void Assembler::stvehx(VectorRegister d, Register s2) { emit_int32( STVEHX_OPCODE | vrt(d) | rb(s2)); }
inline void Assembler::stvewx(VectorRegister d, Register s2) { emit_int32( STVEWX_OPCODE | vrt(d) | rb(s2)); }
inline void Assembler::stvx(  VectorRegister d, Register s2) { emit_int32( STVX_OPCODE   | vrt(d) | rb(s2)); }
inline void Assembler::stvxl( VectorRegister d, Register s2) { emit_int32( STVXL_OPCODE  | vrt(d) | rb(s2)); }
inline void Assembler::lvsl(  VectorRegister d, Register s2) { emit_int32( LVSL_OPCODE   | vrt(d) | rb(s2)); }
inline void Assembler::lvsr(  VectorRegister d, Register s2) { emit_int32( LVSR_OPCODE   | vrt(d) | rb(s2)); }

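The "ra0 version" comments mark the encodings that leave the RA field zero, so the hardware forms the effective address from RB alone (or from the 16-bit displacement alone for stfs/stfd). A minimal sketch of emitting one of these forms, assuming the usual HotSpot Assembler context; the register choices VR0 and R3 are placeholders, not taken from this change:

// Illustrative only: emit "lvx VR0, 0, R3", i.e. load the aligned
// 16-byte vector addressed by the contents of R3 (RA field is 0).
void emit_lvx_example(Assembler* a) {
  a->lvx(VR0, R3);   // effective address is just (R3)
}
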
inline void Assembler::load_perm(VectorRegister perm, Register addr) {
#if defined(VM_LITTLE_ENDIAN)
  lvsr(perm, addr);
#else
  lvsl(perm, addr);
#endif
}

inline void Assembler::vec_perm(VectorRegister first_dest, VectorRegister second, VectorRegister perm) {
#if defined(VM_LITTLE_ENDIAN)
  vperm(first_dest, second, first_dest, perm);
#else
  vperm(first_dest, first_dest, second, perm);
#endif
}

inline void Assembler::vec_perm(VectorRegister dest, VectorRegister first, VectorRegister second, VectorRegister perm) {
#if defined(VM_LITTLE_ENDIAN)
  vperm(dest, second, first, perm);
#else
  vperm(dest, first, second, perm);
#endif
}

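The #if pairs above let callers stay endianness-agnostic: load_perm picks lvsr or lvsl, and vec_perm swaps the vperm source operands to match. One plausible way the helpers could be combined into an unaligned 16-byte load, shown as a sketch only; the sequence, the scratch registers VR0..VR2, and the clobbering of addr are illustrative assumptions, not taken from this change:

// Sketch: read 16 bytes starting at an arbitrary (possibly unaligned) address.
void load_16_bytes_unaligned_example(Assembler* a, Register addr) {
  a->load_perm(VR2, addr);      // permute mask derived from the low bits of addr
  a->lvx(VR0, addr);            // aligned quadword containing the first bytes
  a->addi(addr, addr, 16);      // advance to the next aligned quadword (clobbers addr)
  a->lvx(VR1, addr);            // aligned quadword containing the remaining bytes
  a->vec_perm(VR0, VR1, VR2);   // VR0 now holds the 16 bytes from the original addr
}
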
inline void Assembler::load_const(Register d, void* x, Register tmp) {
  load_const(d, (long)x, tmp);
}

// Load a 64 bit constant encoded by a `Label'. This works for bound
// labels as well as unbound ones. For unbound labels, the code will
// be patched as soon as the label gets bound.
inline void Assembler::load_const(Register d, Label& L, Register tmp) {
  load_const(d, target(L), tmp);
}

// Load a 64 bit constant encoded by an AddressLiteral. patchable.
inline void Assembler::load_const(Register d, AddressLiteral& a, Register tmp) {
  assert(d != R0, "R0 not allowed");
  // First relocate (we don't change the offset in the RelocationHolder,
  // just pass a.rspec()), then delegate to load_const(Register, long).
  relocate(a.rspec());
  load_const(d, (long)a.value(), tmp);
}

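For illustration, a hedged sketch of calling the Label overload; the label name, the registers R3 and R11, and the assumed Assembler pointer `a` are placeholders, not part of this change:

// Materialize a 64-bit label address; for an unbound label the constant
// is patched once the label gets bound.
Label entry;
a->load_const(R3, entry, R11);   // R0 is not allowed as the destination
// ... other code ...
a->bind(entry);
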