852 // Emit the CompiledIC call idiom
853 void ic_call(address entry);
854
855 // Jumps
856
857 // NOTE: these jumps transfer to the effective address of dst NOT
858 // the address contained by dst. This is because this is more natural
859 // for jumps/calls.
860 void jump(AddressLiteral dst);
861 void jump_cc(Condition cc, AddressLiteral dst);
862
863 // 32bit can do a case table jump in one instruction but we no longer allow the base
864 // to be installed in the Address class. This jump will transfer to the address
865 // contained in the location described by entry (not the address of entry)
866 void jump(ArrayAddress entry);
867
868 // Floating
// SSE/x87 wrappers. The Address/XMMRegister overloads forward straight to the
// base Assembler; the AddressLiteral overloads are declared here and defined
// out of line — presumably to deal with literals that need materializing or
// relocation (TODO confirm against the corresponding .cpp).
869
870 void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
871 void andpd(XMMRegister dst, AddressLiteral src);
872
873 void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
874 void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
875 void andps(XMMRegister dst, AddressLiteral src);
876
877 void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
878 void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
879 void comiss(XMMRegister dst, AddressLiteral src);
880
881 void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
882 void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
883 void comisd(XMMRegister dst, AddressLiteral src);
884
885 void fadd_s(Address src) { Assembler::fadd_s(src); }
886 void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }
887
888 void fldcw(Address src) { Assembler::fldcw(src); }
889 void fldcw(AddressLiteral src);
890
891 void fld_s(int index) { Assembler::fld_s(index); }
// NOTE(review): original lines 892-898 (likely the Address/AddressLiteral
// overloads of fld_s/fld_d and fld_x(Address)) are missing from this excerpt.
899 void fld_x(AddressLiteral src);
900
901 void fmul_s(Address src) { Assembler::fmul_s(src); }
902 void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
903
904 void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
905 void ldmxcsr(AddressLiteral src);
906
907 // compute pow(x,y) and exp(x) with x86 instructions. Don't cover
908 // all corner cases and may result in NaN and require fallback to a
909 // runtime call.
910 void fast_pow();
911 void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
912 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
913 Register rax, Register rcx, Register rdx, Register tmp);
914
// fast_log takes one extra temp register on 64-bit only: LP64_ONLY(COMMA ...)
// expands to nothing on 32-bit builds, so the 32-bit signature is shorter.
915 void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
916 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
917 Register rax, Register rcx, Register rdx, Register tmp1 LP64_ONLY(COMMA Register tmp2));
918
// Raise/restore the x87 control-word precision around the fast-math sequences.
919 void increase_precision();
920 void restore_precision();
921
922 // computes pow(x,y). Fallback to runtime call included.
923 void pow_with_fallback(int num_fpu_regs_in_use) { pow_or_exp(num_fpu_regs_in_use); }
924
925 private:
926
927 // call runtime as a fallback for trig functions and pow/exp.
928 void fp_runtime_fallback(address runtime_entry, int nb_args, int num_fpu_regs_in_use);
929
930 // computes 2^(Ylog2X); Ylog2X in ST(0)
931 void pow_exp_core_encoding();
932
933 // computes pow(x,y) or exp(x). Fallback to runtime call included.
934 void pow_or_exp(int num_fpu_regs_in_use);
935
936 // these are private because users should be doing movflt/movdbl
937
938 void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); }
939 void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
940 void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); }
941 void movss(XMMRegister dst, AddressLiteral src);
942
943 void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); }
944 void movlpd(XMMRegister dst, AddressLiteral src);
947
// Scalar SSE arithmetic wrappers; AddressLiteral overloads defined out of line.
948 void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
949 void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); }
950 void addsd(XMMRegister dst, AddressLiteral src);
951
952 void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
953 void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); }
954 void addss(XMMRegister dst, AddressLiteral src);
955
956 void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
957 void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); }
958 void divsd(XMMRegister dst, AddressLiteral src);
959
960 void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
961 void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); }
962 void divss(XMMRegister dst, AddressLiteral src);
963
964 // Move Unaligned Double Quadword
965 void movdqu(Address dst, XMMRegister src);
966 void movdqu(XMMRegister dst, Address src);
967 void movdqu(XMMRegister dst, XMMRegister src);
968 void movdqu(XMMRegister dst, AddressLiteral src);
969 // AVX Unaligned forms
970 void vmovdqu(Address dst, XMMRegister src);
971 void vmovdqu(XMMRegister dst, Address src);
972 void vmovdqu(XMMRegister dst, XMMRegister src);
973 void vmovdqu(XMMRegister dst, AddressLiteral src);
974
|
852 // Emit the CompiledIC call idiom
853 void ic_call(address entry);
854
855 // Jumps
856
857 // NOTE: these jumps transfer to the effective address of dst NOT
858 // the address contained by dst. This is because this is more natural
859 // for jumps/calls.
860 void jump(AddressLiteral dst);
861 void jump_cc(Condition cc, AddressLiteral dst);
862
863 // 32bit can do a case table jump in one instruction but we no longer allow the base
864 // to be installed in the Address class. This jump will transfer to the address
865 // contained in the location described by entry (not the address of entry)
866 void jump(ArrayAddress entry);
867
868 // Floating
// SSE/x87 wrappers. The Address/XMMRegister overloads forward straight to the
// base Assembler; the AddressLiteral overloads are declared here and defined
// out of line — presumably to deal with literals that need materializing or
// relocation (TODO confirm against the corresponding .cpp).
869
870 void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
871 void andpd(XMMRegister dst, AddressLiteral src);
872 void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }
873
874 void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
875 void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
876 void andps(XMMRegister dst, AddressLiteral src);
877
878 void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
879 void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
880 void comiss(XMMRegister dst, AddressLiteral src);
881
882 void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
883 void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
884 void comisd(XMMRegister dst, AddressLiteral src);
885
886 void fadd_s(Address src) { Assembler::fadd_s(src); }
887 void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }
888
889 void fldcw(Address src) { Assembler::fldcw(src); }
890 void fldcw(AddressLiteral src);
891
892 void fld_s(int index) { Assembler::fld_s(index); }
// NOTE(review): original lines 893-899 (likely the Address/AddressLiteral
// overloads of fld_s/fld_d and fld_x(Address)) are missing from this excerpt.
900 void fld_x(AddressLiteral src);
901
902 void fmul_s(Address src) { Assembler::fmul_s(src); }
903 void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
904
905 void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
906 void ldmxcsr(AddressLiteral src);
907
908 // compute pow(x,y) and exp(x) with x86 instructions. Don't cover
909 // all corner cases and may result in NaN and require fallback to a
910 // runtime call.
911 void fast_pow();
912 void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
913 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
914 Register rax, Register rcx, Register rdx, Register tmp);
915
// The LP64_ONLY(COMMA ...) / NOT_LP64(COMMA ...) macros below expand
// conditionally, so the 32-bit and 64-bit builds see different parameter
// lists for the same intrinsic stub.
916 void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
917 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
918 Register rax, Register rcx, Register rdx, Register tmp1 LP64_ONLY(COMMA Register tmp2));
919
920 void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
921 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
922 Register rax, Register rbx LP64_ONLY(COMMA Register rcx), Register rdx
923 LP64_ONLY(COMMA Register tmp1) LP64_ONLY(COMMA Register tmp2)
924 LP64_ONLY(COMMA Register tmp3) LP64_ONLY(COMMA Register tmp4));
925
926 void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
927 XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
928 Register rax, Register rcx, Register rdx NOT_LP64(COMMA Register tmp)
929 LP64_ONLY(COMMA Register r8) LP64_ONLY(COMMA Register r9)
930 LP64_ONLY(COMMA Register r10) LP64_ONLY(COMMA Register r11));
931
// 32-bit-only helpers used by the sin/cos stubs; names suggest huge-argument
// sin/cos and pi/4 argument reduction — confirm against the libm-derived .cpp.
932 #ifndef _LP64
933 void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
934 Register edx, Register ebx, Register esi, Register edi,
935 Register ebp, Register esp);
936 void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
937 Register esi, Register edi, Register ebp, Register esp);
938 #endif
939
// Raise/restore the x87 control-word precision around the fast-math sequences.
940 void increase_precision();
941 void restore_precision();
942
943 // computes pow(x,y). Fallback to runtime call included.
944 void pow_with_fallback(int num_fpu_regs_in_use) { pow_or_exp(num_fpu_regs_in_use); }
945
946 private:
947
948 // call runtime as a fallback for trig functions and pow/exp.
949 void fp_runtime_fallback(address runtime_entry, int nb_args, int num_fpu_regs_in_use);
950
951 // computes 2^(Ylog2X); Ylog2X in ST(0)
952 void pow_exp_core_encoding();
953
954 // computes pow(x,y) or exp(x). Fallback to runtime call included.
955 void pow_or_exp(int num_fpu_regs_in_use);
956
957 // these are private because users should be doing movflt/movdbl
958
959 void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); }
960 void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
961 void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); }
962 void movss(XMMRegister dst, AddressLiteral src);
963
964 void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); }
965 void movlpd(XMMRegister dst, AddressLiteral src);
966
967 public:
968
// Scalar/packed SSE arithmetic wrappers; AddressLiteral overloads defined out of line.
969 void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
970 void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); }
971 void addsd(XMMRegister dst, AddressLiteral src);
972
973 void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
974 void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); }
975 void addss(XMMRegister dst, AddressLiteral src);
976
977 void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); }
978 void addpd(XMMRegister dst, Address src) { Assembler::addpd(dst, src); }
979 void addpd(XMMRegister dst, AddressLiteral src);
980
981 void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
982 void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); }
983 void divsd(XMMRegister dst, AddressLiteral src);
984
985 void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
986 void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); }
987 void divss(XMMRegister dst, AddressLiteral src);
988
989 // Move Unaligned Double Quadword
990 void movdqu(Address dst, XMMRegister src);
991 void movdqu(XMMRegister dst, Address src);
992 void movdqu(XMMRegister dst, XMMRegister src);
993 void movdqu(XMMRegister dst, AddressLiteral src);
994 // AVX Unaligned forms
995 void vmovdqu(Address dst, XMMRegister src);
996 void vmovdqu(XMMRegister dst, Address src);
997 void vmovdqu(XMMRegister dst, XMMRegister src);
998 void vmovdqu(XMMRegister dst, AddressLiteral src);
999
|