< prev index next >

src/hotspot/cpu/x86/x86.ad

Print this page




2907   ins_pipe( fpu_reg_reg );
2908 %}
2909 
2910 // Load vectors (16 bytes long): match a LoadVector node and load 16 bytes from memory into a vecX register.
2911 instruct loadV16(vecX dst, memory mem) %{
2912   predicate(n->as_LoadVector()->memory_size() == 16);  // only vector loads with a 16-byte memory footprint
2913   match(Set dst (LoadVector mem));
2914   ins_cost(125);
2915   format %{ "movdqu  $dst,$mem\t! load vector (16 bytes)" %}
2916   ins_encode %{
2917     __ movdqu($dst$$XMMRegister, $mem$$Address);  // unaligned 128-bit move (no alignment guarantee on mem)
2918   %}
2919   ins_pipe( pipe_slow );
2920 %}
2921 
2922 // Move vector (16 bytes): register-to-register copy from a vecX into a legacy-only (xmm0-15) register.
2923 instruct MoveVecX2Leg(legVecX dst, vecX src) %{
2924   match(Set dst src);
2925   format %{ "movdqu $dst,$src\t! load vector (16 bytes)" %}
2926   ins_encode %{
2927     if (UseAVX <= 2 || VM_Version::supports_avx512vl()) {  // fixed: was 'UseAVX < 2', which took the EVEX path at UseAVX == 2 where AVX-512 (evmovdquq) is unavailable
2928       __ movdqu($dst$$XMMRegister, $src$$XMMRegister);  // plain 128-bit unaligned move is encodable here
2929     } else {
2930       int vector_len = 2;  // 512-bit: without AVX512VL a 128-bit EVEX move cannot be encoded, so copy the full register (AVX512F)
2931       __ evmovdquq($dst$$XMMRegister, $src$$XMMRegister, vector_len);
2932     }
2933   %}
2934   ins_pipe( fpu_reg_reg );
2935 %}
2936 
2937 // Move vector (16 bytes): register-to-register copy from a legacy-only (xmm0-15) register into a vecX.
2938 instruct MoveLeg2VecX(vecX dst, legVecX src) %{
2939   match(Set dst src);
2940   format %{ "movdqu $dst,$src\t! load vector (16 bytes)" %}
2941   ins_encode %{
2942     if (UseAVX <= 2 || VM_Version::supports_avx512vl()) {  // fixed: was 'UseAVX < 2', which took the EVEX path at UseAVX == 2 where AVX-512 (evmovdquq) is unavailable
2943       __ movdqu($dst$$XMMRegister, $src$$XMMRegister);  // plain 128-bit unaligned move is encodable here
2944     } else {
2945       int vector_len = 2;  // 512-bit: without AVX512VL a 128-bit EVEX move cannot be encoded, so copy the full register (AVX512F)
2946       __ evmovdquq($dst$$XMMRegister, $src$$XMMRegister, vector_len);
2947     }
2948   %}
2949   ins_pipe( fpu_reg_reg );
2950 %}
2951 
2952 // Load vectors (32 bytes long): match a LoadVector node and load 32 bytes from memory into a vecY register.
2953 instruct loadV32(vecY dst, memory mem) %{
2954   predicate(n->as_LoadVector()->memory_size() == 32);  // only vector loads with a 32-byte memory footprint
2955   match(Set dst (LoadVector mem));
2956   ins_cost(125);
2957   format %{ "vmovdqu $dst,$mem\t! load vector (32 bytes)" %}
2958   ins_encode %{
2959     __ vmovdqu($dst$$XMMRegister, $mem$$Address);  // unaligned 256-bit AVX move (no alignment guarantee on mem)
2960   %}
2961   ins_pipe( pipe_slow );
2962 %}
2963 
2964 // Move vector (32 bytes): register-to-register copy from a vecY into a legacy-only (xmm0-15) register.
2965 instruct MoveVecY2Leg(legVecY dst, vecY src) %{
2966   match(Set dst src);
2967   format %{ "vmovdqu $dst,$src\t! load vector (32 bytes)" %}
2968   ins_encode %{
2969     if (UseAVX <= 2 || VM_Version::supports_avx512vl()) {  // fixed: was 'UseAVX < 2', which took the EVEX path at UseAVX == 2 where AVX-512 (evmovdquq) is unavailable
2970       __ vmovdqu($dst$$XMMRegister, $src$$XMMRegister);  // plain 256-bit unaligned move is encodable here
2971     } else {
2972       int vector_len = 2;  // 512-bit: without AVX512VL a 256-bit EVEX move cannot be encoded, so copy the full register (AVX512F)
2973       __ evmovdquq($dst$$XMMRegister, $src$$XMMRegister, vector_len);
2974     }
2975   %}
2976   ins_pipe( fpu_reg_reg );
2977 %}
2978 
2979 // Move vector (32 bytes): register-to-register copy from a legacy-only (xmm0-15) register into a vecY.
2980 instruct MoveLeg2VecY(vecY dst, legVecY src) %{
2981   match(Set dst src);
2982   format %{ "vmovdqu $dst,$src\t! load vector (32 bytes)" %}
2983   ins_encode %{
2984     if (UseAVX <= 2 || VM_Version::supports_avx512vl()) {  // fixed: was 'UseAVX < 2', which took the EVEX path at UseAVX == 2 where AVX-512 (evmovdquq) is unavailable
2985       __ vmovdqu($dst$$XMMRegister, $src$$XMMRegister);  // plain 256-bit unaligned move is encodable here
2986     } else {
2987       int vector_len = 2;  // 512-bit: without AVX512VL a 256-bit EVEX move cannot be encoded, so copy the full register (AVX512F)
2988       __ evmovdquq($dst$$XMMRegister, $src$$XMMRegister, vector_len);
2989     }
2990   %}
2991   ins_pipe( fpu_reg_reg );
2992 %}
2993 
2994 // Load vectors (64 bytes long)
2995 instruct loadV64_dword(vecZ dst, memory mem) %{
2996   predicate(n->as_LoadVector()->memory_size() == 64 && n->as_LoadVector()->element_size() <= 4);
2997   match(Set dst (LoadVector mem));
2998   ins_cost(125);
2999   format %{ "vmovdqul $dst k0,$mem\t! load vector (64 bytes)" %}
3000   ins_encode %{
3001     int vector_len = 2;
3002     __ evmovdqul($dst$$XMMRegister, $mem$$Address, vector_len);
3003   %}
3004   ins_pipe( pipe_slow );




2907   ins_pipe( fpu_reg_reg );
2908 %}
2909 
2910 // Load vectors (16 bytes long): match a LoadVector node and load 16 bytes from memory into a vecX register.
2911 instruct loadV16(vecX dst, memory mem) %{
2912   predicate(n->as_LoadVector()->memory_size() == 16);  // only vector loads with a 16-byte memory footprint
2913   match(Set dst (LoadVector mem));
2914   ins_cost(125);
2915   format %{ "movdqu  $dst,$mem\t! load vector (16 bytes)" %}
2916   ins_encode %{
2917     __ movdqu($dst$$XMMRegister, $mem$$Address);  // unaligned 128-bit move (no alignment guarantee on mem)
2918   %}
2919   ins_pipe( pipe_slow );
2920 %}
2921 
2922 // Load vectors (16 bytes long) -- register-to-register copy from a vecX into a legacy-only (xmm0-15) register.
2923 instruct MoveVecX2Leg(legVecX dst, vecX src) %{
2924   match(Set dst src);
2925   format %{ "movdqu $dst,$src\t! load vector (16 bytes)" %}
2926   ins_encode %{
2927     if (UseAVX <= 2 || VM_Version::supports_avx512vl()) {  // below AVX-512, or AVX-512 with VL: the 128-bit move can encode both operands
2928       __ movdqu($dst$$XMMRegister, $src$$XMMRegister);  // plain 128-bit unaligned move
2929     } else {
2930       int vector_len = 2;  // 512-bit: without AVX512VL, copy the full register via EVEX (AVX512F) instead
2931       __ evmovdquq($dst$$XMMRegister, $src$$XMMRegister, vector_len);
2932     }
2933   %}
2934   ins_pipe( fpu_reg_reg );
2935 %}
2936 
2937 // Load vectors (16 bytes long) -- register-to-register copy from a legacy-only (xmm0-15) register into a vecX.
2938 instruct MoveLeg2VecX(vecX dst, legVecX src) %{
2939   match(Set dst src);
2940   format %{ "movdqu $dst,$src\t! load vector (16 bytes)" %}
2941   ins_encode %{
2942     if (UseAVX <= 2 || VM_Version::supports_avx512vl()) {  // below AVX-512, or AVX-512 with VL: the 128-bit move can encode both operands
2943       __ movdqu($dst$$XMMRegister, $src$$XMMRegister);  // plain 128-bit unaligned move
2944     } else {
2945       int vector_len = 2;  // 512-bit: without AVX512VL, copy the full register via EVEX (AVX512F) instead
2946       __ evmovdquq($dst$$XMMRegister, $src$$XMMRegister, vector_len);
2947     }
2948   %}
2949   ins_pipe( fpu_reg_reg );
2950 %}
2951 
2952 // Load vectors (32 bytes long): match a LoadVector node and load 32 bytes from memory into a vecY register.
2953 instruct loadV32(vecY dst, memory mem) %{
2954   predicate(n->as_LoadVector()->memory_size() == 32);  // only vector loads with a 32-byte memory footprint
2955   match(Set dst (LoadVector mem));
2956   ins_cost(125);
2957   format %{ "vmovdqu $dst,$mem\t! load vector (32 bytes)" %}
2958   ins_encode %{
2959     __ vmovdqu($dst$$XMMRegister, $mem$$Address);  // unaligned 256-bit AVX move (no alignment guarantee on mem)
2960   %}
2961   ins_pipe( pipe_slow );
2962 %}
2963 
2964 // Load vectors (32 bytes long) -- register-to-register copy from a vecY into a legacy-only (xmm0-15) register.
2965 instruct MoveVecY2Leg(legVecY dst, vecY src) %{
2966   match(Set dst src);
2967   format %{ "vmovdqu $dst,$src\t! load vector (32 bytes)" %}
2968   ins_encode %{
2969     if (UseAVX <= 2 || VM_Version::supports_avx512vl()) {  // below AVX-512, or AVX-512 with VL: the 256-bit move can encode both operands
2970       __ vmovdqu($dst$$XMMRegister, $src$$XMMRegister);  // plain 256-bit unaligned move
2971     } else {
2972       int vector_len = 2;  // 512-bit: without AVX512VL, copy the full register via EVEX (AVX512F) instead
2973       __ evmovdquq($dst$$XMMRegister, $src$$XMMRegister, vector_len);
2974     }
2975   %}
2976   ins_pipe( fpu_reg_reg );
2977 %}
2978 
2979 // Load vectors (32 bytes long) -- register-to-register copy from a legacy-only (xmm0-15) register into a vecY.
2980 instruct MoveLeg2VecY(vecY dst, legVecY src) %{
2981   match(Set dst src);
2982   format %{ "vmovdqu $dst,$src\t! load vector (32 bytes)" %}
2983   ins_encode %{
2984     if (UseAVX <= 2 || VM_Version::supports_avx512vl()) {  // below AVX-512, or AVX-512 with VL: the 256-bit move can encode both operands
2985       __ vmovdqu($dst$$XMMRegister, $src$$XMMRegister);  // plain 256-bit unaligned move
2986     } else {
2987       int vector_len = 2;  // 512-bit: without AVX512VL, copy the full register via EVEX (AVX512F) instead
2988       __ evmovdquq($dst$$XMMRegister, $src$$XMMRegister, vector_len);
2989     }
2990   %}
2991   ins_pipe( fpu_reg_reg );
2992 %}
2993 
2994 // Load vectors (64 bytes long)
2995 instruct loadV64_dword(vecZ dst, memory mem) %{
2996   predicate(n->as_LoadVector()->memory_size() == 64 && n->as_LoadVector()->element_size() <= 4);
2997   match(Set dst (LoadVector mem));
2998   ins_cost(125);
2999   format %{ "vmovdqul $dst k0,$mem\t! load vector (64 bytes)" %}
3000   ins_encode %{
3001     int vector_len = 2;
3002     __ evmovdqul($dst$$XMMRegister, $mem$$Address, vector_len);
3003   %}
3004   ins_pipe( pipe_slow );


< prev index next >