< prev index next >

src/cpu/x86/vm/x86.ad

Print this page




2114   }
2115   int offset_size = 0;
2116   int size = 5;
2117   if (UseAVX > 2 ) {
2118     if (VM_Version::supports_avx512novl() && (vec_len == 2)) {
2119       offset_size = (stack_offset == 0) ? 0 : ((is_single_byte) ? 1 : 4);
2120       size += 2; // Need an additional two bytes for EVEX encoding
2121     } else if (VM_Version::supports_avx512novl() && (vec_len < 2)) {
2122       offset_size = (stack_offset == 0) ? 0 : ((stack_offset <= 127) ? 1 : 4);
2123     } else {
2124       offset_size = (stack_offset == 0) ? 0 : ((is_single_byte) ? 1 : 4);
2125       size += 2; // Need an additional two bytes for EVEX encodding
2126     }
2127   } else {
2128     offset_size = (stack_offset == 0) ? 0 : ((stack_offset <= 127) ? 1 : 4);
2129   }
2130   // VEX_2bytes prefix is used if UseAVX > 0, so it takes the same 2 bytes as SIMD prefix.
2131   return size+offset_size;
2132 }
2133 
2134 static inline jfloat replicate4_imm(int con, int width) {
2135   // Load a constant of "width" (in bytes) and replicate it to fill 32bit.
2136   assert(width == 1 || width == 2, "only byte or short types here");
2137   int bit_width = width * 8;
2138   jint val = con;
2139   val &= (1 << bit_width) - 1;  // mask off sign bits
2140   while(bit_width < 32) {
2141     val |= (val << bit_width);
2142     bit_width <<= 1;
2143   }
2144   jfloat fval = *((jfloat*) &val);  // coerce to float type
2145   return fval;
2146 }
2147 
2148 static inline jdouble replicate8_imm(int con, int width) {
2149   // Load a constant of "width" (in bytes) and replicate it to fill 64bit.
2150   assert(width == 1 || width == 2 || width == 4, "only byte, short or int types here");
2151   int bit_width = width * 8;
2152   jlong val = con;
2153   val &= (((jlong) 1) << bit_width) - 1;  // mask off sign bits
2154   while(bit_width < 64) {
2155     val |= (val << bit_width);
2156     bit_width <<= 1;
2157   }
2158   jdouble dval = *((jdouble*) &val);  // coerce to double type
2159   return dval;
2160 }
2161 
2162 #ifndef PRODUCT
       // Debug-only pretty printer: describes the _count bytes of nop
       // padding that MachNopNode::emit() produces for this node.
2163   void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
2164     st->print("nop \t# %d bytes pad for loops and calls", _count);
2165   }
2166 #endif
2167 
       // Emit _count bytes of nop instructions into the code buffer.
       // The local must be named _masm: the __ shorthand used on the next
       // line conventionally expands to it (HotSpot assembler idiom).
2168   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
2169     MacroAssembler _masm(&cbuf);
2170     __ nop(_count);
2171   }
2172 
       // Size in bytes of this padding node: emit() issues exactly _count
       // nop bytes, so the reported size and the emitted code stay in sync.
2173   uint MachNopNode::size(PhaseRegAlloc*) const {
2174     return _count;
2175   }
2176 
2177 #ifndef PRODUCT
2178   void MachBreakpointNode::format(PhaseRegAlloc*, outputStream* st) const {
2179     st->print("# breakpoint");




2114   }
2115   int offset_size = 0;
2116   int size = 5;
2117   if (UseAVX > 2 ) {
2118     if (VM_Version::supports_avx512novl() && (vec_len == 2)) {
2119       offset_size = (stack_offset == 0) ? 0 : ((is_single_byte) ? 1 : 4);
2120       size += 2; // Need an additional two bytes for EVEX encoding
2121     } else if (VM_Version::supports_avx512novl() && (vec_len < 2)) {
2122       offset_size = (stack_offset == 0) ? 0 : ((stack_offset <= 127) ? 1 : 4);
2123     } else {
2124       offset_size = (stack_offset == 0) ? 0 : ((is_single_byte) ? 1 : 4);
2125       size += 2; // Need an additional two bytes for EVEX encodding
2126     }
2127   } else {
2128     offset_size = (stack_offset == 0) ? 0 : ((stack_offset <= 127) ? 1 : 4);
2129   }
2130   // VEX_2bytes prefix is used if UseAVX > 0, so it takes the same 2 bytes as SIMD prefix.
2131   return size+offset_size;
2132 }
2133 
2134 static inline jint replicate4_imm(int con, int width) {
2135   // Load a constant of "width" (in bytes) and replicate it to fill 32bit.
2136   assert(width == 1 || width == 2, "only byte or short types here");
2137   int bit_width = width * 8;
2138   jint val = con;
2139   val &= (1 << bit_width) - 1;  // mask off sign bits
2140   while(bit_width < 32) {
2141     val |= (val << bit_width);
2142     bit_width <<= 1;
2143   }
2144   return val;

2145 }
2146 
2147 static inline jlong replicate8_imm(int con, int width) {
2148   // Load a constant of "width" (in bytes) and replicate it to fill 64bit.
2149   assert(width == 1 || width == 2 || width == 4, "only byte, short or int types here");
2150   int bit_width = width * 8;
2151   jlong val = con;
2152   val &= (((jlong) 1) << bit_width) - 1;  // mask off sign bits
2153   while(bit_width < 64) {
2154     val |= (val << bit_width);
2155     bit_width <<= 1;
2156   }
2157   return val;

2158 }
2159 
2160 #ifndef PRODUCT
       // Debug-only pretty printer: describes the _count bytes of nop
       // padding that MachNopNode::emit() produces for this node.
2161   void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
2162     st->print("nop \t# %d bytes pad for loops and calls", _count);
2163   }
2164 #endif
2165 
       // Emit _count bytes of nop instructions into the code buffer.
       // The local must be named _masm: the __ shorthand used on the next
       // line conventionally expands to it (HotSpot assembler idiom).
2166   void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
2167     MacroAssembler _masm(&cbuf);
2168     __ nop(_count);
2169   }
2170 
       // Size in bytes of this padding node: emit() issues exactly _count
       // nop bytes, so the reported size and the emitted code stay in sync.
2171   uint MachNopNode::size(PhaseRegAlloc*) const {
2172     return _count;
2173   }
2174 
2175 #ifndef PRODUCT
2176   void MachBreakpointNode::format(PhaseRegAlloc*, outputStream* st) const {
2177     st->print("# breakpoint");


< prev index next >