src/cpu/x86/vm/assembler_x86.cpp
7181494 Sdiff src/cpu/x86/vm




2556   emit_byte(0x60);
2557   emit_byte(0xC0 | encode);
2558 }
2559 
2560 void Assembler::punpckldq(XMMRegister dst, Address src) {
2561   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2562   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
2563   InstructionMark im(this);
2564   simd_prefix(dst, dst, src, VEX_SIMD_66);
2565   emit_byte(0x62);
2566   emit_operand(dst, src);
2567 }
2568 
2569 void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
2570   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2571   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66);
2572   emit_byte(0x62);
2573   emit_byte(0xC0 | encode);
2574 }
2575 
2576 void Assembler::push(int32_t imm32) {
2577   // in 64bits we push 64bits onto the stack but only
2578   // take a 32bit immediate
2579   emit_byte(0x68);
2580   emit_long(imm32);
2581 }
2582 
2583 void Assembler::push(Register src) {
2584   int encode = prefix_and_encode(src->encoding());
2585 
2586   emit_byte(0x50 | encode);
2587 }
2588 
2589 void Assembler::pushf() {
2590   emit_byte(0x9C);
2591 }
2592 
2593 #ifndef _LP64 // no 32bit push/pop on amd64
2594 void Assembler::pushl(Address src) {
2595   // Note this will push 64bit on 64bit


3161   int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256);
3162   emit_byte(0x57);
3163   emit_byte(0xC0 | encode);
3164 }
3165 
3166 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src) {
3167   assert(VM_Version::supports_avx(), "");
3168   InstructionMark im(this);
3169   vex_prefix(dst, nds, src, VEX_SIMD_NONE); // 128-bit vector
3170   emit_byte(0x57);
3171   emit_operand(dst, src);
3172 }
3173 
3174 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3175   assert(VM_Version::supports_avx(), "");
3176   int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_NONE, vector256);
3177   emit_byte(0x57);
3178   emit_byte(0xC0 | encode);
3179 }
3180 
3181 void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
3182   assert(VM_Version::supports_avx(), "");
3183   bool vector256 = true;
3184   int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
3185   emit_byte(0x18);
3186   emit_byte(0xC0 | encode);
3187   // 0x00 - insert into lower 128 bits
3188   // 0x01 - insert into upper 128 bits
3189   emit_byte(0x01);
3190 }
3191 
3192 void Assembler::vzeroupper() {
3193   assert(VM_Version::supports_avx(), "");
3194   (void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE);
3195   emit_byte(0x77);
3196 }
3197 
3198 
3199 #ifndef _LP64
3200 // 32bit only pieces of the assembler
3201 
3202 void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
3203   // NO PREFIX AS NEVER 64BIT
3204   InstructionMark im(this);
3205   emit_byte(0x81);
3206   emit_byte(0xF8 | src1->encoding());
3207   emit_data(imm32, rspec, 0);
3208 }
3209 
3210 void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
3211   // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs


7463     // unsupported
7464     ShouldNotReachHere();
7465 }
7466 
7467 void MacroAssembler::movbool(Address dst, Register src) {
7468   if(sizeof(bool) == 1)
7469     movb(dst, src);
7470   else if(sizeof(bool) == 2)
7471     movw(dst, src);
7472   else if(sizeof(bool) == 4)
7473     movl(dst, src);
7474   else
7475     // unsupported
7476     ShouldNotReachHere();
7477 }
7478 
7479 void MacroAssembler::movbyte(ArrayAddress dst, int src) {
7480   movb(as_Address(dst), src);
7481 }
7482 
7483 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
7484   if (reachable(src)) {
7485     if (UseXmmLoadAndClearUpper) {
7486       movsd (dst, as_Address(src));
7487     } else {
7488       movlpd(dst, as_Address(src));
7489     }
7490   } else {
7491     lea(rscratch1, src);
7492     if (UseXmmLoadAndClearUpper) {
7493       movsd (dst, Address(rscratch1, 0));
7494     } else {
7495       movlpd(dst, Address(rscratch1, 0));
7496     }
7497   }
7498 }
7499 
7500 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) {
7501   if (reachable(src)) {
7502     movss(dst, as_Address(src));




2556   emit_byte(0x60);
2557   emit_byte(0xC0 | encode);
2558 }
2559 
2560 void Assembler::punpckldq(XMMRegister dst, Address src) {
2561   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2562   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
2563   InstructionMark im(this);
2564   simd_prefix(dst, dst, src, VEX_SIMD_66);
2565   emit_byte(0x62);
2566   emit_operand(dst, src);
2567 }
2568 
2569 void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
2570   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2571   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66);
2572   emit_byte(0x62);
2573   emit_byte(0xC0 | encode);
2574 }
2575 
2576 void Assembler::punpcklqdq(XMMRegister dst, Address src) {
2577   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2578   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
2579   InstructionMark im(this);
2580   simd_prefix(dst, dst, src, VEX_SIMD_66);
2581   emit_byte(0x6C);
2582   emit_operand(dst, src);
2583 }
2584 
2585 void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
2586   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2587   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66);
2588   emit_byte(0x6C);
2589   emit_byte(0xC0 | encode);
2590 }
2591 
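The register-register forms above follow the standard SSE encoding pattern: the mandatory 0x66 prefix (or its VEX equivalent when AVX is in use), the 0F-map opcode (0x62 for PUNPCKLDQ, 0x6C for PUNPCKLQDQ), and a ModRM byte of 0xC0 | encode that names the two registers. A minimal standalone sketch of that last step, assuming (as the surrounding code suggests) that encode packs the destination register in bits 5..3 and the source in bits 2..0:

  #include <cstdint>
  #include <cstdio>

  int main() {
    uint8_t dst = 1, src = 2;                        // xmm1, xmm2
    uint8_t modrm = 0xC0 | (dst << 3) | src;         // mod=11 -> register operands
    uint8_t bytes[] = { 0x66, 0x0F, 0x6C, modrm };   // 66 0F 6C /r = punpcklqdq
    for (uint8_t b : bytes) printf("%02X ", b);      // prints: 66 0F 6C CA
    printf("\n");
    return 0;
  }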
2592 void Assembler::push(int32_t imm32) {
2593   // in 64bits we push 64bits onto the stack but only
2594   // take a 32bit immediate
2595   emit_byte(0x68);
2596   emit_long(imm32);
2597 }
2598 
2599 void Assembler::push(Register src) {
2600   int encode = prefix_and_encode(src->encoding());
2601 
2602   emit_byte(0x50 | encode);
2603 }
2604 
2605 void Assembler::pushf() {
2606   emit_byte(0x9C);
2607 }
2608 
2609 #ifndef _LP64 // no 32bit push/pop on amd64
2610 void Assembler::pushl(Address src) {
2611   // Note this will push 64bit on 64bit


3177   int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256);
3178   emit_byte(0x57);
3179   emit_byte(0xC0 | encode);
3180 }
3181 
3182 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src) {
3183   assert(VM_Version::supports_avx(), "");
3184   InstructionMark im(this);
3185   vex_prefix(dst, nds, src, VEX_SIMD_NONE); // 128-bit vector
3186   emit_byte(0x57);
3187   emit_operand(dst, src);
3188 }
3189 
3190 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3191   assert(VM_Version::supports_avx(), "");
3192   int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_NONE, vector256);
3193   emit_byte(0x57);
3194   emit_byte(0xC0 | encode);
3195 }
3196 
3197 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3198   assert(VM_Version::supports_avx2() || (!vector256) && VM_Version::supports_avx(), "");
3199   int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256);
3200   emit_byte(0xEF);
3201   emit_byte(0xC0 | encode);
3202 }
3203 
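vpxor is the VEX-encoded packed-integer XOR (opcode 0xEF), the integer counterpart of the vxorps/vxorpd forms above (opcode 0x57); only the 256-bit ymm variant needs AVX2, which is what the assert expresses. Its most common application is the self-XOR zeroing idiom; a usage sketch of the method defined above, register choice purely illustrative:

  // Illustrative only: zero all 256 bits of ymm0 by XORing the register
  // with itself; vector256 = true selects the ymm form guarded by AVX2.
  vpxor(xmm0, xmm0, xmm0, /* vector256 */ true);   // canonical encoding: C5 FD EF C0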
3204 void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
3205   assert(VM_Version::supports_avx(), "");
3206   bool vector256 = true;
3207   int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
3208   emit_byte(0x18);
3209   emit_byte(0xC0 | encode);
3210   // 0x00 - insert into lower 128 bits
3211   // 0x01 - insert into upper 128 bits
3212   emit_byte(0x01);
3213 }
3214 
3215 void Assembler::vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
3216   assert(VM_Version::supports_avx2(), "");
3217   bool vector256 = true;
3218   int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
3219   emit_byte(0x38);
3220   emit_byte(0xC0 | encode);
3221   // 0x00 - insert into lower 128 bits
3222   // 0x01 - insert into upper 128 bits
3223   emit_byte(0x01);
3224 }
3225 
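Both insert helpers hard-wire the trailing imm8 to 0x01, so they always write the 128-bit source into the upper lane of the 256-bit destination; vinsertf128h is the AVX form (0F 3A-map opcode 0x18) and vinserti128h the AVX2 integer form (opcode 0x38). One common pattern, sketched here purely as an illustration, is widening a 128-bit value by duplicating it into the upper lane:

  // Illustrative only: nds supplies the low 128 bits of the result, src the
  // 128 bits written to the upper lane (imm8 is fixed at 0x01 above), so using
  // the same register everywhere duplicates its low lane into the high one.
  vinsertf128h(xmm1, xmm1, xmm1);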
3226 void Assembler::vzeroupper() {
3227   assert(VM_Version::supports_avx(), "");
3228   (void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE);
3229   emit_byte(0x77);
3230 }
3231 
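vzeroupper clears bits 255:128 of every ymm register; the two-byte VEX prefix produced for xmm0/xmm0/xmm0 with no SIMD prefix, followed by opcode 0x77, gives the standard C5 F8 77 encoding. It is emitted before running legacy (non-VEX) SSE code to avoid the AVX-to-SSE transition penalty. A typical, purely illustrative use in a stub epilogue:

  // Illustrative only: drop the dirty upper ymm state before returning to
  // code that may execute legacy SSE instructions.
  vzeroupper();
  ret(0);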
3232 
3233 #ifndef _LP64
3234 // 32bit only pieces of the assembler
3235 
3236 void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
3237   // NO PREFIX AS NEVER 64BIT
3238   InstructionMark im(this);
3239   emit_byte(0x81);
3240   emit_byte(0xF8 | src1->encoding());
3241   emit_data(imm32, rspec, 0);
3242 }
3243 
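The register form above encodes CMP r/m32, imm32 as opcode 0x81 with a ModRM byte of 0xF8 | reg, i.e. mod=11 plus the /7 opcode extension that selects CMP, and the 32-bit immediate then goes out through emit_data so it can carry the relocation. For example, cmp eax, imm32 comes out as 81 F8 followed by the four little-endian immediate bytes.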
3244 void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
3245   // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs


7497     // unsupported
7498     ShouldNotReachHere();
7499 }
7500 
7501 void MacroAssembler::movbool(Address dst, Register src) {
7502   if(sizeof(bool) == 1)
7503     movb(dst, src);
7504   else if(sizeof(bool) == 2)
7505     movw(dst, src);
7506   else if(sizeof(bool) == 4)
7507     movl(dst, src);
7508   else
7509     // unsupported
7510     ShouldNotReachHere();
7511 }
7512 
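sizeof(bool) is implementation-defined in C++ (one byte on the toolchains HotSpot targets, but the code deliberately does not assume that), so movbool selects the store width from it and the generated code always matches the layout the C++ compiler chose for the VM's own bool fields. A trivial standalone check, for illustration only:

  #include <cstdio>

  int main() {
    // movbool keys its store width off this value; it is 1 on common x86 ABIs.
    printf("sizeof(bool) = %zu\n", sizeof(bool));
    return 0;
  }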
7513 void MacroAssembler::movbyte(ArrayAddress dst, int src) {
7514   movb(as_Address(dst), src);
7515 }
7516 
7517 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src) {
7518   if (reachable(src)) {
7519     movdl(dst, as_Address(src));
7520   } else {
7521     lea(rscratch1, src);
7522     movdl(dst, Address(rscratch1, 0));
7523   }
7524 }
7525 
7526 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src) {
7527   if (reachable(src)) {
7528     movq(dst, as_Address(src));
7529   } else {
7530     lea(rscratch1, src);
7531     movq(dst, Address(rscratch1, 0));
7532   }
7533 }
7534 
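movdl and movq take an AddressLiteral here: when the literal is reachable it is used directly as a memory operand (RIP-relative on 64-bit), otherwise the full 64-bit address is first materialized in rscratch1 and the load goes through that register. Reachable essentially means the target lies within a signed 32-bit displacement of the code being emitted; a minimal standalone sketch of that test, written only to illustrate the idea and not HotSpot's actual helper:

  #include <cstdint>

  // True when 'target' can be reached with a disp32 from an instruction that
  // ends at 'next_insn_addr', i.e. the displacement fits in a signed 32 bits.
  static bool fits_rip_relative(uint64_t target, uint64_t next_insn_addr) {
    int64_t disp = (int64_t)(target - next_insn_addr);
    return disp == (int64_t)(int32_t)disp;
  }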
7535 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
7536   if (reachable(src)) {
7537     if (UseXmmLoadAndClearUpper) {
7538       movsd (dst, as_Address(src));
7539     } else {
7540       movlpd(dst, as_Address(src));
7541     }
7542   } else {
7543     lea(rscratch1, src);
7544     if (UseXmmLoadAndClearUpper) {
7545       movsd (dst, Address(rscratch1, 0));
7546     } else {
7547       movlpd(dst, Address(rscratch1, 0));
7548     }
7549   }
7550 }
7551 
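Both movsd and movlpd load 64 bits into the low half of the XMM destination; they differ in the upper half, which movsd (from memory) zeroes while movlpd merges with the register's previous contents and so carries a dependency on them. UseXmmLoadAndClearUpper chooses between the two. The same distinction expressed with SSE2 intrinsics, for illustration only:

  #include <emmintrin.h>

  // _mm_load_sd  ~ movsd  xmm, m64 : load low 64 bits, zero the upper 64.
  // _mm_loadl_pd ~ movlpd xmm, m64 : load low 64 bits, keep the upper 64
  //                                  from 'prev' (a merge, hence a dependency).
  __m128d load_and_clear(const double* p)               { return _mm_load_sd(p); }
  __m128d load_and_keep(__m128d prev, const double* p)  { return _mm_loadl_pd(prev, p); }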
7552 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) {
7553   if (reachable(src)) {
7554     movss(dst, as_Address(src));

