--- old/src/hotspot/cpu/x86/x86_64.ad	2018-03-14 08:46:03.557237010 +0100
+++ new/src/hotspot/cpu/x86/x86_64.ad	2018-03-14 08:46:03.399230241 +0100
@@ -7821,7 +7821,7 @@
   }
     __ cmpxchgq($newval$$Register, $mem_ptr$$Address);
   %}
-  
+
   ins_pipe( pipe_cmpxchg );
 %}
 
@@ -11736,7 +11736,7 @@
 instruct testL_reg_reg(rFlagsReg cr, rRegP src, r12_RegL other, immL0 zero)
 %{
   match(Set cr (CmpL (AndL (CastP2X src) other) zero));
-  ins_cost(50); 
+  ins_cost(50);
   format %{ "testq   $src, $other\t# long" %}
   ins_encode %{
@@ -12521,173 +12521,193 @@
   ins_pipe(pipe_jmp);
 %}
 
-// Execute GC load barrier (strong) slow path
+//
+// Execute ZGC load barrier (strong) slow path
+//
 
 // When running without XMM regs
-
 instruct loadBarrierSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
-  match(Set dst (LoadBarrierSlowReg mem));
+  match(Set dst (LoadBarrierSlowReg mem));
   predicate(MaxVectorSize < 16);
   effect(DEF dst, KILL cr);
 
-  format %{"LoadBarrierSlowRegNoVec $dst,$mem" %}
+  format %{"LoadBarrierSlowRegNoVec $dst, $mem" %}
   ins_encode %{
-    Register d = $dst$$Register;
-
-    assert( d != r12, "Can't be R12!");
-    assert( d != r15, "Can't be R15!");
-    assert( d != rsp, "Can't be RSP!");
-    __ lea(d,$mem$$Address);
-    __ call(RuntimeAddress(StubRoutines::x86::load_barrier_slow_stub(d)));
-  %}
-  ins_pipe(pipe_slow);
-%}
+    Register d = $dst$$Register;
 
-// For XMM and YMM enabled processors
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
 
-instruct loadBarrierSlowRegXmm(rRegP dst, memory mem, rFlagsReg cr,
-                               rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3, rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
-                               rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11, rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15
-                               ) %{
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(StubRoutines::x86::load_barrier_slow_stub(d)));
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+// For XMM and YMM enabled processors
+instruct loadBarrierSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
+                                     rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
+                                     rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
+                                     rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
+                                     rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
 
-  match(Set dst (LoadBarrierSlowReg mem));
+  match(Set dst (LoadBarrierSlowReg mem));
   predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
-  effect(DEF dst, KILL cr,
-         KILL x0, KILL x1, KILL x2, KILL x3, KILL x4, KILL x5, KILL x6, KILL x7,
-         KILL x8, KILL x9, KILL x10, KILL x11, KILL x12, KILL x13, KILL x14, KILL x15
-         );
-
-  format %{"LoadBarrierSlowRegXmm $dst,$mem" %}
-  ins_encode %{
-    Register d = $dst$$Register;
-
-    assert( d != r12, "Can't be R12!");
-    assert( d != r15, "Can't be R15!");
-    assert( d != rsp, "Can't be RSP!");
-    __ lea(d,$mem$$Address);
-    __ call(RuntimeAddress(StubRoutines::x86::load_barrier_slow_stub(d)));
-  %}
-  ins_pipe(pipe_slow);
-%}
-
-// For ZMM enabled processors
+  effect(DEF dst, KILL cr,
+         KILL x0, KILL x1, KILL x2, KILL x3,
+         KILL x4, KILL x5, KILL x6, KILL x7,
+         KILL x8, KILL x9, KILL x10, KILL x11,
+         KILL x12, KILL x13, KILL x14, KILL x15);
+
+  format %{"LoadBarrierSlowRegXmm $dst, $mem" %}
+  ins_encode %{
+    Register d = $dst$$Register;
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(StubRoutines::x86::load_barrier_slow_stub(d)));
+  %}
+  ins_pipe(pipe_slow);
+%}
 
-instruct loadBarrierSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
-                               rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3, rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
-                               rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11, rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
-                               rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19, rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
-                               rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27, rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31
-                               ) %{
+// For ZMM enabled processors
+instruct loadBarrierSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
+                               rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
+                               rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
+                               rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
+                               rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
+                               rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
+                               rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
+                               rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
+                               rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
 
-  match(Set dst (LoadBarrierSlowReg mem));
+  match(Set dst (LoadBarrierSlowReg mem));
   predicate((UseAVX == 3) && (MaxVectorSize >= 16));
-  effect(DEF dst, KILL cr,
-         KILL x0, KILL x1, KILL x2, KILL x3, KILL x4, KILL x5, KILL x6, KILL x7,
-         KILL x8, KILL x9, KILL x10, KILL x11, KILL x12, KILL x13, KILL x14, KILL x15,
-         KILL x16, KILL x17, KILL x18, KILL x19, KILL x20, KILL x21, KILL x22, KILL x23,
-         KILL x24, KILL x25, KILL x26, KILL x27, KILL x28, KILL x29, KILL x30, KILL x31
-         );
-
-  format %{"LoadBarrierSlowRegZmm $dst,$mem" %}
-  ins_encode %{
-    Register d = $dst$$Register;
-
-    assert( d != r12, "Can't be R12!");
-    assert( d != r15, "Can't be R15!");
-    assert( d != rsp, "Can't be RSP!");
-    __ lea(d,$mem$$Address);
-    __ call(RuntimeAddress(StubRoutines::x86::load_barrier_slow_stub(d)));
-  %}
-  ins_pipe(pipe_slow);
-%}
-
-// Execute GC load barrier weak slow path
+  effect(DEF dst, KILL cr,
+         KILL x0, KILL x1, KILL x2, KILL x3,
+         KILL x4, KILL x5, KILL x6, KILL x7,
+         KILL x8, KILL x9, KILL x10, KILL x11,
+         KILL x12, KILL x13, KILL x14, KILL x15,
+         KILL x16, KILL x17, KILL x18, KILL x19,
+         KILL x20, KILL x21, KILL x22, KILL x23,
+         KILL x24, KILL x25, KILL x26, KILL x27,
+         KILL x28, KILL x29, KILL x30, KILL x31);
+
+  format %{"LoadBarrierSlowRegZmm $dst, $mem" %}
+  ins_encode %{
+    Register d = $dst$$Register;
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
 
-// When running without XMM regs
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(StubRoutines::x86::load_barrier_slow_stub(d)));
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+//
+// Execute ZGC load barrier (weak) slow path
+//
+
+// When running without XMM regs
 instruct loadBarrierWeakSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
-  match(Set dst (LoadBarrierSlowReg mem));
+  match(Set dst (LoadBarrierSlowReg mem));
   predicate(MaxVectorSize < 16);
   effect(DEF dst, KILL cr);
 
-  format %{"LoadBarrierSlowRegNoVec $dst,$mem" %}
+  format %{"LoadBarrierSlowRegNoVec $dst, $mem" %}
   ins_encode %{
-    Register d = $dst$$Register;
-
-    assert( d != r12, "Can't be R12!");
-    assert( d != r15, "Can't be R15!");
-    assert( d != rsp, "Can't be RSP!");
-    __ lea(d,$mem$$Address);
-    __ call(RuntimeAddress(StubRoutines::x86::load_barrier_weak_slow_stub(d)));
-  %}
-  ins_pipe(pipe_slow);
-%}
+    Register d = $dst$$Register;
 
-// For XMM and YMM enabled processors
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
 
-instruct loadBarrierWeakSlowRegXmm(rRegP dst, memory mem, rFlagsReg cr,
-                                   rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3, rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
-                                   rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11, rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15
-                                   ) %{
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(StubRoutines::x86::load_barrier_weak_slow_stub(d)));
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+// For XMM and YMM enabled processors
+instruct loadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
+                                         rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
+                                         rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
+                                         rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
+                                         rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
 
-  match(Set dst (LoadBarrierWeakSlowReg mem));
+  match(Set dst (LoadBarrierWeakSlowReg mem));
   predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
-  effect(DEF dst, KILL cr,
-         KILL x0, KILL x1, KILL x2, KILL x3, KILL x4, KILL x5, KILL x6, KILL x7,
-         KILL x8, KILL x9, KILL x10, KILL x11, KILL x12, KILL x13, KILL x14, KILL x15
-         );
-
-  format %{"LoadBarrierWeakSlowRegXmm $dst,$mem" %}
-  ins_encode %{
-    Register d = $dst$$Register;
-
-    assert( d != r12, "Can't be R12!");
-    assert( d != r15, "Can't be R15!");
-    assert( d != rsp, "Can't be RSP!");
-    __ lea(d,$mem$$Address);
-    __ call(RuntimeAddress(StubRoutines::x86::load_barrier_weak_slow_stub(d)));
-  %}
-  ins_pipe(pipe_slow);
-%}
-
-// For ZMM enabled processors
+  effect(DEF dst, KILL cr,
+         KILL x0, KILL x1, KILL x2, KILL x3,
+         KILL x4, KILL x5, KILL x6, KILL x7,
+         KILL x8, KILL x9, KILL x10, KILL x11,
+         KILL x12, KILL x13, KILL x14, KILL x15);
+
+  format %{"LoadBarrierWeakSlowRegXmm $dst, $mem" %}
+  ins_encode %{
+    Register d = $dst$$Register;
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d,$mem$$Address);
+    __ call(RuntimeAddress(StubRoutines::x86::load_barrier_weak_slow_stub(d)));
+  %}
+  ins_pipe(pipe_slow);
+%}
 
-instruct loadBarrierWeakSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
-                                   rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3, rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
-                                   rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11, rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
-                                   rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19, rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
-                                   rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27, rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31
-                                   ) %{
+// For ZMM enabled processors
+instruct loadBarrierWeakSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
+                                   rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
+                                   rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
+                                   rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
+                                   rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
+                                   rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
+                                   rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
+                                   rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
+                                   rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
 
-  match(Set dst (LoadBarrierWeakSlowReg mem));
+  match(Set dst (LoadBarrierWeakSlowReg mem));
   predicate((UseAVX == 3) && (MaxVectorSize >= 16));
-  effect(DEF dst, KILL cr,
-         KILL x0, KILL x1, KILL x2, KILL x3, KILL x4, KILL x5, KILL x6, KILL x7,
-         KILL x8, KILL x9, KILL x10, KILL x11, KILL x12, KILL x13, KILL x14, KILL x15,
-         KILL x16, KILL x17, KILL x18, KILL x19, KILL x20, KILL x21, KILL x22, KILL x23,
-         KILL x24, KILL x25, KILL x26, KILL x27, KILL x28, KILL x29, KILL x30, KILL x31
-         );
-
-  format %{"LoadBarrierWeakSlowRegZmm $dst,$mem" %}
-  ins_encode %{
-    Register d = $dst$$Register;
-
-    assert( d != r12, "Can't be R12!");
-    assert( d != r15, "Can't be R15!");
-    assert( d != rsp, "Can't be RSP!");
-    __ lea(d,$mem$$Address);
-    __ call(RuntimeAddress(StubRoutines::x86::load_barrier_weak_slow_stub(d)));
-  %}
-  ins_pipe(pipe_slow);
-%}
+  effect(DEF dst, KILL cr,
+         KILL x0, KILL x1, KILL x2, KILL x3,
+         KILL x4, KILL x5, KILL x6, KILL x7,
+         KILL x8, KILL x9, KILL x10, KILL x11,
+         KILL x12, KILL x13, KILL x14, KILL x15,
+         KILL x16, KILL x17, KILL x18, KILL x19,
+         KILL x20, KILL x21, KILL x22, KILL x23,
+         KILL x24, KILL x25, KILL x26, KILL x27,
+         KILL x28, KILL x29, KILL x30, KILL x31);
+
+  format %{"LoadBarrierWeakSlowRegZmm $dst, $mem" %}
+  ins_encode %{
+    Register d = $dst$$Register;
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d,$mem$$Address);
+    __ call(RuntimeAddress(StubRoutines::x86::load_barrier_weak_slow_stub(d)));
+  %}
+  ins_pipe(pipe_slow);
+%}
 
 // ============================================================================
 // This name is KNOWN by the ADLC and cannot be changed.
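
Reviewer note: all six instructs in the hunk above share the same two-instruction slow path; they differ only in the barrier node they match (strong vs. weak), the stub they call, and how many vector registers they declare killed. The sketch below is a minimal illustration of that shared ins_encode body, not code from the patch; emit_load_barrier_slow_path is a hypothetical helper name for illustration, while lea, call, RuntimeAddress and the StubRoutines::x86::load_barrier_*slow_stub(Register) accessors are the calls the patch itself uses.

```cpp
#include "asm/macroAssembler.hpp"
#include "runtime/stubRoutines.hpp"

// Hypothetical helper showing the slow-path emission common to all
// loadBarrier*SlowReg* instructs above: compute the address of the
// loaded oop field into dst, then call a per-register stub that takes
// the address in dst and returns the healed oop in the same register.
static void emit_load_barrier_slow_path(MacroAssembler* masm, Register dst,
                                        Address mem, bool weak) {
  // r12 (heapbase), r15 (thread) and rsp are reserved on x86_64,
  // hence the asserts in the patch.
  assert(dst != r12 && dst != r15 && dst != rsp, "reserved register");

  masm->lea(dst, mem);
  address stub = weak ? StubRoutines::x86::load_barrier_weak_slow_stub(dst)
                      : StubRoutines::x86::load_barrier_slow_stub(dst);
  masm->call(RuntimeAddress(stub));
}
```

The three CPU-feature variants exist because the stub call may clobber vector state, so each instruct conservatively KILLs every vector register that can exist on that hardware. The predicates partition the feature space into MaxVectorSize < 16 (no vector registers to kill), UseSSE > 0 && UseAVX <= 2 && MaxVectorSize >= 16 (kill xmm0-xmm15), and UseAVX == 3 && MaxVectorSize >= 16 (kill xmm0-xmm31), keeping them mutually exclusive so exactly one rule matches on any given CPU.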