< prev index next >
src/hotspot/cpu/x86/x86_64.ad
Print this page
*** 12519,12690 ****
format %{ "jmp rethrow_stub" %}
ins_encode(enc_rethrow);
ins_pipe(pipe_jmp);
%}
! // Execute GC load barrier (strong) slow path
// When running without XMM regs
-
instruct loadBarrierSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
match(Set dst (LoadBarrierSlowReg mem));
predicate(MaxVectorSize < 16);
effect(DEF dst, KILL cr);
! format %{"LoadBarrierSlowRegNoVec $dst,$mem" %}
ins_encode %{
Register d = $dst$$Register;
! assert( d != r12, "Can't be R12!");
! assert( d != r15, "Can't be R15!");
! assert( d != rsp, "Can't be RSP!");
! __ lea(d,$mem$$Address);
__ call(RuntimeAddress(StubRoutines::x86::load_barrier_slow_stub(d)));
%}
ins_pipe(pipe_slow);
%}
// For XMM and YMM enabled processors
!
! instruct loadBarrierSlowRegXmm(rRegP dst, memory mem, rFlagsReg cr,
! rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3, rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
! rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11, rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15
! ) %{
match(Set dst (LoadBarrierSlowReg mem));
predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
effect(DEF dst, KILL cr,
! KILL x0, KILL x1, KILL x2, KILL x3, KILL x4, KILL x5, KILL x6, KILL x7,
! KILL x8, KILL x9, KILL x10, KILL x11, KILL x12, KILL x13, KILL x14, KILL x15
! );
! format %{"LoadBarrierSlowRegXmm $dst,$mem" %}
ins_encode %{
Register d = $dst$$Register;
! assert( d != r12, "Can't be R12!");
! assert( d != r15, "Can't be R15!");
! assert( d != rsp, "Can't be RSP!");
! __ lea(d,$mem$$Address);
__ call(RuntimeAddress(StubRoutines::x86::load_barrier_slow_stub(d)));
%}
ins_pipe(pipe_slow);
%}
// For ZMM enabled processors
-
instruct loadBarrierSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
! rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3, rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
! rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11, rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
! rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19, rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
! rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27, rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31
! ) %{
match(Set dst (LoadBarrierSlowReg mem));
predicate((UseAVX == 3) && (MaxVectorSize >= 16));
effect(DEF dst, KILL cr,
! KILL x0, KILL x1, KILL x2, KILL x3, KILL x4, KILL x5, KILL x6, KILL x7,
! KILL x8, KILL x9, KILL x10, KILL x11, KILL x12, KILL x13, KILL x14, KILL x15,
! KILL x16, KILL x17, KILL x18, KILL x19, KILL x20, KILL x21, KILL x22, KILL x23,
! KILL x24, KILL x25, KILL x26, KILL x27, KILL x28, KILL x29, KILL x30, KILL x31
! );
! format %{"LoadBarrierSlowRegZmm $dst,$mem" %}
ins_encode %{
Register d = $dst$$Register;
! assert( d != r12, "Can't be R12!");
! assert( d != r15, "Can't be R15!");
! assert( d != rsp, "Can't be RSP!");
! __ lea(d,$mem$$Address);
__ call(RuntimeAddress(StubRoutines::x86::load_barrier_slow_stub(d)));
%}
ins_pipe(pipe_slow);
%}
! // Execute GC load barrier weak slow path
// When running without XMM regs
-
instruct loadBarrierWeakSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
match(Set dst (LoadBarrierSlowReg mem));
predicate(MaxVectorSize < 16);
effect(DEF dst, KILL cr);
! format %{"LoadBarrierSlowRegNoVec $dst,$mem" %}
ins_encode %{
Register d = $dst$$Register;
! assert( d != r12, "Can't be R12!");
! assert( d != r15, "Can't be R15!");
! assert( d != rsp, "Can't be RSP!");
! __ lea(d,$mem$$Address);
__ call(RuntimeAddress(StubRoutines::x86::load_barrier_weak_slow_stub(d)));
%}
ins_pipe(pipe_slow);
%}
// For XMM and YMM enabled processors
!
! instruct loadBarrierWeakSlowRegXmm(rRegP dst, memory mem, rFlagsReg cr,
! rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3, rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
! rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11, rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15
! ) %{
match(Set dst (LoadBarrierWeakSlowReg mem));
predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
effect(DEF dst, KILL cr,
! KILL x0, KILL x1, KILL x2, KILL x3, KILL x4, KILL x5, KILL x6, KILL x7,
! KILL x8, KILL x9, KILL x10, KILL x11, KILL x12, KILL x13, KILL x14, KILL x15
! );
! format %{"LoadBarrierWeakSlowRegXmm $dst,$mem" %}
ins_encode %{
Register d = $dst$$Register;
! assert( d != r12, "Can't be R12!");
! assert( d != r15, "Can't be R15!");
! assert( d != rsp, "Can't be RSP!");
__ lea(d,$mem$$Address);
__ call(RuntimeAddress(StubRoutines::x86::load_barrier_weak_slow_stub(d)));
%}
ins_pipe(pipe_slow);
%}
// For ZMM enabled processors
-
instruct loadBarrierWeakSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
! rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3, rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
! rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11, rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
! rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19, rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
! rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27, rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31
! ) %{
match(Set dst (LoadBarrierWeakSlowReg mem));
predicate((UseAVX == 3) && (MaxVectorSize >= 16));
effect(DEF dst, KILL cr,
! KILL x0, KILL x1, KILL x2, KILL x3, KILL x4, KILL x5, KILL x6, KILL x7,
! KILL x8, KILL x9, KILL x10, KILL x11, KILL x12, KILL x13, KILL x14, KILL x15,
! KILL x16, KILL x17, KILL x18, KILL x19, KILL x20, KILL x21, KILL x22, KILL x23,
! KILL x24, KILL x25, KILL x26, KILL x27, KILL x28, KILL x29, KILL x30, KILL x31
! );
! format %{"LoadBarrierWeakSlowRegZmm $dst,$mem" %}
ins_encode %{
Register d = $dst$$Register;
! assert( d != r12, "Can't be R12!");
! assert( d != r15, "Can't be R15!");
! assert( d != rsp, "Can't be RSP!");
__ lea(d,$mem$$Address);
__ call(RuntimeAddress(StubRoutines::x86::load_barrier_weak_slow_stub(d)));
%}
ins_pipe(pipe_slow);
%}
--- 12519,12710 ----
format %{ "jmp rethrow_stub" %}
ins_encode(enc_rethrow);
ins_pipe(pipe_jmp);
%}
! //
! // Execute ZGC load barrier (strong) slow path
! //
// When running without XMM regs
// Strong load-barrier slow path for CPUs where no XMM/YMM/ZMM state is in
// use (MaxVectorSize < 16): compute the address of the loaded field into
// dst and call the per-register strong slow-path stub. Kills only flags.
instruct loadBarrierSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
match(Set dst (LoadBarrierSlowReg mem));
predicate(MaxVectorSize < 16);
effect(DEF dst, KILL cr);
! format %{"LoadBarrierSlowRegNoVec $dst, $mem" %}
ins_encode %{
Register d = $dst$$Register;
// NOTE(review): r12/r15/rsp are presumably reserved registers (heap base,
// thread, stack pointer) with no per-register stub — confirm against
// StubRoutines::x86::load_barrier_slow_stub().
! assert(d != r12, "Can't be R12!");
! assert(d != r15, "Can't be R15!");
! assert(d != rsp, "Can't be RSP!");
!
! __ lea(d, $mem$$Address);
__ call(RuntimeAddress(StubRoutines::x86::load_barrier_slow_stub(d)));
%}
ins_pipe(pipe_slow);
%}
// For XMM and YMM enabled processors
! instruct loadBarrierSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
! rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
! rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
! rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
! rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
// Strong load-barrier slow path for SSE/AVX<=2 CPUs: xmm0-15 are declared
// KILLed so the register allocator keeps no vector value live across the
// stub call.
match(Set dst (LoadBarrierSlowReg mem));
predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
effect(DEF dst, KILL cr,
! KILL x0, KILL x1, KILL x2, KILL x3,
! KILL x4, KILL x5, KILL x6, KILL x7,
! KILL x8, KILL x9, KILL x10, KILL x11,
! KILL x12, KILL x13, KILL x14, KILL x15);
// Fixed: format text now matches the renamed instruct (was "...Xmm").
! format %{"LoadBarrierSlowRegXmmAndYmm $dst, $mem" %}
ins_encode %{
Register d = $dst$$Register;
! assert(d != r12, "Can't be R12!");
! assert(d != r15, "Can't be R15!");
! assert(d != rsp, "Can't be RSP!");
!
! __ lea(d, $mem$$Address);
__ call(RuntimeAddress(StubRoutines::x86::load_barrier_slow_stub(d)));
%}
ins_pipe(pipe_slow);
%}
// For ZMM enabled processors
instruct loadBarrierSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
! rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
! rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
! rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
! rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
! rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
! rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
! rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
! rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
// Strong load-barrier slow path for AVX-512 (UseAVX == 3) CPUs: all 32
// xmm registers are declared KILLed so no vector value stays live across
// the stub call. Only change vs. original: spacing after "x2," for
// consistency with every other operand pair.
match(Set dst (LoadBarrierSlowReg mem));
predicate((UseAVX == 3) && (MaxVectorSize >= 16));
effect(DEF dst, KILL cr,
! KILL x0, KILL x1, KILL x2, KILL x3,
! KILL x4, KILL x5, KILL x6, KILL x7,
! KILL x8, KILL x9, KILL x10, KILL x11,
! KILL x12, KILL x13, KILL x14, KILL x15,
! KILL x16, KILL x17, KILL x18, KILL x19,
! KILL x20, KILL x21, KILL x22, KILL x23,
! KILL x24, KILL x25, KILL x26, KILL x27,
! KILL x28, KILL x29, KILL x30, KILL x31);
! format %{"LoadBarrierSlowRegZmm $dst, $mem" %}
ins_encode %{
Register d = $dst$$Register;
! assert(d != r12, "Can't be R12!");
! assert(d != r15, "Can't be R15!");
! assert(d != rsp, "Can't be RSP!");
!
! __ lea(d, $mem$$Address);
__ call(RuntimeAddress(StubRoutines::x86::load_barrier_slow_stub(d)));
%}
ins_pipe(pipe_slow);
%}
! //
! // Execute ZGC load barrier (weak) slow path
! //
// When running without XMM regs
instruct loadBarrierWeakSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
// Weak load-barrier slow path for CPUs without vector state to preserve
// (MaxVectorSize < 16).
// BUG FIX: this rule matched LoadBarrierSlowReg, which (a) duplicated
// loadBarrierSlowRegNoVec — same node, same predicate — and (b) left the
// weak node LoadBarrierWeakSlowReg with no matching rule on these CPUs.
// The encoding below calls the *weak* stub, confirming the intended node.
match(Set dst (LoadBarrierWeakSlowReg mem));
predicate(MaxVectorSize < 16);
effect(DEF dst, KILL cr);
// Format text fixed to name the weak variant (was "LoadBarrierSlowRegNoVec").
! format %{"LoadBarrierWeakSlowRegNoVec $dst, $mem" %}
ins_encode %{
Register d = $dst$$Register;
! assert(d != r12, "Can't be R12!");
! assert(d != r15, "Can't be R15!");
! assert(d != rsp, "Can't be RSP!");
!
! __ lea(d, $mem$$Address);
__ call(RuntimeAddress(StubRoutines::x86::load_barrier_weak_slow_stub(d)));
%}
ins_pipe(pipe_slow);
%}
// For XMM and YMM enabled processors
! instruct loadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
! rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
! rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
! rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
! rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
// Weak load-barrier slow path for SSE/AVX<=2 CPUs: xmm0-15 declared KILLed
// so no vector value is kept live across the stub call.
match(Set dst (LoadBarrierWeakSlowReg mem));
predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
effect(DEF dst, KILL cr,
! KILL x0, KILL x1, KILL x2, KILL x3,
! KILL x4, KILL x5, KILL x6, KILL x7,
! KILL x8, KILL x9, KILL x10, KILL x11,
! KILL x12, KILL x13, KILL x14, KILL x15);
// Fixed: format text now matches the renamed instruct (was "...Xmm").
! format %{"LoadBarrierWeakSlowRegXmmAndYmm $dst, $mem" %}
ins_encode %{
Register d = $dst$$Register;
! assert(d != r12, "Can't be R12!");
! assert(d != r15, "Can't be R15!");
! assert(d != rsp, "Can't be RSP!");
!
__ lea(d, $mem$$Address);
__ call(RuntimeAddress(StubRoutines::x86::load_barrier_weak_slow_stub(d)));
%}
ins_pipe(pipe_slow);
%}
// For ZMM enabled processors
instruct loadBarrierWeakSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
! rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
! rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
! rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
! rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
! rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
! rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
! rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
! rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
// Weak load-barrier slow path for AVX-512 (UseAVX == 3) CPUs: all 32 xmm
// registers are declared KILLed so no vector value stays live across the
// stub call.
match(Set dst (LoadBarrierWeakSlowReg mem));
predicate((UseAVX == 3) && (MaxVectorSize >= 16));
effect(DEF dst, KILL cr,
! KILL x0, KILL x1, KILL x2, KILL x3,
! KILL x4, KILL x5, KILL x6, KILL x7,
! KILL x8, KILL x9, KILL x10, KILL x11,
! KILL x12, KILL x13, KILL x14, KILL x15,
! KILL x16, KILL x17, KILL x18, KILL x19,
! KILL x20, KILL x21, KILL x22, KILL x23,
! KILL x24, KILL x25, KILL x26, KILL x27,
! KILL x28, KILL x29, KILL x30, KILL x31);
! format %{"LoadBarrierWeakSlowRegZmm $dst, $mem" %}
ins_encode %{
Register d = $dst$$Register;
! assert(d != r12, "Can't be R12!");
! assert(d != r15, "Can't be R15!");
! assert(d != rsp, "Can't be RSP!");
!
__ lea(d, $mem$$Address);
__ call(RuntimeAddress(StubRoutines::x86::load_barrier_weak_slow_stub(d)));
%}
ins_pipe(pipe_slow);
%}
< prev index next >