src/hotspot/cpu/x86/x86_64.ad

@@ -536,10 +536,16 @@
 // Singleton class for instruction pointer
 // reg_class ip_reg(RIP);
 
 %}
 
+source_hpp %{
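+// Declarations from ZBarrierSetAssembler are referenced by the ZGC load
+// barrier instructs below, so the header is made visible to the generated code.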
+#if INCLUDE_ZGC
+#include "gc/z/zBarrierSetAssembler.hpp"
+#endif
+%}
+
 //----------SOURCE BLOCK-------------------------------------------------------
 // This is a block of C++ code which provides values, functions, and
 // definitions necessary in the rest of the architecture description
 source %{
 #define   RELOC_IMM64    Assembler::imm_operand

@@ -4219,10 +4225,139 @@
     overflow(0x0, "o");
     no_overflow(0x1, "no");
   %}
 %}
 
+// Operands binding a VecX value to one specific floating-point (XMM) register,
+// used by the ZGC load barrier instructs below.
+operand rxmm0() %{
+  constraint(ALLOC_IN_RC(xmm0_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm1() %{
+  constraint(ALLOC_IN_RC(xmm1_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm2() %{
+  constraint(ALLOC_IN_RC(xmm2_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm3() %{
+  constraint(ALLOC_IN_RC(xmm3_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm4() %{
+  constraint(ALLOC_IN_RC(xmm4_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm5() %{
+  constraint(ALLOC_IN_RC(xmm5_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm6() %{
+  constraint(ALLOC_IN_RC(xmm6_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm7() %{
+  constraint(ALLOC_IN_RC(xmm7_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm8() %{
+  constraint(ALLOC_IN_RC(xmm8_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm9() %{
+  constraint(ALLOC_IN_RC(xmm9_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm10() %{
+  constraint(ALLOC_IN_RC(xmm10_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm11() %{
+  constraint(ALLOC_IN_RC(xmm11_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm12() %{
+  constraint(ALLOC_IN_RC(xmm12_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm13() %{
+  constraint(ALLOC_IN_RC(xmm13_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm14() %{
+  constraint(ALLOC_IN_RC(xmm14_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm15() %{
+  constraint(ALLOC_IN_RC(xmm15_reg));  match(VecX);
+  predicate((UseSSE > 0) && (UseAVX <= 2));  format%{%}  interface(REG_INTER);
+%}
+operand rxmm16() %{
+  constraint(ALLOC_IN_RC(xmm16_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm17() %{
+  constraint(ALLOC_IN_RC(xmm17_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm18() %{
+  constraint(ALLOC_IN_RC(xmm18_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm19() %{
+  constraint(ALLOC_IN_RC(xmm19_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm20() %{
+  constraint(ALLOC_IN_RC(xmm20_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm21() %{
+  constraint(ALLOC_IN_RC(xmm21_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm22() %{
+  constraint(ALLOC_IN_RC(xmm22_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm23() %{
+  constraint(ALLOC_IN_RC(xmm23_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm24() %{
+  constraint(ALLOC_IN_RC(xmm24_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm25() %{
+  constraint(ALLOC_IN_RC(xmm25_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm26() %{
+  constraint(ALLOC_IN_RC(xmm26_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm27() %{
+  constraint(ALLOC_IN_RC(xmm27_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm28() %{
+  constraint(ALLOC_IN_RC(xmm28_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm29() %{
+  constraint(ALLOC_IN_RC(xmm29_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm30() %{
+  constraint(ALLOC_IN_RC(xmm30_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
+operand rxmm31() %{
+  constraint(ALLOC_IN_RC(xmm31_reg));  match(VecX);
+  predicate(UseAVX == 3);  format%{%}  interface(REG_INTER);
+%}
 
 //----------OPERAND CLASSES----------------------------------------------------
 // Operand Classes are groups of operands that are used as to simplify
 // instruction definitions by not requiring the AD writer to specify separate
 // instructions for every form of operand when the instruction accepts

@@ -11545,10 +11680,20 @@
   opcode(0x85);
   ins_encode(REX_reg_mem_wide(src, mem), OpcP, reg_mem(src, mem));
   ins_pipe(ialu_cr_reg_mem);
 %}
 
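+// Folds a zero test of (CastP2X src) AND (LoadL mem) into a single testq of the
+// pointer register against memory; ZGC's load barrier fast path emits this shape
+// when checking a loaded oop against the in-memory bad mask.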
+instruct testL_reg_mem2(rFlagsReg cr, rRegP src, memory mem, immL0 zero)
+%{
+  match(Set cr (CmpL (AndL (CastP2X src) (LoadL mem)) zero));
+
+  format %{ "testq   $src, $mem" %}
+  opcode(0x85);
+  ins_encode(REX_reg_mem_wide(src, mem), OpcP, reg_mem(src, mem));
+  ins_pipe(ialu_cr_reg_mem);
+%}
+
 // Manifest a CmpL result in an integer register.  Very painful.
 // This is the test to avoid.
 instruct cmpL3_reg_reg(rRegI dst, rRegL src1, rRegL src2, rFlagsReg flags)
 %{
   match(Set dst (CmpL3 src1 src2));

@@ -12338,10 +12483,227 @@
   format %{ "jmp     rethrow_stub" %}
   ins_encode(enc_rethrow);
   ins_pipe(pipe_jmp);
 %}
 
+//
+// Execute ZGC load barrier (strong) slow path
+//
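+// Each variant computes the field address with lea into $dst and calls the
+// per-register ZGC slow-path stub. The variants differ only in which vector
+// registers they declare as killed, since the runtime call may clobber them.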
+
+// When running without XMM regs
+instruct loadBarrierSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
+
+  match(Set dst (LoadBarrierSlowReg mem));
+  predicate(MaxVectorSize < 16);
+
+  effect(DEF dst, KILL cr);
+
+  format %{"LoadBarrierSlowRegNoVec $dst, $mem" %}
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+// For XMM and YMM enabled processors
+instruct loadBarrierSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
+                                     rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
+                                     rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
+                                     rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
+                                     rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
+
+  match(Set dst (LoadBarrierSlowReg mem));
+  predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
+
+  effect(DEF dst, KILL cr,
+         KILL x0, KILL x1, KILL x2, KILL x3,
+         KILL x4, KILL x5, KILL x6, KILL x7,
+         KILL x8, KILL x9, KILL x10, KILL x11,
+         KILL x12, KILL x13, KILL x14, KILL x15);
+
+  format %{"LoadBarrierSlowRegXmm $dst, $mem" %}
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+// For ZMM enabled processors
+instruct loadBarrierSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
+                               rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
+                               rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
+                               rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
+                               rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
+                               rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
+                               rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
+                               rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
+                               rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
+
+  match(Set dst (LoadBarrierSlowReg mem));
+  predicate((UseAVX == 3) && (MaxVectorSize >= 16));
+
+  effect(DEF dst, KILL cr,
+         KILL x0, KILL x1, KILL x2, KILL x3,
+         KILL x4, KILL x5, KILL x6, KILL x7,
+         KILL x8, KILL x9, KILL x10, KILL x11,
+         KILL x12, KILL x13, KILL x14, KILL x15,
+         KILL x16, KILL x17, KILL x18, KILL x19,
+         KILL x20, KILL x21, KILL x22, KILL x23,
+         KILL x24, KILL x25, KILL x26, KILL x27,
+         KILL x28, KILL x29, KILL x30, KILL x31);
+
+  format %{"LoadBarrierSlowRegZmm $dst, $mem" %}
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(bs->load_barrier_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+//
+// Execute ZGC load barrier (weak) slow path
+//
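+// Same structure as the strong variants above, except that the stub is taken
+// from load_barrier_weak_slow_stub() rather than load_barrier_slow_stub().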
+
+// When running without XMM regs
+instruct loadBarrierWeakSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
+
+  match(Set dst (LoadBarrierWeakSlowReg mem));
+  predicate(MaxVectorSize < 16);
+
+  effect(DEF dst, KILL cr);
+
+  format %{"LoadBarrierSlowRegNoVec $dst, $mem" %}
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+// For XMM and YMM enabled processors
+instruct loadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory mem, rFlagsReg cr,
+                                         rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
+                                         rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
+                                         rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
+                                         rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
+
+  match(Set dst (LoadBarrierWeakSlowReg mem));
+  predicate((UseSSE > 0) && (UseAVX <= 2) && (MaxVectorSize >= 16));
+
+  effect(DEF dst, KILL cr,
+         KILL x0, KILL x1, KILL x2, KILL x3,
+         KILL x4, KILL x5, KILL x6, KILL x7,
+         KILL x8, KILL x9, KILL x10, KILL x11,
+         KILL x12, KILL x13, KILL x14, KILL x15);
+
+  format %{"LoadBarrierWeakSlowRegXmm $dst, $mem" %}
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+// For ZMM enabled processors
+instruct loadBarrierWeakSlowRegZmm(rRegP dst, memory mem, rFlagsReg cr,
+                                   rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
+                                   rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
+                                   rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
+                                   rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
+                                   rxmm16 x16, rxmm17 x17, rxmm18 x18, rxmm19 x19,
+                                   rxmm20 x20, rxmm21 x21, rxmm22 x22, rxmm23 x23,
+                                   rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
+                                   rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
+
+  match(Set dst (LoadBarrierWeakSlowReg mem));
+  predicate((UseAVX == 3) && (MaxVectorSize >= 16));
+
+  effect(DEF dst, KILL cr,
+         KILL x0, KILL x1, KILL x2, KILL x3,
+         KILL x4, KILL x5, KILL x6, KILL x7,
+         KILL x8, KILL x9, KILL x10, KILL x11,
+         KILL x12, KILL x13, KILL x14, KILL x15,
+         KILL x16, KILL x17, KILL x18, KILL x19,
+         KILL x20, KILL x21, KILL x22, KILL x23,
+         KILL x24, KILL x25, KILL x26, KILL x27,
+         KILL x28, KILL x29, KILL x30, KILL x31);
+
+  format %{"LoadBarrierWeakSlowRegZmm $dst, $mem" %}
+  ins_encode %{
+#if INCLUDE_ZGC
+    Register d = $dst$$Register;
+    ZBarrierSetAssembler* bs = (ZBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+
+    assert(d != r12, "Can't be R12!");
+    assert(d != r15, "Can't be R15!");
+    assert(d != rsp, "Can't be RSP!");
+
+    __ lea(d, $mem$$Address);
+    __ call(RuntimeAddress(bs->load_barrier_weak_slow_stub(d)));
+#else
+    ShouldNotReachHere();
+#endif
+  %}
+  ins_pipe(pipe_slow);
+%}
 
 // ============================================================================
 // This name is KNOWN by the ADLC and cannot be changed.
 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
 // for this guy.