< prev index next >

src/cpu/aarch64/vm/aarch64.ad

Print this page
rev 10850 : 8154537: AArch64: some integer rotate instructions are never emitted
Summary: some integer rotate rules in ad file can't be matched
Reviewed-by:
rev 10955 : undo
rev 10970 : 8154826: AArch64: take advantage better of base + shifted offset addressing mode
Summary: reshape address subtree to fit aarch64 addressing mode
Reviewed-by:
rev 10971 : more
rev 10972 : more
rev 10976 : 8155612: Aarch64: vector nodes need to support misaligned offset
Reviewed-by:

@@ -5304,20 +5304,80 @@
   op_cost(0);
   format %{ %}
   interface(CONST_INTER);
 %}
 
+// Offset immediates restricted by access size.  The second argument to
+// Address::offset_ok_for_immed() is the access-size shift (log2 of the
+// byte size: 4 -> 2, 8 -> 3, 16 -> 4), so each operand only accepts an
+// offset that is encodable for a load/store of that width.
+
+// 32-bit integer offset valid for a 4-byte access (shift = 2)
+operand immIOffset4()
+%{
+  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// 32-bit integer offset valid for an 8-byte access (shift = 3)
+operand immIOffset8()
+%{
+  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// 32-bit integer offset valid for a 16-byte access (shift = 4)
+operand immIOffset16()
+%{
+  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// 64-bit integer offset checked with the default access size
+// (cf. the size-specific immLoffset4/8/16 variants)
 operand immLoffset()
 %{
   predicate(Address::offset_ok_for_immed(n->get_long()));
   match(ConL);
 
   op_cost(0);
   format %{ %}
   interface(CONST_INTER);
 %}
 
+// Long-offset counterparts of immIOffset4/8/16: the shift argument is
+// log2 of the access size in bytes.
+
+// 64-bit integer offset valid for a 4-byte access (shift = 2)
+operand immLoffset4()
+%{
+  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
+  match(ConL);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// 64-bit integer offset valid for an 8-byte access (shift = 3)
+operand immLoffset8()
+%{
+  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
+  match(ConL);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// 64-bit integer offset valid for a 16-byte access (shift = 4)
+operand immLoffset16()
+%{
+  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
+  match(ConL);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
 // 32 bit integer valid for add sub immediate
 operand immIAddSub()
 %{
   predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
   match(ConI);

@@ -6148,10 +6208,52 @@
     scale(0x0);
     disp($off);
   %}
 %}
 
+// [base + int offset] memory operands where the offset must be valid
+// for the named access size (immIOffset4/8/16 predicates).
+// index(0xffffffff) is the ADL convention for "no index register".
+
+// base plus offset encodable for a 4-byte access
+operand indOffI4(iRegP reg, immIOffset4 off)
+%{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP reg off);
+  op_cost(0);
+  format %{ "[$reg, $off]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index(0xffffffff);
+    scale(0x0);
+    disp($off);
+  %}
+%}
+
+// base plus offset encodable for an 8-byte access
+operand indOffI8(iRegP reg, immIOffset8 off)
+%{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP reg off);
+  op_cost(0);
+  format %{ "[$reg, $off]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index(0xffffffff);
+    scale(0x0);
+    disp($off);
+  %}
+%}
+
+// base plus offset encodable for a 16-byte access
+operand indOffI16(iRegP reg, immIOffset16 off)
+%{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP reg off);
+  op_cost(0);
+  format %{ "[$reg, $off]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index(0xffffffff);
+    scale(0x0);
+    disp($off);
+  %}
+%}
+
 operand indOffL(iRegP reg, immLoffset off)
 %{
   constraint(ALLOC_IN_RC(ptr_reg));
   match(AddP reg off);
   op_cost(0);

@@ -6162,10 +6264,51 @@
     scale(0x0);
     disp($off);
   %}
 %}
 
+// [base + long offset] memory operands, the long-offset counterparts
+// of indOffI4/8/16 (offset restricted by immLoffset4/8/16).
+
+// base plus long offset encodable for a 4-byte access
+operand indOffL4(iRegP reg, immLoffset4 off)
+%{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP reg off);
+  op_cost(0);
+  format %{ "[$reg, $off]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index(0xffffffff);
+    scale(0x0);
+    disp($off);
+  %}
+%}
+
+// base plus long offset encodable for an 8-byte access
+operand indOffL8(iRegP reg, immLoffset8 off)
+%{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP reg off);
+  op_cost(0);
+  format %{ "[$reg, $off]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index(0xffffffff);
+    scale(0x0);
+    disp($off);
+  %}
+%}
+
+// base plus long offset encodable for a 16-byte access
+operand indOffL16(iRegP reg, immLoffset16 off)
+%{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP reg off);
+  op_cost(0);
+  format %{ "[$reg, $off]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index(0xffffffff);
+    scale(0x0);
+    disp($off);
+  %}
+%}
 
 operand indirectN(iRegN reg)
 %{
   predicate(Universe::narrow_oop_shift() == 0);
   constraint(ALLOC_IN_RC(ptr_reg));

@@ -6474,11 +6617,13 @@
   format %{ "l2i($reg)" %}
 
   interface(REG_INTER)
 %}
 
+// Split the old generic vmem class by access size so that an N-byte
+// vector load/store only matches address forms whose immediate offset
+// is encodable for an N-byte access (see immIOffset4/8/16 and
+// immLoffset4/8/16); per rev 10976 this is the fix for 8155612.
-opclass vmem(indirect, indIndex, indOffI, indOffL);
+opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
+opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
+opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
 
 //----------OPERAND CLASSES----------------------------------------------------
 // Operand Classes are groups of operands that are used as to simplify
 // instruction definitions by not requiring the AD writer to specify
 // separate instructions for every form of operand when the

@@ -7006,38 +7151,38 @@
   dst    : S3(write);
   INS0   : ISS;
   NEON_FP : S3;
 %}
 
+// Pipeline class for a 64-bit vector load.  NOTE(review): also used by
+// loadV4, which passes a vmem4 operand -- pipe-class operand types
+// appear not to be matched against the instruct's operands; confirm.
-pipe_class vload_reg_mem64(vecD dst, vmem mem)
+pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
 %{
   single_instruction;
   dst    : S5(write);
   mem    : ISS(read);
   INS01  : ISS;
   NEON_FP : S3;
 %}
 
+// Pipeline class for a 128-bit vector load (used by loadV16).
-pipe_class vload_reg_mem128(vecX dst, vmem mem)
+pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
 %{
   single_instruction;
   dst    : S5(write);
   mem    : ISS(read);
   INS01  : ISS;
   NEON_FP : S3;
 %}
 
+// Pipeline class for a 64-bit vector store.  NOTE(review): also used
+// by storeV4, which passes a vmem4 operand.
-pipe_class vstore_reg_mem64(vecD src, vmem mem)
+pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
 %{
   single_instruction;
   mem    : ISS(read);
   src    : S2(read);
   INS01  : ISS;
   NEON_FP : S3;
 %}
 
-pipe_class vstore_reg_mem128(vecD src, vmem mem)
+pipe_class vstore_reg_mem128(vecD src, vmem16 mem)
 %{
   single_instruction;
   mem    : ISS(read);
   src    : S2(read);
   INS01  : ISS;

@@ -14917,66 +15062,66 @@
 %}
 
 // ====================VECTOR INSTRUCTIONS=====================================
 
 // Load vector (32 bits)
+// vmem4 (previously the generic vmem) requires the immediate offset to
+// satisfy offset_ok_for_immed(off, 2), i.e. be encodable for a 4-byte
+// access, so unencodable misaligned offsets no longer match (8155612).
-instruct loadV4(vecD dst, vmem mem)
+instruct loadV4(vecD dst, vmem4 mem)
 %{
   predicate(n->as_LoadVector()->memory_size() == 4);
   match(Set dst (LoadVector mem));
   ins_cost(4 * INSN_COST);
   format %{ "ldrs   $dst,$mem\t# vector (32 bits)" %}
   ins_encode( aarch64_enc_ldrvS(dst, mem) );
   ins_pipe(vload_reg_mem64);
 %}
 
 // Load vector (64 bits)
+// vmem8: offset must be encodable for an 8-byte access (shift = 3)
-instruct loadV8(vecD dst, vmem mem)
+instruct loadV8(vecD dst, vmem8 mem)
 %{
   predicate(n->as_LoadVector()->memory_size() == 8);
   match(Set dst (LoadVector mem));
   ins_cost(4 * INSN_COST);
   format %{ "ldrd   $dst,$mem\t# vector (64 bits)" %}
   ins_encode( aarch64_enc_ldrvD(dst, mem) );
   ins_pipe(vload_reg_mem64);
 %}
 
 // Load Vector (128 bits)
+// vmem16: offset must be encodable for a 16-byte access (shift = 4)
-instruct loadV16(vecX dst, vmem mem)
+instruct loadV16(vecX dst, vmem16 mem)
 %{
   predicate(n->as_LoadVector()->memory_size() == 16);
   match(Set dst (LoadVector mem));
   ins_cost(4 * INSN_COST);
   format %{ "ldrq   $dst,$mem\t# vector (128 bits)" %}
   ins_encode( aarch64_enc_ldrvQ(dst, mem) );
   ins_pipe(vload_reg_mem128);
 %}
 
 // Store Vector (32 bits)
+// vmem4: offset must be encodable for a 4-byte access (shift = 2)
-instruct storeV4(vecD src, vmem mem)
+instruct storeV4(vecD src, vmem4 mem)
 %{
   predicate(n->as_StoreVector()->memory_size() == 4);
   match(Set mem (StoreVector mem src));
   ins_cost(4 * INSN_COST);
   format %{ "strs   $mem,$src\t# vector (32 bits)" %}
   ins_encode( aarch64_enc_strvS(src, mem) );
   ins_pipe(vstore_reg_mem64);
 %}
 
 // Store Vector (64 bits)
+// vmem8: offset must be encodable for an 8-byte access (shift = 3)
-instruct storeV8(vecD src, vmem mem)
+instruct storeV8(vecD src, vmem8 mem)
 %{
   predicate(n->as_StoreVector()->memory_size() == 8);
   match(Set mem (StoreVector mem src));
   ins_cost(4 * INSN_COST);
   format %{ "strd   $mem,$src\t# vector (64 bits)" %}
   ins_encode( aarch64_enc_strvD(src, mem) );
   ins_pipe(vstore_reg_mem64);
 %}
 
 // Store Vector (128 bits)
-instruct storeV16(vecX src, vmem mem)
+instruct storeV16(vecX src, vmem16 mem)
 %{
   predicate(n->as_StoreVector()->memory_size() == 16);
   match(Set mem (StoreVector mem src));
   ins_cost(4 * INSN_COST);
   format %{ "strq   $mem,$src\t# vector (128 bits)" %}
< prev index next >