383 reg_class int_rdi_reg(RDI);
384
385 // Singleton class for instruction pointer
386 // reg_class ip_reg(RIP);
387
388 %}
389
390 //----------SOURCE BLOCK-------------------------------------------------------
391 // This is a block of C++ code which provides values, functions, and
392 // definitions necessary in the rest of the architecture description
393 source %{
394 #define RELOC_IMM64 Assembler::imm_operand
395 #define RELOC_DISP32 Assembler::disp32_operand
396
397 #define __ _masm.
398
// Size in bytes of the instruction that preserves SP for a
// method-handle invoke: rex.w prefix + opcode + ModRM(reg/reg).
static int preserve_SP_size() {
  const int encoding_bytes = 1 + 1 + 1; // rex.w, op, rm(reg/reg)
  return encoding_bytes;
}
402 static int clear_avx_size() {
403 return (Compile::current()->max_vector_size() > 16) ? 3 : 0; // vzeroupper
404 }
405
406 // !!!!! Special hack to get all types of calls to specify the byte offset
407 // from the start of the call to the point where the return address
408 // will point.
409 int MachCallStaticJavaNode::ret_addr_offset()
410 {
411 int offset = 5; // 5 bytes from start of call to where return address points
412 offset += clear_avx_size();
413 if (_method_handle_invoke)
414 offset += preserve_SP_size();
415 return offset;
416 }
417
418 int MachCallDynamicJavaNode::ret_addr_offset()
419 {
420 int offset = 15; // 15 bytes from start of call to where return address points
421 offset += clear_avx_size();
422 return offset;
423 }
// Conservative upper bound on the number of relocation entries the
// epilog emits.
int MachEpilogNode::reloc() const
{
  return 2; // a large enough number
}

// The epilog uses the generic pipeline description.
const Pipeline* MachEpilogNode::pipeline() const
{
  return MachNode::pipeline_class();
}

// Byte offset of the safepoint poll within the epilog.
int MachEpilogNode::safepoint_offset() const
{
  return 0;
}
884
885 //=============================================================================
886
// Register class a value lives in, as seen by the spill-copy code.
enum RC {
  rc_bad,   // not a valid register
  rc_int,   // general-purpose register
  rc_float, // XMM register
  rc_stack  // stack slot
};

// Map an allocator register name to its RC register class.
static enum RC rc_class(OptoReg::Name reg)
{
  if( !OptoReg::is_valid(reg) ) return rc_bad;

  if (OptoReg::is_stack(reg)) return rc_stack;

  VMReg r = OptoReg::as_VMReg(reg);

  if (r->is_Register()) return rc_int;

  // Anything that is neither a stack slot nor a GPR must be an XMM register.
  assert(r->is_XMMRegister(), "must be");
  return rc_float;
}
907
908 // Next two methods are shared by 32- and 64-bit VM. They are defined in x86.ad.
909 static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
910 int src_hi, int dst_hi, uint ireg, outputStream* st);
911
912 static int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
913 int stack_offset, int reg, uint ireg, outputStream* st);
914
// Copy a vector value from one stack slot to another without disturbing
// any allocatable register: the scratch registers used (rax / xmm0) are
// parked below rsp for the duration.  When cbuf is NULL (non-PRODUCT
// builds) a textual description of the code is printed to st instead.
static void vec_stack_to_stack_helper(CodeBuffer *cbuf, int src_offset,
                                      int dst_offset, uint ireg, outputStream* st) {
  if (cbuf) {
    MacroAssembler _masm(cbuf);
    switch (ireg) {
    case Op_VecS:
      // 32 bits: bounce through rax, preserving it at [rsp - 8].
      __ movq(Address(rsp, -8), rax);
      __ movl(rax, Address(rsp, src_offset));
      __ movl(Address(rsp, dst_offset), rax);
      __ movq(rax, Address(rsp, -8));
      break;
    case Op_VecD:
      // 64 bits: a push/pop pair moves mem->mem directly.
      __ pushq(Address(rsp, src_offset));
      __ popq (Address(rsp, dst_offset));
      break;
    case Op_VecX:
      // 128 bits: two 64-bit push/pop pairs.
      __ pushq(Address(rsp, src_offset));
      __ popq (Address(rsp, dst_offset));
      __ pushq(Address(rsp, src_offset+8));
      __ popq (Address(rsp, dst_offset+8));
      break;
    case Op_VecY:
      // 256 bits: bounce through xmm0, preserving it at [rsp - 32].
      __ vmovdqu(Address(rsp, -32), xmm0);
      __ vmovdqu(xmm0, Address(rsp, src_offset));
      __ vmovdqu(Address(rsp, dst_offset), xmm0);
      __ vmovdqu(xmm0, Address(rsp, -32));
      break;
    default:
      ShouldNotReachHere();
    }
#ifndef PRODUCT
  } else {
    // No code buffer: print the assembly that would have been emitted.
    switch (ireg) {
    case Op_VecS:
      st->print("movq [rsp - #8], rax\t# 32-bit mem-mem spill\n\t"
                "movl rax, [rsp + #%d]\n\t"
                "movl [rsp + #%d], rax\n\t"
                "movq rax, [rsp - #8]",
                src_offset, dst_offset);
      break;
    case Op_VecD:
      st->print("pushq [rsp + #%d]\t# 64-bit mem-mem spill\n\t"
                "popq [rsp + #%d]",
                src_offset, dst_offset);
      break;
    case Op_VecX:
      st->print("pushq [rsp + #%d]\t# 128-bit mem-mem spill\n\t"
                "popq [rsp + #%d]\n\t"
                "pushq [rsp + #%d]\n\t"
                "popq [rsp + #%d]",
                src_offset, dst_offset, src_offset+8, dst_offset+8);
      break;
    case Op_VecY:
      st->print("vmovdqu [rsp - #32], xmm0\t# 256-bit mem-mem spill\n\t"
                "vmovdqu xmm0, [rsp + #%d]\n\t"
                "vmovdqu [rsp + #%d], xmm0\n\t"
                "vmovdqu xmm0, [rsp - #32]",
                src_offset, dst_offset);
      break;
    default:
      ShouldNotReachHere();
    }
#endif
  }
}
980
981 uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
982 PhaseRegAlloc* ra_,
983 bool do_size,
984 outputStream* st) const {
985 assert(cbuf != NULL || st != NULL, "sanity");
986 // Get registers to move
987 OptoReg::Name src_second = ra_->get_reg_second(in(1));
988 OptoReg::Name src_first = ra_->get_reg_first(in(1));
989 OptoReg::Name dst_second = ra_->get_reg_second(this);
990 OptoReg::Name dst_first = ra_->get_reg_first(this);
991
992 enum RC src_second_rc = rc_class(src_second);
993 enum RC src_first_rc = rc_class(src_first);
994 enum RC dst_second_rc = rc_class(dst_second);
995 enum RC dst_first_rc = rc_class(dst_first);
996
997 assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first),
998 "must move at least 1 register" );
999
1000 if (src_first == dst_first && src_second == dst_second) {
1001 // Self copy, no move
1002 return 0;
1003 }
1004 if (bottom_type()->isa_vect() != NULL) {
1005 uint ireg = ideal_reg();
1006 assert((src_first_rc != rc_int && dst_first_rc != rc_int), "sanity");
1007 assert((ireg == Op_VecS || ireg == Op_VecD || ireg == Op_VecX || ireg == Op_VecY), "sanity");
1008 if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) {
1009 // mem -> mem
1010 int src_offset = ra_->reg2offset(src_first);
1011 int dst_offset = ra_->reg2offset(dst_first);
1012 vec_stack_to_stack_helper(cbuf, src_offset, dst_offset, ireg, st);
1013 } else if (src_first_rc == rc_float && dst_first_rc == rc_float ) {
1014 vec_mov_helper(cbuf, false, src_first, dst_first, src_second, dst_second, ireg, st);
1015 } else if (src_first_rc == rc_float && dst_first_rc == rc_stack ) {
1016 int stack_offset = ra_->reg2offset(dst_first);
1017 vec_spill_helper(cbuf, false, false, stack_offset, src_first, ireg, st);
1018 } else if (src_first_rc == rc_stack && dst_first_rc == rc_float ) {
1019 int stack_offset = ra_->reg2offset(src_first);
1020 vec_spill_helper(cbuf, false, true, stack_offset, dst_first, ireg, st);
1021 } else {
1022 ShouldNotReachHere();
1023 }
1024 return 0;
1025 }
1026 if (src_first_rc == rc_stack) {
1027 // mem ->
// Outgoing argument locations for calls using the C calling convention.
c_calling_convention
%{
  // This is obviously always outgoing
  (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
%}

// Location of compiled Java return values. Same as C for now.
return_value
%{
  assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
         "only return normal values");

  // Low half of the return register pair, indexed by ideal register type.
  static const int lo[Op_RegL + 1] = {
    0,
    0,
    RAX_num,  // Op_RegN
    RAX_num,  // Op_RegI
    RAX_num,  // Op_RegP
    XMM0_num, // Op_RegF
    XMM0_num, // Op_RegD
    RAX_num   // Op_RegL
  };
  // High half of the pair; OptoReg::Bad for single-slot values.
  static const int hi[Op_RegL + 1] = {
    0,
    0,
    OptoReg::Bad, // Op_RegN
    OptoReg::Bad, // Op_RegI
    RAX_H_num,    // Op_RegP
    OptoReg::Bad, // Op_RegF
    XMM0b_num,    // Op_RegD
    RAX_H_num     // Op_RegL
  };
  // Excluded flags and vector registers.
  assert(ARRAY_SIZE(hi) == _last_machine_leaf - 5, "missing type");
  return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
%}
2705 %}
2706
2707 //----------ATTRIBUTES---------------------------------------------------------
2708 //----------Operand Attributes-------------------------------------------------
2709 op_attrib op_cost(0); // Required cost attribute
2710
2711 //----------Instruction Attributes---------------------------------------------
2712 ins_attrib ins_cost(100); // Required cost attribute
2713 ins_attrib ins_size(8); // Required size attribute (in bits)
2714 ins_attrib ins_short_branch(0); // Required flag: is this instruction
2715 // a non-matching short branch variant
2716 // of some long branch?
2717 ins_attrib ins_alignment(1); // Required alignment attribute (must
2718 // be a power of 2) specifies the
2719 // alignment that some part of the
2720 // instruction (not necessarily the
2721 // start) requires. If > 1, a
2722 // compute_padding() function must be
3443
// Flags register, used as output of FLOATING POINT compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "RFLAGS_U" %}
  interface(REG_INTER);
%}

// Flags register variant; predicate(false) keeps the matcher from
// selecting it, so it is only used where named explicitly in
// instruction definitions.
operand rFlagsRegUCF() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);
  predicate(false);

  format %{ "RFLAGS_U_CF" %}
  interface(REG_INTER);
%}

// Float register operands
operand regF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  format %{ %}
  interface(REG_INTER);
%}

// Double register operands
operand regD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  format %{ %}
  interface(REG_INTER);
%}
3482
3483 //----------Memory Operands----------------------------------------------------
3484 // Direct Memory Operand
3485 // operand direct(immP addr)
3486 // %{
3487 // match(addr);
3488
3489 // format %{ "[$addr]" %}
3490 // interface(MEMORY_INTER) %{
3491 // base(0xFFFFFFFF);
3492 // index(0x4);
3493 // scale(0x0);
3494 // disp($addr);
3495 // %}
3496 // %}
3497
3498 // Indirect Memory Operand
3499 operand indirect(any_RegP reg)
3500 %{
3501 constraint(ALLOC_IN_RC(ptr_reg));
3502 match(reg);
|
383 reg_class int_rdi_reg(RDI);
384
385 // Singleton class for instruction pointer
386 // reg_class ip_reg(RIP);
387
388 %}
389
390 //----------SOURCE BLOCK-------------------------------------------------------
391 // This is a block of C++ code which provides values, functions, and
392 // definitions necessary in the rest of the architecture description
393 source %{
394 #define RELOC_IMM64 Assembler::imm_operand
395 #define RELOC_DISP32 Assembler::disp32_operand
396
397 #define __ _masm.
398
399 static int preserve_SP_size() {
400 return 3; // rex.w, op, rm(reg/reg)
401 }
402 static int clear_avx_size() {
403 if(UseAVX > 2) {
404 return 0; // vzeroupper is ignored
405 } else {
406 return (Compile::current()->max_vector_size() > 16) ? 3 : 0; // vzeroupper
407 }
408 }
409
410 // !!!!! Special hack to get all types of calls to specify the byte offset
411 // from the start of the call to the point where the return address
412 // will point.
413 int MachCallStaticJavaNode::ret_addr_offset()
414 {
415 int offset = 5; // 5 bytes from start of call to where return address points
416 offset += clear_avx_size();
417 if (_method_handle_invoke)
418 offset += preserve_SP_size();
419 return offset;
420 }
421
422 int MachCallDynamicJavaNode::ret_addr_offset()
423 {
424 int offset = 15; // 15 bytes from start of call to where return address points
425 offset += clear_avx_size();
426 return offset;
427 }
// Conservative upper bound on the number of relocation entries the
// epilog emits.
int MachEpilogNode::reloc() const
{
  return 2; // a large enough number
}

// The epilog uses the generic pipeline description.
const Pipeline* MachEpilogNode::pipeline() const
{
  return MachNode::pipeline_class();
}

// Byte offset of the safepoint poll within the epilog.
int MachEpilogNode::safepoint_offset() const
{
  return 0;
}
888
889 //=============================================================================
890
// Register class a value lives in, as seen by the spill-copy code.
enum RC {
  rc_bad,   // not a valid register
  rc_int,   // general-purpose register
  rc_mask,  // opmask (K) register, AVX-512 only
  rc_float, // XMM register
  rc_stack  // stack slot
};

// Map an allocator register name to its RC register class.
static enum RC rc_class(OptoReg::Name reg)
{
  if( !OptoReg::is_valid(reg) ) return rc_bad;

  if (OptoReg::is_stack(reg)) return rc_stack;

  VMReg r = OptoReg::as_VMReg(reg);

  if (r->is_Register()) return rc_int;

  // K (opmask) registers only exist when running in AVX-512 (AVX3) mode.
  if (r->is_KRegister()) {
    assert(UseAVX > 2, "must be used in AVX3 mode");
    return rc_mask;
  }

  // Anything left must be an XMM register.
  assert(r->is_XMMRegister(), "must be");
  return rc_float;
}
917
918 // Next two methods are shared by 32- and 64-bit VM. They are defined in x86.ad.
919 static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
920 int src_hi, int dst_hi, uint ireg, outputStream* st);
921
922 static int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
923 int stack_offset, int reg, uint ireg, outputStream* st);
924
925 static void vec_stack_to_stack_helper(CodeBuffer *cbuf, int src_offset,
926 int dst_offset, uint ireg, outputStream* st) {
927 if (cbuf) {
928 MacroAssembler _masm(cbuf);
929 switch (ireg) {
930 case Op_VecS:
931 __ movq(Address(rsp, -8), rax);
932 __ movl(rax, Address(rsp, src_offset));
933 __ movl(Address(rsp, dst_offset), rax);
934 __ movq(rax, Address(rsp, -8));
935 break;
936 case Op_VecD:
937 __ pushq(Address(rsp, src_offset));
938 __ popq (Address(rsp, dst_offset));
939 break;
940 case Op_VecX:
941 __ pushq(Address(rsp, src_offset));
942 __ popq (Address(rsp, dst_offset));
943 __ pushq(Address(rsp, src_offset+8));
944 __ popq (Address(rsp, dst_offset+8));
945 break;
946 case Op_VecY:
947 __ vmovdqu(Address(rsp, -32), xmm0);
948 __ vmovdqu(xmm0, Address(rsp, src_offset));
949 __ vmovdqu(Address(rsp, dst_offset), xmm0);
950 __ vmovdqu(xmm0, Address(rsp, -32));
951 case Op_VecZ:
952 __ evmovdqu(Address(rsp, -64), xmm0, 2);
953 __ evmovdqu(xmm0, Address(rsp, src_offset), 2);
954 __ evmovdqu(Address(rsp, dst_offset), xmm0, 2);
955 __ evmovdqu(xmm0, Address(rsp, -64), 2);
956 break;
957 default:
958 ShouldNotReachHere();
959 }
960 #ifndef PRODUCT
961 } else {
962 switch (ireg) {
963 case Op_VecS:
964 st->print("movq [rsp - #8], rax\t# 32-bit mem-mem spill\n\t"
965 "movl rax, [rsp + #%d]\n\t"
966 "movl [rsp + #%d], rax\n\t"
967 "movq rax, [rsp - #8]",
968 src_offset, dst_offset);
969 break;
970 case Op_VecD:
971 st->print("pushq [rsp + #%d]\t# 64-bit mem-mem spill\n\t"
972 "popq [rsp + #%d]",
973 src_offset, dst_offset);
974 break;
975 case Op_VecX:
976 st->print("pushq [rsp + #%d]\t# 128-bit mem-mem spill\n\t"
977 "popq [rsp + #%d]\n\t"
978 "pushq [rsp + #%d]\n\t"
979 "popq [rsp + #%d]",
980 src_offset, dst_offset, src_offset+8, dst_offset+8);
981 break;
982 case Op_VecY:
983 st->print("vmovdqu [rsp - #32], xmm0\t# 256-bit mem-mem spill\n\t"
984 "vmovdqu xmm0, [rsp + #%d]\n\t"
985 "vmovdqu [rsp + #%d], xmm0\n\t"
986 "vmovdqu xmm0, [rsp - #32]",
987 src_offset, dst_offset);
988 break;
989 case Op_VecZ:
990 st->print("vmovdqu [rsp - #64], xmm0\t# 512-bit mem-mem spill\n\t"
991 "vmovdqu xmm0, [rsp + #%d]\n\t"
992 "vmovdqu [rsp + #%d], xmm0\n\t"
993 "vmovdqu xmm0, [rsp - #64]",
994 src_offset, dst_offset);
995 break;
996 default:
997 ShouldNotReachHere();
998 }
999 #endif
1000 }
1001 }
1002
1003 uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
1004 PhaseRegAlloc* ra_,
1005 bool do_size,
1006 outputStream* st) const {
1007 assert(cbuf != NULL || st != NULL, "sanity");
1008 // Get registers to move
1009 OptoReg::Name src_second = ra_->get_reg_second(in(1));
1010 OptoReg::Name src_first = ra_->get_reg_first(in(1));
1011 OptoReg::Name dst_second = ra_->get_reg_second(this);
1012 OptoReg::Name dst_first = ra_->get_reg_first(this);
1013
1014 enum RC src_second_rc = rc_class(src_second);
1015 enum RC src_first_rc = rc_class(src_first);
1016 enum RC dst_second_rc = rc_class(dst_second);
1017 enum RC dst_first_rc = rc_class(dst_first);
1018
1019 assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first),
1020 "must move at least 1 register" );
1021
1022 if (src_first == dst_first && src_second == dst_second) {
1023 // Self copy, no move
1024 return 0;
1025 }
1026 if (bottom_type()->isa_vect() != NULL) {
1027 uint ireg = ideal_reg();
1028 assert((src_first_rc != rc_int && dst_first_rc != rc_int), "sanity");
1029 assert((ireg == Op_VecS || ireg == Op_VecD || ireg == Op_VecX || ireg == Op_VecY || ireg == Op_VecZ ), "sanity");
1030 if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) {
1031 // mem -> mem
1032 int src_offset = ra_->reg2offset(src_first);
1033 int dst_offset = ra_->reg2offset(dst_first);
1034 vec_stack_to_stack_helper(cbuf, src_offset, dst_offset, ireg, st);
1035 } else if (src_first_rc == rc_float && dst_first_rc == rc_float ) {
1036 vec_mov_helper(cbuf, false, src_first, dst_first, src_second, dst_second, ireg, st);
1037 } else if (src_first_rc == rc_float && dst_first_rc == rc_stack ) {
1038 int stack_offset = ra_->reg2offset(dst_first);
1039 vec_spill_helper(cbuf, false, false, stack_offset, src_first, ireg, st);
1040 } else if (src_first_rc == rc_stack && dst_first_rc == rc_float ) {
1041 int stack_offset = ra_->reg2offset(src_first);
1042 vec_spill_helper(cbuf, false, true, stack_offset, dst_first, ireg, st);
1043 } else {
1044 ShouldNotReachHere();
1045 }
1046 return 0;
1047 }
1048 if (src_first_rc == rc_stack) {
1049 // mem ->
// Outgoing argument locations for calls using the C calling convention.
c_calling_convention
%{
  // This is obviously always outgoing
  (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
%}

// Location of compiled Java return values. Same as C for now.
return_value
%{
  assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
         "only return normal values");

  // Low half of the return register pair, indexed by ideal register type.
  static const int lo[Op_RegL + 1] = {
    0,
    0,
    RAX_num,  // Op_RegN
    RAX_num,  // Op_RegI
    RAX_num,  // Op_RegP
    XMM0_num, // Op_RegF
    XMM0_num, // Op_RegD
    0,        // Op_RegK
    RAX_num   // Op_RegL
  };
  // High half of the pair; OptoReg::Bad for single-slot values.
  static const int hi[Op_RegL + 1] = {
    0,
    0,
    OptoReg::Bad, // Op_RegN
    OptoReg::Bad, // Op_RegI
    RAX_H_num,    // Op_RegP
    OptoReg::Bad, // Op_RegF
    XMM0b_num,    // Op_RegD
    0,            // Op_RegK
    RAX_H_num     // Op_RegL
  };
  // Excluded flags and vector registers.
  assert(ARRAY_SIZE(hi) == _last_machine_leaf - 6, "missing type");
  return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
%}
2729 %}
2730
2731 //----------ATTRIBUTES---------------------------------------------------------
2732 //----------Operand Attributes-------------------------------------------------
2733 op_attrib op_cost(0); // Required cost attribute
2734
2735 //----------Instruction Attributes---------------------------------------------
2736 ins_attrib ins_cost(100); // Required cost attribute
2737 ins_attrib ins_size(8); // Required size attribute (in bits)
2738 ins_attrib ins_short_branch(0); // Required flag: is this instruction
2739 // a non-matching short branch variant
2740 // of some long branch?
2741 ins_attrib ins_alignment(1); // Required alignment attribute (must
2742 // be a power of 2) specifies the
2743 // alignment that some part of the
2744 // instruction (not necessarily the
2745 // start) requires. If > 1, a
2746 // compute_padding() function must be
3467
// Flags register, used as output of FLOATING POINT compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "RFLAGS_U" %}
  interface(REG_INTER);
%}

// Flags register variant; predicate(false) keeps the matcher from
// selecting it, so it is only used where named explicitly in
// instruction definitions.
operand rFlagsRegUCF() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);
  predicate(false);

  format %{ "RFLAGS_U_CF" %}
  interface(REG_INTER);
%}

// Float register operand.
operand regF() %{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  format %{ %}
  interface(REG_INTER);
%}

// Double register operand.
operand regD() %{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  format %{ %}
  interface(REG_INTER);
%}

// Vectors
// 32-bit vector operand.
operand vecS() %{
  constraint(ALLOC_IN_RC(vectors_reg));
  match(VecS);

  format %{ %}
  interface(REG_INTER);
%}

// 64-bit vector operand.
operand vecD() %{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  format %{ %}
  interface(REG_INTER);
%}

// 128-bit vector operand.
operand vecX() %{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  format %{ %}
  interface(REG_INTER);
%}

// 256-bit vector operand.
operand vecY() %{
  constraint(ALLOC_IN_RC(vectory_reg));
  match(VecY);

  format %{ %}
  interface(REG_INTER);
%}
3535
3536 //----------Memory Operands----------------------------------------------------
3537 // Direct Memory Operand
3538 // operand direct(immP addr)
3539 // %{
3540 // match(addr);
3541
3542 // format %{ "[$addr]" %}
3543 // interface(MEMORY_INTER) %{
3544 // base(0xFFFFFFFF);
3545 // index(0x4);
3546 // scale(0x0);
3547 // disp($addr);
3548 // %}
3549 // %}
3550
3551 // Indirect Memory Operand
3552 operand indirect(any_RegP reg)
3553 %{
3554 constraint(ALLOC_IN_RC(ptr_reg));
3555 match(reg);
|