2496 mstack.push(m->in(AddPNode::Address), Pre_Visit);
2497 mstack.push(m->in(AddPNode::Base), Pre_Visit);
2498 return true;
2499 }
2500 return false;
2501 }
2502
// Platform hook that lets the backend rewrite an AddP (address
// computation) node into a cheaper shape before matching. AArch64
// leaves the address expression as-is, so this is intentionally a no-op.
void Compile::reshape_address(AddPNode* addp) {
}
2505
2506
// Emit a volatile memory access. The acquire/release instruction passed
// as INSN (e.g. an ldar/stlr form) takes only a bare base register, so
// any indexed, scaled or displaced addressing mode is rejected up front
// with guarantee(). SCRATCH is accepted for signature uniformity.
// (No comments inside the macro body: a '//' before a trailing '\'
// would swallow the continuation.)
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
2515
// Pointer-to-member-function types used to hand the loadStore() helpers
// below the MacroAssembler instruction to emit:
//   mem_insn        - integer register <-> memory
//   mem_float_insn  - floating-point register <-> memory
//   mem_vector_insn - SIMD register <-> memory (with a register variant)
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
2520
2521 // Used for all non-volatile memory accesses. The use of
2522 // $mem->opcode() to discover whether this pattern uses sign-extended
2523 // offsets is something of a kludge.
2524 static void loadStore(MacroAssembler masm, mem_insn insn,
2525 Register reg, int opcode,
2526 Register base, int index, int size, int disp)
2527 {
2528 Address::extend scale;
2529
2530 // Hooboy, this is fugly. We need a way to communicate to the
2531 // encoder that the index needs to be sign extended, so we have to
2532 // enumerate all the cases.
2533 switch (opcode) {
2534 case INDINDEXSCALEDI2L:
2535 case INDINDEXSCALEDI2LN:
2536 case INDINDEXI2L:
2537 case INDINDEXI2LN:
2538 scale = Address::sxtw(size);
2539 break;
2540 default:
2541 scale = Address::lsl(size);
2542 }
2543
2544 if (index == -1) {
2545 (masm.*insn)(reg, Address(base, disp));
2546 } else {
2547 assert(disp == 0, "unsupported address mode: disp = %d", disp);
2548 (masm.*insn)(reg, Address(base, as_Register(index), scale));
2549 }
2550 }
2551
2552 static void loadStore(MacroAssembler masm, mem_float_insn insn,
2553 FloatRegister reg, int opcode,
2554 Register base, int index, int size, int disp)
2555 {
2556 Address::extend scale;
2557
2558 switch (opcode) {
2559 case INDINDEXSCALEDI2L:
2560 case INDINDEXSCALEDI2LN:
2561 scale = Address::sxtw(size);
2562 break;
2563 default:
2564 scale = Address::lsl(size);
2565 }
2566
2567 if (index == -1) {
2568 (masm.*insn)(reg, Address(base, disp));
2569 } else {
2570 assert(disp == 0, "unsupported address mode: disp = %d", disp);
2571 (masm.*insn)(reg, Address(base, as_Register(index), scale));
2572 }
2573 }
2574
2575 static void loadStore(MacroAssembler masm, mem_vector_insn insn,
2576 FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
2577 int opcode, Register base, int index, int size, int disp)
2578 {
2579 if (index == -1) {
2580 (masm.*insn)(reg, T, Address(base, disp));
2581 } else {
2582 assert(disp == 0, "unsupported address mode");
2583 (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
2584 }
2585 }
2586
2587 %}
2588
2589
2590
2591 //----------ENCODING BLOCK-----------------------------------------------------
2592 // This block specifies the encoding classes used by the compiler to
2593 // output byte streams. Encoding classes are parameterized macros
2594 // used by Machine Instruction Nodes in order to generate the bit
2595 // encoding of the instruction. Operands specify their base encoding
2596 // interface with the interface keyword. There are currently
2597 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
// Describes where a call's return value lives, indexed by the ideal
// register class of the value: low word in R0 (integer/pointer) or V0
// (float/double); the hi table supplies the upper half for 64-bit
// values and OptoReg::Bad where there is none.
return_value
%{
  // TODO do we allow ideal_reg == Op_RegN???
  assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
         "only return normal values");

  static const int lo[Op_RegL + 1] = { // enum name
    0,                 // Op_Node
    0,                 // Op_Set
    R0_num,            // Op_RegN
    R0_num,            // Op_RegI
    R0_num,            // Op_RegP
    V0_num,            // Op_RegF
    V0_num,            // Op_RegD
    R0_num             // Op_RegL
  };

  static const int hi[Op_RegL + 1] = { // enum name
    0,                 // Op_Node
    0,                 // Op_Set
    OptoReg::Bad,      // Op_RegN
    OptoReg::Bad,      // Op_RegI
    R0_H_num,          // Op_RegP
    OptoReg::Bad,      // Op_RegF
    V0_H_num,          // Op_RegD
    R0_H_num           // Op_RegL
  };

  return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
%}
3804 %}
3805
3806 //----------ATTRIBUTES---------------------------------------------------------
3807 //----------Operand Attributes-------------------------------------------------
3808 op_attrib op_cost(1); // Required cost attribute
3809
3810 //----------Instruction Attributes---------------------------------------------
3811 ins_attrib ins_cost(INSN_COST); // Required cost attribute
3812 ins_attrib ins_size(32); // Required size attribute (in bits)
3813 ins_attrib ins_short_branch(0); // Required flag: is this instruction
3814 // a non-matching short branch variant
6906 ins_pipe(iload_reg_mem);
6907 %}
6908
// Load Range -- 32-bit ldrw of a LoadRange node (array length).
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6921
// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  // Besides requiring a plain (non-acquiring) load, only match pointer
  // loads that carry no GC barrier data -- consistent with the other
  // pointer-access rules in this file; barrier'd pointer loads are
  // matched by GC-specific rules instead.
  predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));

  ins_cost(4 * INSN_COST);
  format %{ "ldr $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6935
6936 // Load Compressed Pointer
6937 instruct loadN(iRegNNoSp dst, memory mem)
6938 %{
6939 match(Set dst (LoadN mem));
6940 predicate(!needs_acquiring_load(n));
6941
6942 ins_cost(4 * INSN_COST);
6943 format %{ "ldrw $dst, $mem\t# compressed ptr" %}
6944
6945 ins_encode(aarch64_enc_ldrw(dst, mem));
6946
7599 ins_pipe(pipe_serial);
7600 %}
7601
// Load Long (64 bit signed), volatile form: ldar gives the load acquire
// semantics.
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // NOTE(review): the "# int" in the format text looks copy-pasted from
  // the int variant; it is debug output only.
  format %{ "ldar $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7614
// Load Pointer, volatile form (ldar = load-acquire).
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));
  // Only match pointer loads that carry no GC barrier data, consistent
  // with the other pointer-access rules in this file; barrier'd loads
  // are matched by GC-specific rules.
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7627
7628 // Load Compressed Pointer
7629 instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
7630 %{
7631 match(Set dst (LoadN mem));
7632
7633 ins_cost(VOLATILE_REF_COST);
7634 format %{ "ldarw $dst, $mem\t# compressed ptr" %}
7635
7636 ins_encode(aarch64_enc_ldarw(dst, mem));
7637
7638 ins_pipe(pipe_serial);
8535
8536 match(Set res (CompareAndSwapL mem (Binary oldval newval)));
8537 ins_cost(2 * VOLATILE_REF_COST);
8538
8539 effect(KILL cr);
8540
8541 format %{
8542 "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
8543 "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8544 %}
8545
8546 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
8547 aarch64_enc_cset_eq(res));
8548
8549 ins_pipe(pipe_slow);
8550 %}
8551
// Strong pointer CAS; $res is the 0/1 success flag (cset on EQ).
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  // Only match when the node carries no GC barrier data, consistent with
  // the other pointer CAS rules in this file; barrier'd pointer CASes
  // are matched by GC-specific rules.
  predicate(n->as_LoadStore()->barrier_data() == 0);
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8569
8570 instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
8571
8572 match(Set res (CompareAndSwapN mem (Binary oldval newval)));
8573 ins_cost(2 * VOLATILE_REF_COST);
8574
8648
8649 predicate(needs_acquiring_load_exclusive(n));
8650 match(Set res (CompareAndSwapL mem (Binary oldval newval)));
8651 ins_cost(VOLATILE_REF_COST);
8652
8653 effect(KILL cr);
8654
8655 format %{
8656 "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
8657 "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8658 %}
8659
8660 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
8661 aarch64_enc_cset_eq(res));
8662
8663 ins_pipe(pipe_slow);
8664 %}
8665
// Acquiring variant of the strong pointer CAS.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  // Besides needing an acquiring load-exclusive, only match when the
  // node carries no GC barrier data, consistent with the other pointer
  // CAS rules in this file.
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8684
8685 instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
8686
8687 predicate(needs_acquiring_load_exclusive(n));
8688 match(Set res (CompareAndSwapN mem (Binary oldval newval)));
8779 %}
8780 ins_pipe(pipe_slow);
8781 %}
8782
// Strong compare-and-exchange of a compressed (narrow) pointer; the old
// memory value is delivered in $res (passed as the cmpxchg result reg).
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  // NOTE(review): format text says "weak" but /*weak*/ false is passed
  // below -- debug text only; confirm against other ports.
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8797
// Strong compare-and-exchange of a pointer; the old memory value is
// delivered in $res.
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  // Only match when the node carries no GC barrier data, consistent with
  // the other pointer CAS rules in this file; barrier'd pointer CASes
  // are matched by GC-specific rules.
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8812
8813 instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
8814 predicate(needs_acquiring_load_exclusive(n));
8815 match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
8816 ins_cost(VOLATILE_REF_COST);
8817 effect(TEMP_DEF res, KILL cr);
8818 format %{
8878 %}
8879
8880
// Acquiring variant of the narrow-pointer compare-and-exchange:
// /*acquire*/ true below.
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  // NOTE(review): "weak" in the format text is debug output only; the
  // encoding passes /*weak*/ false.
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8896
// Acquiring variant of the pointer compare-and-exchange.
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  // Besides needing an acquiring load-exclusive, only match when the
  // node carries no GC barrier data, consistent with the other pointer
  // CAS rules in this file.
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8912
8913 instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
8914 match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
8915 ins_cost(2 * VOLATILE_REF_COST);
8916 effect(KILL cr);
8917 format %{
8918 "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
8979 %}
8980
// Weak CAS of a compressed (narrow) pointer: /*weak*/ true, so the
// operation may fail spuriously; $res is the 0/1 success flag produced
// by csetw on the EQ condition, not the old value.
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
8997
// Weak pointer CAS: may fail spuriously; $res is the 0/1 success flag.
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  // Only match when the node carries no GC barrier data, consistent with
  // the other pointer CAS rules in this file.
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9014
9015 instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
9016 predicate(needs_acquiring_load_exclusive(n));
9017 match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
9018 ins_cost(VOLATILE_REF_COST);
9086
// Acquiring variant of the weak narrow-pointer CAS: /*acquire*/ true.
// $res is the 0/1 success flag (csetw on EQ).
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9104
// Acquiring variant of the weak pointer CAS.
instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  // Besides needing an acquiring load-exclusive, only match when the
  // node carries no GC barrier data, consistent with the other pointer
  // CAS rules in this file.
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
9122
9123 // END This section of the file is automatically generated. Do not edit --------------
9124 // ---------------------------------------------------------------------
9125
9126 instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
9127 match(Set prev (GetAndSetI mem newv));
9137 match(Set prev (GetAndSetL mem newv));
9138 ins_cost(2 * VOLATILE_REF_COST);
9139 format %{ "atomic_xchg $prev, $newv, [$mem]" %}
9140 ins_encode %{
9141 __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
9142 %}
9143 ins_pipe(pipe_serial);
9144 %}
9145
// Atomic 32-bit exchange of a compressed pointer; $prev receives the
// old memory contents.
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9155
// Atomic 64-bit pointer exchange; $prev receives the old memory contents.
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  // Only match when the node carries no GC barrier data, consistent with
  // the other pointer access rules in this file; barrier'd exchanges are
  // matched by GC-specific rules.
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9165
// Acquiring variant of the 32-bit exchange: emits atomic_xchgalw
// (acquire form) instead of atomic_xchgw.
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9176
9180 ins_cost(VOLATILE_REF_COST);
9181 format %{ "atomic_xchg_acq $prev, $newv, [$mem]" %}
9182 ins_encode %{
9183 __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
9184 %}
9185 ins_pipe(pipe_serial);
9186 %}
9187
// Acquiring variant of the compressed-pointer exchange (atomic_xchgalw).
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9198
// Acquiring variant of the pointer exchange (atomic_xchgal).
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  // Besides needing an acquiring load-exclusive, only match when the
  // node carries no GC barrier data, consistent with the other pointer
  // access rules in this file.
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9209
9210
// Atomic 64-bit fetch-and-add; $newval receives the value fetched.
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  // +1 over the plain CAS cost -- presumably to steer the matcher toward
  // a no-result variant when the fetched value is unused; confirm.
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
9220
|
2496 mstack.push(m->in(AddPNode::Address), Pre_Visit);
2497 mstack.push(m->in(AddPNode::Base), Pre_Visit);
2498 return true;
2499 }
2500 return false;
2501 }
2502
// Platform hook that lets the backend rewrite an AddP (address
// computation) node into a cheaper shape before matching. AArch64
// leaves the address expression as-is, so this is intentionally a no-op.
void Compile::reshape_address(AddPNode* addp) {
}
2505
2506
// Emit a volatile memory access. The acquire/release instruction passed
// as INSN (e.g. an ldar/stlr form) takes only a bare base register, so
// any indexed, scaled or displaced addressing mode is rejected up front
// with guarantee(). SCRATCH is accepted for signature uniformity.
// (No comments inside the macro body: a '//' before a trailing '\'
// would swallow the continuation.)
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  MacroAssembler _masm(&cbuf);                                          \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
2515
2516
2517 static Address mem2address(int opcode, Register base, int index, int size, int disp)
2518 {
2519 Address::extend scale;
2520
2521 // Hooboy, this is fugly. We need a way to communicate to the
2522 // encoder that the index needs to be sign extended, so we have to
2523 // enumerate all the cases.
2524 switch (opcode) {
2525 case INDINDEXSCALEDI2L:
2526 case INDINDEXSCALEDI2LN:
2527 case INDINDEXI2L:
2528 case INDINDEXI2LN:
2529 scale = Address::sxtw(size);
2530 break;
2531 default:
2532 scale = Address::lsl(size);
2533 }
2534
2535 if (index == -1) {
2536 return Address(base, disp);
2537 } else {
2538 assert(disp == 0, "unsupported address mode: disp = %d", disp);
2539 return Address(base, as_Register(index), scale);
2540 }
2541 }
2542
2543
// Pointer-to-member-function types used to hand the loadStore() helpers
// below the MacroAssembler instruction to emit:
//   mem_insn        - integer register <-> memory (Address operand)
//   mem_insn2       - integer register <-> memory via a bare base
//                     register -- presumably for volatile/acquire-release
//                     forms; confirm against its users
//   mem_float_insn  - floating-point register <-> memory
//   mem_vector_insn - SIMD register <-> memory (with a register variant)
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
2549
2550 // Used for all non-volatile memory accesses. The use of
2551 // $mem->opcode() to discover whether this pattern uses sign-extended
2552 // offsets is something of a kludge.
2553 static void loadStore(MacroAssembler masm, mem_insn insn,
2554 Register reg, int opcode,
2555 Register base, int index, int size, int disp)
2556 {
2557 Address addr = mem2address(opcode, base, index, size, disp);
2558 (masm.*insn)(reg, addr);
2559 }
2560
2561 static void loadStore(MacroAssembler masm, mem_float_insn insn,
2562 FloatRegister reg, int opcode,
2563 Register base, int index, int size, int disp)
2564 {
2565 Address::extend scale;
2566
2567 switch (opcode) {
2568 case INDINDEXSCALEDI2L:
2569 case INDINDEXSCALEDI2LN:
2570 scale = Address::sxtw(size);
2571 break;
2572 default:
2573 scale = Address::lsl(size);
2574 }
2575
2576 if (index == -1) {
2577 (masm.*insn)(reg, Address(base, disp));
2578 } else {
2579 assert(disp == 0, "unsupported address mode: disp = %d", disp);
2580 (masm.*insn)(reg, Address(base, as_Register(index), scale));
2581 }
2582 }
2583
2584 static void loadStore(MacroAssembler masm, mem_vector_insn insn,
2585 FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
2586 int opcode, Register base, int index, int size, int disp)
2587 {
2588 if (index == -1) {
2589 (masm.*insn)(reg, T, Address(base, disp));
2590 } else {
2591 assert(disp == 0, "unsupported address mode");
2592 (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
2593 }
2594 }
2595
2596 %}
2597
2598
2599
2600 //----------ENCODING BLOCK-----------------------------------------------------
2601 // This block specifies the encoding classes used by the compiler to
2602 // output byte streams. Encoding classes are parameterized macros
2603 // used by Machine Instruction Nodes in order to generate the bit
2604 // encoding of the instruction. Operands specify their base encoding
2605 // interface with the interface keyword. There are currently
2606 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
// Describes where a call's return value lives, indexed by the ideal
// register class of the value: low word in R0 (integer/pointer) or V0
// (float/double); the hi table supplies the upper half for 64-bit
// values and OptoReg::Bad where there is none.
return_value
%{
  // TODO do we allow ideal_reg == Op_RegN???
  assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
         "only return normal values");

  static const int lo[Op_RegL + 1] = { // enum name
    0,                 // Op_Node
    0,                 // Op_Set
    R0_num,            // Op_RegN
    R0_num,            // Op_RegI
    R0_num,            // Op_RegP
    V0_num,            // Op_RegF
    V0_num,            // Op_RegD
    R0_num             // Op_RegL
  };

  static const int hi[Op_RegL + 1] = { // enum name
    0,                 // Op_Node
    0,                 // Op_Set
    OptoReg::Bad,      // Op_RegN
    OptoReg::Bad,      // Op_RegI
    R0_H_num,          // Op_RegP
    OptoReg::Bad,      // Op_RegF
    V0_H_num,          // Op_RegD
    R0_H_num           // Op_RegL
  };

  return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
%}
3813 %}
3814
3815 //----------ATTRIBUTES---------------------------------------------------------
3816 //----------Operand Attributes-------------------------------------------------
3817 op_attrib op_cost(1); // Required cost attribute
3818
3819 //----------Instruction Attributes---------------------------------------------
3820 ins_attrib ins_cost(INSN_COST); // Required cost attribute
3821 ins_attrib ins_size(32); // Required size attribute (in bits)
3822 ins_attrib ins_short_branch(0); // Required flag: is this instruction
3823 // a non-matching short branch variant
6915 ins_pipe(iload_reg_mem);
6916 %}
6917
// Load Range -- 32-bit ldrw of a LoadRange node (array length).
instruct loadRange(iRegINoSp dst, memory mem)
%{
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6930
// Load Pointer
instruct loadP(iRegPNoSp dst, memory mem)
%{
  match(Set dst (LoadP mem));
  // Plain pointer load only: no acquire semantics, and no GC barrier
  // data attached (barrier'd pointer loads are matched by GC-specific
  // rules).
  predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));

  ins_cost(4 * INSN_COST);
  format %{ "ldr $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
6944
6945 // Load Compressed Pointer
6946 instruct loadN(iRegNNoSp dst, memory mem)
6947 %{
6948 match(Set dst (LoadN mem));
6949 predicate(!needs_acquiring_load(n));
6950
6951 ins_cost(4 * INSN_COST);
6952 format %{ "ldrw $dst, $mem\t# compressed ptr" %}
6953
6954 ins_encode(aarch64_enc_ldrw(dst, mem));
6955
7608 ins_pipe(pipe_serial);
7609 %}
7610
// Load Long (64 bit signed), volatile form: ldar gives the load acquire
// semantics.
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadL mem));

  ins_cost(VOLATILE_REF_COST);
  // NOTE(review): the "# int" in the format text looks copy-pasted from
  // the int variant; it is debug output only.
  format %{ "ldar $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7623
// Load Pointer, volatile form (ldar = load-acquire).
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));
  // Only pointer loads without GC barrier data; barrier'd loads are
  // matched by GC-specific rules.
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
7637
7638 // Load Compressed Pointer
7639 instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
7640 %{
7641 match(Set dst (LoadN mem));
7642
7643 ins_cost(VOLATILE_REF_COST);
7644 format %{ "ldarw $dst, $mem\t# compressed ptr" %}
7645
7646 ins_encode(aarch64_enc_ldarw(dst, mem));
7647
7648 ins_pipe(pipe_serial);
8545
8546 match(Set res (CompareAndSwapL mem (Binary oldval newval)));
8547 ins_cost(2 * VOLATILE_REF_COST);
8548
8549 effect(KILL cr);
8550
8551 format %{
8552 "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
8553 "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8554 %}
8555
8556 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
8557 aarch64_enc_cset_eq(res));
8558
8559 ins_pipe(pipe_slow);
8560 %}
8561
// Strong pointer CAS; $res is the 0/1 success flag (cset on EQ).
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  // Only nodes without GC barrier data; barrier'd pointer CASes are
  // matched by GC-specific rules.
  predicate(n->as_LoadStore()->barrier_data() == 0);
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8580
8581 instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
8582
8583 match(Set res (CompareAndSwapN mem (Binary oldval newval)));
8584 ins_cost(2 * VOLATILE_REF_COST);
8585
8659
8660 predicate(needs_acquiring_load_exclusive(n));
8661 match(Set res (CompareAndSwapL mem (Binary oldval newval)));
8662 ins_cost(VOLATILE_REF_COST);
8663
8664 effect(KILL cr);
8665
8666 format %{
8667 "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
8668 "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8669 %}
8670
8671 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
8672 aarch64_enc_cset_eq(res));
8673
8674 ins_pipe(pipe_slow);
8675 %}
8676
// Acquiring variant of the strong pointer CAS.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  // Needs an acquiring load-exclusive, and only nodes without GC
  // barrier data (barrier'd pointer CASes are matched by GC-specific
  // rules).
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
             aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
8695
8696 instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
8697
8698 predicate(needs_acquiring_load_exclusive(n));
8699 match(Set res (CompareAndSwapN mem (Binary oldval newval)));
8790 %}
8791 ins_pipe(pipe_slow);
8792 %}
8793
// Strong compare-and-exchange of a compressed (narrow) pointer; the old
// memory value is delivered in $res (passed as the cmpxchg result reg).
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  // NOTE(review): format text says "weak" but /*weak*/ false is passed
  // below -- debug text only; confirm against other ports.
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8808
// Strong compare-and-exchange of a pointer; the old memory value is
// delivered in $res.
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  // Only nodes without GC barrier data; barrier'd pointer CASes are
  // matched by GC-specific rules.
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  // NOTE(review): "weak" in the format text is debug output only; the
  // encoding passes /*weak*/ false.
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8824
8825 instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
8826 predicate(needs_acquiring_load_exclusive(n));
8827 match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
8828 ins_cost(VOLATILE_REF_COST);
8829 effect(TEMP_DEF res, KILL cr);
8830 format %{
8890 %}
8891
8892
// Acquiring variant of the narrow-pointer compare-and-exchange:
// /*acquire*/ true below.
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  // NOTE(review): "weak" in the format text is debug output only; the
  // encoding passes /*weak*/ false.
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8908
// Acquiring variant of the pointer compare-and-exchange.
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  // Needs an acquiring load-exclusive, and only nodes without GC
  // barrier data (barrier'd pointer CASes are matched by GC-specific
  // rules).
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
8924
8925 instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
8926 match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
8927 ins_cost(2 * VOLATILE_REF_COST);
8928 effect(KILL cr);
8929 format %{
8930 "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
8991 %}
8992
// Weak CAS on a narrow oop field: $res <-- 1 on success, 0 on failure.
// Weak form (/*weak*/ true): failure may be spurious, so no retry loop
// is emitted; relaxed (no acquire) ordering on the load side.
8993 instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
8994 match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
8995 ins_cost(2 * VOLATILE_REF_COST);
8996 effect(KILL cr);
8997 format %{
8998 "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
8999 "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9000 %}
9001 ins_encode %{
// cmpxchg result register is noreg: only the flags outcome is used.
9002 __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
9003 Assembler::word, /*acquire*/ false, /*release*/ true,
9004 /*weak*/ true, noreg);
// Materialize success (EQ) as 0/1 into $res.
9005 __ csetw($res$$Register, Assembler::EQ);
9006 %}
9007 ins_pipe(pipe_slow);
9008 %}
9009
// Weak CAS on a pointer field: $res <-- 1 on success, 0 on failure.
// Only for nodes without GC barrier data (barrier_data() == 0);
// weak form, so a failure may be spurious and no retry loop is emitted.
9010 instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
9011 predicate(n->as_LoadStore()->barrier_data() == 0);
9012 match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
9013 ins_cost(2 * VOLATILE_REF_COST);
9014 effect(KILL cr);
9015 format %{
9016 "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
9017 "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9018 %}
9019 ins_encode %{
// 64-bit (xword) exchange; result register noreg — only flags are used.
9020 __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
9021 Assembler::xword, /*acquire*/ false, /*release*/ true,
9022 /*weak*/ true, noreg);
// Materialize success (EQ) as 0/1 into $res.
9023 __ csetw($res$$Register, Assembler::EQ);
9024 %}
9025 ins_pipe(pipe_slow);
9026 %}
9027
9028 instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
9029 predicate(needs_acquiring_load_exclusive(n));
9030 match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
9031 ins_cost(VOLATILE_REF_COST);
9099
// Weak CAS on a narrow oop field with acquire semantics:
// $res <-- 1 on success, 0 on (possibly spurious) failure.
// Selected only when the node needs an acquiring load-exclusive.
9100 instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
9101 predicate(needs_acquiring_load_exclusive(n));
9102 match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
9103 ins_cost(VOLATILE_REF_COST);
9104 effect(KILL cr);
9105 format %{
9106 "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
9107 "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9108 %}
9109 ins_encode %{
// Acquire + release ordering; result register noreg — only flags used.
9110 __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
9111 Assembler::word, /*acquire*/ true, /*release*/ true,
9112 /*weak*/ true, noreg);
// Materialize success (EQ) as 0/1 into $res.
9113 __ csetw($res$$Register, Assembler::EQ);
9114 %}
9115 ins_pipe(pipe_slow);
9116 %}
9117
// Weak CAS on a pointer field with acquire semantics:
// $res <-- 1 on success, 0 on (possibly spurious) failure.
// Requires an acquiring load-exclusive and no GC barrier data.
9118 instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
// predicate listed before match, consistent with every sibling CAS rule
// in this section (the original had the two lines swapped).
9119 predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
9120 match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
9121 ins_cost(VOLATILE_REF_COST);
9122 effect(KILL cr);
9123 format %{
9124 "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
9125 "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9126 %}
9127 ins_encode %{
// 64-bit exchange, acquire + release; result register noreg — flags only.
9128 __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
9129 Assembler::xword, /*acquire*/ true, /*release*/ true,
9130 /*weak*/ true, noreg);
// Materialize success (EQ) as 0/1 into $res.
9131 __ csetw($res$$Register, Assembler::EQ);
9132 %}
9133 ins_pipe(pipe_slow);
9134 %}
9135
9136 // END This section of the file is automatically generated. Do not edit --------------
9137 // ---------------------------------------------------------------------
9138
9139 instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
9140 match(Set prev (GetAndSetI mem newv));
9150 match(Set prev (GetAndSetL mem newv));
9151 ins_cost(2 * VOLATILE_REF_COST);
9152 format %{ "atomic_xchg $prev, $newv, [$mem]" %}
9153 ins_encode %{
9154 __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
9155 %}
9156 ins_pipe(pipe_serial);
9157 %}
9158
// Atomic exchange of a narrow oop: store $newv to [$mem], return the
// previous value in $prev. Word-sized (32-bit) variant, no acquire.
9159 instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
9160 match(Set prev (GetAndSetN mem newv));
9161 ins_cost(2 * VOLATILE_REF_COST);
9162 format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
9163 ins_encode %{
9164 __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
9165 %}
9166 ins_pipe(pipe_serial);
9167 %}
9168
// Atomic exchange of a pointer: store $newv to [$mem], return the
// previous value in $prev. Only for nodes without GC barrier data.
9169 instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
9170 predicate(n->as_LoadStore()->barrier_data() == 0);
9171 match(Set prev (GetAndSetP mem newv));
9172 ins_cost(2 * VOLATILE_REF_COST);
9173 format %{ "atomic_xchg $prev, $newv, [$mem]" %}
9174 ins_encode %{
9175 __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
9176 %}
9177 ins_pipe(pipe_serial);
9178 %}
9179
// Atomic exchange of an int with acquire semantics (selected when the
// node needs an acquiring load-exclusive); previous value ends up in
// $prev. Uses the "al" (acquiring) word-sized exchange.
9180 instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
9181 predicate(needs_acquiring_load_exclusive(n));
9182 match(Set prev (GetAndSetI mem newv));
9183 ins_cost(VOLATILE_REF_COST);
9184 format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
9185 ins_encode %{
9186 __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
9187 %}
9188 ins_pipe(pipe_serial);
9189 %}
9190
9194 ins_cost(VOLATILE_REF_COST);
9195 format %{ "atomic_xchg_acq $prev, $newv, [$mem]" %}
9196 ins_encode %{
9197 __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
9198 %}
9199 ins_pipe(pipe_serial);
9200 %}
9201
// Atomic exchange of a narrow oop with acquire semantics; previous
// value ends up in $prev. Uses the "al" (acquiring) word-sized exchange.
9202 instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
9203 predicate(needs_acquiring_load_exclusive(n));
9204 match(Set prev (GetAndSetN mem newv));
9205 ins_cost(VOLATILE_REF_COST);
9206 format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
9207 ins_encode %{
9208 __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
9209 %}
9210 ins_pipe(pipe_serial);
9211 %}
9212
// Atomic exchange of a pointer with acquire semantics; previous value
// ends up in $prev. Requires an acquiring load-exclusive and no GC
// barrier data. Uses the "al" (acquiring) 64-bit exchange.
9213 instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
9214 predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
9215 match(Set prev (GetAndSetP mem newv));
9216 ins_cost(VOLATILE_REF_COST);
9217 format %{ "atomic_xchg_acq $prev, $newv, [$mem]" %}
9218 ins_encode %{
9219 __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
9220 %}
9221 ins_pipe(pipe_serial);
9222 %}
9223
9224
// Atomic fetch-and-add of a long: add $incr to [$mem] and return the
// resulting value in $newval.
// NOTE(review): cost is 2 * VOLATILE_REF_COST + 1 — presumably one more
// than a result-less sibling rule so that form is preferred when the
// value is unused; confirm against the get_and_addL*_no_res variants.
9225 instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
9226 match(Set newval (GetAndAddL mem incr));
9227 ins_cost(2 * VOLATILE_REF_COST + 1);
9228 format %{ "get_and_addL $newval, [$mem], $incr" %}
9229 ins_encode %{
9230 __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
9231 %}
9232 ins_pipe(pipe_serial);
9233 %}
9234
|