
src/hotspot/cpu/aarch64/aarch64.ad

8248238: Adding Windows support to OpenJDK on AArch64

Summary: LP64 vs LLP64 changes to add Windows support

Contributed-by: Monica Beckwith <monica.beckwith@microsoft.com>, Ludovic Henry <luhenry@microsoft.com>
Reviewed-by:
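
The hunks below replace long, unsigned long, u_int32_t and u_int64_t with the fixed-width <cstdint> types. As a minimal standalone C++ sketch of the portability issue behind that (an illustration only, not code from this patch): on LP64 targets such as Linux/macOS AArch64, long is 64 bits, while on LLP64 targets such as Windows AArch64 it is only 32 bits, so any value squeezed through long or unsigned long can be silently truncated.

#include <cstdint>
#include <climits>
#include <cstdio>

int main() {
  // uint64_t is 64 bits on every ABI; that is the point of the type change.
  static_assert(sizeof(uint64_t) * CHAR_BIT == 64, "fixed-width type");

  // long is 64 bits under LP64 but only 32 bits under LLP64 (Windows
  // AArch64), so a cast such as (unsigned long)some_64_bit_constant
  // would truncate there.
  std::printf("sizeof(long) = %zu, sizeof(uint64_t) = %zu\n",
              sizeof(long), sizeof(uint64_t));
  return 0;
}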


1609     st->print("ldr  rscratch1, [guard]\n\t");
1610     st->print("dmb ishld\n\t");
1611     st->print("ldr  rscratch2, [rthread, #thread_disarmed_offset]\n\t");
1612     st->print("cmp  rscratch1, rscratch2\n\t");
1613     st->print("b.eq skip");
1614     st->print("\n\t");
1615     st->print("blr #nmethod_entry_barrier_stub\n\t");
1616     st->print("b skip\n\t");
1617     st->print("guard: int\n\t");
1618     st->print("\n\t");
1619     st->print("skip:\n\t");
1620   }
1621 }
1622 #endif
1623 
1624 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1625   Compile* C = ra_->C;
1626   C2_MacroAssembler _masm(&cbuf);
1627 
1628   // n.b. frame size includes space for return pc and rfp
1629   const long framesize = C->output()->frame_size_in_bytes();
1630   assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");
1631 
1632   // insert a nop at the start of the prolog so we can patch in a
1633   // branch if we need to invalidate the method later
1634   __ nop();
1635 
1636   if (C->clinit_barrier_on_entry()) {
1637     assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
1638 
1639     Label L_skip_barrier;
1640 
1641     __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
1642     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
1643     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
1644     __ bind(L_skip_barrier);
1645   }
1646 
1647   int bangsize = C->output()->bang_size_in_bytes();
1648   if (C->output()->need_stack_bang(bangsize) && UseStackBanging)
1649     __ generate_stack_overflow_check(bangsize);


3101     int index = $mem$$index;
3102     int scale = $mem$$scale;
3103     int disp = $mem$$disp;
3104     if (index == -1) {
3105       __ prfm(Address(base, disp), PSTL1KEEP);
3106     } else {
3107       Register index_reg = as_Register(index);
3108       if (disp == 0) {
3109         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
3110       } else {
3111         __ lea(rscratch1, Address(base, disp));
3112         __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
3113       }
3114     }
3115   %}
3116 
3117   /// mov encodings
3118 
3119   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
3120     C2_MacroAssembler _masm(&cbuf);
3121     u_int32_t con = (u_int32_t)$src$$constant;
3122     Register dst_reg = as_Register($dst$$reg);
3123     if (con == 0) {
3124       __ movw(dst_reg, zr);
3125     } else {
3126       __ movw(dst_reg, con);
3127     }
3128   %}
3129 
3130   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
3131     C2_MacroAssembler _masm(&cbuf);
3132     Register dst_reg = as_Register($dst$$reg);
3133     u_int64_t con = (u_int64_t)$src$$constant;
3134     if (con == 0) {
3135       __ mov(dst_reg, zr);
3136     } else {
3137       __ mov(dst_reg, con);
3138     }
3139   %}
3140 
3141   enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
3142     C2_MacroAssembler _masm(&cbuf);
3143     Register dst_reg = as_Register($dst$$reg);
3144     address con = (address)$src$$constant;
3145     if (con == NULL || con == (address)1) {
3146       ShouldNotReachHere();
3147     } else {
3148       relocInfo::relocType rtype = $src->constant_reloc();
3149       if (rtype == relocInfo::oop_type) {
3150         __ movoop(dst_reg, (jobject)con, /*immediate*/true);
3151       } else if (rtype == relocInfo::metadata_type) {
3152         __ mov_metadata(dst_reg, (Metadata*)con);
3153       } else {
3154         assert(rtype == relocInfo::none, "unexpected reloc type");
3155         if (con < (address)(uintptr_t)os::vm_page_size()) {
3156           __ mov(dst_reg, con);
3157         } else {
3158           unsigned long offset;
3159           __ adrp(dst_reg, con, offset);
3160           __ add(dst_reg, dst_reg, offset);
3161         }
3162       }
3163     }
3164   %}
3165 
3166   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
3167     C2_MacroAssembler _masm(&cbuf);
3168     Register dst_reg = as_Register($dst$$reg);
3169     __ mov(dst_reg, zr);
3170   %}
3171 
3172   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
3173     C2_MacroAssembler _masm(&cbuf);
3174     Register dst_reg = as_Register($dst$$reg);
3175     __ mov(dst_reg, (u_int64_t)1);
3176   %}
3177 
3178   enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
3179     C2_MacroAssembler _masm(&cbuf);
3180     __ load_byte_map_base($dst$$Register);
3181   %}
3182 
3183   enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
3184     C2_MacroAssembler _masm(&cbuf);
3185     Register dst_reg = as_Register($dst$$reg);
3186     address con = (address)$src$$constant;
3187     if (con == NULL) {
3188       ShouldNotReachHere();
3189     } else {
3190       relocInfo::relocType rtype = $src->constant_reloc();
3191       assert(rtype == relocInfo::oop_type, "unexpected reloc type");
3192       __ set_narrow_oop(dst_reg, (jobject)con);
3193     }
3194   %}
3195 


3280     C2_MacroAssembler _masm(&cbuf);
3281     Register reg1 = as_Register($src1$$reg);
3282     Register reg2 = as_Register($src2$$reg);
3283     __ cmpw(reg1, reg2);
3284   %}
3285 
3286   enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
3287     C2_MacroAssembler _masm(&cbuf);
3288     Register reg = as_Register($src1$$reg);
3289     int32_t val = $src2$$constant;
3290     if (val >= 0) {
3291       __ subsw(zr, reg, val);
3292     } else {
3293       __ addsw(zr, reg, -val);
3294     }
3295   %}
3296 
3297   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
3298     C2_MacroAssembler _masm(&cbuf);
3299     Register reg1 = as_Register($src1$$reg);
3300     u_int32_t val = (u_int32_t)$src2$$constant;
3301     __ movw(rscratch1, val);
3302     __ cmpw(reg1, rscratch1);
3303   %}
3304 
3305   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
3306     C2_MacroAssembler _masm(&cbuf);
3307     Register reg1 = as_Register($src1$$reg);
3308     Register reg2 = as_Register($src2$$reg);
3309     __ cmp(reg1, reg2);
3310   %}
3311 
3312   enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
3313     C2_MacroAssembler _masm(&cbuf);
3314     Register reg = as_Register($src1$$reg);
3315     int64_t val = $src2$$constant;
3316     if (val >= 0) {
3317       __ subs(zr, reg, val);
3318     } else if (val != -val) {
3319       __ adds(zr, reg, -val);
3320     } else {
3321       // aargh, Long.MIN_VALUE is a special case
3322       __ orr(rscratch1, zr, (u_int64_t)val);
3323       __ subs(zr, reg, rscratch1);
3324     }
3325   %}
3326 
3327   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
3328     C2_MacroAssembler _masm(&cbuf);
3329     Register reg1 = as_Register($src1$$reg);
3330     u_int64_t val = (u_int64_t)$src2$$constant;
3331     __ mov(rscratch1, val);
3332     __ cmp(reg1, rscratch1);
3333   %}
3334 
3335   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
3336     C2_MacroAssembler _masm(&cbuf);
3337     Register reg1 = as_Register($src1$$reg);
3338     Register reg2 = as_Register($src2$$reg);
3339     __ cmp(reg1, reg2);
3340   %}
3341 
3342   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
3343     C2_MacroAssembler _masm(&cbuf);
3344     Register reg1 = as_Register($src1$$reg);
3345     Register reg2 = as_Register($src2$$reg);
3346     __ cmpw(reg1, reg2);
3347   %}
3348 
3349   enc_class aarch64_enc_testp(iRegP src) %{
3350     C2_MacroAssembler _masm(&cbuf);


4210   match(ConL);
4211 
4212   op_cost(0);
4213   format %{ %}
4214   interface(CONST_INTER);
4215 %}
4216 
4217 operand immLoffset16()
4218 %{
4219   predicate(Address::offset_ok_for_immed(n->get_long(), 4));
4220   match(ConL);
4221 
4222   op_cost(0);
4223   format %{ %}
4224   interface(CONST_INTER);
4225 %}
4226 
4227 // 32 bit integer valid for add sub immediate
4228 operand immIAddSub()
4229 %{
4230   predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
4231   match(ConI);
4232   op_cost(0);
4233   format %{ %}
4234   interface(CONST_INTER);
4235 %}
4236 
4237 // 32 bit unsigned integer valid for logical immediate
4238 // TODO -- check this is right when e.g. the mask is 0x80000000
4239 operand immILog()
4240 %{
4241   predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
4242   match(ConI);
4243 
4244   op_cost(0);
4245   format %{ %}
4246   interface(CONST_INTER);
4247 %}
4248 
4249 // Integer operands 64 bit
4250 // 64 bit immediate
4251 operand immL()
4252 %{
4253   match(ConL);
4254 
4255   op_cost(0);
4256   format %{ %}
4257   interface(CONST_INTER);
4258 %}
4259 
4260 // 64 bit zero
4261 operand immL0()


4299   match(ConL);
4300 
4301   op_cost(0);
4302   format %{ %}
4303   interface(CONST_INTER);
4304 %}
4305 
4306 // 64 bit integer valid for add sub immediate
4307 operand immLAddSub()
4308 %{
4309   predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
4310   match(ConL);
4311   op_cost(0);
4312   format %{ %}
4313   interface(CONST_INTER);
4314 %}
4315 
4316 // 64 bit integer valid for logical immediate
4317 operand immLLog()
4318 %{
4319   predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
4320   match(ConL);
4321   op_cost(0);
4322   format %{ %}
4323   interface(CONST_INTER);
4324 %}
4325 
4326 // Long Immediate: low 32-bit mask
4327 operand immL_32bits()
4328 %{
4329   predicate(n->get_long() == 0xFFFFFFFFL);
4330   match(ConL);
4331   op_cost(0);
4332   format %{ %}
4333   interface(CONST_INTER);
4334 %}
4335 
4336 // Pointer operands
4337 // Pointer Immediate
4338 operand immP()
4339 %{


7095 
7096 // Load Int Constant
7097 instruct loadConI(iRegINoSp dst, immI src)
7098 %{
7099   match(Set dst src);
7100 
7101   ins_cost(INSN_COST);
7102   format %{ "mov $dst, $src\t# int" %}
7103 
7104   ins_encode( aarch64_enc_movw_imm(dst, src) );
7105 
7106   ins_pipe(ialu_imm);
7107 %}
7108 
7109 // Load Long Constant
7110 instruct loadConL(iRegLNoSp dst, immL src)
7111 %{
7112   match(Set dst src);
7113 
7114   ins_cost(INSN_COST);
7115   format %{ "mov $dst, $src\t# long" %}
7116 
7117   ins_encode( aarch64_enc_mov_imm(dst, src) );
7118 
7119   ins_pipe(ialu_imm);
7120 %}
7121 
7122 // Load Pointer Constant
7123 
7124 instruct loadConP(iRegPNoSp dst, immP con)
7125 %{
7126   match(Set dst con);
7127 
7128   ins_cost(INSN_COST * 4);
7129   format %{
7130     "mov  $dst, $con\t# ptr\n\t"
7131   %}
7132 
7133   ins_encode(aarch64_enc_mov_p(dst, con));
7134 
7135   ins_pipe(ialu_imm);


8231   ins_cost(VOLATILE_REF_COST*100);
8232 
8233   format %{ "membar_volatile\n\t"
8234              "dmb ish"%}
8235 
8236   ins_encode %{
8237     __ block_comment("membar_volatile");
8238     __ membar(Assembler::StoreLoad);
8239   %}
8240 
8241   ins_pipe(pipe_serial);
8242 %}
8243 
8244 // ============================================================================
8245 // Cast/Convert Instructions
8246 
8247 instruct castX2P(iRegPNoSp dst, iRegL src) %{
8248   match(Set dst (CastX2P src));
8249 
8250   ins_cost(INSN_COST);
8251   format %{ "mov $dst, $src\t# long -> ptr" %}
8252 
8253   ins_encode %{
8254     if ($dst$$reg != $src$$reg) {
8255       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
8256     }
8257   %}
8258 
8259   ins_pipe(ialu_reg);
8260 %}
8261 
8262 instruct castP2X(iRegLNoSp dst, iRegP src) %{
8263   match(Set dst (CastP2X src));
8264 
8265   ins_cost(INSN_COST);
8266   format %{ "mov $dst, $src\t# ptr -> long" %}
8267 
8268   ins_encode %{
8269     if ($dst$$reg != $src$$reg) {
8270       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
8271     }
8272   %}
8273 
8274   ins_pipe(ialu_reg);
8275 %}
8276 
8277 // Convert oop into int for vectors alignment masking
8278 instruct convP2I(iRegINoSp dst, iRegP src) %{
8279   match(Set dst (ConvL2I (CastP2X src)));
8280 
8281   ins_cost(INSN_COST);
8282   format %{ "movw $dst, $src\t# ptr -> int" %}
8283   ins_encode %{
8284     __ movw($dst$$Register, $src$$Register);
8285   %}
8286 


8584 
8585  format %{
8586     "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
8587     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8588  %}
8589 
8590  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
8591             aarch64_enc_cset_eq(res));
8592 
8593   ins_pipe(pipe_slow);
8594 %}
8595 
8596 instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
8597 
8598   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
8599   ins_cost(2 * VOLATILE_REF_COST);
8600 
8601   effect(KILL cr);
8602 
8603  format %{
8604     "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
8605     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8606  %}
8607 
8608  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
8609             aarch64_enc_cset_eq(res));
8610 
8611   ins_pipe(pipe_slow);
8612 %}
8613 
8614 instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
8615 
8616   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
8617   predicate(n->as_LoadStore()->barrier_data() == 0);
8618   ins_cost(2 * VOLATILE_REF_COST);
8619 
8620   effect(KILL cr);
8621 
8622  format %{
8623     "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
8624     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"


8699  format %{
8700     "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
8701     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8702  %}
8703 
8704  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
8705             aarch64_enc_cset_eq(res));
8706 
8707   ins_pipe(pipe_slow);
8708 %}
8709 
8710 instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
8711 
8712   predicate(needs_acquiring_load_exclusive(n));
8713   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
8714   ins_cost(VOLATILE_REF_COST);
8715 
8716   effect(KILL cr);
8717 
8718  format %{
8719     "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
8720     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8721  %}
8722 
8723  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
8724             aarch64_enc_cset_eq(res));
8725 
8726   ins_pipe(pipe_slow);
8727 %}
8728 
8729 instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
8730 
8731   predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
8732   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
8733   ins_cost(VOLATILE_REF_COST);
8734 
8735   effect(KILL cr);
8736 
8737  format %{
8738     "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
8739     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"


8816 instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
8817   match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
8818   ins_cost(2 * VOLATILE_REF_COST);
8819   effect(TEMP_DEF res, KILL cr);
8820   format %{
8821     "cmpxchgw $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
8822   %}
8823   ins_encode %{
8824     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8825                Assembler::word, /*acquire*/ false, /*release*/ true,
8826                /*weak*/ false, $res$$Register);
8827   %}
8828   ins_pipe(pipe_slow);
8829 %}
8830 
8831 instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
8832   match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
8833   ins_cost(2 * VOLATILE_REF_COST);
8834   effect(TEMP_DEF res, KILL cr);
8835   format %{
8836     "cmpxchg $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
8837   %}
8838   ins_encode %{
8839     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8840                Assembler::xword, /*acquire*/ false, /*release*/ true,
8841                /*weak*/ false, $res$$Register);
8842   %}
8843   ins_pipe(pipe_slow);
8844 %}
8845 
8846 instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
8847   match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
8848   ins_cost(2 * VOLATILE_REF_COST);
8849   effect(TEMP_DEF res, KILL cr);
8850   format %{
8851     "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
8852   %}
8853   ins_encode %{
8854     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8855                Assembler::word, /*acquire*/ false, /*release*/ true,
8856                /*weak*/ false, $res$$Register);


8914   match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
8915   ins_cost(VOLATILE_REF_COST);
8916   effect(TEMP_DEF res, KILL cr);
8917   format %{
8918     "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
8919   %}
8920   ins_encode %{
8921     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8922                Assembler::word, /*acquire*/ true, /*release*/ true,
8923                /*weak*/ false, $res$$Register);
8924   %}
8925   ins_pipe(pipe_slow);
8926 %}
8927 
8928 instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
8929   predicate(needs_acquiring_load_exclusive(n));
8930   match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
8931   ins_cost(VOLATILE_REF_COST);
8932   effect(TEMP_DEF res, KILL cr);
8933   format %{
8934     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
8935   %}
8936   ins_encode %{
8937     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8938                Assembler::xword, /*acquire*/ true, /*release*/ true,
8939                /*weak*/ false, $res$$Register);
8940   %}
8941   ins_pipe(pipe_slow);
8942 %}
8943 
8944 
8945 instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
8946   predicate(needs_acquiring_load_exclusive(n));
8947   match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
8948   ins_cost(VOLATILE_REF_COST);
8949   effect(TEMP_DEF res, KILL cr);
8950   format %{
8951     "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
8952   %}
8953   ins_encode %{
8954     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,


9013   ins_cost(2 * VOLATILE_REF_COST);
9014   effect(KILL cr);
9015   format %{
9016     "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
9017     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9018   %}
9019   ins_encode %{
9020     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
9021                Assembler::word, /*acquire*/ false, /*release*/ true,
9022                /*weak*/ true, noreg);
9023     __ csetw($res$$Register, Assembler::EQ);
9024   %}
9025   ins_pipe(pipe_slow);
9026 %}
9027 
9028 instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
9029   match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
9030   ins_cost(2 * VOLATILE_REF_COST);
9031   effect(KILL cr);
9032   format %{
9033     "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
9034     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9035   %}
9036   ins_encode %{
9037     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
9038                Assembler::xword, /*acquire*/ false, /*release*/ true,
9039                /*weak*/ true, noreg);
9040     __ csetw($res$$Register, Assembler::EQ);
9041   %}
9042   ins_pipe(pipe_slow);
9043 %}
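
The acquire/release/weak flags passed to cmpxchg in the rules above and below line up with the usual C++ memory-model notions: the plain rules release on success, the *Acq rules (guarded by needs_acquiring_load_exclusive(n)) also acquire, and the weakCompareAndSwap* rules may fail spuriously. A rough standalone analogy in terms of std::atomic (an illustration of the semantics only, not the code the matcher emits):

#include <atomic>
#include <cstdint>

// Strong CAS, release-on-success: roughly the plain compareAndSwap* /
// compareAndExchange* rules.
bool cas_release(std::atomic<int64_t>& mem, int64_t expected, int64_t newval) {
  return mem.compare_exchange_strong(expected, newval,
                                     std::memory_order_release,
                                     std::memory_order_relaxed);
}

// Acquiring variant: roughly the *Acq rules selected when
// needs_acquiring_load_exclusive(n) holds.
bool cas_acq_rel(std::atomic<int64_t>& mem, int64_t expected, int64_t newval) {
  return mem.compare_exchange_strong(expected, newval,
                                     std::memory_order_acq_rel,
                                     std::memory_order_relaxed);
}

// Weak CAS: may fail spuriously, so callers retry; the weakCompareAndSwap*
// rules just report success or failure through cset/csetw.
bool cas_weak(std::atomic<int64_t>& mem, int64_t expected, int64_t newval) {
  return mem.compare_exchange_weak(expected, newval,
                                   std::memory_order_release,
                                   std::memory_order_relaxed);
}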
9044 
9045 instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
9046   match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
9047   ins_cost(2 * VOLATILE_REF_COST);
9048   effect(KILL cr);
9049   format %{
9050     "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
9051     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9052   %}
9053   ins_encode %{


9120   effect(KILL cr);
9121   format %{
9122     "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
9123     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9124   %}
9125   ins_encode %{
9126     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
9127                Assembler::word, /*acquire*/ true, /*release*/ true,
9128                /*weak*/ true, noreg);
9129     __ csetw($res$$Register, Assembler::EQ);
9130   %}
9131   ins_pipe(pipe_slow);
9132 %}
9133 
9134 instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
9135   predicate(needs_acquiring_load_exclusive(n));
9136   match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
9137   ins_cost(VOLATILE_REF_COST);
9138   effect(KILL cr);
9139   format %{
9140     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
9141     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9142   %}
9143   ins_encode %{
9144     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
9145                Assembler::xword, /*acquire*/ true, /*release*/ true,
9146                /*weak*/ true, noreg);
9147     __ csetw($res$$Register, Assembler::EQ);
9148   %}
9149   ins_pipe(pipe_slow);
9150 %}
9151 
9152 instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
9153   predicate(needs_acquiring_load_exclusive(n));
9154   match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
9155   ins_cost(VOLATILE_REF_COST);
9156   effect(KILL cr);
9157   format %{
9158     "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
9159     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9160   %}


9642   ins_cost(INSN_COST * 2);
9643   format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}
9644 
9645   ins_encode %{
9646     // equivalently
9647     // cset(as_Register($dst$$reg),
9648     //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
9649     __ csincw(as_Register($dst$$reg),
9650              zr,
9651              zr,
9652              (Assembler::Condition)$cmp$$cmpcode);
9653   %}
9654 
9655   ins_pipe(icond_none);
9656 %}
9657 
9658 instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
9659   match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));
9660 
9661   ins_cost(INSN_COST * 2);
9662   format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}
9663 
9664   ins_encode %{
9665     __ csel(as_Register($dst$$reg),
9666             as_Register($src2$$reg),
9667             as_Register($src1$$reg),
9668             (Assembler::Condition)$cmp$$cmpcode);
9669   %}
9670 
9671   ins_pipe(icond_reg_reg);
9672 %}
9673 
9674 instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
9675   match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));
9676 
9677   ins_cost(INSN_COST * 2);
9678   format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}
9679 
9680   ins_encode %{
9681     __ csel(as_Register($dst$$reg),
9682             as_Register($src2$$reg),
9683             as_Register($src1$$reg),
9684             (Assembler::Condition)$cmp$$cmpcode);
9685   %}
9686 
9687   ins_pipe(icond_reg_reg);
9688 %}
9689 
9690 // special cases where one arg is zero
9691 
9692 instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
9693   match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));
9694 
9695   ins_cost(INSN_COST * 2);
9696   format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}
9697 
9698   ins_encode %{
9699     __ csel(as_Register($dst$$reg),
9700             zr,
9701             as_Register($src$$reg),
9702             (Assembler::Condition)$cmp$$cmpcode);
9703   %}
9704 
9705   ins_pipe(icond_reg);
9706 %}
9707 
9708 instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
9709   match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));
9710 
9711   ins_cost(INSN_COST * 2);
9712   format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}
9713 
9714   ins_encode %{
9715     __ csel(as_Register($dst$$reg),
9716             zr,
9717             as_Register($src$$reg),
9718             (Assembler::Condition)$cmp$$cmpcode);
9719   %}
9720 
9721   ins_pipe(icond_reg);
9722 %}
9723 
9724 instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
9725   match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));
9726 
9727   ins_cost(INSN_COST * 2);
9728   format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}
9729 
9730   ins_encode %{
9731     __ csel(as_Register($dst$$reg),
9732             as_Register($src$$reg),
9733             zr,
9734             (Assembler::Condition)$cmp$$cmpcode);
9735   %}
9736 
9737   ins_pipe(icond_reg);
9738 %}
9739 
9740 instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
9741   match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));
9742 
9743   ins_cost(INSN_COST * 2);
9744   format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}
9745 
9746   ins_encode %{
9747     __ csel(as_Register($dst$$reg),
9748             as_Register($src$$reg),
9749             zr,
9750             (Assembler::Condition)$cmp$$cmpcode);
9751   %}
9752 
9753   ins_pipe(icond_reg);
9754 %}
9755 
9756 instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
9757   match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));
9758 
9759   ins_cost(INSN_COST * 2);
9760   format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}
9761 
9762   ins_encode %{
9763     __ csel(as_Register($dst$$reg),
9764             as_Register($src2$$reg),


10269 instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
10270   match(Set dst (SubI zero src));
10271 
10272   ins_cost(INSN_COST);
10273   format %{ "negw $dst, $src\t# int" %}
10274 
10275   ins_encode %{
10276     __ negw(as_Register($dst$$reg),
10277             as_Register($src$$reg));
10278   %}
10279 
10280   ins_pipe(ialu_reg);
10281 %}
10282 
10283 // Long Negation
10284 
10285 instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
10286   match(Set dst (SubL zero src));
10287 
10288   ins_cost(INSN_COST);
10289   format %{ "neg $dst, $src\t# long" %}
10290 
10291   ins_encode %{
10292     __ neg(as_Register($dst$$reg),
10293            as_Register($src$$reg));
10294   %}
10295 
10296   ins_pipe(ialu_reg);
10297 %}
10298 
10299 // Integer Multiply
10300 
10301 instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10302   match(Set dst (MulI src1 src2));
10303 
10304   ins_cost(INSN_COST * 3);
10305   format %{ "mulw  $dst, $src1, $src2" %}
10306 
10307   ins_encode %{
10308     __ mulw(as_Register($dst$$reg),
10309             as_Register($src1$$reg),


11903     int r = (rshift - lshift) & 31;
11904     __ ubfmw(as_Register($dst$$reg),
11905             as_Register($src$$reg),
11906             r, s);
11907   %}
11908 
11909   ins_pipe(ialu_reg_shift);
11910 %}
11911 // Bitfield extract with shift & mask
11912 
11913 instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
11914 %{
11915   match(Set dst (AndI (URShiftI src rshift) mask));
11916   // Make sure we are not going to exceed what ubfxw can do.
11917   predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));
11918 
11919   ins_cost(INSN_COST);
11920   format %{ "ubfxw $dst, $src, $rshift, $mask" %}
11921   ins_encode %{
11922     int rshift = $rshift$$constant & 31;
11923     long mask = $mask$$constant;
11924     int width = exact_log2(mask+1);
11925     __ ubfxw(as_Register($dst$$reg),
11926             as_Register($src$$reg), rshift, width);
11927   %}
11928   ins_pipe(ialu_reg_shift);
11929 %}
11930 instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
11931 %{
11932   match(Set dst (AndL (URShiftL src rshift) mask));
11933   // Make sure we are not going to exceed what ubfx can do.
11934   predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));
11935 
11936   ins_cost(INSN_COST);
11937   format %{ "ubfx $dst, $src, $rshift, $mask" %}
11938   ins_encode %{
11939     int rshift = $rshift$$constant & 63;
11940     long mask = $mask$$constant;
11941     int width = exact_log2_long(mask+1);
11942     __ ubfx(as_Register($dst$$reg),
11943             as_Register($src$$reg), rshift, width);
11944   %}
11945   ins_pipe(ialu_reg_shift);
11946 %}
11947 
11948 // We can use ubfx when extending an And with a mask when we know mask
11949 // is positive.  We know that because immI_bitmask guarantees it.
11950 instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
11951 %{
11952   match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
11953   // Make sure we are not going to exceed what ubfxw can do.
11954   predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));
11955 
11956   ins_cost(INSN_COST * 2);
11957   format %{ "ubfx $dst, $src, $rshift, $mask" %}
11958   ins_encode %{
11959     int rshift = $rshift$$constant & 31;
11960     long mask = $mask$$constant;
11961     int width = exact_log2(mask+1);
11962     __ ubfx(as_Register($dst$$reg),
11963             as_Register($src$$reg), rshift, width);
11964   %}
11965   ins_pipe(ialu_reg_shift);
11966 %}
11967 
11968 // We can use ubfiz when masking by a positive number and then left shifting the result.
11969 // We know that the mask is positive because immI_bitmask guarantees it.
11970 instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
11971 %{
11972   match(Set dst (LShiftI (AndI src mask) lshift));
11973   predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));
11974 
11975   ins_cost(INSN_COST);
11976   format %{ "ubfizw $dst, $src, $lshift, $mask" %}
11977   ins_encode %{
11978     int lshift = $lshift$$constant & 31;
11979     long mask = $mask$$constant;
11980     int width = exact_log2(mask+1);
11981     __ ubfizw(as_Register($dst$$reg),
11982           as_Register($src$$reg), lshift, width);
11983   %}
11984   ins_pipe(ialu_reg_shift);
11985 %}
11986 // We can use ubfiz when masking by a positive number and then left shifting the result.
11987 // We know that the mask is positive because immL_bitmask guarantees it.
11988 instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
11989 %{
11990   match(Set dst (LShiftL (AndL src mask) lshift));
11991   predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));
11992 
11993   ins_cost(INSN_COST);
11994   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
11995   ins_encode %{
11996     int lshift = $lshift$$constant & 63;
11997     long mask = $mask$$constant;
11998     int width = exact_log2_long(mask+1);
11999     __ ubfiz(as_Register($dst$$reg),
12000           as_Register($src$$reg), lshift, width);
12001   %}
12002   ins_pipe(ialu_reg_shift);
12003 %}
12004 
12005 // If there is a convert I to L block between an AndI and a LShiftL, we can also match ubfiz
12006 instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
12007 %{
12008   match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
12009   predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));
12010 
12011   ins_cost(INSN_COST);
12012   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
12013   ins_encode %{
12014     int lshift = $lshift$$constant & 63;
12015     long mask = $mask$$constant;
12016     int width = exact_log2(mask+1);
12017     __ ubfiz(as_Register($dst$$reg),
12018              as_Register($src$$reg), lshift, width);
12019   %}
12020   ins_pipe(ialu_reg_shift);
12021 %}
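
All of the ubfx/ubfiz rules above rely on the same arithmetic: for a mask of the form 2^w - 1 (which immI_bitmask/immL_bitmask guarantee), (src >>> rshift) & mask extracts a w-bit field starting at bit rshift (UBFX), and (src & mask) << lshift deposits a w-bit field at bit lshift (UBFIZ), with w recovered as exact_log2(mask + 1). A small standalone model of that equivalence (illustrative helper name, not HotSpot code):

#include <cstdint>
#include <cassert>

// Software model of UBFX: extract `width` bits of `src` starting at `lsb`.
static uint64_t ubfx_model(uint64_t src, unsigned lsb, unsigned width) {
  uint64_t mask = (width == 64) ? ~UINT64_C(0) : ((UINT64_C(1) << width) - 1);
  return (src >> lsb) & mask;
}

int main() {
  uint64_t src = 0x123456789ABCDEF0ULL;
  // Matches the pattern (AndL (URShiftL src 8) 0xFFFF):
  // mask = 0xFFFF, rshift = 8, width = exact_log2(mask + 1) = 16.
  assert(((src >> 8) & 0xFFFF) == ubfx_model(src, 8, 16));
  // UBFIZ is the mirror image: (src & 0xFF) << 4 places an 8-bit field at bit 4.
  assert(((src & 0xFF) << 4) == (ubfx_model(src, 0, 8) << 4));
  return 0;
}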
12022 
12023 // Rotations
12024 
12025 instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
12026 %{
12027   match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
12028   predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));
12029 
12030   ins_cost(INSN_COST);
12031   format %{ "extr $dst, $src1, $src2, #$rshift" %}
12032 
12033   ins_encode %{
12034     __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
12035             $rshift$$constant & 63);


13232 
13233   ins_cost(INSN_COST);
13234   ins_encode %{
13235     __ andw(as_Register($dst$$reg),
13236             as_Register($src1$$reg),
13237             as_Register($src2$$reg));
13238   %}
13239 
13240   ins_pipe(ialu_reg_reg);
13241 %}
13242 
13243 instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
13244   match(Set dst (AndI src1 src2));
13245 
13246   format %{ "andsw  $dst, $src1, $src2\t# int" %}
13247 
13248   ins_cost(INSN_COST);
13249   ins_encode %{
13250     __ andw(as_Register($dst$$reg),
13251             as_Register($src1$$reg),
13252             (unsigned long)($src2$$constant));
13253   %}
13254 
13255   ins_pipe(ialu_reg_imm);
13256 %}
13257 
13258 // Or Instructions
13259 
13260 instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
13261   match(Set dst (OrI src1 src2));
13262 
13263   format %{ "orrw  $dst, $src1, $src2\t# int" %}
13264 
13265   ins_cost(INSN_COST);
13266   ins_encode %{
13267     __ orrw(as_Register($dst$$reg),
13268             as_Register($src1$$reg),
13269             as_Register($src2$$reg));
13270   %}
13271 
13272   ins_pipe(ialu_reg_reg);
13273 %}
13274 
13275 instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
13276   match(Set dst (OrI src1 src2));
13277 
13278   format %{ "orrw  $dst, $src1, $src2\t# int" %}
13279 
13280   ins_cost(INSN_COST);
13281   ins_encode %{
13282     __ orrw(as_Register($dst$$reg),
13283             as_Register($src1$$reg),
13284             (unsigned long)($src2$$constant));
13285   %}
13286 
13287   ins_pipe(ialu_reg_imm);
13288 %}
13289 
13290 // Xor Instructions
13291 
13292 instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
13293   match(Set dst (XorI src1 src2));
13294 
13295   format %{ "eorw  $dst, $src1, $src2\t# int" %}
13296 
13297   ins_cost(INSN_COST);
13298   ins_encode %{
13299     __ eorw(as_Register($dst$$reg),
13300             as_Register($src1$$reg),
13301             as_Register($src2$$reg));
13302   %}
13303 
13304   ins_pipe(ialu_reg_reg);
13305 %}
13306 
13307 instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
13308   match(Set dst (XorI src1 src2));
13309 
13310   format %{ "eorw  $dst, $src1, $src2\t# int" %}
13311 
13312   ins_cost(INSN_COST);
13313   ins_encode %{
13314     __ eorw(as_Register($dst$$reg),
13315             as_Register($src1$$reg),
13316             (unsigned long)($src2$$constant));
13317   %}
13318 
13319   ins_pipe(ialu_reg_imm);
13320 %}
13321 
13322 // Long Logical Instructions
13323 // TODO
13324 
13325 instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
13326   match(Set dst (AndL src1 src2));
13327 
13328   format %{ "and  $dst, $src1, $src2\t# long" %}
13329 
13330   ins_cost(INSN_COST);
13331   ins_encode %{
13332     __ andr(as_Register($dst$$reg),
13333             as_Register($src1$$reg),
13334             as_Register($src2$$reg));
13335   %}
13336 
13337   ins_pipe(ialu_reg_reg);
13338 %}
13339 
13340 instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
13341   match(Set dst (AndL src1 src2));
13342 
13343   format %{ "and  $dst, $src1, $src2\t# long" %}
13344 
13345   ins_cost(INSN_COST);
13346   ins_encode %{
13347     __ andr(as_Register($dst$$reg),
13348             as_Register($src1$$reg),
13349             (unsigned long)($src2$$constant));
13350   %}
13351 
13352   ins_pipe(ialu_reg_imm);
13353 %}
13354 
13355 // Or Instructions
13356 
13357 instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
13358   match(Set dst (OrL src1 src2));
13359 
13360   format %{ "orr  $dst, $src1, $src2\t# long" %}
13361 
13362   ins_cost(INSN_COST);
13363   ins_encode %{
13364     __ orr(as_Register($dst$$reg),
13365            as_Register($src1$$reg),
13366            as_Register($src2$$reg));
13367   %}
13368 
13369   ins_pipe(ialu_reg_reg);
13370 %}
13371 
13372 instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
13373   match(Set dst (OrL src1 src2));
13374 
13375   format %{ "orr  $dst, $src1, $src2\t# long" %}
13376 
13377   ins_cost(INSN_COST);
13378   ins_encode %{
13379     __ orr(as_Register($dst$$reg),
13380            as_Register($src1$$reg),
13381            (unsigned long)($src2$$constant));
13382   %}
13383 
13384   ins_pipe(ialu_reg_imm);
13385 %}
13386 
13387 // Xor Instructions
13388 
13389 instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
13390   match(Set dst (XorL src1 src2));
13391 
13392   format %{ "eor  $dst, $src1, $src2\t# long" %}
13393 
13394   ins_cost(INSN_COST);
13395   ins_encode %{
13396     __ eor(as_Register($dst$$reg),
13397            as_Register($src1$$reg),
13398            as_Register($src2$$reg));
13399   %}
13400 
13401   ins_pipe(ialu_reg_reg);
13402 %}
13403 
13404 instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
13405   match(Set dst (XorL src1 src2));
13406 
13407   ins_cost(INSN_COST);
13408   format %{ "eor  $dst, $src1, $src2\t# long" %}
13409 
13410   ins_encode %{
13411     __ eor(as_Register($dst$$reg),
13412            as_Register($src1$$reg),
13413            (unsigned long)($src2$$constant));
13414   %}
13415 
13416   ins_pipe(ialu_reg_imm);
13417 %}
13418 
13419 instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
13420 %{
13421   match(Set dst (ConvI2L src));
13422 
13423   ins_cost(INSN_COST);
13424   format %{ "sxtw  $dst, $src\t# i2l" %}
13425   ins_encode %{
13426     __ sbfm($dst$$Register, $src$$Register, 0, 31);
13427   %}
13428   ins_pipe(ialu_reg_shift);
13429 %}
13430 
13431 // this pattern occurs in bigmath arithmetic
13432 instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
13433 %{


13842 // ============================================================================
13843 // clearing of an array
13844 
13845 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
13846 %{
13847   match(Set dummy (ClearArray cnt base));
13848   effect(USE_KILL cnt, USE_KILL base);
13849 
13850   ins_cost(4 * INSN_COST);
13851   format %{ "ClearArray $cnt, $base" %}
13852 
13853   ins_encode %{
13854     __ zero_words($base$$Register, $cnt$$Register);
13855   %}
13856 
13857   ins_pipe(pipe_class_memory);
13858 %}
13859 
13860 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
13861 %{
13862   predicate((u_int64_t)n->in(2)->get_long()
13863             < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
13864   match(Set dummy (ClearArray cnt base));
13865   effect(USE_KILL base);
13866 
13867   ins_cost(4 * INSN_COST);
13868   format %{ "ClearArray $cnt, $base" %}
13869 
13870   ins_encode %{
13871     __ zero_words($base$$Register, (u_int64_t)$cnt$$constant);
13872   %}
13873 
13874   ins_pipe(pipe_class_memory);
13875 %}
13876 
13877 // ============================================================================
13878 // Overflow Math Instructions
13879 
13880 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13881 %{
13882   match(Set cr (OverflowAddI op1 op2));
13883 
13884   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13885   ins_cost(INSN_COST);
13886   ins_encode %{
13887     __ cmnw($op1$$Register, $op2$$Register);
13888   %}
13889 
13890   ins_pipe(icmp_reg_reg);
13891 %}
13892 
13893 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13894 %{
13895   match(Set cr (OverflowAddI op1 op2));
13896 
13897   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13898   ins_cost(INSN_COST);
13899   ins_encode %{
13900     __ cmnw($op1$$Register, $op2$$constant);
13901   %}
13902 
13903   ins_pipe(icmp_reg_imm);
13904 %}
13905 
13906 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13907 %{
13908   match(Set cr (OverflowAddL op1 op2));
13909 
13910   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13911   ins_cost(INSN_COST);
13912   ins_encode %{
13913     __ cmn($op1$$Register, $op2$$Register);
13914   %}
13915 
13916   ins_pipe(icmp_reg_reg);
13917 %}
13918 
13919 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13920 %{
13921   match(Set cr (OverflowAddL op1 op2));
13922 
13923   format %{ "cmn   $op1, $op2\t# overflow check long" %}
13924   ins_cost(INSN_COST);
13925   ins_encode %{
13926     __ cmn($op1$$Register, $op2$$constant);
13927   %}
13928 
13929   ins_pipe(icmp_reg_imm);
13930 %}
13931 
13932 instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13933 %{
13934   match(Set cr (OverflowSubI op1 op2));
13935 
13936   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13937   ins_cost(INSN_COST);
13938   ins_encode %{
13939     __ cmpw($op1$$Register, $op2$$Register);
13940   %}
13941 
13942   ins_pipe(icmp_reg_reg);
13943 %}
13944 
13945 instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13946 %{
13947   match(Set cr (OverflowSubI op1 op2));
13948 
13949   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13950   ins_cost(INSN_COST);
13951   ins_encode %{
13952     __ cmpw($op1$$Register, $op2$$constant);
13953   %}
13954 
13955   ins_pipe(icmp_reg_imm);
13956 %}
13957 
13958 instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13959 %{
13960   match(Set cr (OverflowSubL op1 op2));
13961 
13962   format %{ "cmp   $op1, $op2\t# overflow check long" %}
13963   ins_cost(INSN_COST);
13964   ins_encode %{
13965     __ cmp($op1$$Register, $op2$$Register);
13966   %}
13967 
13968   ins_pipe(icmp_reg_reg);
13969 %}
13970 
13971 instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13972 %{
13973   match(Set cr (OverflowSubL op1 op2));
13974 
13975   format %{ "cmp   $op1, $op2\t# overflow check long" %}
13976   ins_cost(INSN_COST);
13977   ins_encode %{
13978     __ subs(zr, $op1$$Register, $op2$$constant);
13979   %}
13980 
13981   ins_pipe(icmp_reg_imm);
13982 %}
13983 
13984 instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
13985 %{
13986   match(Set cr (OverflowSubI zero op1));
13987 
13988   format %{ "cmpw  zr, $op1\t# overflow check int" %}
13989   ins_cost(INSN_COST);
13990   ins_encode %{
13991     __ cmpw(zr, $op1$$Register);
13992   %}
13993 
13994   ins_pipe(icmp_reg_imm);
13995 %}
13996 
13997 instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
13998 %{
13999   match(Set cr (OverflowSubL zero op1));
14000 
14001   format %{ "cmp   zr, $op1\t# overflow check long" %}
14002   ins_cost(INSN_COST);
14003   ins_encode %{
14004     __ cmp(zr, $op1$$Register);
14005   %}
14006 
14007   ins_pipe(icmp_reg_imm);
14008 %}
14009 
14010 instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
14011 %{
14012   match(Set cr (OverflowMulI op1 op2));
14013 
14014   format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
14015             "cmp   rscratch1, rscratch1, sxtw\n\t"
14016             "movw  rscratch1, #0x80000000\n\t"
14017             "cselw rscratch1, rscratch1, zr, NE\n\t"
14018             "cmpw  rscratch1, #1" %}
14019   ins_cost(5 * INSN_COST);
14020   ins_encode %{
14021     __ smull(rscratch1, $op1$$Register, $op2$$Register);


14037 
14038   format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
14039             "cmp   rscratch1, rscratch1, sxtw\n\t"
14040             "b$cmp   $labl" %}
14041   ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
14042   ins_encode %{
14043     Label* L = $labl$$label;
14044     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14045     __ smull(rscratch1, $op1$$Register, $op2$$Register);
14046     __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
14047     __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
14048   %}
14049 
14050   ins_pipe(pipe_serial);
14051 %}
14052 
14053 instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
14054 %{
14055   match(Set cr (OverflowMulL op1 op2));
14056 
14057   format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
14058             "smulh rscratch2, $op1, $op2\n\t"
14059             "cmp   rscratch2, rscratch1, ASR #63\n\t"
14060             "movw  rscratch1, #0x80000000\n\t"
14061             "cselw rscratch1, rscratch1, zr, NE\n\t"
14062             "cmpw  rscratch1, #1" %}
14063   ins_cost(6 * INSN_COST);
14064   ins_encode %{
14065     __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
14066     __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
14067     __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
14068     __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
14069     __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
14070     __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
14071   %}
14072 
14073   ins_pipe(pipe_slow);
14074 %}
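
overflowMulL_reg detects signed 64x64-bit overflow by computing the low 64 bits of the product (mul) and the high 64 bits (smulh), then comparing the high half with the sign-extension of the low half (ASR #63): the product fits in 64 bits exactly when the two agree. A standalone sketch of that check using a 128-bit intermediate (assumes a compiler providing __int128; illustration only):

#include <cstdint>

// Mirrors the mul/smulh test above: the full 128-bit product fits in a
// signed 64-bit result iff its high 64 bits equal the sign-extension
// (arithmetic shift right by 63) of its low 64 bits.
static bool mul_overflows_s64(int64_t a, int64_t b) {
  __int128 p = (__int128)a * (__int128)b;
  int64_t lo = (int64_t)p;          // what `mul` produces
  int64_t hi = (int64_t)(p >> 64);  // what `smulh` produces
  return hi != (lo >> 63);          // NE => overflow, as in the rule above
}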
14075 
14076 instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
14077 %{
14078   match(If cmp (OverflowMulL op1 op2));
14079   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
14080             || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
14081   effect(USE labl, KILL cr);
14082 
14083   format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
14084             "smulh rscratch2, $op1, $op2\n\t"
14085             "cmp   rscratch2, rscratch1, ASR #63\n\t"
14086             "b$cmp $labl" %}
14087   ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
14088   ins_encode %{
14089     Label* L = $labl$$label;
14090     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14091     __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
14092     __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
14093     __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
14094     __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
14095   %}
14096 
14097   ins_pipe(pipe_serial);
14098 %}
14099 
14100 // ============================================================================
14101 // Compare Instructions
14102 
14103 instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)


14827   format %{ "cb$cmp   $op1, $labl" %}
14828   ins_encode %{
14829     Label* L = $labl$$label;
14830     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14831     if (cond == Assembler::EQ || cond == Assembler::LS)
14832       __ cbz($op1$$Register, *L);
14833     else
14834       __ cbnz($op1$$Register, *L);
14835   %}
14836   ins_pipe(pipe_cmp_branch);
14837 %}
14838 
14839 // Test bit and Branch
14840 
14841 // Patterns for short (< 32KiB) variants
14842 instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
14843   match(If cmp (CmpL op1 op2));
14844   effect(USE labl);
14845 
14846   ins_cost(BRANCH_COST);
14847   format %{ "cb$cmp   $op1, $labl # long" %}
14848   ins_encode %{
14849     Label* L = $labl$$label;
14850     Assembler::Condition cond =
14851       ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
14852     __ tbr(cond, $op1$$Register, 63, *L);
14853   %}
14854   ins_pipe(pipe_cmp_branch);
14855   ins_short_branch(1);
14856 %}
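
cmpL_branch_sign (and its int and far counterparts) turn a compare of a register against zero with LT or GE into a single test of the sign bit via tbnz/tbz: LT maps to NE (bit 63 set), GE maps to EQ (bit 63 clear). The underlying identity, as a standalone check (illustration only):

#include <cstdint>
#include <cassert>

int main() {
  for (int64_t x : { INT64_MIN, INT64_C(-1), INT64_C(0), INT64_C(1), INT64_MAX }) {
    bool sign_bit_set = ((uint64_t)x >> 63) != 0;  // what tbnz #63 tests
    assert((x <  0) ==  sign_bit_set);             // LT  <=> bit 63 set
    assert((x >= 0) == !sign_bit_set);             // GE  <=> bit 63 clear
  }
  return 0;
}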
14857 
14858 instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
14859   match(If cmp (CmpI op1 op2));
14860   effect(USE labl);
14861 
14862   ins_cost(BRANCH_COST);
14863   format %{ "cb$cmp   $op1, $labl # int" %}
14864   ins_encode %{
14865     Label* L = $labl$$label;
14866     Assembler::Condition cond =
14867       ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;


14894   effect(USE labl);
14895 
14896   ins_cost(BRANCH_COST);
14897   format %{ "tb$cmp   $op1, $op2, $labl" %}
14898   ins_encode %{
14899     Label* L = $labl$$label;
14900     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14901     int bit = exact_log2((juint)$op2$$constant);
14902     __ tbr(cond, $op1$$Register, bit, *L);
14903   %}
14904   ins_pipe(pipe_cmp_branch);
14905   ins_short_branch(1);
14906 %}
14907 
14908 // And far variants
14909 instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
14910   match(If cmp (CmpL op1 op2));
14911   effect(USE labl);
14912 
14913   ins_cost(BRANCH_COST);
14914   format %{ "cb$cmp   $op1, $labl # long" %}
14915   ins_encode %{
14916     Label* L = $labl$$label;
14917     Assembler::Condition cond =
14918       ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
14919     __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
14920   %}
14921   ins_pipe(pipe_cmp_branch);
14922 %}
14923 
14924 instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
14925   match(If cmp (CmpI op1 op2));
14926   effect(USE labl);
14927 
14928   ins_cost(BRANCH_COST);
14929   format %{ "cb$cmp   $op1, $labl # int" %}
14930   ins_encode %{
14931     Label* L = $labl$$label;
14932     Assembler::Condition cond =
14933       ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
14934     __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);


14959 
14960   ins_cost(BRANCH_COST);
14961   format %{ "tb$cmp   $op1, $op2, $labl" %}
14962   ins_encode %{
14963     Label* L = $labl$$label;
14964     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14965     int bit = exact_log2((juint)$op2$$constant);
14966     __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
14967   %}
14968   ins_pipe(pipe_cmp_branch);
14969 %}
14970 
14971 // Test bits
14972 
14973 instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
14974   match(Set cr (CmpL (AndL op1 op2) op3));
14975   predicate(Assembler::operand_valid_for_logical_immediate
14976             (/*is_32*/false, n->in(1)->in(2)->get_long()));
14977 
14978   ins_cost(INSN_COST);
14979   format %{ "tst $op1, $op2 # long" %}
14980   ins_encode %{
14981     __ tst($op1$$Register, $op2$$constant);
14982   %}
14983   ins_pipe(ialu_reg_reg);
14984 %}
14985 
14986 instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
14987   match(Set cr (CmpI (AndI op1 op2) op3));
14988   predicate(Assembler::operand_valid_for_logical_immediate
14989             (/*is_32*/true, n->in(1)->in(2)->get_int()));
14990 
14991   ins_cost(INSN_COST);
14992   format %{ "tst $op1, $op2 # int" %}
14993   ins_encode %{
14994     __ tstw($op1$$Register, $op2$$constant);
14995   %}
14996   ins_pipe(ialu_reg_reg);
14997 %}
14998 
14999 instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
15000   match(Set cr (CmpL (AndL op1 op2) op3));
15001 
15002   ins_cost(INSN_COST);
15003   format %{ "tst $op1, $op2 # long" %}
15004   ins_encode %{
15005     __ tst($op1$$Register, $op2$$Register);
15006   %}
15007   ins_pipe(ialu_reg_reg);
15008 %}
15009 
15010 instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
15011   match(Set cr (CmpI (AndI op1 op2) op3));
15012 
15013   ins_cost(INSN_COST);
15014   format %{ "tstw $op1, $op2 # int" %}
15015   ins_encode %{
15016     __ tstw($op1$$Register, $op2$$Register);
15017   %}
15018   ins_pipe(ialu_reg_reg);
15019 %}
15020 
15021 
15022 // Conditional Far Branch
15023 // Conditional Far Branch Unsigned




1609     st->print("ldr  rscratch1, [guard]\n\t");
1610     st->print("dmb ishld\n\t");
1611     st->print("ldr  rscratch2, [rthread, #thread_disarmed_offset]\n\t");
1612     st->print("cmp  rscratch1, rscratch2\n\t");
1613     st->print("b.eq skip");
1614     st->print("\n\t");
1615     st->print("blr #nmethod_entry_barrier_stub\n\t");
1616     st->print("b skip\n\t");
1617     st->print("guard: int\n\t");
1618     st->print("\n\t");
1619     st->print("skip:\n\t");
1620   }
1621 }
1622 #endif
1623 
1624 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1625   Compile* C = ra_->C;
1626   C2_MacroAssembler _masm(&cbuf);
1627 
1628   // n.b. frame size includes space for return pc and rfp
1629   const int64_t framesize = C->output()->frame_size_in_bytes();
1630   assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");
1631 
1632   // insert a nop at the start of the prolog so we can patch in a
1633   // branch if we need to invalidate the method later
1634   __ nop();
1635 
1636   if (C->clinit_barrier_on_entry()) {
1637     assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
1638 
1639     Label L_skip_barrier;
1640 
1641     __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
1642     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
1643     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
1644     __ bind(L_skip_barrier);
1645   }
1646 
1647   int bangsize = C->output()->bang_size_in_bytes();
1648   if (C->output()->need_stack_bang(bangsize) && UseStackBanging)
1649     __ generate_stack_overflow_check(bangsize);


3101     int index = $mem$$index;
3102     int scale = $mem$$scale;
3103     int disp = $mem$$disp;
3104     if (index == -1) {
3105       __ prfm(Address(base, disp), PSTL1KEEP);
3106     } else {
3107       Register index_reg = as_Register(index);
3108       if (disp == 0) {
3109         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
3110       } else {
3111         __ lea(rscratch1, Address(base, disp));
3112         __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
3113       }
3114     }
3115   %}
3116 
3117   /// mov encodings
3118 
3119   enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
3120     C2_MacroAssembler _masm(&cbuf);
3121     uint32_t con = (uint32_t)$src$$constant;
3122     Register dst_reg = as_Register($dst$$reg);
3123     if (con == 0) {
3124       __ movw(dst_reg, zr);
3125     } else {
3126       __ movw(dst_reg, con);
3127     }
3128   %}
3129 
3130   enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
3131     C2_MacroAssembler _masm(&cbuf);
3132     Register dst_reg = as_Register($dst$$reg);
3133     uint64_t con = (uint64_t)$src$$constant;
3134     if (con == 0) {
3135       __ mov(dst_reg, zr);
3136     } else {
3137       __ mov(dst_reg, con);
3138     }
3139   %}
3140 
3141   enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
3142     C2_MacroAssembler _masm(&cbuf);
3143     Register dst_reg = as_Register($dst$$reg);
3144     address con = (address)$src$$constant;
3145     if (con == NULL || con == (address)1) {
3146       ShouldNotReachHere();
3147     } else {
3148       relocInfo::relocType rtype = $src->constant_reloc();
3149       if (rtype == relocInfo::oop_type) {
3150         __ movoop(dst_reg, (jobject)con, /*immediate*/true);
3151       } else if (rtype == relocInfo::metadata_type) {
3152         __ mov_metadata(dst_reg, (Metadata*)con);
3153       } else {
3154         assert(rtype == relocInfo::none, "unexpected reloc type");
3155         if (con < (address)(uintptr_t)os::vm_page_size()) {
3156           __ mov(dst_reg, con);
3157         } else {
3158           uint64_t offset;
3159           __ adrp(dst_reg, con, offset);
3160           __ add(dst_reg, dst_reg, offset);
3161         }
3162       }
3163     }
3164   %}
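// [Editor's note -- illustrative sketch, not part of aarch64.ad; register
// names are made up.]  For a relocation-free constant at or above the page
// size the encoding above goes page-relative: adrp() targets the 4 KiB page
// containing 'con' and hands the low 12 bits back through 'offset', which the
// following add applies.  Roughly, for con == 0x7fff12345678:
//
//   adrp x0, 0x7fff12345000    // PC-relative page of the target
//   add  x0, x0, #0x678        // in-page offset returned via 'offset'
//
// Constants below os::vm_page_size() are materialized with a plain mov.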
3165 
3166   enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
3167     C2_MacroAssembler _masm(&cbuf);
3168     Register dst_reg = as_Register($dst$$reg);
3169     __ mov(dst_reg, zr);
3170   %}
3171 
3172   enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
3173     C2_MacroAssembler _masm(&cbuf);
3174     Register dst_reg = as_Register($dst$$reg);
3175     __ mov(dst_reg, (uint64_t)1);
3176   %}
3177 
3178   enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
3179     C2_MacroAssembler _masm(&cbuf);
3180     __ load_byte_map_base($dst$$Register);
3181   %}
3182 
3183   enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
3184     C2_MacroAssembler _masm(&cbuf);
3185     Register dst_reg = as_Register($dst$$reg);
3186     address con = (address)$src$$constant;
3187     if (con == NULL) {
3188       ShouldNotReachHere();
3189     } else {
3190       relocInfo::relocType rtype = $src->constant_reloc();
3191       assert(rtype == relocInfo::oop_type, "unexpected reloc type");
3192       __ set_narrow_oop(dst_reg, (jobject)con);
3193     }
3194   %}
3195 


3280     C2_MacroAssembler _masm(&cbuf);
3281     Register reg1 = as_Register($src1$$reg);
3282     Register reg2 = as_Register($src2$$reg);
3283     __ cmpw(reg1, reg2);
3284   %}
3285 
3286   enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
3287     C2_MacroAssembler _masm(&cbuf);
3288     Register reg = as_Register($src1$$reg);
3289     int32_t val = $src2$$constant;
3290     if (val >= 0) {
3291       __ subsw(zr, reg, val);
3292     } else {
3293       __ addsw(zr, reg, -val);
3294     }
3295   %}
3296 
3297   enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
3298     C2_MacroAssembler _masm(&cbuf);
3299     Register reg1 = as_Register($src1$$reg);
3300     uint32_t val = (uint32_t)$src2$$constant;
3301     __ movw(rscratch1, val);
3302     __ cmpw(reg1, rscratch1);
3303   %}
3304 
3305   enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
3306     C2_MacroAssembler _masm(&cbuf);
3307     Register reg1 = as_Register($src1$$reg);
3308     Register reg2 = as_Register($src2$$reg);
3309     __ cmp(reg1, reg2);
3310   %}
3311 
3312   enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
3313     C2_MacroAssembler _masm(&cbuf);
3314     Register reg = as_Register($src1$$reg);
3315     int64_t val = $src2$$constant;
3316     if (val >= 0) {
3317       __ subs(zr, reg, val);
3318     } else if (val != -val) {
3319       __ adds(zr, reg, -val);
3320     } else {
3321       // aargh, Long.MIN_VALUE is a special case
3322       __ orr(rscratch1, zr, (uint64_t)val);
3323       __ subs(zr, reg, rscratch1);
3324     }
3325   %}
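// [Editor's note -- illustrative, not part of aarch64.ad.]  The special case
// exists because negating INT64_MIN wraps back to itself in two's complement,
// so 'val != -val' is false only for Long.MIN_VALUE and the add-the-negation
// trick cannot be used there:
//
//   int64_t val = INT64_MIN;   // 0x8000000000000000
//   // val == -val             // negation overflows back to the same value
//
// hence the constant is moved into rscratch1 (orr with zr) and compared with
// a register-register subs instead.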
3326 
3327   enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
3328     C2_MacroAssembler _masm(&cbuf);
3329     Register reg1 = as_Register($src1$$reg);
3330     uint64_t val = (uint64_t)$src2$$constant;
3331     __ mov(rscratch1, val);
3332     __ cmp(reg1, rscratch1);
3333   %}
3334 
3335   enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
3336     C2_MacroAssembler _masm(&cbuf);
3337     Register reg1 = as_Register($src1$$reg);
3338     Register reg2 = as_Register($src2$$reg);
3339     __ cmp(reg1, reg2);
3340   %}
3341 
3342   enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
3343     C2_MacroAssembler _masm(&cbuf);
3344     Register reg1 = as_Register($src1$$reg);
3345     Register reg2 = as_Register($src2$$reg);
3346     __ cmpw(reg1, reg2);
3347   %}
3348 
3349   enc_class aarch64_enc_testp(iRegP src) %{
3350     C2_MacroAssembler _masm(&cbuf);


4210   match(ConL);
4211 
4212   op_cost(0);
4213   format %{ %}
4214   interface(CONST_INTER);
4215 %}
4216 
4217 operand immLoffset16()
4218 %{
4219   predicate(Address::offset_ok_for_immed(n->get_long(), 4));
4220   match(ConL);
4221 
4222   op_cost(0);
4223   format %{ %}
4224   interface(CONST_INTER);
4225 %}
4226 
4227 // 32 bit integer valid for add sub immediate
4228 operand immIAddSub()
4229 %{
4230   predicate(Assembler::operand_valid_for_add_sub_immediate((int64_t)n->get_int()));
4231   match(ConI);
4232   op_cost(0);
4233   format %{ %}
4234   interface(CONST_INTER);
4235 %}
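// [Editor's note -- illustrative, not part of aarch64.ad; ok_for_add_sub is a
// hypothetical helper, not a HotSpot function.]  A64 add/sub immediates are a
// 12-bit unsigned field, optionally shifted left by 12, so a rough standalone
// model of the predicate above is:
//
//   static bool ok_for_add_sub(int64_t v) {
//     uint64_t u = (v < 0) ? 0 - (uint64_t)v : (uint64_t)v; // sign handled by add<->sub
//     return (u & ~(uint64_t)0xfff) == 0        // fits uimm12
//         || (u & ~(uint64_t)0xfff000) == 0;    // fits uimm12, LSL #12
//   }
//
// e.g. 0xfff and 0xfff000 qualify, 0x1234567 does not.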
4236 
4237 // 32 bit unsigned integer valid for logical immediate
4238 // TODO -- check this is right when e.g. the mask is 0x80000000
4239 operand immILog()
4240 %{
4241   predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (uint64_t)n->get_int()));
4242   match(ConI);
4243 
4244   op_cost(0);
4245   format %{ %}
4246   interface(CONST_INTER);
4247 %}
4248 
4249 // Integer operands 64 bit
4250 // 64 bit immediate
4251 operand immL()
4252 %{
4253   match(ConL);
4254 
4255   op_cost(0);
4256   format %{ %}
4257   interface(CONST_INTER);
4258 %}
4259 
4260 // 64 bit zero
4261 operand immL0()


4299   match(ConL);
4300 
4301   op_cost(0);
4302   format %{ %}
4303   interface(CONST_INTER);
4304 %}
4305 
4306 // 64 bit integer valid for add sub immediate
4307 operand immLAddSub()
4308 %{
4309   predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
4310   match(ConL);
4311   op_cost(0);
4312   format %{ %}
4313   interface(CONST_INTER);
4314 %}
4315 
4316 // 64 bit integer valid for logical immediate
4317 operand immLLog()
4318 %{
4319   predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (uint64_t)n->get_long()));
4320   match(ConL);
4321   op_cost(0);
4322   format %{ %}
4323   interface(CONST_INTER);
4324 %}
4325 
4326 // Long Immediate: low 32-bit mask
4327 operand immL_32bits()
4328 %{
4329   predicate(n->get_long() == 0xFFFFFFFFL);
4330   match(ConL);
4331   op_cost(0);
4332   format %{ %}
4333   interface(CONST_INTER);
4334 %}
4335 
4336 // Pointer operands
4337 // Pointer Immediate
4338 operand immP()
4339 %{


7095 
7096 // Load Int Constant
7097 instruct loadConI(iRegINoSp dst, immI src)
7098 %{
7099   match(Set dst src);
7100 
7101   ins_cost(INSN_COST);
7102   format %{ "mov $dst, $src\t# int" %}
7103 
7104   ins_encode( aarch64_enc_movw_imm(dst, src) );
7105 
7106   ins_pipe(ialu_imm);
7107 %}
7108 
7109 // Load Long Constant
7110 instruct loadConL(iRegLNoSp dst, immL src)
7111 %{
7112   match(Set dst src);
7113 
7114   ins_cost(INSN_COST);
7115   format %{ "mov $dst, $src\t# int64_t" %}
7116 
7117   ins_encode( aarch64_enc_mov_imm(dst, src) );
7118 
7119   ins_pipe(ialu_imm);
7120 %}
7121 
7122 // Load Pointer Constant
7123 
7124 instruct loadConP(iRegPNoSp dst, immP con)
7125 %{
7126   match(Set dst con);
7127 
7128   ins_cost(INSN_COST * 4);
7129   format %{
7130     "mov  $dst, $con\t# ptr\n\t"
7131   %}
7132 
7133   ins_encode(aarch64_enc_mov_p(dst, con));
7134 
7135   ins_pipe(ialu_imm);


8231   ins_cost(VOLATILE_REF_COST*100);
8232 
8233   format %{ "membar_volatile\n\t"
8234              "dmb ish"%}
8235 
8236   ins_encode %{
8237     __ block_comment("membar_volatile");
8238     __ membar(Assembler::StoreLoad);
8239   %}
8240 
8241   ins_pipe(pipe_serial);
8242 %}
8243 
8244 // ============================================================================
8245 // Cast/Convert Instructions
8246 
8247 instruct castX2P(iRegPNoSp dst, iRegL src) %{
8248   match(Set dst (CastX2P src));
8249 
8250   ins_cost(INSN_COST);
8251   format %{ "mov $dst, $src\t# int64_t -> ptr" %}
8252 
8253   ins_encode %{
8254     if ($dst$$reg != $src$$reg) {
8255       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
8256     }
8257   %}
8258 
8259   ins_pipe(ialu_reg);
8260 %}
8261 
8262 instruct castP2X(iRegLNoSp dst, iRegP src) %{
8263   match(Set dst (CastP2X src));
8264 
8265   ins_cost(INSN_COST);
8266   format %{ "mov $dst, $src\t# ptr -> int64_t" %}
8267 
8268   ins_encode %{
8269     if ($dst$$reg != $src$$reg) {
8270       __ mov(as_Register($dst$$reg), as_Register($src$$reg));
8271     }
8272   %}
8273 
8274   ins_pipe(ialu_reg);
8275 %}
8276 
8277 // Convert oop into int for vectors alignment masking
8278 instruct convP2I(iRegINoSp dst, iRegP src) %{
8279   match(Set dst (ConvL2I (CastP2X src)));
8280 
8281   ins_cost(INSN_COST);
8282   format %{ "movw $dst, $src\t# ptr -> int" %}
8283   ins_encode %{
8284     __ movw($dst$$Register, $src$$Register);
8285   %}
8286 


8584 
8585  format %{
8586     "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
8587     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8588  %}
8589 
8590  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
8591             aarch64_enc_cset_eq(res));
8592 
8593   ins_pipe(pipe_slow);
8594 %}
8595 
8596 instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
8597 
8598   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
8599   ins_cost(2 * VOLATILE_REF_COST);
8600 
8601   effect(KILL cr);
8602 
8603  format %{
8604     "cmpxchg $mem, $oldval, $newval\t# (int64_t) if $mem == $oldval then $mem <-- $newval"
8605     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8606  %}
8607 
8608  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
8609             aarch64_enc_cset_eq(res));
8610 
8611   ins_pipe(pipe_slow);
8612 %}
8613 
8614 instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
8615 
8616   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
8617   predicate(n->as_LoadStore()->barrier_data() == 0);
8618   ins_cost(2 * VOLATILE_REF_COST);
8619 
8620   effect(KILL cr);
8621 
8622  format %{
8623     "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
8624     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"


8699  format %{
8700     "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
8701     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8702  %}
8703 
8704  ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
8705             aarch64_enc_cset_eq(res));
8706 
8707   ins_pipe(pipe_slow);
8708 %}
8709 
8710 instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
8711 
8712   predicate(needs_acquiring_load_exclusive(n));
8713   match(Set res (CompareAndSwapL mem (Binary oldval newval)));
8714   ins_cost(VOLATILE_REF_COST);
8715 
8716   effect(KILL cr);
8717 
8718  format %{
8719     "cmpxchg_acq $mem, $oldval, $newval\t# (int64_t) if $mem == $oldval then $mem <-- $newval"
8720     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
8721  %}
8722 
8723  ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
8724             aarch64_enc_cset_eq(res));
8725 
8726   ins_pipe(pipe_slow);
8727 %}
8728 
8729 instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
8730 
8731   predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
8732   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
8733   ins_cost(VOLATILE_REF_COST);
8734 
8735   effect(KILL cr);
8736 
8737  format %{
8738     "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
8739     "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"


8816 instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
8817   match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
8818   ins_cost(2 * VOLATILE_REF_COST);
8819   effect(TEMP_DEF res, KILL cr);
8820   format %{
8821     "cmpxchgw $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
8822   %}
8823   ins_encode %{
8824     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8825                Assembler::word, /*acquire*/ false, /*release*/ true,
8826                /*weak*/ false, $res$$Register);
8827   %}
8828   ins_pipe(pipe_slow);
8829 %}
8830 
8831 instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
8832   match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
8833   ins_cost(2 * VOLATILE_REF_COST);
8834   effect(TEMP_DEF res, KILL cr);
8835   format %{
8836     "cmpxchg $res = $mem, $oldval, $newval\t# (int64_t) if $mem == $oldval then $mem <-- $newval"
8837   %}
8838   ins_encode %{
8839     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8840                Assembler::xword, /*acquire*/ false, /*release*/ true,
8841                /*weak*/ false, $res$$Register);
8842   %}
8843   ins_pipe(pipe_slow);
8844 %}
8845 
8846 instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
8847   match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
8848   ins_cost(2 * VOLATILE_REF_COST);
8849   effect(TEMP_DEF res, KILL cr);
8850   format %{
8851     "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
8852   %}
8853   ins_encode %{
8854     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8855                Assembler::word, /*acquire*/ false, /*release*/ true,
8856                /*weak*/ false, $res$$Register);


8914   match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
8915   ins_cost(VOLATILE_REF_COST);
8916   effect(TEMP_DEF res, KILL cr);
8917   format %{
8918     "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
8919   %}
8920   ins_encode %{
8921     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8922                Assembler::word, /*acquire*/ true, /*release*/ true,
8923                /*weak*/ false, $res$$Register);
8924   %}
8925   ins_pipe(pipe_slow);
8926 %}
8927 
8928 instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
8929   predicate(needs_acquiring_load_exclusive(n));
8930   match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
8931   ins_cost(VOLATILE_REF_COST);
8932   effect(TEMP_DEF res, KILL cr);
8933   format %{
8934     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (int64_t) if $mem == $oldval then $mem <-- $newval"
8935   %}
8936   ins_encode %{
8937     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
8938                Assembler::xword, /*acquire*/ true, /*release*/ true,
8939                /*weak*/ false, $res$$Register);
8940   %}
8941   ins_pipe(pipe_slow);
8942 %}
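// [Editor's note -- illustrative, not part of aarch64.ad.]  The only change
// from the non-acquiring rules above is /*acquire*/ true, which asks
// MacroAssembler::cmpxchg for an acquiring load half (e.g. an ldaxr/stlxr
// loop, or casal where LSE is available).  Treating $mem as a
// std::atomic<int64_t>, the node behaves roughly like:
//
//   // strong CAS, acq_rel ordering; res receives the old value
//   int64_t expected = oldval;
//   mem.compare_exchange_strong(expected, newval,
//                               std::memory_order_acq_rel,
//                               std::memory_order_acquire);
//   res = expected;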
8943 
8944 
8945 instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
8946   predicate(needs_acquiring_load_exclusive(n));
8947   match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
8948   ins_cost(VOLATILE_REF_COST);
8949   effect(TEMP_DEF res, KILL cr);
8950   format %{
8951     "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
8952   %}
8953   ins_encode %{
8954     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,


9013   ins_cost(2 * VOLATILE_REF_COST);
9014   effect(KILL cr);
9015   format %{
9016     "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
9017     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9018   %}
9019   ins_encode %{
9020     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
9021                Assembler::word, /*acquire*/ false, /*release*/ true,
9022                /*weak*/ true, noreg);
9023     __ csetw($res$$Register, Assembler::EQ);
9024   %}
9025   ins_pipe(pipe_slow);
9026 %}
9027 
9028 instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
9029   match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
9030   ins_cost(2 * VOLATILE_REF_COST);
9031   effect(KILL cr);
9032   format %{
9033     "cmpxchg $res = $mem, $oldval, $newval\t# (int64_t, weak) if $mem == $oldval then $mem <-- $newval"
9034     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9035   %}
9036   ins_encode %{
9037     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
9038                Assembler::xword, /*acquire*/ false, /*release*/ true,
9039                /*weak*/ true, noreg);
9040     __ csetw($res$$Register, Assembler::EQ);
9041   %}
9042   ins_pipe(pipe_slow);
9043 %}
9044 
9045 instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
9046   match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
9047   ins_cost(2 * VOLATILE_REF_COST);
9048   effect(KILL cr);
9049   format %{
9050     "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
9051     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9052   %}
9053   ins_encode %{


9120   effect(KILL cr);
9121   format %{
9122     "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
9123     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9124   %}
9125   ins_encode %{
9126     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
9127                Assembler::word, /*acquire*/ true, /*release*/ true,
9128                /*weak*/ true, noreg);
9129     __ csetw($res$$Register, Assembler::EQ);
9130   %}
9131   ins_pipe(pipe_slow);
9132 %}
9133 
9134 instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
9135   predicate(needs_acquiring_load_exclusive(n));
9136   match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
9137   ins_cost(VOLATILE_REF_COST);
9138   effect(KILL cr);
9139   format %{
9140     "cmpxchg_acq $res = $mem, $oldval, $newval\t# (int64_t, weak) if $mem == $oldval then $mem <-- $newval"
9141     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9142   %}
9143   ins_encode %{
9144     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
9145                Assembler::xword, /*acquire*/ true, /*release*/ true,
9146                /*weak*/ true, noreg);
9147     __ csetw($res$$Register, Assembler::EQ);
9148   %}
9149   ins_pipe(pipe_slow);
9150 %}
9151 
9152 instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
9153   predicate(needs_acquiring_load_exclusive(n));
9154   match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
9155   ins_cost(VOLATILE_REF_COST);
9156   effect(KILL cr);
9157   format %{
9158     "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
9159     "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
9160   %}


9642   ins_cost(INSN_COST * 2);
9643   format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}
9644 
9645   ins_encode %{
9646     // equivalently
9647     // cset(as_Register($dst$$reg),
9648     //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
9649     __ csincw(as_Register($dst$$reg),
9650              zr,
9651              zr,
9652              (Assembler::Condition)$cmp$$cmpcode);
9653   %}
9654 
9655   ins_pipe(icond_none);
9656 %}
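// [Editor's note -- illustrative, not part of aarch64.ad; w0 is a made-up
// register.]  CSINC Wd, Wn, Wm, cond selects Wn when cond holds and Wm + 1
// otherwise, so with both sources wired to zr the result is 0 when $cmp holds
// and 1 when it does not -- the same bits as the commented-out
// cset-with-negated-condition alternative above, e.g.:
//
//   csinc w0, wzr, wzr, lt   // w0 = (flags LT) ? 0 : 1, same as: cset w0, ge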
9657 
9658 instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
9659   match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));
9660 
9661   ins_cost(INSN_COST * 2);
9662   format %{ "csel $dst, $src2, $src1 $cmp\t# signed, int64_t"  %}
9663 
9664   ins_encode %{
9665     __ csel(as_Register($dst$$reg),
9666             as_Register($src2$$reg),
9667             as_Register($src1$$reg),
9668             (Assembler::Condition)$cmp$$cmpcode);
9669   %}
9670 
9671   ins_pipe(icond_reg_reg);
9672 %}
9673 
9674 instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
9675   match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));
9676 
9677   ins_cost(INSN_COST * 2);
9678   format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, int64_t"  %}
9679 
9680   ins_encode %{
9681     __ csel(as_Register($dst$$reg),
9682             as_Register($src2$$reg),
9683             as_Register($src1$$reg),
9684             (Assembler::Condition)$cmp$$cmpcode);
9685   %}
9686 
9687   ins_pipe(icond_reg_reg);
9688 %}
9689 
9690 // special cases where one arg is zero
9691 
9692 instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
9693   match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));
9694 
9695   ins_cost(INSN_COST * 2);
9696   format %{ "csel $dst, zr, $src $cmp\t# signed, int64_t"  %}
9697 
9698   ins_encode %{
9699     __ csel(as_Register($dst$$reg),
9700             zr,
9701             as_Register($src$$reg),
9702             (Assembler::Condition)$cmp$$cmpcode);
9703   %}
9704 
9705   ins_pipe(icond_reg);
9706 %}
9707 
9708 instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
9709   match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));
9710 
9711   ins_cost(INSN_COST * 2);
9712   format %{ "csel $dst, zr, $src $cmp\t# unsigned, int64_t"  %}
9713 
9714   ins_encode %{
9715     __ csel(as_Register($dst$$reg),
9716             zr,
9717             as_Register($src$$reg),
9718             (Assembler::Condition)$cmp$$cmpcode);
9719   %}
9720 
9721   ins_pipe(icond_reg);
9722 %}
9723 
9724 instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
9725   match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));
9726 
9727   ins_cost(INSN_COST * 2);
9728   format %{ "csel $dst, $src, zr $cmp\t# signed, int64_t"  %}
9729 
9730   ins_encode %{
9731     __ csel(as_Register($dst$$reg),
9732             as_Register($src$$reg),
9733             zr,
9734             (Assembler::Condition)$cmp$$cmpcode);
9735   %}
9736 
9737   ins_pipe(icond_reg);
9738 %}
9739 
9740 instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
9741   match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));
9742 
9743   ins_cost(INSN_COST * 2);
9744   format %{ "csel $dst, $src, zr $cmp\t# unsigned, int64_t"  %}
9745 
9746   ins_encode %{
9747     __ csel(as_Register($dst$$reg),
9748             as_Register($src$$reg),
9749             zr,
9750             (Assembler::Condition)$cmp$$cmpcode);
9751   %}
9752 
9753   ins_pipe(icond_reg);
9754 %}
9755 
9756 instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
9757   match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));
9758 
9759   ins_cost(INSN_COST * 2);
9760   format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}
9761 
9762   ins_encode %{
9763     __ csel(as_Register($dst$$reg),
9764             as_Register($src2$$reg),


10269 instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
10270   match(Set dst (SubI zero src));
10271 
10272   ins_cost(INSN_COST);
10273   format %{ "negw $dst, $src\t# int" %}
10274 
10275   ins_encode %{
10276     __ negw(as_Register($dst$$reg),
10277             as_Register($src$$reg));
10278   %}
10279 
10280   ins_pipe(ialu_reg);
10281 %}
10282 
10283 // Long Negation
10284 
10285 instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
10286   match(Set dst (SubL zero src));
10287 
10288   ins_cost(INSN_COST);
10289   format %{ "neg $dst, $src\t# int64_t" %}
10290 
10291   ins_encode %{
10292     __ neg(as_Register($dst$$reg),
10293            as_Register($src$$reg));
10294   %}
10295 
10296   ins_pipe(ialu_reg);
10297 %}
10298 
10299 // Integer Multiply
10300 
10301 instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
10302   match(Set dst (MulI src1 src2));
10303 
10304   ins_cost(INSN_COST * 3);
10305   format %{ "mulw  $dst, $src1, $src2" %}
10306 
10307   ins_encode %{
10308     __ mulw(as_Register($dst$$reg),
10309             as_Register($src1$$reg),


11903     int r = (rshift - lshift) & 31;
11904     __ ubfmw(as_Register($dst$$reg),
11905             as_Register($src$$reg),
11906             r, s);
11907   %}
11908 
11909   ins_pipe(ialu_reg_shift);
11910 %}
11911 // Bitfield extract with shift & mask
11912 
11913 instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
11914 %{
11915   match(Set dst (AndI (URShiftI src rshift) mask));
11916   // Make sure we are not going to exceed what ubfxw can do.
11917   predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));
11918 
11919   ins_cost(INSN_COST);
11920   format %{ "ubfxw $dst, $src, $rshift, $mask" %}
11921   ins_encode %{
11922     int rshift = $rshift$$constant & 31;
11923     int64_t mask = $mask$$constant;
11924     int width = exact_log2(mask+1);
11925     __ ubfxw(as_Register($dst$$reg),
11926             as_Register($src$$reg), rshift, width);
11927   %}
11928   ins_pipe(ialu_reg_shift);
11929 %}
11930 instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
11931 %{
11932   match(Set dst (AndL (URShiftL src rshift) mask));
11933   // Make sure we are not going to exceed what ubfx can do.
11934   predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));
11935 
11936   ins_cost(INSN_COST);
11937   format %{ "ubfx $dst, $src, $rshift, $mask" %}
11938   ins_encode %{
11939     int rshift = $rshift$$constant & 63;
11940     int64_t mask = $mask$$constant;
11941     int width = exact_log2_long(mask+1);
11942     __ ubfx(as_Register($dst$$reg),
11943             as_Register($src$$reg), rshift, width);
11944   %}
11945   ins_pipe(ialu_reg_shift);
11946 %}
11947 
11948 // We can use ubfx when extending an And with a mask when we know mask
11949 // is positive.  We know that because immI_bitmask guarantees it.
11950 instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
11951 %{
11952   match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
11953   // Make sure we are not going to exceed what ubfxw can do.
11954   predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));
11955 
11956   ins_cost(INSN_COST * 2);
11957   format %{ "ubfx $dst, $src, $rshift, $mask" %}
11958   ins_encode %{
11959     int rshift = $rshift$$constant & 31;
11960     int64_t mask = $mask$$constant;
11961     int width = exact_log2(mask+1);
11962     __ ubfx(as_Register($dst$$reg),
11963             as_Register($src$$reg), rshift, width);
11964   %}
11965   ins_pipe(ialu_reg_shift);
11966 %}
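// [Editor's note -- illustrative, not part of aarch64.ad; registers made up.]
// Worked example for the ubfx rules above: for (x >>> 7) & 0xFF the mask is
// 2^8 - 1, so width = exact_log2(0xFF + 1) = 8 and the match emits
//
//   ubfx x0, x1, #7, #8    // dst = bits [14:7] of src, zero-extended
//
// immI_bitmask / immL_bitmask only accept masks of the form 2^k - 1, which is
// what makes the width computation well defined.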
11967 
11968 // We can use ubfiz when masking by a positive number and then left shifting the result.
11969 // We know that the mask is positive because immI_bitmask guarantees it.
11970 instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
11971 %{
11972   match(Set dst (LShiftI (AndI src mask) lshift));
11973   predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));
11974 
11975   ins_cost(INSN_COST);
11976   format %{ "ubfizw $dst, $src, $lshift, $mask" %}
11977   ins_encode %{
11978     int lshift = $lshift$$constant & 31;
11979     int64_t mask = $mask$$constant;
11980     int width = exact_log2(mask+1);
11981     __ ubfizw(as_Register($dst$$reg),
11982           as_Register($src$$reg), lshift, width);
11983   %}
11984   ins_pipe(ialu_reg_shift);
11985 %}
11986 // We can use ubfiz when masking by a positive number and then left shifting the result.
11987 // We know that the mask is positive because immL_bitmask guarantees it.
11988 instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
11989 %{
11990   match(Set dst (LShiftL (AndL src mask) lshift));
11991   predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));
11992 
11993   ins_cost(INSN_COST);
11994   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
11995   ins_encode %{
11996     int lshift = $lshift$$constant & 63;
11997     int64_t mask = $mask$$constant;
11998     int width = exact_log2_long(mask+1);
11999     __ ubfiz(as_Register($dst$$reg),
12000           as_Register($src$$reg), lshift, width);
12001   %}
12002   ins_pipe(ialu_reg_shift);
12003 %}
12004 
12005 // If there is a convert I to L block between an AndI and a LShiftL, we can also match ubfiz
12006 instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
12007 %{
12008   match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
12009   predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));
12010 
12011   ins_cost(INSN_COST);
12012   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
12013   ins_encode %{
12014     int lshift = $lshift$$constant & 63;
12015     int64_t mask = $mask$$constant;
12016     int width = exact_log2(mask+1);
12017     __ ubfiz(as_Register($dst$$reg),
12018              as_Register($src$$reg), lshift, width);
12019   %}
12020   ins_pipe(ialu_reg_shift);
12021 %}
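// [Editor's note -- illustrative, not part of aarch64.ad; registers made up.]
// ubfiz is the mirror image of ubfx: for (x & 0x3F) << 5 the width is
// exact_log2(0x3F + 1) = 6 and the rules above emit
//
//   ubfiz x0, x1, #5, #6   // dst = (src & 0x3f) << 5, all other bits zero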
12022 
12023 // Rotations
12024 
12025 instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
12026 %{
12027   match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
12028   predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));
12029 
12030   ins_cost(INSN_COST);
12031   format %{ "extr $dst, $src1, $src2, #$rshift" %}
12032 
12033   ins_encode %{
12034     __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
12035             $rshift$$constant & 63);


13232 
13233   ins_cost(INSN_COST);
13234   ins_encode %{
13235     __ andw(as_Register($dst$$reg),
13236             as_Register($src1$$reg),
13237             as_Register($src2$$reg));
13238   %}
13239 
13240   ins_pipe(ialu_reg_reg);
13241 %}
13242 
13243 instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
13244   match(Set dst (AndI src1 src2));
13245 
13246   format %{ "andw  $dst, $src1, $src2\t# int" %}
13247 
13248   ins_cost(INSN_COST);
13249   ins_encode %{
13250     __ andw(as_Register($dst$$reg),
13251             as_Register($src1$$reg),
13252             (uint64_t)($src2$$constant));
13253   %}
13254 
13255   ins_pipe(ialu_reg_imm);
13256 %}
13257 
13258 // Or Instructions
13259 
13260 instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
13261   match(Set dst (OrI src1 src2));
13262 
13263   format %{ "orrw  $dst, $src1, $src2\t# int" %}
13264 
13265   ins_cost(INSN_COST);
13266   ins_encode %{
13267     __ orrw(as_Register($dst$$reg),
13268             as_Register($src1$$reg),
13269             as_Register($src2$$reg));
13270   %}
13271 
13272   ins_pipe(ialu_reg_reg);
13273 %}
13274 
13275 instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
13276   match(Set dst (OrI src1 src2));
13277 
13278   format %{ "orrw  $dst, $src1, $src2\t# int" %}
13279 
13280   ins_cost(INSN_COST);
13281   ins_encode %{
13282     __ orrw(as_Register($dst$$reg),
13283             as_Register($src1$$reg),
13284             (uint64_t)($src2$$constant));
13285   %}
13286 
13287   ins_pipe(ialu_reg_imm);
13288 %}
13289 
13290 // Xor Instructions
13291 
13292 instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
13293   match(Set dst (XorI src1 src2));
13294 
13295   format %{ "eorw  $dst, $src1, $src2\t# int" %}
13296 
13297   ins_cost(INSN_COST);
13298   ins_encode %{
13299     __ eorw(as_Register($dst$$reg),
13300             as_Register($src1$$reg),
13301             as_Register($src2$$reg));
13302   %}
13303 
13304   ins_pipe(ialu_reg_reg);
13305 %}
13306 
13307 instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
13308   match(Set dst (XorI src1 src2));
13309 
13310   format %{ "eorw  $dst, $src1, $src2\t# int" %}
13311 
13312   ins_cost(INSN_COST);
13313   ins_encode %{
13314     __ eorw(as_Register($dst$$reg),
13315             as_Register($src1$$reg),
13316             (uint64_t)($src2$$constant));
13317   %}
13318 
13319   ins_pipe(ialu_reg_imm);
13320 %}
13321 
13322 // Long Logical Instructions
13323 // TODO
13324 
13325 instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
13326   match(Set dst (AndL src1 src2));
13327 
13328   format %{ "and  $dst, $src1, $src2\t# int64_t" %}
13329 
13330   ins_cost(INSN_COST);
13331   ins_encode %{
13332     __ andr(as_Register($dst$$reg),
13333             as_Register($src1$$reg),
13334             as_Register($src2$$reg));
13335   %}
13336 
13337   ins_pipe(ialu_reg_reg);
13338 %}
13339 
13340 instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
13341   match(Set dst (AndL src1 src2));
13342 
13343   format %{ "and  $dst, $src1, $src2\t# int64_t" %}
13344 
13345   ins_cost(INSN_COST);
13346   ins_encode %{
13347     __ andr(as_Register($dst$$reg),
13348             as_Register($src1$$reg),
13349             (uint64_t)($src2$$constant));
13350   %}
13351 
13352   ins_pipe(ialu_reg_imm);
13353 %}
13354 
13355 // Or Instructions
13356 
13357 instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
13358   match(Set dst (OrL src1 src2));
13359 
13360   format %{ "orr  $dst, $src1, $src2\t# int64_t" %}
13361 
13362   ins_cost(INSN_COST);
13363   ins_encode %{
13364     __ orr(as_Register($dst$$reg),
13365            as_Register($src1$$reg),
13366            as_Register($src2$$reg));
13367   %}
13368 
13369   ins_pipe(ialu_reg_reg);
13370 %}
13371 
13372 instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
13373   match(Set dst (OrL src1 src2));
13374 
13375   format %{ "orr  $dst, $src1, $src2\t# int64_t" %}
13376 
13377   ins_cost(INSN_COST);
13378   ins_encode %{
13379     __ orr(as_Register($dst$$reg),
13380            as_Register($src1$$reg),
13381            (uint64_t)($src2$$constant));
13382   %}
13383 
13384   ins_pipe(ialu_reg_imm);
13385 %}
13386 
13387 // Xor Instructions
13388 
13389 instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
13390   match(Set dst (XorL src1 src2));
13391 
13392   format %{ "eor  $dst, $src1, $src2\t# int64_t" %}
13393 
13394   ins_cost(INSN_COST);
13395   ins_encode %{
13396     __ eor(as_Register($dst$$reg),
13397            as_Register($src1$$reg),
13398            as_Register($src2$$reg));
13399   %}
13400 
13401   ins_pipe(ialu_reg_reg);
13402 %}
13403 
13404 instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
13405   match(Set dst (XorL src1 src2));
13406 
13407   ins_cost(INSN_COST);
13408   format %{ "eor  $dst, $src1, $src2\t# int64_t" %}
13409 
13410   ins_encode %{
13411     __ eor(as_Register($dst$$reg),
13412            as_Register($src1$$reg),
13413            (uint64_t)($src2$$constant));
13414   %}
13415 
13416   ins_pipe(ialu_reg_imm);
13417 %}
13418 
13419 instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
13420 %{
13421   match(Set dst (ConvI2L src));
13422 
13423   ins_cost(INSN_COST);
13424   format %{ "sxtw  $dst, $src\t# i2l" %}
13425   ins_encode %{
13426     __ sbfm($dst$$Register, $src$$Register, 0, 31);
13427   %}
13428   ins_pipe(ialu_reg_shift);
13429 %}
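// [Editor's note -- illustrative, not part of aarch64.ad.]  SXTW Xd, Wn is
// the architectural alias of SBFM Xd, Xn, #0, #31, so the sbfm above is
// exactly the sign extension named in the format string:
//
//   sbfm x0, x1, #0, #31   // disassembles as: sxtw x0, w1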
13430 
13431 // this pattern occurs in bigmath arithmetic
13432 instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
13433 %{


13842 // ============================================================================
13843 // clearing of an array
13844 
13845 instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
13846 %{
13847   match(Set dummy (ClearArray cnt base));
13848   effect(USE_KILL cnt, USE_KILL base);
13849 
13850   ins_cost(4 * INSN_COST);
13851   format %{ "ClearArray $cnt, $base" %}
13852 
13853   ins_encode %{
13854     __ zero_words($base$$Register, $cnt$$Register);
13855   %}
13856 
13857   ins_pipe(pipe_class_memory);
13858 %}
13859 
13860 instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
13861 %{
13862   predicate((uint64_t)n->in(2)->get_long()
13863             < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
13864   match(Set dummy (ClearArray cnt base));
13865   effect(USE_KILL base);
13866 
13867   ins_cost(4 * INSN_COST);
13868   format %{ "ClearArray $cnt, $base" %}
13869 
13870   ins_encode %{
13871     __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
13872   %}
13873 
13874   ins_pipe(pipe_class_memory);
13875 %}
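// [Editor's note -- illustrative, not part of aarch64.ad.]  The ClearArray
// count is in heap words while BlockZeroingLowLimit is in bytes, hence the
// >> LogBytesPerWord in the predicate: with 8-byte words a limit of N bytes
// routes arrays shorter than N/8 words to this constant-length form.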
13876 
13877 // ============================================================================
13878 // Overflow Math Instructions
13879 
13880 instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13881 %{
13882   match(Set cr (OverflowAddI op1 op2));
13883 
13884   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13885   ins_cost(INSN_COST);
13886   ins_encode %{
13887     __ cmnw($op1$$Register, $op2$$Register);
13888   %}
13889 
13890   ins_pipe(icmp_reg_reg);
13891 %}
13892 
13893 instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13894 %{
13895   match(Set cr (OverflowAddI op1 op2));
13896 
13897   format %{ "cmnw  $op1, $op2\t# overflow check int" %}
13898   ins_cost(INSN_COST);
13899   ins_encode %{
13900     __ cmnw($op1$$Register, $op2$$constant);
13901   %}
13902 
13903   ins_pipe(icmp_reg_imm);
13904 %}
13905 
13906 instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13907 %{
13908   match(Set cr (OverflowAddL op1 op2));
13909 
13910   format %{ "cmn   $op1, $op2\t# overflow check int64_t" %}
13911   ins_cost(INSN_COST);
13912   ins_encode %{
13913     __ cmn($op1$$Register, $op2$$Register);
13914   %}
13915 
13916   ins_pipe(icmp_reg_reg);
13917 %}
13918 
13919 instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13920 %{
13921   match(Set cr (OverflowAddL op1 op2));
13922 
13923   format %{ "cmn   $op1, $op2\t# overflow check int64_t" %}
13924   ins_cost(INSN_COST);
13925   ins_encode %{
13926     __ cmn($op1$$Register, $op2$$constant);
13927   %}
13928 
13929   ins_pipe(icmp_reg_imm);
13930 %}
13931 
13932 instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
13933 %{
13934   match(Set cr (OverflowSubI op1 op2));
13935 
13936   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13937   ins_cost(INSN_COST);
13938   ins_encode %{
13939     __ cmpw($op1$$Register, $op2$$Register);
13940   %}
13941 
13942   ins_pipe(icmp_reg_reg);
13943 %}
13944 
13945 instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
13946 %{
13947   match(Set cr (OverflowSubI op1 op2));
13948 
13949   format %{ "cmpw  $op1, $op2\t# overflow check int" %}
13950   ins_cost(INSN_COST);
13951   ins_encode %{
13952     __ cmpw($op1$$Register, $op2$$constant);
13953   %}
13954 
13955   ins_pipe(icmp_reg_imm);
13956 %}
13957 
13958 instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
13959 %{
13960   match(Set cr (OverflowSubL op1 op2));
13961 
13962   format %{ "cmp   $op1, $op2\t# overflow check int64_t" %}
13963   ins_cost(INSN_COST);
13964   ins_encode %{
13965     __ cmp($op1$$Register, $op2$$Register);
13966   %}
13967 
13968   ins_pipe(icmp_reg_reg);
13969 %}
13970 
13971 instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
13972 %{
13973   match(Set cr (OverflowSubL op1 op2));
13974 
13975   format %{ "cmp   $op1, $op2\t# overflow check int64_t" %}
13976   ins_cost(INSN_COST);
13977   ins_encode %{
13978     __ subs(zr, $op1$$Register, $op2$$constant);
13979   %}
13980 
13981   ins_pipe(icmp_reg_imm);
13982 %}
13983 
13984 instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
13985 %{
13986   match(Set cr (OverflowSubI zero op1));
13987 
13988   format %{ "cmpw  zr, $op1\t# overflow check int" %}
13989   ins_cost(INSN_COST);
13990   ins_encode %{
13991     __ cmpw(zr, $op1$$Register);
13992   %}
13993 
13994   ins_pipe(icmp_reg_imm);
13995 %}
13996 
13997 instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
13998 %{
13999   match(Set cr (OverflowSubL zero op1));
14000 
14001   format %{ "cmp   zr, $op1\t# overflow check int64_t" %}
14002   ins_cost(INSN_COST);
14003   ins_encode %{
14004     __ cmp(zr, $op1$$Register);
14005   %}
14006 
14007   ins_pipe(icmp_reg_imm);
14008 %}
14009 
14010 instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
14011 %{
14012   match(Set cr (OverflowMulI op1 op2));
14013 
14014   format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
14015             "cmp   rscratch1, rscratch1, sxtw\n\t"
14016             "movw  rscratch1, #0x80000000\n\t"
14017             "cselw rscratch1, rscratch1, zr, NE\n\t"
14018             "cmpw  rscratch1, #1" %}
14019   ins_cost(5 * INSN_COST);
14020   ins_encode %{
14021     __ smull(rscratch1, $op1$$Register, $op2$$Register);


14037 
14038   format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
14039             "cmp   rscratch1, rscratch1, sxtw\n\t"
14040             "b$cmp   $labl" %}
14041   ins_cost(3 * INSN_COST); // Branch is rarely taken, so cost it at INSN_COST rather than BRANCH_COST
14042   ins_encode %{
14043     Label* L = $labl$$label;
14044     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14045     __ smull(rscratch1, $op1$$Register, $op2$$Register);
14046     __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
14047     __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
14048   %}
14049 
14050   ins_pipe(pipe_serial);
14051 %}
14052 
14053 instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
14054 %{
14055   match(Set cr (OverflowMulL op1 op2));
14056 
14057   format %{ "mul   rscratch1, $op1, $op2\t#overflow check int64_t\n\t"
14058             "smulh rscratch2, $op1, $op2\n\t"
14059             "cmp   rscratch2, rscratch1, ASR #63\n\t"
14060             "movw  rscratch1, #0x80000000\n\t"
14061             "cselw rscratch1, rscratch1, zr, NE\n\t"
14062             "cmpw  rscratch1, #1" %}
14063   ins_cost(6 * INSN_COST);
14064   ins_encode %{
14065     __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
14066     __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
14067     __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
14068     __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
14069     __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
14070     __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
14071   %}
14072 
14073   ins_pipe(pipe_slow);
14074 %}
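// [Editor's note -- illustrative, not part of aarch64.ad; mul_overflows_s64
// is a hypothetical name and __int128 assumes a GCC/Clang-style compiler.]
// A standalone C++ model of the check above:
//
//   static bool mul_overflows_s64(int64_t a, int64_t b) {
//     __int128 p  = (__int128)a * b;
//     int64_t  lo = (int64_t)p;           // what mul   leaves in rscratch1
//     int64_t  hi = (int64_t)(p >> 64);   // what smulh leaves in rscratch2
//     return hi != (lo >> 63);            // cmp rscratch2, rscratch1, ASR #63
//   }
//
// The movw/cselw/cmpw tail then converts that NE result into the overflow
// flag (VS) that OverflowMulL users test, as the inline comments note.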
14075 
14076 instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
14077 %{
14078   match(If cmp (OverflowMulL op1 op2));
14079   predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
14080             || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
14081   effect(USE labl, KILL cr);
14082 
14083   format %{ "mul   rscratch1, $op1, $op2\t#overflow check int64_t\n\t"
14084             "smulh rscratch2, $op1, $op2\n\t"
14085             "cmp   rscratch2, rscratch1, ASR #63\n\t"
14086             "b$cmp $labl" %}
14087   ins_cost(4 * INSN_COST); // Branch is rarely taken, so cost it at INSN_COST rather than BRANCH_COST
14088   ins_encode %{
14089     Label* L = $labl$$label;
14090     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14091     __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
14092     __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
14093     __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
14094     __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
14095   %}
14096 
14097   ins_pipe(pipe_serial);
14098 %}
14099 
14100 // ============================================================================
14101 // Compare Instructions
14102 
14103 instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)


14827   format %{ "cb$cmp   $op1, $labl" %}
14828   ins_encode %{
14829     Label* L = $labl$$label;
14830     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14831     if (cond == Assembler::EQ || cond == Assembler::LS)
14832       __ cbz($op1$$Register, *L);
14833     else
14834       __ cbnz($op1$$Register, *L);
14835   %}
14836   ins_pipe(pipe_cmp_branch);
14837 %}
14838 
14839 // Test bit and Branch
14840 
14841 // Patterns for short (< 32KiB) variants
14842 instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
14843   match(If cmp (CmpL op1 op2));
14844   effect(USE labl);
14845 
14846   ins_cost(BRANCH_COST);
14847   format %{ "cb$cmp   $op1, $labl # int64_t" %}
14848   ins_encode %{
14849     Label* L = $labl$$label;
14850     Assembler::Condition cond =
14851       ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
14852     __ tbr(cond, $op1$$Register, 63, *L);
14853   %}
14854   ins_pipe(pipe_cmp_branch);
14855   ins_short_branch(1);
14856 %}
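// [Editor's note -- illustrative, not part of aarch64.ad; x0 is made up.]
// A signed compare against zero that only asks "negative or not" reduces to
// testing the sign bit, so the rule above turns
//
//   if (x < 0)  goto L;   into roughly   tbnz x0, #63, L
//   if (x >= 0) goto L;                  tbz  x0, #63, L
//
// with tbr() picking tbz or tbnz from the re-mapped condition.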
14857 
14858 instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
14859   match(If cmp (CmpI op1 op2));
14860   effect(USE labl);
14861 
14862   ins_cost(BRANCH_COST);
14863   format %{ "cb$cmp   $op1, $labl # int" %}
14864   ins_encode %{
14865     Label* L = $labl$$label;
14866     Assembler::Condition cond =
14867       ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;


14894   effect(USE labl);
14895 
14896   ins_cost(BRANCH_COST);
14897   format %{ "tb$cmp   $op1, $op2, $labl" %}
14898   ins_encode %{
14899     Label* L = $labl$$label;
14900     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14901     int bit = exact_log2((juint)$op2$$constant);
14902     __ tbr(cond, $op1$$Register, bit, *L);
14903   %}
14904   ins_pipe(pipe_cmp_branch);
14905   ins_short_branch(1);
14906 %}
14907 
14908 // And far variants
14909 instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
14910   match(If cmp (CmpL op1 op2));
14911   effect(USE labl);
14912 
14913   ins_cost(BRANCH_COST);
14914   format %{ "cb$cmp   $op1, $labl # int64_t" %}
14915   ins_encode %{
14916     Label* L = $labl$$label;
14917     Assembler::Condition cond =
14918       ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
14919     __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
14920   %}
14921   ins_pipe(pipe_cmp_branch);
14922 %}
14923 
14924 instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
14925   match(If cmp (CmpI op1 op2));
14926   effect(USE labl);
14927 
14928   ins_cost(BRANCH_COST);
14929   format %{ "cb$cmp   $op1, $labl # int" %}
14930   ins_encode %{
14931     Label* L = $labl$$label;
14932     Assembler::Condition cond =
14933       ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
14934     __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);


14959 
14960   ins_cost(BRANCH_COST);
14961   format %{ "tb$cmp   $op1, $op2, $labl" %}
14962   ins_encode %{
14963     Label* L = $labl$$label;
14964     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
14965     int bit = exact_log2((juint)$op2$$constant);
14966     __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
14967   %}
14968   ins_pipe(pipe_cmp_branch);
14969 %}
14970 
14971 // Test bits
14972 
14973 instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
14974   match(Set cr (CmpL (AndL op1 op2) op3));
14975   predicate(Assembler::operand_valid_for_logical_immediate
14976             (/*is_32*/false, n->in(1)->in(2)->get_long()));
14977 
14978   ins_cost(INSN_COST);
14979   format %{ "tst $op1, $op2 # int64_t" %}
14980   ins_encode %{
14981     __ tst($op1$$Register, $op2$$constant);
14982   %}
14983   ins_pipe(ialu_reg_reg);
14984 %}
14985 
14986 instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
14987   match(Set cr (CmpI (AndI op1 op2) op3));
14988   predicate(Assembler::operand_valid_for_logical_immediate
14989             (/*is_32*/true, n->in(1)->in(2)->get_int()));
14990 
14991   ins_cost(INSN_COST);
14992   format %{ "tst $op1, $op2 # int" %}
14993   ins_encode %{
14994     __ tstw($op1$$Register, $op2$$constant);
14995   %}
14996   ins_pipe(ialu_reg_reg);
14997 %}
14998 
14999 instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
15000   match(Set cr (CmpL (AndL op1 op2) op3));
15001 
15002   ins_cost(INSN_COST);
15003   format %{ "tst $op1, $op2 # int64_t" %}
15004   ins_encode %{
15005     __ tst($op1$$Register, $op2$$Register);
15006   %}
15007   ins_pipe(ialu_reg_reg);
15008 %}
15009 
15010 instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
15011   match(Set cr (CmpI (AndI op1 op2) op3));
15012 
15013   ins_cost(INSN_COST);
15014   format %{ "tstw $op1, $op2 # int" %}
15015   ins_encode %{
15016     __ tstw($op1$$Register, $op2$$Register);
15017   %}
15018   ins_pipe(ialu_reg_reg);
15019 %}
15020 
15021 
15022 // Conditional Far Branch
15023 // Conditional Far Branch Unsigned

