
src/cpu/aarch64/vm/aarch64.ad

rev 10850 : 8154537: AArch64: some integer rotate instructions are never emitted
Summary: some integer rotate rules in the ad file can't be matched
Reviewed-by:
rev 10955 : undo
rev 10957 : 8154826: AArch64: take advantage better of base + shifted offset addressing mode
Summary: reshape the address subtree to fit the AArch64 addressing modes
Reviewed-by:
rev 10958 : more
rev 10959 : more
rev 10960 : more addressing
rev 10962 : 8155015: Aarch64: bad assert in spill generation code
Reviewed-by:
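
A few context sketches for the revisions above. For rev 10850 (8154537): the rotate rules in the ad file are written to recognize the usual shift-or idiom, and when a rule's operands do not line up with the shape C2 actually builds, the match fails and the rotation is emitted as two shifts plus an OR rather than a single instruction. A rough C++ sketch of that idiom (rotl32 is an illustrative name, not HotSpot code):

#include <cstdint>

// Rotate-left of a 32-bit value written as the shift-or idiom.  When the
// rotate rules match, the whole expression collapses into one rotate
// instruction; when they do not, it stays as two shifts and an orr.
static inline uint32_t rotl32(uint32_t x, unsigned n) {
  n &= 31;                                  // keep the shift amount in range
  return (x << n) | (x >> ((32 - n) & 31));
}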
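For revs 10957-10960 (8154826): AArch64 loads and stores accept a register offset shifted by the access size, for example ldr x0, [x1, x2, lsl #3], so keeping the address expression in base-plus-shifted-index form lets a scaled array access fold into the memory instruction instead of needing a separate add. A small sketch of the source-level pattern that benefits (which subtrees actually match is decided by the memory operands in aarch64.ad; this is only an illustration):

#include <cstdint>

// With the base in one register and the index in another, this load can use
// the "base + (index << 3)" addressing form directly, provided the compiler
// keeps the address as base plus a shifted index rather than a pre-computed
// sum in a scratch register.
int64_t element(const int64_t* base, int64_t index) {
  return base[index];
}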
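For rev 10962 (8155015), the hunk shown below: (offset & 7) is non-zero exactly when offset is not 8-byte aligned, so the original stack-to-stack assert at line 3080 could only pass when both offsets were misaligned; the corrected form compares the masked bits against zero. A tiny standalone illustration of the intended check (not HotSpot code):

#include <cassert>

// 8-byte alignment test: an aligned offset has its low three bits clear, so
// the predicate must compare (offset & 7) against 0.  Without the "== 0",
// the condition is true only for misaligned offsets.
static inline bool aligned8(int offset) {
  return (offset & 7) == 0;
}

int main() {
  assert(aligned8(16));     // 16 is 8-byte aligned
  assert(!aligned8(12));    // 12 is not
  return 0;
}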

Old version of lines 3060-3100:

3060            "expected aligned-adjacent pairs");
3061   }
3062 
3063   if (src_lo == dst_lo && src_hi == dst_hi) {
3064     return 0;            // Self copy, no move.
3065   }
3066 
3067   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
3068               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
3069   int src_offset = ra_->reg2offset(src_lo);
3070   int dst_offset = ra_->reg2offset(dst_lo);
3071 
3072   if (bottom_type()->isa_vect() != NULL) {
3073     uint ireg = ideal_reg();
3074     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
3075     if (cbuf) {
3076       MacroAssembler _masm(cbuf);
3077       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
3078       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
3079         // stack->stack
3080         assert((src_offset & 7) && (dst_offset & 7), "unaligned stack offset");
3081         if (ireg == Op_VecD) {
3082           __ unspill(rscratch1, true, src_offset);
3083           __ spill(rscratch1, true, dst_offset);
3084         } else {
3085           __ spill_copy128(src_offset, dst_offset);
3086         }
3087       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
3088         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3089                ireg == Op_VecD ? __ T8B : __ T16B,
3090                as_FloatRegister(Matcher::_regEncode[src_lo]));
3091       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
3092         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3093                        ireg == Op_VecD ? __ D : __ Q,
3094                        ra_->reg2offset(dst_lo));
3095       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
3096         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3097                        ireg == Op_VecD ? __ D : __ Q,
3098                        ra_->reg2offset(src_lo));
3099       } else {
3100         ShouldNotReachHere();


New version of lines 3060-3100; the only change in this range is the corrected stack-offset alignment assert at line 3080 (rev 10962, 8155015):

3060            "expected aligned-adjacent pairs");
3061   }
3062 
3063   if (src_lo == dst_lo && src_hi == dst_hi) {
3064     return 0;            // Self copy, no move.
3065   }
3066 
3067   bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
3068               (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
3069   int src_offset = ra_->reg2offset(src_lo);
3070   int dst_offset = ra_->reg2offset(dst_lo);
3071 
3072   if (bottom_type()->isa_vect() != NULL) {
3073     uint ireg = ideal_reg();
3074     assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
3075     if (cbuf) {
3076       MacroAssembler _masm(cbuf);
3077       assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
3078       if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
3079         // stack->stack
3080         assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
3081         if (ireg == Op_VecD) {
3082           __ unspill(rscratch1, true, src_offset);
3083           __ spill(rscratch1, true, dst_offset);
3084         } else {
3085           __ spill_copy128(src_offset, dst_offset);
3086         }
3087       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
3088         __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3089                ireg == Op_VecD ? __ T8B : __ T16B,
3090                as_FloatRegister(Matcher::_regEncode[src_lo]));
3091       } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
3092         __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
3093                        ireg == Op_VecD ? __ D : __ Q,
3094                        ra_->reg2offset(dst_lo));
3095       } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
3096         __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
3097                        ireg == Op_VecD ? __ D : __ Q,
3098                        ra_->reg2offset(src_lo));
3099       } else {
3100         ShouldNotReachHere();

