/*
 * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_ASSEMBLER_ARM_HPP
#define CPU_ARM_VM_ASSEMBLER_ARM_HPP

#include "utilities/macros.hpp"

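// ARM condition codes. The enumerator values match the 4-bit condition
// field encoding (eq = 0b0000, ne = 0b0001, ..., al = 0b1110, nv = 0b1111);
// hs/lo are the usual aliases for cs/cc.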
enum AsmCondition {
  eq, ne, cs, cc, mi, pl, vs, vc,
  hi, ls, ge, lt, gt, le, al, nv,
  number_of_conditions,
  // alternative names
  hs = cs,
  lo = cc
};

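// Shift types for shifter operands and scaled register offsets. The
// enumerator values match the 2-bit shift-type field encoding
// (lsl = 0b00, lsr = 0b01, asr = 0b10, ror = 0b11).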
enum AsmShift {
  lsl, lsr, asr, ror
};

#ifdef AARCH64
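// Extend operations for AArch64 register-offset and extended-register forms.
// The enumerator values match the 3-bit "option" field encoding; lsl is an
// alias for uxtx (option 0b011).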
enum AsmExtendOp {
  ex_uxtb, ex_uxth, ex_uxtw, ex_uxtx,
  ex_sxtb, ex_sxth, ex_sxtw, ex_sxtx,

  ex_lsl = ex_uxtx
};
#endif

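// Addressing form for loads and stores: plain offset ([base, #imm]),
// pre-indexed ([base, #imm]!) and post-indexed ([base], #imm). On 32-bit ARM
// the enumerator values carry the P (bit 24) and W (bit 21) instruction bits;
// on AArch64 they hold the index-mode selector bits of the load/store
// immediate encodings.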
enum AsmOffset {
#ifdef AARCH64
  basic_offset = 0b00,
  pre_indexed  = 0b11,
  post_indexed = 0b01
#else
  basic_offset = 1 << 24,
  pre_indexed  = 1 << 24 | 1 << 21,
  post_indexed = 0
#endif
};


#ifndef AARCH64
enum AsmWriteback {
  no_writeback,
  writeback
};

enum AsmOffsetOp {
  sub_offset = 0,
  add_offset = 1
};
#endif


// ARM Addressing Modes 2 and 3 - Load and store
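// Illustrative examples (register names are placeholders): Address(Rbase, 8)
// describes [Rbase, #8], Address(Rbase, Rindex, lsl, 2) describes
// [Rbase, Rindex, lsl #2], and Address(Rbase, 8, post_indexed) describes
// [Rbase], #8.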
class Address VALUE_OBJ_CLASS_SPEC {
 private:
  Register  _base;
  Register  _index;
  int       _disp;
  AsmOffset _mode;
  RelocationHolder   _rspec;
  int       _shift_imm;
#ifdef AARCH64
  AsmExtendOp _extend;
#else
  AsmShift  _shift;
  AsmOffsetOp _offset_op;

  static inline int abs(int x) { return x < 0 ? -x : x; }
  static inline int up (int x) { return x < 0 ?  0 : 1; }
#endif

#ifdef AARCH64
  static const AsmExtendOp LSL = ex_lsl;
#else
  static const AsmShift LSL = lsl;
#endif

 public:
  Address() : _base(noreg) {}

  Address(Register rn, int offset = 0, AsmOffset mode = basic_offset) {
    _base = rn;
    _index = noreg;
    _disp = offset;
    _mode = mode;
    _shift_imm = 0;
#ifdef AARCH64
    _extend = ex_lsl;
#else
    _shift = lsl;
    _offset_op = add_offset;
#endif
  }

#ifdef ASSERT
  Address(Register rn, ByteSize offset, AsmOffset mode = basic_offset) {
    _base = rn;
    _index = noreg;
    _disp = in_bytes(offset);
    _mode = mode;
    _shift_imm = 0;
#ifdef AARCH64
    _extend = ex_lsl;
#else
    _shift = lsl;
    _offset_op = add_offset;
#endif
  }
#endif

#ifdef AARCH64
  Address(Register rn, Register rm, AsmExtendOp extend = ex_lsl, int shift_imm = 0) {
    assert ((extend == ex_uxtw) || (extend == ex_lsl) || (extend == ex_sxtw) || (extend == ex_sxtx), "invalid extend for address mode");
    assert ((0 <= shift_imm) && (shift_imm <= 4), "shift amount is out of range");
    _base = rn;
    _index = rm;
    _disp = 0;
    _mode = basic_offset;
    _extend = extend;
    _shift_imm = shift_imm;
  }
#else
  Address(Register rn, Register rm, AsmShift shift = lsl,
          int shift_imm = 0, AsmOffset mode = basic_offset,
          AsmOffsetOp offset_op = add_offset) {
    _base = rn;
    _index = rm;
    _disp = 0;
    _shift = shift;
    _shift_imm = shift_imm;
    _mode = mode;
    _offset_op = offset_op;
  }

  Address(Register rn, RegisterOrConstant offset, AsmShift shift = lsl,
          int shift_imm = 0) {
    _base = rn;
    if (offset.is_constant()) {
      _index = noreg;
      {
        int off = (int) offset.as_constant();
        if (shift_imm != 0) {
          assert(shift == lsl, "shift not yet encoded");
          off =  off << shift_imm;
        }
        _disp = off;
      }
      _shift = lsl;
      _shift_imm = 0;
    } else {
      _index = offset.as_register();
      _disp = 0;
      _shift = shift;
      _shift_imm = shift_imm;
    }
    _mode = basic_offset;
    _offset_op = add_offset;
  }
#endif // AARCH64

  // [base + index * wordSize]
  static Address indexed_ptr(Register base, Register index) {
    return Address(base, index, LSL, LogBytesPerWord);
  }

  // [base + index * BytesPerInt]
  static Address indexed_32(Register base, Register index) {
    return Address(base, index, LSL, LogBytesPerInt);
  }

  // [base + index * BytesPerHeapOop]
  static Address indexed_oop(Register base, Register index) {
    return Address(base, index, LSL, LogBytesPerHeapOop);
  }

  Address plus_disp(int disp) const {
    assert((disp == 0) || (_index == noreg), "can't apply an offset to a register indexed address");
    Address a = (*this);
    a._disp += disp;
    return a;
  }

  Address rebase(Register new_base) const {
    Address a = (*this);
    a._base = new_base;
    return a;
  }

#ifdef AARCH64
  int encoding_simd() const {
    assert(_index != SP, "encoding constraint");
    assert(_disp == 0 || _mode == post_indexed,  "encoding constraint");
    assert(_index == noreg || _mode == basic_offset, "encoding constraint");
    assert(_mode == basic_offset || _mode == post_indexed, "encoding constraint");
    assert(_extend == ex_lsl, "encoding constraint");
    int index;
    if (_index == noreg) {
      if (_mode == post_indexed)
        index = 0b100 << 5 | 31;
      else
        index = 0;
    } else {
      index = 0b100 << 5 | _index->encoding();
    }
    return index << 16 | _base->encoding_with_sp() << 5;
  }
#else /* !AARCH64 */
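  // Bits for addressing mode 2 (LDR/STR of words and unsigned bytes):
  // a 12-bit immediate offset or a (possibly shifted) register offset.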
  int encoding2() const {
    assert(_mode == basic_offset || _base != PC, "unpredictable instruction");
    if (_index == noreg) {
      assert(-4096 < _disp && _disp < 4096, "encoding constraint");
      return _mode | up(_disp) << 23 | _base->encoding() << 16 | abs(_disp);
    } else {
      assert(_index != PC && (_mode == basic_offset || _index != _base), "unpredictable instruction");
      assert(_disp == 0 && (_shift_imm >> 5) == 0, "encoding constraint");
      return 1 << 25 | _offset_op << 23 | _mode | _base->encoding() << 16 |
             _shift_imm << 7 | _shift << 5 | _index->encoding();
    }
  }

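  // Bits for addressing mode 3 (halfword, signed byte and doubleword
  // transfers): an 8-bit immediate offset split into two nibbles, or an
  // unshifted register offset.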
  int encoding3() const {
    assert(_mode == basic_offset || _base != PC, "unpredictable instruction");
    if (_index == noreg) {
      assert(-256 < _disp && _disp < 256, "encoding constraint");
      return _mode | up(_disp) << 23 | 1 << 22 | _base->encoding() << 16 |
             ((abs(_disp) & 0xf0) << 4) | (abs(_disp) & 0x0f);
    } else {
      assert(_index != PC && (_mode == basic_offset || _index != _base), "unpredictable instruction");
      assert(_disp == 0 && _shift == lsl && _shift_imm == 0, "encoding constraint");
      return _mode | _offset_op << 23 | _base->encoding() << 16 | _index->encoding();
    }
  }

  int encoding_ex() const {
    assert(_index == noreg && _disp == 0 && _mode == basic_offset &&
           _base != PC, "encoding constraint");
    return _base->encoding() << 16;
  }

  int encoding_vfp() const {
    assert(_index == noreg && _mode == basic_offset, "encoding constraint");
    assert(-1024 < _disp && _disp < 1024 && (_disp & 3) == 0, "encoding constraint");
    return _base->encoding() << 16 | up(_disp) << 23 | abs(_disp) >> 2;
  }

  int encoding_simd() const {
    assert(_base != PC, "encoding constraint");
    assert(_index != PC && _index != SP, "encoding constraint");
    assert(_disp == 0, "encoding constraint");
    assert(_shift == lsl, "encoding constraint");
    assert(_index == noreg || _mode == basic_offset, "encoding constraint");
    assert(_mode == basic_offset || _mode == post_indexed, "encoding constraint");
    int index;
    if (_index == noreg) {
      if (_mode == post_indexed)
        index = 13;
      else
        index = 15;
    } else {
      index = _index->encoding();
    }

    return _base->encoding() << 16 | index;
  }
#endif // !AARCH64

  Register base() const {
    return _base;
  }

  Register index() const {
    return _index;
  }

  int disp() const {
    return _disp;
  }

  AsmOffset mode() const {
    return _mode;
  }

  int shift_imm() const {
    return _shift_imm;
  }

#ifdef AARCH64
  AsmExtendOp extend() const {
    return _extend;
  }
#else
  AsmShift shift() const {
    return _shift;
  }

  AsmOffsetOp offset_op() const {
    return _offset_op;
  }
#endif

  bool uses(Register reg) const { return _base == reg || _index == reg; }

  const relocInfo::relocType rtype() { return _rspec.type(); }
  const RelocationHolder&    rspec() { return _rspec; }

  // Convert the raw encoding form into the form expected by the
  // constructor for Address.
  static Address make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc);
};

#ifdef COMPILER2
class VFP VALUE_OBJ_CLASS_SPEC {
  // Helper classes to detect whether a floating point constant can be
  // encoded in a fconstd or fconsts instruction
  // The conversion from the imm8, 8 bit constant, to the floating
  // point value encoding is done with either:
  // for single precision: imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,5):imm8<5:0>:Zeros(19)
  // or
  // for double precision: imm8<7>:NOT(imm8<6>):Replicate(imm8<6>,8):imm8<5:0>:Zeros(48)
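  //
  // Worked example (illustrative): 1.0f has sign 0, biased exponent 127
  // (so e() == 0) and a zero fraction, hence can_be_imm8() holds and
  // imm8() == (0 << 7) | (((0 - 1) & 0x7) << 4) | 0 == 0x70. A value such
  // as 0.1f fails both the exponent-range and the f_lo_is_null() checks,
  // so it cannot be encoded this way.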

 private:
  class fpnum {
   public:
    virtual unsigned int f_hi4() const = 0;
    virtual bool f_lo_is_null() const = 0;
    virtual int e() const = 0;
    virtual unsigned int s() const = 0;

    inline bool can_be_imm8() const { return e() >= -3 && e() <= 4 && f_lo_is_null(); }
    inline unsigned char imm8() const { int v = (s() << 7) | (((e() - 1) & 0x7) << 4) | f_hi4(); assert((v >> 8) == 0, "overflow"); return v; }
  };

 public:
  class float_num : public fpnum {
   public:
    float_num(float v) {
      _num.val = v;
    }

    virtual unsigned int f_hi4() const { return (_num.bits << 9) >> (19+9); }
    virtual bool f_lo_is_null() const { return (_num.bits & ((1 << 19) - 1)) == 0; }
    virtual int e() const { return ((_num.bits << 1) >> (23+1)) - 127; }
    virtual unsigned int s() const { return _num.bits >> 31; }

   private:
    union {
      float val;
      unsigned int bits;
    } _num;
  };

  class double_num : public fpnum {
   public:
    double_num(double v) {
      _num.val = v;
    }

    virtual unsigned int f_hi4() const { return (_num.bits << 12) >> (48+12); }
    virtual bool f_lo_is_null() const { return (_num.bits & ((1LL << 48) - 1)) == 0; }
    virtual int e() const { return ((_num.bits << 1) >> (52+1)) - 1023; }
    virtual unsigned int s() const { return _num.bits >> 63; }

   private:
    union {
      double val;
      unsigned long long bits;
    } _num;
  };
};
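// Usage sketch (illustrative, not a prescribed API): construct
// VFP::float_num fn(1.0f) or VFP::double_num dn(1.0), test can_be_imm8(),
// and when it holds emit the constant with the 8-bit immediate form of the
// VFP move-immediate instruction using imm8() instead of loading it from
// memory.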
#endif

#ifdef AARCH64
#include "assembler_arm_64.hpp"
#else
#include "assembler_arm_32.hpp"
#endif


#endif // CPU_ARM_VM_ASSEMBLER_ARM_HPP