1 /* 2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved. 4 * Copyright (c) 2015, Linaro Ltd. All rights reserved. 5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 6 * 7 * This code is free software; you can redistribute it and/or modify it 8 * under the terms of the GNU General Public License version 2 only, as 9 * published by the Free Software Foundation. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 24 * 25 */ 26 27 #ifndef CPU_AARCH32_VM_ASSEMBLER_AARCH32_HPP 28 #define CPU_AARCH32_VM_ASSEMBLER_AARCH32_HPP 29 30 #include "asm/register.hpp" 31 #include "vm_version_aarch32.hpp" 32 33 // Definitions of various symbolic names for machine registers 34 35 // Here we define how many integer and double precision floating point 36 // registers are used for passing parameters by the C and Java calling 37 // conventions. Each double precision floating point register can be used 38 // as two single precision registers. 
// Number of integer and floating-point registers used for parameter passing
// by the C (AAPCS) and Java calling conventions. Each double precision FP
// register can be used as two single precision registers.
class Argument VALUE_OBJ_CLASS_SPEC {
 public:
  enum {
    n_int_register_parameters_c = 4,    // c_rarg0, c_rarg1, c_rarg2, c_rarg3
#ifdef HARD_FLOAT_CC
    n_float_register_parameters_c = 8,  // c_farg0, c_farg1, ..., c_farg7
#else // HARD_FLOAT_CC
    n_float_register_parameters_c = 0,  // 0 registers used to pass arguments
#endif // HARD_FLOAT_CC
    n_int_register_parameters_j = 4,    // j_rarg0, j_rarg1, j_rarg2, j_rarg3
#ifdef HARD_FLOAT_CC
    n_float_register_parameters_j = 8   // j_farg0, j_farg1, ..., j_farg7
#else // HARD_FLOAT_CC
    n_float_register_parameters_j = 0   // 0 registers used to pass arguments
#endif // HARD_FLOAT_CC
  };
};

// Symbolic names for the register arguments used by the C calling convention
// (the calling convention for C runtime calls and calls to JNI native
// methods)

REGISTER_DECLARATION(Register, c_rarg0, r0);
REGISTER_DECLARATION(Register, c_rarg1, r1);
REGISTER_DECLARATION(Register, c_rarg2, r2);
REGISTER_DECLARATION(Register, c_rarg3, r3);

// Symbolic names for the register arguments used by the Java calling
// convention (the calling convention for calls to compiled Java methods)

// We have control over the convention for Java so we can do what we please.
// What pleases us is to offset the Java calling convention so that when
// we call a suitable JNI method the arguments are lined up and we don't
// have to do much shuffling. A suitable JNI method is non-static and with
// a small number of arguments.
//
// |-----------------------------------|
// | c_rarg0 c_rarg1 c_rarg2 c_rarg3 |
// |-----------------------------------|
// | r0 r1 r2 r3 |
// |-----------------------------------|
// | j_rarg3 j_rarg0 j_rarg1 j_rarg2 |
// |-----------------------------------|

// j_rarg0..2 map to c_rarg1..3 (so the receiver slot of a JNI call lines
// up), and j_rarg3 wraps around to c_rarg0.
REGISTER_DECLARATION(Register, j_rarg0, c_rarg1);
REGISTER_DECLARATION(Register, j_rarg1, c_rarg2);
REGISTER_DECLARATION(Register, j_rarg2, c_rarg3);
REGISTER_DECLARATION(Register, j_rarg3, c_rarg0);

// Common register aliases used in assembler code

// These registers are used to hold VM data either temporarily within a method
// or across method calls. According to AAPCS, r0-r3 and r12 are caller-saved,
// the rest are callee-saved.

// These 4 aliases are used in the template interpreter only.

REGISTER_DECLARATION(Register, rdispatch, r4);  // Address of dispatch table
REGISTER_DECLARATION(Register, rbcp, r5);       // Bytecode pointer
REGISTER_DECLARATION(Register, rlocals, r6);    // Address of local variables section of current frame
REGISTER_DECLARATION(Register, rcpool, r7);     // Address of constant pool cache

// The following aliases are used in all VM components.
REGISTER_DECLARATION(Register, rthread, r8);    // Address of current thread
REGISTER_DECLARATION(Register, rscratch1, r9);  // Scratch register
REGISTER_DECLARATION(Register, rmethod, r10);   // Address of current method
REGISTER_DECLARATION(Register, rfp, r11);       // Frame pointer
REGISTER_DECLARATION(Register, rscratch2, r12); // Scratch register
REGISTER_DECLARATION(Register, sp, r13);        // Stack pointer
REGISTER_DECLARATION(Register, lr, r14);        // Link register
REGISTER_DECLARATION(Register, r15_pc, r15);    // Program counter


extern "C" void entry(CodeBuffer *cb);


// assert() variant that uses the asserted expression text as the message.
#define assert_cond(ARG1) assert(ARG1, #ARG1)

class Assembler;

// One 32-bit ARM instruction under construction. Fields are filled in
// piecewise with f()/sf()/rf(); in debug builds 'bits' tracks which bit
// positions have been specified so emit() can verify the whole word was set.
class Instruction_aarch32 {
  unsigned insn;    // the instruction word being assembled
#ifdef ASSERT
  unsigned bits;    // mask of bit positions that have been filled in
#endif
  Assembler *assem; // owning assembler (set by the starti macro)

public:

  Instruction_aarch32(class Assembler *as) {
#ifdef ASSERT
    bits = 0;
#endif
    insn = 0;
    assem = as;
  }

  // Defined out of line: the destructor emits the finished word via 'assem'.
  inline ~Instruction_aarch32();

  unsigned &get_insn() { return insn; }
#ifdef ASSERT
  unsigned &get_bits() { return bits; }
#endif

  // Sign-extend the bit-field val[hi:lo] to a full 32-bit value.
  // Implemented by shifting the field up to the sign bit, then arithmetic
  // shifting back down (via a type-punning union, the HotSpot idiom here).
  static inline int32_t extend(unsigned val, int hi = 31, int lo = 0) {
    union {
      unsigned u;
      int n;
    };

    u = val << (31 - hi);
    n = n >> (31 - hi + lo);
    return n;
  }

  // Extract the unsigned bit-field val[msb:lsb].
  static inline uint32_t extract(uint32_t val, int msb, int lsb) {
    int nbits = msb - lsb + 1;
    assert_cond(msb >= lsb);
    uint32_t mask = (1U << nbits) - 1;
    uint32_t result = val >> lsb;
    result &= mask;
    return result;
  }

  // Extract the bit-field val[msb:lsb] and sign-extend it.
  static inline int32_t sextract(uint32_t val, int msb, int lsb) {
    uint32_t uval = extract(val, msb, lsb);
    return extend(uval, msb - lsb);
  }

  // Patch the unsigned field [msb:lsb] of the already-emitted instruction
  // at address 'a' to 'val', leaving all other bits untouched.
  // NOTE(review): for msb==31, lsb==0 the guard computes 1U << 32, which is
  // undefined behavior — presumably full-word patches never come this way.
  static void patch(address a, int msb, int lsb, unsigned long val) {
    int nbits = msb - lsb + 1;
    guarantee(val < (1U << nbits), "Field too big for insn");
    assert_cond(msb >= lsb);
    unsigned mask = (1U << nbits) - 1;
    val <<= lsb;
    mask <<= lsb;
    unsigned target = *(unsigned *)a;
    target &= ~mask;
    target |= val;
    *(unsigned *)a = target;
  }

  // Signed variant of patch(): 'val' must fit in [msb:lsb] as a two's
  // complement value (all discarded high bits equal to the sign bit).
  static void spatch(address a, int msb, int lsb, long val) {
    int nbits = msb - lsb + 1;
    long chk = val >> (nbits - 1);
    guarantee (chk == -1 || chk == 0, "Field too big for insn");
    unsigned uval = val;
    unsigned mask = (1U << nbits) - 1;
    uval &= mask;
    uval <<= lsb;
    mask <<= lsb;
    unsigned target = *(unsigned *)a;
    target &= ~mask;
    target |= uval;
    *(unsigned *)a = target;
  }

/*  void f(unsigned val, int msb, int lsb) {
    int nbits = msb - lsb + 1;
    guarantee(val < (1U << nbits), "Field too big for insn");
    assert_cond(msb >= lsb);
    unsigned mask = (1U << nbits) - 1;
    val <<= lsb;
    mask <<= lsb;
    insn |= val;
    assert_cond((bits & mask) == 0);
#ifdef ASSERT
    bits |= mask;
#endif
  }*/

  // Set the unsigned field [msb:lsb] of the instruction under construction.
  // Unlike the commented-out version above, this tolerates re-setting a
  // field (it clears the old contents first).
  void f(unsigned val, int msb, int lsb) {
    int nbits = msb - lsb + 1;
    guarantee(val < (1U << nbits), "Field too big for insn");
    assert_cond(msb >= lsb);
    unsigned mask = (1U << nbits) - 1;
    val <<= lsb;
    mask <<= lsb;
    insn &= ~mask;
    insn |= val;
#ifdef ASSERT
    bits |= mask;
#endif
  }

  // Set a single bit.
  void f(unsigned val, int bit) {
    f(val, bit, bit);
  }

  // Set the field [msb:lsb] from a signed value (range-checked).
  void sf(long val, int msb, int lsb) {
    int nbits = msb - lsb + 1;
    long chk = val >> (nbits - 1);
    guarantee (chk == -1 || chk == 0, "Field too big for insn");
    unsigned uval = val;
    unsigned mask = (1U << nbits) - 1;
    uval &= mask;
    f(uval, lsb + nbits - 1, lsb);
  }

  // Place a 4-bit core register number at 'lsb'.
  void rf(Register r, int lsb) {
    f(r->encoding_nocheck(), lsb + 3, lsb);
  }

  // Place a 5-bit FP register number at 'lsb'.
  void rf(FloatRegister r, int lsb) {
    f(r->encoding_nocheck(), lsb + 4, lsb);
  }

  // Read back a field; asserts the field was previously filled in.
  unsigned get(int msb = 31, int lsb = 0) {
    int nbits = msb - lsb + 1;
    unsigned mask = ((1U << nbits) - 1) << lsb;
    assert_cond((bits & mask) == mask);
    return (insn & mask) >> lsb;
  }
255 256 void fixed(unsigned value, unsigned mask) { 257 assert_cond ((mask & bits) == 0); 258 #ifdef ASSERT 259 bits |= mask; 260 #endif 261 insn |= value; 262 } 263 }; 264 265 #define starti Instruction_aarch32 do_not_use(this); set_current(&do_not_use) 266 267 // abs methods which cannot overflow and so are well-defined across 268 // the entire domain of integer types. 269 static inline unsigned int uabs(unsigned int n) { 270 union { 271 unsigned int result; 272 int value; 273 }; 274 result = n; 275 if (value < 0) result = -result; 276 return result; 277 } 278 static inline unsigned long uabs(unsigned long n) { 279 union { 280 unsigned long result; 281 long value; 282 }; 283 result = n; 284 if (value < 0) result = -result; 285 return result; 286 } 287 static inline unsigned long uabs(long n) { return uabs((unsigned long)n); } 288 static inline unsigned long uabs(int n) { return uabs((unsigned int)n); } 289 290 #define S_DFLT ::lsl() 291 #define C_DFLT AL 292 293 294 // Shift for base reg + reg offset addressing 295 class shift_op { 296 public: 297 enum shift_kind { LSL, LSR, ASR, ROR }; 298 private: 299 enum shift_source { imm_s, reg_s }; 300 enum shift_source _source; 301 enum shift_kind _op; 302 int _shift; 303 Register _reg; 304 305 bool check_valid() { 306 if(imm_s == _source) { 307 switch(_op) { 308 case LSL: return _shift >= 0 && _shift <= 31; 309 case ROR: return _shift >= 1 && _shift <= 32; 310 default: return _shift >= 1 && _shift <= 32; 311 } 312 } 313 return true; //Don't check register shifts 314 } 315 public: 316 // Default shift is lsl(0) 317 shift_op() 318 : _source(imm_s), _op(LSL), _shift(0) { } 319 shift_op(enum shift_kind op, int shift) 320 : _source(imm_s), _op(op), _shift(shift) { 321 if(!shift) { 322 // All zero shift encodings map to LSL 0 323 _shift = 0; 324 _op = LSL; 325 } 326 int pshift = _shift; 327 if(-1 == _shift && ROR == _op) { 328 // This is an RRX, make shift valid for the check 329 _shift = 1; 330 pshift = 0; //set to zero 331 } 
332 assert(check_valid(), "Invalid shift quantity"); 333 _shift = pshift; //restore shift 334 } 335 shift_op(enum shift_kind op, Register r) 336 : _source(reg_s), _op(op), _reg(r) {} 337 338 shift_kind kind() const { 339 return _op; 340 } 341 342 int shift() const { 343 assert(imm_s == _source, "Not an immediate shift"); 344 return _shift % 32; 345 } 346 Register reg() const { 347 assert(reg_s == _source, "Not a register shift"); 348 return _reg; 349 } 350 bool is_register() { 351 return reg_s == _source; 352 } 353 bool operator==(const shift_op& other) const { 354 if(imm_s == _source && imm_s == other._source) { 355 return _op == other._op && _shift == other._shift; 356 } else if (reg_s == _source && imm_s == _source) { 357 return _op == other._op && _reg == other._reg; 358 } 359 return false; 360 } 361 bool operator!=(const shift_op& other) const { 362 return !( *this == other); 363 } 364 }; 365 class lsl : public shift_op { 366 public: 367 lsl(int sft = 0): shift_op(LSL, sft) { } 368 lsl(Register r): shift_op(LSL, r) { } 369 }; 370 class lsr : public shift_op { 371 public: 372 lsr(int sft = 0): shift_op(LSR, sft) { } 373 lsr(Register r): shift_op(LSR, r) { } 374 }; 375 class asr : public shift_op { 376 public: 377 asr(int sft = 0): shift_op(ASR, sft) { } 378 asr(Register r): shift_op(ASR, r) { } 379 }; 380 class ror : public shift_op { 381 public: 382 ror(int sft = 0): shift_op(ROR, sft) {} 383 ror(Register r): shift_op(ROR, r) { } 384 }; 385 class rrx : public shift_op { 386 public: 387 rrx(): shift_op(ROR, -1) {} 388 }; 389 390 391 // Addressing modes 392 class Address VALUE_OBJ_CLASS_SPEC { 393 public: 394 enum access_mode { no_mode, imm, reg, lit }; 395 //literal is class of imm? -> potentially have to split later if some instructions work 396 // with one but not other although can be determined from registers. 
  enum wb_mode { off, pre, post };  // write-back: none, pre-index, post-index

  enum reg_op { ADD, SUB };         // whether the index is added or subtracted

 private:
  Register _base;
  Register _index;
  int _offset;
  enum access_mode _acc_mode;
  enum wb_mode _wb_mode;
  enum reg_op _as_op;
  shift_op _shift;

  RelocationHolder _rspec;

  // Typically we use AddressLiterals we want to use their rval
  // However in some situations we want the lval (effect address) of
  // the item. We provide a special factory for making those lvals.
  bool _is_lval;

  // If the target is far we'll need to load the ea of this to a
  // register to reach it. Otherwise if near we can do PC-relative
  // addressing.
  address _target;

 public:
  Address()
    : _acc_mode(no_mode) { }
  //immediate & literal
  Address(Register r, enum wb_mode mode = off)
    : _base(r), _index(noreg), _offset(0), _acc_mode(imm), _wb_mode(mode),
      _shift(lsl()), _target(0) {
    assert(!(r == r15_pc && _wb_mode == pre), "The PC can't be pre-indexed.");
  }
  Address(Register r, int o, enum wb_mode mode = off)
    : _base(r), _index(noreg), _offset(o), _acc_mode(imm), _wb_mode(mode),
      _shift(lsl()), _target(0) {
    assert(!(r == r15_pc && _wb_mode == pre), "The PC can't be pre-indexed.");
  }
  Address(Register r, long o, enum wb_mode mode = off)
    : _base(r), _index(noreg), _offset(o), _acc_mode(imm), _wb_mode(mode),
      _shift(lsl()), _target(0) {
    assert(!(r == r15_pc && _wb_mode == pre), "The PC can't be pre-indexed.");
  }
  Address(Register r, unsigned long o, enum wb_mode mode = off)
    : _base(r), _index(noreg), _offset(o), _acc_mode(imm), _wb_mode(mode),
      _shift(lsl()), _target(0) {
    assert(!(r == r15_pc && _wb_mode == pre), "The PC can't be pre-indexed.");
  }
  Address(Register r, unsigned int o, enum wb_mode mode = off)
    : _base(r), _index(noreg), _offset(o), _acc_mode(imm), _wb_mode(mode),
      _shift(lsl()), _target(0) {
    assert(!(r == r15_pc && _wb_mode == pre), "The PC can't be pre-indexed.");
  }
#ifdef ASSERT
  // ByteSize is only a distinct type in debug builds; in product builds it
  // is a plain integer and the int constructor above is used instead.
  Address(Register r, ByteSize disp)
    : _base(r), _index(noreg), _offset(in_bytes(disp)), _acc_mode(imm), _wb_mode(off),
      _shift(lsl()), _target(0) {
    assert(!(r == r15_pc && _wb_mode == pre), "The PC can't be pre-indexed.");
  }
#endif


  //Register-offset
  Address(Register r, Register r1, shift_op shift = lsl(), enum reg_op op = ADD,
          enum wb_mode wbm = off)
    : _base(r), _index(r1), _offset(0), _acc_mode(reg), _wb_mode(wbm), _as_op(op),
      _shift(shift), _target(0) {
    assert(!shift.is_register(), "Can't shift a register-offset address by a register");
  }

  // Literal (PC-relative or via a loaded effective address).
  Address(address target, RelocationHolder const& rspec)
    : _acc_mode(lit),
      _base(sp),
      _wb_mode(off),
      _rspec(rspec),
      _is_lval(false),
      _target(target)
    { }
  Address(address target, relocInfo::relocType rtype = relocInfo::external_word_type);

 private:
  //Could be either
  void AddressConstruct(Register base, RegisterOrConstant index, enum reg_op op, shift_op shift,
                        enum wb_mode mode);
 public:

  Address(Register base, RegisterOrConstant index, enum reg_op op, enum wb_mode mode) {
    AddressConstruct(base, index, op, lsl(), mode);
  }
  Address(Register base, RegisterOrConstant index, shift_op shift = lsl(), enum reg_op op = ADD,
          enum wb_mode mode = off) {
    if(shift.kind() != lsl().kind()) {
      // A non-trivial shift only makes sense with a register index.
      assert(index.is_register(), "should be");
    }
    AddressConstruct(base, index, op, shift, mode);
  }


  Register base() const {
    //in aarch64 this didn't apply to preindex mode -> why?
    guarantee(_acc_mode == imm || _acc_mode == reg, "wrong mode");
    return _base;
  }
  long offset() const {
    return _offset;
  }
  Register index() const {
    return _index;
  }
  shift_op shift() const {
    return _shift;
  }
  reg_op op() const {
    return _as_op;
  }
  access_mode get_mode() const {
    return _acc_mode;
  }
  wb_mode get_wb_mode() const {
    return _wb_mode;
  }
  bool uses(Register reg) const { return _base == reg || _index == reg; }
  address target() const { return _target; }
  const RelocationHolder& rspec() const { return _rspec; }

  // Fill the addressing-mode fields of the instruction being assembled.
  void encode(Instruction_aarch32 *i, CodeSection *sec, address pc) const;

  // As encode(), but for the FP load/store addressing format.
  void fp_encode(Instruction_aarch32 *i, CodeSection *sec, address pc) const;

  void lea(MacroAssembler *, Register) const;

  // Data type the address will be accessed as; determines which immediate
  // offsets and index shifts are encodable (mirrors BasicType values).
  typedef enum {
    IDT_BOOLEAN = T_BOOLEAN,
    IDT_CHAR = T_CHAR,
    IDT_FLOAT = T_FLOAT,
    IDT_DOUBLE = T_DOUBLE,
    IDT_BYTE = T_BYTE,
    IDT_SHORT = T_SHORT,
    IDT_INT = T_INT,
    IDT_LONG = T_LONG,
    IDT_OBJECT = T_OBJECT,
    IDT_ARRAY = T_ARRAY,
    IDT_ADDRESS = T_ADDRESS,
    IDT_METADATA = T_METADATA,
    // not really a data type, denotes the use when address value is needed
    // itself, and Address instance is not used to fetch actual data from memory
    IDT_LEA = 100,
    // multi-word memory access insn (ldmia/stmia etc)
    IDT_MULTIWORD
  } InsnDataType;

  inline static InsnDataType toInsnDataType(BasicType type) {
    return (InsnDataType)type;
  }

  // Return an equivalent address guaranteed encodable for 'type',
  // materializing into 'temp' if necessary.
  Address safe_for(InsnDataType type, MacroAssembler *, Register temp);
  bool is_safe_for(InsnDataType);

  static bool offset_ok_for_immed(long offset, InsnDataType type);
  static bool shift_ok_for_index(shift_op shift, InsnDataType type);
};

// Convience classes
class RuntimeAddress: public Address {
 public:
  RuntimeAddress(address target) : Address(target, relocInfo::runtime_call_type) {}
};

class OopAddress:
public Address {
 public:
  OopAddress(address target) : Address(target, relocInfo::oop_type){}
};

class ExternalAddress: public Address {
 private:
  static relocInfo::relocType reloc_for_target(address target) {
    // Sometimes ExternalAddress is used for values which aren't
    // exactly addresses, like the card table base.
    // external_word_type can't be used for values in the first page
    // so just skip the reloc in that case.
    return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
  }

 public:
  ExternalAddress(address target) : Address(target, reloc_for_target(target)) {}
};

class InternalAddress: public Address {
 public:
  InternalAddress(address target) : Address(target, relocInfo::internal_word_type) {}
};


// 16 double-precision registers, 2 words each.
const int FPUStateSizeInWords = 16 * 2;


class Assembler : public AbstractAssembler {
  void emit_long(jint x) {
    AbstractAssembler::emit_int32(x);
  }

 public:
  //TODO REMOVE shift_kind from here once done
  enum shift_kind { LSL, LSR, ASR, ROR };
  // NOTE RRX is a special case of ROR with shift = 0

  // Helper functions for shifts
  // Here to allow compiler to find global shift_op without :: prefix as lsl is a
  // standalone instruction
#define HELPER(NAME) \
  shift_op NAME(int sft = 0) { return ::NAME(sft); } \
  shift_op NAME(Register r) { return ::NAME(r); }
  HELPER(lsl);
  HELPER(lsr);
  HELPER(asr);
  HELPER(ror);
  shift_op rrx() { return ::rrx(); }
#undef HELPER

  // ARM condition codes, in encoding order (AL = always, NV = never).
  typedef enum {
    EQ, NE, HS, CS=HS, LO, CC=LO, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL, NV
  } Condition;

  enum { instruction_size = 4 };

  // "mov r0, r0" — the canonical ARM NOP.
  static const uint32_t nop_insn = 0xe1a00000;

  // Build a pre- or post-indexed immediate address.
  Address adjust(Register base, int offset, bool preIncrement) {
    if (preIncrement)
      return Address(base, offset, Address::pre);
    else
      return Address(base, offset, Address::post);
  }

  // Build a pre- or post-indexed register-offset address.
  Address adjust(Register base, Register index, shift_op shift,
                 enum Address::reg_op op, bool preIncrement) {
    return Address(base, index, shift, op, preIncrement ? Address::pre : Address::post);
  }

  Address pre(Register base, int offset) {
    return adjust(base, offset, true);
  }

  Address pre(Register base, Register index, shift_op shift, enum Address::reg_op op) {
    return adjust(base, index, shift, op, true);
  }

  Address post (Register base, int offset) {
    return adjust(base, offset, false);
  }

  // The instruction currently under construction (set by the starti macro).
  Instruction_aarch32* current;

  void set_current(Instruction_aarch32* i) { current = i; }

  // Forwarders to the current instruction's field setters.
  void f(unsigned val, int msb, int lsb) {
    current->f(val, msb, lsb);
  }
  void f(unsigned val, int msb) {
    current->f(val, msb, msb);
  }
  void sf(long val, int msb, int lsb) {
    current->sf(val, msb, lsb);
  }
  void rf(Register reg, int lsb) {
    current->rf(reg, lsb);
  }
  void rf(FloatRegister reg, int lsb) {
    current->rf(reg, lsb);
  }
  void fixed(unsigned value, unsigned mask) {
    current->fixed(value, mask);
  }

  // Emit the finished instruction; in debug builds verify every bit of the
  // word was explicitly specified.
  void emit() {
    emit_long(current->get_insn());
    assert_cond(current->get_bits() == 0xffffffff);
    current = NULL;
  }

  // Member-function-pointer types used by wrap_label to retarget an
  // instruction once a label is bound.
  typedef void (Assembler::* uncond_branch_insn)(address dest);
  typedef void (Assembler::* cond_branch_insn)(address dest, Condition cond);
  typedef void (Assembler::* cond_ldst_insn)(Register Rt, address dest, Condition cond);
  typedef void (Assembler::* cond_fp_ldst_insn)(FloatRegister Vd, address dest, Condition cond);

  void wrap_label(Label &L, uncond_branch_insn insn);
  void wrap_label(Label &L, Condition cond, cond_branch_insn insn);
  void wrap_label(Register r, Label &L, Condition cond, cond_ldst_insn insn);
  void wrap_label(FloatRegister r, Label &L, Condition cond, cond_fp_ldst_insn insn);

#undef INSN

  // AARCH32 Instructions
Defined roughly in the order they are found in 692 // ARM Archicture Reference Manual, section 5 693 694 #define ZERO_ADDR_REG r0 695 #define ONES_ADDR_REG r15 696 697 // Data processing (register & register-shifted-register) 698 void reg_instr(int decode, shift_op shift, Condition cond, bool s) { 699 f(cond, 31, 28), f(0b000, 27, 25), f(decode, 24, 21), f(s, 20); 700 f(shift.shift(), 11, 7), f(shift.kind(), 6, 5), f(0, 4); 701 } 702 void reg_shift_reg_instr(int decode, enum shift_op::shift_kind kind, 703 Condition cond, bool s) { 704 f(cond, 31, 28), f(0b000, 27, 25), f(decode, 24, 21), f(s, 20); 705 f(0, 7), f(kind, 6, 5), f(1, 4); 706 } 707 708 #define INSN(NAME, decode, s_flg) \ 709 void NAME(Register Rd, Register Rn, Register Rm, shift_op shift = S_DFLT, \ 710 Condition cond = C_DFLT) { \ 711 starti; \ 712 if(shift.is_register()) { \ 713 reg_shift_reg_instr(decode, shift.kind(), cond, s_flg); \ 714 rf(Rn, 16), rf(Rd, 12), rf(shift.reg(), 8), rf(Rm, 0); \ 715 } else { \ 716 reg_instr(decode, shift, cond, s_flg); \ 717 rf(Rn, 16), rf(Rd, 12), rf(Rm, 0); \ 718 } \ 719 } 720 INSN(andr, 0b0000, 0); 721 INSN(eor, 0b0001, 0); 722 INSN(sub, 0b0010, 0); 723 INSN(rsb, 0b0011, 0); 724 INSN(add, 0b0100, 0); 725 INSN(adc, 0b0101, 0); 726 INSN(sbc, 0b0110, 0); 727 INSN(rsc, 0b0111, 0); 728 INSN(orr, 0b1100, 0); 729 INSN(bic, 0b1110, 0); 730 731 INSN(ands, 0b0000, 1); 732 INSN(eors, 0b0001, 1); 733 INSN(subs, 0b0010, 1); 734 INSN(rsbs, 0b0011, 1); 735 INSN(adds, 0b0100, 1); 736 INSN(adcs, 0b0101, 1); 737 INSN(sbcs, 0b0110, 1); 738 INSN(rscs, 0b0111, 1); 739 INSN(orrs, 0b1100, 1); 740 INSN(bics, 0b1110, 1); 741 742 #undef INSN 743 744 #define INSN(NAME, decode) \ 745 void NAME(Register Rn, Register Rm, Condition cond) { \ 746 NAME(Rn, Rm, S_DFLT, cond); \ 747 } \ 748 void NAME(Register Rn, Register Rm, shift_op shift = S_DFLT, \ 749 Condition cond = C_DFLT) { \ 750 starti; \ 751 if(shift.is_register()) { \ 752 reg_shift_reg_instr(decode, shift.kind(), cond, true); \ 753 
rf(Rn, 16), f(0b0000, 15, 12), rf(shift.reg(), 8), rf(Rm, 0); \ 754 } else { \ 755 reg_instr(decode, shift, cond, true); \ 756 rf(Rn, 16), f(0, 15, 12), rf(Rm, 0); \ 757 } \ 758 } 759 INSN(tst, 0b1000); 760 INSN(teq, 0b1001); 761 INSN(cmp, 0b1010); 762 INSN(cmn, 0b1011); 763 #undef INSN 764 765 // TODO appears that if Rd = 15 and s flag set then perhaps different method 766 void mov_internal(int decode, Register Rd, Register Rnm, shift_op shift, bool s, Condition cond) { 767 starti; 768 if(shift.is_register()) { 769 reg_shift_reg_instr(decode, shift.kind(), cond, s); 770 f(0b0000, 19, 16), rf(Rd, 12), rf(shift.reg(), 8), rf(Rnm, 0); 771 } else { 772 reg_instr(decode, shift, cond, s); 773 f(0, 19, 16), rf(Rd, 12), rf(Rnm, 0); 774 } 775 } 776 void mov(Register Rd, Register Rm, shift_op shift, Condition cond = C_DFLT) { 777 mov_internal(0b1101, Rd, Rm, shift, false, cond); 778 } 779 void movs(Register Rd, Register Rm, shift_op shift, Condition cond = C_DFLT) { 780 mov_internal(0b1101, Rd, Rm, shift, true, cond); 781 } 782 void mov(Register Rd, Register Rm, Condition cond = C_DFLT) { 783 mov_internal(0b1101, Rd, Rm, S_DFLT, false, cond); 784 } 785 void movs(Register Rd, Register Rm, Condition cond = C_DFLT) { 786 mov_internal(0b1101, Rd, Rm, S_DFLT, true, cond); 787 } 788 789 void mvn(Register Rd, Register Rm, shift_op shift, Condition cond = C_DFLT) { 790 mov_internal(0b1111, Rd, Rm, shift, false, cond); 791 } 792 void mvns(Register Rd, Register Rm, shift_op shift, Condition cond = C_DFLT) { 793 mov_internal(0b1111, Rd, Rm, shift, true, cond); 794 } 795 void mvn(Register Rd, Register Rm, Condition cond = C_DFLT) { 796 mov_internal(0b1111, Rd, Rm, S_DFLT, false, cond); 797 } 798 void mvns(Register Rd, Register Rm, Condition cond = C_DFLT) { 799 mov_internal(0b1111, Rd, Rm, S_DFLT, true, cond); 800 } 801 802 #define INSN(NAME, type, s_flg, ASSERTION) \ 803 void NAME(Register Rd, Register Rm, unsigned shift, Condition cond = C_DFLT) { \ 804 assert_cond(ASSERTION); \ 805 
if(s_flg) movs(Rd, Rm, shift_op(type, shift), cond); \ 806 else mov(Rd, Rm, shift_op(type, shift), cond); \ 807 } 808 INSN(lsl, shift_op::LSL, 0, true); 809 INSN(lsr, shift_op::LSR, 0, true); 810 INSN(asr, shift_op::ASR, 0, true); 811 INSN(ror, shift_op::ROR, 0, shift != 0); //shift == 0 => RRX 812 813 INSN(lsls, shift_op::LSL, 1, true); 814 INSN(lsrs, shift_op::LSR, 1, true); 815 INSN(asrs, shift_op::ASR, 1, true); 816 INSN(rors, shift_op::ROR, 1, shift != 0); //shift == 0 => RRX 817 #undef INSN 818 819 #define INSN(NAME, type, s_flg) \ 820 void NAME(Register Rd, Register Rm, Condition cond = C_DFLT) { \ 821 if(s_flg) movs(Rd, Rm, shift_op(type, 0), cond); \ 822 else mov(Rd, Rm, shift_op(type, 0), cond); \ 823 } 824 INSN(rrx, shift_op::LSR, 0); 825 INSN(rrxs, shift_op::LSR, 1); 826 #undef INSN 827 828 //Data processing (register-shifted-register) 829 #define INSN(NAME, type, s_flg) \ 830 void NAME(Register Rd, Register Rn, Register Rm, Condition cond = C_DFLT) { \ 831 if(s_flg) movs(Rd, Rn, shift_op(type, Rm), cond); \ 832 else mov(Rd, Rn, shift_op(type, Rm), cond); \ 833 } 834 INSN(lsl, shift_op::LSL, 0); 835 INSN(lsr, shift_op::LSR, 0); 836 INSN(asr, shift_op::ASR, 0); 837 INSN(ror, shift_op::ROR, 0); 838 839 INSN(lsls, shift_op::LSL, 1); 840 INSN(lsrs, shift_op::LSR, 1); 841 INSN(asrs, shift_op::ASR, 1); 842 INSN(rors, shift_op::ROR, 1); 843 #undef INSN 844 845 bool imm_instr(int decode, Register Rd, Register Rn, int imm, Condition cond, 846 bool s) { 847 if(!is_valid_for_imm12(imm)) 848 return false; 849 { 850 starti; 851 f(cond, 31, 28), f(0b001, 27, 25), f(decode, 24, 21), f(s, 20), rf(Rn, 16); 852 int imm12 = encode_imm12(imm); 853 rf(Rd, 12), f(imm12, 11, 0); 854 } 855 return true; 856 } 857 858 #define INSN(NAME, decode, s_flg) \ 859 inline void NAME(Register Rd, Register Rn, unsigned imm, Condition cond = C_DFLT) {\ 860 bool status = imm_instr(decode, Rd, Rn, imm, cond, s_flg); \ 861 assert(status, "invalid imm"); \ 862 } 863 INSN(andr, 0b0000, 0); 864 
  INSN(eor,  0b0001, 0);
  INSN(orr,  0b1100, 0);
  INSN(bic,  0b1110, 0);

  INSN(ands, 0b0000, 1);
  INSN(eors, 0b0001, 1);
  INSN(orrs, 0b1100, 1);
  INSN(bics, 0b1110, 1);
  //NOTE: arithmetic immediate instructions are defined below to allow dispatch.
#undef INSN
 protected:
  // Mov data to destination register in the shortest number of instructions
  // possible.
  void mov_immediate(Register dst, u_int32_t imm32, Condition cond, bool s);
  // Mov data to destination register but always emit enough instructions that would
  // permit any 32-bit constant to be loaded. (Allow for rewriting later).
  void mov_immediate32(Register dst, u_int32_t imm32, Condition cond, bool s);

  // Arithmetic-immediate; falls back to materializing the constant when it
  // is not directly encodable (defined in the .cpp).
  void add_sub_imm(int decode, Register Rd, Register Rn, int imm,
                   Condition cond, bool s);

 public:
#define INSN(NAME, decode, s_flg) \
  inline void NAME(Register Rd, Register Rn, int imm, Condition cond = C_DFLT) { \
    add_sub_imm(decode, Rd, Rn, imm, cond, s_flg); \
  } \
  inline void NAME(Register Rd, Register Rn, unsigned imm, \
                   Condition cond = C_DFLT) { \
    add_sub_imm(decode, Rd, Rn, imm, cond, s_flg); \
  } \
  inline void NAME(Register Rd, Register Rn, long imm, Condition cond = C_DFLT) { \
    add_sub_imm(decode, Rd, Rn, imm, cond, s_flg); \
  } \
  inline void NAME(Register Rd, Register Rn, unsigned long imm, \
                   Condition cond = C_DFLT) { \
    add_sub_imm(decode, Rd, Rn, imm, cond, s_flg); \
  } \
  /*Addition dispatch - place in macroassembler?*/ \
  void NAME(Register Rd, Register Rn, RegisterOrConstant operand, \
            Condition cond = C_DFLT) { \
    if(operand.is_register()) { \
      NAME(Rd, Rn, (Register)operand.as_register(), lsl(), cond); \
    } else { \
      NAME(Rd, Rn, (unsigned)operand.as_constant(), cond); \
    } \
  } \
  inline void NAME(Register Rd, Register Rn, unsigned imm, Register Rtmp, \
                   Condition cond = C_DFLT) { \
    if (Assembler::operand_valid_for_add_sub_immediate(imm)) \
      NAME(Rd, Rn, imm, cond); \
    else { \
      mov_immediate(Rtmp, imm, cond, false); \
      NAME(Rd, Rn, Rtmp, cond); \
    } \
  } \
  //Note that the RegisterOrConstant version can't take a shift even though
  // one of the instructions dispatched to can
  INSN(sub,  0b0010, 0);
  INSN(rsb,  0b0011, 0);
  INSN(add,  0b0100, 0);
  INSN(adc,  0b0101, 0);
  INSN(sbc,  0b0110, 0);
  INSN(rsc,  0b0111, 0);

  INSN(subs, 0b0010, 1);
  INSN(rsbs, 0b0011, 1);
  INSN(adds, 0b0100, 1);
  INSN(adcs, 0b0101, 1);
  INSN(sbcs, 0b0110, 1);
  INSN(rscs, 0b0111, 1);
#undef INSN
  //No need to do reverse as register subtracted from immediate

  // alias for mvn
  void inv(Register Rd, Register Rn, Condition cond = C_DFLT) {
    mvn(Rd, Rn, cond);
  }
  //alias for rsb
  void neg(Register Rd, Register Rn, Condition cond = C_DFLT) {
    rsb(Rd, Rn, 0, cond);
  }
  void negs(Register Rd, Register Rn, Condition cond = C_DFLT) {
    rsbs(Rd, Rn, 0, cond);
  }

  // PC-rel.
  // addressing
  // Materialize PC + imm into Rd. Uses a single add/sub when the offset is
  // an encodable immediate; otherwise loads the offset (pre-adjusted for
  // the length of the mov sequence, since the PC reads as insn + 8) and
  // adds the PC.
  void adr_encode(Register Rd, int imm, Condition cond) {
    if (is_valid_for_imm12(imm) || is_valid_for_imm12(-imm)) {
      add_sub_imm(0b0100, Rd, r15_pc, imm, cond, false); //opcode for add
    } else {
      int adjust = 0;
      if (VM_Version::features() & (FT_ARMV7 | FT_ARMV6T2)) {
        adjust = 8;  // mov_w/mov_t
      } else {
        adjust = 16; // mov and 3 orr
      }
      mov_immediate32(Rd, imm - adjust, cond, false);
      add(Rd, r15_pc, Rd, cond);
    }
  }

  void adr(Register Rd, address dest, Condition cond = C_DFLT);

  void adr(Register Rd, const Address &dest, Condition cond = C_DFLT);

  void adr(Register Rd, Label &L, Condition cond = C_DFLT) {
    wrap_label(Rd, L, cond, &Assembler::Assembler::adr);
  }

 private:
  friend void entry(CodeBuffer *cb);
  // MOV/MVN immediate forms. ZERO_ADDR_REG (r0) fills the unused Rn field.
#define INSN(NAME, decode, s_flg) \
  inline void NAME(Register Rd, unsigned imm, Condition cond = C_DFLT) { \
    bool status = imm_instr(decode, Rd, ZERO_ADDR_REG, imm, cond, s_flg); \
    assert(status, "invalid imm"); \
  } \
  inline void NAME(Register Rd, int imm, Condition cond = C_DFLT) { \
    bool status = imm_instr(decode, Rd, ZERO_ADDR_REG, imm, cond, s_flg); \
    assert(status, "invalid imm"); \
  }
  INSN(mov_i, 0b1101, 0);
  INSN(mvn_i, 0b1111, 0);

  INSN(movs_i, 0b1101, 1);
  INSN(mvns_i, 0b1111, 1);
#undef INSN

  // MOVW: load a 16-bit immediate into the low half of Rd, zeroing the top.
  void movw_i(Register Rd, unsigned imm, Condition cond = C_DFLT) {
    starti;
    assert(imm < (1 << 16), "Immediate too big for movw");
    f(cond, 31, 28), f(0b00110000, 27, 20), f(imm >> 12, 19, 16);
    rf(Rd, 12), f(imm & 0xfff, 11, 0);
  }

  // MOVT: load a 16-bit immediate into the high half of Rd.
  void movt_i(Register Rd, unsigned imm, Condition cond = C_DFLT) {
    starti;
    assert(imm < (1 << 16), "Immediate too big for movt");
    f(cond, 31, 28), f(0b00110100, 27, 20), f(imm >> 12, 19, 16);
    rf(Rd, 12), f(imm & 0xfff, 11, 0);
  }
 public:

  // Compare/test with immediate; ZERO_ADDR_REG fills the unused Rd field,
  // and the S flag is always set.
#define INSN(NAME, decode) \
  inline void NAME(Register Rn, int imm, Condition cond = C_DFLT) { \
    bool status = imm_instr(decode, ZERO_ADDR_REG, Rn, imm, cond, true); \
    assert(status, "invalid imm"); \
  } \
  inline void NAME(Register Rn, unsigned imm, Condition cond = C_DFLT) { \
    bool status = imm_instr(decode, ZERO_ADDR_REG, Rn, imm, cond, true); \
    assert(status, "invalid imm"); \
  } \
  inline void NAME(Register Rn, int imm, Register Rtmp, Condition cond = C_DFLT) { \
    if (Assembler::operand_valid_for_add_sub_immediate(imm)) \
      NAME(Rn, imm, cond); \
    else { \
      mov_immediate(Rtmp, imm, cond, false); \
      NAME(Rn, Rtmp, cond); \
    } \
  } \
  inline void NAME(Register Rn, unsigned imm, Register Rtmp, Condition cond = C_DFLT) { \
    if (Assembler::operand_valid_for_add_sub_immediate(imm)) \
      NAME(Rn, imm, cond); \
    else { \
      mov_immediate(Rtmp, imm, cond, false); \
      NAME(Rn, Rtmp, cond); \
    } \
  }
  INSN(tst, 0b1000);
  INSN(teq, 0b1001);
  INSN(cmp, 0b1010);
  INSN(cmn, 0b1011);
#undef INSN


  // Multiply and multiply accumulate
  // Common encoding: fixed 0000 class, opcode in [23:21], 1001 in [7:4].
  void mult_instr(int decode, Register a, Register b, Register c,
                  Register d, Condition cond, bool s) {
    starti;
    f(cond, 31, 28), f(0b0000, 27, 24), f(decode, 23, 21), f(s, 20);
    rf(a, 16), rf(b, 12), rf(c, 8), rf(d, 0), f(0b1001, 7, 4);
  }

  void mul(Register Rd, Register Rn, Register Rm, Condition cond = C_DFLT) {
    mult_instr(0b000, Rd, ZERO_ADDR_REG, Rm, Rn, cond, false);
  }
  void muls(Register Rd, Register Rn, Register Rm, Condition cond = C_DFLT) {
    mult_instr(0b000, Rd, ZERO_ADDR_REG, Rm, Rn, cond, true);
  }

  void mla(Register Rd, Register Rn, Register Rm, Register Ra, Condition cond = C_DFLT) {
    mult_instr(0b001, Rd, Ra, Rm, Rn, cond, false);
  }
  void mlas(Register Rd, Register Rn, Register Rm, Register Ra, Condition cond = C_DFLT) {
    mult_instr(0b001, Rd, Ra, Rm, Rn, cond, true);
  }

  void mls(Register Rd,
Register Rn, Register Rm, Register Ra, Condition cond = C_DFLT) { 1061 mult_instr(0b011, Rd, Ra, Rm, Rn, cond, false); 1062 } 1063 1064 void umaal(Register RdLo, Register RdHi, Register Rn, Register Rm, Condition cond = C_DFLT) { 1065 mult_instr(0b010, RdHi, RdLo, Rm, Rn, cond, false); 1066 } 1067 1068 #define INSN(NAME, decode, s_flg) \ 1069 void NAME(Register RdLo, Register RdHi, Register Rn, Register Rm, \ 1070 Condition cond = C_DFLT) { \ 1071 mult_instr(decode, RdHi, RdLo, Rm, Rn, cond, s_flg); \ 1072 } 1073 INSN(umull, 0b100, 0); 1074 INSN(umlal, 0b101, 0); 1075 INSN(smull, 0b110, 0); 1076 INSN(smlal, 0b111, 0); 1077 1078 INSN(umulls, 0b100, 1); 1079 INSN(umlals, 0b101, 1); 1080 INSN(smulls, 0b110, 1); 1081 INSN(smlals, 0b111, 1); 1082 1083 #undef INSN 1084 1085 //Saturating addition and subtraction 1086 #define INSN(NAME, decode) \ 1087 void NAME(Register Rd, Register Rm, Register Rn, Condition cond = C_DFLT) { \ 1088 starti; \ 1089 f(cond, 31, 28), f( 0b00010, 27, 23), f(decode, 22, 21), f(0, 20); \ 1090 rf(Rn, 16), rf(Rd, 12), f( 0b00000101, 11, 4), rf(Rm, 0); \ 1091 } 1092 INSN(qadd, 0b00); 1093 INSN(qsub, 0b01); 1094 INSN(qdadd, 0b10); 1095 INSN(qdsub, 0b11); 1096 #undef INSN 1097 1098 // Halfword multiply and multiply accumulate 1099 void mul_instr(int decode, Register Ra, Register Rb, Register Rc, Register Rd, 1100 bool N, bool M, Condition cond) { 1101 starti; 1102 f(cond, 31, 28), f(0b00010, 27, 23), f(decode, 22, 21), f(0, 20); 1103 rf(Ra, 16), rf(Rb, 12), rf(Rc, 8), f(1, 7), f(M, 6), f(N, 5), f(0, 4); 1104 rf(Rd, 0); 1105 } 1106 1107 #define INSN(NAME, decode, N, M) \ 1108 void NAME(Register Rd, Register Rn, Register Rm, Register Ra, \ 1109 Condition cond = C_DFLT) { \ 1110 mul_instr(decode, Rd, Ra, Rm, Rn, N, M, cond); \ 1111 } 1112 INSN(smlabb, 0b00, 0, 0); 1113 INSN(smlabt, 0b00, 0, 1) 1114 INSN(smlatb, 0b00, 1, 0) 1115 INSN(smlatt, 0b00, 1, 1) 1116 1117 INSN(smlawb, 0b01, 0, 0); 1118 INSN(smlawt, 0b01, 0, 1); 1119 #undef INSN 1120 1121 #define 
INSN(NAME, decode, N, M) \ 1122 void NAME(Register RdLo, Register RdHi, Register Rn, Register Rm, \ 1123 Condition cond = C_DFLT) { \ 1124 mul_instr(decode, RdHi, RdLo, Rm, Rn, N, M, cond); \ 1125 } 1126 INSN(smlalbb, 0b10, 0, 0); 1127 INSN(smlalbt, 0b10, 0, 1); 1128 INSN(smlaltb, 0b10, 1, 0); 1129 INSN(smlaltt, 0b10, 1, 1); 1130 #undef INSN 1131 1132 #define INSN(NAME, decode, N, M) \ 1133 void NAME(Register Rd, Register Rn, Register Rm, Condition cond = C_DFLT) { \ 1134 mul_instr(decode, Rd, ZERO_ADDR_REG, Rm, Rn, N, M, cond); \ 1135 } 1136 INSN(smulwb, 0b01, 1, 0); 1137 INSN(smulwt, 0b01, 1, 1); 1138 1139 INSN(smulbb, 0b11, 0, 0); 1140 INSN(smulbt, 0b11, 0, 1); 1141 INSN(smultb, 0b11, 1, 0); 1142 INSN(smultt, 0b11, 1, 1); 1143 #undef INSN 1144 1145 // For Extra load/store instructions, see load/store section 1146 // For Synchronization primitives, see load/store section 1147 1148 // MSR(immediate), and hints 1149 #define INSN(NAME, decode) \ 1150 void NAME(Condition cond = C_DFLT) { \ 1151 starti; \ 1152 f(cond, 31, 28), f(0b001100100000, 27, 16), f(0b11110000, 15, 8); \ 1153 f(decode, 7, 0); \ 1154 } 1155 INSN(nop, 0b000); 1156 INSN(yield, 0b001); 1157 INSN(wfe, 0b010); 1158 INSN(wfi, 0b011); 1159 INSN(sev, 0b100); 1160 void dbg(int dbg_hint, Condition cond = C_DFLT) { 1161 f(cond, 31, 28), f(0b001100100000, 27, 16), f(0b11110000, 15, 8); 1162 f(0b1111, 7, 4); f(dbg_hint, 3, 0); 1163 } 1164 #undef INSN 1165 1166 //TODO Misc instructions 1167 void bkpt(unsigned imm) { 1168 starti; 1169 f(AL, 31, 28), f(0b00010010, 27, 20); 1170 f(imm >> 4, 19, 8), f(0b0111, 7, 4), f(imm & 0xf, 3, 0); 1171 } 1172 void hlt(unsigned imm) { 1173 bkpt(imm); 1174 // FIXME This seemed like the best option! 
1175 } 1176 1177 // Load/store register (all modes) 1178 void load_store_instr(Register Rt, const Address &adr, int op, int op2, int a, int b, 1179 Condition cond) { 1180 starti; 1181 f(cond, 31, 28), f(op, 27, 25), f(a, 22), f(b, 20); 1182 if(op2 >= 0) 1183 f(op2, 7, 4); 1184 //Destination 1185 rf(Rt, 12); 1186 adr.encode(current, code_section(), pc()); 1187 } 1188 1189 bool encodeable(int decode, address dest) { 1190 long offset = dest - pc(); 1191 switch(decode) { 1192 case 0b010: 1193 // LDR, LDRB, STR, STRB 1194 return uabs(offset) < (1 << 12); 1195 case 0b000: 1196 //LDRD, LDRH, LDRSB, LDRSH, STRH, STRD 1197 return uabs(offset) < (1 << 8); 1198 default: 1199 ShouldNotReachHere(); 1200 } 1201 return false; 1202 } 1203 1204 1205 1206 #define INSN_INT(NAME, op, op2, a, b, isload) \ 1207 void NAME(Register Rt, address dest, Condition cond = C_DFLT) { \ 1208 if(encodeable(op, dest)) { /* Plan A */ \ 1209 long offset = dest - pc(); \ 1210 NAME(Rt, Address(r15_pc, offset), cond); \ 1211 } else if(isload){ /* Plan B */ \ 1212 /* TODO check we don't have to relocate this*/ \ 1213 mov_immediate(Rt, (u_int32_t)dest, cond, false); \ 1214 NAME(Rt, Address(Rt, 0), cond); \ 1215 } else { /* There is no plan C */ \ 1216 ShouldNotReachHere(); \ 1217 } \ 1218 } \ 1219 void NAME(Register Rt, address dest, relocInfo::relocType rtype, \ 1220 Condition cond = C_DFLT) { \ 1221 guarantee(rtype == relocInfo::internal_word_type, \ 1222 "only internal_word_type relocs make sense here"); \ 1223 NAME(Rt, InternalAddress(dest), cond); \ 1224 } \ 1225 void NAME(Register Rt, Label &L, Condition cond = C_DFLT) { \ 1226 wrap_label(Rt, L, cond, &Assembler::NAME); \ 1227 } 1228 1229 #define INSN(NAME, op, op2, a, b, isload) \ 1230 void NAME(Register Rt, const Address &adr, Condition cond = C_DFLT) { \ 1231 load_store_instr(Rt, adr, op, op2, a, b, cond); \ 1232 } \ 1233 INSN_INT(NAME, op, op2, a, b, isload); 1234 INSN(ldr, 0b010, -1, 0, 1, 1); 1235 INSN(ldrb, 0b010, -1, 1, 1, 1); 1236 1237 
  INSN(ldrsb, 0b000, 0b1101, 0, 1, 1);
  INSN(ldrh, 0b000, 0b1011, 0, 1, 1);
  INSN(ldrsh, 0b000, 0b1111, 0, 1, 1);

  INSN(str, 0b010, -1, 0, 0, 0);
  INSN(strb, 0b010, -1, 1, 0, 0);
  INSN(strh, 0b000, 0b1011, 0, 0, 0);
  //Note LDRD & STRD are defined with the load/store multiple instructions

  //TODO Need to introduce ldrsb ldrsh - then check that the encoding works properly!
#undef INSN


  //Synchronization primitives
  // Common encoder for the exclusive-access instruction class (bits 27:24 =
  // 0b0001, bits 7:4 = 0b1001); the four register slots map to fields
  // 16/12/8/0 respectively.
  void sync_instr(int decode, Register Ra, Register Rb, Register Rc, Register Rd,
                  Condition cond) {
    starti;
    f(cond, 31, 28), f(0b0001, 27, 24), f(decode, 23, 20), rf(Ra, 16), rf(Rb, 12);
    rf(Rc, 8), f(0b1001, 7, 4), rf(Rd, 0);
  }

  // Store-exclusive: Rd receives the success status, Rt is stored to [Rn].
  // Unused register fields are filled with ONES_ADDR_REG as the ARM encoding
  // requires.
#define INSN(NAME, decode)                                                    \
  void NAME(Register Rd, Register Rt, Register Rn, Condition cond = C_DFLT) { \
    assert(r15_pc != Rn, "Unpredictable");                                    \
    sync_instr(decode, Rn, Rd, ONES_ADDR_REG, Rt, cond);                      \
  }
  INSN( strex, 0b1000);
  INSN(strexd, 0b1010);
  INSN(strexb, 0b1100);
  INSN(strexh, 0b1110);
#undef INSN

  // Load-exclusive: Rt is loaded from [Rn] and the exclusive monitor is set.
#define INSN(NAME, decode)                                              \
  void NAME(Register Rt, Register Rn, Condition cond = C_DFLT) {        \
    assert(r15_pc != Rn, "Unpredictable");                              \
    sync_instr(decode, Rn, Rt, ONES_ADDR_REG, ONES_ADDR_REG, cond);     \
  }
  INSN(ldrex, 0b1001);
  INSN(ldrexd, 0b1011);
  INSN(ldrexb, 0b1101);
  INSN(ldrexh, 0b1111);
#undef INSN

  // Media instructions
  // Fills only the class/decode fields; callers invoke starti themselves and
  // fill the register fields afterwards.
  void media_instr(int decode, int decode2, Condition cond) {
    f(cond, 31, 28), f(0b011, 27, 25), f(decode, 24, 20);
    f(decode2, 7, 5), f(1, 4);
  }

  // Parallel (SIMD-in-register) add/subtract on packed halfwords/bytes.
#define INSN(NAME, decode, decode2)                                           \
  void NAME(Register Rd, Register Rn, Register Rm, Condition cond = C_DFLT) { \
    starti;                                                                   \
    media_instr(0b00000 | decode, decode2, cond);                             \
    rf(Rn, 16), rf(Rd, 12), f(0b1111, 11, 8), rf(Rm, 0);                      \
  }
  INSN(sadd16, 0b01, 0b000);
  INSN(sasx, 0b01, 0b001);
  INSN(ssax, 0b01, 0b010);
  INSN(ssub16, 0b01, 0b011);
  INSN(sadd8, 0b01, 0b100);
  INSN(ssub8, 0b01, 0b111);
  //Saturating
  INSN(qadd16, 0b10, 0b000);
  INSN(qasx, 0b10, 0b001);
  INSN(qsax, 0b10, 0b010);
  INSN(qsub16, 0b10, 0b011);
  INSN(qadd8, 0b10, 0b100);
  INSN(qsub8, 0b10, 0b111);
  //Halving
  INSN(shadd16, 0b11, 0b000);
  INSN(shasx, 0b11, 0b001);
  INSN(shsax, 0b11, 0b010);
  INSN(shsub16, 0b11, 0b011);
  INSN(shadd8, 0b11, 0b100);
  INSN(shsub8, 0b11, 0b111);

  //Now unsigned
  INSN(uadd16, 0b101, 0b000);
  INSN(uasx, 0b101, 0b001);
  INSN(usax, 0b101, 0b010);
  INSN(usub16, 0b101, 0b011);
  INSN(uadd8, 0b101, 0b100);
  INSN(usub8, 0b101, 0b111);
  //Saturating
  INSN(uqadd16, 0b110, 0b000);
  INSN(uqasx, 0b110, 0b001);
  INSN(uqsax, 0b110, 0b010);
  INSN(uqsub16, 0b110, 0b011);
  INSN(uqadd8, 0b110, 0b100);
  INSN(uqsub8, 0b110, 0b111);
  //Halving
  INSN(uhadd16, 0b111, 0b000);
  INSN(uhasx, 0b111, 0b001);
  INSN(uhsax, 0b111, 0b010);
  INSN(uhsub16, 0b111, 0b011);
  INSN(uhadd8, 0b111, 0b100);
  INSN(uhsub8, 0b111, 0b111);
#undef INSN

  //Packing, unpacking, saturation and reversal
  // Note rotation can only be one of ROR #0 ROR #8 ROR #16 ROR #24
  // Common encoder for the sign/zero-extend (and reverse) class; the rotate
  // amount is encoded as a 2-bit field in bits 11:10.
  void extend_instr(int decode, int decode2, int decode3, Register Rd, Register Rn,
                    Register Rm, shift_op shift, Condition cond) {
    starti;
    assert(0 == shift.shift() ||
           shift_op::ROR == shift.kind(), "Only ROR may be used for op");
    // All zero shifts are mapped to LSL #0
    int shift_enc = 0;
    switch(shift.shift()) {
    case 0: break;
    case 8: shift_enc = 1; break;
    case 16: shift_enc = 2; break;
    case 24: shift_enc = 3; break;
    default: assert(false, "Invalid shift quantity");
    }
    media_instr(0b01000 | decode, decode2, cond);
    rf(Rn, 16), rf(Rd, 12), f(shift_enc, 11, 10), f(decode3, 9, 8), rf(Rm, 0);
  }

  // Extend-and-add: Rd = Rn + extend(ror(Rm, shift)).
#define INSN(NAME, decode, decode2)                                               \
  void NAME(Register Rd, Register Rn, Register Rm, shift_op shift = ::ror(),      \
            Condition cond = C_DFLT) {                                            \
    assert(0xf != Rn->encoding_nocheck(), "Rn = pc makes different instruction"); \
    extend_instr(decode, decode2, 0b00, Rd, Rn, Rm, shift, cond);                 \
  }
  INSN(sxtab16, 0b000, 0b011);
  INSN(sxtab, 0b010, 0b011);
  INSN(sxtah, 0b011, 0b011);
  INSN(uxtab16, 0b100, 0b011);
  INSN(uxtab, 0b110, 0b011);
  INSN(uxtah, 0b111, 0b011);
#undef INSN

  // Plain extend: Rd = extend(ror(Rm, shift)); Rn field is all-ones.
#define INSN(NAME, decode, decode2)                                      \
  void NAME(Register Rd, Register Rm, shift_op shift = ::ror(),          \
            Condition cond = C_DFLT) {                                   \
    extend_instr(decode, decode2, 0b00, Rd, ONES_ADDR_REG, Rm, shift, cond); \
  }
  INSN(sxtb16, 0b000, 0b011);
  INSN(sxtb, 0b010, 0b011);
  INSN(sxth, 0b011, 0b011);
  INSN(uxtb16, 0b100, 0b011);
  INSN(uxtb, 0b110, 0b011);
  INSN(uxth, 0b111, 0b011);
#undef INSN

  //Reverse instructions
#define INSN(NAME, decode, decode2)                                           \
  void NAME(Register Rd, Register Rm, Condition cond = C_DFLT) {              \
    extend_instr(decode, decode2, 0b11, Rd, ONES_ADDR_REG, Rm, ::ror(24), cond); \
  }
  INSN(rev, 0b011, 0b001);
  INSN(rev16, 0b011, 0b101);
  INSN(rbit, 0b111, 0b001);
  INSN(revsh, 0b111, 0b101);
#undef INSN

  // Signed multiply, signed and unsigned divide
#define INSN(NAME, decode, decode2)                                           \
  void NAME(Register Rd, Register Rn, Register Rm, Condition cond = C_DFLT) { \
    starti;                                                                   \
    media_instr(0b10000 | decode, decode2, cond);                             \
    rf(Rd, 16), f(0b1111, 15, 12), rf(Rm, 8), rf(Rn, 0);                      \
  }
  INSN(sdiv, 0b001, 0b000);
  INSN(udiv, 0b011, 0b000);
  //TODO ALL THE REST!
#undef INSN

  // Remainder of things
  //TODO USAD8
  // Bit-field extract: Rd = Rn<lsb+width-1:lsb>, sign- or zero-extended.
  // The hardware encodes width as (width - 1) in bits 20:16.
#define INSN(NAME, decode, decode2)                                    \
  void NAME(Register Rd, Register Rn, int lsb, int width,              \
            Condition cond = C_DFLT) {                                 \
    starti;                                                            \
    assert(lsb >= 0 && lsb < 32, "lsb out of range");                  \
    assert(width > 0 && width <= 32 - lsb, "width out of range");      \
    media_instr(decode, decode2, cond);                                \
    f(width - 1, 20, 16), rf(Rd, 12), f(lsb, 11, 7), rf(Rn, 0);        \
  }
  INSN(sbfx, 0b11010, 0b010);
  INSN(ubfx, 0b11110, 0b010);
#undef INSN

  // Bit-field insert: copy the low 'width' bits of Rn into Rd at 'lsb'.
  // BFI/BFC encode the msb (inclusive) rather than the width.
  void bfi(Register Rd, Register Rn, int lsb, int width, Condition cond = C_DFLT) {
    assert(VM_Version::features() & (FT_ARMV6T2 | FT_ARMV7), "unsupported on the cpu");
    int msb = lsb + width - 1;
    assert(lsb >= 0 && lsb < 32, "lsb out of range");
    assert(msb < 32 && msb >= lsb, "width out of range");
    starti;
    media_instr(0b11100, 0b000, cond);
    f(msb, 20, 16), rf(Rd, 12), f(lsb, 11, 7), rf(Rn, 0);
  }

  // Bit-field clear: zero 'width' bits of Rd starting at 'lsb' (BFI with the
  // all-ones register field).
  void bfc(Register Rd, int lsb, int width, Condition cond = C_DFLT) {
    assert(VM_Version::features() & (FT_ARMV6T2 | FT_ARMV7), "unsupported on the cpu");
    int msb = lsb + width - 1;
    assert(lsb >= 0 && lsb < 32, "lsb out of range");
    assert(msb < 32 && msb >= lsb, "width out of range");
    starti;
    media_instr(0b11100, 0b000, cond);
    f(msb, 20, 16), rf(Rd, 12), f(lsb, 11, 7), f(0b1111, 3, 0);
  }

  //Branch, branch with link, and block data transfer

  // Block data transfer encoder; 'w' is the write-back bit (bit 21 of the
  // instruction, hence the (w << 1) merged into the decode field).
  void block_imm_instr(int decode, int w, Register Rn, unsigned regset,
                       Condition cond) {
    starti;
    f(cond, 31, 28), f(0b10, 27, 26), f(decode | (w << 1), 25, 20);
    rf(Rn, 16), f(regset, 15, 0);
  }
  // LDM/STM in all four addressing modes; each mode also gets its
  // stack-oriented alias (e.g. stmdb == stmfd).
#define INSN(NAME, decode)                                                           \
  void NAME(Register Rn, unsigned regset, bool wb = true, Condition cond = C_DFLT) { \
    block_imm_instr(decode, wb, Rn, regset, cond);                                   \
  }
  INSN(stmda, 0b000000);
  INSN(stmed, 0b000000);

  INSN(ldmda, 0b000001);
  INSN(ldmfa, 0b000001);

  //INSN(stm, 0b001000);
  INSN(stmia, 0b001000);
  INSN(stmea, 0b001000);

  //INSN(ldm, 0b001001);
  INSN(ldmia, 0b001001);
  INSN(ldmfd, 0b001001);

  INSN(stmdb, 0b010000);
  INSN(stmfd, 0b010000);

  INSN(ldmdb, 0b010001);
  INSN(ldmea, 0b010001);

  INSN(stmib, 0b011000);
  INSN(stmfa, 0b011000);

  INSN(ldmib, 0b011001);
  INSN(ldmed, 0b011001);
#undef INSN

  unsigned count_bits(unsigned val);
  bool can_ldst_multiple( unsigned regset, const Address& adr);

  //NOTE!! Have repurposed stm and ldm for auto dispatch instructions
  // Dispatches to the ia/ib/da/db variant implied by the Address's
  // write-back mode and offset sign.
#define INSN(NAME, PREFIX)                                                   \
  void NAME(unsigned regset, const Address& adr, Condition cond = C_DFLT) {  \
    assert(can_ldst_multiple(regset, adr), "Can't do anything with this!");  \
    int offset = adr.offset();                                               \
    switch(adr.get_wb_mode()) {                                              \
    case Address::pre:                                                       \
      if(offset > 0) PREFIX##mib(adr.base(), regset, true, cond);            \
      else PREFIX##mdb(adr.base(), regset, true, cond);                      \
      break;                                                                 \
    case Address::post:                                                      \
      if(offset > 0) PREFIX##mia(adr.base(), regset, true, cond);            \
      else PREFIX##mda(adr.base(), regset, offset != 0, cond);               \
      break;                                                                 \
    case Address::off:                                                       \
      if(offset > 0) PREFIX##mib(adr.base(), regset, false, cond);           \
      else if(!offset) PREFIX##mia(adr.base(), regset, false, cond);         \
      else PREFIX##mdb(adr.base(), regset, false, cond);                     \
      break;                                                                 \
    default:                                                                 \
      ShouldNotReachHere();                                                  \
    }                                                                        \
  }
  INSN(ldm, ld);
  INSN(stm, st);
#undef INSN

  //Made push and pop operate on full descending stacks
#define INSN(NAME, CNAME)                                        \
  inline void NAME(unsigned regset, Condition cond = C_DFLT) {   \
    CNAME(r13, regset, true, cond);                              \
  }
  INSN(pop, ldmia);
  INSN(push, stmdb);
#undef INSN

 public:

  // LDRD/STRD reuse the extra load/store encoding path plus the
  // address/Label overloads from INSN_INT.
#define INSN(NAME, PREFIX, op, op2, a, b, isload)                       \
  void NAME(Register Rt, const Address& adr, Condition cond = C_DFLT) { \
    load_store_instr(Rt, adr, op, op2, a, b, cond);                     \
  }                                                                     \
  INSN_INT(NAME, op, op2, a, b, isload);

  INSN(ldrd, ld, 0b000, 0b1101, 0, 0, 1);
  INSN(strd, st, 0b000, 0b1111, 0, 0, 0);
#undef INSN
#undef INSN_INT

  // Branches

  // For immediate branches:
  // The maximum range of a branch is fixed for the aarch32
  // architecture. In debug mode we shrink it in order to test
  // trampolines, but not so small that branches in the interpreter
  // are out of range.
  static const unsigned long branch_range = NOT_DEBUG(32 * M) DEBUG_ONLY(2 * M);
  static bool reachable_from_branch_at(address branch, address target) {
    return uabs(target - branch) < branch_range;
  }

  // Immediate branch: the 24-bit signed word offset is relative to pc + 8.
  void branch_imm_instr(int decode, address dest, Condition cond) {
    starti;
    // Correct PC for as it will be when executing this instruction
    int offset = (dest - (pc() + 8)) >> 2;
    assert(reachable_from_branch_at(pc(), dest), "branch target unreachable");
    f(cond, 31, 28), f(decode, 27, 24), sf(offset, 23, 0);
  }

  // Register branch (BX/BLX-class encodings).
  void branch_reg_instr(int decode, Register Rm, Condition cond) {
    starti;
    f(cond, 31, 28), f(0b00010010, 27, 20);
    f(0b111111111111, 19, 8), f(decode, 7, 4), rf(Rm, 0);
  }

#define INSN(NAME, decode_imm, decode_reg)            \
  void NAME(Register Rm, Condition cond = C_DFLT) {   \
    branch_reg_instr(decode_reg, Rm, cond);           \
  }                                                   \
  void NAME(address dest, Condition cond = C_DFLT) {  \
    branch_imm_instr(decode_imm, dest, cond);         \
  }                                                   \
  void NAME(Label &L, Condition cond = C_DFLT) {      \
    wrap_label(L, cond, &Assembler::NAME);            \
  }                                                   \
  void NAME(const Address &dest, Condition cond = C_DFLT) { \
    code_section()->relocate(pc(), dest.rspec());     \
    NAME(dest.target(), cond);                        \
  }
  //TODO assert type of address
  INSN(b, 0b1010, 0b0001); // B & BX
  INSN(bl, 0b1011, 0b0011); // BL &
  // BLX
#undef INSN


  //TODO Coprocessor instructions, and Supervisor Call


  // Unconditional Instructions
  // Barrier option encodings (shareability domain + access-type bits).
  enum barrier {OSHST = 0b0010, OSH,
                NSHST = 0b0110, NSH,
                ISHST = 0b1010, ISH,
                ST = 0b1110, SY};

  // Unconditional synchronization-barrier encoder (CLREX/DSB/DMB/ISB class).
  void sync_instr(int decode, enum barrier option) {
    starti;
    f(0b11110, 31, 27), f(0b1010111, 26, 20), f(0b111111110000, 19, 8);
    f(decode, 7, 4), f(option, 3, 0);
  }
  void clrex() {
    sync_instr(0b0001, SY);
  }
  void dsb(enum barrier option) {
    sync_instr(0b0100, option);
  }
  // NOTE(review): this uses the same decode (0b0100) as dsb, i.e. it emits a
  // DSB rather than a DMB — presumably a deliberate conservative
  // strengthening (DSB implies DMB's ordering); confirm against the port's
  // intent before changing.
  void dmb(enum barrier option) {
    sync_instr(0b0100, option);
  }
  void bkpt();
  void isb() {
    sync_instr(0b0110, SY);
  }

  // And the relevant instructions for ARMv6.

  // MCR<c> <coproc>, <opc1>, <Rt>, <CRn>, <CRm>{, <opc2>}
  void mcr(int cpc_dex, int opc1, Register Rt, int cpc_reg_dex1,
           int cpc_reg_dex2, int opc2, Condition cond = C_DFLT) {
    starti;
    f(cond, 31, 28), f(0b1110, 27, 24), f(opc1, 23, 21), f(0, 20);
    f(cpc_reg_dex1, 19, 16), rf(Rt, 12), f(cpc_dex, 11, 8);
    f(opc2, 7, 5), f(1, 4), f(cpc_reg_dex2, 3, 0);
  }

  // These instructions do not read the value of the register passed,
  // can be any. Chosen r0.
  void cp15dmb(Condition cond = C_DFLT) {
    mcr(15, 0, r0, 7, 10, 5, cond);
  }

  void cp15dsb(Condition cond = C_DFLT) {
    mcr(15, 0, r0, 7, 10, 4, cond);
  }

  void cp15isb(Condition cond = C_DFLT) {
    mcr(15, 0, r0, 7, 5, 4, cond);
  }

  enum Membar_mask_bits {
    // We can use ISH for a barrier because the ARM ARM says "This
    // architecture assumes that all Processing Elements that use the
    // same operating system or hypervisor are in the same Inner
    // Shareable shareability domain."
    StoreStore = ISHST,
    LoadStore = ISH, //ISHLD, Changed to
    LoadLoad = ISH, //ISHLD,
    StoreLoad = ISH,
    AnyAny = ISH
  };

  // MRS: read the status register into Rd.
  void mrs(Register Rd, Condition cond = C_DFLT) {
    starti;
    f(cond, 31, 28), f(0b00010, 27, 23), f(0, 22), f(0b00, 21, 20), f(0b1111, 19, 16);
    rf(Rd, 12), f(0b000000000000, 11, 0);
  }

  // MSR (register): write Rn to the status register; nzcvq/g select the
  // flag and GE-bits mask fields.
  void msr(Register Rn, bool nzcvq = true, bool g = true, Condition cond = C_DFLT) {
    starti;
    f(cond, 31, 28), f(0b00010, 27, 23), f(0, 22), f(0b10, 21, 20);
    f(nzcvq ? 1 : 0, 19), f(g ? 1 : 0, 18), f(0b00, 17, 16);
    f(0b111100000000, 15, 4), rf(Rn, 0);
  }

  // Floating point operations

  // FPSCR condition-flag patterns (as positioned in bits 31:28).
  enum fpscr_cond { FP_EQ = 0b0110 << 28,
                    FP_LT = 0b1000 << 28,
                    FP_GT = 0b0010 << 28,
                    FP_UN = 0b0011 << 28,
                    FP_MASK = 0b1111 << 28 };

  // Common fixed fields for VFP data-processing instructions.
  void fp_instr_base(bool is64bit, Condition cond) {
    f(cond, 31, 28), f(0b1110, 27, 24), f(0b101, 11, 9), f(is64bit, 8), f(0, 4);
  }

  // Encode a float register number into a 4-bit field plus its odd/even or
  // high bit, depending on single/double register numbering.
  void fp_rencode(FloatRegister reg, bool is64bit, int base, int bit) {
    int reg_val = reg->encoding_nocheck();
    if(!is64bit) {
      f( reg_val >> 1, base + 3, base);
      f( reg_val & 1, bit);
    } else {
      f( reg_val & 0xf, base + 3, base);
      f( reg_val >> 4, bit);
    }
  }

  // Three-operand VFP arithmetic encoder.
  void fp_instr(int decode, int op, bool is64bit, FloatRegister Rd, FloatRegister Rn,
                FloatRegister Rm, Condition cond) {
    fp_instr_base(is64bit, cond);
    f(decode, 23, 20), f(op, 6);
    // Register encoding is a bit involved
    // double register passed (see 'd0'-'dN' encoding), not reencode it's number
    fp_rencode(Rn, false, 16, 7);
    fp_rencode(Rd, false, 12, 22);
    fp_rencode(Rm, false, 0, 5);
  }

#define INSN(NAME, decode, op, is64bit)                              \
  void NAME(FloatRegister Rd, FloatRegister Rn, FloatRegister Rm,    \
            Condition cond = C_DFLT) {                               \
    starti;                                                          \
    fp_instr(decode, op, is64bit, Rd, Rn, Rm, cond);                 \
  }
  INSN(vmla_f32, 0b0000, 0, 0);
  INSN(vmla_f64, 0b0000, 0, 1);
  INSN(vmls_f32, 0b0000, 1, 0);
  INSN(vmls_f64, 0b0000, 1, 1);

  INSN(vnmla_f32, 0b0001, 1, 0);
  INSN(vnmla_f64, 0b0001, 1, 1);
  INSN(vnmls_f32, 0b0001, 0, 0);
  INSN(vnmls_f64, 0b0001, 0, 1);
  INSN(vnmul_f32, 0b0010, 1, 0);
  INSN(vnmul_f64, 0b0010, 1, 1);
  INSN(vmul_f32, 0b0010, 0, 0);
  INSN(vmul_f64, 0b0010, 0, 1);

  INSN(vadd_f32, 0b0011, 0, 0);
  INSN(vadd_f64, 0b0011, 0, 1);
  INSN(vsub_f32, 0b0011, 1, 0);
  INSN(vsub_f64, 0b0011, 1, 1);

  INSN(vdiv_f32, 0b1000, 0, 0);
  INSN(vdiv_f64, 0b1000, 0, 1);

  INSN(vfnma_f32, 0b1001, 1, 0);
  INSN(vfnma_f64, 0b1001, 1, 1);
  INSN(vfnms_f32, 0b1001, 0, 0);
  INSN(vfnms_f64, 0b1001, 0, 1);

  INSN(vfma_f32, 0b1010, 0, 0);
  INSN(vfma_f64, 0b1010, 0, 1);
  INSN(vfms_f32, 0b1010, 1, 0);
  INSN(vfms_f64, 0b1010, 1, 1);
#undef INSN


  void vmov_imm(FloatRegister Rd, unsigned imm, bool is64bit, Condition cond);
  void vmov_imm_zero(FloatRegister Rd, bool is64bit, Condition cond);

  unsigned encode_float_fp_imm(float imm_f);

  void vmov_f32(FloatRegister Rd, float imm, Condition cond = C_DFLT) {
    vmov_imm(Rd, encode_float_fp_imm(imm), false, cond);
  }

  unsigned encode_double_fp_imm(double imm_f);

  void vmov_f64(FloatRegister Rd, double imm, Condition cond = C_DFLT) {
    // +0.0 has a dedicated encoding; -0.0 must go through the general path.
    bool positive_zero = (imm == 0.0) && !signbit(imm);
    if(positive_zero) vmov_imm_zero(Rd, true, cond);
    else vmov_imm(Rd, encode_double_fp_imm(imm), true, cond);
  }

  // Two-operand VFP register ops (move/abs/neg/sqrt).
#define INSN(NAME, decode, op, is64bit)                                         \
  void NAME(FloatRegister Rd, FloatRegister Rm, Condition cond = C_DFLT) {      \
    starti;                                                                     \
    fp_instr_base(is64bit, cond);                                               \
    f(0b1011, 23, 20), f(decode, 19, 16), f(op, 7, 6), f(0b00, 5, 4);           \
    /* double register passed (see 'd0'-'dN' encoding), not reencode it's number */ \
    fp_rencode(Rd, false, 12, 22);                                              \
    fp_rencode(Rm, false, 0, 5);                                                \
  }
  INSN(vmov_f32, 0b0000, 0b01, 0);
  INSN(vmov_f64, 0b0000, 0b01, 1);
  INSN(vabs_f32, 0b0000, 0b11, 0);
  INSN(vabs_f64, 0b0000, 0b11, 1);
  INSN(vneg_f32, 0b0001, 0b01, 0);
  INSN(vneg_f64, 0b0001, 0b01, 1);
  INSN(vsqrt_f32, 0b0001, 0b11, 0);
  INSN(vsqrt_f64, 0b0001, 0b11, 1);
#undef INSN

  //ARM -> FP, FP -> ARM
  // NOTE - Have only implemented the double precision variant as only operating on
  // double registers - can still be used to copy single precision
  // op selects direction: 0 = core regs -> FP reg, 1 = FP reg -> core regs.
  void vmov64_instr_base(FloatRegister Rm, Register Rt, Register Rt2, int op,
                         Condition cond) {
    starti;
    f(cond, 31, 28), f(0b1100010, 27, 21), f(op, 20);
    rf(Rt2, 16), rf(Rt, 12), f(0b101100, 11, 6), f(1, 4);
    // double register passed (see 'd0'-'dN' encoding), not reencode it's number
    fp_rencode(Rm, false, 0, 5);
  }

  void vmov_f64(FloatRegister Rm, Register Rt, Register Rt2, Condition cond = C_DFLT) {
    vmov64_instr_base(Rm, Rt, Rt2, 0, cond);
  }
  void vmov_f64(Register Rt, Register Rt2, FloatRegister Rm, Condition cond = C_DFLT) {
    vmov64_instr_base(Rm, Rt, Rt2, 1, cond);
  }

  // Single-precision core <-> FP register moves (bit 20 is the direction).
  void vmov_f32(FloatRegister Rn, Register Rt, Condition cond = C_DFLT) {
    starti;
    fp_instr_base(false, cond);
    f(0b000, 23, 21), f(0, 20);
    rf(Rt, 12), f(0b101000010000, 11, 0);
    // double register passed (see 'd0'-'dN' encoding), not reencode it's number
    fp_rencode(Rn, false, 16, 7);
  }
  void vmov_f32(Register Rt, FloatRegister Rn, Condition cond = C_DFLT) {
    starti;
    fp_instr_base(false, cond);
    f(0b000, 23, 21), f(1, 20);
    rf(Rt, 12), f(0b101000010000, 11, 0);
    // double register passed (see 'd0'-'dN' encoding), not reencode it's number
    fp_rencode(Rn, false, 16, 7);
  }

  // Floating-point comparison
  // First form compares against zero (imm must be 0), second against Vm; E
  // selects the exception-raising (vcmpe) variant.
#define INSN(NAME, E, is64bit)                                                        \
  void NAME(FloatRegister Rd, int imm, Condition cond = C_DFLT) {                     \
    assert(0 == imm, "vector compare can only be with another vector or zero");       \
    starti;                                                                           \
    fp_instr_base(is64bit, cond);                                                     \
    f(0b10110101, 23, 16), f(E, 7), f(0b1000000, 6, 0);                               \
    /* double register passed (see 'd0'-'dN' encoding), not reencode it's number */   \
    fp_rencode(Rd, false, 12, 22);                                                    \
  }                                                                                   \
  void NAME(FloatRegister Vd, FloatRegister Vm, Condition cond = C_DFLT) {            \
    starti;                                                                           \
    fp_instr_base(is64bit, cond);                                                     \
    f(0b10110100, 23, 16), f(E, 7), f(1, 6), f(0, 4);                                 \
    /* double register passed (see 'd0'-'dN' encoding), not reencode it's number */   \
    fp_rencode(Vd, false, 12, 22), fp_rencode(Vm, false, 0, 5);                       \
  }
  INSN(vcmpe_f64, 1, 1);
  INSN(vcmpe_f32, 1, 0);
  INSN( vcmp_f64, 0, 1);
  INSN( vcmp_f32, 0, 0);
#undef INSN

  //Move FPSCR to ARM register
  void vmrs(Register Rt, Condition cond = C_DFLT) {
    starti;
    f(cond, 31, 28), f(0b111011110001, 27, 16), rf(Rt, 12), f(0b101000010000, 11, 0);
  }

  //Move ARM register to FPSCR
  void vmsr(Register Rt, Condition cond = C_DFLT) {
    starti;
    f(cond, 31, 28), f(0b111011100001, 27, 16), rf(Rt, 12), f(0b101000010000, 11, 0);
  }

  // TODO These instructions use round towards zero mode.
  // It is possible
  // for the mode to be taken from the FPSCR however it doesn't do it currently
  // VCVT conversions between integer/single/double.
#define INSN(NAME, decode2, b19, op, is64bitRd, is64bitRm, sz)                      \
  void NAME(FloatRegister Rd, FloatRegister Rm, Condition cond = C_DFLT) {          \
    starti;                                                                         \
    fp_instr_base(sz, cond);                                                        \
    f(0b1011, 23, 20), f(b19, 19), f(decode2, 18, 16), f(op, 7), f(0b100, 6, 4);    \
    /* double register passed (see 'd0'-'dN' encoding), not reencode it's number */ \
    fp_rencode(Rd, false, 12, 22);                                                  \
    fp_rencode(Rm, false, 0, 5);                                                    \
  }
  INSN(vcvt_s32_f32, 0b101, 1, 1, 0, 0, 0);
  INSN(vcvt_s32_f64, 0b101, 1, 1, 0, 1, 1);
  INSN(vcvt_u32_f32, 0b100, 1, 1, 0, 0, 0);
  INSN(vcvt_u32_f64, 0b100, 1, 1, 0, 1, 1);

  INSN(vcvt_f64_s32, 0b000, 1, 1, 1, 0, 1);
  INSN(vcvt_f64_u32, 0b000, 1, 0, 1, 0, 1);
  INSN(vcvt_f32_s32, 0b000, 1, 1, 0, 0, 0);
  INSN(vcvt_f32_u32, 0b000, 1, 0, 0, 0, 0);

  INSN(vcvt_f32_f64, 0b111, 0, 1, 0, 1, 1);
  INSN(vcvt_f64_f32, 0b111, 0, 1, 1, 0, 0);
#undef INSN

  //Vector load/store
 private:
  void fp_ldst_instr(int decode, bool is64bit, const Address& adr, Condition cond);
 public:

  // VLDR/VSTR with Address, raw address (pc-relative), reloc'd address and
  // Label overloads.
#define INSN(NAME, decode, is64bit)                                                 \
  void NAME(FloatRegister Vd, const Address &adr, Condition cond = C_DFLT) {        \
    starti;                                                                         \
    fp_ldst_instr(decode, is64bit, adr, cond);                                      \
    /* double register passed (see 'd0'-'dN' encoding), not reencode it's number */ \
    fp_rencode(Vd, false, 12, 22);                                                  \
  }                                                                                 \
  void NAME(FloatRegister Vd, address dest, Condition cond = C_DFLT) {              \
    long offset = dest - pc();                                                      \
    NAME(Vd, Address(r15_pc, offset), cond);                                        \
  }                                                                                 \
  void NAME(FloatRegister Vd, address dest, relocInfo::relocType rtype,             \
            Condition cond = C_DFLT) {                                              \
    guarantee(rtype == relocInfo::internal_word_type,                               \
              "only internal_word_type relocs make sense here");                    \
    NAME(Vd, InternalAddress(dest), cond);                                          \
  }                                                                                 \
  void NAME(FloatRegister Vd, Label &L, Condition cond = C_DFLT) {                  \
    wrap_label(Vd, L, cond, &Assembler::NAME);                                      \
  }
  INSN(vstr_f64, 0b10000, 1);
  INSN(vstr_f32, 0b10000, 0);
  INSN(vldr_f64, 0b10001, 1);
  INSN(vldr_f32, 0b10001, 0);
#undef INSN

 private:
  enum fp_mode { ia_wb, ia, db_wb };
  void fp_ldst_mul(Register Rn, int regset, bool load, bool is64bit, enum fp_mode mode, Condition cond);
 public:
  // VLDM/VSTM: increment-after (optional write-back) and decrement-before
  // (write-back implied) forms, for single and double register sets.
#define INSN(NAME, EXT, is64bit, load)                                              \
  inline void NAME##ia##EXT(Register Rn, unsigned regset, bool wb = true,           \
                            Condition cond = C_DFLT) {                              \
    fp_ldst_mul(Rn, regset, load, is64bit,                                          \
                (enum fp_mode)( ia_wb + ( wb?0:1 )), cond);                         \
  }                                                                                 \
  inline void NAME##db##EXT(Register Rn, unsigned regset, Condition cond = C_DFLT) { \
    fp_ldst_mul(Rn, regset, load, is64bit, db_wb, cond);                            \
  }
  INSN(vldm, _f32, 0, 1);
  INSN(vldm, _f64, 1, 1);
  INSN(vstm, _f32, 0, 0);
  INSN(vstm, _f64, 1, 0);
#undef INSN

 public:
  // Preload hints; bit 23 encodes the sign of the offset, bit 22 (r)
  // distinguishes PLD (r = 1) from PLDW (r = 0).
#define INSN(NAME, r)                       \
  inline void NAME(Register Rb, int imm) {  \
    starti;                                 \
    f(0b1111, 31, 28);                      \
    f(0b0101, 27, 24), f(0b01, 21, 20);     \
    f(0b1111, 15, 12);                      \
    f(imm >= 0 ? 1 : 0, 23);                \
    f(r, 22);                               \
    rf(Rb, 16);                             \
    f(imm >= 0 ? imm : -imm, 11, 0);        \
  }
  INSN(pld, 1);
  INSN(pldw, 0);
#undef INSN

#undef ZERO_ADDR_REG
#undef ONES_ADDR_REG

  /* SIMD extensions
   *
   * We just use FloatRegister in the following. They are exactly the same
   * as SIMD registers.
   */
 public:
  // Alignment hint values passed through to simd_ld for element/structure
  // loads (standard alignment, or 64/128/256-bit aligned).
  enum SIMD_Align {
    ALIGN_STD = 0b00, ALIGN_64 = 0b01, ALIGN_128 = 0b10, ALIGN_256 = 0b11
  };
 private:
  // Emits a SIMD structure load; 'type' selects the 1..4-register form and
  // 'xfer_size' is the number of D registers transferred.
  void simd_ld(FloatRegister, unsigned type, unsigned size, unsigned xfer_size,
               const Address &addr, enum SIMD_Align align);
 public:
  // VLD1 with 1, 2, 3 or 4 consecutive D registers; the asserts enforce that
  // the register list is consecutive, as the encoding only names the first.
#define INSN(NAME, size)                                                        \
  inline void NAME(FloatRegister Dd, const Address &addr, enum SIMD_Align align) { \
    simd_ld(Dd, 0b0111, size, 1, addr, align);                                  \
  }                                                                             \
  inline void NAME(FloatRegister Dd, FloatRegister Dd1, const Address &addr,    \
                   enum SIMD_Align align) {                                     \
    assert(Dd->successor(FloatRegisterImpl::DOUBLE) == Dd1, "Must be consecutive"); \
    simd_ld(Dd, 0b1010, size, 2, addr, align);                                  \
  }                                                                             \
  inline void NAME(FloatRegister Dd, FloatRegister Dd1, FloatRegister Dd2,      \
                   const Address &addr, enum SIMD_Align align) {                \
    assert(Dd->successor(FloatRegisterImpl::DOUBLE) == Dd1, "Must be consecutive"); \
    assert(Dd1->successor(FloatRegisterImpl::DOUBLE) == Dd2, "Must be consecutive"); \
    simd_ld(Dd, 0b0110, size, 3, addr, align);                                  \
  }                                                                             \
  inline void NAME(FloatRegister Dd, FloatRegister Dd1, FloatRegister Dd2,      \
                   FloatRegister Dd3, const Address &addr, enum SIMD_Align align) { \
    assert(Dd->successor(FloatRegisterImpl::DOUBLE) == Dd1, "Must be consecutive"); \
    assert(Dd1->successor(FloatRegisterImpl::DOUBLE) == Dd2, "Must be consecutive"); \
    assert(Dd2->successor(FloatRegisterImpl::DOUBLE) == Dd3, "Must be consecutive"); \
    simd_ld(Dd, 0b0010, size, 4, addr, align);                                  \
  }
  // Suffix is the element size in bits.
  INSN(vld1_8, 0b00);
  INSN(vld1_16, 0b01);
  INSN(vld1_32, 0b10);
  INSN(vld1_64, 0b11);
#undef INSN

 private:
  // Common emitter for VMOV between a core register and a SIMD scalar;
  // bit20 selects the direction.
  void simd_vmov(FloatRegister Dd, unsigned index, Register Rt, bool advsimd,
                 unsigned index_bits, unsigned bit20, unsigned opc, Condition cond);
 public:
  // VMOV core register -> SIMD scalar (bit20 == 0).
  // NOTE(review): these default to Assembler::AL while the rest of this file
  // uses C_DFLT -- presumably equivalent, but confirm.
#define INSN(NAME, advsimd, opc, index_bits)                                    \
  inline void NAME(FloatRegister Rd, unsigned index, Register Rt,               \
                   Condition cond = Assembler::AL) {                            \
    simd_vmov(Rd, index, Rt, advsimd, index_bits, 0, opc, cond);                \
  }
  INSN(vmov_8, true, 0b1000, 2);
  INSN(vmov_16, true, 0b0001, 1);
  INSN(vmov_32, false, 0b0000, 0);
#undef INSN
  // VMOV SIMD scalar -> core register (bit20 == 1); per the naming, the
  // _s/_u suffixed forms are the signed/unsigned variants and differ only
  // in opc.
#define INSN(NAME, advsimd, opc, index_bits)                                    \
  inline void NAME(Register Rt, FloatRegister Rd, unsigned index,               \
                   Condition cond = Assembler::AL) {                            \
    simd_vmov(Rd, index, Rt, advsimd, index_bits, 1, opc, cond);                \
  }
  INSN(vmov_8s, true, 0b01000, 3);
  INSN(vmov_16s, true, 0b00001, 2);
  INSN(vmov_8u, true, 0b11000, 3);
  INSN(vmov_16u, true, 0b10001, 2);
  INSN(vmov_32, false, 0b00000, 1);
#undef INSN

 private:
  // VEOR emitter; q selects 64- vs 128-bit operation.
  void simd_eor(FloatRegister Dd, FloatRegister Dn, FloatRegister Dm, unsigned q);
 public:
#define INSN(NAME, q)                                                           \
  inline void NAME(FloatRegister Dd, FloatRegister Dn, FloatRegister Dm) {      \
    simd_eor(Dd, Dn, Dm, q);                                                    \
  }
  INSN(veor_64, 0);
  INSN(veor_128, 1);
#undef INSN

 private:
  // Common emitter for the VMUL/VMULL family; 'mul' distinguishes the plain
  // multiply forms from the long (widening) forms, bit6 is the Q bit for the
  // plain forms.
  void simd_vmul(FloatRegister Dd, FloatRegister Dn, FloatRegister Dm,
                 unsigned bit24, unsigned bit9, unsigned size, unsigned mul, unsigned bit6);
 public:
#define INSN(NAME, bit24, bit9, size, mul, bit6)                                \
  inline void NAME(FloatRegister Dd, FloatRegister Dn, FloatRegister Dm) {      \
    simd_vmul(Dd, Dn, Dm, bit24, bit9, size, mul, bit6);                        \
  }
  // Names: vmul_<width>_<elem>, vmulp = polynomial, vmull = long (widening),
  // with _s/_u marking the signed/unsigned long forms.
  INSN(vmul_64_8,    0, 0, 0b00, 1, 0);
  INSN(vmul_64_16,   0, 0, 0b01, 1, 0);
  INSN(vmul_64_32,   0, 0, 0b10, 1, 0);
  INSN(vmulp_64_8,   1, 0, 0b00, 1, 0);
  INSN(vmul_128_8,   0, 0, 0b00, 1, 1);
  INSN(vmul_128_16,  0, 0, 0b01, 1, 1);
  INSN(vmul_128_32,  0, 0, 0b10, 1, 1);
  INSN(vmulp_128_8,  1, 0, 0b00, 1, 1);
  INSN(vmull_8s,     0, 0, 0b00, 0, 0);
  INSN(vmull_16s,    0, 0, 0b01, 0, 0);
  INSN(vmull_32s,    0, 0, 0b10, 0, 0);
  INSN(vmull_8u,     1, 0, 0b00, 0, 0);
  INSN(vmull_16u,    1, 0, 0b01, 0, 0);
  INSN(vmull_32u,    1, 0, 0b10, 0, 0);
  INSN(vmullp_8,     0, 1, 0b00, 0, 0);
#undef INSN

 private:
  // VUZP emitter; q selects 64- vs 128-bit operation.
  void simd_vuzp(FloatRegister Dd, FloatRegister Dm, unsigned size, unsigned q);
 public:
#define INSN(NAME, size, q)                                                     \
  inline void NAME(FloatRegister Dd, FloatRegister Dm) {                        \
    simd_vuzp(Dd, Dm, size, q);                                                 \
  }
  INSN(vuzp_64_8,   0b00, 0);
  INSN(vuzp_64_16,  0b01, 0);
  INSN(vuzp_64_32,  0b10, 0);
  INSN(vuzp_128_8,  0b00, 1);
  INSN(vuzp_128_16, 0b01, 1);
  INSN(vuzp_128_32, 0b10, 1);
#undef INSN

 private:
  // Immediate shift emitter shared by VSHL (encode 0b0101) and VSHLL
  // (encode 0b1010).
  void simd_vshl(FloatRegister Dd, FloatRegister Dm, unsigned imm, unsigned size,
                 unsigned q, unsigned bit24, unsigned encode);
 public:
  // NOTE(review): the checkDd assert masks with 2 but the message says
  // "Odd register"; an odd-number check would be (encoding() & 1).  Confirm
  // which register-number restriction simd_vshl's encoding actually needs.
  // NOTE(review): vshll_8s passes q=1 while every other vshll form passes
  // q=0 -- this looks inconsistent; verify against the VSHLL encoding.
#define INSN(NAME, size, q, bit24, encode, checkDd)                             \
  inline void NAME(FloatRegister Dd, FloatRegister Dm, unsigned imm) {          \
    assert(!checkDd || (Dd->encoding() & 2) == 0, "Odd register");              \
    simd_vshl(Dd, Dm, imm, size, q, bit24, encode);                             \
  }
  INSN(vshl_64_8,   3, 0, 0, 0b0101, false);
  INSN(vshl_64_16,  4, 0, 0, 0b0101, false);
  INSN(vshl_64_32,  5, 0, 0, 0b0101, false);
  INSN(vshl_64_64,  6, 0, 0, 0b0101, false);
  INSN(vshl_128_8,  3, 1, 0, 0b0101, false);
  INSN(vshl_128_16, 4, 1, 0, 0b0101, false);
  INSN(vshl_128_32, 5, 1, 0, 0b0101, false);
  INSN(vshl_128_64, 6, 1, 0, 0b0101, false);
  INSN(vshll_8s,    3, 1, 0, 0b1010, true);
  INSN(vshll_8u,    3, 0, 1, 0b1010, true);
  INSN(vshll_16s,   4, 0, 0, 0b1010, true);
  INSN(vshll_16u,   4, 0, 1, 0b1010, true);
  INSN(vshll_32s,   5, 0, 0, 0b1010, true);
  INSN(vshll_32u,   5, 0, 1, 0b1010, true);
#undef INSN

 private:
  // VREV emitter; op selects the 16/32/64-bit reversal region, per the
  // instantiation names below.
  void simd_rev(FloatRegister Dd, FloatRegister Dm, unsigned q, unsigned size,
                unsigned op);
 public:
#define INSN(NAME, q, size, op)                                                 \
  inline void NAME(FloatRegister Dd, FloatRegister Dm) {                        \
    simd_rev(Dd, Dm, q, size, op);                                              \
  }
  // Names: vrev<region>_<width>_<elem>.
  INSN(vrev16_64_8,   0, 0, 2);
  INSN(vrev16_128_8,  1, 0, 2);
  INSN(vrev32_64_8,   0, 0, 1);
  INSN(vrev32_128_8,  1, 0, 1);
  INSN(vrev32_64_16,  0, 1, 1);
  INSN(vrev32_128_16, 1, 1, 1);
  INSN(vrev64_64_8,   0, 0, 0);
  INSN(vrev64_128_8,  1, 0, 0);
  INSN(vrev64_64_16,  0, 1, 0);
  INSN(vrev64_128_16, 1, 1, 0);
  INSN(vrev64_64_32,  0, 2, 0);
  INSN(vrev64_128_32, 1, 2, 0);
#undef INSN

 private:
  // ARMv8 CRC32 emitter; size selects the byte/halfword/word form.
  void v8_crc32(Register Rd, Register Rn, Register Rm, unsigned size, Condition cond);
 public:
#define INSN(NAME, size)                                                        \
  inline void NAME(Register Rd, Register Rn, Register Rm, Condition cond = C_DFLT) { \
    v8_crc32(Rd, Rn, Rm, size, cond);                                           \
  }
  INSN(crc32b, 0);
  INSN(crc32h, 1);
  INSN(crc32w, 2);
#undef INSN

  Assembler(CodeBuffer* code) : AbstractAssembler(code) {}

  // Delayed values are not used on this port; calling this is an error.
  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset) {
    ShouldNotCallThis();
    return RegisterOrConstant();
  }

  // Stack overflow checking
  virtual void bang_stack_with_offset(int offset);

  // Immediate values checks and transformations

  // Helpers for ARM's 12-bit encoded (modified) immediate form.
  static uint32_t encode_imm12(int imm);
  static int decode_imm12(uint32_t imm12);
  static bool is_valid_for_imm12(int imm);

  // True iff |imm| fits in an nbits-wide unsigned offset field.
  static bool is_valid_for_offset_imm(int imm, int nbits) {
    return uabs(imm) < (1u << nbits);
  }

  static bool operand_valid_for_logical_immediate(bool is32, uint64_t imm);
  // Overload set covers the integer widths/signednesses callers pass,
  // avoiding implicit-conversion ambiguity.
  static bool operand_valid_for_add_sub_immediate(int imm);
  static bool operand_valid_for_add_sub_immediate(unsigned imm);
  static bool operand_valid_for_add_sub_immediate(unsigned long imm);
  static bool operand_valid_for_add_sub_immediate(jlong imm);
  static bool operand_valid_for_float_immediate(float imm);
  static bool operand_valid_for_double_immediate(double imm);

  // Emit a 64-bit data value with the given relocation.
  void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
  void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);

  // useful to revert back the effect of post/pre addressing modifications
  // applied to the base register
  void compensate_addr_offset(const Address &adr, Condition cond) {
    compensate_addr_offset(adr.base(), adr.index(), adr.shift(), adr.op() == Address::ADD, cond);
  }
  // Undo the base-register update: if the offset was added, subtract it back
  // (and vice versa), with the shift "reversed" as below.
  // NOTE(review): for LSL/LSR the reverse is taken to be ASR (and for ASR,
  // LSL), which is not an arithmetic inverse of the original shift in
  // general; presumably the shift amounts callers use make this correct --
  // verify against the addressing modes that reach here.
  void compensate_addr_offset(Register Rd, Register Roff, shift_op shift, bool isAdd, Condition cond) {
    shift_op shift_back;

    if (shift.is_register()) {
      switch (shift.kind()) {
        case shift_op::LSL:
        case shift_op::LSR:
          shift_back = asr(shift.reg());
          break;
        case shift_op::ASR:
          shift_back = lsl(shift.reg());
          break;
        case shift_op::ROR:
          Unimplemented(); // need a temp register here
          break;
        default:
          ShouldNotReachHere();
      }
    } else {
      switch (shift.kind()) {
        case shift_op::LSL:
        case shift_op::LSR:
          shift_back = asr(shift.shift());
          break;
        case shift_op::ASR:
          shift_back = lsl(shift.shift());
          break;
        case shift_op::ROR:
          // rotating by 32-n undoes a rotation by n
          shift_back = ror(32-shift.shift());
          break;
        default:
          ShouldNotReachHere();
      }
    }
    if (isAdd)
      sub(Rd, Rd, Roff, shift_back, cond);
    else
      add(Rd, Rd, Roff, shift_back, cond);
  }
};

// Allow memory-barrier mask bits to be combined with '|'.
inline Assembler::Membar_mask_bits operator|(Assembler::Membar_mask_bits a,
                                             Assembler::Membar_mask_bits b) {
  return Assembler::Membar_mask_bits(unsigned(a)|unsigned(b));
}

// Emit the accumulated instruction when the builder object goes out of scope.
Instruction_aarch32::~Instruction_aarch32() {
  assem->emit();
}

#undef starti

// Invert a condition.  ARM condition codes come in complementary pairs that
// differ only in the low bit (e.g. EQ/NE), so flipping bit 0 negates the
// condition.  NOTE(review): presumably never applied to AL, which has no
// usable complement -- confirm callers.
inline const Assembler::Condition operator~(const Assembler::Condition cond) {
  return Assembler::Condition(int(cond) ^ 1);
}

class BiasedLockingCounters;

// Disassemble 'len' instruction slots starting at 'start' (debugging aid).
extern "C" void das(uint64_t start, int len);

#endif // CPU_AARCH32_VM_ASSEMBLER_AARCH32_HPP