1 /* 2 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 
 *
 */

#ifndef CPU_AARCH64_ASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_ASSEMBLER_AARCH64_HPP

#include "asm/register.hpp"
#include "utilities/powerOfTwo.hpp"

#ifdef __GNUC__

// __nop needs volatile so that compiler doesn't optimize it away
#define NOP() asm volatile ("nop");

#elif defined(_MSC_VER)

// Use MSVC intrinsic: https://docs.microsoft.com/en-us/cpp/intrinsics/arm64-intrinsics?view=vs-2019#I
#define NOP() __nop();

#endif



// definitions of various symbolic names for machine registers

// First intercalls between C and Java which use 8 general registers
// and 8 floating registers

// we also have to copy between x86 and ARM registers but that's a
// secondary complication -- not all code employing C call convention
// executes as x86 code though -- we generate some of it

// Counts of the registers used for argument passing by the C (AAPCS64)
// and Java calling conventions on AArch64.
class Argument {
 public:
  enum {
    n_int_register_parameters_c   = 8,  // r0, r1, ... r7 (c_rarg0, c_rarg1, ...)
    n_float_register_parameters_c = 8,  // v0, v1, ... v7 (c_farg0, c_farg1, ... )

    n_int_register_parameters_j   = 8,  // r1, ... r7, r0 (j_rarg0, j_rarg1, ...)
    n_float_register_parameters_j = 8   // v0, v1, ... v7 (j_farg0, j_farg1, ...)
  };
};

// C (native) integer argument registers, in AAPCS64 order.
REGISTER_DECLARATION(Register, c_rarg0, r0);
REGISTER_DECLARATION(Register, c_rarg1, r1);
REGISTER_DECLARATION(Register, c_rarg2, r2);
REGISTER_DECLARATION(Register, c_rarg3, r3);
REGISTER_DECLARATION(Register, c_rarg4, r4);
REGISTER_DECLARATION(Register, c_rarg5, r5);
REGISTER_DECLARATION(Register, c_rarg6, r6);
REGISTER_DECLARATION(Register, c_rarg7, r7);

// C (native) floating-point argument registers.
REGISTER_DECLARATION(FloatRegister, c_farg0, v0);
REGISTER_DECLARATION(FloatRegister, c_farg1, v1);
REGISTER_DECLARATION(FloatRegister, c_farg2, v2);
REGISTER_DECLARATION(FloatRegister, c_farg3, v3);
REGISTER_DECLARATION(FloatRegister, c_farg4, v4);
REGISTER_DECLARATION(FloatRegister, c_farg5, v5);
REGISTER_DECLARATION(FloatRegister, c_farg6, v6);
REGISTER_DECLARATION(FloatRegister, c_farg7, v7);

// Symbolically name the register arguments used by the Java calling convention.
// We have control over the convention for java so we can do what we please.
// What pleases us is to offset the java calling convention so that when
// we call a suitable jni method the arguments are lined up and we don't
// have to do much shuffling. A suitable jni method is non-static and a
// small number of arguments
//
// |--------------------------------------------------------------------|
// | c_rarg0 c_rarg1 c_rarg2 c_rarg3 c_rarg4 c_rarg5 c_rarg6 c_rarg7 |
// |--------------------------------------------------------------------|
// | r0 r1 r2 r3 r4 r5 r6 r7 |
// |--------------------------------------------------------------------|
// | j_rarg7 j_rarg0 j_rarg1 j_rarg2 j_rarg3 j_rarg4 j_rarg5 j_rarg6 |
// |--------------------------------------------------------------------|
// (i.e. Java integer arguments are rotated by one relative to C, so a
// JNI call only needs to shift to make room for the JNIEnv*/receiver.)


REGISTER_DECLARATION(Register, j_rarg0, c_rarg1);
REGISTER_DECLARATION(Register, j_rarg1, c_rarg2);
REGISTER_DECLARATION(Register, j_rarg2, c_rarg3);
REGISTER_DECLARATION(Register, j_rarg3, c_rarg4);
REGISTER_DECLARATION(Register, j_rarg4, c_rarg5);
REGISTER_DECLARATION(Register, j_rarg5, c_rarg6);
REGISTER_DECLARATION(Register, j_rarg6, c_rarg7);
REGISTER_DECLARATION(Register, j_rarg7, c_rarg0);

// Java floating args are passed as per C

REGISTER_DECLARATION(FloatRegister, j_farg0, v0);
REGISTER_DECLARATION(FloatRegister, j_farg1, v1);
REGISTER_DECLARATION(FloatRegister, j_farg2, v2);
REGISTER_DECLARATION(FloatRegister, j_farg3, v3);
REGISTER_DECLARATION(FloatRegister, j_farg4, v4);
REGISTER_DECLARATION(FloatRegister, j_farg5, v5);
REGISTER_DECLARATION(FloatRegister, j_farg6, v6);
REGISTER_DECLARATION(FloatRegister, j_farg7, v7);

// registers used to hold VM data either temporarily within a method
// or across method calls

// volatile (caller-save) registers

// r8 is used for indirect result location return
// we use it and r9 as scratch registers
REGISTER_DECLARATION(Register, rscratch1, r8);
REGISTER_DECLARATION(Register, rscratch2, r9);

// current method -- must be in a call-clobbered register
REGISTER_DECLARATION(Register, rmethod, r12);

// non-volatile
(callee-save) registers are r16-29 134 // of which the following are dedicated global state 135 136 // link register 137 REGISTER_DECLARATION(Register, lr, r30); 138 // frame pointer 139 REGISTER_DECLARATION(Register, rfp, r29); 140 // current thread 141 REGISTER_DECLARATION(Register, rthread, r28); 142 // base of heap 143 REGISTER_DECLARATION(Register, rheapbase, r27); 144 // constant pool cache 145 REGISTER_DECLARATION(Register, rcpool, r26); 146 // monitors allocated on stack 147 REGISTER_DECLARATION(Register, rmonitors, r25); 148 // locals on stack 149 REGISTER_DECLARATION(Register, rlocals, r24); 150 // bytecode pointer 151 REGISTER_DECLARATION(Register, rbcp, r22); 152 // Dispatch table base 153 REGISTER_DECLARATION(Register, rdispatch, r21); 154 // Java stack pointer 155 REGISTER_DECLARATION(Register, esp, r20); 156 157 #define assert_cond(ARG1) assert(ARG1, #ARG1) 158 159 namespace asm_util { 160 uint32_t encode_logical_immediate(bool is32, uint64_t imm); 161 }; 162 163 using namespace asm_util; 164 165 166 class Assembler; 167 168 class Instruction_aarch64 { 169 unsigned insn; 170 #ifdef ASSERT 171 unsigned bits; 172 #endif 173 Assembler *assem; 174 175 public: 176 177 Instruction_aarch64(class Assembler *as) { 178 #ifdef ASSERT 179 bits = 0; 180 #endif 181 insn = 0; 182 assem = as; 183 } 184 185 inline ~Instruction_aarch64(); 186 187 unsigned &get_insn() { return insn; } 188 #ifdef ASSERT 189 unsigned &get_bits() { return bits; } 190 #endif 191 192 static inline int32_t extend(unsigned val, int hi = 31, int lo = 0) { 193 union { 194 unsigned u; 195 int n; 196 }; 197 198 u = val << (31 - hi); 199 n = n >> (31 - hi + lo); 200 return n; 201 } 202 203 static inline uint32_t extract(uint32_t val, int msb, int lsb) { 204 int nbits = msb - lsb + 1; 205 assert_cond(msb >= lsb); 206 uint32_t mask = (1U << nbits) - 1; 207 uint32_t result = val >> lsb; 208 result &= mask; 209 return result; 210 } 211 212 static inline int32_t sextract(uint32_t val, int msb, int lsb) 
{ 213 uint32_t uval = extract(val, msb, lsb); 214 return extend(uval, msb - lsb); 215 } 216 217 static void patch(address a, int msb, int lsb, uint64_t val) { 218 int nbits = msb - lsb + 1; 219 guarantee(val < (1U << nbits), "Field too big for insn"); 220 assert_cond(msb >= lsb); 221 unsigned mask = (1U << nbits) - 1; 222 val <<= lsb; 223 mask <<= lsb; 224 unsigned target = *(unsigned *)a; 225 target &= ~mask; 226 target |= val; 227 *(unsigned *)a = target; 228 } 229 230 static void spatch(address a, int msb, int lsb, int64_t val) { 231 int nbits = msb - lsb + 1; 232 int64_t chk = val >> (nbits - 1); 233 guarantee (chk == -1 || chk == 0, "Field too big for insn"); 234 unsigned uval = val; 235 unsigned mask = (1U << nbits) - 1; 236 uval &= mask; 237 uval <<= lsb; 238 mask <<= lsb; 239 unsigned target = *(unsigned *)a; 240 target &= ~mask; 241 target |= uval; 242 *(unsigned *)a = target; 243 } 244 245 void f(unsigned val, int msb, int lsb) { 246 int nbits = msb - lsb + 1; 247 guarantee(val < (1U << nbits), "Field too big for insn"); 248 assert_cond(msb >= lsb); 249 unsigned mask = (1U << nbits) - 1; 250 val <<= lsb; 251 mask <<= lsb; 252 insn |= val; 253 assert_cond((bits & mask) == 0); 254 #ifdef ASSERT 255 bits |= mask; 256 #endif 257 } 258 259 void f(unsigned val, int bit) { 260 f(val, bit, bit); 261 } 262 263 void sf(int64_t val, int msb, int lsb) { 264 int nbits = msb - lsb + 1; 265 int64_t chk = val >> (nbits - 1); 266 guarantee (chk == -1 || chk == 0, "Field too big for insn"); 267 unsigned uval = val; 268 unsigned mask = (1U << nbits) - 1; 269 uval &= mask; 270 f(uval, lsb + nbits - 1, lsb); 271 } 272 273 void rf(Register r, int lsb) { 274 f(r->encoding_nocheck(), lsb + 4, lsb); 275 } 276 277 // reg|ZR 278 void zrf(Register r, int lsb) { 279 f(r->encoding_nocheck() - (r == zr), lsb + 4, lsb); 280 } 281 282 // reg|SP 283 void srf(Register r, int lsb) { 284 f(r == sp ? 
31 : r->encoding_nocheck(), lsb + 4, lsb); 285 } 286 287 void rf(FloatRegister r, int lsb) { 288 f(r->encoding_nocheck(), lsb + 4, lsb); 289 } 290 291 unsigned get(int msb = 31, int lsb = 0) { 292 int nbits = msb - lsb + 1; 293 unsigned mask = ((1U << nbits) - 1) << lsb; 294 assert_cond((bits & mask) == mask); 295 return (insn & mask) >> lsb; 296 } 297 298 void fixed(unsigned value, unsigned mask) { 299 assert_cond ((mask & bits) == 0); 300 #ifdef ASSERT 301 bits |= mask; 302 #endif 303 insn |= value; 304 } 305 }; 306 307 #define starti Instruction_aarch64 do_not_use(this); set_current(&do_not_use) 308 309 class PrePost { 310 int _offset; 311 Register _r; 312 public: 313 PrePost(Register reg, int o) : _offset(o), _r(reg) { } 314 int offset() { return _offset; } 315 Register reg() { return _r; } 316 }; 317 318 class Pre : public PrePost { 319 public: 320 Pre(Register reg, int o) : PrePost(reg, o) { } 321 }; 322 class Post : public PrePost { 323 Register _idx; 324 bool _is_postreg; 325 public: 326 Post(Register reg, int o) : PrePost(reg, o) { _idx = NULL; _is_postreg = false; } 327 Post(Register reg, Register idx) : PrePost(reg, 0) { _idx = idx; _is_postreg = true; } 328 Register idx_reg() { return _idx; } 329 bool is_postreg() {return _is_postreg; } 330 }; 331 332 namespace ext 333 { 334 enum operation { uxtb, uxth, uxtw, uxtx, sxtb, sxth, sxtw, sxtx }; 335 }; 336 337 // Addressing modes 338 class Address { 339 public: 340 341 enum mode { no_mode, base_plus_offset, pre, post, post_reg, pcrel, 342 base_plus_offset_reg, literal }; 343 344 // Shift and extend for base reg + reg offset addressing 345 class extend { 346 int _option, _shift; 347 ext::operation _op; 348 public: 349 extend() { } 350 extend(int s, int o, ext::operation op) : _option(o), _shift(s), _op(op) { } 351 int option() const{ return _option; } 352 int shift() const { return _shift; } 353 ext::operation op() const { return _op; } 354 }; 355 class uxtw : public extend { 356 public: 357 uxtw(int shift = 
-1): extend(shift, 0b010, ext::uxtw) { } 358 }; 359 class lsl : public extend { 360 public: 361 lsl(int shift = -1): extend(shift, 0b011, ext::uxtx) { } 362 }; 363 class sxtw : public extend { 364 public: 365 sxtw(int shift = -1): extend(shift, 0b110, ext::sxtw) { } 366 }; 367 class sxtx : public extend { 368 public: 369 sxtx(int shift = -1): extend(shift, 0b111, ext::sxtx) { } 370 }; 371 372 private: 373 Register _base; 374 Register _index; 375 int64_t _offset; 376 enum mode _mode; 377 extend _ext; 378 379 RelocationHolder _rspec; 380 381 // Typically we use AddressLiterals we want to use their rval 382 // However in some situations we want the lval (effect address) of 383 // the item. We provide a special factory for making those lvals. 384 bool _is_lval; 385 386 // If the target is far we'll need to load the ea of this to a 387 // register to reach it. Otherwise if near we can do PC-relative 388 // addressing. 389 address _target; 390 391 public: 392 Address() 393 : _mode(no_mode) { } 394 Address(Register r) 395 : _base(r), _index(noreg), _offset(0), _mode(base_plus_offset), _target(0) { } 396 Address(Register r, int o) 397 : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { } 398 Address(Register r, int64_t o) 399 : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { } 400 Address(Register r, uint64_t o) 401 : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { } 402 #ifdef ASSERT 403 Address(Register r, ByteSize disp) 404 : _base(r), _index(noreg), _offset(in_bytes(disp)), _mode(base_plus_offset), _target(0) { } 405 #endif 406 Address(Register r, Register r1, extend ext = lsl()) 407 : _base(r), _index(r1), _offset(0), _mode(base_plus_offset_reg), 408 _ext(ext), _target(0) { } 409 Address(Pre p) 410 : _base(p.reg()), _offset(p.offset()), _mode(pre) { } 411 Address(Post p) 412 : _base(p.reg()), _index(p.idx_reg()), _offset(p.offset()), 413 _mode(p.is_postreg() ? 
post_reg : post), _target(0) { } 414 Address(address target, RelocationHolder const& rspec) 415 : _mode(literal), 416 _rspec(rspec), 417 _is_lval(false), 418 _target(target) { } 419 Address(address target, relocInfo::relocType rtype = relocInfo::external_word_type); 420 Address(Register base, RegisterOrConstant index, extend ext = lsl()) 421 : _base (base), 422 _offset(0), _ext(ext), _target(0) { 423 if (index.is_register()) { 424 _mode = base_plus_offset_reg; 425 _index = index.as_register(); 426 } else { 427 guarantee(ext.option() == ext::uxtx, "should be"); 428 assert(index.is_constant(), "should be"); 429 _mode = base_plus_offset; 430 _offset = index.as_constant() << ext.shift(); 431 } 432 } 433 434 Register base() const { 435 guarantee((_mode == base_plus_offset | _mode == base_plus_offset_reg 436 | _mode == post | _mode == post_reg), 437 "wrong mode"); 438 return _base; 439 } 440 int64_t offset() const { 441 return _offset; 442 } 443 Register index() const { 444 return _index; 445 } 446 mode getMode() const { 447 return _mode; 448 } 449 bool uses(Register reg) const { return _base == reg || _index == reg; } 450 address target() const { return _target; } 451 const RelocationHolder& rspec() const { return _rspec; } 452 453 void encode(Instruction_aarch64 *i) const { 454 i->f(0b111, 29, 27); 455 i->srf(_base, 5); 456 457 switch(_mode) { 458 case base_plus_offset: 459 { 460 unsigned size = i->get(31, 30); 461 if (i->get(26, 26) && i->get(23, 23)) { 462 // SIMD Q Type - Size = 128 bits 463 assert(size == 0, "bad size"); 464 size = 0b100; 465 } 466 unsigned mask = (1 << size) - 1; 467 if (_offset < 0 || _offset & mask) 468 { 469 i->f(0b00, 25, 24); 470 i->f(0, 21), i->f(0b00, 11, 10); 471 i->sf(_offset, 20, 12); 472 } else { 473 i->f(0b01, 25, 24); 474 i->f(_offset >> size, 21, 10); 475 } 476 } 477 break; 478 479 case base_plus_offset_reg: 480 { 481 i->f(0b00, 25, 24); 482 i->f(1, 21); 483 i->rf(_index, 16); 484 i->f(_ext.option(), 15, 13); 485 unsigned size = 
i->get(31, 30); 486 if (i->get(26, 26) && i->get(23, 23)) { 487 // SIMD Q Type - Size = 128 bits 488 assert(size == 0, "bad size"); 489 size = 0b100; 490 } 491 if (size == 0) // It's a byte 492 i->f(_ext.shift() >= 0, 12); 493 else { 494 if (_ext.shift() > 0) 495 assert(_ext.shift() == (int)size, "bad shift"); 496 i->f(_ext.shift() > 0, 12); 497 } 498 i->f(0b10, 11, 10); 499 } 500 break; 501 502 case pre: 503 i->f(0b00, 25, 24); 504 i->f(0, 21), i->f(0b11, 11, 10); 505 i->sf(_offset, 20, 12); 506 break; 507 508 case post: 509 i->f(0b00, 25, 24); 510 i->f(0, 21), i->f(0b01, 11, 10); 511 i->sf(_offset, 20, 12); 512 break; 513 514 default: 515 ShouldNotReachHere(); 516 } 517 } 518 519 void encode_pair(Instruction_aarch64 *i) const { 520 switch(_mode) { 521 case base_plus_offset: 522 i->f(0b010, 25, 23); 523 break; 524 case pre: 525 i->f(0b011, 25, 23); 526 break; 527 case post: 528 i->f(0b001, 25, 23); 529 break; 530 default: 531 ShouldNotReachHere(); 532 } 533 534 unsigned size; // Operand shift in 32-bit words 535 536 if (i->get(26, 26)) { // float 537 switch(i->get(31, 30)) { 538 case 0b10: 539 size = 2; break; 540 case 0b01: 541 size = 1; break; 542 case 0b00: 543 size = 0; break; 544 default: 545 ShouldNotReachHere(); 546 size = 0; // unreachable 547 } 548 } else { 549 size = i->get(31, 31); 550 } 551 552 size = 4 << size; 553 guarantee(_offset % size == 0, "bad offset"); 554 i->sf(_offset / size, 21, 15); 555 i->srf(_base, 5); 556 } 557 558 void encode_nontemporal_pair(Instruction_aarch64 *i) const { 559 // Only base + offset is allowed 560 i->f(0b000, 25, 23); 561 unsigned size = i->get(31, 31); 562 size = 4 << size; 563 guarantee(_offset % size == 0, "bad offset"); 564 i->sf(_offset / size, 21, 15); 565 i->srf(_base, 5); 566 guarantee(_mode == Address::base_plus_offset, 567 "Bad addressing mode for non-temporal op"); 568 } 569 570 void lea(MacroAssembler *, Register) const; 571 572 static bool offset_ok_for_immed(int64_t offset, uint shift); 573 }; 574 575 // 
Convience classes 576 class RuntimeAddress: public Address { 577 578 public: 579 580 RuntimeAddress(address target) : Address(target, relocInfo::runtime_call_type) {} 581 582 }; 583 584 class OopAddress: public Address { 585 586 public: 587 588 OopAddress(address target) : Address(target, relocInfo::oop_type){} 589 590 }; 591 592 class ExternalAddress: public Address { 593 private: 594 static relocInfo::relocType reloc_for_target(address target) { 595 // Sometimes ExternalAddress is used for values which aren't 596 // exactly addresses, like the card table base. 597 // external_word_type can't be used for values in the first page 598 // so just skip the reloc in that case. 599 return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none; 600 } 601 602 public: 603 604 ExternalAddress(address target) : Address(target, reloc_for_target(target)) {} 605 606 }; 607 608 class InternalAddress: public Address { 609 610 public: 611 612 InternalAddress(address target) : Address(target, relocInfo::internal_word_type) {} 613 }; 614 615 const int FPUStateSizeInWords = FloatRegisterImpl::number_of_registers * 616 FloatRegisterImpl::save_slots_per_register; 617 618 typedef enum { 619 PLDL1KEEP = 0b00000, PLDL1STRM, PLDL2KEEP, PLDL2STRM, PLDL3KEEP, PLDL3STRM, 620 PSTL1KEEP = 0b10000, PSTL1STRM, PSTL2KEEP, PSTL2STRM, PSTL3KEEP, PSTL3STRM, 621 PLIL1KEEP = 0b01000, PLIL1STRM, PLIL2KEEP, PLIL2STRM, PLIL3KEEP, PLIL3STRM 622 } prfop; 623 624 class Assembler : public AbstractAssembler { 625 626 #ifndef PRODUCT 627 static const uintptr_t asm_bp; 628 629 void emit_long(jint x) { 630 if ((uintptr_t)pc() == asm_bp) 631 NOP(); 632 AbstractAssembler::emit_int32(x); 633 } 634 #else 635 void emit_long(jint x) { 636 AbstractAssembler::emit_int32(x); 637 } 638 #endif 639 640 public: 641 642 enum { instruction_size = 4 }; 643 644 //---< calculate length of instruction >--- 645 // We just use the values set above. 
646 // instruction must start at passed address 647 static unsigned int instr_len(unsigned char *instr) { return instruction_size; } 648 649 //---< longest instructions >--- 650 static unsigned int instr_maxlen() { return instruction_size; } 651 652 Address adjust(Register base, int offset, bool preIncrement) { 653 if (preIncrement) 654 return Address(Pre(base, offset)); 655 else 656 return Address(Post(base, offset)); 657 } 658 659 Address pre(Register base, int offset) { 660 return adjust(base, offset, true); 661 } 662 663 Address post(Register base, int offset) { 664 return adjust(base, offset, false); 665 } 666 667 Address post(Register base, Register idx) { 668 return Address(Post(base, idx)); 669 } 670 671 static address locate_next_instruction(address inst); 672 673 Instruction_aarch64* current; 674 675 void set_current(Instruction_aarch64* i) { current = i; } 676 677 void f(unsigned val, int msb, int lsb) { 678 current->f(val, msb, lsb); 679 } 680 void f(unsigned val, int msb) { 681 current->f(val, msb, msb); 682 } 683 void sf(int64_t val, int msb, int lsb) { 684 current->sf(val, msb, lsb); 685 } 686 void rf(Register reg, int lsb) { 687 current->rf(reg, lsb); 688 } 689 void srf(Register reg, int lsb) { 690 current->srf(reg, lsb); 691 } 692 void zrf(Register reg, int lsb) { 693 current->zrf(reg, lsb); 694 } 695 void rf(FloatRegister reg, int lsb) { 696 current->rf(reg, lsb); 697 } 698 void fixed(unsigned value, unsigned mask) { 699 current->fixed(value, mask); 700 } 701 702 void emit() { 703 emit_long(current->get_insn()); 704 assert_cond(current->get_bits() == 0xffffffff); 705 current = NULL; 706 } 707 708 typedef void (Assembler::* uncond_branch_insn)(address dest); 709 typedef void (Assembler::* compare_and_branch_insn)(Register Rt, address dest); 710 typedef void (Assembler::* test_and_branch_insn)(Register Rt, int bitpos, address dest); 711 typedef void (Assembler::* prefetch_insn)(address target, prfop); 712 713 void wrap_label(Label &L, 
uncond_branch_insn insn); 714 void wrap_label(Register r, Label &L, compare_and_branch_insn insn); 715 void wrap_label(Register r, int bitpos, Label &L, test_and_branch_insn insn); 716 void wrap_label(Label &L, prfop, prefetch_insn insn); 717 718 // PC-rel. addressing 719 720 void adr(Register Rd, address dest); 721 void _adrp(Register Rd, address dest); 722 723 void adr(Register Rd, const Address &dest); 724 void _adrp(Register Rd, const Address &dest); 725 726 void adr(Register Rd, Label &L) { 727 wrap_label(Rd, L, &Assembler::Assembler::adr); 728 } 729 void _adrp(Register Rd, Label &L) { 730 wrap_label(Rd, L, &Assembler::_adrp); 731 } 732 733 void adrp(Register Rd, const Address &dest, uint64_t &offset); 734 735 #undef INSN 736 737 void add_sub_immediate(Register Rd, Register Rn, unsigned uimm, int op, 738 int negated_op); 739 740 // Add/subtract (immediate) 741 #define INSN(NAME, decode, negated) \ 742 void NAME(Register Rd, Register Rn, unsigned imm, unsigned shift) { \ 743 starti; \ 744 f(decode, 31, 29), f(0b10001, 28, 24), f(shift, 23, 22), f(imm, 21, 10); \ 745 zrf(Rd, 0), srf(Rn, 5); \ 746 } \ 747 \ 748 void NAME(Register Rd, Register Rn, unsigned imm) { \ 749 starti; \ 750 add_sub_immediate(Rd, Rn, imm, decode, negated); \ 751 } 752 753 INSN(addsw, 0b001, 0b011); 754 INSN(subsw, 0b011, 0b001); 755 INSN(adds, 0b101, 0b111); 756 INSN(subs, 0b111, 0b101); 757 758 #undef INSN 759 760 #define INSN(NAME, decode, negated) \ 761 void NAME(Register Rd, Register Rn, unsigned imm) { \ 762 starti; \ 763 add_sub_immediate(Rd, Rn, imm, decode, negated); \ 764 } 765 766 INSN(addw, 0b000, 0b010); 767 INSN(subw, 0b010, 0b000); 768 INSN(add, 0b100, 0b110); 769 INSN(sub, 0b110, 0b100); 770 771 #undef INSN 772 773 // Logical (immediate) 774 #define INSN(NAME, decode, is32) \ 775 void NAME(Register Rd, Register Rn, uint64_t imm) { \ 776 starti; \ 777 uint32_t val = encode_logical_immediate(is32, imm); \ 778 f(decode, 31, 29), f(0b100100, 28, 23), f(val, 22, 10); \ 779 
srf(Rd, 0), zrf(Rn, 5); \ 780 } 781 782 INSN(andw, 0b000, true); 783 INSN(orrw, 0b001, true); 784 INSN(eorw, 0b010, true); 785 INSN(andr, 0b100, false); 786 INSN(orr, 0b101, false); 787 INSN(eor, 0b110, false); 788 789 #undef INSN 790 791 #define INSN(NAME, decode, is32) \ 792 void NAME(Register Rd, Register Rn, uint64_t imm) { \ 793 starti; \ 794 uint32_t val = encode_logical_immediate(is32, imm); \ 795 f(decode, 31, 29), f(0b100100, 28, 23), f(val, 22, 10); \ 796 zrf(Rd, 0), zrf(Rn, 5); \ 797 } 798 799 INSN(ands, 0b111, false); 800 INSN(andsw, 0b011, true); 801 802 #undef INSN 803 804 // Move wide (immediate) 805 #define INSN(NAME, opcode) \ 806 void NAME(Register Rd, unsigned imm, unsigned shift = 0) { \ 807 assert_cond((shift/16)*16 == shift); \ 808 starti; \ 809 f(opcode, 31, 29), f(0b100101, 28, 23), f(shift/16, 22, 21), \ 810 f(imm, 20, 5); \ 811 rf(Rd, 0); \ 812 } 813 814 INSN(movnw, 0b000); 815 INSN(movzw, 0b010); 816 INSN(movkw, 0b011); 817 INSN(movn, 0b100); 818 INSN(movz, 0b110); 819 INSN(movk, 0b111); 820 821 #undef INSN 822 823 // Bitfield 824 #define INSN(NAME, opcode, size) \ 825 void NAME(Register Rd, Register Rn, unsigned immr, unsigned imms) { \ 826 starti; \ 827 guarantee(size == 1 || (immr < 32 && imms < 32), "incorrect immr/imms");\ 828 f(opcode, 31, 22), f(immr, 21, 16), f(imms, 15, 10); \ 829 zrf(Rn, 5), rf(Rd, 0); \ 830 } 831 832 INSN(sbfmw, 0b0001001100, 0); 833 INSN(bfmw, 0b0011001100, 0); 834 INSN(ubfmw, 0b0101001100, 0); 835 INSN(sbfm, 0b1001001101, 1); 836 INSN(bfm, 0b1011001101, 1); 837 INSN(ubfm, 0b1101001101, 1); 838 839 #undef INSN 840 841 // Extract 842 #define INSN(NAME, opcode, size) \ 843 void NAME(Register Rd, Register Rn, Register Rm, unsigned imms) { \ 844 starti; \ 845 guarantee(size == 1 || imms < 32, "incorrect imms"); \ 846 f(opcode, 31, 21), f(imms, 15, 10); \ 847 zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0); \ 848 } 849 850 INSN(extrw, 0b00010011100, 0); 851 INSN(extr, 0b10010011110, 1); 852 853 #undef INSN 854 855 // The 
maximum range of a branch is fixed for the AArch64 856 // architecture. In debug mode we shrink it in order to test 857 // trampolines, but not so small that branches in the interpreter 858 // are out of range. 859 static const uint64_t branch_range = NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M); 860 861 static bool reachable_from_branch_at(address branch, address target) { 862 return uabs(target - branch) < branch_range; 863 } 864 865 // Unconditional branch (immediate) 866 #define INSN(NAME, opcode) \ 867 void NAME(address dest) { \ 868 starti; \ 869 int64_t offset = (dest - pc()) >> 2; \ 870 DEBUG_ONLY(assert(reachable_from_branch_at(pc(), dest), "debug only")); \ 871 f(opcode, 31), f(0b00101, 30, 26), sf(offset, 25, 0); \ 872 } \ 873 void NAME(Label &L) { \ 874 wrap_label(L, &Assembler::NAME); \ 875 } \ 876 void NAME(const Address &dest); 877 878 INSN(b, 0); 879 INSN(bl, 1); 880 881 #undef INSN 882 883 // Compare & branch (immediate) 884 #define INSN(NAME, opcode) \ 885 void NAME(Register Rt, address dest) { \ 886 int64_t offset = (dest - pc()) >> 2; \ 887 starti; \ 888 f(opcode, 31, 24), sf(offset, 23, 5), rf(Rt, 0); \ 889 } \ 890 void NAME(Register Rt, Label &L) { \ 891 wrap_label(Rt, L, &Assembler::NAME); \ 892 } 893 894 INSN(cbzw, 0b00110100); 895 INSN(cbnzw, 0b00110101); 896 INSN(cbz, 0b10110100); 897 INSN(cbnz, 0b10110101); 898 899 #undef INSN 900 901 // Test & branch (immediate) 902 #define INSN(NAME, opcode) \ 903 void NAME(Register Rt, int bitpos, address dest) { \ 904 int64_t offset = (dest - pc()) >> 2; \ 905 int b5 = bitpos >> 5; \ 906 bitpos &= 0x1f; \ 907 starti; \ 908 f(b5, 31), f(opcode, 30, 24), f(bitpos, 23, 19), sf(offset, 18, 5); \ 909 rf(Rt, 0); \ 910 } \ 911 void NAME(Register Rt, int bitpos, Label &L) { \ 912 wrap_label(Rt, bitpos, L, &Assembler::NAME); \ 913 } 914 915 INSN(tbz, 0b0110110); 916 INSN(tbnz, 0b0110111); 917 918 #undef INSN 919 920 // Conditional branch (immediate) 921 enum Condition 922 {EQ, NE, HS, CS=HS, LO, CC=LO, MI, PL, VS, VC, 
HI, LS, GE, LT, GT, LE, AL, NV}; 923 924 void br(Condition cond, address dest) { 925 int64_t offset = (dest - pc()) >> 2; 926 starti; 927 f(0b0101010, 31, 25), f(0, 24), sf(offset, 23, 5), f(0, 4), f(cond, 3, 0); 928 } 929 930 #define INSN(NAME, cond) \ 931 void NAME(address dest) { \ 932 br(cond, dest); \ 933 } 934 935 INSN(beq, EQ); 936 INSN(bne, NE); 937 INSN(bhs, HS); 938 INSN(bcs, CS); 939 INSN(blo, LO); 940 INSN(bcc, CC); 941 INSN(bmi, MI); 942 INSN(bpl, PL); 943 INSN(bvs, VS); 944 INSN(bvc, VC); 945 INSN(bhi, HI); 946 INSN(bls, LS); 947 INSN(bge, GE); 948 INSN(blt, LT); 949 INSN(bgt, GT); 950 INSN(ble, LE); 951 INSN(bal, AL); 952 INSN(bnv, NV); 953 954 void br(Condition cc, Label &L); 955 956 #undef INSN 957 958 // Exception generation 959 void generate_exception(int opc, int op2, int LL, unsigned imm) { 960 starti; 961 f(0b11010100, 31, 24); 962 f(opc, 23, 21), f(imm, 20, 5), f(op2, 4, 2), f(LL, 1, 0); 963 } 964 965 #define INSN(NAME, opc, op2, LL) \ 966 void NAME(unsigned imm) { \ 967 generate_exception(opc, op2, LL, imm); \ 968 } 969 970 INSN(svc, 0b000, 0, 0b01); 971 INSN(hvc, 0b000, 0, 0b10); 972 INSN(smc, 0b000, 0, 0b11); 973 INSN(brk, 0b001, 0, 0b00); 974 INSN(hlt, 0b010, 0, 0b00); 975 INSN(dcps1, 0b101, 0, 0b01); 976 INSN(dcps2, 0b101, 0, 0b10); 977 INSN(dcps3, 0b101, 0, 0b11); 978 979 #undef INSN 980 981 // System 982 void system(int op0, int op1, int CRn, int CRm, int op2, 983 Register rt = dummy_reg) 984 { 985 starti; 986 f(0b11010101000, 31, 21); 987 f(op0, 20, 19); 988 f(op1, 18, 16); 989 f(CRn, 15, 12); 990 f(CRm, 11, 8); 991 f(op2, 7, 5); 992 rf(rt, 0); 993 } 994 995 void hint(int imm) { 996 system(0b00, 0b011, 0b0010, 0b0000, imm); 997 } 998 999 void nop() { 1000 hint(0); 1001 } 1002 1003 void yield() { 1004 hint(1); 1005 } 1006 1007 void wfe() { 1008 hint(2); 1009 } 1010 1011 void wfi() { 1012 hint(3); 1013 } 1014 1015 void sev() { 1016 hint(4); 1017 } 1018 1019 void sevl() { 1020 hint(5); 1021 } 1022 1023 // we only provide mrs and msr for 
the special purpose system 1024 // registers where op1 (instr[20:19]) == 11 and, (currently) only 1025 // use it for FPSR n.b msr has L (instr[21]) == 0 mrs has L == 1 1026 1027 void msr(int op1, int CRn, int CRm, int op2, Register rt) { 1028 starti; 1029 f(0b1101010100011, 31, 19); 1030 f(op1, 18, 16); 1031 f(CRn, 15, 12); 1032 f(CRm, 11, 8); 1033 f(op2, 7, 5); 1034 // writing zr is ok 1035 zrf(rt, 0); 1036 } 1037 1038 void mrs(int op1, int CRn, int CRm, int op2, Register rt) { 1039 starti; 1040 f(0b1101010100111, 31, 19); 1041 f(op1, 18, 16); 1042 f(CRn, 15, 12); 1043 f(CRm, 11, 8); 1044 f(op2, 7, 5); 1045 // reading to zr is a mistake 1046 rf(rt, 0); 1047 } 1048 1049 enum barrier {OSHLD = 0b0001, OSHST, OSH, NSHLD=0b0101, NSHST, NSH, 1050 ISHLD = 0b1001, ISHST, ISH, LD=0b1101, ST, SY}; 1051 1052 void dsb(barrier imm) { 1053 system(0b00, 0b011, 0b00011, imm, 0b100); 1054 } 1055 1056 void dmb(barrier imm) { 1057 system(0b00, 0b011, 0b00011, imm, 0b101); 1058 } 1059 1060 void isb() { 1061 system(0b00, 0b011, 0b00011, SY, 0b110); 1062 } 1063 1064 void sys(int op1, int CRn, int CRm, int op2, 1065 Register rt = (Register)0b11111) { 1066 system(0b01, op1, CRn, CRm, op2, rt); 1067 } 1068 1069 // Only implement operations accessible from EL0 or higher, i.e., 1070 // op1 CRn CRm op2 1071 // IC IVAU 3 7 5 1 1072 // DC CVAC 3 7 10 1 1073 // DC CVAP 3 7 12 1 1074 // DC CVAU 3 7 11 1 1075 // DC CIVAC 3 7 14 1 1076 // DC ZVA 3 7 4 1 1077 // So only deal with the CRm field. 
  // Cache maintenance operation encodings (CRm values; see table above).
  enum icache_maintenance {IVAU = 0b0101};
  enum dcache_maintenance {CVAC = 0b1010, CVAP = 0b1100, CVAU = 0b1011, CIVAC = 0b1110, ZVA = 0b100};

  // DC: data cache maintenance on the address in Rt.
  void dc(dcache_maintenance cm, Register Rt) {
    sys(0b011, 0b0111, cm, 0b001, Rt);
  }

  // IC: instruction cache maintenance on the address in Rt.
  void ic(icache_maintenance cm, Register Rt) {
    sys(0b011, 0b0111, cm, 0b001, Rt);
  }

  // A more convenient access to dmb for our purposes
  enum Membar_mask_bits {
    // We can use ISH for a barrier because the ARM ARM says "This
    // architecture assumes that all Processing Elements that use the
    // same operating system or hypervisor are in the same Inner
    // Shareable shareability domain."
    StoreStore = ISHST,
    LoadStore  = ISHLD,
    LoadLoad   = ISHLD,
    StoreLoad  = ISH,
    AnyAny     = ISH
  };

  void membar(Membar_mask_bits order_constraint) {
    dmb(Assembler::barrier(order_constraint));
  }

  // Unconditional branch (register)
  void branch_reg(Register R, int opc) {
    starti;
    f(0b1101011, 31, 25);
    f(opc, 24, 21);
    f(0b11111000000, 20, 10);
    rf(R, 5);
    f(0b00000, 4, 0);
  }

#define INSN(NAME, opc)                 \
  void NAME(Register R) {               \
    branch_reg(R, opc);                 \
  }

  INSN(br, 0b0000);
  INSN(blr, 0b0001);
  INSN(ret, 0b0010);

  void ret(void *p); // This forces a compile-time error for ret(0)

#undef INSN

#define INSN(NAME, opc)                 \
  void NAME() {                         \
    branch_reg(dummy_reg, opc);         \
  }

  INSN(eret, 0b0100);
  INSN(drps, 0b0101);

#undef INSN

  // Load/store exclusive
  enum operand_size { byte, halfword, word, xword };

  void load_store_exclusive(Register Rs, Register Rt1, Register Rt2,
    Register Rn, enum operand_size sz, int op, bool ordered) {
    starti;
    f(sz, 31, 30), f(0b001000, 29, 24), f(op, 23, 21);
    rf(Rs, 16), f(ordered, 15), zrf(Rt2, 10), srf(Rn, 5), zrf(Rt1, 0);
  }

  // Load-exclusive of [addr] into dst (acquire semantics if ordered).
  void load_exclusive(Register dst, Register addr,
                      enum operand_size sz, bool ordered) {
    load_store_exclusive(dummy_reg, dst, dummy_reg, addr,
                         sz, 0b010, ordered);
  }

  // Store-exclusive of new_val to [addr]; status receives 0 on success.
  void store_exclusive(Register status, Register new_val, Register addr,
                       enum operand_size sz, bool ordered) {
    load_store_exclusive(status, new_val, dummy_reg, addr,
                         sz, 0b000, ordered);
  }

#define INSN4(NAME, sz, op, o0) /* Four registers */                    \
  void NAME(Register Rs, Register Rt1, Register Rt2, Register Rn) {     \
    guarantee(Rs != Rn && Rs != Rt1 && Rs != Rt2, "unpredictable instruction"); \
    load_store_exclusive(Rs, Rt1, Rt2, Rn, sz, op, o0);                 \
  }

#define INSN3(NAME, sz, op, o0) /* Three registers */                   \
  void NAME(Register Rs, Register Rt, Register Rn) {                    \
    guarantee(Rs != Rn && Rs != Rt, "unpredictable instruction");       \
    load_store_exclusive(Rs, Rt, dummy_reg, Rn, sz, op, o0);            \
  }

#define INSN2(NAME, sz, op, o0) /* Two registers */                     \
  void NAME(Register Rt, Register Rn) {                                 \
    load_store_exclusive(dummy_reg, Rt, dummy_reg,                      \
                         Rn, sz, op, o0);                               \
  }

#define INSN_FOO(NAME, sz, op, o0) /* Three registers, encoded differently */ \
  void NAME(Register Rt1, Register Rt2, Register Rn) {                  \
    guarantee(Rt1 != Rt2, "unpredictable instruction");                 \
    load_store_exclusive(dummy_reg, Rt1, Rt2, Rn, sz, op, o0);          \
  }

  // bytes
  INSN3(stxrb, byte, 0b000, 0);
  INSN3(stlxrb, byte, 0b000, 1);
  INSN2(ldxrb, byte, 0b010, 0);
  INSN2(ldaxrb, byte, 0b010, 1);
  INSN2(stlrb, byte, 0b100, 1);
  INSN2(ldarb, byte, 0b110, 1);

  // halfwords
  INSN3(stxrh, halfword, 0b000, 0);
  INSN3(stlxrh, halfword, 0b000, 1);
  INSN2(ldxrh, halfword, 0b010, 0);
  INSN2(ldaxrh, halfword, 0b010, 1);
  INSN2(stlrh, halfword, 0b100, 1);
  INSN2(ldarh, halfword, 0b110, 1);

  // words
  INSN3(stxrw, word, 0b000, 0);
  INSN3(stlxrw, word, 0b000, 1);
  INSN4(stxpw, word, 0b001, 0);
  INSN4(stlxpw, word, 0b001, 1);
  INSN2(ldxrw, word, 0b010, 0);
  INSN2(ldaxrw, word, 0b010, 1);
  INSN_FOO(ldxpw, word, 0b011, 0);
  INSN_FOO(ldaxpw, word, 0b011, 1);
  INSN2(stlrw, word, 0b100, 1);
  INSN2(ldarw, word, 0b110, 1);

  // xwords
  INSN3(stxr, xword, 0b000, 0);
  INSN3(stlxr, xword, 0b000, 1);
  INSN4(stxp, xword, 0b001, 0);
  INSN4(stlxp, xword, 0b001, 1);
  INSN2(ldxr, xword, 0b010, 0);
  INSN2(ldaxr, xword, 0b010, 1);
  INSN_FOO(ldxp, xword, 0b011, 0);
  INSN_FOO(ldaxp, xword, 0b011, 1);
  INSN2(stlr, xword, 0b100, 1);
  INSN2(ldar, xword, 0b110, 1);

#undef INSN2
#undef INSN3
#undef INSN4
#undef INSN_FOO

  // 8.1 Compare and swap extensions
  // Shared encoder for CAS/CASA/CASL/CASAL and the pair forms (CASP...).
  // a = acquire, r = release; not_pair selects single-register CAS.
  void lse_cas(Register Rs, Register Rt, Register Rn,
               enum operand_size sz, bool a, bool r, bool not_pair) {
    starti;
    if (! not_pair) { // Pair
      assert(sz == word || sz == xword, "invalid size");
      /* The size bit is in bit 30, not 31 */
      sz = (operand_size)(sz == word ? 0b00:0b01);
    }
    f(sz, 31, 30), f(0b001000, 29, 24), f(not_pair ? 1 : 0, 23), f(a, 22), f(1, 21);
    zrf(Rs, 16), f(r, 15), f(0b11111, 14, 10), srf(Rn, 5), zrf(Rt, 0);
  }

  // CAS
#define INSN(NAME, a, r)                                                \
  void NAME(operand_size sz, Register Rs, Register Rt, Register Rn) {   \
    assert(Rs != Rn && Rs != Rt, "unpredictable instruction");          \
    lse_cas(Rs, Rt, Rn, sz, a, r, true);                                \
  }
  INSN(cas,    false, false)
  INSN(casa,   true,  false)
  INSN(casl,   false, true)
  INSN(casal,  true,  true)
#undef INSN

  // CASP
#define INSN(NAME, a, r)                                                \
  void NAME(operand_size sz, Register Rs, Register Rs1,                 \
            Register Rt, Register Rt1, Register Rn) {                   \
    assert((Rs->encoding() & 1) == 0 && (Rt->encoding() & 1) == 0 &&    \
           Rs->successor() == Rs1 && Rt->successor() == Rt1 &&          \
           Rs != Rn && Rs1 != Rn && Rs != Rt, "invalid registers");     \
    lse_cas(Rs, Rt, Rn, sz, a, r, false);                               \
  }
  INSN(casp,   false, false)
  INSN(caspa,  true,  false)
  INSN(caspl,  false, true)
  INSN(caspal, true,  true)
#undef INSN

  // 8.1 Atomic operations
  // Shared encoder for the LSE read-modify-write family (LDADD, SWP, ...).
  void lse_atomic(Register Rs, Register Rt, Register Rn,
                  enum operand_size sz, int op1, int op2, bool a, bool r) {
    starti;
    f(sz, 31, 30), f(0b111000, 29, 24), f(a, 23), f(r, 22), f(1, 21);
    zrf(Rs, 16), f(op1, 15), f(op2, 14, 12), f(0, 11, 10), srf(Rn, 5), zrf(Rt, 0);
  }

  // Emits the plain, acquire (_A), release (_L) and acq+rel (_AL) variants.
#define INSN(NAME, NAME_A, NAME_L, NAME_AL, op1, op2)                   \
  void NAME(operand_size sz, Register Rs, Register Rt, Register Rn) {   \
    lse_atomic(Rs, Rt, Rn, sz, op1, op2, false, false);                 \
  }                                                                     \
  void NAME_A(operand_size sz, Register Rs, Register Rt, Register Rn) { \
    lse_atomic(Rs, Rt, Rn, sz, op1, op2, true, false);                  \
  }                                                                     \
  void NAME_L(operand_size sz, Register Rs, Register Rt, Register Rn) { \
    lse_atomic(Rs, Rt, Rn, sz, op1, op2, false, true);                  \
  }                                                                     \
  void NAME_AL(operand_size sz, Register Rs, Register Rt, Register Rn) {\
    lse_atomic(Rs, Rt, Rn, sz, op1, op2, true, true);                   \
  }
  INSN(ldadd,  ldadda,  ldaddl,  ldaddal,  0, 0b000);
  INSN(ldbic,  ldbica,  ldbicl,  ldbical,  0, 0b001);
  INSN(ldeor,  ldeora,  ldeorl,  ldeoral,  0, 0b010);
  INSN(ldorr,  ldorra,  ldorrl,  ldorral,  0, 0b011);
  INSN(ldsmax, ldsmaxa, ldsmaxl, ldsmaxal, 0, 0b100);
  INSN(ldsmin, ldsmina, ldsminl, ldsminal, 0, 0b101);
  INSN(ldumax, ldumaxa, ldumaxl, ldumaxal, 0, 0b110);
  INSN(ldumin, ldumina, lduminl, lduminal, 0, 0b111);
  INSN(swp,    swpa,    swpl,    swpal,    1, 0b000);
#undef INSN

  // Load register (literal): PC-relative load of a nearby address.
#define INSN(NAME, opc, V)                                              \
  void NAME(Register Rt, address dest) {                                \
    int64_t offset = (dest - pc()) >> 2;                                \
    starti;                                                             \
    f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24),        \
      sf(offset, 23, 5);                                                \
    rf(Rt, 0);                                                          \
  }                                                                     \
  void NAME(Register Rt, address dest, relocInfo::relocType rtype) {    \
    InstructionMark im(this);                                           \
    guarantee(rtype == relocInfo::internal_word_type,                   \
              "only internal_word_type relocs make sense here");        \
    code_section()->relocate(inst_mark(), InternalAddress(dest).rspec()); \
    NAME(Rt, dest);                                                     \
  }                                                                     \
  void NAME(Register Rt, Label &L) {                                    \
    wrap_label(Rt, L, &Assembler::NAME);                                \
  }

  INSN(ldrw, 0b00, 0);
  INSN(ldr, 0b01, 0);
  INSN(ldrsw, 0b10, 0);

#undef INSN

  // Load FP register (literal).
#define INSN(NAME, opc, V)                                              \
  void NAME(FloatRegister Rt, address dest) {                           \
    int64_t offset = (dest - pc()) >> 2;                                \
    starti;                                                             \
    f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24),        \
      sf(offset, 23, 5);                                                \
    rf((Register)Rt, 0);                                                \
  }

  INSN(ldrs, 0b00, 1);
  INSN(ldrd, 0b01, 1);
  INSN(ldrq, 0b10, 1);

#undef INSN

  // Prefetch (literal).
#define INSN(NAME, opc, V)                                              \
  void NAME(address dest, prfop op = PLDL1KEEP) {                       \
    int64_t offset = (dest - pc()) >> 2;                                \
    starti;                                                             \
    f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24),        \
      sf(offset, 23, 5);                                                \
    f(op, 4, 0);                                                        \
  }                                                                     \
  void NAME(Label &L, prfop op = PLDL1KEEP) {                           \
    wrap_label(L, op, &Assembler::NAME);                                \
  }

  INSN(prfm, 0b11, 0);

#undef INSN

  // Load/store
  // Shared encoder for register-pair loads/stores.
  void ld_st1(int opc, int p1, int V, int L,
              Register Rt1, Register Rt2, Address adr, bool no_allocate) {
    starti;
    f(opc, 31, 30), f(p1, 29, 27), f(V, 26), f(L, 22);
    zrf(Rt2, 10), zrf(Rt1, 0);
    if (no_allocate) {
      adr.encode_nontemporal_pair(current);
    } else {
      adr.encode_pair(current);
    }
  }

  // Load/store register pair (offset)
#define INSN(NAME, size, p1, V, L, no_allocate)                         \
  void NAME(Register Rt1, Register Rt2, Address adr) {                  \
    ld_st1(size, p1, V, L, Rt1, Rt2, adr, no_allocate);                 \
  }

  INSN(stpw,  0b00, 0b101, 0, 0, false);
  INSN(ldpw,  0b00, 0b101, 0, 1, false);
  INSN(ldpsw, 0b01, 0b101, 0, 1, false);
  INSN(stp,   0b10, 0b101, 0, 0, false);
  INSN(ldp,   0b10, 0b101, 0, 1, false);

  // Load/store no-allocate pair (offset)
  INSN(stnpw, 0b00, 0b101, 0, 0, true);
  INSN(ldnpw, 0b00, 0b101, 0, 1, true);
  INSN(stnp,  0b10, 0b101, 0, 0, true);
  INSN(ldnp,  0b10, 0b101, 0, 1, true);

#undef INSN

  // FP register pair (offset).
#define INSN(NAME, size, p1, V, L, no_allocate)                         \
  void NAME(FloatRegister Rt1, FloatRegister Rt2, Address adr) {        \
    ld_st1(size, p1, V, L, (Register)Rt1, (Register)Rt2, adr, no_allocate); \
  }

  INSN(stps, 0b00, 0b101, 1, 0, false);
  INSN(ldps, 0b00, 0b101, 1, 1, false);
  INSN(stpd, 0b01, 0b101, 1, 0, false);
  INSN(ldpd, 0b01, 0b101, 1, 1, false);
  INSN(stpq, 0b10, 0b101, 1, 0, false);
  INSN(ldpq, 0b10, 0b101, 1, 1, false);

#undef INSN

  // Load/store register (all modes)
  void ld_st2(Register Rt, const Address &adr, int size, int op, int V = 0) {
    starti;

    f(V, 26); // general reg?
    zrf(Rt, 0);

    // Encoding for literal loads is done here (rather than pushed
    // down into Address::encode) because the encoding of this
    // instruction is too different from all of the other forms to
    // make it worth sharing.
    if (adr.getMode() == Address::literal) {
      assert(size == 0b10 || size == 0b11, "bad operand size in ldr");
      assert(op == 0b01, "literal form can only be used with loads");
      f(size & 0b01, 31, 30), f(0b011, 29, 27), f(0b00, 25, 24);
      int64_t offset = (adr.target() - pc()) >> 2;
      sf(offset, 23, 5);
      code_section()->relocate(pc(), adr.rspec());
      return;
    }

    f(size, 31, 30);
    f(op, 23, 22); // str
    adr.encode(current);
  }

#define INSN(NAME, size, op)                                            \
  void NAME(Register Rt, const Address &adr) {                          \
    ld_st2(Rt, adr, size, op);                                          \
  }                                                                     \

  INSN(str, 0b11, 0b00);
  INSN(strw, 0b10, 0b00);
  INSN(strb, 0b00, 0b00);
  INSN(strh, 0b01, 0b00);

  INSN(ldr, 0b11, 0b01);
  INSN(ldrw, 0b10, 0b01);
  INSN(ldrb, 0b00, 0b01);
  INSN(ldrh, 0b01, 0b01);

  INSN(ldrsb, 0b00, 0b10);
  INSN(ldrsbw, 0b00, 0b11);
  INSN(ldrsh, 0b01, 0b10);
  INSN(ldrshw, 0b01, 0b11);
  INSN(ldrsw, 0b10, 0b10);

#undef INSN

  // Prefetch (register offset / immediate modes).
#define INSN(NAME, size, op)                                            \
  void NAME(const Address &adr, prfop pfop = PLDL1KEEP) {               \
    ld_st2((Register)pfop, adr, size, op);                              \
  }

  INSN(prfm, 0b11, 0b10); // FIXME: PRFM should not be used with
                          // writeback modes, but the assembler
                          // doesn't enforce that.
1463 1464 #undef INSN 1465 1466 #define INSN(NAME, size, op) \ 1467 void NAME(FloatRegister Rt, const Address &adr) { \ 1468 ld_st2((Register)Rt, adr, size, op, 1); \ 1469 } 1470 1471 INSN(strd, 0b11, 0b00); 1472 INSN(strs, 0b10, 0b00); 1473 INSN(ldrd, 0b11, 0b01); 1474 INSN(ldrs, 0b10, 0b01); 1475 INSN(strq, 0b00, 0b10); 1476 INSN(ldrq, 0x00, 0b11); 1477 1478 #undef INSN 1479 1480 enum shift_kind { LSL, LSR, ASR, ROR }; 1481 1482 void op_shifted_reg(unsigned decode, 1483 enum shift_kind kind, unsigned shift, 1484 unsigned size, unsigned op) { 1485 f(size, 31); 1486 f(op, 30, 29); 1487 f(decode, 28, 24); 1488 f(shift, 15, 10); 1489 f(kind, 23, 22); 1490 } 1491 1492 // Logical (shifted register) 1493 #define INSN(NAME, size, op, N) \ 1494 void NAME(Register Rd, Register Rn, Register Rm, \ 1495 enum shift_kind kind = LSL, unsigned shift = 0) { \ 1496 starti; \ 1497 guarantee(size == 1 || shift < 32, "incorrect shift"); \ 1498 f(N, 21); \ 1499 zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0); \ 1500 op_shifted_reg(0b01010, kind, shift, size, op); \ 1501 } 1502 1503 INSN(andr, 1, 0b00, 0); 1504 INSN(orr, 1, 0b01, 0); 1505 INSN(eor, 1, 0b10, 0); 1506 INSN(ands, 1, 0b11, 0); 1507 INSN(andw, 0, 0b00, 0); 1508 INSN(orrw, 0, 0b01, 0); 1509 INSN(eorw, 0, 0b10, 0); 1510 INSN(andsw, 0, 0b11, 0); 1511 1512 #undef INSN 1513 1514 #define INSN(NAME, size, op, N) \ 1515 void NAME(Register Rd, Register Rn, Register Rm, \ 1516 enum shift_kind kind = LSL, unsigned shift = 0) { \ 1517 starti; \ 1518 f(N, 21); \ 1519 zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0); \ 1520 op_shifted_reg(0b01010, kind, shift, size, op); \ 1521 } \ 1522 \ 1523 /* These instructions have no immediate form. Provide an overload so \ 1524 that if anyone does try to use an immediate operand -- this has \ 1525 happened! -- we'll get a compile-time error. 
*/ \ 1526 void NAME(Register Rd, Register Rn, unsigned imm, \ 1527 enum shift_kind kind = LSL, unsigned shift = 0) { \ 1528 assert(false, " can't be used with immediate operand"); \ 1529 } 1530 1531 INSN(bic, 1, 0b00, 1); 1532 INSN(orn, 1, 0b01, 1); 1533 INSN(eon, 1, 0b10, 1); 1534 INSN(bics, 1, 0b11, 1); 1535 INSN(bicw, 0, 0b00, 1); 1536 INSN(ornw, 0, 0b01, 1); 1537 INSN(eonw, 0, 0b10, 1); 1538 INSN(bicsw, 0, 0b11, 1); 1539 1540 #undef INSN 1541 1542 #ifdef _WIN64 1543 // In MSVC, `mvn` is defined as a macro and it affects compilation 1544 #undef mvn 1545 #endif 1546 1547 // Aliases for short forms of orn 1548 void mvn(Register Rd, Register Rm, 1549 enum shift_kind kind = LSL, unsigned shift = 0) { 1550 orn(Rd, zr, Rm, kind, shift); 1551 } 1552 1553 void mvnw(Register Rd, Register Rm, 1554 enum shift_kind kind = LSL, unsigned shift = 0) { 1555 ornw(Rd, zr, Rm, kind, shift); 1556 } 1557 1558 // Add/subtract (shifted register) 1559 #define INSN(NAME, size, op) \ 1560 void NAME(Register Rd, Register Rn, Register Rm, \ 1561 enum shift_kind kind, unsigned shift = 0) { \ 1562 starti; \ 1563 f(0, 21); \ 1564 assert_cond(kind != ROR); \ 1565 guarantee(size == 1 || shift < 32, "incorrect shift");\ 1566 zrf(Rd, 0), zrf(Rn, 5), zrf(Rm, 16); \ 1567 op_shifted_reg(0b01011, kind, shift, size, op); \ 1568 } 1569 1570 INSN(add, 1, 0b000); 1571 INSN(sub, 1, 0b10); 1572 INSN(addw, 0, 0b000); 1573 INSN(subw, 0, 0b10); 1574 1575 INSN(adds, 1, 0b001); 1576 INSN(subs, 1, 0b11); 1577 INSN(addsw, 0, 0b001); 1578 INSN(subsw, 0, 0b11); 1579 1580 #undef INSN 1581 1582 // Add/subtract (extended register) 1583 #define INSN(NAME, op) \ 1584 void NAME(Register Rd, Register Rn, Register Rm, \ 1585 ext::operation option, int amount = 0) { \ 1586 starti; \ 1587 zrf(Rm, 16), srf(Rn, 5), srf(Rd, 0); \ 1588 add_sub_extended_reg(op, 0b01011, Rd, Rn, Rm, 0b00, option, amount); \ 1589 } 1590 1591 void add_sub_extended_reg(unsigned op, unsigned decode, 1592 Register Rd, Register Rn, Register Rm, 1593 
unsigned opt, ext::operation option, unsigned imm) { 1594 guarantee(imm <= 4, "shift amount must be <= 4"); 1595 f(op, 31, 29), f(decode, 28, 24), f(opt, 23, 22), f(1, 21); 1596 f(option, 15, 13), f(imm, 12, 10); 1597 } 1598 1599 INSN(addw, 0b000); 1600 INSN(subw, 0b010); 1601 INSN(add, 0b100); 1602 INSN(sub, 0b110); 1603 1604 #undef INSN 1605 1606 #define INSN(NAME, op) \ 1607 void NAME(Register Rd, Register Rn, Register Rm, \ 1608 ext::operation option, int amount = 0) { \ 1609 starti; \ 1610 zrf(Rm, 16), srf(Rn, 5), zrf(Rd, 0); \ 1611 add_sub_extended_reg(op, 0b01011, Rd, Rn, Rm, 0b00, option, amount); \ 1612 } 1613 1614 INSN(addsw, 0b001); 1615 INSN(subsw, 0b011); 1616 INSN(adds, 0b101); 1617 INSN(subs, 0b111); 1618 1619 #undef INSN 1620 1621 // Aliases for short forms of add and sub 1622 #define INSN(NAME) \ 1623 void NAME(Register Rd, Register Rn, Register Rm) { \ 1624 if (Rd == sp || Rn == sp) \ 1625 NAME(Rd, Rn, Rm, ext::uxtx); \ 1626 else \ 1627 NAME(Rd, Rn, Rm, LSL); \ 1628 } 1629 1630 INSN(addw); 1631 INSN(subw); 1632 INSN(add); 1633 INSN(sub); 1634 1635 INSN(addsw); 1636 INSN(subsw); 1637 INSN(adds); 1638 INSN(subs); 1639 1640 #undef INSN 1641 1642 // Add/subtract (with carry) 1643 void add_sub_carry(unsigned op, Register Rd, Register Rn, Register Rm) { 1644 starti; 1645 f(op, 31, 29); 1646 f(0b11010000, 28, 21); 1647 f(0b000000, 15, 10); 1648 zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0); 1649 } 1650 1651 #define INSN(NAME, op) \ 1652 void NAME(Register Rd, Register Rn, Register Rm) { \ 1653 add_sub_carry(op, Rd, Rn, Rm); \ 1654 } 1655 1656 INSN(adcw, 0b000); 1657 INSN(adcsw, 0b001); 1658 INSN(sbcw, 0b010); 1659 INSN(sbcsw, 0b011); 1660 INSN(adc, 0b100); 1661 INSN(adcs, 0b101); 1662 INSN(sbc,0b110); 1663 INSN(sbcs, 0b111); 1664 1665 #undef INSN 1666 1667 // Conditional compare (both kinds) 1668 void conditional_compare(unsigned op, int o1, int o2, int o3, 1669 Register Rn, unsigned imm5, unsigned nzcv, 1670 unsigned cond) { 1671 starti; 1672 f(op, 31, 29); 1673 
f(0b11010010, 28, 21); 1674 f(cond, 15, 12); 1675 f(o1, 11); 1676 f(o2, 10); 1677 f(o3, 4); 1678 f(nzcv, 3, 0); 1679 f(imm5, 20, 16), zrf(Rn, 5); 1680 } 1681 1682 #define INSN(NAME, op) \ 1683 void NAME(Register Rn, Register Rm, int imm, Condition cond) { \ 1684 int regNumber = (Rm == zr ? 31 : (uintptr_t)Rm); \ 1685 conditional_compare(op, 0, 0, 0, Rn, regNumber, imm, cond); \ 1686 } \ 1687 \ 1688 void NAME(Register Rn, int imm5, int imm, Condition cond) { \ 1689 conditional_compare(op, 1, 0, 0, Rn, imm5, imm, cond); \ 1690 } 1691 1692 INSN(ccmnw, 0b001); 1693 INSN(ccmpw, 0b011); 1694 INSN(ccmn, 0b101); 1695 INSN(ccmp, 0b111); 1696 1697 #undef INSN 1698 1699 // Conditional select 1700 void conditional_select(unsigned op, unsigned op2, 1701 Register Rd, Register Rn, Register Rm, 1702 unsigned cond) { 1703 starti; 1704 f(op, 31, 29); 1705 f(0b11010100, 28, 21); 1706 f(cond, 15, 12); 1707 f(op2, 11, 10); 1708 zrf(Rm, 16), zrf(Rn, 5), rf(Rd, 0); 1709 } 1710 1711 #define INSN(NAME, op, op2) \ 1712 void NAME(Register Rd, Register Rn, Register Rm, Condition cond) { \ 1713 conditional_select(op, op2, Rd, Rn, Rm, cond); \ 1714 } 1715 1716 INSN(cselw, 0b000, 0b00); 1717 INSN(csincw, 0b000, 0b01); 1718 INSN(csinvw, 0b010, 0b00); 1719 INSN(csnegw, 0b010, 0b01); 1720 INSN(csel, 0b100, 0b00); 1721 INSN(csinc, 0b100, 0b01); 1722 INSN(csinv, 0b110, 0b00); 1723 INSN(csneg, 0b110, 0b01); 1724 1725 #undef INSN 1726 1727 // Data processing 1728 void data_processing(unsigned op29, unsigned opcode, 1729 Register Rd, Register Rn) { 1730 f(op29, 31, 29), f(0b11010110, 28, 21); 1731 f(opcode, 15, 10); 1732 rf(Rn, 5), rf(Rd, 0); 1733 } 1734 1735 // (1 source) 1736 #define INSN(NAME, op29, opcode2, opcode) \ 1737 void NAME(Register Rd, Register Rn) { \ 1738 starti; \ 1739 f(opcode2, 20, 16); \ 1740 data_processing(op29, opcode, Rd, Rn); \ 1741 } 1742 1743 INSN(rbitw, 0b010, 0b00000, 0b00000); 1744 INSN(rev16w, 0b010, 0b00000, 0b00001); 1745 INSN(revw, 0b010, 0b00000, 0b00010); 1746 
INSN(clzw, 0b010, 0b00000, 0b00100); 1747 INSN(clsw, 0b010, 0b00000, 0b00101); 1748 1749 INSN(rbit, 0b110, 0b00000, 0b00000); 1750 INSN(rev16, 0b110, 0b00000, 0b00001); 1751 INSN(rev32, 0b110, 0b00000, 0b00010); 1752 INSN(rev, 0b110, 0b00000, 0b00011); 1753 INSN(clz, 0b110, 0b00000, 0b00100); 1754 INSN(cls, 0b110, 0b00000, 0b00101); 1755 1756 #undef INSN 1757 1758 // (2 sources) 1759 #define INSN(NAME, op29, opcode) \ 1760 void NAME(Register Rd, Register Rn, Register Rm) { \ 1761 starti; \ 1762 rf(Rm, 16); \ 1763 data_processing(op29, opcode, Rd, Rn); \ 1764 } 1765 1766 INSN(udivw, 0b000, 0b000010); 1767 INSN(sdivw, 0b000, 0b000011); 1768 INSN(lslvw, 0b000, 0b001000); 1769 INSN(lsrvw, 0b000, 0b001001); 1770 INSN(asrvw, 0b000, 0b001010); 1771 INSN(rorvw, 0b000, 0b001011); 1772 1773 INSN(udiv, 0b100, 0b000010); 1774 INSN(sdiv, 0b100, 0b000011); 1775 INSN(lslv, 0b100, 0b001000); 1776 INSN(lsrv, 0b100, 0b001001); 1777 INSN(asrv, 0b100, 0b001010); 1778 INSN(rorv, 0b100, 0b001011); 1779 1780 #undef INSN 1781 1782 // (3 sources) 1783 void data_processing(unsigned op54, unsigned op31, unsigned o0, 1784 Register Rd, Register Rn, Register Rm, 1785 Register Ra) { 1786 starti; 1787 f(op54, 31, 29), f(0b11011, 28, 24); 1788 f(op31, 23, 21), f(o0, 15); 1789 zrf(Rm, 16), zrf(Ra, 10), zrf(Rn, 5), zrf(Rd, 0); 1790 } 1791 1792 #define INSN(NAME, op54, op31, o0) \ 1793 void NAME(Register Rd, Register Rn, Register Rm, Register Ra) { \ 1794 data_processing(op54, op31, o0, Rd, Rn, Rm, Ra); \ 1795 } 1796 1797 INSN(maddw, 0b000, 0b000, 0); 1798 INSN(msubw, 0b000, 0b000, 1); 1799 INSN(madd, 0b100, 0b000, 0); 1800 INSN(msub, 0b100, 0b000, 1); 1801 INSN(smaddl, 0b100, 0b001, 0); 1802 INSN(smsubl, 0b100, 0b001, 1); 1803 INSN(umaddl, 0b100, 0b101, 0); 1804 INSN(umsubl, 0b100, 0b101, 1); 1805 1806 #undef INSN 1807 1808 #define INSN(NAME, op54, op31, o0) \ 1809 void NAME(Register Rd, Register Rn, Register Rm) { \ 1810 data_processing(op54, op31, o0, Rd, Rn, Rm, (Register)31); \ 1811 } 1812 1813 
  INSN(smulh, 0b100, 0b010, 0);
  INSN(umulh, 0b100, 0b110, 0);

#undef INSN

  // Floating-point data-processing (1 source)
  void data_processing(unsigned op31, unsigned type, unsigned opcode,
                       FloatRegister Vd, FloatRegister Vn) {
    starti;
    f(op31, 31, 29);
    f(0b11110, 28, 24);
    f(type, 23, 22), f(1, 21), f(opcode, 20, 15), f(0b10000, 14, 10);
    rf(Vn, 5), rf(Vd, 0);
  }

#define INSN(NAME, op31, type, opcode)                  \
  void NAME(FloatRegister Vd, FloatRegister Vn) {       \
    data_processing(op31, type, opcode, Vd, Vn);        \
  }

private:
  INSN(i_fmovs, 0b000, 0b00, 0b000000);
public:
  INSN(fabss, 0b000, 0b00, 0b000001);
  INSN(fnegs, 0b000, 0b00, 0b000010);
  INSN(fsqrts, 0b000, 0b00, 0b000011);
  INSN(fcvts, 0b000, 0b00, 0b000101);   // Single-precision to double-precision

private:
  INSN(i_fmovd, 0b000, 0b01, 0b000000);
public:
  INSN(fabsd, 0b000, 0b01, 0b000001);
  INSN(fnegd, 0b000, 0b01, 0b000010);
  INSN(fsqrtd, 0b000, 0b01, 0b000011);
  INSN(fcvtd, 0b000, 0b01, 0b000100);   // Double-precision to single-precision

  // Public fmov wrappers reject no-op moves (Vd == Vn).
  void fmovd(FloatRegister Vd, FloatRegister Vn) {
    assert(Vd != Vn, "should be");
    i_fmovd(Vd, Vn);
  }

  void fmovs(FloatRegister Vd, FloatRegister Vn) {
    assert(Vd != Vn, "should be");
    i_fmovs(Vd, Vn);
  }

#undef INSN

  // Floating-point data-processing (2 source)
  void data_processing(unsigned op31, unsigned type, unsigned opcode,
                       FloatRegister Vd, FloatRegister Vn, FloatRegister Vm) {
    starti;
    f(op31, 31, 29);
    f(0b11110, 28, 24);
    f(type, 23, 22), f(1, 21), f(opcode, 15, 12), f(0b10, 11, 10);
    rf(Vm, 16), rf(Vn, 5), rf(Vd, 0);
  }

#define INSN(NAME, op31, type, opcode)                                  \
  void NAME(FloatRegister Vd, FloatRegister Vn, FloatRegister Vm) {     \
    data_processing(op31, type, opcode, Vd, Vn, Vm);                    \
  }

  INSN(fmuls, 0b000, 0b00, 0b0000);
  INSN(fdivs, 0b000, 0b00, 0b0001);
  INSN(fadds, 0b000, 0b00, 0b0010);
  INSN(fsubs, 0b000, 0b00, 0b0011);
  INSN(fmaxs, 0b000, 0b00, 0b0100);
  INSN(fmins, 0b000, 0b00, 0b0101);
  INSN(fnmuls, 0b000, 0b00, 0b1000);

  INSN(fmuld, 0b000, 0b01, 0b0000);
  INSN(fdivd, 0b000, 0b01, 0b0001);
  INSN(faddd, 0b000, 0b01, 0b0010);
  INSN(fsubd, 0b000, 0b01, 0b0011);
  INSN(fmaxd, 0b000, 0b01, 0b0100);
  INSN(fmind, 0b000, 0b01, 0b0101);
  INSN(fnmuld, 0b000, 0b01, 0b1000);

#undef INSN

  // Floating-point data-processing (3 source)
  void data_processing(unsigned op31, unsigned type, unsigned o1, unsigned o0,
                       FloatRegister Vd, FloatRegister Vn, FloatRegister Vm,
                       FloatRegister Va) {
    starti;
    f(op31, 31, 29);
    f(0b11111, 28, 24);
    f(type, 23, 22), f(o1, 21), f(o0, 15);
    rf(Vm, 16), rf(Va, 10), rf(Vn, 5), rf(Vd, 0);
  }

#define INSN(NAME, op31, type, o1, o0)                                  \
  void NAME(FloatRegister Vd, FloatRegister Vn, FloatRegister Vm,       \
            FloatRegister Va) {                                         \
    data_processing(op31, type, o1, o0, Vd, Vn, Vm, Va);                \
  }

  INSN(fmadds, 0b000, 0b00, 0, 0);
  INSN(fmsubs, 0b000, 0b00, 0, 1);
  INSN(fnmadds, 0b000, 0b00, 1, 0);
  INSN(fnmsubs, 0b000, 0b00, 1, 1);

  INSN(fmaddd, 0b000, 0b01, 0, 0);
  INSN(fmsubd, 0b000, 0b01, 0, 1);
  INSN(fnmaddd, 0b000, 0b01, 1, 0);
  INSN(fnmsub, 0b000, 0b01, 1, 1); // NOTE(review): named fnmsub, not fnmsubd,
                                   // unlike its siblings; kept for compatibility

#undef INSN

  // Floating-point conditional select
  void fp_conditional_select(unsigned op31, unsigned type,
                             unsigned op1, unsigned op2,
                             Condition cond, FloatRegister Vd,
                             FloatRegister Vn, FloatRegister Vm) {
    starti;
    f(op31, 31, 29);
    f(0b11110, 28, 24);
    f(type, 23, 22);
    f(op1, 21, 21);
    f(op2, 11, 10);
    f(cond, 15, 12);
    rf(Vm, 16), rf(Vn, 5), rf(Vd, 0);
  }

#define INSN(NAME, op31, type, op1, op2)                                \
  void NAME(FloatRegister Vd, FloatRegister Vn,                         \
            FloatRegister Vm, Condition cond) {                         \
    fp_conditional_select(op31, type, op1, op2, cond, Vd, Vn, Vm);      \
  }

  INSN(fcsels, 0b000, 0b00, 0b1, 0b11);
  INSN(fcseld, 0b000, 0b01, 0b1, 0b11);

#undef INSN

  // Floating-point<->integer conversions
  void float_int_convert(unsigned op31, unsigned type,
                         unsigned rmode, unsigned opcode,
                         Register Rd, Register Rn) {
    starti;
    f(op31, 31, 29);
    f(0b11110, 28, 24);
    f(type, 23, 22), f(1, 21), f(rmode, 20, 19);
    f(opcode, 18, 16), f(0b000000, 15, 10);
    zrf(Rn, 5), zrf(Rd, 0);
  }

  // FP source -> general-purpose destination.
#define INSN(NAME, op31, type, rmode, opcode)                           \
  void NAME(Register Rd, FloatRegister Vn) {                            \
    float_int_convert(op31, type, rmode, opcode, Rd, (Register)Vn);     \
  }

  INSN(fcvtzsw, 0b000, 0b00, 0b11, 0b000);
  INSN(fcvtzs,  0b100, 0b00, 0b11, 0b000);
  INSN(fcvtzdw, 0b000, 0b01, 0b11, 0b000);
  INSN(fcvtzd,  0b100, 0b01, 0b11, 0b000);

  INSN(fmovs, 0b000, 0b00, 0b00, 0b110);
  INSN(fmovd, 0b100, 0b01, 0b00, 0b110);

  // INSN(fmovhid, 0b100, 0b10, 0b01, 0b110);

#undef INSN

  // General-purpose source -> FP destination.
#define INSN(NAME, op31, type, rmode, opcode)                           \
  void NAME(FloatRegister Vd, Register Rn) {                            \
    float_int_convert(op31, type, rmode, opcode, (Register)Vd, Rn);     \
  }

  INSN(fmovs, 0b000, 0b00, 0b00, 0b111);
  INSN(fmovd, 0b100, 0b01, 0b00, 0b111);

  INSN(scvtfws, 0b000, 0b00, 0b00, 0b010);
  INSN(scvtfs,  0b100, 0b00, 0b00, 0b010);
  INSN(scvtfwd, 0b000, 0b01, 0b00, 0b010);
  INSN(scvtfd,  0b100, 0b01, 0b00, 0b010);

  // INSN(fmovhid, 0b100, 0b10, 0b01, 0b111);

#undef INSN

  // Floating-point compare
  void float_compare(unsigned op31, unsigned type,
                     unsigned op, unsigned op2,
                     FloatRegister Vn, FloatRegister Vm = (FloatRegister)0) {
    starti;
    f(op31, 31, 29);
    f(0b11110, 28, 24);
    f(type, 23, 22), f(1, 21);
    f(op, 15, 14), f(0b1000, 13, 10), f(op2, 4, 0);
    rf(Vn, 5), rf(Vm, 16);
  }


#define INSN(NAME, op31, type, op, op2)                 \
  void NAME(FloatRegister Vn, FloatRegister Vm) {       \
    float_compare(op31, type, op, op2, Vn, Vm);         \
  }

  // Compare-with-zero form; the only immediate accepted is 0.0.
#define INSN1(NAME, op31, type, op, op2)                \
  void NAME(FloatRegister Vn, double d) {               \
    assert_cond(d == 0.0);                              \
    float_compare(op31, type, op, op2, Vn);             \
  }

  INSN(fcmps, 0b000, 0b00, 0b00, 0b00000);
  INSN1(fcmps, 0b000, 0b00, 0b00, 0b01000);
  // INSN(fcmpes, 0b000, 0b00, 0b00, 0b10000);
  // INSN1(fcmpes, 0b000, 0b00, 0b00, 0b11000);

  INSN(fcmpd, 0b000, 0b01, 0b00, 0b00000);
  INSN1(fcmpd, 0b000, 0b01, 0b00, 0b01000);
  // INSN(fcmped, 0b000, 0b01, 0b00, 0b10000);
  // INSN1(fcmped, 0b000, 0b01, 0b00, 0b11000);

#undef INSN
#undef INSN1

  // Floating-point Move (immediate)
private:
  // Packs an FP immediate into the 8-bit imm8 field (defined elsewhere).
  unsigned pack(double value);

  void fmov_imm(FloatRegister Vn, double value, unsigned size) {
    starti;
    f(0b00011110, 31, 24), f(size, 23, 22), f(1, 21);
    f(pack(value), 20, 13), f(0b10000000, 12, 5);
    rf(Vn, 0);
  }

public:

  // 0.0 is not encodable as an FP immediate, so move it from zr instead.
  void fmovs(FloatRegister Vn, double value) {
    if (value)
      fmov_imm(Vn, value, 0b00);
    else
      fmovs(Vn, zr);
  }
  void fmovd(FloatRegister Vn, double value) {
    if (value)
      fmov_imm(Vn, value, 0b01);
    else
      fmovd(Vn, zr);
  }

  // Floating-point rounding
  // type: half-precision = 11
  //       single         = 00
  //       double         = 01
  // rmode: A = Away     = 100
  //        I = current  = 111
  //        M = MinusInf = 010
  //        N = eveN     = 000
  //        P = PlusInf  = 001
  //        X = eXact    = 110
  //        Z = Zero     = 011
  void float_round(unsigned type, unsigned rmode, FloatRegister Rd, FloatRegister Rn) {
    starti;
    f(0b00011110, 31, 24);
    f(type, 23, 22);
    f(0b1001, 21, 18);
    f(rmode, 17, 15);
    f(0b10000, 14, 10);
    rf(Rn, 5), rf(Rd, 0);
  }
#define INSN(NAME, type, rmode)                         \
  void NAME(FloatRegister Vd, FloatRegister Vn) {       \
    float_round(type, rmode, Vd, Vn);                   \
  }

public:
  INSN(frintah, 0b11, 0b100);
  INSN(frintih, 0b11, 0b111);
  INSN(frintmh, 0b11, 0b010);
  INSN(frintnh, 0b11, 0b000);
  INSN(frintph, 0b11, 0b001);
  INSN(frintxh, 0b11, 0b110);
  INSN(frintzh, 0b11, 0b011);

  INSN(frintas, 0b00, 0b100);
  INSN(frintis, 0b00, 0b111);
  INSN(frintms, 0b00, 0b010);
  INSN(frintns, 0b00, 0b000);
  INSN(frintps, 0b00, 0b001);
  INSN(frintxs, 0b00, 0b110);
  INSN(frintzs, 0b00, 0b011);

  INSN(frintad, 0b01, 0b100);
  INSN(frintid, 0b01, 0b111);
  INSN(frintmd, 0b01, 0b010);
  INSN(frintnd, 0b01, 0b000);
  INSN(frintpd, 0b01, 0b001);
  INSN(frintxd, 0b01, 0b110);
  INSN(frintzd, 0b01, 0b011);
#undef INSN

/* SIMD extensions
 *
 * We just use FloatRegister in the following. They are exactly the same
 * as SIMD registers.
 */
 public:

  enum SIMD_Arrangement {
    T8B, T16B, T4H, T8H, T2S, T4S, T1D, T2D, T1Q
  };

  enum SIMD_RegVariant {
    B, H, S, D, Q
  };

private:
  static short SIMD_Size_in_bytes[];

public:
#define INSN(NAME, op)                                                  \
  void NAME(FloatRegister Rt, SIMD_RegVariant T, const Address &adr) {  \
    ld_st2((Register)Rt, adr, (int)T & 3, op + ((T==Q) ? 0b10:0b00), 1); \
  }                                                                     \

  INSN(ldr, 1);
  INSN(str, 0);

#undef INSN

private:

  // SIMD multi-structure load/store: base register, no offset/post-index.
  void ld_st(FloatRegister Vt, SIMD_Arrangement T, Register Xn, int op1, int op2) {
    starti;
    f(0,31), f((int)T & 1, 30);
    f(op1, 29, 21), f(0, 20, 16), f(op2, 15, 12);
    f((int)T >> 1, 11, 10), srf(Xn, 5), rf(Vt, 0);
  }
  // Post-index by immediate; imm must match the transfer size exactly.
  void ld_st(FloatRegister Vt, SIMD_Arrangement T, Register Xn,
             int imm, int op1, int op2, int regs) {

    bool replicate = op2 >> 2 == 3;
    // post-index value (imm) is formed differently for replicate/non-replicate ld* instructions
    int expectedImmediate = replicate ? regs * (1 << (T >> 1)) : SIMD_Size_in_bytes[T] * regs;
    guarantee(T < T1Q , "incorrect arrangement");
    guarantee(imm == expectedImmediate, "bad offset");
    starti;
    f(0,31), f((int)T & 1, 30);
    f(op1 | 0b100, 29, 21), f(0b11111, 20, 16), f(op2, 15, 12);
    f((int)T >> 1, 11, 10), srf(Xn, 5), rf(Vt, 0);
  }
  // Post-index by register.
  void ld_st(FloatRegister Vt, SIMD_Arrangement T, Register Xn,
             Register Xm, int op1, int op2) {
    starti;
    f(0,31), f((int)T & 1, 30);
    f(op1 | 0b100, 29, 21), rf(Xm, 16), f(op2, 15, 12);
    f((int)T >> 1, 11, 10), srf(Xn, 5), rf(Vt, 0);
  }

  // Dispatch on the addressing mode of a.
  void ld_st(FloatRegister Vt, SIMD_Arrangement T, Address a, int op1, int op2, int regs) {
    switch (a.getMode()) {
    case Address::base_plus_offset:
      guarantee(a.offset() == 0, "no offset allowed here");
      ld_st(Vt, T, a.base(), op1, op2);
      break;
    case Address::post:
      ld_st(Vt, T, a.base(), a.offset(), op1, op2, regs);
      break;
    case Address::post_reg:
      ld_st(Vt, T, a.base(), a.index(), op1, op2);
      break;
    default:
      ShouldNotReachHere();
    }
  }

public:

#define INSN1(NAME, op1, op2)                                           \
  void NAME(FloatRegister Vt, SIMD_Arrangement T, const Address &a) {   \
    ld_st(Vt, T, a, op1, op2, 1);                                       \
  }

#define INSN2(NAME, op1, op2)                                           \
  void NAME(FloatRegister Vt, FloatRegister Vt2, SIMD_Arrangement T, const Address &a) { \
    assert(Vt->successor() == Vt2, "Registers must be ordered");        \
    ld_st(Vt, T, a, op1, op2, 2);                                       \
  }

#define INSN3(NAME, op1, op2)                                           \
  void NAME(FloatRegister Vt, FloatRegister Vt2, FloatRegister Vt3,     \
            SIMD_Arrangement T, const Address &a) {                     \
    assert(Vt->successor() == Vt2 && Vt2->successor() == Vt3,           \
           "Registers must be ordered");                                \
    ld_st(Vt, T, a, op1, op2, 3);                                       \
  }

#define INSN4(NAME, op1, op2)                                           \
  void NAME(FloatRegister Vt, FloatRegister Vt2, FloatRegister Vt3,     \
            FloatRegister Vt4, SIMD_Arrangement T, const Address &a) {  \
    assert(Vt->successor() == Vt2 && Vt2->successor() == Vt3 &&         \
           Vt3->successor() == Vt4, "Registers must be ordered");       \
    ld_st(Vt, T, a, op1, op2, 4);                                       \
  }

  INSN1(ld1,  0b001100010, 0b0111);
  INSN2(ld1,  0b001100010, 0b1010);
  INSN3(ld1,  0b001100010, 0b0110);
  INSN4(ld1,  0b001100010, 0b0010);

  INSN2(ld2,  0b001100010, 0b1000);
  INSN3(ld3,  0b001100010, 0b0100);
  INSN4(ld4,  0b001100010, 0b0000);

  INSN1(st1,  0b001100000, 0b0111);
  INSN2(st1,  0b001100000, 0b1010);
  INSN3(st1,  0b001100000, 0b0110);
  INSN4(st1,  0b001100000, 0b0010);

  INSN2(st2,  0b001100000, 0b1000);
  INSN3(st3,  0b001100000, 0b0100);
  INSN4(st4,  0b001100000, 0b0000);

  INSN1(ld1r, 0b001101010, 0b1100);
  INSN2(ld2r, 0b001101011, 0b1100);
  INSN3(ld3r, 0b001101010, 0b1110);
  INSN4(ld4r, 0b001101011, 0b1110);

#undef INSN1
#undef INSN2
#undef INSN3
#undef INSN4

  // Three-register SIMD logical ops (byte arrangements only).
#define INSN(NAME, opc)                                                 \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
    starti;                                                             \
    assert(T == T8B || T == T16B, "must be T8B or T16B");               \
    f(0, 31), f((int)T & 1, 30), f(opc, 29, 21);                        \
    rf(Vm, 16), f(0b000111, 15, 10), rf(Vn, 5), rf(Vd, 0);              \
  }

  INSN(eor,  0b101110001);
  INSN(orr,  0b001110101);
  INSN(andr, 0b001110001);
  INSN(bic,  0b001110011);
  INSN(bif,  0b101110111);
  INSN(bit,  0b101110101);
  INSN(bsl,  0b101110011);
  INSN(orn,  0b001110111);

#undef INSN

  // Three-register SIMD arithmetic.
#define INSN(NAME, opc, opc2, acceptT2D)                                \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
    guarantee(T != T1Q && T != T1D, "incorrect arrangement");           \
    if (!acceptT2D) guarantee(T != T2D, "incorrect arrangement");       \
    starti;                                                             \
    f(0, 31), f((int)T & 1, 30), f(opc, 29), f(0b01110, 28, 24);        \
    f((int)T >> 1, 23, 22), f(1, 21), rf(Vm, 16), f(opc2, 15, 10);      \
    rf(Vn, 5), rf(Vd, 0);                                               \
  }

  INSN(addv,   0, 0b100001, true);  // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(subv,   1, 0b100001, true);  // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(mulv,   0, 0b100111, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(mlav,   0, 0b100101, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(mlsv,   1, 0b100101, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(sshl,   0, 0b010001, true);  // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(ushl,   1, 0b010001, true);  // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(addpv,  0, 0b101111, true);  // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(smullv, 0, 0b110000, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(umullv, 1, 0b110000, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(umlalv, 1, 0b100000, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S

#undef INSN

  // Two-register SIMD ops; `accepted` caps the allowed arrangements.
#define INSN(NAME, opc, opc2, accepted)                                 \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {   \
    guarantee(T != T1Q && T != T1D, "incorrect arrangement");           \
    if (accepted < 3) guarantee(T != T2D, "incorrect arrangement");     \
    if (accepted < 2) guarantee(T != T2S, "incorrect arrangement");     \
    if (accepted < 1) guarantee(T == T8B || T == T16B, "incorrect arrangement"); \
    starti;                                                             \
    f(0, 31), f((int)T & 1, 30), f(opc, 29), f(0b01110, 28, 24);        \
    f((int)T >> 1, 23, 22), f(opc2, 21, 10);                            \
    rf(Vn, 5), rf(Vd, 0);                                               \
  }

  INSN(absr,   0, 0b100000101110, 3); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(negr,   1, 0b100000101110, 3); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(notr,   1, 0b100000010110, 0); // accepted arrangements: T8B, T16B
  INSN(addv,   0, 0b110001101110, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S
  INSN(cls,    0, 0b100000010010, 2); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(clz,    1, 0b100000010010, 2); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(cnt,    0, 0b100000010110, 0); // accepted arrangements: T8B, T16B
  INSN(uaddlp, 1, 0b100000001010, 2); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(uaddlv, 1, 0b110000001110, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S

#undef INSN

#define INSN(NAME, opc)                                                 \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {   \
    starti;                                                             \
    assert(T == T4S, "arrangement must be T4S");                        \
    f(0, 31), f((int)T & 1, 30), f(0b101110, 29, 24), f(opc, 23),       \
      f(T == T4S ?
0 : 1, 22), f(0b110000111110, 21, 10); rf(Vn, 5), rf(Vd, 0); \ 2314 } 2315 2316 INSN(fmaxv, 0); 2317 INSN(fminv, 1); 2318 2319 #undef INSN 2320 2321 #define INSN(NAME, op0, cmode0) \ 2322 void NAME(FloatRegister Vd, SIMD_Arrangement T, unsigned imm8, unsigned lsl = 0) { \ 2323 unsigned cmode = cmode0; \ 2324 unsigned op = op0; \ 2325 starti; \ 2326 assert(lsl == 0 || \ 2327 ((T == T4H || T == T8H) && lsl == 8) || \ 2328 ((T == T2S || T == T4S) && ((lsl >> 3) < 4) && ((lsl & 7) == 0)), "invalid shift");\ 2329 cmode |= lsl >> 2; \ 2330 if (T == T4H || T == T8H) cmode |= 0b1000; \ 2331 if (!(T == T4H || T == T8H || T == T2S || T == T4S)) { \ 2332 assert(op == 0 && cmode0 == 0, "must be MOVI"); \ 2333 cmode = 0b1110; \ 2334 if (T == T1D || T == T2D) op = 1; \ 2335 } \ 2336 f(0, 31), f((int)T & 1, 30), f(op, 29), f(0b0111100000, 28, 19); \ 2337 f(imm8 >> 5, 18, 16), f(cmode, 15, 12), f(0x01, 11, 10), f(imm8 & 0b11111, 9, 5); \ 2338 rf(Vd, 0); \ 2339 } 2340 2341 INSN(movi, 0, 0); 2342 INSN(orri, 0, 1); 2343 INSN(mvni, 1, 0); 2344 INSN(bici, 1, 1); 2345 2346 #undef INSN 2347 2348 #define INSN(NAME, op1, op2, op3) \ 2349 void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \ 2350 starti; \ 2351 assert(T == T2S || T == T4S || T == T2D, "invalid arrangement"); \ 2352 f(0, 31), f((int)T & 1, 30), f(op1, 29), f(0b01110, 28, 24), f(op2, 23); \ 2353 f(T==T2D ? 
1:0, 22); f(1, 21), rf(Vm, 16), f(op3, 15, 10), rf(Vn, 5), rf(Vd, 0); \ 2354 } 2355 2356 INSN(fadd, 0, 0, 0b110101); 2357 INSN(fdiv, 1, 0, 0b111111); 2358 INSN(fmul, 1, 0, 0b110111); 2359 INSN(fsub, 0, 1, 0b110101); 2360 INSN(fmla, 0, 0, 0b110011); 2361 INSN(fmls, 0, 1, 0b110011); 2362 INSN(fmax, 0, 0, 0b111101); 2363 INSN(fmin, 0, 1, 0b111101); 2364 2365 #undef INSN 2366 2367 #define INSN(NAME, opc) \ 2368 void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \ 2369 starti; \ 2370 assert(T == T4S, "arrangement must be T4S"); \ 2371 f(0b01011110000, 31, 21), rf(Vm, 16), f(opc, 15, 10), rf(Vn, 5), rf(Vd, 0); \ 2372 } 2373 2374 INSN(sha1c, 0b000000); 2375 INSN(sha1m, 0b001000); 2376 INSN(sha1p, 0b000100); 2377 INSN(sha1su0, 0b001100); 2378 INSN(sha256h2, 0b010100); 2379 INSN(sha256h, 0b010000); 2380 INSN(sha256su1, 0b011000); 2381 2382 #undef INSN 2383 2384 #define INSN(NAME, opc) \ 2385 void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { \ 2386 starti; \ 2387 assert(T == T4S, "arrangement must be T4S"); \ 2388 f(0b0101111000101000, 31, 16), f(opc, 15, 10), rf(Vn, 5), rf(Vd, 0); \ 2389 } 2390 2391 INSN(sha1h, 0b000010); 2392 INSN(sha1su1, 0b000110); 2393 INSN(sha256su0, 0b001010); 2394 2395 #undef INSN 2396 2397 #define INSN(NAME, opc) \ 2398 void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \ 2399 starti; \ 2400 assert(T == T2D, "arrangement must be T2D"); \ 2401 f(0b11001110011, 31, 21), rf(Vm, 16), f(opc, 15, 10), rf(Vn, 5), rf(Vd, 0); \ 2402 } 2403 2404 INSN(sha512h, 0b100000); 2405 INSN(sha512h2, 0b100001); 2406 INSN(sha512su1, 0b100010); 2407 2408 #undef INSN 2409 2410 #define INSN(NAME, opc) \ 2411 void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { \ 2412 starti; \ 2413 assert(T == T2D, "arrangement must be T2D"); \ 2414 f(opc, 31, 10), rf(Vn, 5), rf(Vd, 0); \ 2415 } 2416 2417 INSN(sha512su0, 0b1100111011000000100000); 2418 2419 #undef INSN 2420 2421 #define 
INSN(NAME, opc) \ 2422 void NAME(FloatRegister Vd, FloatRegister Vn) { \ 2423 starti; \ 2424 f(opc, 31, 10), rf(Vn, 5), rf(Vd, 0); \ 2425 } 2426 2427 INSN(aese, 0b0100111000101000010010); 2428 INSN(aesd, 0b0100111000101000010110); 2429 INSN(aesmc, 0b0100111000101000011010); 2430 INSN(aesimc, 0b0100111000101000011110); 2431 2432 #undef INSN 2433 2434 #define INSN(NAME, op1, op2) \ 2435 void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm, int index = 0) { \ 2436 starti; \ 2437 assert(T == T2S || T == T4S || T == T2D, "invalid arrangement"); \ 2438 assert(index >= 0 && ((T == T2D && index <= 1) || (T != T2D && index <= 3)), "invalid index"); \ 2439 f(0, 31), f((int)T & 1, 30), f(op1, 29); f(0b011111, 28, 23); \ 2440 f(T == T2D ? 1 : 0, 22), f(T == T2D ? 0 : index & 1, 21), rf(Vm, 16); \ 2441 f(op2, 15, 12), f(T == T2D ? index : (index >> 1), 11), f(0, 10); \ 2442 rf(Vn, 5), rf(Vd, 0); \ 2443 } 2444 2445 // FMLA/FMLS - Vector - Scalar 2446 INSN(fmlavs, 0, 0b0001); 2447 INSN(fmlsvs, 0, 0b0101); 2448 // FMULX - Vector - Scalar 2449 INSN(fmulxvs, 1, 0b1001); 2450 2451 #undef INSN 2452 2453 // Floating-point Reciprocal Estimate 2454 void frecpe(FloatRegister Vd, FloatRegister Vn, SIMD_RegVariant type) { 2455 assert(type == D || type == S, "Wrong type for frecpe"); 2456 starti; 2457 f(0b010111101, 31, 23); 2458 f(type == D ? 
1 : 0, 22); 2459 f(0b100001110110, 21, 10); 2460 rf(Vn, 5), rf(Vd, 0); 2461 } 2462 2463 // (double) {a, b} -> (a + b) 2464 void faddpd(FloatRegister Vd, FloatRegister Vn) { 2465 starti; 2466 f(0b0111111001110000110110, 31, 10); 2467 rf(Vn, 5), rf(Vd, 0); 2468 } 2469 2470 void ins(FloatRegister Vd, SIMD_RegVariant T, FloatRegister Vn, int didx, int sidx) { 2471 starti; 2472 assert(T != Q, "invalid register variant"); 2473 f(0b01101110000, 31, 21), f(((didx<<1)|1)<<(int)T, 20, 16), f(0, 15); 2474 f(sidx<<(int)T, 14, 11), f(1, 10), rf(Vn, 5), rf(Vd, 0); 2475 } 2476 2477 void umov(Register Rd, FloatRegister Vn, SIMD_RegVariant T, int idx) { 2478 starti; 2479 f(0, 31), f(T==D ? 1:0, 30), f(0b001110000, 29, 21); 2480 f(((idx<<1)|1)<<(int)T, 20, 16), f(0b001111, 15, 10); 2481 rf(Vn, 5), rf(Rd, 0); 2482 } 2483 2484 #define INSN(NAME, opc, opc2, isSHR) \ 2485 void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, int shift){ \ 2486 starti; \ 2487 /* The encodings for the immh:immb fields (bits 22:16) in *SHR are \ 2488 * 0001 xxx 8B/16B, shift = 16 - UInt(immh:immb) \ 2489 * 001x xxx 4H/8H, shift = 32 - UInt(immh:immb) \ 2490 * 01xx xxx 2S/4S, shift = 64 - UInt(immh:immb) \ 2491 * 1xxx xxx 1D/2D, shift = 128 - UInt(immh:immb) \ 2492 * (1D is RESERVED) \ 2493 * for SHL shift is calculated as: \ 2494 * 0001 xxx 8B/16B, shift = UInt(immh:immb) - 8 \ 2495 * 001x xxx 4H/8H, shift = UInt(immh:immb) - 16 \ 2496 * 01xx xxx 2S/4S, shift = UInt(immh:immb) - 32 \ 2497 * 1xxx xxx 1D/2D, shift = UInt(immh:immb) - 64 \ 2498 * (1D is RESERVED) \ 2499 */ \ 2500 assert((1 << ((T>>1)+3)) > shift, "Invalid Shift value"); \ 2501 int cVal = (1 << (((T >> 1) + 3) + (isSHR ? 1 : 0))); \ 2502 int encodedShift = isSHR ? 
cVal - shift : cVal + shift; \ 2503 f(0, 31), f(T & 1, 30), f(opc, 29), f(0b011110, 28, 23), \ 2504 f(encodedShift, 22, 16); f(opc2, 15, 10), rf(Vn, 5), rf(Vd, 0); \ 2505 } 2506 2507 INSN(shl, 0, 0b010101, /* isSHR = */ false); 2508 INSN(sshr, 0, 0b000001, /* isSHR = */ true); 2509 INSN(ushr, 1, 0b000001, /* isSHR = */ true); 2510 2511 #undef INSN 2512 2513 private: 2514 void _ushll(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) { 2515 starti; 2516 /* The encodings for the immh:immb fields (bits 22:16) are 2517 * 0001 xxx 8H, 8B/16b shift = xxx 2518 * 001x xxx 4S, 4H/8H shift = xxxx 2519 * 01xx xxx 2D, 2S/4S shift = xxxxx 2520 * 1xxx xxx RESERVED 2521 */ 2522 assert((Tb >> 1) + 1 == (Ta >> 1), "Incompatible arrangement"); 2523 assert((1 << ((Tb>>1)+3)) > shift, "Invalid shift value"); 2524 f(0, 31), f(Tb & 1, 30), f(0b1011110, 29, 23), f((1 << ((Tb>>1)+3))|shift, 22, 16); 2525 f(0b101001, 15, 10), rf(Vn, 5), rf(Vd, 0); 2526 } 2527 2528 public: 2529 void ushll(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) { 2530 assert(Tb == T8B || Tb == T4H || Tb == T2S, "invalid arrangement"); 2531 _ushll(Vd, Ta, Vn, Tb, shift); 2532 } 2533 2534 void ushll2(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) { 2535 assert(Tb == T16B || Tb == T8H || Tb == T4S, "invalid arrangement"); 2536 _ushll(Vd, Ta, Vn, Tb, shift); 2537 } 2538 2539 // Move from general purpose register 2540 // mov Vd.T[index], Rn 2541 void mov(FloatRegister Vd, SIMD_Arrangement T, int index, Register Xn) { 2542 starti; 2543 f(0b01001110000, 31, 21), f(((1 << (T >> 1)) | (index << ((T >> 1) + 1))), 20, 16); 2544 f(0b000111, 15, 10), zrf(Xn, 5), rf(Vd, 0); 2545 } 2546 2547 // Move to general purpose register 2548 // mov Rd, Vn.T[index] 2549 void mov(Register Xd, FloatRegister Vn, SIMD_Arrangement T, int index) { 2550 guarantee(T >= T2S && T < T1Q, "only D and S arrangements are supported"); 
2551 starti; 2552 f(0, 31), f((T >= T1D) ? 1:0, 30), f(0b001110000, 29, 21); 2553 f(((1 << (T >> 1)) | (index << ((T >> 1) + 1))), 20, 16); 2554 f(0b001111, 15, 10), rf(Vn, 5), rf(Xd, 0); 2555 } 2556 2557 private: 2558 void _pmull(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, FloatRegister Vm, SIMD_Arrangement Tb) { 2559 starti; 2560 assert((Ta == T1Q && (Tb == T1D || Tb == T2D)) || 2561 (Ta == T8H && (Tb == T8B || Tb == T16B)), "Invalid Size specifier"); 2562 int size = (Ta == T1Q) ? 0b11 : 0b00; 2563 f(0, 31), f(Tb & 1, 30), f(0b001110, 29, 24), f(size, 23, 22); 2564 f(1, 21), rf(Vm, 16), f(0b111000, 15, 10), rf(Vn, 5), rf(Vd, 0); 2565 } 2566 2567 public: 2568 void pmull(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, FloatRegister Vm, SIMD_Arrangement Tb) { 2569 assert(Tb == T1D || Tb == T8B, "pmull assumes T1D or T8B as the second size specifier"); 2570 _pmull(Vd, Ta, Vn, Vm, Tb); 2571 } 2572 2573 void pmull2(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, FloatRegister Vm, SIMD_Arrangement Tb) { 2574 assert(Tb == T2D || Tb == T16B, "pmull2 assumes T2D or T16B as the second size specifier"); 2575 _pmull(Vd, Ta, Vn, Vm, Tb); 2576 } 2577 2578 void uqxtn(FloatRegister Vd, SIMD_Arrangement Tb, FloatRegister Vn, SIMD_Arrangement Ta) { 2579 starti; 2580 int size_b = (int)Tb >> 1; 2581 int size_a = (int)Ta >> 1; 2582 assert(size_b < 3 && size_b == size_a - 1, "Invalid size specifier"); 2583 f(0, 31), f(Tb & 1, 30), f(0b101110, 29, 24), f(size_b, 23, 22); 2584 f(0b100001010010, 21, 10), rf(Vn, 5), rf(Vd, 0); 2585 } 2586 2587 void dup(FloatRegister Vd, SIMD_Arrangement T, Register Xs) 2588 { 2589 starti; 2590 assert(T != T1D, "reserved encoding"); 2591 f(0,31), f((int)T & 1, 30), f(0b001110000, 29, 21); 2592 f((1 << (T >> 1)), 20, 16), f(0b000011, 15, 10), zrf(Xs, 5), rf(Vd, 0); 2593 } 2594 2595 void dup(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, int index = 0) 2596 { 2597 starti; 2598 assert(T != T1D, "reserved encoding"); 2599 
f(0, 31), f((int)T & 1, 30), f(0b001110000, 29, 21); 2600 f(((1 << (T >> 1)) | (index << ((T >> 1) + 1))), 20, 16); 2601 f(0b000001, 15, 10), rf(Vn, 5), rf(Vd, 0); 2602 } 2603 2604 // AdvSIMD ZIP/UZP/TRN 2605 #define INSN(NAME, opcode) \ 2606 void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \ 2607 guarantee(T != T1D && T != T1Q, "invalid arrangement"); \ 2608 starti; \ 2609 f(0, 31), f(0b001110, 29, 24), f(0, 21), f(0, 15); \ 2610 f(opcode, 14, 12), f(0b10, 11, 10); \ 2611 rf(Vm, 16), rf(Vn, 5), rf(Vd, 0); \ 2612 f(T & 1, 30), f(T >> 1, 23, 22); \ 2613 } 2614 2615 INSN(uzp1, 0b001); 2616 INSN(trn1, 0b010); 2617 INSN(zip1, 0b011); 2618 INSN(uzp2, 0b101); 2619 INSN(trn2, 0b110); 2620 INSN(zip2, 0b111); 2621 2622 #undef INSN 2623 2624 // CRC32 instructions 2625 #define INSN(NAME, c, sf, sz) \ 2626 void NAME(Register Rd, Register Rn, Register Rm) { \ 2627 starti; \ 2628 f(sf, 31), f(0b0011010110, 30, 21), f(0b010, 15, 13), f(c, 12); \ 2629 f(sz, 11, 10), rf(Rm, 16), rf(Rn, 5), rf(Rd, 0); \ 2630 } 2631 2632 INSN(crc32b, 0, 0, 0b00); 2633 INSN(crc32h, 0, 0, 0b01); 2634 INSN(crc32w, 0, 0, 0b10); 2635 INSN(crc32x, 0, 1, 0b11); 2636 INSN(crc32cb, 1, 0, 0b00); 2637 INSN(crc32ch, 1, 0, 0b01); 2638 INSN(crc32cw, 1, 0, 0b10); 2639 INSN(crc32cx, 1, 1, 0b11); 2640 2641 #undef INSN 2642 2643 // Table vector lookup 2644 #define INSN(NAME, op) \ 2645 void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, unsigned registers, FloatRegister Vm) { \ 2646 starti; \ 2647 assert(T == T8B || T == T16B, "invalid arrangement"); \ 2648 assert(0 < registers && registers <= 4, "invalid number of registers"); \ 2649 f(0, 31), f((int)T & 1, 30), f(0b001110000, 29, 21), rf(Vm, 16), f(0, 15); \ 2650 f(registers - 1, 14, 13), f(op, 12),f(0b00, 11, 10), rf(Vn, 5), rf(Vd, 0); \ 2651 } 2652 2653 INSN(tbl, 0); 2654 INSN(tbx, 1); 2655 2656 #undef INSN 2657 2658 // AdvSIMD two-reg misc 2659 // In this instruction group, the 2 bits in the size field ([23:22]) 
may be 2660 // fixed or determined by the "SIMD_Arrangement T", or both. The additional 2661 // parameter "tmask" is a 2-bit mask used to indicate which bits in the size 2662 // field are determined by the SIMD_Arrangement. The bit of "tmask" should be 2663 // set to 1 if corresponding bit marked as "x" in the ArmARM. 2664 #define INSN(NAME, U, size, tmask, opcode) \ 2665 void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { \ 2666 starti; \ 2667 assert((ASSERTION), MSG); \ 2668 f(0, 31), f((int)T & 1, 30), f(U, 29), f(0b01110, 28, 24); \ 2669 f(size | ((int)(T >> 1) & tmask), 23, 22), f(0b10000, 21, 17); \ 2670 f(opcode, 16, 12), f(0b10, 11, 10), rf(Vn, 5), rf(Vd, 0); \ 2671 } 2672 2673 #define MSG "invalid arrangement" 2674 2675 #define ASSERTION (T == T2S || T == T4S || T == T2D) 2676 INSN(fsqrt, 1, 0b10, 0b01, 0b11111); 2677 INSN(fabs, 0, 0b10, 0b01, 0b01111); 2678 INSN(fneg, 1, 0b10, 0b01, 0b01111); 2679 INSN(frintn, 0, 0b00, 0b01, 0b11000); 2680 INSN(frintm, 0, 0b00, 0b01, 0b11001); 2681 INSN(frintp, 0, 0b10, 0b01, 0b11000); 2682 #undef ASSERTION 2683 2684 #define ASSERTION (T == T8B || T == T16B || T == T4H || T == T8H || T == T2S || T == T4S) 2685 INSN(rev64, 0, 0b00, 0b11, 0b00000); 2686 #undef ASSERTION 2687 2688 #define ASSERTION (T == T8B || T == T16B || T == T4H || T == T8H) 2689 INSN(rev32, 1, 0b00, 0b11, 0b00000); 2690 #undef ASSERTION 2691 2692 #define ASSERTION (T == T8B || T == T16B) 2693 INSN(rev16, 0, 0b00, 0b11, 0b00001); 2694 INSN(rbit, 1, 0b01, 0b00, 0b00101); 2695 #undef ASSERTION 2696 2697 #undef MSG 2698 2699 #undef INSN 2700 2701 void ext(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm, int index) 2702 { 2703 starti; 2704 assert(T == T8B || T == T16B, "invalid arrangement"); 2705 assert((T == T8B && index <= 0b0111) || (T == T16B && index <= 0b1111), "Invalid index value"); 2706 f(0, 31), f((int)T & 1, 30), f(0b101110000, 29, 21); 2707 rf(Vm, 16), f(0, 15), f(index, 14, 11); 2708 f(0, 10), rf(Vn, 5), 
rf(Vd, 0); 2709 } 2710 2711 Assembler(CodeBuffer* code) : AbstractAssembler(code) { 2712 } 2713 2714 virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, 2715 Register tmp, 2716 int offset) { 2717 ShouldNotCallThis(); 2718 return RegisterOrConstant(); 2719 } 2720 2721 // Stack overflow checking 2722 virtual void bang_stack_with_offset(int offset); 2723 2724 static bool operand_valid_for_logical_immediate(bool is32, uint64_t imm); 2725 static bool operand_valid_for_add_sub_immediate(int64_t imm); 2726 static bool operand_valid_for_float_immediate(double imm); 2727 2728 void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0); 2729 void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0); 2730 }; 2731 2732 inline Assembler::Membar_mask_bits operator|(Assembler::Membar_mask_bits a, 2733 Assembler::Membar_mask_bits b) { 2734 return Assembler::Membar_mask_bits(unsigned(a)|unsigned(b)); 2735 } 2736 2737 Instruction_aarch64::~Instruction_aarch64() { 2738 assem->emit(); 2739 } 2740 2741 #undef starti 2742 2743 // Invert a condition 2744 inline const Assembler::Condition operator~(const Assembler::Condition cond) { 2745 return Assembler::Condition(int(cond) ^ 1); 2746 } 2747 2748 class BiasedLockingCounters; 2749 2750 extern "C" void das(uint64_t start, int len); 2751 2752 #endif // CPU_AARCH64_ASSEMBLER_AARCH64_HPP