/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_AARCH64_ASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_ASSEMBLER_AARCH64_HPP

#include "asm/register.hpp"

// definitions of various symbolic names for machine registers

// First intercalls between C and Java which use 8 general registers
// and 8 floating registers

// we also have to copy between x86 and ARM registers but that's a
// secondary complication -- not all code employing C call convention
// executes as x86 code though -- we generate some of it

// Counts of registers used for argument passing by the C (AAPCS64)
// and Java calling conventions on AArch64.
class Argument {
 public:
  enum {
    n_int_register_parameters_c   = 8,  // r0, r1, ... r7 (c_rarg0, c_rarg1, ...)
    n_float_register_parameters_c = 8,  // v0, v1, ... v7 (c_farg0, c_farg1, ... )

    n_int_register_parameters_j   = 8,  // r1, ... r7, r0 (j_rarg0, j_rarg1, ...)
    n_float_register_parameters_j = 8   // v0, v1, ... v7 (j_farg0, j_farg1, ...)
  };
};

// C (native) integer argument registers
REGISTER_DECLARATION(Register, c_rarg0, r0);
REGISTER_DECLARATION(Register, c_rarg1, r1);
REGISTER_DECLARATION(Register, c_rarg2, r2);
REGISTER_DECLARATION(Register, c_rarg3, r3);
REGISTER_DECLARATION(Register, c_rarg4, r4);
REGISTER_DECLARATION(Register, c_rarg5, r5);
REGISTER_DECLARATION(Register, c_rarg6, r6);
REGISTER_DECLARATION(Register, c_rarg7, r7);

// C (native) floating-point argument registers
REGISTER_DECLARATION(FloatRegister, c_farg0, v0);
REGISTER_DECLARATION(FloatRegister, c_farg1, v1);
REGISTER_DECLARATION(FloatRegister, c_farg2, v2);
REGISTER_DECLARATION(FloatRegister, c_farg3, v3);
REGISTER_DECLARATION(FloatRegister, c_farg4, v4);
REGISTER_DECLARATION(FloatRegister, c_farg5, v5);
REGISTER_DECLARATION(FloatRegister, c_farg6, v6);
REGISTER_DECLARATION(FloatRegister, c_farg7, v7);

// Symbolically name the register arguments used by the Java calling convention.
// We have control over the convention for java so we can do what we please.
// What pleases us is to offset the java calling convention so that when
// we call a suitable jni method the arguments are lined up and we don't
// have to do much shuffling. A suitable jni method is non-static and a
// small number of arguments
//
// |--------------------------------------------------------------------|
// | c_rarg0 c_rarg1 c_rarg2 c_rarg3 c_rarg4 c_rarg5 c_rarg6 c_rarg7   |
// |--------------------------------------------------------------------|
// | r0      r1      r2      r3      r4      r5      r6      r7        |
// |--------------------------------------------------------------------|
// | j_rarg7 j_rarg0 j_rarg1 j_rarg2 j_rarg3 j_rarg4 j_rarg5 j_rarg6   |
// |--------------------------------------------------------------------|


REGISTER_DECLARATION(Register, j_rarg0, c_rarg1);
REGISTER_DECLARATION(Register, j_rarg1, c_rarg2);
REGISTER_DECLARATION(Register, j_rarg2, c_rarg3);
REGISTER_DECLARATION(Register, j_rarg3, c_rarg4);
REGISTER_DECLARATION(Register, j_rarg4, c_rarg5);
REGISTER_DECLARATION(Register, j_rarg5, c_rarg6);
REGISTER_DECLARATION(Register, j_rarg6, c_rarg7);
REGISTER_DECLARATION(Register, j_rarg7, c_rarg0);

// Java floating args are passed as per C

REGISTER_DECLARATION(FloatRegister, j_farg0, v0);
REGISTER_DECLARATION(FloatRegister, j_farg1, v1);
REGISTER_DECLARATION(FloatRegister, j_farg2, v2);
REGISTER_DECLARATION(FloatRegister, j_farg3, v3);
REGISTER_DECLARATION(FloatRegister, j_farg4, v4);
REGISTER_DECLARATION(FloatRegister, j_farg5, v5);
REGISTER_DECLARATION(FloatRegister, j_farg6, v6);
REGISTER_DECLARATION(FloatRegister, j_farg7, v7);

// registers used to hold VM data either temporarily within a method
// or across method calls

// volatile (caller-save) registers

// r8 is used for indirect result location return
// we use it and r9 as scratch registers
REGISTER_DECLARATION(Register, rscratch1, r8);
REGISTER_DECLARATION(Register, rscratch2, r9);

// current method -- must be in a call-clobbered register
REGISTER_DECLARATION(Register, rmethod, r12);

// non-volatile (callee-save) registers
// are r16-29, of which the following are dedicated global state

// link register
REGISTER_DECLARATION(Register, lr,        r30);
// frame pointer
REGISTER_DECLARATION(Register, rfp,       r29);
// current thread
REGISTER_DECLARATION(Register, rthread,   r28);
// base of heap
REGISTER_DECLARATION(Register, rheapbase, r27);
// constant pool cache
REGISTER_DECLARATION(Register, rcpool,    r26);
// monitors allocated on stack
REGISTER_DECLARATION(Register, rmonitors, r25);
// locals on stack
REGISTER_DECLARATION(Register, rlocals,   r24);
// bytecode pointer
REGISTER_DECLARATION(Register, rbcp,      r22);
// Dispatch table base
REGISTER_DECLARATION(Register, rdispatch, r21);
// Java stack pointer
REGISTER_DECLARATION(Register, esp,       r20);

// Preserved predicate register with all elements set TRUE.
REGISTER_DECLARATION(PRegister, ptrue, p7);

#define assert_cond(ARG1) assert(ARG1, #ARG1)

namespace asm_util {
  uint32_t encode_logical_immediate(bool is32, uint64_t imm);
};

using namespace asm_util;


class Assembler;

// Accumulates the fields of one 32-bit AArch64 instruction word.  In
// debug builds the companion 'bits' mask records which bit positions
// have already been written, so double-writes of a field and fields
// left unwritten can be asserted against (see f() and emit()).
class Instruction_aarch64 {
  unsigned insn;
#ifdef ASSERT
  unsigned bits;
#endif
  Assembler *assem;

public:

  Instruction_aarch64(class Assembler *as) {
#ifdef ASSERT
    bits = 0;
#endif
    insn = 0;
    assem = as;
  }

  inline ~Instruction_aarch64();

  unsigned &get_insn() { return insn; }
#ifdef ASSERT
  unsigned &get_bits() { return bits; }
#endif

  // Sign-extend the bitfield val[hi:lo] to a 32-bit signed value.
  static inline int32_t extend(unsigned val, int hi = 31, int lo = 0) {
    union {
      unsigned u;
      int n;
    };

    u = val << (31 - hi);
    n = n >> (31 - hi + lo);
    return n;
  }

  // Extract the unsigned bitfield val[msb:lsb].
  static inline uint32_t extract(uint32_t val, int msb, int lsb) {
    int nbits = msb - lsb + 1;
    assert_cond(msb >= lsb);
    uint32_t mask = (1U << nbits) - 1;
    uint32_t result = val >> lsb;
    result &= mask;
    return result;
  }

  // Extract the bitfield val[msb:lsb] and sign-extend it.
  static inline int32_t sextract(uint32_t val, int msb, int lsb) {
    uint32_t uval = extract(val, msb, lsb);
    return extend(uval, msb - lsb);
  }

  // Overwrite the unsigned field [msb:lsb] of the instruction at address a.
  static void patch(address a, int msb, int lsb, uint64_t val) {
    int nbits = msb - lsb + 1;
    guarantee(val < (1U << nbits), "Field too big for insn");
    assert_cond(msb >= lsb);
    unsigned mask = (1U << nbits) - 1;
    val <<= lsb;
    mask <<= lsb;
    unsigned target = *(unsigned *)a;
    target &= ~mask;
    target |= val;
    *(unsigned *)a = target;
  }

  // Overwrite the signed field [msb:lsb] of the instruction at address a.
  static void spatch(address a, int msb, int lsb, int64_t val) {
    int nbits = msb - lsb + 1;
    // val must fit in nbits as a signed quantity: all bits above the
    // field must be a copy of the sign bit.
    int64_t chk = val >> (nbits - 1);
    guarantee (chk == -1 || chk == 0, "Field too big for insn");
    unsigned uval = val;
    unsigned mask = (1U << nbits) - 1;
    uval &= mask;
    uval <<= lsb;
    mask <<= lsb;
    unsigned target = *(unsigned *)a;
    target &= ~mask;
    target |= uval;
    *(unsigned *)a = target;
  }

  // Emit the unsigned value val into bits [msb:lsb] of the current insn.
  void f(unsigned val, int msb, int lsb) {
    int nbits = msb - lsb + 1;
    guarantee(val < (1U << nbits), "Field too big for insn");
    assert_cond(msb >= lsb);
    unsigned mask = (1U << nbits) - 1;
    val <<= lsb;
    mask <<= lsb;
    insn |= val;
    // Each bit position may only be written once per instruction.
    assert_cond((bits & mask) == 0);
#ifdef ASSERT
    bits |= mask;
#endif
  }

  // Emit a single-bit field.
  void f(unsigned val, int bit) {
    f(val, bit, bit);
  }

  // Emit the signed value val into bits [msb:lsb], checking that it fits.
  void sf(int64_t val, int msb, int lsb) {
    int nbits = msb - lsb + 1;
    int64_t chk = val >> (nbits - 1);
    guarantee (chk == -1 || chk == 0, "Field too big for insn");
    unsigned uval = val;
    unsigned mask = (1U << nbits) - 1;
    uval &= mask;
    f(uval, lsb + nbits - 1, lsb);
  }

  // Emit a 5-bit general register number at bit position lsb.
  void rf(Register r, int lsb) {
    f(r->encoding_nocheck(), lsb + 4, lsb);
  }

  // reg|ZR
  void zrf(Register r, int lsb) {
    f(r->encoding_nocheck() - (r == zr), lsb + 4, lsb);
  }

  // reg|SP
  void srf(Register r, int lsb) {
    f(r == sp ? 31 : r->encoding_nocheck(), lsb + 4, lsb);
  }

  void rf(FloatRegister r, int lsb) {
    f(r->encoding_nocheck(), lsb + 4, lsb);
  }

  // Emit a 4-bit predicate register number at bit position lsb.
  void prf(PRegister r, int lsb) {
    f(r->encoding_nocheck(), lsb + 3, lsb);
  }

  // Emit a 3-bit governing predicate register number at bit position lsb.
  void pgrf(PRegister r, int lsb) {
    f(r->encoding_nocheck(), lsb + 2, lsb);
  }

  // Read back bits [msb:lsb] of the insn being built; in debug builds
  // they must all have been written already.
  unsigned get(int msb = 31, int lsb = 0) {
    int nbits = msb - lsb + 1;
    unsigned mask = ((1U << nbits) - 1) << lsb;
    assert_cond((bits & mask) == mask);
    return (insn & mask) >> lsb;
  }

  // Or a pre-shifted value into the insn, claiming all bits of mask.
  void fixed(unsigned value, unsigned mask) {
    assert_cond ((mask & bits) == 0);
#ifdef ASSERT
    bits |= mask;
#endif
    insn |= value;
  }
};

#define starti Instruction_aarch64 do_not_use(this); set_current(&do_not_use)

// Base class describing a pre- or post-indexed address update:
// a base register and an immediate offset.
class PrePost {
  int _offset;
  Register _r;
public:
  PrePost(Register reg, int o) : _offset(o), _r(reg) { }
  int offset() { return _offset; }
  Register reg() { return _r; }
};

class Pre : public PrePost {
public:
  Pre(Register reg, int o) : PrePost(reg, o) { }
};
class Post : public PrePost {
  Register _idx;
  bool _is_postreg;
public:
  Post(Register reg, int o) : PrePost(reg, o) { _idx = NULL; _is_postreg = false; }
  Post(Register reg, Register idx) : PrePost(reg, 0) { _idx = idx; _is_postreg = true; }
  Register idx_reg() { return _idx; }
  bool is_postreg() {return _is_postreg; }
};

namespace ext
{
  enum operation { uxtb, uxth, uxtw, uxtx, sxtb, sxth, sxtw, sxtx };
};

// Addressing modes
class Address {
 public:

  enum mode { no_mode, base_plus_offset, pre, post, post_reg, pcrel,
              base_plus_offset_reg, literal };

  // Shift and extend for base reg + reg offset addressing
  class extend {
    int _option, _shift;
    ext::operation _op;
  public:
    extend() { }
    extend(int s, int o, ext::operation op) : _option(o), _shift(s), _op(op) { }
    int option()
    const { return _option; }
    int shift() const { return _shift; }
    ext::operation op() const { return _op; }
  };
  class uxtw : public extend {
  public:
    uxtw(int shift = -1): extend(shift, 0b010, ext::uxtw) { }
  };
  class lsl : public extend {
  public:
    lsl(int shift = -1): extend(shift, 0b011, ext::uxtx) { }
  };
  class sxtw : public extend {
  public:
    sxtw(int shift = -1): extend(shift, 0b110, ext::sxtw) { }
  };
  class sxtx : public extend {
  public:
    sxtx(int shift = -1): extend(shift, 0b111, ext::sxtx) { }
  };

 private:
  Register _base;
  Register _index;
  int64_t _offset;
  enum mode _mode;
  extend _ext;

  RelocationHolder _rspec;

  // Typically we use AddressLiterals we want to use their rval
  // However in some situations we want the lval (effect address) of
  // the item.  We provide a special factory for making those lvals.
  bool _is_lval;

  // If the target is far we'll need to load the ea of this to a
  // register to reach it. Otherwise if near we can do PC-relative
  // addressing.
  address _target;

 public:
  Address()
    : _mode(no_mode) { }
  Address(Register r)
    : _base(r), _index(noreg), _offset(0), _mode(base_plus_offset), _target(0) { }
  Address(Register r, int o)
    : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
  Address(Register r, int64_t o)
    : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
  Address(Register r, uint64_t o)
    : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
#ifdef ASSERT
  Address(Register r, ByteSize disp)
    : _base(r), _index(noreg), _offset(in_bytes(disp)), _mode(base_plus_offset), _target(0) { }
#endif
  Address(Register r, Register r1, extend ext = lsl())
    : _base(r), _index(r1), _offset(0), _mode(base_plus_offset_reg),
      _ext(ext), _target(0) { }
  Address(Pre p)
    : _base(p.reg()), _offset(p.offset()), _mode(pre) { }
  Address(Post p)
    : _base(p.reg()), _index(p.idx_reg()), _offset(p.offset()),
      _mode(p.is_postreg() ? post_reg : post), _target(0) { }
  Address(address target, RelocationHolder const& rspec)
    : _mode(literal),
      _rspec(rspec),
      _is_lval(false),
      _target(target)  { }
  Address(address target, relocInfo::relocType rtype = relocInfo::external_word_type);
  Address(Register base, RegisterOrConstant index, extend ext = lsl())
    : _base (base),
      _offset(0), _ext(ext), _target(0) {
    if (index.is_register()) {
      _mode = base_plus_offset_reg;
      _index = index.as_register();
    } else {
      guarantee(ext.option() == ext::uxtx, "should be");
      assert(index.is_constant(), "should be");
      _mode = base_plus_offset;
      _offset = index.as_constant() << ext.shift();
    }
  }

  Register base() const {
    guarantee((_mode == base_plus_offset | _mode == base_plus_offset_reg
               | _mode == post | _mode == post_reg),
              "wrong mode");
    return _base;
  }
  int64_t offset() const {
    return _offset;
  }
  Register index() const {
    return _index;
  }
  mode getMode() const {
    return _mode;
  }
  bool uses(Register reg) const { return _base == reg || _index == reg; }
  address target() const { return _target; }
  const RelocationHolder& rspec() const { return _rspec; }

  // Encode this address into the load/store instruction currently
  // being assembled in *i.  The size bits (31:30) and the SIMD bits
  // must already have been written by the caller.
  void encode(Instruction_aarch64 *i) const {
    i->f(0b111, 29, 27);
    i->srf(_base, 5);

    switch(_mode) {
    case base_plus_offset:
      {
        unsigned size = i->get(31, 30);
        if (i->get(26, 26) && i->get(23, 23)) {
          // SIMD Q Type - Size = 128 bits
          assert(size == 0, "bad size");
          size = 0b100;
        }
        unsigned mask = (1 << size) - 1;
        // A negative or unaligned offset needs the 9-bit signed
        // "unscaled" form; otherwise use the scaled 12-bit form.
        if (_offset < 0 || _offset & mask)
          {
            i->f(0b00, 25, 24);
            i->f(0, 21), i->f(0b00, 11, 10);
            i->sf(_offset, 20, 12);
          } else {
            i->f(0b01, 25, 24);
            i->f(_offset >> size, 21, 10);
          }
      }
      break;

    case base_plus_offset_reg:
      {
        i->f(0b00, 25, 24);
        i->f(1, 21);
        i->rf(_index, 16);
        i->f(_ext.option(), 15, 13);
        unsigned size = i->get(31, 30);
        if (i->get(26, 26) && i->get(23, 23)) {
          // SIMD Q Type - Size = 128 bits
          assert(size == 0, "bad size");
          size = 0b100;
        }
        if (size == 0) // It's a byte
          i->f(_ext.shift() >= 0, 12);
        else {
          if (_ext.shift() > 0)
            assert(_ext.shift() == (int)size, "bad shift");
          i->f(_ext.shift() > 0, 12);
        }
        i->f(0b10, 11, 10);
      }
      break;

    case pre:
      i->f(0b00, 25, 24);
      i->f(0, 21), i->f(0b11, 11, 10);
      i->sf(_offset, 20, 12);
      break;

    case post:
      i->f(0b00, 25, 24);
      i->f(0, 21), i->f(0b01, 11, 10);
      i->sf(_offset, 20, 12);
      break;

    default:
      ShouldNotReachHere();
    }
  }

  // Encode this address into a load/store-pair instruction; the
  // immediate is scaled by the operand size taken from the insn.
  void encode_pair(Instruction_aarch64 *i) const {
    switch(_mode) {
    case base_plus_offset:
      i->f(0b010, 25, 23);
      break;
    case pre:
      i->f(0b011, 25, 23);
      break;
    case post:
      i->f(0b001, 25, 23);
      break;
    default:
      ShouldNotReachHere();
    }

    unsigned size; // Operand shift in 32-bit words

    if (i->get(26, 26)) { // float
      switch(i->get(31, 30)) {
      case 0b10:
        size = 2; break;
      case 0b01:
        size = 1; break;
      case 0b00:
        size = 0; break;
      default:
        ShouldNotReachHere();
        size = 0;  // unreachable
      }
    } else {
      size = i->get(31, 31);
    }

    size = 4 << size;
    guarantee(_offset % size == 0, "bad offset");
    i->sf(_offset / size, 21, 15);
    i->srf(_base, 5);
  }

  // Encode this address into a non-temporal pair instruction.
  void encode_nontemporal_pair(Instruction_aarch64 *i) const {
    // Only base + offset is allowed
    i->f(0b000, 25, 23);
    unsigned size = i->get(31, 31);
    size = 4 << size;
    guarantee(_offset % size == 0, "bad offset");
    i->sf(_offset / size, 21, 15);
    i->srf(_base, 5);
    guarantee(_mode == Address::base_plus_offset,
              "Bad addressing mode for non-temporal op");
  }

  void lea(MacroAssembler *, Register) const;

  static bool offset_ok_for_immed(int64_t offset, uint shift);

  // Can offset be encoded as a signed scaled immediate (MUL VL) for
  // an SVE load/store with the given field width and vector length?
  static bool offset_ok_for_sve_immed(long offset, int shift, int vl /* sve vector length */) {
    if (offset % vl == 0) {
      // Convert address offset into sve imm offset (MUL VL).
      int sve_offset = offset / vl;
      if (((-(1 << (shift - 1))) <= sve_offset) && (sve_offset < (1 << (shift - 1)))) {
        // sve_offset can be encoded
        return true;
      }
    }
    return false;
  }
};

// Convenience classes
class RuntimeAddress: public Address {

  public:

  RuntimeAddress(address target) : Address(target, relocInfo::runtime_call_type) {}

};

class OopAddress: public Address {

  public:

  OopAddress(address target) : Address(target, relocInfo::oop_type){}

};

class ExternalAddress: public Address {
 private:
  static relocInfo::relocType reloc_for_target(address target) {
    // Sometimes ExternalAddress is used for values which aren't
    // exactly addresses, like the card table base.
    // external_word_type can't be used for values in the first page
    // so just skip the reloc in that case.
    return external_word_Relocation::can_be_relocated(target) ?
relocInfo::external_word_type : relocInfo::none; 608 } 609 610 public: 611 612 ExternalAddress(address target) : Address(target, reloc_for_target(target)) {} 613 614 }; 615 616 class InternalAddress: public Address { 617 618 public: 619 620 InternalAddress(address target) : Address(target, relocInfo::internal_word_type) {} 621 }; 622 623 const int FPUStateSizeInWords = FloatRegisterImpl::number_of_registers * 624 FloatRegisterImpl::save_slots_per_register; 625 626 typedef enum { 627 PLDL1KEEP = 0b00000, PLDL1STRM, PLDL2KEEP, PLDL2STRM, PLDL3KEEP, PLDL3STRM, 628 PSTL1KEEP = 0b10000, PSTL1STRM, PSTL2KEEP, PSTL2STRM, PSTL3KEEP, PSTL3STRM, 629 PLIL1KEEP = 0b01000, PLIL1STRM, PLIL2KEEP, PLIL2STRM, PLIL3KEEP, PLIL3STRM 630 } prfop; 631 632 class Assembler : public AbstractAssembler { 633 634 #ifndef PRODUCT 635 static const uintptr_t asm_bp; 636 637 void emit_long(jint x) { 638 if ((uintptr_t)pc() == asm_bp) 639 asm volatile ("nop"); 640 AbstractAssembler::emit_int32(x); 641 } 642 #else 643 void emit_long(jint x) { 644 AbstractAssembler::emit_int32(x); 645 } 646 #endif 647 648 public: 649 650 enum { instruction_size = 4 }; 651 652 //---< calculate length of instruction >--- 653 // We just use the values set above. 
654 // instruction must start at passed address 655 static unsigned int instr_len(unsigned char *instr) { return instruction_size; } 656 657 //---< longest instructions >--- 658 static unsigned int instr_maxlen() { return instruction_size; } 659 660 Address adjust(Register base, int offset, bool preIncrement) { 661 if (preIncrement) 662 return Address(Pre(base, offset)); 663 else 664 return Address(Post(base, offset)); 665 } 666 667 Address pre(Register base, int offset) { 668 return adjust(base, offset, true); 669 } 670 671 Address post(Register base, int offset) { 672 return adjust(base, offset, false); 673 } 674 675 Address post(Register base, Register idx) { 676 return Address(Post(base, idx)); 677 } 678 679 Instruction_aarch64* current; 680 681 void set_current(Instruction_aarch64* i) { current = i; } 682 683 void f(unsigned val, int msb, int lsb) { 684 current->f(val, msb, lsb); 685 } 686 void f(unsigned val, int msb) { 687 current->f(val, msb, msb); 688 } 689 void sf(int64_t val, int msb, int lsb) { 690 current->sf(val, msb, lsb); 691 } 692 void rf(Register reg, int lsb) { 693 current->rf(reg, lsb); 694 } 695 void srf(Register reg, int lsb) { 696 current->srf(reg, lsb); 697 } 698 void zrf(Register reg, int lsb) { 699 current->zrf(reg, lsb); 700 } 701 void rf(FloatRegister reg, int lsb) { 702 current->rf(reg, lsb); 703 } 704 void prf(PRegister reg, int lsb) { 705 current->prf(reg, lsb); 706 } 707 void pgrf(PRegister reg, int lsb) { 708 current->pgrf(reg, lsb); 709 } 710 void fixed(unsigned value, unsigned mask) { 711 current->fixed(value, mask); 712 } 713 714 void emit() { 715 emit_long(current->get_insn()); 716 assert_cond(current->get_bits() == 0xffffffff); 717 current = NULL; 718 } 719 720 typedef void (Assembler::* uncond_branch_insn)(address dest); 721 typedef void (Assembler::* compare_and_branch_insn)(Register Rt, address dest); 722 typedef void (Assembler::* test_and_branch_insn)(Register Rt, int bitpos, address dest); 723 typedef void (Assembler::* 
prefetch_insn)(address target, prfop); 724 725 void wrap_label(Label &L, uncond_branch_insn insn); 726 void wrap_label(Register r, Label &L, compare_and_branch_insn insn); 727 void wrap_label(Register r, int bitpos, Label &L, test_and_branch_insn insn); 728 void wrap_label(Label &L, prfop, prefetch_insn insn); 729 730 // PC-rel. addressing 731 732 void adr(Register Rd, address dest); 733 void _adrp(Register Rd, address dest); 734 735 void adr(Register Rd, const Address &dest); 736 void _adrp(Register Rd, const Address &dest); 737 738 void adr(Register Rd, Label &L) { 739 wrap_label(Rd, L, &Assembler::Assembler::adr); 740 } 741 void _adrp(Register Rd, Label &L) { 742 wrap_label(Rd, L, &Assembler::_adrp); 743 } 744 745 void adrp(Register Rd, const Address &dest, uint64_t &offset); 746 747 #undef INSN 748 749 void add_sub_immediate(Register Rd, Register Rn, unsigned uimm, int op, 750 int negated_op); 751 752 // Add/subtract (immediate) 753 #define INSN(NAME, decode, negated) \ 754 void NAME(Register Rd, Register Rn, unsigned imm, unsigned shift) { \ 755 starti; \ 756 f(decode, 31, 29), f(0b10001, 28, 24), f(shift, 23, 22), f(imm, 21, 10); \ 757 zrf(Rd, 0), srf(Rn, 5); \ 758 } \ 759 \ 760 void NAME(Register Rd, Register Rn, unsigned imm) { \ 761 starti; \ 762 add_sub_immediate(Rd, Rn, imm, decode, negated); \ 763 } 764 765 INSN(addsw, 0b001, 0b011); 766 INSN(subsw, 0b011, 0b001); 767 INSN(adds, 0b101, 0b111); 768 INSN(subs, 0b111, 0b101); 769 770 #undef INSN 771 772 #define INSN(NAME, decode, negated) \ 773 void NAME(Register Rd, Register Rn, unsigned imm) { \ 774 starti; \ 775 add_sub_immediate(Rd, Rn, imm, decode, negated); \ 776 } 777 778 INSN(addw, 0b000, 0b010); 779 INSN(subw, 0b010, 0b000); 780 INSN(add, 0b100, 0b110); 781 INSN(sub, 0b110, 0b100); 782 783 #undef INSN 784 785 // Logical (immediate) 786 #define INSN(NAME, decode, is32) \ 787 void NAME(Register Rd, Register Rn, uint64_t imm) { \ 788 starti; \ 789 uint32_t val = encode_logical_immediate(is32, imm); \ 
790 f(decode, 31, 29), f(0b100100, 28, 23), f(val, 22, 10); \ 791 srf(Rd, 0), zrf(Rn, 5); \ 792 } 793 794 INSN(andw, 0b000, true); 795 INSN(orrw, 0b001, true); 796 INSN(eorw, 0b010, true); 797 INSN(andr, 0b100, false); 798 INSN(orr, 0b101, false); 799 INSN(eor, 0b110, false); 800 801 #undef INSN 802 803 #define INSN(NAME, decode, is32) \ 804 void NAME(Register Rd, Register Rn, uint64_t imm) { \ 805 starti; \ 806 uint32_t val = encode_logical_immediate(is32, imm); \ 807 f(decode, 31, 29), f(0b100100, 28, 23), f(val, 22, 10); \ 808 zrf(Rd, 0), zrf(Rn, 5); \ 809 } 810 811 INSN(ands, 0b111, false); 812 INSN(andsw, 0b011, true); 813 814 #undef INSN 815 816 // Move wide (immediate) 817 #define INSN(NAME, opcode) \ 818 void NAME(Register Rd, unsigned imm, unsigned shift = 0) { \ 819 assert_cond((shift/16)*16 == shift); \ 820 starti; \ 821 f(opcode, 31, 29), f(0b100101, 28, 23), f(shift/16, 22, 21), \ 822 f(imm, 20, 5); \ 823 rf(Rd, 0); \ 824 } 825 826 INSN(movnw, 0b000); 827 INSN(movzw, 0b010); 828 INSN(movkw, 0b011); 829 INSN(movn, 0b100); 830 INSN(movz, 0b110); 831 INSN(movk, 0b111); 832 833 #undef INSN 834 835 // Bitfield 836 #define INSN(NAME, opcode, size) \ 837 void NAME(Register Rd, Register Rn, unsigned immr, unsigned imms) { \ 838 starti; \ 839 guarantee(size == 1 || (immr < 32 && imms < 32), "incorrect immr/imms");\ 840 f(opcode, 31, 22), f(immr, 21, 16), f(imms, 15, 10); \ 841 zrf(Rn, 5), rf(Rd, 0); \ 842 } 843 844 INSN(sbfmw, 0b0001001100, 0); 845 INSN(bfmw, 0b0011001100, 0); 846 INSN(ubfmw, 0b0101001100, 0); 847 INSN(sbfm, 0b1001001101, 1); 848 INSN(bfm, 0b1011001101, 1); 849 INSN(ubfm, 0b1101001101, 1); 850 851 #undef INSN 852 853 // Extract 854 #define INSN(NAME, opcode, size) \ 855 void NAME(Register Rd, Register Rn, Register Rm, unsigned imms) { \ 856 starti; \ 857 guarantee(size == 1 || imms < 32, "incorrect imms"); \ 858 f(opcode, 31, 21), f(imms, 15, 10); \ 859 zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0); \ 860 } 861 862 INSN(extrw, 0b00010011100, 0); 863 
INSN(extr, 0b10010011110, 1); 864 865 #undef INSN 866 867 // The maximum range of a branch is fixed for the AArch64 868 // architecture. In debug mode we shrink it in order to test 869 // trampolines, but not so small that branches in the interpreter 870 // are out of range. 871 static const uint64_t branch_range = NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M); 872 873 static bool reachable_from_branch_at(address branch, address target) { 874 return uabs(target - branch) < branch_range; 875 } 876 877 // Unconditional branch (immediate) 878 #define INSN(NAME, opcode) \ 879 void NAME(address dest) { \ 880 starti; \ 881 int64_t offset = (dest - pc()) >> 2; \ 882 DEBUG_ONLY(assert(reachable_from_branch_at(pc(), dest), "debug only")); \ 883 f(opcode, 31), f(0b00101, 30, 26), sf(offset, 25, 0); \ 884 } \ 885 void NAME(Label &L) { \ 886 wrap_label(L, &Assembler::NAME); \ 887 } \ 888 void NAME(const Address &dest); 889 890 INSN(b, 0); 891 INSN(bl, 1); 892 893 #undef INSN 894 895 // Compare & branch (immediate) 896 #define INSN(NAME, opcode) \ 897 void NAME(Register Rt, address dest) { \ 898 int64_t offset = (dest - pc()) >> 2; \ 899 starti; \ 900 f(opcode, 31, 24), sf(offset, 23, 5), rf(Rt, 0); \ 901 } \ 902 void NAME(Register Rt, Label &L) { \ 903 wrap_label(Rt, L, &Assembler::NAME); \ 904 } 905 906 INSN(cbzw, 0b00110100); 907 INSN(cbnzw, 0b00110101); 908 INSN(cbz, 0b10110100); 909 INSN(cbnz, 0b10110101); 910 911 #undef INSN 912 913 // Test & branch (immediate) 914 #define INSN(NAME, opcode) \ 915 void NAME(Register Rt, int bitpos, address dest) { \ 916 int64_t offset = (dest - pc()) >> 2; \ 917 int b5 = bitpos >> 5; \ 918 bitpos &= 0x1f; \ 919 starti; \ 920 f(b5, 31), f(opcode, 30, 24), f(bitpos, 23, 19), sf(offset, 18, 5); \ 921 rf(Rt, 0); \ 922 } \ 923 void NAME(Register Rt, int bitpos, Label &L) { \ 924 wrap_label(Rt, bitpos, L, &Assembler::NAME); \ 925 } 926 927 INSN(tbz, 0b0110110); 928 INSN(tbnz, 0b0110111); 929 930 #undef INSN 931 932 // Conditional branch (immediate) 933 
enum Condition 934 {EQ, NE, HS, CS=HS, LO, CC=LO, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL, NV}; 935 936 void br(Condition cond, address dest) { 937 int64_t offset = (dest - pc()) >> 2; 938 starti; 939 f(0b0101010, 31, 25), f(0, 24), sf(offset, 23, 5), f(0, 4), f(cond, 3, 0); 940 } 941 942 #define INSN(NAME, cond) \ 943 void NAME(address dest) { \ 944 br(cond, dest); \ 945 } 946 947 INSN(beq, EQ); 948 INSN(bne, NE); 949 INSN(bhs, HS); 950 INSN(bcs, CS); 951 INSN(blo, LO); 952 INSN(bcc, CC); 953 INSN(bmi, MI); 954 INSN(bpl, PL); 955 INSN(bvs, VS); 956 INSN(bvc, VC); 957 INSN(bhi, HI); 958 INSN(bls, LS); 959 INSN(bge, GE); 960 INSN(blt, LT); 961 INSN(bgt, GT); 962 INSN(ble, LE); 963 INSN(bal, AL); 964 INSN(bnv, NV); 965 966 void br(Condition cc, Label &L); 967 968 #undef INSN 969 970 // Exception generation 971 void generate_exception(int opc, int op2, int LL, unsigned imm) { 972 starti; 973 f(0b11010100, 31, 24); 974 f(opc, 23, 21), f(imm, 20, 5), f(op2, 4, 2), f(LL, 1, 0); 975 } 976 977 #define INSN(NAME, opc, op2, LL) \ 978 void NAME(unsigned imm) { \ 979 generate_exception(opc, op2, LL, imm); \ 980 } 981 982 INSN(svc, 0b000, 0, 0b01); 983 INSN(hvc, 0b000, 0, 0b10); 984 INSN(smc, 0b000, 0, 0b11); 985 INSN(brk, 0b001, 0, 0b00); 986 INSN(hlt, 0b010, 0, 0b00); 987 INSN(dcps1, 0b101, 0, 0b01); 988 INSN(dcps2, 0b101, 0, 0b10); 989 INSN(dcps3, 0b101, 0, 0b11); 990 991 #undef INSN 992 993 // System 994 void system(int op0, int op1, int CRn, int CRm, int op2, 995 Register rt = dummy_reg) 996 { 997 starti; 998 f(0b11010101000, 31, 21); 999 f(op0, 20, 19); 1000 f(op1, 18, 16); 1001 f(CRn, 15, 12); 1002 f(CRm, 11, 8); 1003 f(op2, 7, 5); 1004 rf(rt, 0); 1005 } 1006 1007 void hint(int imm) { 1008 system(0b00, 0b011, 0b0010, 0b0000, imm); 1009 } 1010 1011 void nop() { 1012 hint(0); 1013 } 1014 1015 void yield() { 1016 hint(1); 1017 } 1018 1019 void wfe() { 1020 hint(2); 1021 } 1022 1023 void wfi() { 1024 hint(3); 1025 } 1026 1027 void sev() { 1028 hint(4); 1029 } 1030 1031 
  void sevl() {
    hint(5);
  }

  // we only provide mrs and msr for the special purpose system
  // registers where op1 (instr[20:19]) == 11 and, (currently) only
  // use it for FPSR n.b msr has L (instr[21]) == 0 mrs has L == 1

  // Move general register to system register.
  void msr(int op1, int CRn, int CRm, int op2, Register rt) {
    starti;
    f(0b1101010100011, 31, 19);
    f(op1, 18, 16);
    f(CRn, 15, 12);
    f(CRm, 11, 8);
    f(op2, 7, 5);
    // writing zr is ok
    zrf(rt, 0);
  }

  // Move system register to general register.
  void mrs(int op1, int CRn, int CRm, int op2, Register rt) {
    starti;
    f(0b1101010100111, 31, 19);
    f(op1, 18, 16);
    f(CRn, 15, 12);
    f(CRm, 11, 8);
    f(op2, 7, 5);
    // reading to zr is a mistake
    rf(rt, 0);
  }

  // Barrier option values for the CRm field of DMB/DSB.
  enum barrier {OSHLD = 0b0001, OSHST, OSH, NSHLD=0b0101, NSHST, NSH,
                ISHLD = 0b1001, ISHST, ISH, LD=0b1101, ST, SY};

  void dsb(barrier imm) {
    system(0b00, 0b011, 0b00011, imm, 0b100);
  }

  void dmb(barrier imm) {
    system(0b00, 0b011, 0b00011, imm, 0b101);
  }

  void isb() {
    system(0b00, 0b011, 0b00011, SY, 0b110);
  }

  // SYS instruction (op0 == 0b01); rt defaults to the "no register" encoding.
  void sys(int op1, int CRn, int CRm, int op2,
           Register rt = (Register)0b11111) {
    system(0b01, op1, CRn, CRm, op2, rt);
  }

  // Only implement operations accessible from EL0 or higher, i.e.,
  //            op1    CRn    CRm    op2
  // IC IVAU     3      7      5      1
  // DC CVAC     3      7      10     1
  // DC CVAP     3      7      12     1
  // DC CVAU     3      7      11     1
  // DC CIVAC    3      7      14     1
  // DC ZVA      3      7      4      1
  // So only deal with the CRm field.
  // CRm values for the EL0-accessible cache-maintenance ops listed above.
  enum icache_maintenance {IVAU = 0b0101};
  enum dcache_maintenance {CVAC = 0b1010, CVAP = 0b1100, CVAU = 0b1011, CIVAC = 0b1110, ZVA = 0b100};

  // DC <cm>, Rt — data cache maintenance by VA.
  void dc(dcache_maintenance cm, Register Rt) {
    sys(0b011, 0b0111, cm, 0b001, Rt);
  }

  // IC <cm>, Rt — instruction cache maintenance by VA.
  void ic(icache_maintenance cm, Register Rt) {
    sys(0b011, 0b0111, cm, 0b001, Rt);
  }

  // A more convenient access to dmb for our purposes
  enum Membar_mask_bits {
    // We can use ISH for a barrier because the ARM ARM says "This
    // architecture assumes that all Processing Elements that use the
    // same operating system or hypervisor are in the same Inner
    // Shareable shareability domain."
    StoreStore = ISHST,
    LoadStore = ISHLD,
    LoadLoad = ISHLD,
    StoreLoad = ISH,
    AnyAny = ISH
  };

  void membar(Membar_mask_bits order_constraint) {
    dmb(Assembler::barrier(order_constraint));
  }

  // Unconditional branch (register)
  void branch_reg(Register R, int opc) {
    starti;
    f(0b1101011, 31, 25);
    f(opc, 24, 21);
    f(0b11111000000, 20, 10);
    rf(R, 5);
    f(0b00000, 4, 0);
  }

#define INSN(NAME, opc)   \
  void NAME(Register R) { \
    branch_reg(R, opc);   \
  }

  INSN(br, 0b0000);
  INSN(blr, 0b0001);
  INSN(ret, 0b0010);

  void ret(void *p); // This forces a compile-time error for ret(0)

#undef INSN

#define INSN(NAME, opc)         \
  void NAME() {                 \
    branch_reg(dummy_reg, opc); \
  }

  INSN(eret, 0b0100);
  INSN(drps, 0b0101);

#undef INSN

  // Load/store exclusive
  // Operand sizes, in instruction-encoding order (bits 31:30).
  enum operand_size { byte, halfword, word, xword };

  void load_store_exclusive(Register Rs, Register Rt1, Register Rt2,
    Register Rn, enum operand_size sz, int op, bool ordered) {
    starti;
    f(sz, 31, 30), f(0b001000, 29, 24), f(op, 23, 21);
    rf(Rs, 16), f(ordered, 15), zrf(Rt2, 10), srf(Rn, 5), zrf(Rt1, 0);
  }

  // LDXR/LDAXR family: `ordered` selects the acquire form.
  void load_exclusive(Register dst, Register addr,
                      enum operand_size sz, bool ordered) {
    load_store_exclusive(dummy_reg, dst, dummy_reg, addr,
                         sz, 0b010, ordered);
  }

  // STXR/STLXR family: `status` receives 0 on success, 1 on failure.
  void store_exclusive(Register status, Register new_val, Register addr,
                       enum operand_size sz, bool ordered) {
    load_store_exclusive(status, new_val, dummy_reg, addr,
                         sz, 0b000, ordered);
  }

#define INSN4(NAME, sz, op, o0) /* Four registers */                    \
  void NAME(Register Rs, Register Rt1, Register Rt2, Register Rn) {     \
    guarantee(Rs != Rn && Rs != Rt1 && Rs != Rt2, "unpredictable instruction"); \
    load_store_exclusive(Rs, Rt1, Rt2, Rn, sz, op, o0);                 \
  }

#define INSN3(NAME, sz, op, o0) /* Three registers */                   \
  void NAME(Register Rs, Register Rt, Register Rn) {                    \
    guarantee(Rs != Rn && Rs != Rt, "unpredictable instruction");       \
    load_store_exclusive(Rs, Rt, dummy_reg, Rn, sz, op, o0);            \
  }

#define INSN2(NAME, sz, op, o0) /* Two registers */                     \
  void NAME(Register Rt, Register Rn) {                                 \
    load_store_exclusive(dummy_reg, Rt, dummy_reg,                      \
                         Rn, sz, op, o0);                               \
  }

#define INSN_FOO(NAME, sz, op, o0) /* Three registers, encoded differently */ \
  void NAME(Register Rt1, Register Rt2, Register Rn) {                  \
    guarantee(Rt1 != Rt2, "unpredictable instruction");                 \
    load_store_exclusive(dummy_reg, Rt1, Rt2, Rn, sz, op, o0);          \
  }

  // bytes
  INSN3(stxrb, byte, 0b000, 0);
  INSN3(stlxrb, byte, 0b000, 1);
  INSN2(ldxrb, byte, 0b010, 0);
  INSN2(ldaxrb, byte, 0b010, 1);
  INSN2(stlrb, byte, 0b100, 1);
  INSN2(ldarb, byte, 0b110, 1);

  // halfwords
  INSN3(stxrh, halfword, 0b000, 0);
  INSN3(stlxrh, halfword, 0b000, 1);
  INSN2(ldxrh, halfword, 0b010, 0);
  INSN2(ldaxrh, halfword, 0b010, 1);
  INSN2(stlrh, halfword, 0b100, 1);
  INSN2(ldarh, halfword, 0b110, 1);

  // words
  INSN3(stxrw, word, 0b000, 0);
  INSN3(stlxrw, word, 0b000, 1);
  INSN4(stxpw, word, 0b001, 0);
  INSN4(stlxpw, word, 0b001, 1);
  INSN2(ldxrw, word, 0b010, 0);
  INSN2(ldaxrw, word, 0b010, 1);
  INSN_FOO(ldxpw, word, 0b011, 0);
  INSN_FOO(ldaxpw, word, 0b011, 1);
  INSN2(stlrw, word, 0b100, 1);
  INSN2(ldarw, word, 0b110, 1);

  // xwords
  INSN3(stxr, xword, 0b000, 0);
  INSN3(stlxr, xword, 0b000, 1);
  INSN4(stxp, xword, 0b001, 0);
  INSN4(stlxp, xword, 0b001, 1);
  INSN2(ldxr, xword, 0b010, 0);
  INSN2(ldaxr, xword, 0b010, 1);
  INSN_FOO(ldxp, xword, 0b011, 0);
  INSN_FOO(ldaxp, xword, 0b011, 1);
  INSN2(stlr, xword, 0b100, 1);
  INSN2(ldar, xword, 0b110, 1);

#undef INSN2
#undef INSN3
#undef INSN4
#undef INSN_FOO

  // 8.1 Compare and swap extensions
  // `a`/`r` select acquire/release semantics; `not_pair` distinguishes
  // CAS from CASP, whose size field sits one bit lower.
  void lse_cas(Register Rs, Register Rt, Register Rn,
               enum operand_size sz, bool a, bool r, bool not_pair) {
    starti;
    if (! not_pair) { // Pair
      assert(sz == word || sz == xword, "invalid size");
      /* The size bit is in bit 30, not 31 */
      sz = (operand_size)(sz == word ? 0b00:0b01);
    }
    f(sz, 31, 30), f(0b001000, 29, 24), f(not_pair ? 1 : 0, 23), f(a, 22), f(1, 21);
    zrf(Rs, 16), f(r, 15), f(0b11111, 14, 10), srf(Rn, 5), zrf(Rt, 0);
  }

  // CAS
#define INSN(NAME, a, r)                                              \
  void NAME(operand_size sz, Register Rs, Register Rt, Register Rn) { \
    assert(Rs != Rn && Rs != Rt, "unpredictable instruction");        \
    lse_cas(Rs, Rt, Rn, sz, a, r, true);                              \
  }
  INSN(cas, false, false)
  INSN(casa, true, false)
  INSN(casl, false, true)
  INSN(casal, true, true)
#undef INSN

  // CASP — register pairs must be even/odd-adjacent.
#define INSN(NAME, a, r)                                              \
  void NAME(operand_size sz, Register Rs, Register Rs1,               \
            Register Rt, Register Rt1, Register Rn) {                 \
    assert((Rs->encoding() & 1) == 0 && (Rt->encoding() & 1) == 0 &&  \
           Rs->successor() == Rs1 && Rt->successor() == Rt1 &&        \
           Rs != Rn && Rs1 != Rn && Rs != Rt, "invalid registers");   \
    lse_cas(Rs, Rt, Rn, sz, a, r, false);                             \
  }
  INSN(casp, false, false)
  INSN(caspa, true, false)
  INSN(caspl, false, true)
  INSN(caspal, true, true)
#undef INSN

  // 8.1 Atomic operations
  void lse_atomic(Register Rs, Register Rt, Register Rn,
                  enum operand_size sz, int op1, int op2, bool a, bool r) {
    starti;
    f(sz, 31, 30), f(0b111000, 29, 24), f(a, 23), f(r, 22), f(1, 21);
    zrf(Rs, 16), f(op1, 15), f(op2, 14, 12), f(0, 11, 10), srf(Rn, 5), zrf(Rt, 0);
  }

  // Each atomic comes in plain, acquire (_A), release (_L) and
  // acquire+release (_AL) variants.
#define INSN(NAME, NAME_A, NAME_L, NAME_AL, op1, op2)                   \
  void NAME(operand_size sz, Register Rs, Register Rt, Register Rn) {   \
    lse_atomic(Rs, Rt, Rn, sz, op1, op2, false, false);                 \
  }                                                                     \
  void NAME_A(operand_size sz, Register Rs, Register Rt, Register Rn) { \
    lse_atomic(Rs, Rt, Rn, sz, op1, op2, true, false);                  \
  }                                                                     \
  void NAME_L(operand_size sz, Register Rs, Register Rt, Register Rn) { \
    lse_atomic(Rs, Rt, Rn, sz, op1, op2, false, true);                  \
  }                                                                     \
  void NAME_AL(operand_size sz, Register Rs, Register Rt, Register Rn) {\
    lse_atomic(Rs, Rt, Rn, sz, op1, op2, true, true);                   \
  }
  INSN(ldadd, ldadda, ldaddl, ldaddal, 0, 0b000);
  INSN(ldbic, ldbica, ldbicl, ldbical, 0, 0b001);
  INSN(ldeor, ldeora, ldeorl, ldeoral, 0, 0b010);
  INSN(ldorr, ldorra, ldorrl, ldorral, 0, 0b011);
  INSN(ldsmax, ldsmaxa, ldsmaxl, ldsmaxal, 0, 0b100);
  INSN(ldsmin, ldsmina, ldsminl, ldsminal, 0, 0b101);
  INSN(ldumax, ldumaxa, ldumaxl, ldumaxal, 0, 0b110);
  INSN(ldumin, ldumina, lduminl, lduminal, 0, 0b111);
  INSN(swp, swpa, swpl, swpal, 1, 0b000);
#undef INSN

  // Load register (literal): PC-relative, 19-bit word-scaled offset.
#define INSN(NAME, opc, V)                                              \
  void NAME(Register Rt, address dest) {                                \
    int64_t offset = (dest - pc()) >> 2;                                \
    starti;                                                             \
    f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24),        \
      sf(offset, 23, 5);                                                \
    rf(Rt, 0);                                                          \
  }                                                                     \
  void NAME(Register Rt, address dest, relocInfo::relocType rtype) {    \
    InstructionMark im(this);                                           \
    guarantee(rtype == relocInfo::internal_word_type,                   \
              "only internal_word_type relocs make sense here");        \
    code_section()->relocate(inst_mark(), InternalAddress(dest).rspec()); \
    NAME(Rt, dest);                                                     \
  }                                                                     \
  void NAME(Register Rt, Label &L) {                                    \
    wrap_label(Rt, L, &Assembler::NAME);                                \
  }

  INSN(ldrw, 0b00, 0);
  INSN(ldr, 0b01, 0);
  INSN(ldrsw, 0b10, 0);

#undef INSN

  // Load FP/SIMD register (literal).
#define INSN(NAME, opc, V)                                              \
  void NAME(FloatRegister Rt, address dest) {                           \
    int64_t offset = (dest - pc()) >> 2;                                \
    starti;                                                             \
    f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24),        \
      sf(offset, 23, 5);                                                \
    rf((Register)Rt, 0);                                                \
  }

  INSN(ldrs, 0b00, 1);
  INSN(ldrd, 0b01, 1);
  INSN(ldrq, 0b10, 1);

#undef INSN

  // Prefetch (literal): the prfop occupies the Rt field.
#define INSN(NAME, opc, V)                                              \
  void NAME(address dest, prfop op = PLDL1KEEP) {                       \
    int64_t offset = (dest - pc()) >> 2;                                \
    starti;                                                             \
    f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24),        \
      sf(offset, 23, 5);                                                \
    f(op, 4, 0);                                                        \
  }                                                                     \
  void NAME(Label &L, prfop op = PLDL1KEEP) {                           \
    wrap_label(L, op, &Assembler::NAME);                                \
  }

  INSN(prfm, 0b11, 0);

#undef INSN

  // Load/store
  // Shared encoder for register-pair loads/stores; the Address object
  // encodes the addressing-mode-dependent fields.
  void ld_st1(int opc, int p1, int V, int L,
              Register Rt1, Register Rt2, Address adr, bool no_allocate) {
    starti;
    f(opc, 31, 30), f(p1, 29, 27), f(V, 26), f(L, 22);
    zrf(Rt2, 10), zrf(Rt1, 0);
    if (no_allocate) {
      adr.encode_nontemporal_pair(current);
    } else {
      adr.encode_pair(current);
    }
  }

  // Load/store register pair (offset)
#define INSN(NAME, size, p1, V, L, no_allocate)         \
  void NAME(Register Rt1, Register Rt2, Address adr) {  \
    ld_st1(size, p1, V, L, Rt1, Rt2, adr, no_allocate); \
  }

  INSN(stpw, 0b00, 0b101, 0, 0, false);
  INSN(ldpw, 0b00, 0b101, 0, 1, false);
  INSN(ldpsw, 0b01, 0b101, 0, 1, false);
  INSN(stp, 0b10, 0b101, 0, 0, false);
  INSN(ldp, 0b10, 0b101, 0, 1, false);

  // Load/store no-allocate pair (offset)
  INSN(stnpw, 0b00, 0b101, 0, 0, true);
  INSN(ldnpw, 0b00, 0b101, 0, 1, true);
  INSN(stnp, 0b10, 0b101, 0, 0, true);
  INSN(ldnp, 0b10, 0b101, 0, 1, true);

#undef INSN

  // FP/SIMD register-pair forms (V bit set).
#define INSN(NAME, size, p1, V, L, no_allocate)                             \
  void NAME(FloatRegister Rt1, FloatRegister Rt2, Address adr) {            \
    ld_st1(size, p1, V, L, (Register)Rt1, (Register)Rt2, adr, no_allocate); \
  }

  INSN(stps, 0b00, 0b101, 1, 0, false);
  INSN(ldps, 0b00, 0b101, 1, 1, false);
  INSN(stpd, 0b01, 0b101, 1, 0, false);
  INSN(ldpd, 0b01, 0b101, 1, 1, false);
  INSN(stpq, 0b10, 0b101, 1, 0, false);
  INSN(ldpq, 0b10, 0b101, 1, 1, false);

#undef INSN

  // Load/store register (all modes)
  void ld_st2(Register Rt, const Address &adr, int size, int op, int V = 0) {
    starti;

    f(V, 26); // general reg?
    zrf(Rt, 0);

    // Encoding for literal loads is done here (rather than pushed
    // down into Address::encode) because the encoding of this
    // instruction is too different from all of the other forms to
    // make it worth sharing.
    if (adr.getMode() == Address::literal) {
      assert(size == 0b10 || size == 0b11, "bad operand size in ldr");
      assert(op == 0b01, "literal form can only be used with loads");
      f(size & 0b01, 31, 30), f(0b011, 29, 27), f(0b00, 25, 24);
      int64_t offset = (adr.target() - pc()) >> 2;
      sf(offset, 23, 5);
      code_section()->relocate(pc(), adr.rspec());
      return;
    }

    f(size, 31, 30);
    f(op, 23, 22); // str
    adr.encode(current);
  }

#define INSN(NAME, size, op)                    \
  void NAME(Register Rt, const Address &adr) {  \
    ld_st2(Rt, adr, size, op);                  \
  }                                             \

  INSN(str, 0b11, 0b00);
  INSN(strw, 0b10, 0b00);
  INSN(strb, 0b00, 0b00);
  INSN(strh, 0b01, 0b00);

  INSN(ldr, 0b11, 0b01);
  INSN(ldrw, 0b10, 0b01);
  INSN(ldrb, 0b00, 0b01);
  INSN(ldrh, 0b01, 0b01);

  INSN(ldrsb, 0b00, 0b10);
  INSN(ldrsbw, 0b00, 0b11);
  INSN(ldrsh, 0b01, 0b10);
  INSN(ldrshw, 0b01, 0b11);
  INSN(ldrsw, 0b10, 0b10);

#undef INSN

  // Prefetch (register/immediate addressing): prfop rides in Rt.
#define INSN(NAME, size, op)                              \
  void NAME(const Address &adr, prfop pfop = PLDL1KEEP) { \
    ld_st2((Register)pfop, adr, size, op);                \
  }

  INSN(prfm, 0b11, 0b10); // FIXME: PRFM should not be used with
                          // writeback modes, but the assembler
                          // doesn't enforce that.
1475 1476 #undef INSN 1477 1478 #define INSN(NAME, size, op) \ 1479 void NAME(FloatRegister Rt, const Address &adr) { \ 1480 ld_st2((Register)Rt, adr, size, op, 1); \ 1481 } 1482 1483 INSN(strd, 0b11, 0b00); 1484 INSN(strs, 0b10, 0b00); 1485 INSN(ldrd, 0b11, 0b01); 1486 INSN(ldrs, 0b10, 0b01); 1487 INSN(strq, 0b00, 0b10); 1488 INSN(ldrq, 0x00, 0b11); 1489 1490 #undef INSN 1491 1492 enum shift_kind { LSL, LSR, ASR, ROR }; 1493 1494 void op_shifted_reg(unsigned decode, 1495 enum shift_kind kind, unsigned shift, 1496 unsigned size, unsigned op) { 1497 f(size, 31); 1498 f(op, 30, 29); 1499 f(decode, 28, 24); 1500 f(shift, 15, 10); 1501 f(kind, 23, 22); 1502 } 1503 1504 // Logical (shifted register) 1505 #define INSN(NAME, size, op, N) \ 1506 void NAME(Register Rd, Register Rn, Register Rm, \ 1507 enum shift_kind kind = LSL, unsigned shift = 0) { \ 1508 starti; \ 1509 guarantee(size == 1 || shift < 32, "incorrect shift"); \ 1510 f(N, 21); \ 1511 zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0); \ 1512 op_shifted_reg(0b01010, kind, shift, size, op); \ 1513 } 1514 1515 INSN(andr, 1, 0b00, 0); 1516 INSN(orr, 1, 0b01, 0); 1517 INSN(eor, 1, 0b10, 0); 1518 INSN(ands, 1, 0b11, 0); 1519 INSN(andw, 0, 0b00, 0); 1520 INSN(orrw, 0, 0b01, 0); 1521 INSN(eorw, 0, 0b10, 0); 1522 INSN(andsw, 0, 0b11, 0); 1523 1524 #undef INSN 1525 1526 #define INSN(NAME, size, op, N) \ 1527 void NAME(Register Rd, Register Rn, Register Rm, \ 1528 enum shift_kind kind = LSL, unsigned shift = 0) { \ 1529 starti; \ 1530 f(N, 21); \ 1531 zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0); \ 1532 op_shifted_reg(0b01010, kind, shift, size, op); \ 1533 } \ 1534 \ 1535 /* These instructions have no immediate form. Provide an overload so \ 1536 that if anyone does try to use an immediate operand -- this has \ 1537 happened! -- we'll get a compile-time error. 
*/ \ 1538 void NAME(Register Rd, Register Rn, unsigned imm, \ 1539 enum shift_kind kind = LSL, unsigned shift = 0) { \ 1540 assert(false, " can't be used with immediate operand"); \ 1541 } 1542 1543 INSN(bic, 1, 0b00, 1); 1544 INSN(orn, 1, 0b01, 1); 1545 INSN(eon, 1, 0b10, 1); 1546 INSN(bics, 1, 0b11, 1); 1547 INSN(bicw, 0, 0b00, 1); 1548 INSN(ornw, 0, 0b01, 1); 1549 INSN(eonw, 0, 0b10, 1); 1550 INSN(bicsw, 0, 0b11, 1); 1551 1552 #undef INSN 1553 1554 // Aliases for short forms of orn 1555 void mvn(Register Rd, Register Rm, 1556 enum shift_kind kind = LSL, unsigned shift = 0) { 1557 orn(Rd, zr, Rm, kind, shift); 1558 } 1559 1560 void mvnw(Register Rd, Register Rm, 1561 enum shift_kind kind = LSL, unsigned shift = 0) { 1562 ornw(Rd, zr, Rm, kind, shift); 1563 } 1564 1565 // Add/subtract (shifted register) 1566 #define INSN(NAME, size, op) \ 1567 void NAME(Register Rd, Register Rn, Register Rm, \ 1568 enum shift_kind kind, unsigned shift = 0) { \ 1569 starti; \ 1570 f(0, 21); \ 1571 assert_cond(kind != ROR); \ 1572 guarantee(size == 1 || shift < 32, "incorrect shift");\ 1573 zrf(Rd, 0), zrf(Rn, 5), zrf(Rm, 16); \ 1574 op_shifted_reg(0b01011, kind, shift, size, op); \ 1575 } 1576 1577 INSN(add, 1, 0b000); 1578 INSN(sub, 1, 0b10); 1579 INSN(addw, 0, 0b000); 1580 INSN(subw, 0, 0b10); 1581 1582 INSN(adds, 1, 0b001); 1583 INSN(subs, 1, 0b11); 1584 INSN(addsw, 0, 0b001); 1585 INSN(subsw, 0, 0b11); 1586 1587 #undef INSN 1588 1589 // Add/subtract (extended register) 1590 #define INSN(NAME, op) \ 1591 void NAME(Register Rd, Register Rn, Register Rm, \ 1592 ext::operation option, int amount = 0) { \ 1593 starti; \ 1594 zrf(Rm, 16), srf(Rn, 5), srf(Rd, 0); \ 1595 add_sub_extended_reg(op, 0b01011, Rd, Rn, Rm, 0b00, option, amount); \ 1596 } 1597 1598 void add_sub_extended_reg(unsigned op, unsigned decode, 1599 Register Rd, Register Rn, Register Rm, 1600 unsigned opt, ext::operation option, unsigned imm) { 1601 guarantee(imm <= 4, "shift amount must be <= 4"); 1602 f(op, 31, 29), 
f(decode, 28, 24), f(opt, 23, 22), f(1, 21); 1603 f(option, 15, 13), f(imm, 12, 10); 1604 } 1605 1606 INSN(addw, 0b000); 1607 INSN(subw, 0b010); 1608 INSN(add, 0b100); 1609 INSN(sub, 0b110); 1610 1611 #undef INSN 1612 1613 #define INSN(NAME, op) \ 1614 void NAME(Register Rd, Register Rn, Register Rm, \ 1615 ext::operation option, int amount = 0) { \ 1616 starti; \ 1617 zrf(Rm, 16), srf(Rn, 5), zrf(Rd, 0); \ 1618 add_sub_extended_reg(op, 0b01011, Rd, Rn, Rm, 0b00, option, amount); \ 1619 } 1620 1621 INSN(addsw, 0b001); 1622 INSN(subsw, 0b011); 1623 INSN(adds, 0b101); 1624 INSN(subs, 0b111); 1625 1626 #undef INSN 1627 1628 // Aliases for short forms of add and sub 1629 #define INSN(NAME) \ 1630 void NAME(Register Rd, Register Rn, Register Rm) { \ 1631 if (Rd == sp || Rn == sp) \ 1632 NAME(Rd, Rn, Rm, ext::uxtx); \ 1633 else \ 1634 NAME(Rd, Rn, Rm, LSL); \ 1635 } 1636 1637 INSN(addw); 1638 INSN(subw); 1639 INSN(add); 1640 INSN(sub); 1641 1642 INSN(addsw); 1643 INSN(subsw); 1644 INSN(adds); 1645 INSN(subs); 1646 1647 #undef INSN 1648 1649 // Add/subtract (with carry) 1650 void add_sub_carry(unsigned op, Register Rd, Register Rn, Register Rm) { 1651 starti; 1652 f(op, 31, 29); 1653 f(0b11010000, 28, 21); 1654 f(0b000000, 15, 10); 1655 zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0); 1656 } 1657 1658 #define INSN(NAME, op) \ 1659 void NAME(Register Rd, Register Rn, Register Rm) { \ 1660 add_sub_carry(op, Rd, Rn, Rm); \ 1661 } 1662 1663 INSN(adcw, 0b000); 1664 INSN(adcsw, 0b001); 1665 INSN(sbcw, 0b010); 1666 INSN(sbcsw, 0b011); 1667 INSN(adc, 0b100); 1668 INSN(adcs, 0b101); 1669 INSN(sbc,0b110); 1670 INSN(sbcs, 0b111); 1671 1672 #undef INSN 1673 1674 // Conditional compare (both kinds) 1675 void conditional_compare(unsigned op, int o1, int o2, int o3, 1676 Register Rn, unsigned imm5, unsigned nzcv, 1677 unsigned cond) { 1678 starti; 1679 f(op, 31, 29); 1680 f(0b11010010, 28, 21); 1681 f(cond, 15, 12); 1682 f(o1, 11); 1683 f(o2, 10); 1684 f(o3, 4); 1685 f(nzcv, 3, 0); 1686 f(imm5, 20, 
16), zrf(Rn, 5); 1687 } 1688 1689 #define INSN(NAME, op) \ 1690 void NAME(Register Rn, Register Rm, int imm, Condition cond) { \ 1691 int regNumber = (Rm == zr ? 31 : (uintptr_t)Rm); \ 1692 conditional_compare(op, 0, 0, 0, Rn, regNumber, imm, cond); \ 1693 } \ 1694 \ 1695 void NAME(Register Rn, int imm5, int imm, Condition cond) { \ 1696 conditional_compare(op, 1, 0, 0, Rn, imm5, imm, cond); \ 1697 } 1698 1699 INSN(ccmnw, 0b001); 1700 INSN(ccmpw, 0b011); 1701 INSN(ccmn, 0b101); 1702 INSN(ccmp, 0b111); 1703 1704 #undef INSN 1705 1706 // Conditional select 1707 void conditional_select(unsigned op, unsigned op2, 1708 Register Rd, Register Rn, Register Rm, 1709 unsigned cond) { 1710 starti; 1711 f(op, 31, 29); 1712 f(0b11010100, 28, 21); 1713 f(cond, 15, 12); 1714 f(op2, 11, 10); 1715 zrf(Rm, 16), zrf(Rn, 5), rf(Rd, 0); 1716 } 1717 1718 #define INSN(NAME, op, op2) \ 1719 void NAME(Register Rd, Register Rn, Register Rm, Condition cond) { \ 1720 conditional_select(op, op2, Rd, Rn, Rm, cond); \ 1721 } 1722 1723 INSN(cselw, 0b000, 0b00); 1724 INSN(csincw, 0b000, 0b01); 1725 INSN(csinvw, 0b010, 0b00); 1726 INSN(csnegw, 0b010, 0b01); 1727 INSN(csel, 0b100, 0b00); 1728 INSN(csinc, 0b100, 0b01); 1729 INSN(csinv, 0b110, 0b00); 1730 INSN(csneg, 0b110, 0b01); 1731 1732 #undef INSN 1733 1734 // Data processing 1735 void data_processing(unsigned op29, unsigned opcode, 1736 Register Rd, Register Rn) { 1737 f(op29, 31, 29), f(0b11010110, 28, 21); 1738 f(opcode, 15, 10); 1739 rf(Rn, 5), rf(Rd, 0); 1740 } 1741 1742 // (1 source) 1743 #define INSN(NAME, op29, opcode2, opcode) \ 1744 void NAME(Register Rd, Register Rn) { \ 1745 starti; \ 1746 f(opcode2, 20, 16); \ 1747 data_processing(op29, opcode, Rd, Rn); \ 1748 } 1749 1750 INSN(rbitw, 0b010, 0b00000, 0b00000); 1751 INSN(rev16w, 0b010, 0b00000, 0b00001); 1752 INSN(revw, 0b010, 0b00000, 0b00010); 1753 INSN(clzw, 0b010, 0b00000, 0b00100); 1754 INSN(clsw, 0b010, 0b00000, 0b00101); 1755 1756 INSN(rbit, 0b110, 0b00000, 0b00000); 1757 
  INSN(rev16, 0b110, 0b00000, 0b00001);
  INSN(rev32, 0b110, 0b00000, 0b00010);
  INSN(rev, 0b110, 0b00000, 0b00011);
  INSN(clz, 0b110, 0b00000, 0b00100);
  INSN(cls, 0b110, 0b00000, 0b00101);

#undef INSN

  // (2 sources)
#define INSN(NAME, op29, opcode)                      \
  void NAME(Register Rd, Register Rn, Register Rm) {  \
    starti;                                           \
    rf(Rm, 16);                                       \
    data_processing(op29, opcode, Rd, Rn);            \
  }

  INSN(udivw, 0b000, 0b000010);
  INSN(sdivw, 0b000, 0b000011);
  INSN(lslvw, 0b000, 0b001000);
  INSN(lsrvw, 0b000, 0b001001);
  INSN(asrvw, 0b000, 0b001010);
  INSN(rorvw, 0b000, 0b001011);

  INSN(udiv, 0b100, 0b000010);
  INSN(sdiv, 0b100, 0b000011);
  INSN(lslv, 0b100, 0b001000);
  INSN(lsrv, 0b100, 0b001001);
  INSN(asrv, 0b100, 0b001010);
  INSN(rorv, 0b100, 0b001011);

#undef INSN

  // (3 sources)
  void data_processing(unsigned op54, unsigned op31, unsigned o0,
                       Register Rd, Register Rn, Register Rm,
                       Register Ra) {
    starti;
    f(op54, 31, 29), f(0b11011, 28, 24);
    f(op31, 23, 21), f(o0, 15);
    zrf(Rm, 16), zrf(Ra, 10), zrf(Rn, 5), zrf(Rd, 0);
  }

#define INSN(NAME, op54, op31, o0)                                \
  void NAME(Register Rd, Register Rn, Register Rm, Register Ra) { \
    data_processing(op54, op31, o0, Rd, Rn, Rm, Ra);              \
  }

  INSN(maddw, 0b000, 0b000, 0);
  INSN(msubw, 0b000, 0b000, 1);
  INSN(madd, 0b100, 0b000, 0);
  INSN(msub, 0b100, 0b000, 1);
  INSN(smaddl, 0b100, 0b001, 0);
  INSN(smsubl, 0b100, 0b001, 1);
  INSN(umaddl, 0b100, 0b101, 0);
  INSN(umsubl, 0b100, 0b101, 1);

#undef INSN

  // High-multiply forms: Ra is hard-wired to the zero-register encoding.
#define INSN(NAME, op54, op31, o0)                             \
  void NAME(Register Rd, Register Rn, Register Rm) {           \
    data_processing(op54, op31, o0, Rd, Rn, Rm, (Register)31); \
  }

  INSN(smulh, 0b100, 0b010, 0);
  INSN(umulh, 0b100, 0b110, 0);

#undef INSN

  // Floating-point data-processing (1 source)
  void data_processing(unsigned op31, unsigned type, unsigned opcode,
                       FloatRegister Vd, FloatRegister Vn) {
    starti;
    f(op31, 31, 29);
    f(0b11110, 28, 24);
    f(type, 23, 22), f(1, 21), f(opcode, 20, 15), f(0b10000, 14, 10);
    rf(Vn, 5), rf(Vd, 0);
  }

#define INSN(NAME, op31, type, opcode)            \
  void NAME(FloatRegister Vd, FloatRegister Vn) { \
    data_processing(op31, type, opcode, Vd, Vn);  \
  }

  // The raw fmov encodings are private; the public fmovs/fmovd wrappers
  // below reject the redundant Vd == Vn case.
private:
  INSN(i_fmovs, 0b000, 0b00, 0b000000);
public:
  INSN(fabss, 0b000, 0b00, 0b000001);
  INSN(fnegs, 0b000, 0b00, 0b000010);
  INSN(fsqrts, 0b000, 0b00, 0b000011);
  INSN(fcvts, 0b000, 0b00, 0b000101);   // Single-precision to double-precision

private:
  INSN(i_fmovd, 0b000, 0b01, 0b000000);
public:
  INSN(fabsd, 0b000, 0b01, 0b000001);
  INSN(fnegd, 0b000, 0b01, 0b000010);
  INSN(fsqrtd, 0b000, 0b01, 0b000011);
  INSN(fcvtd, 0b000, 0b01, 0b000100);   // Double-precision to single-precision

  void fmovd(FloatRegister Vd, FloatRegister Vn) {
    assert(Vd != Vn, "should be");
    i_fmovd(Vd, Vn);
  }

  void fmovs(FloatRegister Vd, FloatRegister Vn) {
    assert(Vd != Vn, "should be");
    i_fmovs(Vd, Vn);
  }

#undef INSN

  // Floating-point data-processing (2 source)
  void data_processing(unsigned op31, unsigned type, unsigned opcode,
                       FloatRegister Vd, FloatRegister Vn, FloatRegister Vm) {
    starti;
    f(op31, 31, 29);
    f(0b11110, 28, 24);
    f(type, 23, 22), f(1, 21), f(opcode, 15, 12), f(0b10, 11, 10);
    rf(Vm, 16), rf(Vn, 5), rf(Vd, 0);
  }

#define INSN(NAME, op31, type, opcode)                              \
  void NAME(FloatRegister Vd, FloatRegister Vn, FloatRegister Vm) { \
    data_processing(op31, type, opcode, Vd, Vn, Vm);                \
  }

  INSN(fmuls, 0b000, 0b00, 0b0000);
  INSN(fdivs, 0b000, 0b00, 0b0001);
  INSN(fadds, 0b000, 0b00, 0b0010);
  INSN(fsubs, 0b000, 0b00, 0b0011);
  INSN(fmaxs, 0b000, 0b00, 0b0100);
  INSN(fmins, 0b000, 0b00, 0b0101);
  INSN(fnmuls, 0b000, 0b00, 0b1000);

  INSN(fmuld, 0b000, 0b01, 0b0000);
  INSN(fdivd, 0b000, 0b01, 0b0001);
  INSN(faddd, 0b000, 0b01, 0b0010);
  INSN(fsubd, 0b000, 0b01, 0b0011);
  INSN(fmaxd, 0b000, 0b01, 0b0100);
  INSN(fmind, 0b000, 0b01, 0b0101);
  INSN(fnmuld, 0b000, 0b01, 0b1000);

#undef INSN

  // Floating-point data-processing (3 source)
  void data_processing(unsigned op31, unsigned type, unsigned o1, unsigned o0,
                       FloatRegister Vd, FloatRegister Vn, FloatRegister Vm,
                       FloatRegister Va) {
    starti;
    f(op31, 31, 29);
    f(0b11111, 28, 24);
    f(type, 23, 22), f(o1, 21), f(o0, 15);
    rf(Vm, 16), rf(Va, 10), rf(Vn, 5), rf(Vd, 0);
  }

#define INSN(NAME, op31, type, o1, o0)                            \
  void NAME(FloatRegister Vd, FloatRegister Vn, FloatRegister Vm, \
            FloatRegister Va) {                                   \
    data_processing(op31, type, o1, o0, Vd, Vn, Vm, Va);          \
  }

  INSN(fmadds, 0b000, 0b00, 0, 0);
  INSN(fmsubs, 0b000, 0b00, 0, 1);
  INSN(fnmadds, 0b000, 0b00, 1, 0);
  INSN(fnmsubs, 0b000, 0b00, 1, 1);

  INSN(fmaddd, 0b000, 0b01, 0, 0);
  INSN(fmsubd, 0b000, 0b01, 0, 1);
  INSN(fnmaddd, 0b000, 0b01, 1, 0);
  // NOTE(review): name lacks the 'd' suffix its siblings carry
  // (fnmsubd would match the pattern); kept as-is since callers
  // elsewhere may depend on this spelling.
  INSN(fnmsub, 0b000, 0b01, 1, 1);

#undef INSN

  // Floating-point conditional select
  void fp_conditional_select(unsigned op31, unsigned type,
                             unsigned op1, unsigned op2,
                             Condition cond, FloatRegister Vd,
                             FloatRegister Vn, FloatRegister Vm) {
    starti;
    f(op31, 31, 29);
    f(0b11110, 28, 24);
    f(type, 23, 22);
    f(op1, 21, 21);
    f(op2, 11, 10);
    f(cond, 15, 12);
    rf(Vm, 16), rf(Vn, 5), rf(Vd, 0);
  }

#define INSN(NAME, op31, type, op1, op2)                           \
  void NAME(FloatRegister Vd, FloatRegister Vn,                    \
            FloatRegister Vm, Condition cond) {                    \
    fp_conditional_select(op31, type, op1, op2, cond, Vd, Vn, Vm); \
  }
1950 1951 INSN(fcsels, 0b000, 0b00, 0b1, 0b11); 1952 INSN(fcseld, 0b000, 0b01, 0b1, 0b11); 1953 1954 #undef INSN 1955 1956 // Floating-point<->integer conversions 1957 void float_int_convert(unsigned op31, unsigned type, 1958 unsigned rmode, unsigned opcode, 1959 Register Rd, Register Rn) { 1960 starti; 1961 f(op31, 31, 29); 1962 f(0b11110, 28, 24); 1963 f(type, 23, 22), f(1, 21), f(rmode, 20, 19); 1964 f(opcode, 18, 16), f(0b000000, 15, 10); 1965 zrf(Rn, 5), zrf(Rd, 0); 1966 } 1967 1968 #define INSN(NAME, op31, type, rmode, opcode) \ 1969 void NAME(Register Rd, FloatRegister Vn) { \ 1970 float_int_convert(op31, type, rmode, opcode, Rd, (Register)Vn); \ 1971 } 1972 1973 INSN(fcvtzsw, 0b000, 0b00, 0b11, 0b000); 1974 INSN(fcvtzs, 0b100, 0b00, 0b11, 0b000); 1975 INSN(fcvtzdw, 0b000, 0b01, 0b11, 0b000); 1976 INSN(fcvtzd, 0b100, 0b01, 0b11, 0b000); 1977 1978 INSN(fmovs, 0b000, 0b00, 0b00, 0b110); 1979 INSN(fmovd, 0b100, 0b01, 0b00, 0b110); 1980 1981 // INSN(fmovhid, 0b100, 0b10, 0b01, 0b110); 1982 1983 #undef INSN 1984 1985 #define INSN(NAME, op31, type, rmode, opcode) \ 1986 void NAME(FloatRegister Vd, Register Rn) { \ 1987 float_int_convert(op31, type, rmode, opcode, (Register)Vd, Rn); \ 1988 } 1989 1990 INSN(fmovs, 0b000, 0b00, 0b00, 0b111); 1991 INSN(fmovd, 0b100, 0b01, 0b00, 0b111); 1992 1993 INSN(scvtfws, 0b000, 0b00, 0b00, 0b010); 1994 INSN(scvtfs, 0b100, 0b00, 0b00, 0b010); 1995 INSN(scvtfwd, 0b000, 0b01, 0b00, 0b010); 1996 INSN(scvtfd, 0b100, 0b01, 0b00, 0b010); 1997 1998 // INSN(fmovhid, 0b100, 0b10, 0b01, 0b111); 1999 2000 #undef INSN 2001 2002 // Floating-point compare 2003 void float_compare(unsigned op31, unsigned type, 2004 unsigned op, unsigned op2, 2005 FloatRegister Vn, FloatRegister Vm = (FloatRegister)0) { 2006 starti; 2007 f(op31, 31, 29); 2008 f(0b11110, 28, 24); 2009 f(type, 23, 22), f(1, 21); 2010 f(op, 15, 14), f(0b1000, 13, 10), f(op2, 4, 0); 2011 rf(Vn, 5), rf(Vm, 16); 2012 } 2013 2014 2015 #define INSN(NAME, op31, type, op, op2) \ 2016 void 
NAME(FloatRegister Vn, FloatRegister Vm) { \ 2017 float_compare(op31, type, op, op2, Vn, Vm); \ 2018 } 2019 2020 #define INSN1(NAME, op31, type, op, op2) \ 2021 void NAME(FloatRegister Vn, double d) { \ 2022 assert_cond(d == 0.0); \ 2023 float_compare(op31, type, op, op2, Vn); \ 2024 } 2025 2026 INSN(fcmps, 0b000, 0b00, 0b00, 0b00000); 2027 INSN1(fcmps, 0b000, 0b00, 0b00, 0b01000); 2028 // INSN(fcmpes, 0b000, 0b00, 0b00, 0b10000); 2029 // INSN1(fcmpes, 0b000, 0b00, 0b00, 0b11000); 2030 2031 INSN(fcmpd, 0b000, 0b01, 0b00, 0b00000); 2032 INSN1(fcmpd, 0b000, 0b01, 0b00, 0b01000); 2033 // INSN(fcmped, 0b000, 0b01, 0b00, 0b10000); 2034 // INSN1(fcmped, 0b000, 0b01, 0b00, 0b11000); 2035 2036 #undef INSN 2037 #undef INSN1 2038 2039 // Floating-point Move (immediate) 2040 private: 2041 unsigned pack(double value); 2042 2043 void fmov_imm(FloatRegister Vn, double value, unsigned size) { 2044 starti; 2045 f(0b00011110, 31, 24), f(size, 23, 22), f(1, 21); 2046 f(pack(value), 20, 13), f(0b10000000, 12, 5); 2047 rf(Vn, 0); 2048 } 2049 2050 public: 2051 2052 void fmovs(FloatRegister Vn, double value) { 2053 if (value) 2054 fmov_imm(Vn, value, 0b00); 2055 else 2056 fmovs(Vn, zr); 2057 } 2058 void fmovd(FloatRegister Vn, double value) { 2059 if (value) 2060 fmov_imm(Vn, value, 0b01); 2061 else 2062 fmovd(Vn, zr); 2063 } 2064 2065 // Floating-point rounding 2066 // type: half-precision = 11 2067 // single = 00 2068 // double = 01 2069 // rmode: A = Away = 100 2070 // I = current = 111 2071 // M = MinusInf = 010 2072 // N = eveN = 000 2073 // P = PlusInf = 001 2074 // X = eXact = 110 2075 // Z = Zero = 011 2076 void float_round(unsigned type, unsigned rmode, FloatRegister Rd, FloatRegister Rn) { 2077 starti; 2078 f(0b00011110, 31, 24); 2079 f(type, 23, 22); 2080 f(0b1001, 21, 18); 2081 f(rmode, 17, 15); 2082 f(0b10000, 14, 10); 2083 rf(Rn, 5), rf(Rd, 0); 2084 } 2085 #define INSN(NAME, type, rmode) \ 2086 void NAME(FloatRegister Vd, FloatRegister Vn) { \ 2087 float_round(type, rmode, 
Vd, Vn); \
  }

public:
  // frint<mode><precision>: suffix letter encodes the rounding mode per the
  // table above float_round; h/s/d suffix selects half/single/double.
  INSN(frintah, 0b11, 0b100);
  INSN(frintih, 0b11, 0b111);
  INSN(frintmh, 0b11, 0b010);
  INSN(frintnh, 0b11, 0b000);
  INSN(frintph, 0b11, 0b001);
  INSN(frintxh, 0b11, 0b110);
  INSN(frintzh, 0b11, 0b011);

  INSN(frintas, 0b00, 0b100);
  INSN(frintis, 0b00, 0b111);
  INSN(frintms, 0b00, 0b010);
  INSN(frintns, 0b00, 0b000);
  INSN(frintps, 0b00, 0b001);
  INSN(frintxs, 0b00, 0b110);
  INSN(frintzs, 0b00, 0b011);

  INSN(frintad, 0b01, 0b100);
  INSN(frintid, 0b01, 0b111);
  INSN(frintmd, 0b01, 0b010);
  INSN(frintnd, 0b01, 0b000);
  INSN(frintpd, 0b01, 0b001);
  INSN(frintxd, 0b01, 0b110);
  INSN(frintzd, 0b01, 0b011);
#undef INSN

/* SIMD extensions
 *
 * We just use FloatRegister in the following. They are exactly the same
 * as SIMD registers.
 */
public:

  // Vector arrangement: element size and lane count, e.g. T8B = 8 x 8-bit.
  enum SIMD_Arrangement {
    T8B, T16B, T4H, T8H, T2S, T4S, T1D, T2D, T1Q
  };

  // Scalar register variant: byte/half/single/double/quad.
  enum SIMD_RegVariant {
    B, H, S, D, Q
  };

private:
  // Per-arrangement size table, defined in the .cpp file.
  static short SIMD_Size_in_bytes[];

public:
  // Scalar FP/SIMD register load/store; delegates to ld_st2 (declared
  // elsewhere in this class). (int)T & 3 folds B..D into the size field;
  // Q needs the extra opc bit.
#define INSN(NAME, op) \
  void NAME(FloatRegister Rt, SIMD_RegVariant T, const Address &adr) { \
    ld_st2((Register)Rt, adr, (int)T & 3, op + ((T==Q) ? 0b10:0b00), 1); \
  } \

  INSN(ldr, 1);
  INSN(str, 0);

#undef INSN

private:

  // AdvSIMD load/store multiple structures, no offset form.
  void ld_st(FloatRegister Vt, SIMD_Arrangement T, Register Xn, int op1, int op2) {
    starti;
    f(0,31), f((int)T & 1, 30);
    f(op1, 29, 21), f(0, 20, 16), f(op2, 15, 12);
    f((int)T >> 1, 11, 10), srf(Xn, 5), rf(Vt, 0);
  }
  // Post-indexed (immediate) form. The immediate is not encoded; it must
  // match the architecturally implied transfer size, which is verified here.
  void ld_st(FloatRegister Vt, SIMD_Arrangement T, Register Xn,
             int imm, int op1, int op2, int regs) {

    bool replicate = op2 >> 2 == 3;
    // post-index value (imm) is formed differently for replicate/non-replicate ld* instructions
    int expectedImmediate = replicate ? regs * (1 << (T >> 1)) : SIMD_Size_in_bytes[T] * regs;
    guarantee(T < T1Q , "incorrect arrangement");
    guarantee(imm == expectedImmediate, "bad offset");
    starti;
    f(0,31), f((int)T & 1, 30);
    f(op1 | 0b100, 29, 21), f(0b11111, 20, 16), f(op2, 15, 12);
    f((int)T >> 1, 11, 10), srf(Xn, 5), rf(Vt, 0);
  }
  // Post-indexed (register) form.
  void ld_st(FloatRegister Vt, SIMD_Arrangement T, Register Xn,
             Register Xm, int op1, int op2) {
    starti;
    f(0,31), f((int)T & 1, 30);
    f(op1 | 0b100, 29, 21), rf(Xm, 16), f(op2, 15, 12);
    f((int)T >> 1, 11, 10), srf(Xn, 5), rf(Vt, 0);
  }

  // Dispatch on the addressing mode of 'a' to one of the forms above.
  void ld_st(FloatRegister Vt, SIMD_Arrangement T, Address a, int op1, int op2, int regs) {
    switch (a.getMode()) {
    case Address::base_plus_offset:
      guarantee(a.offset() == 0, "no offset allowed here");
      ld_st(Vt, T, a.base(), op1, op2);
      break;
    case Address::post:
      ld_st(Vt, T, a.base(), a.offset(), op1, op2, regs);
      break;
    case Address::post_reg:
      ld_st(Vt, T, a.base(), a.index(), op1, op2);
      break;
    default:
      ShouldNotReachHere();
    }
  }

public:

  // INSN1..INSN4 generate the 1- to 4-register forms; multi-register forms
  // require consecutively numbered registers (checked via successor()).
#define INSN1(NAME, op1, op2) \
  void NAME(FloatRegister Vt, SIMD_Arrangement T, const Address &a) { \
    ld_st(Vt, T, a, op1, op2, 1); \
  }

#define INSN2(NAME, op1, op2) \
  void NAME(FloatRegister Vt, FloatRegister Vt2, SIMD_Arrangement T, const Address &a) { \
    assert(Vt->successor() == Vt2, "Registers must be ordered"); \
    ld_st(Vt, T, a, op1, op2, 2); \
  }

#define INSN3(NAME, op1, op2) \
  void NAME(FloatRegister Vt, FloatRegister Vt2, FloatRegister Vt3, \
            SIMD_Arrangement T, const Address &a) { \
    assert(Vt->successor() == Vt2 && Vt2->successor() == Vt3, \
           "Registers must be ordered"); \
    ld_st(Vt, T, a, op1, op2, 3); \
  }

#define INSN4(NAME, op1, op2) \
  void NAME(FloatRegister Vt, FloatRegister Vt2, FloatRegister Vt3, \
            FloatRegister Vt4, SIMD_Arrangement T, const Address &a) { \
    assert(Vt->successor() == Vt2 && Vt2->successor() == Vt3 && \
           Vt3->successor() == Vt4, "Registers must be ordered"); \
    ld_st(Vt, T, a, op1, op2, 4); \
  }

  INSN1(ld1, 0b001100010, 0b0111);
  INSN2(ld1, 0b001100010, 0b1010);
  INSN3(ld1, 0b001100010, 0b0110);
  INSN4(ld1, 0b001100010, 0b0010);

  INSN2(ld2, 0b001100010, 0b1000);
  INSN3(ld3, 0b001100010, 0b0100);
  INSN4(ld4, 0b001100010, 0b0000);

  INSN1(st1, 0b001100000, 0b0111);
  INSN2(st1, 0b001100000, 0b1010);
  INSN3(st1, 0b001100000, 0b0110);
  INSN4(st1, 0b001100000, 0b0010);

  INSN2(st2, 0b001100000, 0b1000);
  INSN3(st3, 0b001100000, 0b0100);
  INSN4(st4, 0b001100000, 0b0000);

  // Load single structure and replicate to all lanes.
  INSN1(ld1r, 0b001101010, 0b1100);
  INSN2(ld2r, 0b001101011, 0b1100);
  INSN3(ld3r, 0b001101010, 0b1110);
  INSN4(ld4r, 0b001101011, 0b1110);

#undef INSN1
#undef INSN2
#undef INSN3
#undef INSN4

  // AdvSIMD three-register bitwise logical (byte arrangements only).
#define INSN(NAME, opc) \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
    starti; \
    assert(T == T8B || T == T16B, "must be T8B or T16B"); \
    f(0, 31), f((int)T & 1, 30), f(opc, 29, 21); \
    rf(Vm, 16), f(0b000111, 15, 10), rf(Vn, 5), rf(Vd, 0); \
  }

  INSN(eor, 0b101110001);
  INSN(orr, 0b001110101);
  INSN(andr, 0b001110001);
  INSN(bic, 0b001110011);
  INSN(bif, 0b101110111);
  INSN(bit, 0b101110101);
  INSN(bsl, 0b101110011);
  INSN(orn, 0b001110111);

#undef INSN

  // AdvSIMD three-register integer arithmetic; acceptT2D gates the
  // arrangements that have no 64-bit-element encoding.
#define INSN(NAME, opc, opc2, acceptT2D) \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
    guarantee(T != T1Q && T != T1D, "incorrect arrangement"); \
    if (!acceptT2D) guarantee(T != T2D, "incorrect arrangement"); \
    starti; \
    f(0, 31), f((int)T & 1, 30), f(opc, 29), f(0b01110, 28, 24); \
    f((int)T >> 1, 23, 22), f(1, 21), rf(Vm,
16), f(opc2, 15, 10); \
    rf(Vn, 5), rf(Vd, 0); \
  }

  INSN(addv, 0, 0b100001, true); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(subv, 1, 0b100001, true); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(mulv, 0, 0b100111, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(mlav, 0, 0b100101, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(mlsv, 1, 0b100101, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(sshl, 0, 0b010001, true); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(ushl, 1, 0b010001, true); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(addpv, 0, 0b101111, true); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(smullv, 0, 0b110000, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(umullv, 1, 0b110000, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(umlalv, 1, 0b100000, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S

#undef INSN

  // AdvSIMD two-register integer ops. 'accepted' is a tier: each level
  // widens the allowed arrangements (0 = bytes only ... 3 = up to T2D),
  // checked by the cascading guarantees below.
#define INSN(NAME, opc, opc2, accepted) \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { \
    guarantee(T != T1Q && T != T1D, "incorrect arrangement"); \
    if (accepted < 3) guarantee(T != T2D, "incorrect arrangement"); \
    if (accepted < 2) guarantee(T != T2S, "incorrect arrangement"); \
    if (accepted < 1) guarantee(T == T8B || T == T16B, "incorrect arrangement"); \
    starti; \
    f(0, 31), f((int)T & 1, 30), f(opc, 29), f(0b01110, 28, 24); \
    f((int)T >> 1, 23, 22), f(opc2, 21, 10); \
    rf(Vn, 5), rf(Vd, 0); \
  }

  INSN(absr, 0, 0b100000101110, 3); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(negr, 1, 0b100000101110, 3); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
  INSN(notr, 1, 0b100000010110, 0); // accepted arrangements: T8B, T16B
  INSN(addv, 0, 0b110001101110, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S
  INSN(cls, 0, 0b100000010010, 2); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(clz, 1, 0b100000010010, 2); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(cnt, 0, 0b100000010110, 0); // accepted arrangements: T8B, T16B
  INSN(uaddlp, 1, 0b100000001010, 2); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
  INSN(uaddlv, 1, 0b110000001110, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S

#undef INSN

  // Floating-point max/min across lanes, T4S only.
#define INSN(NAME, opc) \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { \
    starti; \
    assert(T == T4S, "arrangement must be T4S"); \
    f(0, 31), f((int)T & 1, 30), f(0b101110, 29, 24), f(opc, 23), \
    f(T == T4S ? 0 : 1, 22), f(0b110000111110, 21, 10); rf(Vn, 5), rf(Vd, 0); \
  }

  INSN(fmaxv, 0);
  INSN(fminv, 1);

#undef INSN

  // AdvSIMD modified immediate (MOVI/ORR/MVNI/BIC immediate forms).
  // 'lsl' optionally shifts imm8; cmode/op are adjusted per arrangement,
  // falling back to the 64-bit MOVI encoding for D arrangements.
#define INSN(NAME, op0, cmode0) \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, unsigned imm8, unsigned lsl = 0) { \
    unsigned cmode = cmode0; \
    unsigned op = op0; \
    starti; \
    assert(lsl == 0 || \
           ((T == T4H || T == T8H) && lsl == 8) || \
           ((T == T2S || T == T4S) && ((lsl >> 3) < 4) && ((lsl & 7) == 0)), "invalid shift");\
    cmode |= lsl >> 2; \
    if (T == T4H || T == T8H) cmode |= 0b1000; \
    if (!(T == T4H || T == T8H || T == T2S || T == T4S)) { \
      assert(op == 0 && cmode0 == 0, "must be MOVI"); \
      cmode = 0b1110; \
      if (T == T1D || T == T2D) op = 1; \
    } \
    f(0, 31), f((int)T & 1, 30), f(op, 29), f(0b0111100000, 28, 19); \
    f(imm8 >> 5, 18, 16), f(cmode, 15, 12), f(0x01, 11, 10), f(imm8 & 0b11111, 9, 5); \
    rf(Vd, 0); \
  }

  INSN(movi, 0, 0);
  INSN(orri, 0, 1);
  INSN(mvni, 1, 0);
  INSN(bici, 1, 1);

#undef INSN

#define INSN(NAME, op1, op2, op3) \
  void NAME(FloatRegister Vd,
SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
    starti; \
    assert(T == T2S || T == T4S || T == T2D, "invalid arrangement"); \
    f(0, 31), f((int)T & 1, 30), f(op1, 29), f(0b01110, 28, 24), f(op2, 23); \
    f(T==T2D ? 1:0, 22); f(1, 21), rf(Vm, 16), f(op3, 15, 10), rf(Vn, 5), rf(Vd, 0); \
  }

  // AdvSIMD three-register floating-point arithmetic.
  INSN(fadd, 0, 0, 0b110101);
  INSN(fdiv, 1, 0, 0b111111);
  INSN(fmul, 1, 0, 0b110111);
  INSN(fsub, 0, 1, 0b110101);
  INSN(fmla, 0, 0, 0b110011);
  INSN(fmls, 0, 1, 0b110011);
  INSN(fmax, 0, 0, 0b111101);
  INSN(fmin, 0, 1, 0b111101);

#undef INSN

  // SHA-1 / SHA-256 three-register crypto ops (T4S only).
#define INSN(NAME, opc) \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
    starti; \
    assert(T == T4S, "arrangement must be T4S"); \
    f(0b01011110000, 31, 21), rf(Vm, 16), f(opc, 15, 10), rf(Vn, 5), rf(Vd, 0); \
  }

  INSN(sha1c, 0b000000);
  INSN(sha1m, 0b001000);
  INSN(sha1p, 0b000100);
  INSN(sha1su0, 0b001100);
  INSN(sha256h2, 0b010100);
  INSN(sha256h, 0b010000);
  INSN(sha256su1, 0b011000);

#undef INSN

  // SHA-1 / SHA-256 two-register crypto ops (T4S only).
#define INSN(NAME, opc) \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { \
    starti; \
    assert(T == T4S, "arrangement must be T4S"); \
    f(0b0101111000101000, 31, 16), f(opc, 15, 10), rf(Vn, 5), rf(Vd, 0); \
  }

  INSN(sha1h, 0b000010);
  INSN(sha1su1, 0b000110);
  INSN(sha256su0, 0b001010);

#undef INSN

  // SHA-512 three-register crypto ops (T2D only).
#define INSN(NAME, opc) \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
    starti; \
    assert(T == T2D, "arrangement must be T2D"); \
    f(0b11001110011, 31, 21), rf(Vm, 16), f(opc, 15, 10), rf(Vn, 5), rf(Vd, 0); \
  }

  INSN(sha512h, 0b100000);
  INSN(sha512h2, 0b100001);
  INSN(sha512su1, 0b100010);

#undef INSN

  // SHA-512 two-register crypto op (T2D only).
#define INSN(NAME, opc) \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { \
    starti; \
    assert(T == T2D, "arrangement must be T2D"); \
    f(opc, 31, 10), rf(Vn, 5), rf(Vd, 0); \
  }

  INSN(sha512su0, 0b1100111011000000100000);

#undef INSN

  // AES single-round ops; no arrangement parameter, fixed 22-bit opcode.
#define INSN(NAME, opc) \
  void NAME(FloatRegister Vd, FloatRegister Vn) { \
    starti; \
    f(opc, 31, 10), rf(Vn, 5), rf(Vd, 0); \
  }

  INSN(aese, 0b0100111000101000010010);
  INSN(aesd, 0b0100111000101000010110);
  INSN(aesmc, 0b0100111000101000011010);
  INSN(aesimc, 0b0100111000101000011110);

#undef INSN

  // Vector-by-element FP multiply ops; 'index' selects the Vm lane and is
  // split across bits 21 (L) and 11 (H) depending on element size.
#define INSN(NAME, op1, op2) \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm, int index = 0) { \
    starti; \
    assert(T == T2S || T == T4S || T == T2D, "invalid arrangement"); \
    assert(index >= 0 && ((T == T2D && index <= 1) || (T != T2D && index <= 3)), "invalid index"); \
    f(0, 31), f((int)T & 1, 30), f(op1, 29); f(0b011111, 28, 23); \
    f(T == T2D ? 1 : 0, 22), f(T == T2D ? 0 : index & 1, 21), rf(Vm, 16); \
    f(op2, 15, 12), f(T == T2D ? index : (index >> 1), 11), f(0, 10); \
    rf(Vn, 5), rf(Vd, 0); \
  }

  // FMLA/FMLS - Vector - Scalar
  INSN(fmlavs, 0, 0b0001);
  INSN(fmlsvs, 0, 0b0101);
  // FMULX - Vector - Scalar
  INSN(fmulxvs, 1, 0b1001);

#undef INSN

  // Floating-point Reciprocal Estimate
  void frecpe(FloatRegister Vd, FloatRegister Vn, SIMD_RegVariant type) {
    assert(type == D || type == S, "Wrong type for frecpe");
    starti;
    f(0b010111101, 31, 23);
    f(type == D ?
1 : 0, 22);
    f(0b100001110110, 21, 10);
    rf(Vn, 5), rf(Vd, 0);
  }

  // (double) {a, b} -> (a + b)
  void faddpd(FloatRegister Vd, FloatRegister Vn) {
    starti;
    f(0b0111111001110000110110, 31, 10);
    rf(Vn, 5), rf(Vd, 0);
  }

  // Insert vector element: Vd[didx] = Vn[sidx]; lane indices are shifted
  // into the imm5/imm4 fields according to the element size T.
  void ins(FloatRegister Vd, SIMD_RegVariant T, FloatRegister Vn, int didx, int sidx) {
    starti;
    assert(T != Q, "invalid register variant");
    f(0b01101110000, 31, 21), f(((didx<<1)|1)<<(int)T, 20, 16), f(0, 15);
    f(sidx<<(int)T, 14, 11), f(1, 10), rf(Vn, 5), rf(Vd, 0);
  }

  // Move vector element to general-purpose register, unsigned (umov) or
  // sign-extending (smov).
#define INSN(NAME, op) \
  void NAME(Register Rd, FloatRegister Vn, SIMD_RegVariant T, int idx) { \
    starti; \
    f(0, 31), f(T==D ? 1:0, 30), f(0b001110000, 29, 21); \
    f(((idx<<1)|1)<<(int)T, 20, 16), f(op, 15, 10); \
    rf(Vn, 5), rf(Rd, 0); \
  }

  INSN(umov, 0b001111);
  INSN(smov, 0b001011);
#undef INSN

  // AdvSIMD shift by immediate.
#define INSN(NAME, opc, opc2, isSHR) \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, int shift){ \
    starti; \
    /* The encodings for the immh:immb fields (bits 22:16) in *SHR are \
     *   0001 xxx       8B/16B, shift = 16  - UInt(immh:immb) \
     *   001x xxx       4H/8H,  shift = 32  - UInt(immh:immb) \
     *   01xx xxx       2S/4S,  shift = 64  - UInt(immh:immb) \
     *   1xxx xxx       1D/2D,  shift = 128 - UInt(immh:immb) \
     *   (1D is RESERVED) \
     * for SHL shift is calculated as: \
     *   0001 xxx       8B/16B, shift = UInt(immh:immb) - 8 \
     *   001x xxx       4H/8H,  shift = UInt(immh:immb) - 16 \
     *   01xx xxx       2S/4S,  shift = UInt(immh:immb) - 32 \
     *   1xxx xxx       1D/2D,  shift = UInt(immh:immb) - 64 \
     *   (1D is RESERVED) \
     */ \
    assert((1 << ((T>>1)+3)) > shift, "Invalid Shift value"); \
    int cVal = (1 << (((T >> 1) + 3) + (isSHR ? 1 : 0))); \
    int encodedShift = isSHR ? cVal - shift : cVal + shift; \
    f(0, 31), f(T & 1, 30), f(opc, 29), f(0b011110, 28, 23), \
    f(encodedShift, 22, 16); f(opc2, 15, 10), rf(Vn, 5), rf(Vd, 0); \
  }

  INSN(shl, 0, 0b010101, /* isSHR = */ false);
  INSN(sshr, 0, 0b000001, /* isSHR = */ true);
  INSN(ushr, 1, 0b000001, /* isSHR = */ true);

#undef INSN

private:
  // Unsigned shift left long: widen each Tb element to the Ta element size,
  // shifting left by 'shift'. Shared encoder for ushll/ushll2.
  void _ushll(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) {
    starti;
    /* The encodings for the immh:immb fields (bits 22:16) are
     *   0001 xxx       8H, 8B/16b shift = xxx
     *   001x xxx       4S, 4H/8H  shift = xxxx
     *   01xx xxx       2D, 2S/4S  shift = xxxxx
     *   1xxx xxx       RESERVED
     */
    assert((Tb >> 1) + 1 == (Ta >> 1), "Incompatible arrangement");
    assert((1 << ((Tb>>1)+3)) > shift, "Invalid shift value");
    f(0, 31), f(Tb & 1, 30), f(0b1011110, 29, 23), f((1 << ((Tb>>1)+3))|shift, 22, 16);
    f(0b101001, 15, 10), rf(Vn, 5), rf(Vd, 0);
  }

public:
  // ushll operates on the low half of the source register (8B/4H/2S)...
  void ushll(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) {
    assert(Tb == T8B || Tb == T4H || Tb == T2S, "invalid arrangement");
    _ushll(Vd, Ta, Vn, Tb, shift);
  }

  // ...and ushll2 on the high half (16B/8H/4S).
  void ushll2(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) {
    assert(Tb == T16B || Tb == T8H || Tb == T4S, "invalid arrangement");
    _ushll(Vd, Ta, Vn, Tb, shift);
  }

  // Move from general purpose register
  //   mov Vd.T[index], Rn
  void mov(FloatRegister Vd, SIMD_Arrangement T, int index, Register Xn) {
    starti;
    f(0b01001110000, 31, 21), f(((1 << (T >> 1)) | (index << ((T >> 1) + 1))), 20, 16);
    f(0b000111, 15, 10), zrf(Xn, 5), rf(Vd, 0);
  }

  // Move to general purpose register
  //   mov Rd, Vn.T[index]
  void mov(Register Xd, FloatRegister Vn, SIMD_Arrangement T, int index) {
    guarantee(T >= T2S && T < T1Q, "only D and S arrangements are supported");
starti;
    f(0, 31), f((T >= T1D) ? 1:0, 30), f(0b001110000, 29, 21);
    f(((1 << (T >> 1)) | (index << ((T >> 1) + 1))), 20, 16);
    f(0b001111, 15, 10), rf(Vn, 5), rf(Xd, 0);
  }

private:
  // Polynomial multiply long; shared encoder for pmull/pmull2. Ta is the
  // (wider) destination arrangement, Tb the source arrangement.
  void _pmull(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, FloatRegister Vm, SIMD_Arrangement Tb) {
    starti;
    assert((Ta == T1Q && (Tb == T1D || Tb == T2D)) ||
           (Ta == T8H && (Tb == T8B || Tb == T16B)), "Invalid Size specifier");
    int size = (Ta == T1Q) ? 0b11 : 0b00;
    f(0, 31), f(Tb & 1, 30), f(0b001110, 29, 24), f(size, 23, 22);
    f(1, 21), rf(Vm, 16), f(0b111000, 15, 10), rf(Vn, 5), rf(Vd, 0);
  }

public:
  // pmull uses the low half of the sources...
  void pmull(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, FloatRegister Vm, SIMD_Arrangement Tb) {
    assert(Tb == T1D || Tb == T8B, "pmull assumes T1D or T8B as the second size specifier");
    _pmull(Vd, Ta, Vn, Vm, Tb);
  }

  // ...and pmull2 the high half.
  void pmull2(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, FloatRegister Vm, SIMD_Arrangement Tb) {
    assert(Tb == T2D || Tb == T16B, "pmull2 assumes T2D or T16B as the second size specifier");
    _pmull(Vd, Ta, Vn, Vm, Tb);
  }

  // Unsigned saturating extract narrow: Ta elements narrowed to Tb.
  void uqxtn(FloatRegister Vd, SIMD_Arrangement Tb, FloatRegister Vn, SIMD_Arrangement Ta) {
    starti;
    int size_b = (int)Tb >> 1;
    int size_a = (int)Ta >> 1;
    assert(size_b < 3 && size_b == size_a - 1, "Invalid size specifier");
    f(0, 31), f(Tb & 1, 30), f(0b101110, 29, 24), f(size_b, 23, 22);
    f(0b100001010010, 21, 10), rf(Vn, 5), rf(Vd, 0);
  }

  // Duplicate general-purpose register into all lanes of Vd.
  void dup(FloatRegister Vd, SIMD_Arrangement T, Register Xs)
  {
    starti;
    assert(T != T1D, "reserved encoding");
    f(0,31), f((int)T & 1, 30), f(0b001110000, 29, 21);
    f((1 << (T >> 1)), 20, 16), f(0b000011, 15, 10), zrf(Xs, 5), rf(Vd, 0);
  }

  // Duplicate vector element Vn[index] into all lanes of Vd.
  void dup(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, int index = 0)
  {
    starti;
    assert(T != T1D, "reserved encoding");
    f(0, 31), f((int)T & 1, 30), f(0b001110000, 29, 21);
    f(((1 << (T >> 1)) | (index << ((T >> 1) + 1))), 20, 16);
    f(0b000001, 15, 10), rf(Vn, 5), rf(Vd, 0);
  }

  // AdvSIMD ZIP/UZP/TRN
#define INSN(NAME, opcode) \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
    guarantee(T != T1D && T != T1Q, "invalid arrangement"); \
    starti; \
    f(0, 31), f(0b001110, 29, 24), f(0, 21), f(0, 15); \
    f(opcode, 14, 12), f(0b10, 11, 10); \
    rf(Vm, 16), rf(Vn, 5), rf(Vd, 0); \
    f(T & 1, 30), f(T >> 1, 23, 22); \
  }

  INSN(uzp1, 0b001);
  INSN(trn1, 0b010);
  INSN(zip1, 0b011);
  INSN(uzp2, 0b101);
  INSN(trn2, 0b110);
  INSN(zip2, 0b111);

#undef INSN

  // CRC32 instructions
#define INSN(NAME, c, sf, sz) \
  void NAME(Register Rd, Register Rn, Register Rm) { \
    starti; \
    f(sf, 31), f(0b0011010110, 30, 21), f(0b010, 15, 13), f(c, 12); \
    f(sz, 11, 10), rf(Rm, 16), rf(Rn, 5), rf(Rd, 0); \
  }

  INSN(crc32b, 0, 0, 0b00);
  INSN(crc32h, 0, 0, 0b01);
  INSN(crc32w, 0, 0, 0b10);
  INSN(crc32x, 0, 1, 0b11);
  INSN(crc32cb, 1, 0, 0b00);
  INSN(crc32ch, 1, 0, 0b01);
  INSN(crc32cw, 1, 0, 0b10);
  INSN(crc32cx, 1, 1, 0b11);

#undef INSN

  // Table vector lookup
#define INSN(NAME, op) \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, unsigned registers, FloatRegister Vm) { \
    starti; \
    assert(T == T8B || T == T16B, "invalid arrangement"); \
    assert(0 < registers && registers <= 4, "invalid number of registers"); \
    f(0, 31), f((int)T & 1, 30), f(0b001110000, 29, 21), rf(Vm, 16), f(0, 15); \
    f(registers - 1, 14, 13), f(op, 12),f(0b00, 11, 10), rf(Vn, 5), rf(Vd, 0); \
  }

  INSN(tbl, 0);
  INSN(tbx, 1);

#undef INSN

  // AdvSIMD two-reg misc
  // In this instruction group, the 2 bits in the size field ([23:22]) may be
  // fixed or determined by the "SIMD_Arrangement T", or both. The additional
  // parameter "tmask" is a 2-bit mask used to indicate which bits in the size
  // field are determined by the SIMD_Arrangement. The bit of "tmask" should be
  // set to 1 if corresponding bit marked as "x" in the ArmARM.
#define INSN(NAME, U, size, tmask, opcode) \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { \
    starti; \
    assert((ASSERTION), MSG); \
    f(0, 31), f((int)T & 1, 30), f(U, 29), f(0b01110, 28, 24); \
    f(size | ((int)(T >> 1) & tmask), 23, 22), f(0b10000, 21, 17); \
    f(opcode, 16, 12), f(0b10, 11, 10), rf(Vn, 5), rf(Vd, 0); \
  }

  // ASSERTION/MSG are redefined per group below so the same INSN macro can
  // enforce the legal arrangements for each instruction family.
#define MSG "invalid arrangement"

#define ASSERTION (T == T2S || T == T4S || T == T2D)
  INSN(fsqrt, 1, 0b10, 0b01, 0b11111);
  INSN(fabs, 0, 0b10, 0b01, 0b01111);
  INSN(fneg, 1, 0b10, 0b01, 0b01111);
  INSN(frintn, 0, 0b00, 0b01, 0b11000);
  INSN(frintm, 0, 0b00, 0b01, 0b11001);
  INSN(frintp, 0, 0b10, 0b01, 0b11000);
#undef ASSERTION

#define ASSERTION (T == T8B || T == T16B || T == T4H || T == T8H || T == T2S || T == T4S)
  INSN(rev64, 0, 0b00, 0b11, 0b00000);
#undef ASSERTION

#define ASSERTION (T == T8B || T == T16B || T == T4H || T == T8H)
  INSN(rev32, 1, 0b00, 0b11, 0b00000);
#undef ASSERTION

#define ASSERTION (T == T8B || T == T16B)
  INSN(rev16, 0, 0b00, 0b11, 0b00001);
  INSN(rbit, 1, 0b01, 0b00, 0b00101);
#undef ASSERTION

#undef MSG

#undef INSN

  // Extract: concatenate Vn:Vm and take 8/16 bytes starting at 'index'.
  void ext(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm, int index)
  {
    starti;
    assert(T == T8B || T == T16B, "invalid arrangement");
    assert((T == T8B && index <= 0b0111) || (T == T16B && index <= 0b1111), "Invalid index value");
    f(0, 31), f((int)T & 1, 30), f(0b101110000, 29, 21);
    rf(Vm, 16), f(0, 15), f(index, 14, 11);
    f(0, 10), rf(Vn, 5),
rf(Vd, 0); 2721 } 2722 2723 // SVE arithmetics - unpredicated 2724 #define INSN(NAME, opcode) \ 2725 void NAME(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, FloatRegister Zm) { \ 2726 starti; \ 2727 assert(T != Q, "invalid register variant"); \ 2728 f(0b00000100, 31, 24), f(T, 23, 22), f(1, 21), \ 2729 rf(Zm, 16), f(0, 15, 13), f(opcode, 12, 10), rf(Zn, 5), rf(Zd, 0); \ 2730 } 2731 INSN(sve_add, 0b000); 2732 INSN(sve_sub, 0b001); 2733 #undef INSN 2734 2735 // SVE floating-point arithmetic - unpredicated 2736 #define INSN(NAME, opcode) \ 2737 void NAME(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, FloatRegister Zm) { \ 2738 starti; \ 2739 assert(T == S || T == D, "invalid register variant"); \ 2740 f(0b01100101, 31, 24), f(T, 23, 22), f(0, 21), \ 2741 rf(Zm, 16), f(0, 15, 13), f(opcode, 12, 10), rf(Zn, 5), rf(Zd, 0); \ 2742 } 2743 2744 INSN(sve_fadd, 0b000); 2745 INSN(sve_fmul, 0b010); 2746 INSN(sve_fsub, 0b001); 2747 #undef INSN 2748 2749 private: 2750 void sve_predicate_reg_insn(unsigned op24, unsigned op13, 2751 FloatRegister Zd_or_Vd, SIMD_RegVariant T, 2752 PRegister Pg, FloatRegister Zn_or_Vn) { 2753 starti; 2754 f(op24, 31, 24), f(T, 23, 22), f(op13, 21, 13); 2755 pgrf(Pg, 10), rf(Zn_or_Vn, 5), rf(Zd_or_Vd, 0); 2756 } 2757 2758 public: 2759 2760 // SVE integer arithmetics - predicate 2761 #define INSN(NAME, op1, op2) \ 2762 void NAME(FloatRegister Zdn_or_Zd_or_Vd, SIMD_RegVariant T, PRegister Pg, FloatRegister Znm_or_Vn) { \ 2763 assert(T != Q, "invalid register variant"); \ 2764 sve_predicate_reg_insn(op1, op2, Zdn_or_Zd_or_Vd, T, Pg, Znm_or_Vn); \ 2765 } 2766 2767 INSN(sve_abs, 0b00000100, 0b010110101); // vector abs, unary 2768 INSN(sve_add, 0b00000100, 0b000000000); // vector add 2769 INSN(sve_andv, 0b00000100, 0b011010001); // bitwise and reduction to scalar 2770 INSN(sve_asr, 0b00000100, 0b010000100); // vector arithmetic shift right 2771 INSN(sve_cnt, 0b00000100, 0b011010101) // count non-zero bits 2772 INSN(sve_cpy, 0b00000101, 
  // (Continuation of the SVE integer arithmetic - predicated INSN list;
  //  the generating macro for these entries is defined above this chunk.)
                   0b100000100); // copy scalar to each active vector element
  INSN(sve_eorv,   0b00000100, 0b011001001); // bitwise xor reduction to scalar
  INSN(sve_lsl,    0b00000100, 0b010011100); // vector logical shift left
  INSN(sve_lsr,    0b00000100, 0b010001100); // vector logical shift right
  INSN(sve_mul,    0b00000100, 0b010000000); // vector mul
  INSN(sve_neg,    0b00000100, 0b010111101); // vector neg, unary
  INSN(sve_not,    0b00000100, 0b011110101); // bitwise invert vector, unary
  INSN(sve_orv,    0b00000100, 0b011000001); // bitwise or reduction to scalar
  INSN(sve_smax,   0b00000100, 0b001000000); // signed maximum vectors
  INSN(sve_smaxv,  0b00000100, 0b001000001); // signed maximum reduction to scalar
  INSN(sve_smin,   0b00000100, 0b001010000); // signed minimum vectors
  INSN(sve_sminv,  0b00000100, 0b001010001); // signed minimum reduction to scalar
  INSN(sve_sub,    0b00000100, 0b000001000); // vector sub
  INSN(sve_uaddv,  0b00000100, 0b000001001); // unsigned add reduction to scalar
#undef INSN

  // SVE floating-point arithmetics - predicate
  // Each entry emits one predicated SVE FP instruction via
  // sve_predicate_reg_insn; only S and D element sizes are accepted.
#define INSN(NAME, op1, op2)                                                                          \
  void NAME(FloatRegister Zd_or_Zdn_or_Vd, SIMD_RegVariant T, PRegister Pg, FloatRegister Zn_or_Zm) { \
    assert(T == S || T == D, "invalid register variant");                                             \
    sve_predicate_reg_insn(op1, op2, Zd_or_Zdn_or_Vd, T, Pg, Zn_or_Zm);                               \
  }

  INSN(sve_fabs,   0b00000100, 0b011100101);
  INSN(sve_fadd,   0b01100101, 0b000000100);
  INSN(sve_fadda,  0b01100101, 0b011000001); // add strictly-ordered reduction to scalar Vd
  INSN(sve_fdiv,   0b01100101, 0b001101100);
  INSN(sve_fmax,   0b01100101, 0b000110100); // floating-point maximum
  INSN(sve_fmaxv,  0b01100101, 0b000110001); // floating-point maximum recursive reduction to scalar
  INSN(sve_fmin,   0b01100101, 0b000111100); // floating-point minimum
  INSN(sve_fminv,  0b01100101, 0b000111001); // floating-point minimum recursive reduction to scalar
  INSN(sve_fmul,   0b01100101, 0b000010100);
  INSN(sve_fneg,   0b00000100, 0b011101101);
  INSN(sve_frintm, 0b01100101, 0b000010101); // floating-point round to integral value, toward minus infinity
  INSN(sve_frintn, 0b01100101, 0b000000101); // floating-point round to integral value, nearest with ties to even
  INSN(sve_frintp, 0b01100101, 0b000001101); // floating-point round to integral value, toward plus infinity
  INSN(sve_fsqrt,  0b01100101, 0b001101101);
  INSN(sve_fsub,   0b01100101, 0b000001100);
#undef INSN

  // SVE multiple-add/sub - predicated
  // Zda is both an accumulator input and the destination; Pg governs the
  // active lanes.  Any element size except Q is permitted.
#define INSN(NAME, op0, op1, op2)                                                                     \
  void NAME(FloatRegister Zda, SIMD_RegVariant T, PRegister Pg, FloatRegister Zn, FloatRegister Zm) { \
    starti;                                                                                           \
    assert(T != Q, "invalid size");                                                                   \
    f(op0, 31, 24), f(T, 23, 22), f(op1, 21), rf(Zm, 16);                                             \
    f(op2, 15, 13), pgrf(Pg, 10), rf(Zn, 5), rf(Zda, 0);                                              \
  }

  INSN(sve_fmla,  0b01100101, 1, 0b000); // floating-point fused multiply-add: Zda = Zda + Zn * Zm
  INSN(sve_fmls,  0b01100101, 1, 0b001); // floating-point fused multiply-subtract: Zda = Zda + -Zn * Zm
  INSN(sve_fnmla, 0b01100101, 1, 0b010); // floating-point negated fused multiply-add: Zda = -Zda + -Zn * Zm
  INSN(sve_fnmls, 0b01100101, 1, 0b011); // floating-point negated fused multiply-subtract: Zda = -Zda + Zn * Zm
  INSN(sve_mla,   0b00000100, 0, 0b010); // multiply-add: Zda = Zda + Zn*Zm
  INSN(sve_mls,   0b00000100, 0, 0b011); // multiply-subtract: Zda = Zda + -Zn*Zm
#undef INSN

  // SVE bitwise logical - unpredicated
  // The operation is selected by the two-bit opc field at bits 23:22.
#define INSN(NAME, opc)                                              \
  void NAME(FloatRegister Zd, FloatRegister Zn, FloatRegister Zm) {  \
    starti;                                                          \
    f(0b00000100, 31, 24), f(opc, 23, 22), f(1, 21),                 \
    rf(Zm, 16), f(0b001100, 15, 10), rf(Zn, 5), rf(Zd, 0);           \
  }
  INSN(sve_and, 0b00);
  INSN(sve_eor, 0b10);
  INSN(sve_orr, 0b01);
#undef INSN

  // SVE shift immediate - unpredicated
  // The shift amount is folded into the tszh:tszl:imm3 encoding described
  // in the comment below; valid ranges differ for right vs left shifts.
#define INSN(NAME, opc, isSHR)                                                  \
  void NAME(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, int shift) { \
    starti;                                                                     \
    /* The encodings for the tszh:tszl:imm3 fields (bits 23:22 20:19 18:16)     \
     * for shift right is calculated as:                                        \
     *    0001 xxx       B, shift = 16  - UInt(tszh:tszl:imm3)                  \
     *    001x xxx       H, shift = 32  - UInt(tszh:tszl:imm3)                  \
     *    01xx xxx       S, shift = 64  - UInt(tszh:tszl:imm3)                  \
     *    1xxx xxx       D, shift = 128 - UInt(tszh:tszl:imm3)                  \
     * for shift left is calculated as:                                         \
     *    0001 xxx       B, shift = UInt(tszh:tszl:imm3) - 8                    \
     *    001x xxx       H, shift = UInt(tszh:tszl:imm3) - 16                   \
     *    01xx xxx       S, shift = UInt(tszh:tszl:imm3) - 32                   \
     *    1xxx xxx       D, shift = UInt(tszh:tszl:imm3) - 64                   \
     */                                                                         \
    assert(T != Q, "Invalid register variant");                                 \
    if (isSHR) {                                                                \
      /* right shift: 1 .. element-size bits */                                 \
      assert(((1 << (T + 3)) >= shift) && (shift > 0) , "Invalid shift value"); \
    } else {                                                                    \
      /* left shift: 0 .. element-size-1 bits */                                \
      assert(((1 << (T + 3)) > shift) && (shift >= 0) , "Invalid shift value"); \
    }                                                                           \
    int cVal = (1 << ((T + 3) + (isSHR ? 1 : 0)));                              \
    int encodedShift = isSHR ? cVal - shift : cVal + shift;                     \
    int tszh = encodedShift >> 5;                                               \
    int tszl_imm = encodedShift & 0x1f;                                         \
    f(0b00000100, 31, 24);                                                      \
    f(tszh, 23, 22), f(1, 21), f(tszl_imm, 20, 16);                             \
    f(0b100, 15, 13), f(opc, 12, 10), rf(Zn, 5), rf(Zd, 0);                     \
  }

  INSN(sve_asr, 0b100, /* isSHR = */ true);
  INSN(sve_lsl, 0b111, /* isSHR = */ false);
  INSN(sve_lsr, 0b101, /* isSHR = */ true);
#undef INSN

private:

  // SVE contiguous load/store, scalar base + immediate index form.
  // `T` must be at least as wide as the memory element `type`
  // (enforced by assert_cond below).
  void sve_ld_st1(FloatRegister Zt, Register Xn, int imm, PRegister Pg,
              SIMD_RegVariant T, int op1, int type, int op2) {
    starti;
    assert_cond(T >= type);
    f(op1, 31, 25), f(type, 24, 23), f(T, 22, 21);
    f(0, 20), sf(imm, 19, 16), f(op2, 15, 13);
    pgrf(Pg, 10), srf(Xn, 5), rf(Zt, 0);
  }

  // SVE contiguous load/store, scalar base + scalar index form.
  void sve_ld_st1(FloatRegister Zt, Register Xn, Register Xm, PRegister Pg,
              SIMD_RegVariant T, int op1, int type, int op2) {
    starti;
    assert_cond(T >= type);
    f(op1, 31, 25), f(type, 24, 23), f(T, 22, 21);
    rf(Xm, 16), f(op2, 15, 13);
    pgrf(Pg, 10), srf(Xn, 5), rf(Zt, 0);
  }

  // Dispatch on the addressing mode of `a` to one of the two overloads above.
  void sve_ld_st1(FloatRegister Zt, PRegister Pg,
              SIMD_RegVariant T, const Address &a,
              int op1, int type, int imm_op2, int scalar_op2) {
    switch (a.getMode()) {
    case Address::base_plus_offset:
      sve_ld_st1(Zt, a.base(), a.offset(), Pg, T, op1, type, imm_op2);
      break;
    case Address::base_plus_offset_reg:
      sve_ld_st1(Zt, a.base(), a.index(), Pg, T, op1, type, scalar_op2);
      break;
    default:
      ShouldNotReachHere();
    }
  }

public:

  // SVE load/store - predicated
#define INSN(NAME, op1, type, imm_op2, scalar_op2)                                    \
  void NAME(FloatRegister Zt, SIMD_RegVariant T, PRegister Pg, const Address &a) {    \
    assert(T != Q, "invalid register variant");                                       \
    sve_ld_st1(Zt, Pg, T, a, op1, type, imm_op2, scalar_op2);                         \
  }

  INSN(sve_ld1b, 0b1010010, 0b00, 0b101, 0b010);
  INSN(sve_st1b, 0b1110010, 0b00, 0b111, 0b010);
  INSN(sve_ld1h, 0b1010010, 0b01, 0b101, 0b010);
  INSN(sve_st1h, 0b1110010, 0b01, 0b111, 0b010);
  INSN(sve_ld1w, 0b1010010, 0b10, 0b101, 0b010);
  INSN(sve_st1w, 0b1110010, 0b10, 0b111, 0b010);
  INSN(sve_ld1d, 0b1010010, 0b11, 0b101, 0b010);
  INSN(sve_st1d, 0b1110010, 0b11, 0b111, 0b010);
#undef INSN

  // SVE load/store - unpredicated
  // The 9-bit signed offset is split: imm6 (high bits 21:16) and
  // imm3 (low bits 12:10).  Only base-plus-offset addressing is accepted.
#define INSN(NAME, op1)                                                         \
  void NAME(FloatRegister Zt, const Address &a)  {                              \
    starti;                                                                     \
    assert(a.index() == noreg, "invalid address variant");                      \
    f(op1, 31, 29), f(0b0010110, 28, 22), sf(a.offset() >> 3, 21, 16),          \
    f(0b010, 15, 13), f(a.offset() & 0x7, 12, 10), srf(a.base(), 5), rf(Zt, 0); \
  }

  INSN(sve_ldr, 0b100); // LDR (vector)
  INSN(sve_str, 0b111); // STR (vector)
#undef INSN

  // SVE add to register: Xd = Xn + imm6 * VL (addvl) or imm6 * PL (addpl).
#define INSN(NAME, op)                                               \
  void NAME(Register Xd, Register Xn, int imm6) {                    \
    starti;                                                          \
    f(0b000001000, 31, 23), f(op, 22, 21);                           \
    srf(Xn, 16), f(0b01010, 15, 11), sf(imm6, 10, 5), srf(Xd, 0);    \
  }

  INSN(sve_addvl, 0b01);
  INSN(sve_addpl, 0b11);
#undef INSN

  // SVE inc/dec register by element count
  // The multiplier imm4 (1..16) is encoded as imm4 - 1; `pattern`
  // defaults to 0b11111 (ALL).
#define INSN(NAME, op)                                                                 \
  void NAME(Register Xdn, SIMD_RegVariant T, unsigned imm4 = 1, int pattern = 0b11111) { \
    starti;                                                                            \
    assert(T != Q, "invalid size");                                                    \
    f(0b00000100, 31, 24), f(T, 23, 22), f(0b11, 21, 20);                              \
    f(imm4 - 1, 19, 16), f(0b11100, 15, 11), f(op, 10), f(pattern, 9, 5), rf(Xdn, 0);  \
  }

  INSN(sve_inc, 0);
  INSN(sve_dec, 1);
#undef INSN

  // SVE predicate count: Xd = number of active elements in Pn under
  // governing predicate Pg.
  void sve_cntp(Register Xd, SIMD_RegVariant T, PRegister Pg, PRegister Pn) {
    starti;
    assert(T != Q, "invalid size");
    f(0b00100101, 31, 24), f(T, 23, 22), f(0b10000010, 21, 14);
    prf(Pg, 10), f(0, 9), prf(Pn, 5), rf(Xd, 0);
  }

  // SVE dup scalar: broadcast general-purpose register Rn to all
  // elements of Zd.
  void sve_dup(FloatRegister Zd, SIMD_RegVariant T, Register Rn) {
    starti;
    assert(T != Q, "invalid size");
    f(0b00000101, 31, 24), f(T, 23, 22), f(0b100000001110, 21, 10);
    srf(Rn, 5), rf(Zd, 0);
  }

  // SVE dup imm: broadcast a signed immediate to all elements of Zd.
  // Immediates outside the 8-bit range are encoded with the shifted
  // (sh = 1) form when they are a multiple of 256 and fit in 16 bits.
  void sve_dup(FloatRegister Zd, SIMD_RegVariant T, int imm8) {
    starti;
    assert(T != Q, "invalid size");
    int sh = 0;
    if (imm8 <= 127 && imm8 >= -128) {
      sh = 0;
    } else if (T != B && imm8 <= 32512 && imm8 >= -32768 && (imm8 & 0xff) == 0) {
      // 32512 == 127 << 8, the largest positive value the sh = 1 form encodes.
      sh = 1;
      imm8 = (imm8 >> 8);
    } else {
      guarantee(false, "invalid immediate");
    }
    f(0b00100101, 31, 24), f(T, 23, 22), f(0b11100011, 21, 14);
    f(sh, 13), sf(imm8, 12, 5), rf(Zd, 0);
  }

  // SVE ptrue: set predicate elements of pd according to `pattern`
  // (default 0b11111 = ALL) at element size `esize`.
  void sve_ptrue(PRegister pd, SIMD_RegVariant esize, int pattern = 0b11111) {
    starti;
    f(0b00100101, 31, 24), f(esize, 23, 22), f(0b011000111000, 21, 10);
    f(pattern, 9, 5), f(0b0, 4), prf(pd, 0);
  }

  Assembler(CodeBuffer* code) : AbstractAssembler(code) {
  }

  // Not used on AArch64 — calling this is an error (ShouldNotCallThis).
  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset) {
    ShouldNotCallThis();
    return RegisterOrConstant();
  }

  // Stack overflow checking
  virtual void bang_stack_with_offset(int offset);

  static bool operand_valid_for_logical_immediate(bool is32, uint64_t imm);
  static bool operand_valid_for_add_sub_immediate(int64_t imm);
  static bool operand_valid_for_float_immediate(double imm);

  void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
  void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);
};

// Allow membar mask bits to be combined with `|`.
inline Assembler::Membar_mask_bits operator|(Assembler::Membar_mask_bits a,
                                             Assembler::Membar_mask_bits b) {
  return Assembler::Membar_mask_bits(unsigned(a)|unsigned(b));
}

// Emits the accumulated instruction when the encoding helper goes out
// of scope.
Instruction_aarch64::~Instruction_aarch64() {
  assem->emit();
}

#undef starti

// Invert a condition
inline const Assembler::Condition operator~(const Assembler::Condition cond) {
  return Assembler::Condition(int(cond) ^ 1);
}

class BiasedLockingCounters;

extern "C" void das(uint64_t start, int len);

#endif // CPU_AARCH64_ASSEMBLER_AARCH64_HPP