/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_AARCH64_ASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_ASSEMBLER_AARCH64_HPP

#include "asm/register.hpp"

// definitions of various symbolic names for machine registers

// First intercalls between C and Java which use 8 general registers
// and 8 floating registers

// we also have to copy between x86 and ARM registers but that's a
// secondary complication -- not all code employing C call convention
// executes as x86 code though -- we generate some of it

// Register counts for the C and Java calling conventions on AArch64.
class Argument {
 public:
  enum {
    n_int_register_parameters_c   = 8,  // r0, r1, ... r7 (c_rarg0, c_rarg1, ...)
    n_float_register_parameters_c = 8,  // v0, v1, ... v7 (c_farg0, c_farg1, ... )

    n_int_register_parameters_j   = 8,  // r1, ... r7, r0 (j_rarg0, j_rarg1, ...)
    n_float_register_parameters_j = 8   // v0, v1, ... v7 (j_farg0, j_farg1, ... )
  };
};

// C (native) calling-convention integer argument registers.
REGISTER_DECLARATION(Register, c_rarg0, r0);
REGISTER_DECLARATION(Register, c_rarg1, r1);
REGISTER_DECLARATION(Register, c_rarg2, r2);
REGISTER_DECLARATION(Register, c_rarg3, r3);
REGISTER_DECLARATION(Register, c_rarg4, r4);
REGISTER_DECLARATION(Register, c_rarg5, r5);
REGISTER_DECLARATION(Register, c_rarg6, r6);
REGISTER_DECLARATION(Register, c_rarg7, r7);

// C (native) calling-convention floating-point argument registers.
REGISTER_DECLARATION(FloatRegister, c_farg0, v0);
REGISTER_DECLARATION(FloatRegister, c_farg1, v1);
REGISTER_DECLARATION(FloatRegister, c_farg2, v2);
REGISTER_DECLARATION(FloatRegister, c_farg3, v3);
REGISTER_DECLARATION(FloatRegister, c_farg4, v4);
REGISTER_DECLARATION(FloatRegister, c_farg5, v5);
REGISTER_DECLARATION(FloatRegister, c_farg6, v6);
REGISTER_DECLARATION(FloatRegister, c_farg7, v7);

// Symbolically name the register arguments used by the Java calling convention.
// We have control over the convention for java so we can do what we please.
// What pleases us is to offset the java calling convention so that when
// we call a suitable jni method the arguments are lined up and we don't
// have to do much shuffling. A suitable jni method is non-static and a
// small number of arguments
//
// |--------------------------------------------------------------------|
// | c_rarg0  c_rarg1  c_rarg2 c_rarg3 c_rarg4 c_rarg5 c_rarg6 c_rarg7  |
// |--------------------------------------------------------------------|
// | r0       r1       r2      r3      r4      r5      r6      r7       |
// |--------------------------------------------------------------------|
// | j_rarg7  j_rarg0  j_rarg1 j_rarg2 j_rarg3 j_rarg4 j_rarg5 j_rarg6  |
// |--------------------------------------------------------------------|

REGISTER_DECLARATION(Register, j_rarg0, c_rarg1);
REGISTER_DECLARATION(Register, j_rarg1, c_rarg2);
REGISTER_DECLARATION(Register, j_rarg2, c_rarg3);
REGISTER_DECLARATION(Register, j_rarg3, c_rarg4);
REGISTER_DECLARATION(Register, j_rarg4, c_rarg5);
REGISTER_DECLARATION(Register, j_rarg5, c_rarg6);
REGISTER_DECLARATION(Register, j_rarg6, c_rarg7);
REGISTER_DECLARATION(Register, j_rarg7, c_rarg0);

// Java floating args are passed as per C

REGISTER_DECLARATION(FloatRegister, j_farg0, v0);
REGISTER_DECLARATION(FloatRegister, j_farg1, v1);
REGISTER_DECLARATION(FloatRegister, j_farg2, v2);
REGISTER_DECLARATION(FloatRegister, j_farg3, v3);
REGISTER_DECLARATION(FloatRegister, j_farg4, v4);
REGISTER_DECLARATION(FloatRegister, j_farg5, v5);
REGISTER_DECLARATION(FloatRegister, j_farg6, v6);
REGISTER_DECLARATION(FloatRegister, j_farg7, v7);

// registers used to hold VM data either temporarily within a method
// or across method calls

// volatile (caller-save) registers

// r8 is used for indirect result location return
// we use it and r9 as scratch registers
REGISTER_DECLARATION(Register, rscratch1, r8);
REGISTER_DECLARATION(Register, rscratch2, r9);

// current method -- must be in a call-clobbered register
REGISTER_DECLARATION(Register, rmethod, r12);

// non-volatile (callee-save) registers
are r16-29 119 // of which the following are dedicated global state 120 121 // link register 122 REGISTER_DECLARATION(Register, lr, r30); 123 // frame pointer 124 REGISTER_DECLARATION(Register, rfp, r29); 125 // current thread 126 REGISTER_DECLARATION(Register, rthread, r28); 127 // base of heap 128 REGISTER_DECLARATION(Register, rheapbase, r27); 129 // constant pool cache 130 REGISTER_DECLARATION(Register, rcpool, r26); 131 // monitors allocated on stack 132 REGISTER_DECLARATION(Register, rmonitors, r25); 133 // locals on stack 134 REGISTER_DECLARATION(Register, rlocals, r24); 135 // bytecode pointer 136 REGISTER_DECLARATION(Register, rbcp, r22); 137 // Dispatch table base 138 REGISTER_DECLARATION(Register, rdispatch, r21); 139 // Java stack pointer 140 REGISTER_DECLARATION(Register, esp, r20); 141 142 #define assert_cond(ARG1) assert(ARG1, #ARG1) 143 144 namespace asm_util { 145 uint32_t encode_logical_immediate(bool is32, uint64_t imm); 146 }; 147 148 using namespace asm_util; 149 150 151 class Assembler; 152 153 class Instruction_aarch64 { 154 unsigned insn; 155 #ifdef ASSERT 156 unsigned bits; 157 #endif 158 Assembler *assem; 159 160 public: 161 162 Instruction_aarch64(class Assembler *as) { 163 #ifdef ASSERT 164 bits = 0; 165 #endif 166 insn = 0; 167 assem = as; 168 } 169 170 inline ~Instruction_aarch64(); 171 172 unsigned &get_insn() { return insn; } 173 #ifdef ASSERT 174 unsigned &get_bits() { return bits; } 175 #endif 176 177 static inline int32_t extend(unsigned val, int hi = 31, int lo = 0) { 178 union { 179 unsigned u; 180 int n; 181 }; 182 183 u = val << (31 - hi); 184 n = n >> (31 - hi + lo); 185 return n; 186 } 187 188 static inline uint32_t extract(uint32_t val, int msb, int lsb) { 189 int nbits = msb - lsb + 1; 190 assert_cond(msb >= lsb); 191 uint32_t mask = (1U << nbits) - 1; 192 uint32_t result = val >> lsb; 193 result &= mask; 194 return result; 195 } 196 197 static inline int32_t sextract(uint32_t val, int msb, int lsb) { 198 uint32_t uval = 
extract(val, msb, lsb); 199 return extend(uval, msb - lsb); 200 } 201 202 static void patch(address a, int msb, int lsb, uint64_t val) { 203 int nbits = msb - lsb + 1; 204 guarantee(val < (1U << nbits), "Field too big for insn"); 205 assert_cond(msb >= lsb); 206 unsigned mask = (1U << nbits) - 1; 207 val <<= lsb; 208 mask <<= lsb; 209 unsigned target = *(unsigned *)a; 210 target &= ~mask; 211 target |= val; 212 *(unsigned *)a = target; 213 } 214 215 static void spatch(address a, int msb, int lsb, int64_t val) { 216 int nbits = msb - lsb + 1; 217 int64_t chk = val >> (nbits - 1); 218 guarantee (chk == -1 || chk == 0, "Field too big for insn"); 219 unsigned uval = val; 220 unsigned mask = (1U << nbits) - 1; 221 uval &= mask; 222 uval <<= lsb; 223 mask <<= lsb; 224 unsigned target = *(unsigned *)a; 225 target &= ~mask; 226 target |= uval; 227 *(unsigned *)a = target; 228 } 229 230 void f(unsigned val, int msb, int lsb) { 231 int nbits = msb - lsb + 1; 232 guarantee(val < (1U << nbits), "Field too big for insn"); 233 assert_cond(msb >= lsb); 234 unsigned mask = (1U << nbits) - 1; 235 val <<= lsb; 236 mask <<= lsb; 237 insn |= val; 238 assert_cond((bits & mask) == 0); 239 #ifdef ASSERT 240 bits |= mask; 241 #endif 242 } 243 244 void f(unsigned val, int bit) { 245 f(val, bit, bit); 246 } 247 248 void sf(int64_t val, int msb, int lsb) { 249 int nbits = msb - lsb + 1; 250 int64_t chk = val >> (nbits - 1); 251 guarantee (chk == -1 || chk == 0, "Field too big for insn"); 252 unsigned uval = val; 253 unsigned mask = (1U << nbits) - 1; 254 uval &= mask; 255 f(uval, lsb + nbits - 1, lsb); 256 } 257 258 void rf(Register r, int lsb) { 259 f(r->encoding_nocheck(), lsb + 4, lsb); 260 } 261 262 // reg|ZR 263 void zrf(Register r, int lsb) { 264 f(r->encoding_nocheck() - (r == zr), lsb + 4, lsb); 265 } 266 267 // reg|SP 268 void srf(Register r, int lsb) { 269 f(r == sp ? 
31 : r->encoding_nocheck(), lsb + 4, lsb); 270 } 271 272 void rf(FloatRegister r, int lsb) { 273 f(r->encoding_nocheck(), lsb + 4, lsb); 274 } 275 276 unsigned get(int msb = 31, int lsb = 0) { 277 int nbits = msb - lsb + 1; 278 unsigned mask = ((1U << nbits) - 1) << lsb; 279 assert_cond((bits & mask) == mask); 280 return (insn & mask) >> lsb; 281 } 282 283 void fixed(unsigned value, unsigned mask) { 284 assert_cond ((mask & bits) == 0); 285 #ifdef ASSERT 286 bits |= mask; 287 #endif 288 insn |= value; 289 } 290 }; 291 292 #define starti Instruction_aarch64 do_not_use(this); set_current(&do_not_use) 293 294 class PrePost { 295 int _offset; 296 Register _r; 297 public: 298 PrePost(Register reg, int o) : _offset(o), _r(reg) { } 299 int offset() { return _offset; } 300 Register reg() { return _r; } 301 }; 302 303 class Pre : public PrePost { 304 public: 305 Pre(Register reg, int o) : PrePost(reg, o) { } 306 }; 307 class Post : public PrePost { 308 Register _idx; 309 bool _is_postreg; 310 public: 311 Post(Register reg, int o) : PrePost(reg, o) { _idx = NULL; _is_postreg = false; } 312 Post(Register reg, Register idx) : PrePost(reg, 0) { _idx = idx; _is_postreg = true; } 313 Register idx_reg() { return _idx; } 314 bool is_postreg() {return _is_postreg; } 315 }; 316 317 namespace ext 318 { 319 enum operation { uxtb, uxth, uxtw, uxtx, sxtb, sxth, sxtw, sxtx }; 320 }; 321 322 // Addressing modes 323 class Address { 324 public: 325 326 enum mode { no_mode, base_plus_offset, pre, post, post_reg, pcrel, 327 base_plus_offset_reg, literal }; 328 329 // Shift and extend for base reg + reg offset addressing 330 class extend { 331 int _option, _shift; 332 ext::operation _op; 333 public: 334 extend() { } 335 extend(int s, int o, ext::operation op) : _option(o), _shift(s), _op(op) { } 336 int option() const{ return _option; } 337 int shift() const { return _shift; } 338 ext::operation op() const { return _op; } 339 }; 340 class uxtw : public extend { 341 public: 342 uxtw(int shift = 
-1): extend(shift, 0b010, ext::uxtw) { } 343 }; 344 class lsl : public extend { 345 public: 346 lsl(int shift = -1): extend(shift, 0b011, ext::uxtx) { } 347 }; 348 class sxtw : public extend { 349 public: 350 sxtw(int shift = -1): extend(shift, 0b110, ext::sxtw) { } 351 }; 352 class sxtx : public extend { 353 public: 354 sxtx(int shift = -1): extend(shift, 0b111, ext::sxtx) { } 355 }; 356 357 private: 358 Register _base; 359 Register _index; 360 int64_t _offset; 361 enum mode _mode; 362 extend _ext; 363 364 RelocationHolder _rspec; 365 366 // Typically we use AddressLiterals we want to use their rval 367 // However in some situations we want the lval (effect address) of 368 // the item. We provide a special factory for making those lvals. 369 bool _is_lval; 370 371 // If the target is far we'll need to load the ea of this to a 372 // register to reach it. Otherwise if near we can do PC-relative 373 // addressing. 374 address _target; 375 376 public: 377 Address() 378 : _mode(no_mode) { } 379 Address(Register r) 380 : _base(r), _index(noreg), _offset(0), _mode(base_plus_offset), _target(0) { } 381 Address(Register r, int o) 382 : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { } 383 Address(Register r, int64_t o) 384 : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { } 385 Address(Register r, uint64_t o) 386 : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { } 387 #ifdef ASSERT 388 Address(Register r, ByteSize disp) 389 : _base(r), _index(noreg), _offset(in_bytes(disp)), _mode(base_plus_offset), _target(0) { } 390 #endif 391 Address(Register r, Register r1, extend ext = lsl()) 392 : _base(r), _index(r1), _offset(0), _mode(base_plus_offset_reg), 393 _ext(ext), _target(0) { } 394 Address(Pre p) 395 : _base(p.reg()), _offset(p.offset()), _mode(pre) { } 396 Address(Post p) 397 : _base(p.reg()), _index(p.idx_reg()), _offset(p.offset()), 398 _mode(p.is_postreg() ? 
post_reg : post), _target(0) { } 399 Address(address target, RelocationHolder const& rspec) 400 : _mode(literal), 401 _rspec(rspec), 402 _is_lval(false), 403 _target(target) { } 404 Address(address target, relocInfo::relocType rtype = relocInfo::external_word_type); 405 Address(Register base, RegisterOrConstant index, extend ext = lsl()) 406 : _base (base), 407 _offset(0), _ext(ext), _target(0) { 408 if (index.is_register()) { 409 _mode = base_plus_offset_reg; 410 _index = index.as_register(); 411 } else { 412 guarantee(ext.option() == ext::uxtx, "should be"); 413 assert(index.is_constant(), "should be"); 414 _mode = base_plus_offset; 415 _offset = index.as_constant() << ext.shift(); 416 } 417 } 418 419 Register base() const { 420 guarantee((_mode == base_plus_offset | _mode == base_plus_offset_reg 421 | _mode == post | _mode == post_reg), 422 "wrong mode"); 423 return _base; 424 } 425 int64_t offset() const { 426 return _offset; 427 } 428 Register index() const { 429 return _index; 430 } 431 mode getMode() const { 432 return _mode; 433 } 434 bool uses(Register reg) const { return _base == reg || _index == reg; } 435 address target() const { return _target; } 436 const RelocationHolder& rspec() const { return _rspec; } 437 438 void encode(Instruction_aarch64 *i) const { 439 i->f(0b111, 29, 27); 440 i->srf(_base, 5); 441 442 switch(_mode) { 443 case base_plus_offset: 444 { 445 unsigned size = i->get(31, 30); 446 if (i->get(26, 26) && i->get(23, 23)) { 447 // SIMD Q Type - Size = 128 bits 448 assert(size == 0, "bad size"); 449 size = 0b100; 450 } 451 unsigned mask = (1 << size) - 1; 452 if (_offset < 0 || _offset & mask) 453 { 454 i->f(0b00, 25, 24); 455 i->f(0, 21), i->f(0b00, 11, 10); 456 i->sf(_offset, 20, 12); 457 } else { 458 i->f(0b01, 25, 24); 459 i->f(_offset >> size, 21, 10); 460 } 461 } 462 break; 463 464 case base_plus_offset_reg: 465 { 466 i->f(0b00, 25, 24); 467 i->f(1, 21); 468 i->rf(_index, 16); 469 i->f(_ext.option(), 15, 13); 470 unsigned size = 
i->get(31, 30); 471 if (i->get(26, 26) && i->get(23, 23)) { 472 // SIMD Q Type - Size = 128 bits 473 assert(size == 0, "bad size"); 474 size = 0b100; 475 } 476 if (size == 0) // It's a byte 477 i->f(_ext.shift() >= 0, 12); 478 else { 479 if (_ext.shift() > 0) 480 assert(_ext.shift() == (int)size, "bad shift"); 481 i->f(_ext.shift() > 0, 12); 482 } 483 i->f(0b10, 11, 10); 484 } 485 break; 486 487 case pre: 488 i->f(0b00, 25, 24); 489 i->f(0, 21), i->f(0b11, 11, 10); 490 i->sf(_offset, 20, 12); 491 break; 492 493 case post: 494 i->f(0b00, 25, 24); 495 i->f(0, 21), i->f(0b01, 11, 10); 496 i->sf(_offset, 20, 12); 497 break; 498 499 default: 500 ShouldNotReachHere(); 501 } 502 } 503 504 void encode_pair(Instruction_aarch64 *i) const { 505 switch(_mode) { 506 case base_plus_offset: 507 i->f(0b010, 25, 23); 508 break; 509 case pre: 510 i->f(0b011, 25, 23); 511 break; 512 case post: 513 i->f(0b001, 25, 23); 514 break; 515 default: 516 ShouldNotReachHere(); 517 } 518 519 unsigned size; // Operand shift in 32-bit words 520 521 if (i->get(26, 26)) { // float 522 switch(i->get(31, 30)) { 523 case 0b10: 524 size = 2; break; 525 case 0b01: 526 size = 1; break; 527 case 0b00: 528 size = 0; break; 529 default: 530 ShouldNotReachHere(); 531 size = 0; // unreachable 532 } 533 } else { 534 size = i->get(31, 31); 535 } 536 537 size = 4 << size; 538 guarantee(_offset % size == 0, "bad offset"); 539 i->sf(_offset / size, 21, 15); 540 i->srf(_base, 5); 541 } 542 543 void encode_nontemporal_pair(Instruction_aarch64 *i) const { 544 // Only base + offset is allowed 545 i->f(0b000, 25, 23); 546 unsigned size = i->get(31, 31); 547 size = 4 << size; 548 guarantee(_offset % size == 0, "bad offset"); 549 i->sf(_offset / size, 21, 15); 550 i->srf(_base, 5); 551 guarantee(_mode == Address::base_plus_offset, 552 "Bad addressing mode for non-temporal op"); 553 } 554 555 void lea(MacroAssembler *, Register) const; 556 557 static bool offset_ok_for_immed(int64_t offset, uint shift); 558 }; 559 560 // 
// Convenience classes
class RuntimeAddress: public Address {

 public:

  // Address of a runtime entry point (stub or runtime call target).
  RuntimeAddress(address target) : Address(target, relocInfo::runtime_call_type) {}

};

class OopAddress: public Address {

 public:

  OopAddress(address target) : Address(target, relocInfo::oop_type){}

};

class ExternalAddress: public Address {
 private:
  static relocInfo::relocType reloc_for_target(address target) {
    // Sometimes ExternalAddress is used for values which aren't
    // exactly addresses, like the card table base.
    // external_word_type can't be used for values in the first page
    // so just skip the reloc in that case.
    return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
  }

 public:

  ExternalAddress(address target) : Address(target, reloc_for_target(target)) {}

};

class InternalAddress: public Address {

 public:

  InternalAddress(address target) : Address(target, relocInfo::internal_word_type) {}
};

// Size in words of the buffer needed to save all FP registers.
const int FPUStateSizeInWords = FloatRegisterImpl::number_of_registers *
                                FloatRegisterImpl::save_slots_per_register;

// PRFM prefetch-operation encodings (type/target-cache/policy).
typedef enum {
  PLDL1KEEP = 0b00000, PLDL1STRM, PLDL2KEEP, PLDL2STRM, PLDL3KEEP, PLDL3STRM,
  PSTL1KEEP = 0b10000, PSTL1STRM, PSTL2KEEP, PSTL2STRM, PSTL3KEEP, PSTL3STRM,
  PLIL1KEEP = 0b01000, PLIL1STRM, PLIL2KEEP, PLIL2STRM, PLIL3KEEP, PLIL3STRM
} prfop;

class Assembler : public AbstractAssembler {

#ifndef PRODUCT
  // Debug hook: a breakpoint-able nop fires when emitting at this pc.
  static const uintptr_t asm_bp;

  void emit_long(jint x) {
    if ((uintptr_t)pc() == asm_bp)
      asm volatile ("nop");
    AbstractAssembler::emit_int32(x);
  }
#else
  void emit_long(jint x) {
    AbstractAssembler::emit_int32(x);
  }
#endif

public:

  // Every A64 instruction is 4 bytes.
  enum { instruction_size = 4 };

  //---< calculate length of instruction >---
  // We just use the values set above.
631 // instruction must start at passed address 632 static unsigned int instr_len(unsigned char *instr) { return instruction_size; } 633 634 //---< longest instructions >--- 635 static unsigned int instr_maxlen() { return instruction_size; } 636 637 Address adjust(Register base, int offset, bool preIncrement) { 638 if (preIncrement) 639 return Address(Pre(base, offset)); 640 else 641 return Address(Post(base, offset)); 642 } 643 644 Address pre(Register base, int offset) { 645 return adjust(base, offset, true); 646 } 647 648 Address post(Register base, int offset) { 649 return adjust(base, offset, false); 650 } 651 652 Address post(Register base, Register idx) { 653 return Address(Post(base, idx)); 654 } 655 656 Instruction_aarch64* current; 657 658 void set_current(Instruction_aarch64* i) { current = i; } 659 660 void f(unsigned val, int msb, int lsb) { 661 current->f(val, msb, lsb); 662 } 663 void f(unsigned val, int msb) { 664 current->f(val, msb, msb); 665 } 666 void sf(int64_t val, int msb, int lsb) { 667 current->sf(val, msb, lsb); 668 } 669 void rf(Register reg, int lsb) { 670 current->rf(reg, lsb); 671 } 672 void srf(Register reg, int lsb) { 673 current->srf(reg, lsb); 674 } 675 void zrf(Register reg, int lsb) { 676 current->zrf(reg, lsb); 677 } 678 void rf(FloatRegister reg, int lsb) { 679 current->rf(reg, lsb); 680 } 681 void fixed(unsigned value, unsigned mask) { 682 current->fixed(value, mask); 683 } 684 685 void emit() { 686 emit_long(current->get_insn()); 687 assert_cond(current->get_bits() == 0xffffffff); 688 current = NULL; 689 } 690 691 typedef void (Assembler::* uncond_branch_insn)(address dest); 692 typedef void (Assembler::* compare_and_branch_insn)(Register Rt, address dest); 693 typedef void (Assembler::* test_and_branch_insn)(Register Rt, int bitpos, address dest); 694 typedef void (Assembler::* prefetch_insn)(address target, prfop); 695 696 void wrap_label(Label &L, uncond_branch_insn insn); 697 void wrap_label(Register r, Label &L, 
compare_and_branch_insn insn); 698 void wrap_label(Register r, int bitpos, Label &L, test_and_branch_insn insn); 699 void wrap_label(Label &L, prfop, prefetch_insn insn); 700 701 // PC-rel. addressing 702 703 void adr(Register Rd, address dest); 704 void _adrp(Register Rd, address dest); 705 706 void adr(Register Rd, const Address &dest); 707 void _adrp(Register Rd, const Address &dest); 708 709 void adr(Register Rd, Label &L) { 710 wrap_label(Rd, L, &Assembler::Assembler::adr); 711 } 712 void _adrp(Register Rd, Label &L) { 713 wrap_label(Rd, L, &Assembler::_adrp); 714 } 715 716 void adrp(Register Rd, const Address &dest, uint64_t &offset); 717 718 #undef INSN 719 720 void add_sub_immediate(Register Rd, Register Rn, unsigned uimm, int op, 721 int negated_op); 722 723 // Add/subtract (immediate) 724 #define INSN(NAME, decode, negated) \ 725 void NAME(Register Rd, Register Rn, unsigned imm, unsigned shift) { \ 726 starti; \ 727 f(decode, 31, 29), f(0b10001, 28, 24), f(shift, 23, 22), f(imm, 21, 10); \ 728 zrf(Rd, 0), srf(Rn, 5); \ 729 } \ 730 \ 731 void NAME(Register Rd, Register Rn, unsigned imm) { \ 732 starti; \ 733 add_sub_immediate(Rd, Rn, imm, decode, negated); \ 734 } 735 736 INSN(addsw, 0b001, 0b011); 737 INSN(subsw, 0b011, 0b001); 738 INSN(adds, 0b101, 0b111); 739 INSN(subs, 0b111, 0b101); 740 741 #undef INSN 742 743 #define INSN(NAME, decode, negated) \ 744 void NAME(Register Rd, Register Rn, unsigned imm) { \ 745 starti; \ 746 add_sub_immediate(Rd, Rn, imm, decode, negated); \ 747 } 748 749 INSN(addw, 0b000, 0b010); 750 INSN(subw, 0b010, 0b000); 751 INSN(add, 0b100, 0b110); 752 INSN(sub, 0b110, 0b100); 753 754 #undef INSN 755 756 // Logical (immediate) 757 #define INSN(NAME, decode, is32) \ 758 void NAME(Register Rd, Register Rn, uint64_t imm) { \ 759 starti; \ 760 uint32_t val = encode_logical_immediate(is32, imm); \ 761 f(decode, 31, 29), f(0b100100, 28, 23), f(val, 22, 10); \ 762 srf(Rd, 0), zrf(Rn, 5); \ 763 } 764 765 INSN(andw, 0b000, true); 766 
INSN(orrw, 0b001, true); 767 INSN(eorw, 0b010, true); 768 INSN(andr, 0b100, false); 769 INSN(orr, 0b101, false); 770 INSN(eor, 0b110, false); 771 772 #undef INSN 773 774 #define INSN(NAME, decode, is32) \ 775 void NAME(Register Rd, Register Rn, uint64_t imm) { \ 776 starti; \ 777 uint32_t val = encode_logical_immediate(is32, imm); \ 778 f(decode, 31, 29), f(0b100100, 28, 23), f(val, 22, 10); \ 779 zrf(Rd, 0), zrf(Rn, 5); \ 780 } 781 782 INSN(ands, 0b111, false); 783 INSN(andsw, 0b011, true); 784 785 #undef INSN 786 787 // Move wide (immediate) 788 #define INSN(NAME, opcode) \ 789 void NAME(Register Rd, unsigned imm, unsigned shift = 0) { \ 790 assert_cond((shift/16)*16 == shift); \ 791 starti; \ 792 f(opcode, 31, 29), f(0b100101, 28, 23), f(shift/16, 22, 21), \ 793 f(imm, 20, 5); \ 794 rf(Rd, 0); \ 795 } 796 797 INSN(movnw, 0b000); 798 INSN(movzw, 0b010); 799 INSN(movkw, 0b011); 800 INSN(movn, 0b100); 801 INSN(movz, 0b110); 802 INSN(movk, 0b111); 803 804 #undef INSN 805 806 // Bitfield 807 #define INSN(NAME, opcode, size) \ 808 void NAME(Register Rd, Register Rn, unsigned immr, unsigned imms) { \ 809 starti; \ 810 guarantee(size == 1 || (immr < 32 && imms < 32), "incorrect immr/imms");\ 811 f(opcode, 31, 22), f(immr, 21, 16), f(imms, 15, 10); \ 812 zrf(Rn, 5), rf(Rd, 0); \ 813 } 814 815 INSN(sbfmw, 0b0001001100, 0); 816 INSN(bfmw, 0b0011001100, 0); 817 INSN(ubfmw, 0b0101001100, 0); 818 INSN(sbfm, 0b1001001101, 1); 819 INSN(bfm, 0b1011001101, 1); 820 INSN(ubfm, 0b1101001101, 1); 821 822 #undef INSN 823 824 // Extract 825 #define INSN(NAME, opcode, size) \ 826 void NAME(Register Rd, Register Rn, Register Rm, unsigned imms) { \ 827 starti; \ 828 guarantee(size == 1 || imms < 32, "incorrect imms"); \ 829 f(opcode, 31, 21), f(imms, 15, 10); \ 830 zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0); \ 831 } 832 833 INSN(extrw, 0b00010011100, 0); 834 INSN(extr, 0b10010011110, 1); 835 836 #undef INSN 837 838 // The maximum range of a branch is fixed for the AArch64 839 // architecture. 
  // In debug mode we shrink it in order to test
  // trampolines, but not so small that branches in the interpreter
  // are out of range.
  static const uint64_t branch_range = NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M);

  static bool reachable_from_branch_at(address branch, address target) {
    return uabs(target - branch) < branch_range;
  }

  // Unconditional branch (immediate)
#define INSN(NAME, opcode)                                              \
  void NAME(address dest) {                                             \
    starti;                                                             \
    int64_t offset = (dest - pc()) >> 2;                                \
    DEBUG_ONLY(assert(reachable_from_branch_at(pc(), dest), "debug only")); \
    f(opcode, 31), f(0b00101, 30, 26), sf(offset, 25, 0);               \
  }                                                                     \
  void NAME(Label &L) {                                                 \
    wrap_label(L, &Assembler::NAME);                                    \
  }                                                                     \
  void NAME(const Address &dest);

  INSN(b, 0);
  INSN(bl, 1);

#undef INSN

  // Compare & branch (immediate)
#define INSN(NAME, opcode)                              \
  void NAME(Register Rt, address dest) {                \
    int64_t offset = (dest - pc()) >> 2;                \
    starti;                                             \
    f(opcode, 31, 24), sf(offset, 23, 5), rf(Rt, 0);    \
  }                                                     \
  void NAME(Register Rt, Label &L) {                    \
    wrap_label(Rt, L, &Assembler::NAME);                \
  }

  INSN(cbzw,  0b00110100);
  INSN(cbnzw, 0b00110101);
  INSN(cbz,   0b10110100);
  INSN(cbnz,  0b10110101);

#undef INSN

  // Test & branch (immediate)
#define INSN(NAME, opcode)                                              \
  void NAME(Register Rt, int bitpos, address dest) {                    \
    int64_t offset = (dest - pc()) >> 2;                                \
    int b5 = bitpos >> 5;                                               \
    bitpos &= 0x1f;                                                     \
    starti;                                                             \
    f(b5, 31), f(opcode, 30, 24), f(bitpos, 23, 19), sf(offset, 18, 5); \
    rf(Rt, 0);                                                          \
  }                                                                     \
  void NAME(Register Rt, int bitpos, Label &L) {                        \
    wrap_label(Rt, bitpos, L, &Assembler::NAME);                        \
  }

  INSN(tbz,  0b0110110);
  INSN(tbnz, 0b0110111);

#undef INSN

  // Conditional branch (immediate)
  enum Condition
    {EQ, NE, HS, CS=HS, LO, CC=LO, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL, NV};

  void br(Condition cond, address dest) {
    int64_t offset = (dest - pc()) >> 2;
    starti;
    f(0b0101010, 31, 25), f(0, 24), sf(offset, 23, 5), f(0, 4), f(cond, 3, 0);
  }

  // Named wrappers for each condition code (b.eq, b.ne, ...).
#define INSN(NAME, cond)                        \
  void NAME(address dest) {                     \
    br(cond, dest);                             \
  }

  INSN(beq, EQ);
  INSN(bne, NE);
  INSN(bhs, HS);
  INSN(bcs, CS);
  INSN(blo, LO);
  INSN(bcc, CC);
  INSN(bmi, MI);
  INSN(bpl, PL);
  INSN(bvs, VS);
  INSN(bvc, VC);
  INSN(bhi, HI);
  INSN(bls, LS);
  INSN(bge, GE);
  INSN(blt, LT);
  INSN(bgt, GT);
  INSN(ble, LE);
  INSN(bal, AL);
  INSN(bnv, NV);

  void br(Condition cc, Label &L);

#undef INSN

  // Exception generation
  void generate_exception(int opc, int op2, int LL, unsigned imm) {
    starti;
    f(0b11010100, 31, 24);
    f(opc, 23, 21), f(imm, 20, 5), f(op2, 4, 2), f(LL, 1, 0);
  }

#define INSN(NAME, opc, op2, LL)                \
  void NAME(unsigned imm) {                     \
    generate_exception(opc, op2, LL, imm);      \
  }

  INSN(svc,   0b000, 0, 0b01);
  INSN(hvc,   0b000, 0, 0b10);
  INSN(smc,   0b000, 0, 0b11);
  INSN(brk,   0b001, 0, 0b00);
  INSN(hlt,   0b010, 0, 0b00);
  INSN(dcps1, 0b101, 0, 0b01);
  INSN(dcps2, 0b101, 0, 0b10);
  INSN(dcps3, 0b101, 0, 0b11);

#undef INSN

  // System
  void system(int op0, int op1, int CRn, int CRm, int op2,
              Register rt = dummy_reg)
  {
    starti;
    f(0b11010101000, 31, 21);
    f(op0, 20, 19);
    f(op1, 18, 16);
    f(CRn, 15, 12);
    f(CRm, 11, 8);
    f(op2, 7, 5);
    rf(rt, 0);
  }

  // HINT #imm
  void hint(int imm) {
    system(0b00, 0b011, 0b0010, 0b0000, imm);
  }

  void nop() {
    hint(0);
  }

  void yield() {
    hint(1);
  }

  void wfe() {
    hint(2);
  }

  void wfi() {
    hint(3);
  }

  void sev() {
    hint(4);
  }

  void sevl() {
    hint(5);
  }

  // we only provide mrs and msr for the special purpose system
  // registers where op1 (instr[20:19]) == 11 and (currently) only
  // use it for FPSR n.b msr has L (instr[21]) == 0 mrs has L == 1

  void msr(int op1, int CRn, int CRm, int op2, Register rt) {
    starti;
    f(0b1101010100011, 31, 19);
    f(op1, 18, 16);
    f(CRn, 15, 12);
    f(CRm, 11, 8);
    f(op2, 7, 5);
    // writing zr is ok
    zrf(rt, 0);
  }

  void mrs(int op1, int CRn, int CRm, int op2, Register rt) {
    starti;
    f(0b1101010100111, 31, 19);
    f(op1, 18, 16);
    f(CRn, 15, 12);
    f(CRm, 11, 8);
    f(op2, 7, 5);
    // reading to zr is a mistake
    rf(rt, 0);
  }

  // Barrier option encodings for DMB/DSB (CRm field).
  enum barrier {OSHLD = 0b0001, OSHST, OSH, NSHLD=0b0101, NSHST, NSH,
                ISHLD = 0b1001, ISHST, ISH, LD=0b1101, ST, SY};

  void dsb(barrier imm) {
    system(0b00, 0b011, 0b00011, imm, 0b100);
  }

  void dmb(barrier imm) {
    system(0b00, 0b011, 0b00011, imm, 0b101);
  }

  void isb() {
    system(0b00, 0b011, 0b00011, SY, 0b110);
  }

  void sys(int op1, int CRn, int CRm, int op2,
           Register rt = (Register)0b11111) {
    system(0b01, op1, CRn, CRm, op2, rt);
  }

  // Only implement operations accessible from EL0 or higher, i.e.,
  //            op1    CRn    CRm    op2
  // IC IVAU     3      7      5      1
  // DC CVAC     3      7      10     1
  // DC CVAP     3      7      12     1
  // DC CVAU     3      7      11     1
  // DC CIVAC    3      7      14     1
  // DC ZVA      3      7      4      1
  // So only deal with the CRm field.
  // Cache-maintenance operation encodings (the CRm field; see table above).
  enum icache_maintenance {IVAU = 0b0101};
  enum dcache_maintenance {CVAC = 0b1010, CVAP = 0b1100, CVAU = 0b1011, CIVAC = 0b1110, ZVA = 0b100};

  // Data-cache maintenance by VA (DC <op>, Xt).
  void dc(dcache_maintenance cm, Register Rt) {
    sys(0b011, 0b0111, cm, 0b001, Rt);
  }

  // Instruction-cache maintenance by VA (IC <op>, Xt).
  void ic(icache_maintenance cm, Register Rt) {
    sys(0b011, 0b0111, cm, 0b001, Rt);
  }

  // A more convenient access to dmb for our purposes
  enum Membar_mask_bits {
    // We can use ISH for a barrier because the ARM ARM says "This
    // architecture assumes that all Processing Elements that use the
    // same operating system or hypervisor are in the same Inner
    // Shareable shareability domain."
    StoreStore = ISHST,
    LoadStore = ISHLD,
    LoadLoad = ISHLD,
    StoreLoad = ISH,
    AnyAny = ISH
  };

  // Emit a DMB for the given ordering constraint.
  void membar(Membar_mask_bits order_constraint) {
    dmb(Assembler::barrier(order_constraint));
  }

  // Unconditional branch (register)
  void branch_reg(Register R, int opc) {
    starti;
    f(0b1101011, 31, 25);
    f(opc, 24, 21);
    f(0b11111000000, 20, 10);
    rf(R, 5);
    f(0b00000, 4, 0);
  }

#define INSN(NAME, opc)                         \
  void NAME(Register R) {                       \
    branch_reg(R, opc);                         \
  }

  INSN(br, 0b0000);
  INSN(blr, 0b0001);
  INSN(ret, 0b0010);

  void ret(void *p); // This forces a compile-time error for ret(0)

#undef INSN

#define INSN(NAME, opc)                         \
  void NAME() {                                 \
    branch_reg(dummy_reg, opc);                 \
  }

  INSN(eret, 0b0100);
  INSN(drps, 0b0101);

#undef INSN

  // Load/store exclusive
  enum operand_size { byte, halfword, word, xword };

  // Common encoder for the exclusive/acquire-release family.
  // ordered selects the acquire/release (o0) bit.
  void load_store_exclusive(Register Rs, Register Rt1, Register Rt2,
                            Register Rn, enum operand_size sz, int op, bool ordered) {
    starti;
    f(sz, 31, 30), f(0b001000, 29, 24), f(op, 23, 21);
    rf(Rs, 16), f(ordered, 15), zrf(Rt2, 10), srf(Rn, 5), zrf(Rt1, 0);
  }

  void load_exclusive(Register dst, Register addr,
                      enum operand_size sz, bool ordered) {
    load_store_exclusive(dummy_reg, dst, dummy_reg, addr,
                         sz, 0b010, ordered);
  }

  void store_exclusive(Register status, Register new_val, Register addr,
                       enum operand_size sz, bool ordered) {
    load_store_exclusive(status, new_val, dummy_reg, addr,
                         sz, 0b000, ordered);
  }

#define INSN4(NAME, sz, op, o0) /* Four registers */                    \
  void NAME(Register Rs, Register Rt1, Register Rt2, Register Rn) {     \
    guarantee(Rs != Rn && Rs != Rt1 && Rs != Rt2, "unpredictable instruction"); \
    load_store_exclusive(Rs, Rt1, Rt2, Rn, sz, op, o0);                 \
  }

#define INSN3(NAME, sz, op, o0) /* Three registers */                   \
  void NAME(Register Rs, Register Rt, Register Rn) {                    \
    guarantee(Rs != Rn && Rs != Rt, "unpredictable instruction");       \
    load_store_exclusive(Rs, Rt, dummy_reg, Rn, sz, op, o0);            \
  }

#define INSN2(NAME, sz, op, o0) /* Two registers */                     \
  void NAME(Register Rt, Register Rn) {                                 \
    load_store_exclusive(dummy_reg, Rt, dummy_reg,                      \
                         Rn, sz, op, o0);                               \
  }

#define INSN_FOO(NAME, sz, op, o0) /* Three registers, encoded differently */ \
  void NAME(Register Rt1, Register Rt2, Register Rn) {                  \
    guarantee(Rt1 != Rt2, "unpredictable instruction");                 \
    load_store_exclusive(dummy_reg, Rt1, Rt2, Rn, sz, op, o0);          \
  }

  // bytes
  INSN3(stxrb, byte, 0b000, 0);
  INSN3(stlxrb, byte, 0b000, 1);
  INSN2(ldxrb, byte, 0b010, 0);
  INSN2(ldaxrb, byte, 0b010, 1);
  INSN2(stlrb, byte, 0b100, 1);
  INSN2(ldarb, byte, 0b110, 1);

  // halfwords
  INSN3(stxrh, halfword, 0b000, 0);
  INSN3(stlxrh, halfword, 0b000, 1);
  INSN2(ldxrh, halfword, 0b010, 0);
  INSN2(ldaxrh, halfword, 0b010, 1);
  INSN2(stlrh, halfword, 0b100, 1);
  INSN2(ldarh, halfword, 0b110, 1);

  // words
  INSN3(stxrw, word, 0b000, 0);
  INSN3(stlxrw, word, 0b000, 1);
  INSN4(stxpw, word, 0b001, 0);
  INSN4(stlxpw, word, 0b001, 1);
  INSN2(ldxrw, word, 0b010, 0);
  INSN2(ldaxrw, word, 0b010, 1);
  INSN_FOO(ldxpw, word, 0b011, 0);
  INSN_FOO(ldaxpw, word, 0b011, 1);
  INSN2(stlrw, word, 0b100, 1);
  INSN2(ldarw, word, 0b110, 1);

  // xwords
  INSN3(stxr, xword, 0b000, 0);
  INSN3(stlxr, xword, 0b000, 1);
  INSN4(stxp, xword, 0b001, 0);
  INSN4(stlxp, xword, 0b001, 1);
  INSN2(ldxr, xword, 0b010, 0);
  INSN2(ldaxr, xword, 0b010, 1);
  INSN_FOO(ldxp, xword, 0b011, 0);
  INSN_FOO(ldaxp, xword, 0b011, 1);
  INSN2(stlr, xword, 0b100, 1);
  INSN2(ldar, xword, 0b110, 1);

#undef INSN2
#undef INSN3
#undef INSN4
#undef INSN_FOO

  // 8.1 Compare and swap extensions
  // a/r select the acquire/release semantics; not_pair distinguishes
  // CAS from CASP (pair), which uses a narrower size field.
  void lse_cas(Register Rs, Register Rt, Register Rn,
               enum operand_size sz, bool a, bool r, bool not_pair) {
    starti;
    if (! not_pair) { // Pair
      assert(sz == word || sz == xword, "invalid size");
      /* The size bit is in bit 30, not 31 */
      sz = (operand_size)(sz == word ? 0b00:0b01);
    }
    f(sz, 31, 30), f(0b001000, 29, 24), f(not_pair ? 1 : 0, 23), f(a, 22), f(1, 21);
    zrf(Rs, 16), f(r, 15), f(0b11111, 14, 10), srf(Rn, 5), zrf(Rt, 0);
  }

  // CAS
#define INSN(NAME, a, r)                                                \
  void NAME(operand_size sz, Register Rs, Register Rt, Register Rn) {   \
    assert(Rs != Rn && Rs != Rt, "unpredictable instruction");          \
    lse_cas(Rs, Rt, Rn, sz, a, r, true);                                \
  }
  INSN(cas,   false, false)
  INSN(casa,  true,  false)
  INSN(casl,  false, true)
  INSN(casal, true,  true)
#undef INSN

  // CASP
#define INSN(NAME, a, r)                                                \
  void NAME(operand_size sz, Register Rs, Register Rs1,                 \
            Register Rt, Register Rt1, Register Rn) {                   \
    assert((Rs->encoding() & 1) == 0 && (Rt->encoding() & 1) == 0 &&    \
           Rs->successor() == Rs1 && Rt->successor() == Rt1 &&          \
           Rs != Rn && Rs1 != Rn && Rs != Rt, "invalid registers");     \
    lse_cas(Rs, Rt, Rn, sz, a, r, false);                               \
  }
  INSN(casp,   false, false)
  INSN(caspa,  true,  false)
  INSN(caspl,  false, true)
  INSN(caspal, true,  true)
#undef INSN

  // 8.1 Atomic operations
  void lse_atomic(Register Rs, Register Rt, Register Rn,
                  enum operand_size sz, int op1, int op2, bool a, bool r) {
    starti;
    f(sz, 31, 30), f(0b111000, 29, 24), f(a, 23), f(r, 22), f(1, 21);
    zrf(Rs, 16), f(op1, 15), f(op2, 14, 12), f(0, 11, 10), srf(Rn, 5), zrf(Rt, 0);
  }

  // Each atomic gets four variants: plain, acquire (_A), release (_L),
  // and acquire+release (_AL).
#define INSN(NAME, NAME_A, NAME_L, NAME_AL, op1, op2)                   \
  void NAME(operand_size sz, Register Rs, Register Rt, Register Rn) {   \
    lse_atomic(Rs, Rt, Rn, sz, op1, op2, false, false);                 \
  }                                                                     \
  void NAME_A(operand_size sz, Register Rs, Register Rt, Register Rn) { \
    lse_atomic(Rs, Rt, Rn, sz, op1, op2, true, false);                  \
  }                                                                     \
  void NAME_L(operand_size sz, Register Rs, Register Rt, Register Rn) { \
    lse_atomic(Rs, Rt, Rn, sz, op1, op2, false, true);                  \
  }                                                                     \
  void NAME_AL(operand_size sz, Register Rs, Register Rt, Register Rn) {\
    lse_atomic(Rs, Rt, Rn, sz, op1, op2, true, true);                   \
  }
  INSN(ldadd,  ldadda,  ldaddl,  ldaddal,  0, 0b000);
  INSN(ldbic,  ldbica,  ldbicl,  ldbical,  0, 0b001);
  INSN(ldeor,  ldeora,  ldeorl,  ldeoral,  0, 0b010);
  INSN(ldorr,  ldorra,  ldorrl,  ldorral,  0, 0b011);
  INSN(ldsmax, ldsmaxa, ldsmaxl, ldsmaxal, 0, 0b100);
  INSN(ldsmin, ldsmina, ldsminl, ldsminal, 0, 0b101);
  INSN(ldumax, ldumaxa, ldumaxl, ldumaxal, 0, 0b110);
  INSN(ldumin, ldumina, lduminl, lduminal, 0, 0b111);
  INSN(swp,    swpa,    swpl,    swpal,    1, 0b000);
#undef INSN

  // Load register (literal)
  // Three overloads per mnemonic: raw address, address with reloc, and label.
#define INSN(NAME, opc, V)                                              \
  void NAME(Register Rt, address dest) {                                \
    int64_t offset = (dest - pc()) >> 2;                                \
    starti;                                                             \
    f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24),        \
      sf(offset, 23, 5);                                                \
    rf(Rt, 0);                                                          \
  }                                                                     \
  void NAME(Register Rt, address dest, relocInfo::relocType rtype) {    \
    InstructionMark im(this);                                           \
    guarantee(rtype == relocInfo::internal_word_type,                   \
              "only internal_word_type relocs make sense here");        \
    code_section()->relocate(inst_mark(), InternalAddress(dest).rspec()); \
    NAME(Rt, dest);                                                     \
  }                                                                     \
  void NAME(Register Rt, Label &L) {                                    \
    wrap_label(Rt, L, &Assembler::NAME);                                \
  }

  INSN(ldrw, 0b00, 0);
  INSN(ldr, 0b01, 0);
  INSN(ldrsw, 0b10, 0);

#undef INSN

  // Load FP/SIMD register (literal).
#define INSN(NAME, opc, V)                                              \
  void NAME(FloatRegister Rt, address dest) {                           \
    int64_t offset = (dest - pc()) >> 2;                                \
    starti;                                                             \
    f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24),        \
      sf(offset, 23, 5);                                                \
    rf((Register)Rt, 0);                                                \
  }

  INSN(ldrs, 0b00, 1);
  INSN(ldrd, 0b01, 1);
  INSN(ldrq, 0b10, 1);

#undef INSN

  // Prefetch (literal).
#define INSN(NAME, opc, V)                                              \
  void NAME(address dest, prfop op = PLDL1KEEP) {                       \
    int64_t offset = (dest - pc()) >> 2;                                \
    starti;                                                             \
    f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24),        \
      sf(offset, 23, 5);                                                \
    f(op, 4, 0);                                                        \
  }                                                                     \
  void NAME(Label &L, prfop op = PLDL1KEEP) {                           \
    wrap_label(L, op, &Assembler::NAME);                                \
  }

  INSN(prfm, 0b11, 0);

#undef INSN

  // Load/store
  // Common encoder for register-pair load/store; the address supplies
  // the remaining fields (pair vs non-temporal pair addressing).
  void ld_st1(int opc, int p1, int V, int L,
              Register Rt1, Register Rt2, Address adr, bool no_allocate) {
    starti;
    f(opc, 31, 30), f(p1, 29, 27), f(V, 26), f(L, 22);
    zrf(Rt2, 10), zrf(Rt1, 0);
    if (no_allocate) {
      adr.encode_nontemporal_pair(current);
    } else {
      adr.encode_pair(current);
    }
  }

  // Load/store register pair (offset)
#define INSN(NAME, size, p1, V, L, no_allocate)         \
  void NAME(Register Rt1, Register Rt2, Address adr) {  \
    ld_st1(size, p1, V, L, Rt1, Rt2, adr, no_allocate); \
  }

  INSN(stpw, 0b00, 0b101, 0, 0, false);
  INSN(ldpw, 0b00, 0b101, 0, 1, false);
  INSN(ldpsw, 0b01, 0b101, 0, 1, false);
  INSN(stp, 0b10, 0b101, 0, 0, false);
  INSN(ldp, 0b10, 0b101, 0, 1, false);

  // Load/store no-allocate pair (offset)
  INSN(stnpw, 0b00, 0b101, 0, 0, true);
  INSN(ldnpw, 0b00, 0b101, 0, 1, true);
  INSN(stnp, 0b10, 0b101, 0, 0, true);
  INSN(ldnp, 0b10, 0b101, 0, 1, true);

#undef INSN

  // FP/SIMD register pair forms (V == 1).
#define INSN(NAME, size, p1, V, L, no_allocate)                         \
  void NAME(FloatRegister Rt1, FloatRegister Rt2, Address adr) {        \
    ld_st1(size, p1, V, L, (Register)Rt1, (Register)Rt2, adr, no_allocate); \
  }

  INSN(stps, 0b00, 0b101, 1, 0, false);
  INSN(ldps, 0b00, 0b101, 1, 1, false);
  INSN(stpd, 0b01, 0b101, 1, 0, false);
  INSN(ldpd, 0b01, 0b101, 1, 1, false);
  INSN(stpq, 0b10, 0b101, 1, 0, false);
  INSN(ldpq, 0b10, 0b101, 1, 1, false);

#undef INSN

  // Load/store register (all modes)
  void ld_st2(Register Rt, const Address &adr, int size, int op, int V = 0) {
    starti;

    f(V, 26); // general reg?
    zrf(Rt, 0);

    // Encoding for literal loads is done here (rather than pushed
    // down into Address::encode) because the encoding of this
    // instruction is too different from all of the other forms to
    // make it worth sharing.
    if (adr.getMode() == Address::literal) {
      assert(size == 0b10 || size == 0b11, "bad operand size in ldr");
      assert(op == 0b01, "literal form can only be used with loads");
      f(size & 0b01, 31, 30), f(0b011, 29, 27), f(0b00, 25, 24);
      int64_t offset = (adr.target() - pc()) >> 2;
      sf(offset, 23, 5);
      code_section()->relocate(pc(), adr.rspec());
      return;
    }

    f(size, 31, 30);
    f(op, 23, 22); // str
    adr.encode(current);
  }

#define INSN(NAME, size, op)                    \
  void NAME(Register Rt, const Address &adr) {  \
    ld_st2(Rt, adr, size, op);                  \
  }                                             \

  INSN(str, 0b11, 0b00);
  INSN(strw, 0b10, 0b00);
  INSN(strb, 0b00, 0b00);
  INSN(strh, 0b01, 0b00);

  INSN(ldr, 0b11, 0b01);
  INSN(ldrw, 0b10, 0b01);
  INSN(ldrb, 0b00, 0b01);
  INSN(ldrh, 0b01, 0b01);

  INSN(ldrsb, 0b00, 0b10);
  INSN(ldrsbw, 0b00, 0b11);
  INSN(ldrsh, 0b01, 0b10);
  INSN(ldrshw, 0b01, 0b11);
  INSN(ldrsw, 0b10, 0b10);

#undef INSN

  // Prefetch: the prfop is encoded in the Rt field.
#define INSN(NAME, size, op)                              \
  void NAME(const Address &adr, prfop pfop = PLDL1KEEP) { \
    ld_st2((Register)pfop, adr, size, op);                \
  }

  INSN(prfm, 0b11, 0b10); // FIXME: PRFM should not be used with
                          // writeback modes, but the assembler
                          // doesn't enforce that.
1446 1447 #undef INSN 1448 1449 #define INSN(NAME, size, op) \ 1450 void NAME(FloatRegister Rt, const Address &adr) { \ 1451 ld_st2((Register)Rt, adr, size, op, 1); \ 1452 } 1453 1454 INSN(strd, 0b11, 0b00); 1455 INSN(strs, 0b10, 0b00); 1456 INSN(ldrd, 0b11, 0b01); 1457 INSN(ldrs, 0b10, 0b01); 1458 INSN(strq, 0b00, 0b10); 1459 INSN(ldrq, 0x00, 0b11); 1460 1461 #undef INSN 1462 1463 enum shift_kind { LSL, LSR, ASR, ROR }; 1464 1465 void op_shifted_reg(unsigned decode, 1466 enum shift_kind kind, unsigned shift, 1467 unsigned size, unsigned op) { 1468 f(size, 31); 1469 f(op, 30, 29); 1470 f(decode, 28, 24); 1471 f(shift, 15, 10); 1472 f(kind, 23, 22); 1473 } 1474 1475 // Logical (shifted register) 1476 #define INSN(NAME, size, op, N) \ 1477 void NAME(Register Rd, Register Rn, Register Rm, \ 1478 enum shift_kind kind = LSL, unsigned shift = 0) { \ 1479 starti; \ 1480 guarantee(size == 1 || shift < 32, "incorrect shift"); \ 1481 f(N, 21); \ 1482 zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0); \ 1483 op_shifted_reg(0b01010, kind, shift, size, op); \ 1484 } 1485 1486 INSN(andr, 1, 0b00, 0); 1487 INSN(orr, 1, 0b01, 0); 1488 INSN(eor, 1, 0b10, 0); 1489 INSN(ands, 1, 0b11, 0); 1490 INSN(andw, 0, 0b00, 0); 1491 INSN(orrw, 0, 0b01, 0); 1492 INSN(eorw, 0, 0b10, 0); 1493 INSN(andsw, 0, 0b11, 0); 1494 1495 #undef INSN 1496 1497 #define INSN(NAME, size, op, N) \ 1498 void NAME(Register Rd, Register Rn, Register Rm, \ 1499 enum shift_kind kind = LSL, unsigned shift = 0) { \ 1500 starti; \ 1501 f(N, 21); \ 1502 zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0); \ 1503 op_shifted_reg(0b01010, kind, shift, size, op); \ 1504 } \ 1505 \ 1506 /* These instructions have no immediate form. Provide an overload so \ 1507 that if anyone does try to use an immediate operand -- this has \ 1508 happened! -- we'll get a compile-time error. 
*/ \ 1509 void NAME(Register Rd, Register Rn, unsigned imm, \ 1510 enum shift_kind kind = LSL, unsigned shift = 0) { \ 1511 assert(false, " can't be used with immediate operand"); \ 1512 } 1513 1514 INSN(bic, 1, 0b00, 1); 1515 INSN(orn, 1, 0b01, 1); 1516 INSN(eon, 1, 0b10, 1); 1517 INSN(bics, 1, 0b11, 1); 1518 INSN(bicw, 0, 0b00, 1); 1519 INSN(ornw, 0, 0b01, 1); 1520 INSN(eonw, 0, 0b10, 1); 1521 INSN(bicsw, 0, 0b11, 1); 1522 1523 #undef INSN 1524 1525 // Aliases for short forms of orn 1526 void mvn(Register Rd, Register Rm, 1527 enum shift_kind kind = LSL, unsigned shift = 0) { 1528 orn(Rd, zr, Rm, kind, shift); 1529 } 1530 1531 void mvnw(Register Rd, Register Rm, 1532 enum shift_kind kind = LSL, unsigned shift = 0) { 1533 ornw(Rd, zr, Rm, kind, shift); 1534 } 1535 1536 // Add/subtract (shifted register) 1537 #define INSN(NAME, size, op) \ 1538 void NAME(Register Rd, Register Rn, Register Rm, \ 1539 enum shift_kind kind, unsigned shift = 0) { \ 1540 starti; \ 1541 f(0, 21); \ 1542 assert_cond(kind != ROR); \ 1543 guarantee(size == 1 || shift < 32, "incorrect shift");\ 1544 zrf(Rd, 0), zrf(Rn, 5), zrf(Rm, 16); \ 1545 op_shifted_reg(0b01011, kind, shift, size, op); \ 1546 } 1547 1548 INSN(add, 1, 0b000); 1549 INSN(sub, 1, 0b10); 1550 INSN(addw, 0, 0b000); 1551 INSN(subw, 0, 0b10); 1552 1553 INSN(adds, 1, 0b001); 1554 INSN(subs, 1, 0b11); 1555 INSN(addsw, 0, 0b001); 1556 INSN(subsw, 0, 0b11); 1557 1558 #undef INSN 1559 1560 // Add/subtract (extended register) 1561 #define INSN(NAME, op) \ 1562 void NAME(Register Rd, Register Rn, Register Rm, \ 1563 ext::operation option, int amount = 0) { \ 1564 starti; \ 1565 zrf(Rm, 16), srf(Rn, 5), srf(Rd, 0); \ 1566 add_sub_extended_reg(op, 0b01011, Rd, Rn, Rm, 0b00, option, amount); \ 1567 } 1568 1569 void add_sub_extended_reg(unsigned op, unsigned decode, 1570 Register Rd, Register Rn, Register Rm, 1571 unsigned opt, ext::operation option, unsigned imm) { 1572 guarantee(imm <= 4, "shift amount must be <= 4"); 1573 f(op, 31, 29), 
f(decode, 28, 24), f(opt, 23, 22), f(1, 21); 1574 f(option, 15, 13), f(imm, 12, 10); 1575 } 1576 1577 INSN(addw, 0b000); 1578 INSN(subw, 0b010); 1579 INSN(add, 0b100); 1580 INSN(sub, 0b110); 1581 1582 #undef INSN 1583 1584 #define INSN(NAME, op) \ 1585 void NAME(Register Rd, Register Rn, Register Rm, \ 1586 ext::operation option, int amount = 0) { \ 1587 starti; \ 1588 zrf(Rm, 16), srf(Rn, 5), zrf(Rd, 0); \ 1589 add_sub_extended_reg(op, 0b01011, Rd, Rn, Rm, 0b00, option, amount); \ 1590 } 1591 1592 INSN(addsw, 0b001); 1593 INSN(subsw, 0b011); 1594 INSN(adds, 0b101); 1595 INSN(subs, 0b111); 1596 1597 #undef INSN 1598 1599 // Aliases for short forms of add and sub 1600 #define INSN(NAME) \ 1601 void NAME(Register Rd, Register Rn, Register Rm) { \ 1602 if (Rd == sp || Rn == sp) \ 1603 NAME(Rd, Rn, Rm, ext::uxtx); \ 1604 else \ 1605 NAME(Rd, Rn, Rm, LSL); \ 1606 } 1607 1608 INSN(addw); 1609 INSN(subw); 1610 INSN(add); 1611 INSN(sub); 1612 1613 INSN(addsw); 1614 INSN(subsw); 1615 INSN(adds); 1616 INSN(subs); 1617 1618 #undef INSN 1619 1620 // Add/subtract (with carry) 1621 void add_sub_carry(unsigned op, Register Rd, Register Rn, Register Rm) { 1622 starti; 1623 f(op, 31, 29); 1624 f(0b11010000, 28, 21); 1625 f(0b000000, 15, 10); 1626 zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0); 1627 } 1628 1629 #define INSN(NAME, op) \ 1630 void NAME(Register Rd, Register Rn, Register Rm) { \ 1631 add_sub_carry(op, Rd, Rn, Rm); \ 1632 } 1633 1634 INSN(adcw, 0b000); 1635 INSN(adcsw, 0b001); 1636 INSN(sbcw, 0b010); 1637 INSN(sbcsw, 0b011); 1638 INSN(adc, 0b100); 1639 INSN(adcs, 0b101); 1640 INSN(sbc,0b110); 1641 INSN(sbcs, 0b111); 1642 1643 #undef INSN 1644 1645 // Conditional compare (both kinds) 1646 void conditional_compare(unsigned op, int o1, int o2, int o3, 1647 Register Rn, unsigned imm5, unsigned nzcv, 1648 unsigned cond) { 1649 starti; 1650 f(op, 31, 29); 1651 f(0b11010010, 28, 21); 1652 f(cond, 15, 12); 1653 f(o1, 11); 1654 f(o2, 10); 1655 f(o3, 4); 1656 f(nzcv, 3, 0); 1657 f(imm5, 20, 
16), zrf(Rn, 5); 1658 } 1659 1660 #define INSN(NAME, op) \ 1661 void NAME(Register Rn, Register Rm, int imm, Condition cond) { \ 1662 int regNumber = (Rm == zr ? 31 : (uintptr_t)Rm); \ 1663 conditional_compare(op, 0, 0, 0, Rn, regNumber, imm, cond); \ 1664 } \ 1665 \ 1666 void NAME(Register Rn, int imm5, int imm, Condition cond) { \ 1667 conditional_compare(op, 1, 0, 0, Rn, imm5, imm, cond); \ 1668 } 1669 1670 INSN(ccmnw, 0b001); 1671 INSN(ccmpw, 0b011); 1672 INSN(ccmn, 0b101); 1673 INSN(ccmp, 0b111); 1674 1675 #undef INSN 1676 1677 // Conditional select 1678 void conditional_select(unsigned op, unsigned op2, 1679 Register Rd, Register Rn, Register Rm, 1680 unsigned cond) { 1681 starti; 1682 f(op, 31, 29); 1683 f(0b11010100, 28, 21); 1684 f(cond, 15, 12); 1685 f(op2, 11, 10); 1686 zrf(Rm, 16), zrf(Rn, 5), rf(Rd, 0); 1687 } 1688 1689 #define INSN(NAME, op, op2) \ 1690 void NAME(Register Rd, Register Rn, Register Rm, Condition cond) { \ 1691 conditional_select(op, op2, Rd, Rn, Rm, cond); \ 1692 } 1693 1694 INSN(cselw, 0b000, 0b00); 1695 INSN(csincw, 0b000, 0b01); 1696 INSN(csinvw, 0b010, 0b00); 1697 INSN(csnegw, 0b010, 0b01); 1698 INSN(csel, 0b100, 0b00); 1699 INSN(csinc, 0b100, 0b01); 1700 INSN(csinv, 0b110, 0b00); 1701 INSN(csneg, 0b110, 0b01); 1702 1703 #undef INSN 1704 1705 // Data processing 1706 void data_processing(unsigned op29, unsigned opcode, 1707 Register Rd, Register Rn) { 1708 f(op29, 31, 29), f(0b11010110, 28, 21); 1709 f(opcode, 15, 10); 1710 rf(Rn, 5), rf(Rd, 0); 1711 } 1712 1713 // (1 source) 1714 #define INSN(NAME, op29, opcode2, opcode) \ 1715 void NAME(Register Rd, Register Rn) { \ 1716 starti; \ 1717 f(opcode2, 20, 16); \ 1718 data_processing(op29, opcode, Rd, Rn); \ 1719 } 1720 1721 INSN(rbitw, 0b010, 0b00000, 0b00000); 1722 INSN(rev16w, 0b010, 0b00000, 0b00001); 1723 INSN(revw, 0b010, 0b00000, 0b00010); 1724 INSN(clzw, 0b010, 0b00000, 0b00100); 1725 INSN(clsw, 0b010, 0b00000, 0b00101); 1726 1727 INSN(rbit, 0b110, 0b00000, 0b00000); 1728 
  INSN(rev16, 0b110, 0b00000, 0b00001);
  INSN(rev32, 0b110, 0b00000, 0b00010);
  INSN(rev, 0b110, 0b00000, 0b00011);
  INSN(clz, 0b110, 0b00000, 0b00100);
  INSN(cls, 0b110, 0b00000, 0b00101);

#undef INSN

  // (2 sources)
#define INSN(NAME, op29, opcode)                     \
  void NAME(Register Rd, Register Rn, Register Rm) { \
    starti;                                          \
    rf(Rm, 16);                                      \
    data_processing(op29, opcode, Rd, Rn);           \
  }

  INSN(udivw, 0b000, 0b000010);
  INSN(sdivw, 0b000, 0b000011);
  INSN(lslvw, 0b000, 0b001000);
  INSN(lsrvw, 0b000, 0b001001);
  INSN(asrvw, 0b000, 0b001010);
  INSN(rorvw, 0b000, 0b001011);

  INSN(udiv, 0b100, 0b000010);
  INSN(sdiv, 0b100, 0b000011);
  INSN(lslv, 0b100, 0b001000);
  INSN(lsrv, 0b100, 0b001001);
  INSN(asrv, 0b100, 0b001010);
  INSN(rorv, 0b100, 0b001011);

#undef INSN

  // (3 sources)
  void data_processing(unsigned op54, unsigned op31, unsigned o0,
                       Register Rd, Register Rn, Register Rm,
                       Register Ra) {
    starti;
    f(op54, 31, 29), f(0b11011, 28, 24);
    f(op31, 23, 21), f(o0, 15);
    zrf(Rm, 16), zrf(Ra, 10), zrf(Rn, 5), zrf(Rd, 0);
  }

#define INSN(NAME, op54, op31, o0)                                \
  void NAME(Register Rd, Register Rn, Register Rm, Register Ra) { \
    data_processing(op54, op31, o0, Rd, Rn, Rm, Ra);              \
  }

  INSN(maddw, 0b000, 0b000, 0);
  INSN(msubw, 0b000, 0b000, 1);
  INSN(madd, 0b100, 0b000, 0);
  INSN(msub, 0b100, 0b000, 1);
  INSN(smaddl, 0b100, 0b001, 0);
  INSN(smsubl, 0b100, 0b001, 1);
  INSN(umaddl, 0b100, 0b101, 0);
  INSN(umsubl, 0b100, 0b101, 1);

#undef INSN

  // High-half multiplies: Ra is hard-wired to the zero register (31).
#define INSN(NAME, op54, op31, o0)                           \
  void NAME(Register Rd, Register Rn, Register Rm) {         \
    data_processing(op54, op31, o0, Rd, Rn, Rm, (Register)31); \
  }

  INSN(smulh, 0b100, 0b010, 0);
  INSN(umulh, 0b100, 0b110, 0);

#undef INSN

  // Floating-point data-processing (1 source)
  void data_processing(unsigned op31, unsigned type, unsigned opcode,
                       FloatRegister Vd, FloatRegister Vn) {
    starti;
    f(op31, 31, 29);
    f(0b11110, 28, 24);
    f(type, 23, 22), f(1, 21), f(opcode, 20, 15), f(0b10000, 14, 10);
    rf(Vn, 5), rf(Vd, 0);
  }

#define INSN(NAME, op31, type, opcode)            \
  void NAME(FloatRegister Vd, FloatRegister Vn) { \
    data_processing(op31, type, opcode, Vd, Vn);  \
  }

  // Raw fmov is private: the public fmovs/fmovd wrappers below insist
  // on distinct registers.
private:
  INSN(i_fmovs, 0b000, 0b00, 0b000000);
public:
  INSN(fabss, 0b000, 0b00, 0b000001);
  INSN(fnegs, 0b000, 0b00, 0b000010);
  INSN(fsqrts, 0b000, 0b00, 0b000011);
  INSN(fcvts, 0b000, 0b00, 0b000101);   // Single-precision to double-precision

private:
  INSN(i_fmovd, 0b000, 0b01, 0b000000);
public:
  INSN(fabsd, 0b000, 0b01, 0b000001);
  INSN(fnegd, 0b000, 0b01, 0b000010);
  INSN(fsqrtd, 0b000, 0b01, 0b000011);
  INSN(fcvtd, 0b000, 0b01, 0b000100);   // Double-precision to single-precision

  void fmovd(FloatRegister Vd, FloatRegister Vn) {
    assert(Vd != Vn, "should be");
    i_fmovd(Vd, Vn);
  }

  void fmovs(FloatRegister Vd, FloatRegister Vn) {
    assert(Vd != Vn, "should be");
    i_fmovs(Vd, Vn);
  }

#undef INSN

  // Floating-point data-processing (2 source)
  void data_processing(unsigned op31, unsigned type, unsigned opcode,
                       FloatRegister Vd, FloatRegister Vn, FloatRegister Vm) {
    starti;
    f(op31, 31, 29);
    f(0b11110, 28, 24);
    f(type, 23, 22), f(1, 21), f(opcode, 15, 12), f(0b10, 11, 10);
    rf(Vm, 16), rf(Vn, 5), rf(Vd, 0);
  }

#define INSN(NAME, op31, type, opcode)                              \
  void NAME(FloatRegister Vd, FloatRegister Vn, FloatRegister Vm) { \
    data_processing(op31, type, opcode, Vd, Vn, Vm);                \
  }

  INSN(fmuls, 0b000, 0b00, 0b0000);
  INSN(fdivs, 0b000, 0b00, 0b0001);
  INSN(fadds, 0b000, 0b00, 0b0010);
  INSN(fsubs, 0b000, 0b00, 0b0011);
  INSN(fmaxs, 0b000, 0b00, 0b0100);
  INSN(fmins, 0b000, 0b00, 0b0101);
  INSN(fnmuls, 0b000, 0b00, 0b1000);

  INSN(fmuld, 0b000, 0b01, 0b0000);
  INSN(fdivd, 0b000, 0b01, 0b0001);
  INSN(faddd, 0b000, 0b01, 0b0010);
  INSN(fsubd, 0b000, 0b01, 0b0011);
  INSN(fmaxd, 0b000, 0b01, 0b0100);
  INSN(fmind, 0b000, 0b01, 0b0101);
  INSN(fnmuld, 0b000, 0b01, 0b1000);

#undef INSN

  // Floating-point data-processing (3 source)
  void data_processing(unsigned op31, unsigned type, unsigned o1, unsigned o0,
                       FloatRegister Vd, FloatRegister Vn, FloatRegister Vm,
                       FloatRegister Va) {
    starti;
    f(op31, 31, 29);
    f(0b11111, 28, 24);
    f(type, 23, 22), f(o1, 21), f(o0, 15);
    rf(Vm, 16), rf(Va, 10), rf(Vn, 5), rf(Vd, 0);
  }

#define INSN(NAME, op31, type, o1, o0)                            \
  void NAME(FloatRegister Vd, FloatRegister Vn, FloatRegister Vm, \
            FloatRegister Va) {                                   \
    data_processing(op31, type, o1, o0, Vd, Vn, Vm, Va);          \
  }

  INSN(fmadds, 0b000, 0b00, 0, 0);
  INSN(fmsubs, 0b000, 0b00, 0, 1);
  INSN(fnmadds, 0b000, 0b00, 1, 0);
  INSN(fnmsubs, 0b000, 0b00, 1, 1);

  INSN(fmaddd, 0b000, 0b01, 0, 0);
  INSN(fmsubd, 0b000, 0b01, 0, 1);
  INSN(fnmaddd, 0b000, 0b01, 1, 0);
  INSN(fnmsub, 0b000, 0b01, 1, 1);  // n.b. name lacks the 'd' suffix of its siblings

#undef INSN

  // Floating-point conditional select
  void fp_conditional_select(unsigned op31, unsigned type,
                             unsigned op1, unsigned op2,
                             Condition cond, FloatRegister Vd,
                             FloatRegister Vn, FloatRegister Vm) {
    starti;
    f(op31, 31, 29);
    f(0b11110, 28, 24);
    f(type, 23, 22);
    f(op1, 21, 21);
    f(op2, 11, 10);
    f(cond, 15, 12);
    rf(Vm, 16), rf(Vn, 5), rf(Vd, 0);
  }

#define INSN(NAME, op31, type, op1, op2)                           \
  void NAME(FloatRegister Vd, FloatRegister Vn,                    \
            FloatRegister Vm, Condition cond) {                    \
    fp_conditional_select(op31, type, op1, op2, cond, Vd, Vn, Vm); \
  }

  INSN(fcsels, 0b000, 0b00, 0b1, 0b11);
  INSN(fcseld, 0b000, 0b01, 0b1, 0b11);

#undef INSN

  // Floating-point<->integer conversions
  void float_int_convert(unsigned op31, unsigned type,
                         unsigned rmode, unsigned opcode,
                         Register Rd, Register Rn) {
    starti;
    f(op31, 31, 29);
    f(0b11110, 28, 24);
    f(type, 23, 22), f(1, 21), f(rmode, 20, 19);
    f(opcode, 18, 16), f(0b000000, 15, 10);
    zrf(Rn, 5), zrf(Rd, 0);
  }

  // FP -> general register direction.
#define INSN(NAME, op31, type, rmode, opcode)                       \
  void NAME(Register Rd, FloatRegister Vn) {                        \
    float_int_convert(op31, type, rmode, opcode, Rd, (Register)Vn); \
  }

  INSN(fcvtzsw, 0b000, 0b00, 0b11, 0b000);
  INSN(fcvtzs, 0b100, 0b00, 0b11, 0b000);
  INSN(fcvtzdw, 0b000, 0b01, 0b11, 0b000);
  INSN(fcvtzd, 0b100, 0b01, 0b11, 0b000);

  INSN(fmovs, 0b000, 0b00, 0b00, 0b110);
  INSN(fmovd, 0b100, 0b01, 0b00, 0b110);

  // INSN(fmovhid, 0b100, 0b10, 0b01, 0b110);

#undef INSN

  // General register -> FP direction.
#define INSN(NAME, op31, type, rmode, opcode)                       \
  void NAME(FloatRegister Vd, Register Rn) {                        \
    float_int_convert(op31, type, rmode, opcode, (Register)Vd, Rn); \
  }

  INSN(fmovs, 0b000, 0b00, 0b00, 0b111);
  INSN(fmovd, 0b100, 0b01, 0b00, 0b111);

  INSN(scvtfws, 0b000, 0b00, 0b00, 0b010);
  INSN(scvtfs, 0b100, 0b00, 0b00, 0b010);
  INSN(scvtfwd, 0b000, 0b01, 0b00, 0b010);
  INSN(scvtfd, 0b100, 0b01, 0b00, 0b010);

  // INSN(fmovhid, 0b100, 0b10, 0b01, 0b111);

#undef INSN

  // Floating-point compare
  void float_compare(unsigned op31, unsigned type,
                     unsigned op, unsigned op2,
                     FloatRegister Vn, FloatRegister Vm = (FloatRegister)0) {
    starti;
    f(op31, 31, 29);
    f(0b11110, 28, 24);
    f(type, 23, 22), f(1, 21);
    f(op, 15, 14), f(0b1000, 13, 10), f(op2, 4, 0);
    rf(Vn, 5), rf(Vm, 16);
  }


#define INSN(NAME, op31, type, op, op2)           \
  void NAME(FloatRegister Vn, FloatRegister Vm) { \
    float_compare(op31, type, op, op2, Vn, Vm);   \
  }

  // Compare-with-zero form: the double argument may only be 0.0.
#define INSN1(NAME, op31, type, op, op2)        \
  void NAME(FloatRegister Vn, double d) {       \
    assert_cond(d == 0.0);                      \
    float_compare(op31, type, op, op2, Vn);     \
  }

  INSN(fcmps, 0b000, 0b00, 0b00, 0b00000);
  INSN1(fcmps, 0b000, 0b00, 0b00, 0b01000);
  // INSN(fcmpes, 0b000, 0b00, 0b00, 0b10000);
  // INSN1(fcmpes, 0b000, 0b00, 0b00, 0b11000);

  INSN(fcmpd, 0b000, 0b01, 0b00, 0b00000);
  INSN1(fcmpd, 0b000, 0b01, 0b00, 0b01000);
  // INSN(fcmped, 0b000, 0b01, 0b00, 0b10000);
  // INSN1(fcmped, 0b000, 0b01, 0b00, 0b11000);

#undef INSN
#undef INSN1

  // Floating-point Move (immediate)
private:
  // Packs an encodable FP constant into the 8-bit imm8 field.
  unsigned pack(double value);

  void fmov_imm(FloatRegister Vn, double value, unsigned size) {
    starti;
    f(0b00011110, 31, 24), f(size, 23, 22), f(1, 21);
    f(pack(value), 20, 13), f(0b10000000, 12, 5);
    rf(Vn, 0);
  }

public:

  // Zero is not encodable as an fmov immediate, so materialize it
  // from the zero register instead.
  void fmovs(FloatRegister Vn, double value) {
    if (value)
      fmov_imm(Vn, value, 0b00);
    else
      fmovs(Vn, zr);
  }
  void fmovd(FloatRegister Vn, double value) {
    if (value)
      fmov_imm(Vn, value, 0b01);
    else
      fmovd(Vn, zr);
  }

  // Floating-point rounding
  //  type: half-precision = 11
  //        single         = 00
  //        double         = 01
  //  rmode: A = Away     = 100
  //         I = current  = 111
  //         M = MinusInf = 010
  //         N = eveN     = 000
  //         P = PlusInf  = 001
  //         X = eXact    = 110
  //         Z = Zero     = 011
  void float_round(unsigned type, unsigned rmode, FloatRegister Rd, FloatRegister Rn) {
    starti;
    f(0b00011110, 31, 24);
    f(type, 23, 22);
    f(0b1001, 21, 18);
    f(rmode, 17, 15);
    f(0b10000, 14, 10);
    rf(Rn, 5), rf(Rd, 0);
  }
#define INSN(NAME, type, rmode)                   \
  void NAME(FloatRegister Vd, FloatRegister Vn) { \
    float_round(type, rmode, Vd, Vn);             \
  }

public:
  INSN(frintah, 0b11, 0b100);
  INSN(frintih, 0b11, 0b111);
  INSN(frintmh, 0b11, 0b010);
  INSN(frintnh, 0b11, 0b000);
  INSN(frintph, 0b11, 0b001);
  INSN(frintxh, 0b11, 0b110);
  INSN(frintzh, 0b11, 0b011);

  INSN(frintas, 0b00, 0b100);
  INSN(frintis, 0b00, 0b111);
  INSN(frintms, 0b00, 0b010);
  INSN(frintns, 0b00, 0b000);
  INSN(frintps, 0b00, 0b001);
  INSN(frintxs, 0b00, 0b110);
  INSN(frintzs, 0b00, 0b011);

  INSN(frintad, 0b01, 0b100);
  INSN(frintid, 0b01, 0b111);
  INSN(frintmd, 0b01, 0b010);
  INSN(frintnd, 0b01, 0b000);
  INSN(frintpd, 0b01, 0b001);
  INSN(frintxd, 0b01, 0b110);
  INSN(frintzd, 0b01, 0b011);
#undef INSN

  /* SIMD extensions
   *
   * We just use FloatRegister in the following. They are exactly the same
   * as SIMD registers.
   */
public:

  enum SIMD_Arrangement {
    T8B, T16B, T4H, T8H, T2S, T4S, T1D, T2D, T1Q
  };

  enum SIMD_RegVariant {
    B, H, S, D, Q
  };

private:
  static short SIMD_Size_in_bytes[];

public:
#define INSN(NAME, op)                                                   \
  void NAME(FloatRegister Rt, SIMD_RegVariant T, const Address &adr) {   \
    ld_st2((Register)Rt, adr, (int)T & 3, op + ((T==Q) ? 0b10:0b00), 1); \
  }                                                                      \

  INSN(ldr, 1);
  INSN(str, 0);

#undef INSN

private:

  // SIMD structure load/store: base-register form (no writeback).
  void ld_st(FloatRegister Vt, SIMD_Arrangement T, Register Xn, int op1, int op2) {
    starti;
    f(0,31), f((int)T & 1, 30);
    f(op1, 29, 21), f(0, 20, 16), f(op2, 15, 12);
    f((int)T >> 1, 11, 10), srf(Xn, 5), rf(Vt, 0);
  }
  // Post-index immediate form; the immediate must equal the total
  // transfer size, which is computed differently for replicate loads.
  void ld_st(FloatRegister Vt, SIMD_Arrangement T, Register Xn,
             int imm, int op1, int op2, int regs) {

    bool replicate = op2 >> 2 == 3;
    // post-index value (imm) is formed differently for replicate/non-replicate ld* instructions
    int expectedImmediate = replicate ? regs * (1 << (T >> 1)) : SIMD_Size_in_bytes[T] * regs;
    guarantee(T < T1Q , "incorrect arrangement");
    guarantee(imm == expectedImmediate, "bad offset");
    starti;
    f(0,31), f((int)T & 1, 30);
    f(op1 | 0b100, 29, 21), f(0b11111, 20, 16), f(op2, 15, 12);
    f((int)T >> 1, 11, 10), srf(Xn, 5), rf(Vt, 0);
  }
  // Post-index register form.
  void ld_st(FloatRegister Vt, SIMD_Arrangement T, Register Xn,
             Register Xm, int op1, int op2) {
    starti;
    f(0,31), f((int)T & 1, 30);
    f(op1 | 0b100, 29, 21), rf(Xm, 16), f(op2, 15, 12);
    f((int)T >> 1, 11, 10), srf(Xn, 5), rf(Vt, 0);
  }

  // Dispatch on addressing mode to one of the three encoders above.
  void ld_st(FloatRegister Vt, SIMD_Arrangement T, Address a, int op1, int op2, int regs) {
    switch (a.getMode()) {
    case Address::base_plus_offset:
      guarantee(a.offset() == 0, "no offset allowed here");
      ld_st(Vt, T, a.base(), op1, op2);
      break;
    case Address::post:
      ld_st(Vt, T, a.base(), a.offset(), op1, op2, regs);
      break;
    case Address::post_reg:
      ld_st(Vt, T, a.base(), a.index(), op1, op2);
      break;
    default:
      ShouldNotReachHere();
    }
  }

public:

#define INSN1(NAME, op1, op2)                                         \
  void NAME(FloatRegister Vt, SIMD_Arrangement T, const Address &a) { \
    ld_st(Vt, T, a, op1, op2, 1);                                     \
  }

#define INSN2(NAME, op1, op2)                                           \
  void NAME(FloatRegister Vt, FloatRegister Vt2, SIMD_Arrangement T, const Address &a) { \
    assert(Vt->successor() == Vt2, "Registers must be ordered");        \
    ld_st(Vt, T, a, op1, op2, 2);                                       \
  }

#define INSN3(NAME, op1, op2)                                           \
  void NAME(FloatRegister Vt, FloatRegister Vt2, FloatRegister Vt3,     \
            SIMD_Arrangement T, const Address &a) {                     \
    assert(Vt->successor() == Vt2 && Vt2->successor() == Vt3,           \
           "Registers must be ordered");                                \
    ld_st(Vt, T, a, op1, op2, 3);                                       \
  }

#define INSN4(NAME, op1, op2)                                           \
  void NAME(FloatRegister Vt, FloatRegister Vt2, FloatRegister Vt3,     \
            FloatRegister Vt4, SIMD_Arrangement T, const Address &a) {  \
    assert(Vt->successor() == Vt2 && Vt2->successor() == Vt3 &&         \
           Vt3->successor() == Vt4, "Registers must be ordered");       \
    ld_st(Vt, T, a, op1, op2, 4);                                       \
  }

  INSN1(ld1,  0b001100010, 0b0111);
  INSN2(ld1,  0b001100010, 0b1010);
  INSN3(ld1,  0b001100010, 0b0110);
  INSN4(ld1,  0b001100010, 0b0010);

  INSN2(ld2,  0b001100010, 0b1000);
  INSN3(ld3,  0b001100010, 0b0100);
  INSN4(ld4,  0b001100010, 0b0000);

  INSN1(st1,  0b001100000, 0b0111);
  INSN2(st1,  0b001100000, 0b1010);
  INSN3(st1,  0b001100000, 0b0110);
  INSN4(st1,  0b001100000, 0b0010);

  INSN2(st2,  0b001100000, 0b1000);
  INSN3(st3,  0b001100000, 0b0100);
  INSN4(st4,  0b001100000, 0b0000);

  INSN1(ld1r, 0b001101010, 0b1100);
  INSN2(ld2r, 0b001101011, 0b1100);
  INSN3(ld3r, 0b001101010, 0b1110);
  INSN4(ld4r, 0b001101011, 0b1110);

#undef INSN1
#undef INSN2
#undef INSN3
#undef INSN4

  // SIMD three-same logical operations (byte arrangements only).
#define INSN(NAME, opc)                                                 \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
    starti;                                                             \
    assert(T == T8B || T == T16B, "must be T8B or T16B");               \
    f(0, 31), f((int)T & 1, 30), f(opc, 29, 21);                        \
    rf(Vm, 16), f(0b000111, 15, 10), rf(Vn, 5), rf(Vd, 0);              \
  }

  INSN(eor, 0b101110001);
  INSN(orr, 0b001110101);
  INSN(andr, 0b001110001);
  INSN(bic, 0b001110011);
  INSN(bif, 0b101110111);
  INSN(bit, 0b101110101);
  INSN(bsl, 0b101110011);
  INSN(orn, 0b001110111);

#undef INSN

#define INSN(NAME, opc, opc2, acceptT2D)                                \
  void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
    guarantee(T != T1Q && T != T1D, "incorrect arrangement");           \
    if (!acceptT2D) guarantee(T != T2D, "incorrect arrangement");       \
    starti;                                                             \
    f(0, 31), f((int)T & 1, 30), f(opc, 29), f(0b01110, 28, 24);        \
    f((int)T >> 1, 23, 22), f(1, 21), rf(Vm,
16), f(opc2, 15, 10); \ 2245 rf(Vn, 5), rf(Vd, 0); \ 2246 } 2247 2248 INSN(addv, 0, 0b100001, true); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D 2249 INSN(subv, 1, 0b100001, true); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D 2250 INSN(mulv, 0, 0b100111, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S 2251 INSN(mlav, 0, 0b100101, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S 2252 INSN(mlsv, 1, 0b100101, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S 2253 INSN(sshl, 0, 0b010001, true); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D 2254 INSN(ushl, 1, 0b010001, true); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D 2255 INSN(addpv, 0, 0b101111, true); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D 2256 INSN(smullv, 0, 0b110000, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S 2257 INSN(umullv, 1, 0b110000, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S 2258 INSN(umlalv, 1, 0b100000, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S 2259 2260 #undef INSN 2261 2262 #define INSN(NAME, opc, opc2, accepted) \ 2263 void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { \ 2264 guarantee(T != T1Q && T != T1D, "incorrect arrangement"); \ 2265 if (accepted < 3) guarantee(T != T2D, "incorrect arrangement"); \ 2266 if (accepted < 2) guarantee(T != T2S, "incorrect arrangement"); \ 2267 if (accepted < 1) guarantee(T == T8B || T == T16B, "incorrect arrangement"); \ 2268 starti; \ 2269 f(0, 31), f((int)T & 1, 30), f(opc, 29), f(0b01110, 28, 24); \ 2270 f((int)T >> 1, 23, 22), f(opc2, 21, 10); \ 2271 rf(Vn, 5), rf(Vd, 0); \ 2272 } 2273 2274 INSN(absr, 0, 0b100000101110, 3); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D 2275 INSN(negr, 1, 0b100000101110, 3); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D 2276 INSN(notr, 1, 0b100000010110, 0); // accepted arrangements: T8B, 
T16B 2277 INSN(addv, 0, 0b110001101110, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S 2278 INSN(cls, 0, 0b100000010010, 2); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S 2279 INSN(clz, 1, 0b100000010010, 2); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S 2280 INSN(cnt, 0, 0b100000010110, 0); // accepted arrangements: T8B, T16B 2281 INSN(uaddlp, 1, 0b100000001010, 2); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S 2282 INSN(uaddlv, 1, 0b110000001110, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S 2283 2284 #undef INSN 2285 2286 #define INSN(NAME, opc) \ 2287 void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { \ 2288 starti; \ 2289 assert(T == T4S, "arrangement must be T4S"); \ 2290 f(0, 31), f((int)T & 1, 30), f(0b101110, 29, 24), f(opc, 23), \ 2291 f(T == T4S ? 0 : 1, 22), f(0b110000111110, 21, 10); rf(Vn, 5), rf(Vd, 0); \ 2292 } 2293 2294 INSN(fmaxv, 0); 2295 INSN(fminv, 1); 2296 2297 #undef INSN 2298 2299 #define INSN(NAME, op0, cmode0) \ 2300 void NAME(FloatRegister Vd, SIMD_Arrangement T, unsigned imm8, unsigned lsl = 0) { \ 2301 unsigned cmode = cmode0; \ 2302 unsigned op = op0; \ 2303 starti; \ 2304 assert(lsl == 0 || \ 2305 ((T == T4H || T == T8H) && lsl == 8) || \ 2306 ((T == T2S || T == T4S) && ((lsl >> 3) < 4) && ((lsl & 7) == 0)), "invalid shift");\ 2307 cmode |= lsl >> 2; \ 2308 if (T == T4H || T == T8H) cmode |= 0b1000; \ 2309 if (!(T == T4H || T == T8H || T == T2S || T == T4S)) { \ 2310 assert(op == 0 && cmode0 == 0, "must be MOVI"); \ 2311 cmode = 0b1110; \ 2312 if (T == T1D || T == T2D) op = 1; \ 2313 } \ 2314 f(0, 31), f((int)T & 1, 30), f(op, 29), f(0b0111100000, 28, 19); \ 2315 f(imm8 >> 5, 18, 16), f(cmode, 15, 12), f(0x01, 11, 10), f(imm8 & 0b11111, 9, 5); \ 2316 rf(Vd, 0); \ 2317 } 2318 2319 INSN(movi, 0, 0); 2320 INSN(orri, 0, 1); 2321 INSN(mvni, 1, 0); 2322 INSN(bici, 1, 1); 2323 2324 #undef INSN 2325 2326 #define INSN(NAME, op1, op2, op3) \ 2327 void NAME(FloatRegister Vd, 
SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \ 2328 starti; \ 2329 assert(T == T2S || T == T4S || T == T2D, "invalid arrangement"); \ 2330 f(0, 31), f((int)T & 1, 30), f(op1, 29), f(0b01110, 28, 24), f(op2, 23); \ 2331 f(T==T2D ? 1:0, 22); f(1, 21), rf(Vm, 16), f(op3, 15, 10), rf(Vn, 5), rf(Vd, 0); \ 2332 } 2333 2334 INSN(fadd, 0, 0, 0b110101); 2335 INSN(fdiv, 1, 0, 0b111111); 2336 INSN(fmul, 1, 0, 0b110111); 2337 INSN(fsub, 0, 1, 0b110101); 2338 INSN(fmla, 0, 0, 0b110011); 2339 INSN(fmls, 0, 1, 0b110011); 2340 INSN(fmax, 0, 0, 0b111101); 2341 INSN(fmin, 0, 1, 0b111101); 2342 2343 #undef INSN 2344 2345 #define INSN(NAME, opc) \ 2346 void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \ 2347 starti; \ 2348 assert(T == T4S, "arrangement must be T4S"); \ 2349 f(0b01011110000, 31, 21), rf(Vm, 16), f(opc, 15, 10), rf(Vn, 5), rf(Vd, 0); \ 2350 } 2351 2352 INSN(sha1c, 0b000000); 2353 INSN(sha1m, 0b001000); 2354 INSN(sha1p, 0b000100); 2355 INSN(sha1su0, 0b001100); 2356 INSN(sha256h2, 0b010100); 2357 INSN(sha256h, 0b010000); 2358 INSN(sha256su1, 0b011000); 2359 2360 #undef INSN 2361 2362 #define INSN(NAME, opc) \ 2363 void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { \ 2364 starti; \ 2365 assert(T == T4S, "arrangement must be T4S"); \ 2366 f(0b0101111000101000, 31, 16), f(opc, 15, 10), rf(Vn, 5), rf(Vd, 0); \ 2367 } 2368 2369 INSN(sha1h, 0b000010); 2370 INSN(sha1su1, 0b000110); 2371 INSN(sha256su0, 0b001010); 2372 2373 #undef INSN 2374 2375 #define INSN(NAME, opc) \ 2376 void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \ 2377 starti; \ 2378 assert(T == T2D, "arrangement must be T2D"); \ 2379 f(0b11001110011, 31, 21), rf(Vm, 16), f(opc, 15, 10), rf(Vn, 5), rf(Vd, 0); \ 2380 } 2381 2382 INSN(sha512h, 0b100000); 2383 INSN(sha512h2, 0b100001); 2384 INSN(sha512su1, 0b100010); 2385 2386 #undef INSN 2387 2388 #define INSN(NAME, opc) \ 2389 void NAME(FloatRegister Vd, 
SIMD_Arrangement T, FloatRegister Vn) { \ 2390 starti; \ 2391 assert(T == T2D, "arrangement must be T2D"); \ 2392 f(opc, 31, 10), rf(Vn, 5), rf(Vd, 0); \ 2393 } 2394 2395 INSN(sha512su0, 0b1100111011000000100000); 2396 2397 #undef INSN 2398 2399 #define INSN(NAME, opc) \ 2400 void NAME(FloatRegister Vd, FloatRegister Vn) { \ 2401 starti; \ 2402 f(opc, 31, 10), rf(Vn, 5), rf(Vd, 0); \ 2403 } 2404 2405 INSN(aese, 0b0100111000101000010010); 2406 INSN(aesd, 0b0100111000101000010110); 2407 INSN(aesmc, 0b0100111000101000011010); 2408 INSN(aesimc, 0b0100111000101000011110); 2409 2410 #undef INSN 2411 2412 #define INSN(NAME, op1, op2) \ 2413 void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm, int index = 0) { \ 2414 starti; \ 2415 assert(T == T2S || T == T4S || T == T2D, "invalid arrangement"); \ 2416 assert(index >= 0 && ((T == T2D && index <= 1) || (T != T2D && index <= 3)), "invalid index"); \ 2417 f(0, 31), f((int)T & 1, 30), f(op1, 29); f(0b011111, 28, 23); \ 2418 f(T == T2D ? 1 : 0, 22), f(T == T2D ? 0 : index & 1, 21), rf(Vm, 16); \ 2419 f(op2, 15, 12), f(T == T2D ? index : (index >> 1), 11), f(0, 10); \ 2420 rf(Vn, 5), rf(Vd, 0); \ 2421 } 2422 2423 // FMLA/FMLS - Vector - Scalar 2424 INSN(fmlavs, 0, 0b0001); 2425 INSN(fmlsvs, 0, 0b0101); 2426 // FMULX - Vector - Scalar 2427 INSN(fmulxvs, 1, 0b1001); 2428 2429 #undef INSN 2430 2431 // Floating-point Reciprocal Estimate 2432 void frecpe(FloatRegister Vd, FloatRegister Vn, SIMD_RegVariant type) { 2433 assert(type == D || type == S, "Wrong type for frecpe"); 2434 starti; 2435 f(0b010111101, 31, 23); 2436 f(type == D ? 
1 : 0, 22); 2437 f(0b100001110110, 21, 10); 2438 rf(Vn, 5), rf(Vd, 0); 2439 } 2440 2441 // (double) {a, b} -> (a + b) 2442 void faddpd(FloatRegister Vd, FloatRegister Vn) { 2443 starti; 2444 f(0b0111111001110000110110, 31, 10); 2445 rf(Vn, 5), rf(Vd, 0); 2446 } 2447 2448 void ins(FloatRegister Vd, SIMD_RegVariant T, FloatRegister Vn, int didx, int sidx) { 2449 starti; 2450 assert(T != Q, "invalid register variant"); 2451 f(0b01101110000, 31, 21), f(((didx<<1)|1)<<(int)T, 20, 16), f(0, 15); 2452 f(sidx<<(int)T, 14, 11), f(1, 10), rf(Vn, 5), rf(Vd, 0); 2453 } 2454 2455 void umov(Register Rd, FloatRegister Vn, SIMD_RegVariant T, int idx) { 2456 starti; 2457 f(0, 31), f(T==D ? 1:0, 30), f(0b001110000, 29, 21); 2458 f(((idx<<1)|1)<<(int)T, 20, 16), f(0b001111, 15, 10); 2459 rf(Vn, 5), rf(Rd, 0); 2460 } 2461 2462 #define INSN(NAME, opc, opc2, isSHR) \ 2463 void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, int shift){ \ 2464 starti; \ 2465 /* The encodings for the immh:immb fields (bits 22:16) in *SHR are \ 2466 * 0001 xxx 8B/16B, shift = 16 - UInt(immh:immb) \ 2467 * 001x xxx 4H/8H, shift = 32 - UInt(immh:immb) \ 2468 * 01xx xxx 2S/4S, shift = 64 - UInt(immh:immb) \ 2469 * 1xxx xxx 1D/2D, shift = 128 - UInt(immh:immb) \ 2470 * (1D is RESERVED) \ 2471 * for SHL shift is calculated as: \ 2472 * 0001 xxx 8B/16B, shift = UInt(immh:immb) - 8 \ 2473 * 001x xxx 4H/8H, shift = UInt(immh:immb) - 16 \ 2474 * 01xx xxx 2S/4S, shift = UInt(immh:immb) - 32 \ 2475 * 1xxx xxx 1D/2D, shift = UInt(immh:immb) - 64 \ 2476 * (1D is RESERVED) \ 2477 */ \ 2478 assert((1 << ((T>>1)+3)) > shift, "Invalid Shift value"); \ 2479 int cVal = (1 << (((T >> 1) + 3) + (isSHR ? 1 : 0))); \ 2480 int encodedShift = isSHR ? 
cVal - shift : cVal + shift; \ 2481 f(0, 31), f(T & 1, 30), f(opc, 29), f(0b011110, 28, 23), \ 2482 f(encodedShift, 22, 16); f(opc2, 15, 10), rf(Vn, 5), rf(Vd, 0); \ 2483 } 2484 2485 INSN(shl, 0, 0b010101, /* isSHR = */ false); 2486 INSN(sshr, 0, 0b000001, /* isSHR = */ true); 2487 INSN(ushr, 1, 0b000001, /* isSHR = */ true); 2488 2489 #undef INSN 2490 2491 private: 2492 void _ushll(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) { 2493 starti; 2494 /* The encodings for the immh:immb fields (bits 22:16) are 2495 * 0001 xxx 8H, 8B/16b shift = xxx 2496 * 001x xxx 4S, 4H/8H shift = xxxx 2497 * 01xx xxx 2D, 2S/4S shift = xxxxx 2498 * 1xxx xxx RESERVED 2499 */ 2500 assert((Tb >> 1) + 1 == (Ta >> 1), "Incompatible arrangement"); 2501 assert((1 << ((Tb>>1)+3)) > shift, "Invalid shift value"); 2502 f(0, 31), f(Tb & 1, 30), f(0b1011110, 29, 23), f((1 << ((Tb>>1)+3))|shift, 22, 16); 2503 f(0b101001, 15, 10), rf(Vn, 5), rf(Vd, 0); 2504 } 2505 2506 public: 2507 void ushll(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) { 2508 assert(Tb == T8B || Tb == T4H || Tb == T2S, "invalid arrangement"); 2509 _ushll(Vd, Ta, Vn, Tb, shift); 2510 } 2511 2512 void ushll2(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) { 2513 assert(Tb == T16B || Tb == T8H || Tb == T4S, "invalid arrangement"); 2514 _ushll(Vd, Ta, Vn, Tb, shift); 2515 } 2516 2517 // Move from general purpose register 2518 // mov Vd.T[index], Rn 2519 void mov(FloatRegister Vd, SIMD_Arrangement T, int index, Register Xn) { 2520 starti; 2521 f(0b01001110000, 31, 21), f(((1 << (T >> 1)) | (index << ((T >> 1) + 1))), 20, 16); 2522 f(0b000111, 15, 10), zrf(Xn, 5), rf(Vd, 0); 2523 } 2524 2525 // Move to general purpose register 2526 // mov Rd, Vn.T[index] 2527 void mov(Register Xd, FloatRegister Vn, SIMD_Arrangement T, int index) { 2528 guarantee(T >= T2S && T < T1Q, "only D and S arrangements are supported"); 
2529 starti; 2530 f(0, 31), f((T >= T1D) ? 1:0, 30), f(0b001110000, 29, 21); 2531 f(((1 << (T >> 1)) | (index << ((T >> 1) + 1))), 20, 16); 2532 f(0b001111, 15, 10), rf(Vn, 5), rf(Xd, 0); 2533 } 2534 2535 private: 2536 void _pmull(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, FloatRegister Vm, SIMD_Arrangement Tb) { 2537 starti; 2538 assert((Ta == T1Q && (Tb == T1D || Tb == T2D)) || 2539 (Ta == T8H && (Tb == T8B || Tb == T16B)), "Invalid Size specifier"); 2540 int size = (Ta == T1Q) ? 0b11 : 0b00; 2541 f(0, 31), f(Tb & 1, 30), f(0b001110, 29, 24), f(size, 23, 22); 2542 f(1, 21), rf(Vm, 16), f(0b111000, 15, 10), rf(Vn, 5), rf(Vd, 0); 2543 } 2544 2545 public: 2546 void pmull(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, FloatRegister Vm, SIMD_Arrangement Tb) { 2547 assert(Tb == T1D || Tb == T8B, "pmull assumes T1D or T8B as the second size specifier"); 2548 _pmull(Vd, Ta, Vn, Vm, Tb); 2549 } 2550 2551 void pmull2(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, FloatRegister Vm, SIMD_Arrangement Tb) { 2552 assert(Tb == T2D || Tb == T16B, "pmull2 assumes T2D or T16B as the second size specifier"); 2553 _pmull(Vd, Ta, Vn, Vm, Tb); 2554 } 2555 2556 void uqxtn(FloatRegister Vd, SIMD_Arrangement Tb, FloatRegister Vn, SIMD_Arrangement Ta) { 2557 starti; 2558 int size_b = (int)Tb >> 1; 2559 int size_a = (int)Ta >> 1; 2560 assert(size_b < 3 && size_b == size_a - 1, "Invalid size specifier"); 2561 f(0, 31), f(Tb & 1, 30), f(0b101110, 29, 24), f(size_b, 23, 22); 2562 f(0b100001010010, 21, 10), rf(Vn, 5), rf(Vd, 0); 2563 } 2564 2565 void dup(FloatRegister Vd, SIMD_Arrangement T, Register Xs) 2566 { 2567 starti; 2568 assert(T != T1D, "reserved encoding"); 2569 f(0,31), f((int)T & 1, 30), f(0b001110000, 29, 21); 2570 f((1 << (T >> 1)), 20, 16), f(0b000011, 15, 10), zrf(Xs, 5), rf(Vd, 0); 2571 } 2572 2573 void dup(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, int index = 0) 2574 { 2575 starti; 2576 assert(T != T1D, "reserved encoding"); 2577 
f(0, 31), f((int)T & 1, 30), f(0b001110000, 29, 21); 2578 f(((1 << (T >> 1)) | (index << ((T >> 1) + 1))), 20, 16); 2579 f(0b000001, 15, 10), rf(Vn, 5), rf(Vd, 0); 2580 } 2581 2582 // AdvSIMD ZIP/UZP/TRN 2583 #define INSN(NAME, opcode) \ 2584 void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \ 2585 guarantee(T != T1D && T != T1Q, "invalid arrangement"); \ 2586 starti; \ 2587 f(0, 31), f(0b001110, 29, 24), f(0, 21), f(0, 15); \ 2588 f(opcode, 14, 12), f(0b10, 11, 10); \ 2589 rf(Vm, 16), rf(Vn, 5), rf(Vd, 0); \ 2590 f(T & 1, 30), f(T >> 1, 23, 22); \ 2591 } 2592 2593 INSN(uzp1, 0b001); 2594 INSN(trn1, 0b010); 2595 INSN(zip1, 0b011); 2596 INSN(uzp2, 0b101); 2597 INSN(trn2, 0b110); 2598 INSN(zip2, 0b111); 2599 2600 #undef INSN 2601 2602 // CRC32 instructions 2603 #define INSN(NAME, c, sf, sz) \ 2604 void NAME(Register Rd, Register Rn, Register Rm) { \ 2605 starti; \ 2606 f(sf, 31), f(0b0011010110, 30, 21), f(0b010, 15, 13), f(c, 12); \ 2607 f(sz, 11, 10), rf(Rm, 16), rf(Rn, 5), rf(Rd, 0); \ 2608 } 2609 2610 INSN(crc32b, 0, 0, 0b00); 2611 INSN(crc32h, 0, 0, 0b01); 2612 INSN(crc32w, 0, 0, 0b10); 2613 INSN(crc32x, 0, 1, 0b11); 2614 INSN(crc32cb, 1, 0, 0b00); 2615 INSN(crc32ch, 1, 0, 0b01); 2616 INSN(crc32cw, 1, 0, 0b10); 2617 INSN(crc32cx, 1, 1, 0b11); 2618 2619 #undef INSN 2620 2621 // Table vector lookup 2622 #define INSN(NAME, op) \ 2623 void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, unsigned registers, FloatRegister Vm) { \ 2624 starti; \ 2625 assert(T == T8B || T == T16B, "invalid arrangement"); \ 2626 assert(0 < registers && registers <= 4, "invalid number of registers"); \ 2627 f(0, 31), f((int)T & 1, 30), f(0b001110000, 29, 21), rf(Vm, 16), f(0, 15); \ 2628 f(registers - 1, 14, 13), f(op, 12),f(0b00, 11, 10), rf(Vn, 5), rf(Vd, 0); \ 2629 } 2630 2631 INSN(tbl, 0); 2632 INSN(tbx, 1); 2633 2634 #undef INSN 2635 2636 // AdvSIMD two-reg misc 2637 // In this instruction group, the 2 bits in the size field ([23:22]) 
may be 2638 // fixed or determined by the "SIMD_Arrangement T", or both. The additional 2639 // parameter "tmask" is a 2-bit mask used to indicate which bits in the size 2640 // field are determined by the SIMD_Arrangement. The bit of "tmask" should be 2641 // set to 1 if corresponding bit marked as "x" in the ArmARM. 2642 #define INSN(NAME, U, size, tmask, opcode) \ 2643 void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { \ 2644 starti; \ 2645 assert((ASSERTION), MSG); \ 2646 f(0, 31), f((int)T & 1, 30), f(U, 29), f(0b01110, 28, 24); \ 2647 f(size | ((int)(T >> 1) & tmask), 23, 22), f(0b10000, 21, 17); \ 2648 f(opcode, 16, 12), f(0b10, 11, 10), rf(Vn, 5), rf(Vd, 0); \ 2649 } 2650 2651 #define MSG "invalid arrangement" 2652 2653 #define ASSERTION (T == T2S || T == T4S || T == T2D) 2654 INSN(fsqrt, 1, 0b10, 0b01, 0b11111); 2655 INSN(fabs, 0, 0b10, 0b01, 0b01111); 2656 INSN(fneg, 1, 0b10, 0b01, 0b01111); 2657 INSN(frintn, 0, 0b00, 0b01, 0b11000); 2658 INSN(frintm, 0, 0b00, 0b01, 0b11001); 2659 INSN(frintp, 0, 0b10, 0b01, 0b11000); 2660 #undef ASSERTION 2661 2662 #define ASSERTION (T == T8B || T == T16B || T == T4H || T == T8H || T == T2S || T == T4S) 2663 INSN(rev64, 0, 0b00, 0b11, 0b00000); 2664 #undef ASSERTION 2665 2666 #define ASSERTION (T == T8B || T == T16B || T == T4H || T == T8H) 2667 INSN(rev32, 1, 0b00, 0b11, 0b00000); 2668 #undef ASSERTION 2669 2670 #define ASSERTION (T == T8B || T == T16B) 2671 INSN(rev16, 0, 0b00, 0b11, 0b00001); 2672 INSN(rbit, 1, 0b01, 0b00, 0b00101); 2673 #undef ASSERTION 2674 2675 #undef MSG 2676 2677 #undef INSN 2678 2679 void ext(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm, int index) 2680 { 2681 starti; 2682 assert(T == T8B || T == T16B, "invalid arrangement"); 2683 assert((T == T8B && index <= 0b0111) || (T == T16B && index <= 0b1111), "Invalid index value"); 2684 f(0, 31), f((int)T & 1, 30), f(0b101110000, 29, 21); 2685 rf(Vm, 16), f(0, 15), f(index, 14, 11); 2686 f(0, 10), rf(Vn, 5), 
rf(Vd, 0); 2687 } 2688 2689 Assembler(CodeBuffer* code) : AbstractAssembler(code) { 2690 } 2691 2692 virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, 2693 Register tmp, 2694 int offset) { 2695 ShouldNotCallThis(); 2696 return RegisterOrConstant(); 2697 } 2698 2699 // Stack overflow checking 2700 virtual void bang_stack_with_offset(int offset); 2701 2702 static bool operand_valid_for_logical_immediate(bool is32, uint64_t imm); 2703 static bool operand_valid_for_add_sub_immediate(int64_t imm); 2704 static bool operand_valid_for_float_immediate(double imm); 2705 2706 void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0); 2707 void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0); 2708 }; 2709 2710 inline Assembler::Membar_mask_bits operator|(Assembler::Membar_mask_bits a, 2711 Assembler::Membar_mask_bits b) { 2712 return Assembler::Membar_mask_bits(unsigned(a)|unsigned(b)); 2713 } 2714 2715 Instruction_aarch64::~Instruction_aarch64() { 2716 assem->emit(); 2717 } 2718 2719 #undef starti 2720 2721 // Invert a condition 2722 inline const Assembler::Condition operator~(const Assembler::Condition cond) { 2723 return Assembler::Condition(int(cond) ^ 1); 2724 } 2725 2726 class BiasedLockingCounters; 2727 2728 extern "C" void das(uint64_t start, int len); 2729 2730 #endif // CPU_AARCH64_ASSEMBLER_AARCH64_HPP