/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP
#define CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP

#include "asm/assembler.hpp"
#include "oops/compressedOops.hpp"

class ciValueKlass;

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;

 public:
  using Assembler::mov;
  using Assembler::movi;

 protected:

  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  virtual void call_VM_leaf_base(
    address entry_point,          // the entry point
    int     number_of_arguments,  // the number of arguments to pop after the call
    Label*  retaddr = NULL
  );

  virtual void call_VM_leaf_base(
    address entry_point,          // the entry point
    int     number_of_arguments,  // the number of arguments to pop after the call
    Label&  retaddr) {
    call_VM_leaf_base(entry_point, number_of_arguments, &retaddr);
  }

  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg) then rthread will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
  virtual void call_VM_base(           // returns the register containing the thread upon return
    Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,              // the thread if computed before; use noreg otherwise
    Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,              // the entry point
    int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions          // whether to check for pending exceptions after return
  );

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // True if an XOR can be used to expand narrow klass references.
  bool use_XOR_for_compressed_class_base;

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {
    use_XOR_for_compressed_class_base
      = operand_valid_for_logical_immediate
           (/*is32*/false, (uint64_t)CompressedKlassPointers::base())
         && ((uint64_t)CompressedKlassPointers::base()
             > (1UL << log2_intptr(CompressedKlassPointers::range())));
  }

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  void safepoint_poll(Label& slow_path);
  void safepoint_poll_acquire(Label& slow_path);

  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg is killed.
  // tmp_reg must be supplied and must not be rscratch1 or rscratch2
  // Optional slow case is for implementations (interpreter and C1) which branch to
  // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
  // Returns offset of first potentially-faulting instruction for null
  // check info (currently consumed only by C1). If
  // swap_reg_contains_mark is true then returns -1 as it is assumed
  // the calling code has already passed any potential faults.
  int biased_locking_enter(Register lock_reg, Register obj_reg,
                           Register swap_reg, Register tmp_reg,
                           bool swap_reg_contains_mark,
                           Label& done, Label* slow_case = NULL,
                           BiasedLockingCounters* counters = NULL);
  void biased_locking_exit(Register obj_reg, Register temp_reg, Label& done);
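
  // Illustrative usage sketch (not from the original header; register choices
  // are hypothetical and the CAS-based slow-path code is elided):
  //
  //   Label done, slow;
  //   biased_locking_enter(box, obj, swap, tmp,
  //                        /*swap_reg_contains_mark*/false, done, &slow);
  //   bind(slow);   // biasing failed: fall back to a CAS-based lock sequence
  //   ...
  //   bind(done);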

  // Helper functions for statistics gathering.
  // Unconditional atomic increment.
  void atomic_incw(Register counter_addr, Register tmp, Register tmp2);
  void atomic_incw(Address counter_addr, Register tmp1, Register tmp2, Register tmp3) {
    lea(tmp1, counter_addr);
    atomic_incw(tmp1, tmp2, tmp3);
  }
  // Load Effective Address
  void lea(Register r, const Address &a) {
    InstructionMark im(this);
    code_section()->relocate(inst_mark(), a.rspec());
    a.lea(this, r);
  }

  void addmw(Address a, Register incr, Register scratch) {
    ldrw(scratch, a);
    addw(scratch, scratch, incr);
    strw(scratch, a);
  }

  // Add constant to memory word
  void addmw(Address a, int imm, Register scratch) {
    ldrw(scratch, a);
    if (imm > 0)
      addw(scratch, scratch, (unsigned)imm);
    else
      subw(scratch, scratch, (unsigned)-imm);
    strw(scratch, a);
  }

  void bind(Label& L) {
    Assembler::bind(L);
    code()->clear_last_insn();
  }

  void membar(Membar_mask_bits order_constraint);

  using Assembler::ldr;
  using Assembler::str;

  void ldr(Register Rx, const Address &adr);
  void ldrw(Register Rw, const Address &adr);
  void str(Register Rx, const Address &adr);
  void strw(Register Rx, const Address &adr);

  // Frame creation and destruction shared between JITs.
  void build_frame(int framesize);
  void remove_frame(int framesize);

  virtual void _call_Unimplemented(address call_site) {
    mov(rscratch2, call_site);
    haltsim();
  }

#define call_Unimplemented() _call_Unimplemented((address)__PRETTY_FUNCTION__)

  virtual void notify(int type);

  // aliases defined in AARCH64 spec

  template<class T>
  inline void cmpw(Register Rd, T imm) { subsw(zr, Rd, imm); }

  inline void cmp(Register Rd, unsigned char imm8) { subs(zr, Rd, imm8); }
  inline void cmp(Register Rd, unsigned imm) __attribute__ ((deprecated));

  inline void cmnw(Register Rd, unsigned imm) { addsw(zr, Rd, imm); }
  inline void cmn(Register Rd, unsigned imm) { adds(zr, Rd, imm); }

  void cset(Register Rd, Assembler::Condition cond) {
    csinc(Rd, zr, zr, ~cond);
  }
  void csetw(Register Rd, Assembler::Condition cond) {
    csincw(Rd, zr, zr, ~cond);
  }
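
  // Example (illustrative only): materialize a boolean from a comparison,
  // r0 = (r1 < r2) ? 1 : 0 for signed operands:
  //
  //   cmp(r1, r2);
  //   cset(r0, Assembler::LT);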

  void cneg(Register Rd, Register Rn, Assembler::Condition cond) {
    csneg(Rd, Rn, Rn, ~cond);
  }
  void cnegw(Register Rd, Register Rn, Assembler::Condition cond) {
    csnegw(Rd, Rn, Rn, ~cond);
  }

  inline void movw(Register Rd, Register Rn) {
    if (Rd == sp || Rn == sp) {
      addw(Rd, Rn, 0U);
    } else {
      orrw(Rd, zr, Rn);
    }
  }
  inline void mov(Register Rd, Register Rn) {
    assert(Rd != r31_sp && Rn != r31_sp, "should be");
    if (Rd == Rn) {
      // mov to self is a no-op; emit nothing
    } else if (Rd == sp || Rn == sp) {
      add(Rd, Rn, 0U);
    } else {
      orr(Rd, zr, Rn);
    }
  }

  inline void moviw(Register Rd, unsigned imm) { orrw(Rd, zr, imm); }
  inline void movi(Register Rd, unsigned imm) { orr(Rd, zr, imm); }

  inline void tstw(Register Rd, Register Rn) { andsw(zr, Rd, Rn); }
  inline void tst(Register Rd, Register Rn) { ands(zr, Rd, Rn); }

  inline void tstw(Register Rd, uint64_t imm) { andsw(zr, Rd, imm); }
  inline void tst(Register Rd, uint64_t imm) { ands(zr, Rd, imm); }

  inline void bfiw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    bfmw(Rd, Rn, ((32 - lsb) & 31), (width - 1));
  }
  inline void bfi(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    bfm(Rd, Rn, ((64 - lsb) & 63), (width - 1));
  }

  inline void bfxilw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    bfmw(Rd, Rn, lsb, (lsb + width - 1));
  }
  inline void bfxil(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    bfm(Rd, Rn, lsb, (lsb + width - 1));
  }

  inline void sbfizw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    sbfmw(Rd, Rn, ((32 - lsb) & 31), (width - 1));
  }
  inline void sbfiz(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    sbfm(Rd, Rn, ((64 - lsb) & 63), (width - 1));
  }

  inline void sbfxw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    sbfmw(Rd, Rn, lsb, (lsb + width - 1));
  }
  inline void sbfx(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    sbfm(Rd, Rn, lsb, (lsb + width - 1));
  }

  inline void ubfizw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    ubfmw(Rd, Rn, ((32 - lsb) & 31), (width - 1));
  }
  inline void ubfiz(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    ubfm(Rd, Rn, ((64 - lsb) & 63), (width - 1));
  }

  inline void ubfxw(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    ubfmw(Rd, Rn, lsb, (lsb + width - 1));
  }
  inline void ubfx(Register Rd, Register Rn, unsigned lsb, unsigned width) {
    ubfm(Rd, Rn, lsb, (lsb + width - 1));
  }

  inline void asrw(Register Rd, Register Rn, unsigned imm) {
    sbfmw(Rd, Rn, imm, 31);
  }

  inline void asr(Register Rd, Register Rn, unsigned imm) {
    sbfm(Rd, Rn, imm, 63);
  }

  inline void lslw(Register Rd, Register Rn, unsigned imm) {
    ubfmw(Rd, Rn, ((32 - imm) & 31), (31 - imm));
  }

  inline void lsl(Register Rd, Register Rn, unsigned imm) {
    ubfm(Rd, Rn, ((64 - imm) & 63), (63 - imm));
  }

  inline void lsrw(Register Rd, Register Rn, unsigned imm) {
    ubfmw(Rd, Rn, imm, 31);
  }

  inline void lsr(Register Rd, Register Rn, unsigned imm) {
    ubfm(Rd, Rn, imm, 63);
  }

  inline void rorw(Register Rd, Register Rn, unsigned imm) {
    extrw(Rd, Rn, Rn, imm);
  }

  inline void ror(Register Rd, Register Rn, unsigned imm) {
    extr(Rd, Rn, Rn, imm);
  }

  inline void sxtbw(Register Rd, Register Rn) {
    sbfmw(Rd, Rn, 0, 7);
  }
  inline void sxthw(Register Rd, Register Rn) {
    sbfmw(Rd, Rn, 0, 15);
  }
  inline void sxtb(Register Rd, Register Rn) {
    sbfm(Rd, Rn, 0, 7);
  }
  inline void sxth(Register Rd, Register Rn) {
    sbfm(Rd, Rn, 0, 15);
  }
  inline void sxtw(Register Rd, Register Rn) {
    sbfm(Rd, Rn, 0, 31);
  }

  inline void uxtbw(Register Rd, Register Rn) {
    ubfmw(Rd, Rn, 0, 7);
  }
  inline void uxthw(Register Rd, Register Rn) {
    ubfmw(Rd, Rn, 0, 15);
  }
  inline void uxtb(Register Rd, Register Rn) {
    ubfm(Rd, Rn, 0, 7);
  }
  inline void uxth(Register Rd, Register Rn) {
    ubfm(Rd, Rn, 0, 15);
  }
  inline void uxtw(Register Rd, Register Rn) {
    ubfm(Rd, Rn, 0, 31);
  }
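
  // Worked example (illustrative): ubfx(r0, r1, 8, 8) extracts bits 15:8 of
  // r1 into bits 7:0 of r0 and zero-extends, i.e. r0 = (r1 >> 8) & 0xff.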

  inline void cmnw(Register Rn, Register Rm) {
    addsw(zr, Rn, Rm);
  }
  inline void cmn(Register Rn, Register Rm) {
    adds(zr, Rn, Rm);
  }

  inline void cmpw(Register Rn, Register Rm) {
    subsw(zr, Rn, Rm);
  }
  inline void cmp(Register Rn, Register Rm) {
    subs(zr, Rn, Rm);
  }

  inline void negw(Register Rd, Register Rn) {
    subw(Rd, zr, Rn);
  }

  inline void neg(Register Rd, Register Rn) {
    sub(Rd, zr, Rn);
  }

  inline void negsw(Register Rd, Register Rn) {
    subsw(Rd, zr, Rn);
  }

  inline void negs(Register Rd, Register Rn) {
    subs(Rd, zr, Rn);
  }

  inline void cmnw(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) {
    addsw(zr, Rn, Rm, kind, shift);
  }
  inline void cmn(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) {
    adds(zr, Rn, Rm, kind, shift);
  }

  inline void cmpw(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) {
    subsw(zr, Rn, Rm, kind, shift);
  }
  inline void cmp(Register Rn, Register Rm, enum shift_kind kind, unsigned shift = 0) {
    subs(zr, Rn, Rm, kind, shift);
  }

  inline void negw(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) {
    subw(Rd, zr, Rn, kind, shift);
  }

  inline void neg(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) {
    sub(Rd, zr, Rn, kind, shift);
  }

  inline void negsw(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) {
    subsw(Rd, zr, Rn, kind, shift);
  }

  inline void negs(Register Rd, Register Rn, enum shift_kind kind, unsigned shift = 0) {
    subs(Rd, zr, Rn, kind, shift);
  }

  inline void mnegw(Register Rd, Register Rn, Register Rm) {
    msubw(Rd, Rn, Rm, zr);
  }
  inline void mneg(Register Rd, Register Rn, Register Rm) {
    msub(Rd, Rn, Rm, zr);
  }

  inline void mulw(Register Rd, Register Rn, Register Rm) {
    maddw(Rd, Rn, Rm, zr);
  }
  inline void mul(Register Rd, Register Rn, Register Rm) {
    madd(Rd, Rn, Rm, zr);
  }

  inline void smnegl(Register Rd, Register Rn, Register Rm) {
    smsubl(Rd, Rn, Rm, zr);
  }
  inline void smull(Register Rd, Register Rn, Register Rm) {
    smaddl(Rd, Rn, Rm, zr);
  }

  inline void umnegl(Register Rd, Register Rn, Register Rm) {
    umsubl(Rd, Rn, Rm, zr);
  }
  inline void umull(Register Rd, Register Rn, Register Rm) {
    umaddl(Rd, Rn, Rm, zr);
  }

// On CPUs flagged CPU_A53MAC, emit a leading nop before a multiply-accumulate
// whose accumulator is a real register, as a hardware-erratum workaround.
#define WRAP(INSN)                                                      \
  void INSN(Register Rd, Register Rn, Register Rm, Register Ra) {      \
    if ((VM_Version::features() & VM_Version::CPU_A53MAC) && Ra != zr) \
      nop();                                                            \
    Assembler::INSN(Rd, Rn, Rm, Ra);                                   \
  }

  WRAP(madd) WRAP(msub) WRAP(maddw) WRAP(msubw)
  WRAP(smaddl) WRAP(smsubl) WRAP(umaddl) WRAP(umsubl)
#undef WRAP


  // macro assembly operations needed for aarch64

  // first two private routines for loading 32 bit or 64 bit constants
 private:

  void mov_immediate64(Register dst, u_int64_t imm64);
  void mov_immediate32(Register dst, u_int32_t imm32);

  int push(unsigned int bitset, Register stack);
  int pop(unsigned int bitset, Register stack);

  void mov(Register dst, Address a);

 public:
  void push(RegSet regs, Register stack) { if (regs.bits()) push(regs.bits(), stack); }
  void pop(RegSet regs, Register stack) { if (regs.bits()) pop(regs.bits(), stack); }
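
  // Example (illustrative): save and restore an ad-hoc register set around a
  // code sequence; the RegSet composition helpers are assumed from
  // register_aarch64.hpp:
  //
  //   RegSet saved = RegSet::of(r0, r1) + RegSet::range(r19, r26);
  //   push(saved, sp);
  //   ...
  //   pop(saved, sp);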

  // Push and pop everything that might be clobbered by a native
  // runtime call except rscratch1 and rscratch2. (They are always
  // scratch, so we don't have to protect them.) Only save the lower
  // 64 bits of each vector register.
  void push_call_clobbered_registers();
  void pop_call_clobbered_registers();

  // now mov instructions for loading absolute addresses and 32 or
  // 64 bit integers

  inline void mov(Register dst, address addr)
  {
    mov_immediate64(dst, (u_int64_t)addr);
  }

  inline void mov(Register dst, u_int64_t imm64)
  {
    mov_immediate64(dst, imm64);
  }

  inline void movw(Register dst, u_int32_t imm32)
  {
    mov_immediate32(dst, imm32);
  }

  inline void mov(Register dst, long l)
  {
    mov(dst, (u_int64_t)l);
  }

  inline void mov(Register dst, int i)
  {
    mov(dst, (long)i);
  }

  void mov(Register dst, RegisterOrConstant src) {
    if (src.is_register())
      mov(dst, src.as_register());
    else
      mov(dst, src.as_constant());
  }

  void movptr(Register r, uintptr_t imm64);

  void mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32);

  void mov(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {
    orr(Vd, T, Vn, Vn);
  }

 public:

  // Generalized Test Bit And Branch, including a "far" variety which
  // spans more than 32KiB.
  void tbr(Condition cond, Register Rt, int bitpos, Label &dest, bool far = false) {
    assert(cond == EQ || cond == NE, "must be");

    // A far branch inverts the condition and hops over an unconditional
    // branch that can reach anywhere in the code cache.
    if (far)
      cond = ~cond;

    void (Assembler::* branch)(Register Rt, int bitpos, Label &L);
    if (cond == Assembler::EQ)
      branch = &Assembler::tbz;
    else
      branch = &Assembler::tbnz;

    if (far) {
      Label L;
      (this->*branch)(Rt, bitpos, L);
      b(dest);
      bind(L);
    } else {
      (this->*branch)(Rt, bitpos, dest);
    }
  }

  // macro instructions for accessing and updating floating point
  // status register
  //
  // FPSR : op1 == 011
  //        CRn == 0100
  //        CRm == 0100
  //        op2 == 001

  inline void get_fpsr(Register reg)
  {
    mrs(0b11, 0b0100, 0b0100, 0b001, reg);
  }

  inline void set_fpsr(Register reg)
  {
    msr(0b011, 0b0100, 0b0100, 0b001, reg);
  }

  inline void clear_fpsr()
  {
    msr(0b011, 0b0100, 0b0100, 0b001, zr);
  }

  // DCZID_EL0: op1 == 011
  //            CRn == 0000
  //            CRm == 0000
  //            op2 == 111
  inline void get_dczid_el0(Register reg)
  {
    mrs(0b011, 0b0000, 0b0000, 0b111, reg);
  }

  // CTR_EL0:   op1 == 011
  //            CRn == 0000
  //            CRm == 0000
  //            op2 == 001
  inline void get_ctr_el0(Register reg)
  {
    mrs(0b011, 0b0000, 0b0000, 0b001, reg);
  }

  // idiv variant which deals with MINLONG as dividend and -1 as divisor
  int corrected_idivl(Register result, Register ra, Register rb,
                      bool want_remainder, Register tmp = rscratch1);
  int corrected_idivq(Register result, Register ra, Register rb,
                      bool want_remainder, Register tmp = rscratch1);
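
  // Note (illustrative): these exist because Java division semantics require
  // min_jint / -1 == min_jint (and min_jlong / -1 == min_jlong) with a zero
  // remainder, a corner case a naive divide sequence does not guarantee.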

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  virtual void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);

  void test_klass_is_value(Register klass, Register temp_reg, Label& is_value);

  void test_field_is_flattenable(Register flags, Register temp_reg, Label& is_flattenable);
  void test_field_is_not_flattenable(Register flags, Register temp_reg, Label& notFlattenable);
  void test_field_is_flattened(Register flags, Register temp_reg, Label& is_flattened);

  // Check whether a klass/oop is a flat value type array (oop->_klass->_layout_helper & vt_bit)
  void test_flattened_array_oop(Register klass, Register temp_reg, Label& is_flattened_array);
  void test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array);

  static address target_addr_for_insn(address insn_addr, unsigned insn);
  static address target_addr_for_insn(address insn_addr) {
    unsigned insn = *(unsigned*)insn_addr;
    return target_addr_for_insn(insn_addr, insn);
  }

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  static int pd_patch_instruction_size(address branch, address target);
  static void pd_patch_instruction(address branch, address target, const char* file = NULL, int line = 0) {
    pd_patch_instruction_size(branch, target);
  }
  static address pd_call_destination(address branch) {
    return target_addr_for_insn(branch);
  }
#ifndef PRODUCT
  static void pd_print_patched_instruction(address branch);
#endif

  static int patch_oop(address insn_addr, address o);
  static int patch_narrow_klass(address insn_addr, narrowKlass n);

  address emit_trampoline_stub(int insts_call_instruction_offset, address target);
  void emit_static_call_stub();

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  int load_signed_byte32(Register dst, Address src);
  int load_signed_short32(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);
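
  // Example (illustrative): load a 2-byte field with sign extension, where
  // robj/off are placeholders for an oop register and a field offset:
  //
  //   load_sized_value(r0, Address(robj, off), sizeof(jshort), /*is_signed*/true);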

  // Support for inc/dec with optimal instruction selection depending on value

  // x86_64 aliases an unqualified register/address increment and
  // decrement to call incrementq and decrementq but also supports
  // explicitly sized calls to incrementq/decrementq or
  // incrementl/decrementl

  // for aarch64 the proper convention would be to use
  // increment/decrement for 64 bit operations and
  // incrementw/decrementw for 32 bit operations. so when porting
  // x86_64 code we can leave calls to increment/decrement as is,
  // replace incrementq/decrementq with increment/decrement and
  // replace incrementl/decrementl with incrementw/decrementw.

  // n.b. increment/decrement calls with an Address destination will
  // need to use a scratch register to load the value to be
  // incremented. increment/decrement calls which add or subtract a
  // constant value greater than 2^12 will need to use a 2nd scratch
  // register to hold the constant. so, a register increment/decrement
  // may trash rscratch2 and an address increment/decrement may trash
  // rscratch1 and rscratch2.

  void decrementw(Address dst, int value = 1);
  void decrementw(Register reg, int value = 1);

  void decrement(Register reg, int value = 1);
  void decrement(Address dst, int value = 1);

  void incrementw(Address dst, int value = 1);
  void incrementw(Register reg, int value = 1);

  void increment(Register reg, int value = 1);
  void increment(Address dst, int value = 1);


  // Alignment
  void align(int modulus);

  // Stack frame creation/removal
  void enter()
  {
    stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
    mov(rfp, sp);
  }
  void leave()
  {
    mov(sp, rfp);
    ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
  }

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);


  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
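  //
  // Example (illustrative; the runtime entry is a placeholder, not a real
  // symbol): an upcall that returns an oop in r0 and takes one argument:
  //
  //   call_VM(r0, CAST_FROM_FN_PTR(address, SomeRuntime::entry), c_rarg1);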

  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result  (Register oop_result, Register thread);
  void get_vm_result_2(Register metadata_result, Register thread);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address  last_java_pc,
                           Register scratch);

  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           Label&   last_java_pc,
                           Register scratch);

  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           Register last_java_pc,
                           Register scratch);

  void reset_last_Java_frame(Register thread);

  // thread in the default location (rthread)
  void reset_last_Java_frame(bool clear_fp);

  // Stores
  void store_check(Register obj);              // store check for obj - register is destroyed afterwards
  void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed)

  void resolve_jobject(Register value, Register thread, Register tmp);

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // oop manipulations
  void load_metadata(Register dst, Register src);
  void load_storage_props(Register dst, Register src);

  void load_klass(Register dst, Register src);
  void store_klass(Register dst, Register src);
  void cmp_klass(Register oop, Register trial_klass, Register tmp);

  void resolve_oop_handle(Register result, Register tmp = r5);
  void load_mirror(Register dst, Register method, Register tmp = r5);

  void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                      Register tmp1, Register tmp_thread);

  void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
                       Register tmp1, Register tmp_thread, Register tmp3 = noreg);

  // Resolves obj for access. Result is placed in the same register.
  // All other registers are preserved.
  void resolve(DecoratorSet decorators, Register obj);

  void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
                     Register thread_tmp = noreg, DecoratorSet decorators = 0);

  void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
                              Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void store_heap_oop(Address dst, Register src, Register tmp1 = noreg,
                      Register tmp_thread = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
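
  // Example (illustrative): GC-barrier-aware oop field load; IN_HEAP is a
  // standard access decorator, robj/off are placeholders:
  //
  //   load_heap_oop(r0, Address(robj, off), rscratch2, rthread, IN_HEAP);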

  // currently unimplemented
  // Used for storing NULL. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

  void load_prototype_header(Register dst, Register src);

  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like NULL) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register d, Register s);
  void encode_heap_oop(Register r) { encode_heap_oop(r, r); }
  void decode_heap_oop(Register d, Register s);
  void decode_heap_oop(Register r) { decode_heap_oop(r, r); }
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);

  void encode_klass_not_null(Register r);
  void decode_klass_not_null(Register r);
  void encode_klass_not_null(Register dst, Register src);
  void decode_klass_not_null(Register dst, Register src);

  void set_narrow_klass(Register dst, Klass* k);

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

  void push_CPU_state(bool save_vectors = false);
  void pop_CPU_state(bool restore_vectors = false);

  // Round reg up to a multiple of modulus (which must be a power of two)
  void round_to(Register reg, int modulus);

  // allocation
  void eden_allocate(
    Register obj,               // result: pointer to object after successful allocation
    Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes, // object size in bytes if known at compile time
    Register t1,                // temp register
    Label&   slow_case          // continuation point if fast allocation fails
  );
  void tlab_allocate(
    Register obj,               // result: pointer to object after successful allocation
    Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes, // object size in bytes if known at compile time
    Register t1,                // temp register
    Register t2,                // temp register
    Label&   slow_case          // continuation point if fast allocation fails
  );
  void zero_memory(Register addr, Register len, Register t1);
  void verify_tlab();
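
  // Example (illustrative): fast-path allocation of a fixed-size instance,
  // where obj_size is a placeholder compile-time constant:
  //
  //   Label slow;
  //   tlab_allocate(r0, noreg, obj_size, r10, r11, slow);
  //   // r0 now points at the new object; 'slow' falls back to the runtime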

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface,
                               bool return_method = true);

  // virtual method calling
  // n.b. x86 allows RegisterOrConstant for vtable_index
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);
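
  // Example (illustrative): branch to L_ok when the klass in r1 is a subtype
  // of the klass in r0, falling through otherwise:
  //
  //   Label L_ok;
  //   check_klass_subtype(r1 /*sub*/, r0 /*super*/, rscratch1, L_ok);
  //   ... not a subtype: e.g. jump to a slow path or throw ...
  //   bind(L_ok);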

  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);


  // Debugging

  // only if +VerifyOops
  void verify_oop(Register reg, const char* s = "broken oop");
  void verify_oop_addr(Address addr, const char* s = "broken oop addr");

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  static void debug64(char* msg, int64_t pc, int64_t regs[]);

  void untested() { stop("untested"); }

  void unimplemented(const char* what = "");

  void should_not_reach_here() { stop("should not reach here"); }

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with negative offset");
    sub(rscratch2, sp, offset);
    str(zr, Address(rscratch2));
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages. Also, clobbers tmp
  void bang_stack_size(Register size, Register tmp);

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset);

  // Arithmetic

  void addptr(const Address &dst, int32_t src);
  void cmpptr(Register src1, Address src2);

  void cmpoop(Register obj1, Register obj2);

  // Various forms of CAS

  void cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp,
                          Label &succeed, Label *fail);
  void cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp,
                  Label &succeed, Label *fail);

  void cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
                Label &succeed, Label *fail);

  void atomic_add(Register prev, RegisterOrConstant incr, Register addr);
  void atomic_addw(Register prev, RegisterOrConstant incr, Register addr);
  void atomic_addal(Register prev, RegisterOrConstant incr, Register addr);
  void atomic_addalw(Register prev, RegisterOrConstant incr, Register addr);

  void atomic_xchg(Register prev, Register newv, Register addr);
  void atomic_xchgw(Register prev, Register newv, Register addr);
  void atomic_xchgal(Register prev, Register newv, Register addr);
  void atomic_xchgalw(Register prev, Register newv, Register addr);

  void orptr(Address adr, RegisterOrConstant src) {
    ldr(rscratch1, adr);
    if (src.is_register())
      orr(rscratch1, rscratch1, src.as_register());
    else
      orr(rscratch1, rscratch1, src.as_constant());
    str(rscratch1, adr);
  }

  // A generic CAS; success or failure is in the EQ flag.
  // Clobbers rscratch1
  void cmpxchg(Register addr, Register expected, Register new_val,
               enum operand_size size,
               bool acquire, bool release, bool weak,
               Register result);
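
  // Example (illustrative): full-strength word CAS with acquire/release
  // semantics; the operand_size values are assumed from the Assembler:
  //
  //   cmpxchg(addr_reg, expected_reg, new_reg, Assembler::word,
  //           /*acquire*/true, /*release*/true, /*weak*/false, result_reg);
  //   br(Assembler::NE, L_failed);  // EQ flag reports success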
 private:
  void compare_eq(Register rn, Register rm, enum operand_size size);

 public:
  // Calls

  address trampoline_call(Address entry, CodeBuffer *cbuf = NULL);

  static bool far_branches() {
    return ReservedCodeCacheSize > branch_range || UseAOT;
  }

  // Jumps that can reach anywhere in the code cache.
  // Trashes tmp.
  void far_call(Address entry, CodeBuffer *cbuf = NULL, Register tmp = rscratch1);
  void far_jump(Address entry, CodeBuffer *cbuf = NULL, Register tmp = rscratch1);

  static int far_branch_size() {
    if (far_branches()) {
      return 3 * 4;  // adrp, add, br
    } else {
      return 4;
    }
  }

  // Emit the CompiledIC call idiom
  address ic_call(address entry, jint method_index = 0);

 public:

  // Data

  void mov_metadata(Register dst, Metadata* obj);
  Address allocate_metadata_address(Metadata* obj);
  Address constant_oop_address(jobject obj);

  void movoop(Register dst, jobject obj, bool immediate = false);

  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
  void kernel_crc32(Register crc, Register buf, Register len,
        Register table0, Register table1, Register table2, Register table3,
        Register tmp, Register tmp2, Register tmp3);
  // CRC32 code for java.util.zip.CRC32C::updateBytes() intrinsic.
  void kernel_crc32c(Register crc, Register buf, Register len,
        Register table0, Register table1, Register table2, Register table3,
        Register tmp, Register tmp2, Register tmp3);

  // Stack push and pop individual 64 bit registers
  void push(Register src);
  void pop(Register dst);

  // push all registers onto the stack
  void pusha();
  void popa();

  void repne_scan(Register addr, Register value, Register count,
                  Register scratch);
  void repne_scanw(Register addr, Register value, Register count,
                   Register scratch);

  typedef void (MacroAssembler::* add_sub_imm_insn)(Register Rd, Register Rn, unsigned imm);
  typedef void (MacroAssembler::* add_sub_reg_insn)(Register Rd, Register Rn, Register Rm, enum shift_kind kind, unsigned shift);

  // If a constant does not fit in an immediate field, generate some
  // number of MOV instructions and then perform the operation
  void wrap_add_sub_imm_insn(Register Rd, Register Rn, unsigned imm,
                             add_sub_imm_insn insn1,
                             add_sub_reg_insn insn2);
  // Separate version which sets the flags
  void wrap_adds_subs_imm_insn(Register Rd, Register Rn, unsigned imm,
                               add_sub_imm_insn insn1,
                               add_sub_reg_insn insn2);

#define WRAP(INSN)                                                          \
  void INSN(Register Rd, Register Rn, unsigned imm) {                       \
    wrap_add_sub_imm_insn(Rd, Rn, imm, &Assembler::INSN, &Assembler::INSN); \
  }                                                                         \
                                                                            \
  void INSN(Register Rd, Register Rn, Register Rm,                          \
            enum shift_kind kind, unsigned shift = 0) {                     \
    Assembler::INSN(Rd, Rn, Rm, kind, shift);                               \
  }                                                                         \
                                                                            \
  void INSN(Register Rd, Register Rn, Register Rm) {                        \
    Assembler::INSN(Rd, Rn, Rm);                                            \
  }                                                                         \
                                                                            \
  void INSN(Register Rd, Register Rn, Register Rm,                          \
            ext::operation option, int amount = 0) {                        \
    Assembler::INSN(Rd, Rn, Rm, option, amount);                            \
  }

  WRAP(add) WRAP(addw) WRAP(sub) WRAP(subw)

#undef WRAP
#define WRAP(INSN)                                                            \
  void INSN(Register Rd, Register Rn, unsigned imm) {                         \
    wrap_adds_subs_imm_insn(Rd, Rn, imm, &Assembler::INSN, &Assembler::INSN); \
  }                                                                           \
                                                                              \
  void INSN(Register Rd, Register Rn, Register Rm,                            \
            enum shift_kind kind, unsigned shift = 0) {                       \
    Assembler::INSN(Rd, Rn, Rm, kind, shift);                                 \
  }                                                                           \
                                                                              \
  void INSN(Register Rd, Register Rn, Register Rm) {                          \
    Assembler::INSN(Rd, Rn, Rm);                                              \
  }                                                                           \
                                                                              \
  void INSN(Register Rd, Register Rn, Register Rm,                            \
            ext::operation option, int amount = 0) {                          \
    Assembler::INSN(Rd, Rn, Rm, option, amount);                              \
  }

  WRAP(adds) WRAP(addsw) WRAP(subs) WRAP(subsw)

  void add(Register Rd, Register Rn, RegisterOrConstant increment);
  void addw(Register Rd, Register Rn, RegisterOrConstant increment);
  void sub(Register Rd, Register Rn, RegisterOrConstant decrement);
  void subw(Register Rd, Register Rn, RegisterOrConstant decrement);
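
  // Example (illustrative of the wrappers above): 0x12345 does not fit the
  // 12-bit (optionally shifted) add/sub immediate field, so
  //
  //   add(r0, r1, 0x12345u);
  //
  // is emitted as more than one instruction (e.g. the constant is split, or
  // first materialized into a register) rather than a single add-immediate.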

  void adrp(Register reg1, const Address &dest, unsigned long &byte_offset);


  enum RegState {
    reg_readonly,
    reg_writable,
    reg_written
  };

  void verified_entry(Compile* C, int sp_inc);

  // Unpack all value type arguments passed as oops
  void unpack_value_args(Compile* C, bool receiver_only);
  void store_value_type_fields_to_buf(ciValueKlass* vk);

  void tableswitch(Register index, jint lowbound, jint highbound,
                   Label &jumptable, Label &jumptable_end, int stride = 1) {
    adr(rscratch1, jumptable);
    subsw(rscratch2, index, lowbound);
    subsw(zr, rscratch2, highbound - lowbound);
    br(Assembler::HS, jumptable_end);
    add(rscratch1, rscratch1, rscratch2,
        ext::sxtw, exact_log2(stride * Assembler::instruction_size));
    br(rscratch1);
  }
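
  // Example (illustrative): dispatch on an index in [10, 14), one branch
  // instruction per jump-table slot (stride 1):
  //
  //   Label table, table_end;
  //   tableswitch(r2, 10, 14, table, table_end);
  //   bind(table);
  //   ... 4 branch instructions, one per key ...
  //   bind(table_end);  // reached when the index is out of range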

  // Form an address from base + offset in Rd. Rd may or may not
  // actually be used: you must use the Address that is returned. It
  // is up to you to ensure that the shift provided matches the size
  // of your data.
  Address form_address(Register Rd, Register base, long byte_offset, int shift);

  // Return true iff an address is within the 48-bit AArch64 address
  // space.
  bool is_valid_AArch64_address(address a) {
    return ((uint64_t)a >> 48) == 0;
  }

  // Load the base of the cardtable byte map into reg.
  void load_byte_map_base(Register reg);

  // Prolog generator routines to support switch between x86 code and
  // generated ARM code

  // routine to generate an x86 prolog for a stub function which
  // bootstraps into the generated ARM code which directly follows the
  // stub
  //

 public:
  // enum used for aarch64--x86 linkage to define return type of x86 function
  enum ret_type { ret_type_void, ret_type_integral, ret_type_float, ret_type_double };

#ifdef BUILTIN_SIM
  void c_stub_prolog(int gp_arg_count, int fp_arg_count, int ret_type, address *prolog_ptr = NULL);
#else
  void c_stub_prolog(int gp_arg_count, int fp_arg_count, int ret_type) { }
#endif

  // special version of call_VM_leaf_base needed for aarch64 simulator
  // where we need to specify both the gp and fp arg counts and the
  // return type so that the linkage routine from aarch64 to x86 and
  // back knows which aarch64 registers to copy to x86 registers and
  // which x86 result register to copy back to an aarch64 register

  void call_VM_leaf_base1(
    address  entry_point,             // the entry point
    int      number_of_gp_arguments,  // the number of gp reg arguments to pass
    int      number_of_fp_arguments,  // the number of fp reg arguments to pass
    ret_type type,                    // the return type for the call
    Label*   retaddr = NULL
  );

  void ldr_constant(Register dest, const Address &const_addr) {
    if (NearCpool) {
      ldr(dest, const_addr);
    } else {
      unsigned long offset;
      adrp(dest, InternalAddress(const_addr.target()), offset);
      ldr(dest, Address(dest, offset));
    }
  }

  address read_polling_page(Register r, address page, relocInfo::relocType rtype);
  address read_polling_page(Register r, relocInfo::relocType rtype);
  void get_polling_page(Register dest, address page, relocInfo::relocType rtype);

  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
  void update_byte_crc32(Register crc, Register val, Register table);
  void update_word_crc32(Register crc, Register v, Register tmp,
        Register table0, Register table1, Register table2, Register table3,
        bool upper = false);

  void string_compare(Register str1, Register str2,
                      Register cnt1, Register cnt2, Register result,
                      Register tmp1, Register tmp2, FloatRegister vtmp1,
                      FloatRegister vtmp2, FloatRegister vtmp3, int ae);

  void has_negatives(Register ary1, Register len, Register result);

  void arrays_equals(Register a1, Register a2, Register result, Register cnt1,
                     Register tmp1, Register tmp2, Register tmp3, int elem_size);

  void string_equals(Register a1, Register a2, Register result, Register cnt1,
                     int elem_size);

  void fill_words(Register base, Register cnt, Register value);
  void fill_words(Register base, u_int64_t cnt, Register value);

  void zero_words(Register base, u_int64_t cnt);
  void zero_words(Register ptr, Register cnt);
  void zero_dcache_blocks(Register base, Register cnt);

  static const int zero_words_block_size;

  void byte_array_inflate(Register src, Register dst, Register len,
                          FloatRegister vtmp1, FloatRegister vtmp2,
                          FloatRegister vtmp3, Register tmp4);

  void char_array_compress(Register src, Register dst, Register len,
                           FloatRegister tmp1Reg, FloatRegister tmp2Reg,
                           FloatRegister tmp3Reg, FloatRegister tmp4Reg,
                           Register result);

  void encode_iso_array(Register src, Register dst,
                        Register len, Register result,
                        FloatRegister Vtmp1, FloatRegister Vtmp2,
                        FloatRegister Vtmp3, FloatRegister Vtmp4);
  void string_indexof(Register str1, Register str2,
                      Register cnt1, Register cnt2,
                      Register tmp1, Register tmp2,
                      Register tmp3, Register tmp4,
                      Register tmp5, Register tmp6,
                      int int_cnt1, Register result, int ae);
  void string_indexof_char(Register str1, Register cnt1,
                           Register ch, Register result,
                           Register tmp1, Register tmp2, Register tmp3);
  void fast_log(FloatRegister vtmp0, FloatRegister vtmp1, FloatRegister vtmp2,
                FloatRegister vtmp3, FloatRegister vtmp4, FloatRegister vtmp5,
                FloatRegister tmpC1, FloatRegister tmpC2, FloatRegister tmpC3,
                FloatRegister tmpC4, Register tmp1, Register tmp2,
                Register tmp3, Register tmp4, Register tmp5);
  void generate_dsin_dcos(bool isCos, address npio2_hw, address two_over_pi,
                          address pio2, address dsin_coef, address dcos_coef);
 private:
  // begin trigonometric functions support block
  void generate__ieee754_rem_pio2(address npio2_hw, address two_over_pi, address pio2);
  void generate__kernel_rem_pio2(address two_over_pi, address pio2);
  void generate_kernel_sin(FloatRegister x, bool iyIsOne, address dsin_coef);
  void generate_kernel_cos(FloatRegister x, address dcos_coef);
  // end trigonometric functions support block
  void add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
                       Register src1, Register src2);
  void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
    add2_with_carry(dest_hi, dest_hi, dest_lo, src1, src2);
  }
  void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_128_x_128_loop(Register y, Register z,
                               Register carry, Register carry2,
                               Register idx, Register jdx,
                               Register yz_idx1, Register yz_idx2,
                               Register tmp, Register tmp3, Register tmp4,
                               Register tmp7, Register product_hi);
  void kernel_crc32_using_crc32(Register crc, Register buf,
        Register len, Register tmp0, Register tmp1, Register tmp2,
        Register tmp3);
  void kernel_crc32c_using_crc32c(Register crc, Register buf,
        Register len, Register tmp0, Register tmp1, Register tmp2,
        Register tmp3);
 public:
  void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z,
                       Register zlen, Register tmp1, Register tmp2, Register tmp3,
                       Register tmp4, Register tmp5, Register tmp6, Register tmp7);
  void mul_add(Register out, Register in, Register offs, Register len, Register k);
  // ISB may be needed because of a safepoint
  void maybe_isb() { isb(); }

 private:
  // Return the effective address r + (r1 << ext) + offset.
  // Uses rscratch2.
  Address offsetted_address(Register r, Register r1, Address::extend ext,
                            int offset, int size);

 private:
  // Returns an address on the stack which is reachable with a ldr/str of size.
  // Uses rscratch2 if the address is not directly reachable.
  Address spill_address(int size, int offset, Register tmp=rscratch2);

  bool merge_alignment_check(Register base, size_t size, long cur_offset, long prev_offset) const;

  // Check whether two loads/stores can be merged into ldp/stp.
  bool ldst_can_merge(Register rx, const Address &adr, size_t cur_size_in_bytes, bool is_store) const;

  // Merge current load/store with previous load/store into ldp/stp.
  void merge_ldst(Register rx, const Address &adr, size_t cur_size_in_bytes, bool is_store);

  // Try to merge two loads/stores into ldp/stp. Returns true on success,
  // false otherwise.
  bool try_merge_ldst(Register rt, const Address &adr, size_t cur_size_in_bytes, bool is_store);
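
  // Example (illustrative) of the merging performed above: two adjacent
  // 8-byte stores such as
  //
  //   str(r0, Address(sp, 16));
  //   str(r1, Address(sp, 24));
  //
  // are candidates for a single stp(r0, r1, Address(sp, 16)), subject to the
  // alignment and offset checks in merge_alignment_check/ldst_can_merge.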

 public:
  void spill(Register Rx, bool is64, int offset) {
    if (is64) {
      str(Rx, spill_address(8, offset));
    } else {
      strw(Rx, spill_address(4, offset));
    }
  }
  void spill(FloatRegister Vx, SIMD_RegVariant T, int offset) {
    str(Vx, T, spill_address(1 << (int)T, offset));
  }
  void unspill(Register Rx, bool is64, int offset) {
    if (is64) {
      ldr(Rx, spill_address(8, offset));
    } else {
      ldrw(Rx, spill_address(4, offset));
    }
  }
  void unspill(FloatRegister Vx, SIMD_RegVariant T, int offset) {
    ldr(Vx, T, spill_address(1 << (int)T, offset));
  }
  void spill_copy128(int src_offset, int dst_offset,
                     Register tmp1=rscratch1, Register tmp2=rscratch2) {
    if (src_offset < 512 && (src_offset & 7) == 0 &&
        dst_offset < 512 && (dst_offset & 7) == 0) {
      ldp(tmp1, tmp2, Address(sp, src_offset));
      stp(tmp1, tmp2, Address(sp, dst_offset));
    } else {
      unspill(tmp1, true, src_offset);
      spill(tmp1, true, dst_offset);
      unspill(tmp1, true, src_offset+8);
      spill(tmp1, true, dst_offset+8);
    }
  }
};

#ifdef ASSERT
inline bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

/**
 * class SkipIfEqual:
 *
 * Instantiating this class will result in assembly code being output that will
 * jump around any code emitted between the creation of the instance and its
 * automatic destruction at the end of a scope block, depending on the value of
 * the flag passed to the constructor, which will be checked at run-time.
 */
class SkipIfEqual {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
  SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
  ~SkipIfEqual();
};

struct tableswitch {
  Register _reg;
  int _insn_index; jint _first_key; jint _last_key;
  Label _after;
  Label _branches;
};

#endif // CPU_AARCH64_MACROASSEMBLER_AARCH64_HPP