/*
 * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_MACROASSEMBLER_ARM_HPP
#define CPU_ARM_VM_MACROASSEMBLER_ARM_HPP

#include "code/relocInfo.hpp"
#include "code/relocInfo_ext.hpp"

class BiasedLockingCounters;

// Introduced AddressLiteral and its subclasses to ease portability from
// x86 and to avoid relocation issues.
class AddressLiteral VALUE_OBJ_CLASS_SPEC {
  RelocationHolder _rspec;
  // Typically, when we use AddressLiterals we want to use their rval.
  // However, in some situations we want the lval (effective address) of the item.
  // We provide a special factory for making those lvals.
  bool _is_lval;

  address _target;

 private:
  static relocInfo::relocType reloc_for_target(address target) {
    // Used for ExternalAddress or when the type is not specified.
    // Sometimes ExternalAddress is used for values which aren't
    // exactly addresses, like the card table base.
    // external_word_type can't be used for values in the first page,
    // so just skip the reloc in that case.
    return external_word_Relocation::can_be_relocated(target) ?
           relocInfo::external_word_type : relocInfo::none;
  }

  void set_rspec(relocInfo::relocType rtype);

 protected:
  // creation
  AddressLiteral()
    : _is_lval(false),
      _target(NULL)
  {}

 public:

  AddressLiteral(address target, relocInfo::relocType rtype) {
    _is_lval = false;
    _target = target;
    set_rspec(rtype);
  }

  AddressLiteral(address target, RelocationHolder const& rspec)
    : _rspec(rspec),
      _is_lval(false),
      _target(target)
  {}

  AddressLiteral(address target) {
    _is_lval = false;
    _target = target;
    set_rspec(reloc_for_target(target));
  }

  AddressLiteral addr() {
    AddressLiteral ret = *this;
    ret._is_lval = true;
    return ret;
  }

 private:

  address target() { return _target; }
  bool is_lval() { return _is_lval; }

  relocInfo::relocType reloc() const { return _rspec.type(); }
  const RelocationHolder& rspec() const { return _rspec; }

  friend class Assembler;
  friend class MacroAssembler;
  friend class Address;
  friend class LIR_Assembler;
  friend class InlinedAddress;
};

class ExternalAddress: public AddressLiteral {

 public:

  ExternalAddress(address target) : AddressLiteral(target) {}

};

class InternalAddress: public AddressLiteral {

 public:

  InternalAddress(address target) : AddressLiteral(target, relocInfo::internal_word_type) {}

};

// Inlined constants, for use with ldr_literal / bind_literal
// Note: InlinedInteger not supported (use move_slow(Register,int[,cond]))
class InlinedLiteral: StackObj {
 public:
  Label label; // need to be public for direct access with &
  InlinedLiteral() {
  }
};

class InlinedMetadata: public InlinedLiteral {
 private:
  Metadata *_data;

 public:
  InlinedMetadata(Metadata *data): InlinedLiteral() {
    _data = data;
  }
  Metadata *data() { return _data; }
};

// Currently unused
// class InlinedOop: public InlinedLiteral {
//  private:
//   jobject _jobject;
//
//  public:
//   InlinedOop(jobject target): InlinedLiteral() {
//     _jobject = target;
//   }
//   jobject jobject() { return _jobject; }
// };

class InlinedAddress: public InlinedLiteral {
 private:
  AddressLiteral _literal;

 public:

  InlinedAddress(jobject object): InlinedLiteral(), _literal((address)object, relocInfo::oop_type) {
    ShouldNotReachHere(); // use mov_oop (or implement InlinedOop)
  }

  InlinedAddress(Metadata *data): InlinedLiteral(), _literal((address)data, relocInfo::metadata_type) {
    ShouldNotReachHere(); // use InlinedMetadata or mov_metadata
  }

  InlinedAddress(address target, const RelocationHolder &rspec): InlinedLiteral(), _literal(target, rspec) {
    assert(rspec.type() != relocInfo::oop_type, "Do not use InlinedAddress for oops");
    assert(rspec.type() != relocInfo::metadata_type, "Do not use InlinedAddress for metadatas");
  }

  InlinedAddress(address target, relocInfo::relocType rtype): InlinedLiteral(), _literal(target, rtype) {
    assert(rtype != relocInfo::oop_type, "Do not use InlinedAddress for oops");
    assert(rtype != relocInfo::metadata_type, "Do not use InlinedAddress for metadatas");
  }

  // Note: default is relocInfo::none for InlinedAddress
  InlinedAddress(address target): InlinedLiteral(), _literal(target, relocInfo::none) {
  }

  address target() { return _literal.target(); }

  const RelocationHolder& rspec() const { return _literal.rspec(); }
};
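
// Illustrative sketch (not part of the interface): the typical pairing of an
// InlinedAddress with ldr_literal/bind_literal, mirroring the pattern used by
// MacroAssembler::mov_address() further below. Register and label names here
// are examples only.
//
//   InlinedAddress addr_literal(addr, rspec);
//   Label skip_literal;
//   __ ldr_literal(rd, addr_literal);  // load the inlined constant
//   __ b(skip_literal);                // branch over the embedded data word
//   __ bind_literal(addr_literal);     // emit the data word itself
//   __ bind(skip_literal);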
class InlinedString: public InlinedLiteral {
 private:
  const char* _msg;

 public:
  InlinedString(const char* msg): InlinedLiteral() {
    _msg = msg;
  }
  const char* msg() { return _msg; }
};

class MacroAssembler: public Assembler {
 protected:

  // Support for VM calls
  //

  // This is the base routine called by the different versions of call_VM_leaf.
  void call_VM_leaf_helper(address entry_point, int number_of_arguments);

  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  virtual void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions);
 public:

  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe() {}
  virtual void check_and_handle_earlyret() {}

  // By default, we do not need relocation information for non
  // patchable absolute addresses. However, when needed by some
  // extensions, ignore_non_patchable_relocations can be modified,
  // returning false to preserve all relocation information.
  inline bool ignore_non_patchable_relocations() { return true; }

  // Initially added to the Assembler interface as a pure virtual:
  //   RegisterConstant delayed_value(..)
  // for:
  //   6812678 macro assembler needs delayed binding of a few constants (for 6655638)
  // This was subsequently modified to its present name and return type.
  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset);

#ifdef AARCH64
# define NOT_IMPLEMENTED() unimplemented("NYI at " __FILE__ ":" XSTR(__LINE__))
# define NOT_TESTED()      warn("Not tested at " __FILE__ ":" XSTR(__LINE__))
#endif

  void align(int modulus);

  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM methods.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.

  void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
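
  // Illustrative sketch (hypothetical entry point and argument registers): a
  // typical call into the VM that returns an oop in 'oop_result' and passes
  // one register argument:
  //   __ call_VM(R0, CAST_FROM_FN_PTR(address, SomeRuntime::some_entry), R1);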
  // The following methods are required by templateTable.cpp,
  // but not used on ARM.
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);

  // Note: The super_call_VM calls are not used on ARM

  // Raw call, without saving/restoring registers, exception handling, etc.
  // Mainly used from various stubs.
  // Note: if 'save_R9_if_scratched' is true, call_VM may on some
  // platforms save values on the stack. Set it to false (and handle
  // R9 in the callers) if the top of the stack must not be modified
  // by call_VM.
  void call_VM(address entry_point, bool save_R9_if_scratched);

  void call_VM_leaf(address entry_point);
  void call_VM_leaf(address entry_point, Register arg_1);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  void get_vm_result(Register oop_result, Register tmp);
  void get_vm_result_2(Register metadata_result, Register tmp);

  // Always sets/resets sp, which defaults to SP if (last_sp == noreg)
  // Optionally sets/resets fp (use noreg to avoid setting it)
  // Always sets/resets pc on AArch64; optionally sets/resets pc on 32-bit ARM depending on the save_last_java_pc flag
  // Note: when saving PC, set_last_Java_frame returns PC's offset in the code section
  //       (for oop_maps offset computation)
  int set_last_Java_frame(Register last_sp, Register last_fp, bool save_last_java_pc, Register tmp);
  void reset_last_Java_frame(Register tmp);
  // status set in set_last_Java_frame for reset_last_Java_frame
  bool _fp_saved;
  bool _pc_saved;

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) __ stop(error)
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#define STOP(error) __ block_comment(error); __ stop(error)
#endif

  void lookup_virtual_method(Register recv_klass,
                             Register vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // No registers are killed, except temp_regs.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp_reg2,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path);

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // temp_reg3 can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes:
  // - condition codes will be Z on success, NZ on failure.
  // - temp_reg will be 0 on success, non-0 on failure
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp_reg2,
                                     Register temp_reg3, // auto assigned if noreg
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // Simplified, combined version, good for typical uses.
  // temp_reg3 can be noreg, if no temps are available. It is used only on the slow path.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Register temp_reg2,
                           Register temp_reg3, // auto assigned on the slow path if noreg
                           Label& L_success);

  // Returns the address of the receiver parameter, using tmp as the base register. tmp and params_count can be the same.
  Address receiver_argument_address(Register params_base, Register params_count, Register tmp);

  void _verify_oop(Register reg, const char* s, const char* file, int line);
  void _verify_oop_addr(Address addr, const char* s, const char* file, int line);

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_oop(reg) _verify_oop(reg, "broken oop " #reg, __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr(addr, "broken oop ", __FILE__, __LINE__)
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  void null_check(Register reg, Register tmp, int offset = -1);
  inline void null_check(Register reg) { null_check(reg, noreg, -1); } // for C1 lir_null_check

  // Puts the address of the allocated object into register `obj` and the end of the allocated object into register `obj_end`.
  void eden_allocate(Register obj, Register obj_end, Register tmp1, Register tmp2,
                     RegisterOrConstant size_expression, Label& slow_case);
  void tlab_allocate(Register obj, Register obj_end, Register tmp1,
                     RegisterOrConstant size_expression, Label& slow_case);

  void tlab_refill(Register top, Register tmp1, Register tmp2, Register tmp3, Register tmp4,
                   Label& try_eden, Label& slow_case);
  void zero_memory(Register start, Register end, Register tmp);

  void incr_allocated_bytes(RegisterOrConstant size_in_bytes, Register tmp);

  static bool needs_explicit_null_check(intptr_t offset);

  void arm_stack_overflow_check(int frame_size_in_bytes, Register tmp);
  void arm_stack_overflow_check(Register Rsize, Register tmp);

  void bang_stack_with_offset(int offset) {
    ShouldNotReachHere();
  }

  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg must be supplied.
  // tmp_reg must be supplied.
  // The optional slow case is for implementations (interpreter and C1) which branch to
  // the slow case directly. If slow_case is NULL, then leaves condition
  // codes set (for C2's Fast_Lock node) and jumps to the done label.
  // Falls through for the fast locking attempt.
  // Returns the offset of the first potentially-faulting instruction for null
  // check info (currently consumed only by C1).
  // If swap_reg_contains_mark is true then returns -1 as it is assumed
  // the calling code has already passed any potential faults.
  // Notes:
  // - swap_reg and tmp_reg are scratched
  // - Rtemp was (implicitly) scratched and can now be specified as the tmp2
  int biased_locking_enter(Register obj_reg, Register swap_reg, Register tmp_reg,
                           bool swap_reg_contains_mark,
                           Register tmp2,
                           Label& done, Label& slow_case,
                           BiasedLockingCounters* counters = NULL);
  void biased_locking_exit(Register obj_reg, Register temp_reg, Label& done);

  // Building block for the CAS cases of biased locking: performs the CAS and records statistics.
  // The optional slow_case label is used to transfer control if the CAS fails. Otherwise leaves condition codes set.
  void biased_locking_enter_with_cas(Register obj_reg, Register old_mark_reg, Register new_mark_reg,
                                     Register tmp, Label& slow_case, int* counter_addr);

#ifndef AARCH64
  void nop() {
    mov(R0, R0);
  }

  void push(Register rd, AsmCondition cond = al) {
    assert(rd != SP, "unpredictable instruction");
    str(rd, Address(SP, -wordSize, pre_indexed), cond);
  }

  void push(RegisterSet reg_set, AsmCondition cond = al) {
    assert(!reg_set.contains(SP), "unpredictable instruction");
    stmdb(SP, reg_set, writeback, cond);
  }

  void pop(Register rd, AsmCondition cond = al) {
    assert(rd != SP, "unpredictable instruction");
    ldr(rd, Address(SP, wordSize, post_indexed), cond);
  }

  void pop(RegisterSet reg_set, AsmCondition cond = al) {
    assert(!reg_set.contains(SP), "unpredictable instruction");
    ldmia(SP, reg_set, writeback, cond);
  }

  void fpushd(FloatRegister fd, AsmCondition cond = al) {
    fstmdbd(SP, FloatRegisterSet(fd), writeback, cond);
  }

  void fpushs(FloatRegister fd, AsmCondition cond = al) {
    fstmdbs(SP, FloatRegisterSet(fd), writeback, cond);
  }

  void fpopd(FloatRegister fd, AsmCondition cond = al) {
    fldmiad(SP, FloatRegisterSet(fd), writeback, cond);
  }

  void fpops(FloatRegister fd, AsmCondition cond = al) {
    fldmias(SP, FloatRegisterSet(fd), writeback, cond);
  }
#endif // !AARCH64

  // Order access primitives
  enum Membar_mask_bits {
    StoreStore = 1 << 3,
    LoadStore  = 1 << 2,
    StoreLoad  = 1 << 1,
    LoadLoad   = 1 << 0
  };

#ifdef AARCH64
  // The tmp register is not used on AArch64; this parameter is provided solely for better compatibility with 32-bit ARM.
  void membar(Membar_mask_bits order_constraint, Register tmp = noreg);
#else
  void membar(Membar_mask_bits mask,
              Register tmp,
              bool preserve_flags = true,
              Register load_tgt = noreg);
#endif
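
  // Illustrative (assumed) usage: the barrier bits above are flag values and
  // are typically combined with '|' at call sites, e.g.
  //   __ membar(MacroAssembler::Membar_mask_bits(LoadLoad | LoadStore), Rtemp);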
  void breakpoint(AsmCondition cond = al);
  void stop(const char* msg);
  // prints msg and continues
  void warn(const char* msg);
  void unimplemented(const char* what = "");
  void should_not_reach_here() { stop("should not reach here"); }
  static void debug(const char* msg, const intx* registers);

  // Create a walkable frame to help track down who called this code.
  // Returns the frame size in words.
  int should_not_call_this() {
    raw_push(FP, LR);
    should_not_reach_here();
    flush();
    return 2; // frame_size_in_words (FP + LR)
  }

  int save_all_registers();
  void restore_all_registers();
  int save_caller_save_registers();
  void restore_caller_save_registers();

  void add_rc(Register dst, Register arg1, RegisterOrConstant arg2);

  // add_slow and mov_slow are used to manipulate offsets larger than 1024.
  // These functions are not expected to handle all possible constants,
  // only those that can actually occur during compilation.
  void add_slow(Register rd, Register rn, int c);
  void sub_slow(Register rd, Register rn, int c);

#ifdef AARCH64
  static int mov_slow_helper(Register rd, intptr_t c, MacroAssembler* masm /* optional */);
#endif

  void mov_slow(Register rd, intptr_t c NOT_AARCH64_ARG(AsmCondition cond = al));
  void mov_slow(Register rd, const char *string);
  void mov_slow(Register rd, address addr);

  void patchable_mov_oop(Register rd, jobject o, int oop_index) {
    mov_oop(rd, o, oop_index AARCH64_ONLY_ARG(true));
  }
  void mov_oop(Register rd, jobject o, int index = 0
               AARCH64_ONLY_ARG(bool patchable = false)
               NOT_AARCH64_ARG(AsmCondition cond = al));

  void patchable_mov_metadata(Register rd, Metadata* o, int index) {
    mov_metadata(rd, o, index AARCH64_ONLY_ARG(true));
  }
  void mov_metadata(Register rd, Metadata* o, int index = 0 AARCH64_ONLY_ARG(bool patchable = false));

  void mov_float(FloatRegister fd, jfloat c NOT_AARCH64_ARG(AsmCondition cond = al));
  void mov_double(FloatRegister fd, jdouble c NOT_AARCH64_ARG(AsmCondition cond = al));

#ifdef AARCH64
  int mov_pc_to(Register rd) {
    Label L;
    adr(rd, L);
    bind(L);
    return offset();
  }
#endif

  // Note: this variant of mov_address assumes the address moves with
  // the code. Do *not* implement it with non-relocated instructions,
  // unless they are PC-relative.
#ifdef AARCH64
  void mov_relative_address(Register rd, address addr) {
    adr(rd, addr);
  }
#else
  void mov_relative_address(Register rd, address addr, AsmCondition cond = al) {
    int offset = addr - pc() - 8;
    assert((offset & 3) == 0, "bad alignment");
    if (offset >= 0) {
      assert(AsmOperand::is_rotated_imm(offset), "addr too far");
      add(rd, PC, offset, cond);
    } else {
      assert(AsmOperand::is_rotated_imm(-offset), "addr too far");
      sub(rd, PC, -offset, cond);
    }
  }
#endif // AARCH64

  // Runtime address that may vary from one execution to another. The
  // symbolic_reference describes what the address is, allowing
  // the address to be resolved in a different execution context.
  // Warning: do not implement as a PC-relative address.
  void mov_address(Register rd, address addr, symbolic_Relocation::symbolic_reference t) {
    mov_address(rd, addr, RelocationHolder::none);
  }
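
  // Illustrative sketch (placeholder register, address and symbolic_reference
  // value) of the two typical mov_address forms:
  //   __ mov_address(Rtemp, addr, symbolic_Relocation::some_reference); // re-resolvable in another execution context
  //   __ mov_address(Rtemp, addr, RelocationHolder::none);              // absolute address, no relocation recorded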
  // rspec can be RelocationHolder::none (for an ignored symbolic_Relocation).
  // In that case, the address is absolute and the generated code need
  // not be relocatable.
  void mov_address(Register rd, address addr, RelocationHolder const& rspec) {
    assert(rspec.type() != relocInfo::runtime_call_type, "do not use mov_address for runtime calls");
    assert(rspec.type() != relocInfo::static_call_type, "do not use mov_address for relocable calls");
    if (rspec.type() == relocInfo::none) {
      // absolute address, relocation not needed
      mov_slow(rd, (intptr_t)addr);
      return;
    }
#ifndef AARCH64
    if (VM_Version::supports_movw()) {
      relocate(rspec);
      int c = (int)addr;
      movw(rd, c & 0xffff);
      if ((unsigned int)c >> 16) {
        movt(rd, (unsigned int)c >> 16);
      }
      return;
    }
#endif
    Label skip_literal;
    InlinedAddress addr_literal(addr, rspec);
    ldr_literal(rd, addr_literal);
    b(skip_literal);
    bind_literal(addr_literal);
    // AARCH64 WARNING: because of alignment padding, extra padding
    // may be required to get a consistent size for C2, or rules must
    // overestimate the size; see MachEpilogNode::size
    bind(skip_literal);
  }

  // Note: Do not define mov_address for a Label.
  //
  // Loads from addresses potentially within the code are now handled by
  // InlinedLiteral subclasses (to allow more flexibility on how the
  // ldr_literal is performed).

  void ldr_literal(Register rd, InlinedAddress& L) {
    assert(L.rspec().type() != relocInfo::runtime_call_type, "avoid ldr_literal for calls");
    assert(L.rspec().type() != relocInfo::static_call_type, "avoid ldr_literal for calls");
    relocate(L.rspec());
#ifdef AARCH64
    ldr(rd, target(L.label));
#else
    ldr(rd, Address(PC, target(L.label) - pc() - 8));
#endif
  }

  void ldr_literal(Register rd, InlinedString& L) {
    const char* msg = L.msg();
    if (code()->consts()->contains((address)msg)) {
      // string address moves with the code
#ifdef AARCH64
      ldr(rd, (address)msg);
#else
      ldr(rd, Address(PC, ((address)msg) - pc() - 8));
#endif
      return;
    }
    // Warning: use external strings with care. They are not relocated
    // if the code moves. If needed, use code_string to move them
    // to the consts section.
#ifdef AARCH64
    ldr(rd, target(L.label));
#else
    ldr(rd, Address(PC, target(L.label) - pc() - 8));
#endif
  }

  void ldr_literal(Register rd, InlinedMetadata& L) {
    // relocation done in the bind_literal for metadatas
#ifdef AARCH64
    ldr(rd, target(L.label));
#else
    ldr(rd, Address(PC, target(L.label) - pc() - 8));
#endif
  }

  void bind_literal(InlinedAddress& L) {
    AARCH64_ONLY(align(wordSize));
    bind(L.label);
    assert(L.rspec().type() != relocInfo::metadata_type, "Must use InlinedMetadata");
    // We currently do not use oop 'bound' literals.
    // If the code evolves and the following assert is triggered,
    // we need to implement InlinedOop (see InlinedMetadata).
    assert(L.rspec().type() != relocInfo::oop_type, "Inlined oops not supported");
    // Note: relocation is handled by the relocate calls in ldr_literal
    AbstractAssembler::emit_address((address)L.target());
  }

  void bind_literal(InlinedString& L) {
    const char* msg = L.msg();
    if (code()->consts()->contains((address)msg)) {
      // The Label should not be used; avoid binding it
      // to detect errors.
      return;
    }
    AARCH64_ONLY(align(wordSize));
    bind(L.label);
    AbstractAssembler::emit_address((address)L.msg());
  }

  void bind_literal(InlinedMetadata& L) {
    AARCH64_ONLY(align(wordSize));
    bind(L.label);
    relocate(metadata_Relocation::spec_for_immediate());
    AbstractAssembler::emit_address((address)L.data());
  }

  void load_mirror(Register mirror, Register method, Register tmp);

  // Porting layer between 32-bit ARM and AArch64

#define COMMON_INSTR_1(common_mnemonic, aarch64_mnemonic, arm32_mnemonic, arg_type) \
  void common_mnemonic(arg_type arg) { \
    AARCH64_ONLY(aarch64_mnemonic) NOT_AARCH64(arm32_mnemonic) (arg); \
  }

#define COMMON_INSTR_2(common_mnemonic, aarch64_mnemonic, arm32_mnemonic, arg1_type, arg2_type) \
  void common_mnemonic(arg1_type arg1, arg2_type arg2) { \
    AARCH64_ONLY(aarch64_mnemonic) NOT_AARCH64(arm32_mnemonic) (arg1, arg2); \
  }

#define COMMON_INSTR_3(common_mnemonic, aarch64_mnemonic, arm32_mnemonic, arg1_type, arg2_type, arg3_type) \
  void common_mnemonic(arg1_type arg1, arg2_type arg2, arg3_type arg3) { \
    AARCH64_ONLY(aarch64_mnemonic) NOT_AARCH64(arm32_mnemonic) (arg1, arg2, arg3); \
  }

  COMMON_INSTR_1(jump, br,  bx,  Register)
  COMMON_INSTR_1(call, blr, blx, Register)

  COMMON_INSTR_2(cbz_32,  cbz_w,  cbz,  Register, Label&)
  COMMON_INSTR_2(cbnz_32, cbnz_w, cbnz, Register, Label&)

  COMMON_INSTR_2(ldr_u32, ldr_w,  ldr,  Register, Address)
  COMMON_INSTR_2(ldr_s32, ldrsw,  ldr,  Register, Address)
  COMMON_INSTR_2(str_32,  str_w,  str,  Register, Address)

  COMMON_INSTR_2(mvn_32,  mvn_w,  mvn,  Register, Register)
  COMMON_INSTR_2(cmp_32,  cmp_w,  cmp,  Register, Register)
  COMMON_INSTR_2(neg_32,  neg_w,  neg,  Register, Register)
  COMMON_INSTR_2(clz_32,  clz_w,  clz,  Register, Register)
  COMMON_INSTR_2(rbit_32, rbit_w, rbit, Register, Register)

  COMMON_INSTR_2(cmp_32, cmp_w, cmp, Register, int)
  COMMON_INSTR_2(cmn_32, cmn_w, cmn, Register, int)

  COMMON_INSTR_3(add_32,  add_w,  add,  Register, Register, Register)
  COMMON_INSTR_3(sub_32,  sub_w,  sub,  Register, Register, Register)
  COMMON_INSTR_3(subs_32, subs_w, subs, Register, Register, Register)
  COMMON_INSTR_3(mul_32,  mul_w,  mul,  Register, Register, Register)
  COMMON_INSTR_3(and_32,  andr_w, andr, Register, Register, Register)
  COMMON_INSTR_3(orr_32,  orr_w,  orr,  Register, Register, Register)
  COMMON_INSTR_3(eor_32,  eor_w,  eor,  Register, Register, Register)

  COMMON_INSTR_3(add_32,  add_w,  add,  Register, Register, AsmOperand)
  COMMON_INSTR_3(sub_32,  sub_w,  sub,  Register, Register, AsmOperand)
  COMMON_INSTR_3(orr_32,  orr_w,  orr,  Register, Register, AsmOperand)
  COMMON_INSTR_3(eor_32,  eor_w,  eor,  Register, Register, AsmOperand)
  COMMON_INSTR_3(and_32,  andr_w, andr, Register, Register, AsmOperand)

  COMMON_INSTR_3(add_32,  add_w,  add,  Register, Register, int)
  COMMON_INSTR_3(adds_32, adds_w, adds, Register, Register, int)
  COMMON_INSTR_3(sub_32,  sub_w,  sub,  Register, Register, int)
  COMMON_INSTR_3(subs_32, subs_w, subs, Register, Register, int)

  COMMON_INSTR_2(tst_32, tst_w, tst, Register, unsigned int)
  COMMON_INSTR_2(tst_32, tst_w, tst, Register, AsmOperand)

  COMMON_INSTR_3(and_32, andr_w, andr, Register, Register, uint)
  COMMON_INSTR_3(orr_32, orr_w,  orr,  Register, Register, uint)
  COMMON_INSTR_3(eor_32, eor_w,  eor,  Register, Register, uint)

  COMMON_INSTR_1(cmp_zero_float,  fcmp0_s, fcmpzs, FloatRegister)
  COMMON_INSTR_1(cmp_zero_double, fcmp0_d, fcmpzd, FloatRegister)

  COMMON_INSTR_2(ldr_float,   ldr_s,   flds,   FloatRegister, Address)
  COMMON_INSTR_2(str_float,   str_s,   fsts,   FloatRegister, Address)
  COMMON_INSTR_2(mov_float,   fmov_s,  fcpys,  FloatRegister, FloatRegister)
  COMMON_INSTR_2(neg_float,   fneg_s,  fnegs,  FloatRegister, FloatRegister)
  COMMON_INSTR_2(abs_float,   fabs_s,  fabss,  FloatRegister, FloatRegister)
  COMMON_INSTR_2(sqrt_float,  fsqrt_s, fsqrts, FloatRegister, FloatRegister)
  COMMON_INSTR_2(cmp_float,   fcmp_s,  fcmps,  FloatRegister, FloatRegister)

  COMMON_INSTR_3(add_float,   fadd_s,  fadds,  FloatRegister, FloatRegister, FloatRegister)
  COMMON_INSTR_3(sub_float,   fsub_s,  fsubs,  FloatRegister, FloatRegister, FloatRegister)
  COMMON_INSTR_3(mul_float,   fmul_s,  fmuls,  FloatRegister, FloatRegister, FloatRegister)
  COMMON_INSTR_3(div_float,   fdiv_s,  fdivs,  FloatRegister, FloatRegister, FloatRegister)

  COMMON_INSTR_2(ldr_double,  ldr_d,   fldd,   FloatRegister, Address)
  COMMON_INSTR_2(str_double,  str_d,   fstd,   FloatRegister, Address)
  COMMON_INSTR_2(mov_double,  fmov_d,  fcpyd,  FloatRegister, FloatRegister)
  COMMON_INSTR_2(neg_double,  fneg_d,  fnegd,  FloatRegister, FloatRegister)
  COMMON_INSTR_2(cmp_double,  fcmp_d,  fcmpd,  FloatRegister, FloatRegister)
  COMMON_INSTR_2(abs_double,  fabs_d,  fabsd,  FloatRegister, FloatRegister)
  COMMON_INSTR_2(sqrt_double, fsqrt_d, fsqrtd, FloatRegister, FloatRegister)

  COMMON_INSTR_3(add_double,  fadd_d,  faddd,  FloatRegister, FloatRegister, FloatRegister)
  COMMON_INSTR_3(sub_double,  fsub_d,  fsubd,  FloatRegister, FloatRegister, FloatRegister)
  COMMON_INSTR_3(mul_double,  fmul_d,  fmuld,  FloatRegister, FloatRegister, FloatRegister)
  COMMON_INSTR_3(div_double,  fdiv_d,  fdivd,  FloatRegister, FloatRegister, FloatRegister)

  COMMON_INSTR_2(convert_f2d, fcvt_ds, fcvtds, FloatRegister, FloatRegister)
  COMMON_INSTR_2(convert_d2f, fcvt_sd, fcvtsd, FloatRegister, FloatRegister)

  COMMON_INSTR_2(mov_fpr2gpr_float, fmov_ws, fmrs, Register, FloatRegister)

#undef COMMON_INSTR_1
#undef COMMON_INSTR_2
#undef COMMON_INSTR_3
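
  // For illustration only: a COMMON_INSTR_2 use above such as
  //   COMMON_INSTR_2(ldr_u32, ldr_w, ldr, Register, Address)
  // defines a forwarding method that expands, per platform, to
  //   void ldr_u32(Register arg1, Address arg2) { ldr_w(arg1, arg2); }  // AArch64
  //   void ldr_u32(Register arg1, Address arg2) { ldr(arg1, arg2); }    // 32-bit ARM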
#ifdef AARCH64

  void mov(Register dst, Register src, AsmCondition cond) {
    if (cond == al) {
      mov(dst, src);
    } else {
      csel(dst, src, dst, cond);
    }
  }

  // Propagate other overloaded "mov" methods from Assembler.
  void mov(Register dst, Register src) { Assembler::mov(dst, src); }
  void mov(Register rd, int imm)       { Assembler::mov(rd, imm); }

  void mov(Register dst, int imm, AsmCondition cond) {
    assert(imm == 0 || imm == 1, "");
    if (imm == 0) {
      mov(dst, ZR, cond);
    } else if (imm == 1) {
      csinc(dst, dst, ZR, inverse(cond));
    } else if (imm == -1) {
      csinv(dst, dst, ZR, inverse(cond));
    } else {
      fatal("illegal mov(R%d,%d,cond)", dst->encoding(), imm);
    }
  }

  void movs(Register dst, Register src) { adds(dst, src, 0); }

#else // AARCH64

  void tbz(Register rt, int bit, Label& L) {
    assert(0 <= bit && bit < BitsPerWord, "bit number is out of range");
    tst(rt, 1 << bit);
    b(L, eq);
  }

  void tbnz(Register rt, int bit, Label& L) {
    assert(0 <= bit && bit < BitsPerWord, "bit number is out of range");
    tst(rt, 1 << bit);
    b(L, ne);
  }

  void cbz(Register rt, Label& L) {
    cmp(rt, 0);
    b(L, eq);
  }

  void cbz(Register rt, address target) {
    cmp(rt, 0);
    b(target, eq);
  }

  void cbnz(Register rt, Label& L) {
    cmp(rt, 0);
    b(L, ne);
  }

  void ret(Register dst = LR) {
    bx(dst);
  }

#endif // AARCH64

  Register zero_register(Register tmp) {
#ifdef AARCH64
    return ZR;
#else
    mov(tmp, 0);
    return tmp;
#endif
  }

  void logical_shift_left(Register dst, Register src, int shift) {
#ifdef AARCH64
    _lsl(dst, src, shift);
#else
    mov(dst, AsmOperand(src, lsl, shift));
#endif
  }

  void logical_shift_left_32(Register dst, Register src, int shift) {
#ifdef AARCH64
    _lsl_w(dst, src, shift);
#else
    mov(dst, AsmOperand(src, lsl, shift));
#endif
  }

  void logical_shift_right(Register dst, Register src, int shift) {
#ifdef AARCH64
    _lsr(dst, src, shift);
#else
    mov(dst, AsmOperand(src, lsr, shift));
#endif
  }

  void arith_shift_right(Register dst, Register src, int shift) {
#ifdef AARCH64
    _asr(dst, src, shift);
#else
    mov(dst, AsmOperand(src, asr, shift));
#endif
  }

  void asr_32(Register dst, Register src, int shift) {
#ifdef AARCH64
    _asr_w(dst, src, shift);
#else
    mov(dst, AsmOperand(src, asr, shift));
#endif
  }

  // If <cond> holds, compares r1 and r2. Otherwise, flags are set so that <cond> does not hold.
  void cond_cmp(Register r1, Register r2, AsmCondition cond) {
#ifdef AARCH64
    ccmp(r1, r2, flags_for_condition(inverse(cond)), cond);
#else
    cmp(r1, r2, cond);
#endif
  }

  // If <cond> holds, compares r and imm. Otherwise, flags are set so that <cond> does not hold.
  void cond_cmp(Register r, int imm, AsmCondition cond) {
#ifdef AARCH64
    ccmp(r, imm, flags_for_condition(inverse(cond)), cond);
#else
    cmp(r, imm, cond);
#endif
  }

  void align_reg(Register dst, Register src, int align) {
    assert (is_power_of_2(align), "should be");
#ifdef AARCH64
    andr(dst, src, ~(uintx)(align-1));
#else
    bic(dst, src, align-1);
#endif
  }

  void prefetch_read(Address addr) {
#ifdef AARCH64
    prfm(pldl1keep, addr);
#else
    pld(addr);
#endif
  }

  void raw_push(Register r1, Register r2) {
#ifdef AARCH64
    stp(r1, r2, Address(SP, -2*wordSize, pre_indexed));
#else
    assert(r1->encoding() < r2->encoding(), "should be ordered");
    push(RegisterSet(r1) | RegisterSet(r2));
#endif
  }

  void raw_pop(Register r1, Register r2) {
#ifdef AARCH64
    ldp(r1, r2, Address(SP, 2*wordSize, post_indexed));
#else
    assert(r1->encoding() < r2->encoding(), "should be ordered");
    pop(RegisterSet(r1) | RegisterSet(r2));
#endif
  }

  void raw_push(Register r1, Register r2, Register r3) {
#ifdef AARCH64
    raw_push(r1, r2);
    raw_push(r3, ZR);
#else
    assert(r1->encoding() < r2->encoding() && r2->encoding() < r3->encoding(), "should be ordered");
    push(RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3));
#endif
  }

  void raw_pop(Register r1, Register r2, Register r3) {
#ifdef AARCH64
    raw_pop(r3, ZR);
    raw_pop(r1, r2);
#else
    assert(r1->encoding() < r2->encoding() && r2->encoding() < r3->encoding(), "should be ordered");
    pop(RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3));
#endif
  }

  // Restores registers r1 and r2 previously saved by raw_push(r1, r2, ret_addr) and returns by ret_addr. Clobbers LR.
  void raw_pop_and_ret(Register r1, Register r2) {
#ifdef AARCH64
    raw_pop(r1, r2, LR);
    ret();
#else
    raw_pop(r1, r2, PC);
#endif
  }

  void indirect_jump(Address addr, Register scratch) {
#ifdef AARCH64
    ldr(scratch, addr);
    br(scratch);
#else
    ldr(PC, addr);
#endif
  }

  void indirect_jump(InlinedAddress& literal, Register scratch) {
#ifdef AARCH64
    ldr_literal(scratch, literal);
    br(scratch);
#else
    ldr_literal(PC, literal);
#endif
  }

#ifndef AARCH64
  void neg(Register dst, Register src) {
    rsb(dst, src, 0);
  }
#endif

  void branch_if_negative_32(Register r, Label& L) {
    // Note about branch_if_negative_32() / branch_if_any_negative_32() implementation for AArch64:
    // tbnz is not used instead of tst & b.mi because destination may be out of tbnz range (+-32KB)
    // since these methods are used in LIR_Assembler::emit_arraycopy() to jump to stub entry.
    tst_32(r, r);
    b(L, mi);
  }

  void branch_if_any_negative_32(Register r1, Register r2, Register tmp, Label& L) {
#ifdef AARCH64
    orr_32(tmp, r1, r2);
    tst_32(tmp, tmp);
#else
    orrs(tmp, r1, r2);
#endif
    b(L, mi);
  }

  void branch_if_any_negative_32(Register r1, Register r2, Register r3, Register tmp, Label& L) {
    orr_32(tmp, r1, r2);
#ifdef AARCH64
    orr_32(tmp, tmp, r3);
    tst_32(tmp, tmp);
#else
    orrs(tmp, tmp, r3);
#endif
    b(L, mi);
  }

  void add_ptr_scaled_int32(Register dst, Register r1, Register r2, int shift) {
#ifdef AARCH64
    add(dst, r1, r2, ex_sxtw, shift);
#else
    add(dst, r1, AsmOperand(r2, lsl, shift));
#endif
  }

  void sub_ptr_scaled_int32(Register dst, Register r1, Register r2, int shift) {
#ifdef AARCH64
    sub(dst, r1, r2, ex_sxtw, shift);
#else
    sub(dst, r1, AsmOperand(r2, lsl, shift));
#endif
  }

  // klass oop manipulations if compressed

#ifdef AARCH64
  void load_klass(Register dst_klass, Register src_oop);
#else
  void load_klass(Register dst_klass, Register src_oop, AsmCondition cond = al);
#endif // AARCH64

  void store_klass(Register src_klass, Register dst_oop);

#ifdef AARCH64
  void store_klass_gap(Register dst);
#endif // AARCH64

  // oop manipulations

  void load_heap_oop(Register dst, Address src);
  void store_heap_oop(Register src, Address dst);
  void store_heap_oop(Address dst, Register src) {
    store_heap_oop(src, dst);
  }
  void store_heap_oop_null(Register src, Address dst);

#ifdef AARCH64
  void encode_heap_oop(Register dst, Register src);
  void encode_heap_oop(Register r) {
    encode_heap_oop(r, r);
  }
  void decode_heap_oop(Register dst, Register src);
  void decode_heap_oop(Register r) {
    decode_heap_oop(r, r);
  }

#ifdef COMPILER2
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_oop(Register dst, jobject obj);
#endif

  void encode_klass_not_null(Register r);
  void encode_klass_not_null(Register dst, Register src);
  void decode_klass_not_null(Register r);
  void decode_klass_not_null(Register dst, Register src);

  void reinit_heapbase();

#ifdef ASSERT
  void verify_heapbase(const char* msg);
#endif // ASSERT

  static int instr_count_for_mov_slow(intptr_t c);
  static int instr_count_for_mov_slow(address addr);
  static int instr_count_for_decode_klass_not_null();
#endif // AARCH64

  void ldr_global_ptr(Register reg, address address_of_global);
  void ldr_global_s32(Register reg, address address_of_global);
  void ldrb_global(Register reg, address address_of_global);

  // address_placeholder_instruction is an invalid instruction, used
  // as a placeholder in code for the address of a label.
  enum { address_placeholder_instruction = 0xFFFFFFFF };

  void emit_address(Label& L) {
    assert(!L.is_bound(), "otherwise address will not be patched");
    target(L); // creates a relocation which will be patched later

    assert ((offset() & (wordSize-1)) == 0, "should be aligned by word size");

#ifdef AARCH64
    emit_int32(address_placeholder_instruction);
    emit_int32(address_placeholder_instruction);
#else
    AbstractAssembler::emit_address((address)address_placeholder_instruction);
#endif
  }

  void b(address target, AsmCondition cond = al) {
    Assembler::b(target, cond);
  }
  void b(Label& L, AsmCondition cond = al) {
    // internal jumps
    Assembler::b(target(L), cond);
  }

  void bl(address target NOT_AARCH64_ARG(AsmCondition cond = al)) {
    Assembler::bl(target NOT_AARCH64_ARG(cond));
  }
  void bl(Label& L NOT_AARCH64_ARG(AsmCondition cond = al)) {
    // internal calls
    Assembler::bl(target(L) NOT_AARCH64_ARG(cond));
  }

#ifndef AARCH64
  void adr(Register dest, Label& L, AsmCondition cond = al) {
    int delta = target(L) - pc() - 8;
    if (delta >= 0) {
      add(dest, PC, delta, cond);
    } else {
      sub(dest, PC, -delta, cond);
    }
  }
#endif // !AARCH64

  // Variable-length jumps and calls. We now distinguish only the
  // patchable case from the other cases. Patchable must be
  // distinguished from relocatable. Relocatable means the generated code
  // containing the jump/call may move. Patchable means that the
  // targeted address may be changed later.

  // Non-patchable versions:
  // - used only for relocInfo::runtime_call_type and relocInfo::none
  // - may use relative or absolute format (do not use relocInfo::none
  //   if the generated code may move)
  // - the implementation takes into account the switch to THUMB mode if the
  //   destination is a THUMB address
  // - the implementation supports far targets
  //
  // To reduce regression risk, scratch still defaults to noreg on
  // arm32. This results in patchable instructions. However, if
  // patching really matters, the call sites should be modified to
  // use patchable_call or patchable_jump. If patching is not required
  // and a register can be clobbered, it should be explicitly
  // specified to allow future optimizations.
  void jump(address target,
            relocInfo::relocType rtype = relocInfo::runtime_call_type,
            Register scratch = AARCH64_ONLY(Rtemp) NOT_AARCH64(noreg)
#ifndef AARCH64
            , AsmCondition cond = al
#endif
            );

  void call(address target,
            RelocationHolder rspec
            NOT_AARCH64_ARG(AsmCondition cond = al));

  void call(address target,
            relocInfo::relocType rtype = relocInfo::runtime_call_type
            NOT_AARCH64_ARG(AsmCondition cond = al)) {
    call(target, Relocation::spec_simple(rtype) NOT_AARCH64_ARG(cond));
  }

  void jump(AddressLiteral dest) {
    jump(dest.target(), dest.reloc());
  }
#ifndef AARCH64
  void jump(address dest, relocInfo::relocType rtype, AsmCondition cond) {
    jump(dest, rtype, Rtemp, cond);
  }
#endif

  void call(AddressLiteral dest) {
    call(dest.target(), dest.reloc());
  }
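
  // Illustrative sketch (hypothetical targets): a plain, non-patchable runtime
  // call and jump as they might appear in a stub:
  //   __ call(some_runtime_entry, relocInfo::runtime_call_type);
  //   __ jump(some_stub_entry, relocInfo::runtime_call_type, Rtemp);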
  // Patchable version:
  // - set_destination can be used to atomically change the target
  //
  // The targets for patchable_jump and patchable_call must be in the
  // code cache.
  // [ including possible extensions of the code cache, like AOT code ]
  //
  // To reduce regression risk, scratch still defaults to noreg on
  // arm32. If a register can be clobbered, it should be explicitly
  // specified to allow future optimizations.
  void patchable_jump(address target,
                      relocInfo::relocType rtype = relocInfo::runtime_call_type,
                      Register scratch = AARCH64_ONLY(Rtemp) NOT_AARCH64(noreg)
#ifndef AARCH64
                      , AsmCondition cond = al
#endif
                      );

  // patchable_call may scratch Rtemp
  int patchable_call(address target,
                     RelocationHolder const& rspec,
                     bool c2 = false);

  int patchable_call(address target,
                     relocInfo::relocType rtype,
                     bool c2 = false) {
    return patchable_call(target, Relocation::spec_simple(rtype), c2);
  }

#if defined(AARCH64) && defined(COMPILER2)
  static int call_size(address target, bool far, bool patchable);
#endif

#ifdef AARCH64
  static bool page_reachable_from_cache(address target);
#endif
  static bool _reachable_from_cache(address target);
  static bool _cache_fully_reachable();
  bool cache_fully_reachable();
  bool reachable_from_cache(address target);

  void zero_extend(Register rd, Register rn, int bits);
  void sign_extend(Register rd, Register rn, int bits);

  inline void zap_high_non_significant_bits(Register r) {
#ifdef AARCH64
    if (ZapHighNonSignificantBits) {
      movk(r, 0xBAAD, 48);
      movk(r, 0xF00D, 32);
    }
#endif
  }

#ifndef AARCH64
  void long_move(Register rd_lo, Register rd_hi,
                 Register rn_lo, Register rn_hi,
                 AsmCondition cond = al);
  void long_shift(Register rd_lo, Register rd_hi,
                  Register rn_lo, Register rn_hi,
                  AsmShift shift, Register count);
  void long_shift(Register rd_lo, Register rd_hi,
                  Register rn_lo, Register rn_hi,
                  AsmShift shift, int count);

  void atomic_cas(Register tmpreg1, Register tmpreg2, Register oldval, Register newval, Register base, int offset);
  void atomic_cas_bool(Register oldval, Register newval, Register base, int offset, Register tmpreg);
  void atomic_cas64(Register temp_lo, Register temp_hi, Register temp_result, Register oldval_lo, Register oldval_hi, Register newval_lo, Register newval_hi, Register base, int offset);
#endif // !AARCH64

  void cas_for_lock_acquire(Register oldval, Register newval, Register base, Register tmp, Label &slow_case, bool allow_fallthrough_on_failure = false, bool one_shot = false);
  void cas_for_lock_release(Register oldval, Register newval, Register base, Register tmp, Label &slow_case, bool allow_fallthrough_on_failure = false, bool one_shot = false);

#ifndef PRODUCT
  // Preserves flags and all registers.
  // On SMP the updated value might not be visible to external observers without a synchronization barrier.
  void cond_atomic_inc32(AsmCondition cond, int* counter_addr);
#endif // !PRODUCT

  // unconditional non-atomic increment
  void inc_counter(address counter_addr, Register tmpreg1, Register tmpreg2);
  void inc_counter(int* counter_addr, Register tmpreg1, Register tmpreg2) {
    inc_counter((address) counter_addr, tmpreg1, tmpreg2);
  }

  void pd_patch_instruction(address branch, address target);

  // Loading and storing values by size and signed-ness;
  // size must not exceed wordSize (i.e. 8-byte values are not supported on 32-bit ARM);
  // each of these calls generates exactly one load or store instruction,
  // so src can be a pre- or post-indexed address.
#ifdef AARCH64
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed);
  void store_sized_value(Register src, Address dst, size_t size_in_bytes);
#else
  // 32-bit ARM variants also support conditional execution
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, AsmCondition cond = al);
  void store_sized_value(Register src, Address dst, size_t size_in_bytes, AsmCondition cond = al);
#endif
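
  // Illustrative sketch (hypothetical registers and offset): loading a signed
  // 16-bit field in a single instruction, with the extension chosen by 'is_signed':
  //   __ load_sized_value(R0, Address(R1, some_field_offset), 2, true /* is_signed */);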
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               Register itable_index,
                               Register method_result,
                               Register temp_reg1,
                               Register temp_reg2,
                               Label& L_no_such_interface);

  // Compare char[] arrays aligned to 4 bytes.
  void char_arrays_equals(Register ary1, Register ary2,
                          Register limit, Register result,
                          Register chr1, Register chr2, Label& Ldone);

  void floating_cmp(Register dst);

  // improved x86 portability (minimizing source code changes)

  void ldr_literal(Register rd, AddressLiteral addr) {
    relocate(addr.rspec());
#ifdef AARCH64
    ldr(rd, addr.target());
#else
    ldr(rd, Address(PC, addr.target() - pc() - 8));
#endif
  }

  void lea(Register Rd, AddressLiteral addr) {
    // Never dereferenced, as on x86 (lval status ignored)
    mov_address(Rd, addr.target(), addr.rspec());
  }

  void restore_default_fp_mode();

#ifdef COMPILER2
#ifdef AARCH64
  // Code used by the cmpFastLock and cmpFastUnlock mach instructions in the .ad file.
  void fast_lock(Register obj, Register box, Register scratch, Register scratch2, Register scratch3);
  void fast_unlock(Register obj, Register box, Register scratch, Register scratch2, Register scratch3);
#else
  void fast_lock(Register obj, Register box, Register scratch, Register scratch2);
  void fast_unlock(Register obj, Register box, Register scratch, Register scratch2);
#endif
#endif

#ifdef AARCH64

#define F(mnemonic)                                     \
  void mnemonic(Register rt, address target) {          \
    Assembler::mnemonic(rt, target);                    \
  }                                                     \
  void mnemonic(Register rt, Label& L) {                \
    Assembler::mnemonic(rt, target(L));                 \
  }

  F(cbz_w);
  F(cbnz_w);
  F(cbz);
  F(cbnz);

#undef F

#define F(mnemonic)                                     \
  void mnemonic(Register rt, int bit, address target) { \
    Assembler::mnemonic(rt, bit, target);               \
  }                                                     \
  void mnemonic(Register rt, int bit, Label& L) {       \
    Assembler::mnemonic(rt, bit, target(L));            \
  }

  F(tbz);
  F(tbnz);
#undef F

#endif // AARCH64

};


// The purpose of this class is to build several code fragments of the same size,
// in order to allow a fast table branch.

class FixedSizeCodeBlock VALUE_OBJ_CLASS_SPEC {
 public:
  FixedSizeCodeBlock(MacroAssembler* masm, int size_in_instrs, bool enabled);
  ~FixedSizeCodeBlock();

 private:
  MacroAssembler* _masm;
  address _start;
  int _size_in_instrs;
  bool _enabled;
};


#endif // CPU_ARM_VM_MACROASSEMBLER_ARM_HPP