/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_ARM_VM_MACROASSEMBLER_ARM_HPP
#define CPU_ARM_VM_MACROASSEMBLER_ARM_HPP

#include "code/relocInfo.hpp"
#include "code/relocInfo_ext.hpp"

class BiasedLockingCounters;

// Introduced AddressLiteral and its subclasses to ease portability from
// x86 and avoid relocation issues
class AddressLiteral {
  RelocationHolder _rspec;
  // Typically, when we use an AddressLiteral we want its rval.
  // However, in some situations we want the lval (effective address) of the item.
  // We provide a special factory for making those lvals.
  bool _is_lval;

  address _target;

 private:
  static relocInfo::relocType reloc_for_target(address target) {
    // Used for ExternalAddress or when the type is not specified.
    // Sometimes ExternalAddress is used for values which aren't
    // exactly addresses, like the card table base.
    // external_word_type can't be used for values in the first page,
    // so just skip the reloc in that case.
    return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
  }

  void set_rspec(relocInfo::relocType rtype);

 protected:
  // creation
  AddressLiteral()
    : _is_lval(false),
      _target(NULL)
  {}

 public:

  AddressLiteral(address target, relocInfo::relocType rtype) {
    _is_lval = false;
    _target = target;
    set_rspec(rtype);
  }

  AddressLiteral(address target, RelocationHolder const& rspec)
    : _rspec(rspec),
      _is_lval(false),
      _target(target)
  {}

  AddressLiteral(address target) {
    _is_lval = false;
    _target = target;
    set_rspec(reloc_for_target(target));
  }

  AddressLiteral addr() {
    AddressLiteral ret = *this;
    ret._is_lval = true;
    return ret;
  }

 private:

  address target() { return _target; }
  bool is_lval() { return _is_lval; }

  relocInfo::relocType reloc() const { return _rspec.type(); }
  const RelocationHolder& rspec() const { return _rspec; }

  friend class Assembler;
  friend class MacroAssembler;
  friend class Address;
  friend class LIR_Assembler;
  friend class InlinedAddress;
};

class ExternalAddress: public AddressLiteral {

 public:

  ExternalAddress(address target) : AddressLiteral(target) {}

};

class InternalAddress: public AddressLiteral {

 public:

  InternalAddress(address target) : AddressLiteral(target, relocInfo::internal_word_type) {}

};

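// Illustrative sketch (an assumption, not part of this interface): typical
// uses of the literal classes above. ExternalAddress picks a relocation type
// automatically via reloc_for_target(); InternalAddress marks a target inside
// the generated code. Assuming 'masm' is a MacroAssembler* and 'global_addr'
// points to some VM-global cell:
//   masm->lea(Rtemp, ExternalAddress(global_addr));   // materialize the address
//   masm->ldr_global_s32(Rtemp, global_addr);         // load the 32-bit value
// The addr() factory exists for x86 portability; on ARM, lea() ignores the
// lval status (see lea() near the end of MacroAssembler).
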
// Inlined constants, for use with ldr_literal / bind_literal
// Note: InlinedInteger not supported (use mov_slow(Register, int[, cond]))
class InlinedLiteral: StackObj {
 public:
  Label label; // needs to be public for direct access with &
  InlinedLiteral() {
  }
};

class InlinedMetadata: public InlinedLiteral {
 private:
  Metadata* _data;

 public:
  InlinedMetadata(Metadata* data): InlinedLiteral() {
    _data = data;
  }
  Metadata* data() { return _data; }
};

// Currently unused
// class InlinedOop: public InlinedLiteral {
//  private:
//   jobject _jobject;
//
//  public:
//   InlinedOop(jobject target): InlinedLiteral() {
//     _jobject = target;
//   }
//   jobject jobject() { return _jobject; }
// };

class InlinedAddress: public InlinedLiteral {
 private:
  AddressLiteral _literal;

 public:

  InlinedAddress(jobject object): InlinedLiteral(), _literal((address)object, relocInfo::oop_type) {
    ShouldNotReachHere(); // use mov_oop (or implement InlinedOop)
  }

  InlinedAddress(Metadata* data): InlinedLiteral(), _literal((address)data, relocInfo::metadata_type) {
    ShouldNotReachHere(); // use InlinedMetadata or mov_metadata
  }

  InlinedAddress(address target, const RelocationHolder& rspec): InlinedLiteral(), _literal(target, rspec) {
    assert(rspec.type() != relocInfo::oop_type, "Do not use InlinedAddress for oops");
    assert(rspec.type() != relocInfo::metadata_type, "Do not use InlinedAddress for metadata");
  }

  InlinedAddress(address target, relocInfo::relocType rtype): InlinedLiteral(), _literal(target, rtype) {
    assert(rtype != relocInfo::oop_type, "Do not use InlinedAddress for oops");
    assert(rtype != relocInfo::metadata_type, "Do not use InlinedAddress for metadata");
  }

  // Note: default is relocInfo::none for InlinedAddress
  InlinedAddress(address target): InlinedLiteral(), _literal(target, relocInfo::none) {
  }

  address target() { return _literal.target(); }

  const RelocationHolder& rspec() const { return _literal.rspec(); }
};

class InlinedString: public InlinedLiteral {
 private:
  const char* _msg;

 public:
  InlinedString(const char* msg): InlinedLiteral() {
    _msg = msg;
  }
  const char* msg() { return _msg; }
};

class MacroAssembler: public Assembler {
 protected:

  // Support for VM calls
  //

  // This is the base routine called by the different versions of call_VM_leaf.
  void call_VM_leaf_helper(address entry_point, int number_of_arguments);

  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  virtual void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions);

 public:

  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe() {}
  virtual void check_and_handle_earlyret() {}

  // By default, we do not need relocation information for non
  // patchable absolute addresses. However, when needed by some
  // extensions, ignore_non_patchable_relocations can be modified,
  // returning false to preserve all relocation information.
  inline bool ignore_non_patchable_relocations() { return true; }

  // Initially added to the Assembler interface as a pure virtual:
  //   RegisterConstant delayed_value(..)
  // for:
  //   6812678 macro assembler needs delayed binding of a few constants (for 6655638)
  // this was subsequently modified to its present name and return type
  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset);


  void align(int modulus);

  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM methods.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.

  void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);

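  // Illustrative sketch (an assumption, not taken from this file): a typical
  // call site in a code generator, where 'SomeRuntime::entry' stands for a
  // hypothetical VM entry point taking one argument:
  //   masm->call_VM(R0, CAST_FROM_FN_PTR(address, SomeRuntime::entry), R1);
  // The oop result, if any, comes back in oop_result (R0 here); pending
  // exceptions are checked unless check_exceptions is false.
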
  // The following methods are required by templateTable.cpp,
  // but not used on ARM.
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);

  // Note: The super_call_VM calls are not used on ARM

  // Raw call, without saving/restoring registers, exception handling, etc.
  // Mainly used from various stubs.
  // Note: if 'save_R9_if_scratched' is true, call_VM may on some
  // platforms save values on the stack. Set it to false (and handle
  // R9 in the callers) if the top of the stack must not be modified
  // by call_VM.
  void call_VM(address entry_point, bool save_R9_if_scratched);

  void call_VM_leaf(address entry_point);
  void call_VM_leaf(address entry_point, Register arg_1);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  void get_vm_result(Register oop_result, Register tmp);
  void get_vm_result_2(Register metadata_result, Register tmp);

  // Always sets/resets sp, which defaults to SP if (last_sp == noreg)
  // Optionally sets/resets fp (use noreg to avoid setting it)
  // Optionally sets/resets pc depending on save_last_java_pc flag
  // Note: when saving PC, set_last_Java_frame returns PC's offset in the code section
  // (for oop_maps offset computation)
  int set_last_Java_frame(Register last_sp, Register last_fp, bool save_last_java_pc, Register tmp);
  void reset_last_Java_frame(Register tmp);
  // status set in set_last_Java_frame for reset_last_Java_frame
  bool _fp_saved;
  bool _pc_saved;

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) __ stop(error)
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#define STOP(error) __ block_comment(error); __ stop(error)
#endif

  void lookup_virtual_method(Register recv_klass,
                             Register vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // No registers are killed, except temp_regs.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp_reg2,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path);

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // temp_reg3 can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes:
  // - condition codes will be Z on success, NZ on failure.
  // - temp_reg will be 0 on success, non-0 on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp_reg2,
                                     Register temp_reg3, // auto assigned if noreg
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // Simplified, combined version, good for typical uses.
  // temp_reg3 can be noreg, if no temps are available. It is used only on the slow path.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Register temp_reg2,
                           Register temp_reg3, // auto assigned on slow path if noreg
                           Label& L_success);

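  // Illustrative sketch (an assumption): wiring the fast and slow paths
  // together as described above, with NULL meaning "fall through":
  //   Label L_ok, L_bad;
  //   masm->check_klass_subtype_fast_path(Rsub, Rsuper, Rtmp1, Rtmp2,
  //                                       &L_ok, &L_bad, NULL); // NULL: fall into slow path
  //   masm->check_klass_subtype_slow_path(Rsub, Rsuper, Rtmp1, Rtmp2, noreg,
  //                                       &L_ok, &L_bad);
  //   masm->bind(L_bad);   // not a subtype
  //   ...
  //   masm->bind(L_ok);    // is a subtype
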
  // Returns the address of the receiver parameter, using tmp as the base register. tmp and params_count can be the same.
  Address receiver_argument_address(Register params_base, Register params_count, Register tmp);

  void _verify_oop(Register reg, const char* s, const char* file, int line);
  void _verify_oop_addr(Address addr, const char* s, const char* file, int line);

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_oop(reg) _verify_oop(reg, "broken oop " #reg, __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr(addr, "broken oop ", __FILE__, __LINE__)
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  void null_check(Register reg, Register tmp, int offset = -1);
  inline void null_check(Register reg) { null_check(reg, noreg, -1); } // for C1 lir_null_check

  // Puts the address of the allocated object into register `obj`, and the end of the allocated object into register `obj_end`.
  void eden_allocate(Register obj, Register obj_end, Register tmp1, Register tmp2,
                     RegisterOrConstant size_expression, Label& slow_case);
  void tlab_allocate(Register obj, Register obj_end, Register tmp1,
                     RegisterOrConstant size_expression, Label& slow_case);

  void zero_memory(Register start, Register end, Register tmp);

  static bool needs_explicit_null_check(intptr_t offset);

  void arm_stack_overflow_check(int frame_size_in_bytes, Register tmp);
  void arm_stack_overflow_check(Register Rsize, Register tmp);

  void bang_stack_with_offset(int offset) {
    ShouldNotReachHere();
  }

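  // Illustrative sketch (an assumption): inline allocation with the helpers
  // above, using a constant size and a runtime fallback:
  //   Label slow_case, done;
  //   masm->tlab_allocate(Robj, Robj_end, Rtmp1,
  //                       RegisterOrConstant(obj_size_in_bytes), slow_case);
  //   // ... initialize the object, then:
  //   masm->b(done);
  //   masm->bind(slow_case);  // call into the runtime to allocate
  //   ...
  //   masm->bind(done);
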
  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg must be supplied.
  // tmp_reg must be supplied.
  // The optional slow case is for implementations (interpreter and C1) which branch to
  // the slow case directly. If slow_case is NULL, then this leaves the condition
  // codes set (for C2's Fast_Lock node) and jumps to the done label.
  // Falls through for the fast locking attempt.
  // Returns the offset of the first potentially-faulting instruction for null
  // check info (currently consumed only by C1). If
  // swap_reg_contains_mark is true then returns -1 as it is assumed
  // the calling code has already passed any potential faults.
  // Notes:
  // - swap_reg and tmp_reg are scratched
  // - Rtemp was (implicitly) scratched and can now be specified as tmp2
  int biased_locking_enter(Register obj_reg, Register swap_reg, Register tmp_reg,
                           bool swap_reg_contains_mark,
                           Register tmp2,
                           Label& done, Label& slow_case,
                           BiasedLockingCounters* counters = NULL);
  void biased_locking_exit(Register obj_reg, Register temp_reg, Label& done);

  // Building block for the CAS cases of biased locking: performs a CAS and records statistics.
  // The optional slow_case label is used to transfer control if the CAS fails. Otherwise leaves condition codes set.
  void biased_locking_enter_with_cas(Register obj_reg, Register old_mark_reg, Register new_mark_reg,
                                     Register tmp, Label& slow_case, int* counter_addr);

  void resolve_jobject(Register value, Register tmp1, Register tmp2);

  void nop() {
    mov(R0, R0);
  }

  void push(Register rd, AsmCondition cond = al) {
    assert(rd != SP, "unpredictable instruction");
    str(rd, Address(SP, -wordSize, pre_indexed), cond);
  }

  void push(RegisterSet reg_set, AsmCondition cond = al) {
    assert(!reg_set.contains(SP), "unpredictable instruction");
    stmdb(SP, reg_set, writeback, cond);
  }

  void pop(Register rd, AsmCondition cond = al) {
    assert(rd != SP, "unpredictable instruction");
    ldr(rd, Address(SP, wordSize, post_indexed), cond);
  }

  void pop(RegisterSet reg_set, AsmCondition cond = al) {
    assert(!reg_set.contains(SP), "unpredictable instruction");
    ldmia(SP, reg_set, writeback, cond);
  }

  void fpushd(FloatRegister fd, AsmCondition cond = al) {
    fstmdbd(SP, FloatRegisterSet(fd), writeback, cond);
  }

  void fpushs(FloatRegister fd, AsmCondition cond = al) {
    fstmdbs(SP, FloatRegisterSet(fd), writeback, cond);
  }

  void fpopd(FloatRegister fd, AsmCondition cond = al) {
    fldmiad(SP, FloatRegisterSet(fd), writeback, cond);
  }

  void fpops(FloatRegister fd, AsmCondition cond = al) {
    fldmias(SP, FloatRegisterSet(fd), writeback, cond);
  }

  // Order access primitives
  enum Membar_mask_bits {
    StoreStore = 1 << 3,
    LoadStore  = 1 << 2,
    StoreLoad  = 1 << 1,
    LoadLoad   = 1 << 0
  };

  void membar(Membar_mask_bits mask,
              Register tmp,
              bool preserve_flags = true,
              Register load_tgt = noreg);

  void breakpoint(AsmCondition cond = al);
  void stop(const char* msg);
  // prints msg and continues
  void warn(const char* msg);
  void unimplemented(const char* what = "");
  void should_not_reach_here() { stop("should not reach here"); }
  static void debug(const char* msg, const intx* registers);

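  // Illustrative sketch (an assumption): membar masks are built by OR-ing the
  // Membar_mask_bits above, e.g. to order earlier stores against later loads
  // and stores:
  //   masm->membar(MacroAssembler::Membar_mask_bits(StoreLoad | StoreStore), Rtemp);
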
  // Create a walkable frame to help track down who called this code.
  // Returns the frame size in words.
  int should_not_call_this() {
    raw_push(FP, LR);
    should_not_reach_here();
    flush();
    return 2; // frame_size_in_words (FP+LR)
  }

  int save_all_registers();
  void restore_all_registers();
  int save_caller_save_registers();
  void restore_caller_save_registers();

  void add_rc(Register dst, Register arg1, RegisterOrConstant arg2);

  // add_slow and mov_slow are used to manipulate offsets larger than 1024;
  // these functions are not expected to handle all possible constants,
  // only those that can actually occur during compilation.
  void add_slow(Register rd, Register rn, int c);
  void sub_slow(Register rd, Register rn, int c);


  void mov_slow(Register rd, intptr_t c, AsmCondition cond = al);
  void mov_slow(Register rd, const char* string);
  void mov_slow(Register rd, address addr);

  void patchable_mov_oop(Register rd, jobject o, int oop_index) {
    mov_oop(rd, o, oop_index);
  }
  void mov_oop(Register rd, jobject o, int index = 0, AsmCondition cond = al);

  void patchable_mov_metadata(Register rd, Metadata* o, int index) {
    mov_metadata(rd, o, index);
  }
  void mov_metadata(Register rd, Metadata* o, int index = 0);

  void mov_float(FloatRegister fd, jfloat c, AsmCondition cond = al);
  void mov_double(FloatRegister fd, jdouble c, AsmCondition cond = al);


  // Note: this variant of mov_address assumes the address moves with
  // the code. Do *not* implement it with non-relocated instructions,
  // unless PC-relative.
  void mov_relative_address(Register rd, address addr, AsmCondition cond = al) {
    int offset = addr - pc() - 8;
    assert((offset & 3) == 0, "bad alignment");
    if (offset >= 0) {
      assert(AsmOperand::is_rotated_imm(offset), "addr too far");
      add(rd, PC, offset, cond);
    } else {
      assert(AsmOperand::is_rotated_imm(-offset), "addr too far");
      sub(rd, PC, -offset, cond);
    }
  }

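  // Illustrative note (an assumption): the "- 8" above reflects the ARM
  // convention that reading PC yields the address of the current instruction
  // plus 8. For constants with no compact rotated-immediate encoding,
  // mov_slow is the general-purpose tool:
  //   masm->mov_slow(Rtemp, (intptr_t)0x12345678);  // may expand to several instructions
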
  // Runtime address that may vary from one execution to another. The
  // symbolic_reference describes what the address is, allowing
  // the address to be resolved in a different execution context.
  // Warning: do not implement as a PC-relative address.
  void mov_address(Register rd, address addr, symbolic_Relocation::symbolic_reference t) {
    mov_address(rd, addr, RelocationHolder::none);
  }

  // rspec can be RelocationHolder::none (for an ignored symbolic_Relocation).
  // In that case, the address is absolute and the generated code need
  // not be relocatable.
  void mov_address(Register rd, address addr, RelocationHolder const& rspec) {
    assert(rspec.type() != relocInfo::runtime_call_type, "do not use mov_address for runtime calls");
    assert(rspec.type() != relocInfo::static_call_type, "do not use mov_address for relocatable calls");
    if (rspec.type() == relocInfo::none) {
      // absolute address, relocation not needed
      mov_slow(rd, (intptr_t)addr);
      return;
    }
    if (VM_Version::supports_movw()) {
      relocate(rspec);
      int c = (int)addr;
      movw(rd, c & 0xffff);
      if ((unsigned int)c >> 16) {
        movt(rd, (unsigned int)c >> 16);
      }
      return;
    }
    Label skip_literal;
    InlinedAddress addr_literal(addr, rspec);
    ldr_literal(rd, addr_literal);
    b(skip_literal);
    bind_literal(addr_literal);
    bind(skip_literal);
  }

  // Note: Do not define mov_address for a Label
  //
  // Loads from addresses potentially within the code are now handled by
  // InlinedLiteral subclasses (to allow more flexibility on how the
  // ldr_literal is performed).

  void ldr_literal(Register rd, InlinedAddress& L) {
    assert(L.rspec().type() != relocInfo::runtime_call_type, "avoid ldr_literal for calls");
    assert(L.rspec().type() != relocInfo::static_call_type, "avoid ldr_literal for calls");
    relocate(L.rspec());
    ldr(rd, Address(PC, target(L.label) - pc() - 8));
  }

  void ldr_literal(Register rd, InlinedString& L) {
    const char* msg = L.msg();
    if (code()->consts()->contains((address)msg)) {
      // string address moves with the code
      ldr(rd, Address(PC, ((address)msg) - pc() - 8));
      return;
    }
    // Warning: use external strings with care. They are not relocated
    // if the code moves. If needed, use code_string to move them
    // to the consts section.
    ldr(rd, Address(PC, target(L.label) - pc() - 8));
  }

  void ldr_literal(Register rd, InlinedMetadata& L) {
    // relocation done in bind_literal for metadata
    ldr(rd, Address(PC, target(L.label) - pc() - 8));
  }

  void bind_literal(InlinedAddress& L) {
    bind(L.label);
    assert(L.rspec().type() != relocInfo::metadata_type, "Must use InlinedMetadata");
    // We currently do not use oop 'bound' literals.
    // If the code evolves and the following assert is triggered,
    // we need to implement InlinedOop (see InlinedMetadata).
    assert(L.rspec().type() != relocInfo::oop_type, "Inlined oops not supported");
    // Note: relocation is handled by relocate calls in ldr_literal
    AbstractAssembler::emit_address((address)L.target());
  }

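  // Illustrative sketch (an assumption): the literal-pool pattern used by
  // mov_address() above when movw/movt is unavailable:
  //   InlinedAddress lit(target_addr, rspec);
  //   Label over;
  //   masm->ldr_literal(Rdst, lit);  // PC-relative load of the literal
  //   masm->b(over);                 // skip the emitted data word
  //   masm->bind_literal(lit);       // emit the address word itself
  //   masm->bind(over);
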
  void bind_literal(InlinedString& L) {
    const char* msg = L.msg();
    if (code()->consts()->contains((address)msg)) {
      // The Label should not be used; avoid binding it
      // to detect errors.
      return;
    }
    bind(L.label);
    AbstractAssembler::emit_address((address)L.msg());
  }

  void bind_literal(InlinedMetadata& L) {
    bind(L.label);
    relocate(metadata_Relocation::spec_for_immediate());
    AbstractAssembler::emit_address((address)L.data());
  }

  void resolve_oop_handle(Register result);
  void load_mirror(Register mirror, Register method, Register tmp);

#define ARM_INSTR_1(common_mnemonic, arm32_mnemonic, arg_type) \
  void common_mnemonic(arg_type arg) { \
    arm32_mnemonic(arg); \
  }

#define ARM_INSTR_2(common_mnemonic, arm32_mnemonic, arg1_type, arg2_type) \
  void common_mnemonic(arg1_type arg1, arg2_type arg2) { \
    arm32_mnemonic(arg1, arg2); \
  }

#define ARM_INSTR_3(common_mnemonic, arm32_mnemonic, arg1_type, arg2_type, arg3_type) \
  void common_mnemonic(arg1_type arg1, arg2_type arg2, arg3_type arg3) { \
    arm32_mnemonic(arg1, arg2, arg3); \
  }

  ARM_INSTR_1(jump, bx,  Register)
  ARM_INSTR_1(call, blx, Register)

  ARM_INSTR_2(cbz_32,  cbz,  Register, Label&)
  ARM_INSTR_2(cbnz_32, cbnz, Register, Label&)

  ARM_INSTR_2(ldr_u32, ldr, Register, Address)
  ARM_INSTR_2(ldr_s32, ldr, Register, Address)
  ARM_INSTR_2(str_32,  str, Register, Address)

  ARM_INSTR_2(mvn_32,  mvn,  Register, Register)
  ARM_INSTR_2(cmp_32,  cmp,  Register, Register)
  ARM_INSTR_2(neg_32,  neg,  Register, Register)
  ARM_INSTR_2(clz_32,  clz,  Register, Register)
  ARM_INSTR_2(rbit_32, rbit, Register, Register)

  ARM_INSTR_2(cmp_32, cmp, Register, int)
  ARM_INSTR_2(cmn_32, cmn, Register, int)

  ARM_INSTR_3(add_32,  add,  Register, Register, Register)
  ARM_INSTR_3(sub_32,  sub,  Register, Register, Register)
  ARM_INSTR_3(subs_32, subs, Register, Register, Register)
  ARM_INSTR_3(mul_32,  mul,  Register, Register, Register)
  ARM_INSTR_3(and_32,  andr, Register, Register, Register)
  ARM_INSTR_3(orr_32,  orr,  Register, Register, Register)
  ARM_INSTR_3(eor_32,  eor,  Register, Register, Register)

  ARM_INSTR_3(add_32, add,  Register, Register, AsmOperand)
  ARM_INSTR_3(sub_32, sub,  Register, Register, AsmOperand)
  ARM_INSTR_3(orr_32, orr,  Register, Register, AsmOperand)
  ARM_INSTR_3(eor_32, eor,  Register, Register, AsmOperand)
  ARM_INSTR_3(and_32, andr, Register, Register, AsmOperand)


  ARM_INSTR_3(add_32,  add,  Register, Register, int)
  ARM_INSTR_3(adds_32, adds, Register, Register, int)
  ARM_INSTR_3(sub_32,  sub,  Register, Register, int)
  ARM_INSTR_3(subs_32, subs, Register, Register, int)

  ARM_INSTR_2(tst_32, tst, Register, unsigned int)
  ARM_INSTR_2(tst_32, tst, Register, AsmOperand)

  ARM_INSTR_3(and_32, andr, Register, Register, uint)
  ARM_INSTR_3(orr_32, orr,  Register, Register, uint)
  ARM_INSTR_3(eor_32, eor,  Register, Register, uint)

  ARM_INSTR_1(cmp_zero_float,  fcmpzs, FloatRegister)
  ARM_INSTR_1(cmp_zero_double, fcmpzd, FloatRegister)

  ARM_INSTR_2(ldr_float,  flds,   FloatRegister, Address)
  ARM_INSTR_2(str_float,  fsts,   FloatRegister, Address)
  ARM_INSTR_2(mov_float,  fcpys,  FloatRegister, FloatRegister)
  ARM_INSTR_2(neg_float,  fnegs,  FloatRegister, FloatRegister)
  ARM_INSTR_2(abs_float,  fabss,  FloatRegister, FloatRegister)
  ARM_INSTR_2(sqrt_float, fsqrts, FloatRegister, FloatRegister)
  ARM_INSTR_2(cmp_float,  fcmps,  FloatRegister, FloatRegister)

  ARM_INSTR_3(add_float, fadds, FloatRegister, FloatRegister, FloatRegister)
  ARM_INSTR_3(sub_float, fsubs, FloatRegister, FloatRegister, FloatRegister)
  ARM_INSTR_3(mul_float, fmuls, FloatRegister, FloatRegister, FloatRegister)
  ARM_INSTR_3(div_float, fdivs, FloatRegister, FloatRegister, FloatRegister)

  ARM_INSTR_2(ldr_double,  fldd,   FloatRegister, Address)
  ARM_INSTR_2(str_double,  fstd,   FloatRegister, Address)
  ARM_INSTR_2(mov_double,  fcpyd,  FloatRegister, FloatRegister)
  ARM_INSTR_2(neg_double,  fnegd,  FloatRegister, FloatRegister)
  ARM_INSTR_2(cmp_double,  fcmpd,  FloatRegister, FloatRegister)
  ARM_INSTR_2(abs_double,  fabsd,  FloatRegister, FloatRegister)
  ARM_INSTR_2(sqrt_double, fsqrtd, FloatRegister, FloatRegister)

  ARM_INSTR_3(add_double, faddd, FloatRegister, FloatRegister, FloatRegister)
  ARM_INSTR_3(sub_double, fsubd, FloatRegister, FloatRegister, FloatRegister)
  ARM_INSTR_3(mul_double, fmuld, FloatRegister, FloatRegister, FloatRegister)
  ARM_INSTR_3(div_double, fdivd, FloatRegister, FloatRegister, FloatRegister)

  ARM_INSTR_2(convert_f2d, fcvtds, FloatRegister, FloatRegister)
  ARM_INSTR_2(convert_d2f, fcvtsd, FloatRegister, FloatRegister)

  ARM_INSTR_2(mov_fpr2gpr_float, fmrs, Register, FloatRegister)

#undef ARM_INSTR_1
#undef ARM_INSTR_2
#undef ARM_INSTR_3


  void tbz(Register rt, int bit, Label& L) {
    assert(0 <= bit && bit < BitsPerWord, "bit number is out of range");
    tst(rt, 1 << bit);
    b(L, eq);
  }

  void tbnz(Register rt, int bit, Label& L) {
    assert(0 <= bit && bit < BitsPerWord, "bit number is out of range");
    tst(rt, 1 << bit);
    b(L, ne);
  }

  void cbz(Register rt, Label& L) {
    cmp(rt, 0);
    b(L, eq);
  }

  void cbz(Register rt, address target) {
    cmp(rt, 0);
    b(target, eq);
  }

  void cbnz(Register rt, Label& L) {
    cmp(rt, 0);
    b(L, ne);
  }

  void ret(Register dst = LR) {
    bx(dst);
  }


  Register zero_register(Register tmp) {
    mov(tmp, 0);
    return tmp;
  }

  void logical_shift_left(Register dst, Register src, int shift) {
    mov(dst, AsmOperand(src, lsl, shift));
  }

  void logical_shift_left_32(Register dst, Register src, int shift) {
    mov(dst, AsmOperand(src, lsl, shift));
  }

  void logical_shift_right(Register dst, Register src, int shift) {
    mov(dst, AsmOperand(src, lsr, shift));
  }

  void arith_shift_right(Register dst, Register src, int shift) {
    mov(dst, AsmOperand(src, asr, shift));
  }

  void asr_32(Register dst, Register src, int shift) {
    mov(dst, AsmOperand(src, asr, shift));
  }

  // If <cond> holds, compares r1 and r2. Otherwise, flags are set so that <cond> does not hold.
  void cond_cmp(Register r1, Register r2, AsmCondition cond) {
    cmp(r1, r2, cond);
  }

  // If <cond> holds, compares r and imm. Otherwise, flags are set so that <cond> does not hold.
  void cond_cmp(Register r, int imm, AsmCondition cond) {
    cmp(r, imm, cond);
  }

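  // Illustrative sketch (an assumption): cond_cmp allows chaining compares
  // without branches, e.g. branching only if r1 == r2 && r3 == r4:
  //   masm->cmp(r1, r2);            // sets eq iff r1 == r2
  //   masm->cond_cmp(r3, r4, eq);   // re-compares only while eq still holds
  //   masm->b(L_both_equal, eq);
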
  void align_reg(Register dst, Register src, int align) {
    assert(is_power_of_2(align), "should be");
    bic(dst, src, align - 1);
  }

  void prefetch_read(Address addr) {
    pld(addr);
  }

  void raw_push(Register r1, Register r2) {
    assert(r1->encoding() < r2->encoding(), "should be ordered");
    push(RegisterSet(r1) | RegisterSet(r2));
  }

  void raw_pop(Register r1, Register r2) {
    assert(r1->encoding() < r2->encoding(), "should be ordered");
    pop(RegisterSet(r1) | RegisterSet(r2));
  }

  void raw_push(Register r1, Register r2, Register r3) {
    assert(r1->encoding() < r2->encoding() && r2->encoding() < r3->encoding(), "should be ordered");
    push(RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3));
  }

  void raw_pop(Register r1, Register r2, Register r3) {
    assert(r1->encoding() < r2->encoding() && r2->encoding() < r3->encoding(), "should be ordered");
    pop(RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3));
  }

  // Restores registers r1 and r2 previously saved by raw_push(r1, r2, ret_addr) and returns via ret_addr. Clobbers LR.
  void raw_pop_and_ret(Register r1, Register r2) {
    raw_pop(r1, r2, PC);
  }

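  // Illustrative sketch (an assumption): the save/return pattern described
  // above, for a small assembly helper:
  //   masm->raw_push(R0, R1, LR);     // save two registers plus the return address
  //   ...                             // body may clobber R0, R1 and LR
  //   masm->raw_pop_and_ret(R0, R1);  // restore R0, R1 and load the saved LR into PC
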
  void indirect_jump(Address addr, Register scratch) {
    ldr(PC, addr);
  }

  void indirect_jump(InlinedAddress& literal, Register scratch) {
    ldr_literal(PC, literal);
  }

  void neg(Register dst, Register src) {
    rsb(dst, src, 0);
  }

  void branch_if_negative_32(Register r, Label& L) {
    // TODO: This function and branch_if_any_negative_32 could possibly
    // be revised after the aarch64 removal.
    // tbnz is not used instead of tst & b.mi because the destination may be out of tbnz range (+-32KB),
    // since these methods are used in LIR_Assembler::emit_arraycopy() to jump to stub entries.
    tst_32(r, r);
    b(L, mi);
  }

  void branch_if_any_negative_32(Register r1, Register r2, Register tmp, Label& L) {
    orrs(tmp, r1, r2);
    b(L, mi);
  }

  void branch_if_any_negative_32(Register r1, Register r2, Register r3, Register tmp, Label& L) {
    orr_32(tmp, r1, r2);
    orrs(tmp, tmp, r3);
    b(L, mi);
  }

  void add_ptr_scaled_int32(Register dst, Register r1, Register r2, int shift) {
    add(dst, r1, AsmOperand(r2, lsl, shift));
  }

  void sub_ptr_scaled_int32(Register dst, Register r1, Register r2, int shift) {
    sub(dst, r1, AsmOperand(r2, lsl, shift));
  }


  // klass oop manipulations if compressed

  void load_klass(Register dst_klass, Register src_oop, AsmCondition cond = al);

  void store_klass(Register src_klass, Register dst_oop);


  // oop manipulations

  void load_heap_oop(Register dst, Address src, Register tmp1 = noreg, Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
  void store_heap_oop(Address obj, Register new_val, Register tmp1 = noreg, Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
  void store_heap_oop_null(Address obj, Register new_val, Register tmp1 = noreg, Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);

  void access_load_at(BasicType type, DecoratorSet decorators, Address src, Register dst, Register tmp1, Register tmp2, Register tmp3);
  void access_store_at(BasicType type, DecoratorSet decorators, Address obj, Register new_val, Register tmp1, Register tmp2, Register tmp3, bool is_null);

  // Resolves obj for access. The result is placed in the same register.
  // All other registers are preserved.
  void resolve(DecoratorSet decorators, Register obj);


  void ldr_global_ptr(Register reg, address address_of_global);
  void ldr_global_s32(Register reg, address address_of_global);
  void ldrb_global(Register reg, address address_of_global);

  // address_placeholder_instruction is an invalid instruction and is used
  // as a placeholder in code for the address of a label
  enum { address_placeholder_instruction = 0xFFFFFFFF };

  void emit_address(Label& L) {
    assert(!L.is_bound(), "otherwise address will not be patched");
    target(L); // creates a relocation which will be patched later

    assert((offset() & (wordSize - 1)) == 0, "should be aligned by word size");

    AbstractAssembler::emit_address((address)address_placeholder_instruction);
  }

  void b(address target, AsmCondition cond = al) {
    Assembler::b(target, cond);
  }
  void b(Label& L, AsmCondition cond = al) {
    // internal jumps
    Assembler::b(target(L), cond);
  }

  void bl(address target, AsmCondition cond = al) {
    Assembler::bl(target, cond);
  }
  void bl(Label& L, AsmCondition cond = al) {
    // internal calls
    Assembler::bl(target(L), cond);
  }

  void adr(Register dest, Label& L, AsmCondition cond = al) {
    int delta = target(L) - pc() - 8;
    if (delta >= 0) {
      add(dest, PC, delta, cond);
    } else {
      sub(dest, PC, -delta, cond);
    }
  }

  // Variable-length jumps and calls. We now distinguish only the
  // patchable case from the other cases. Patchable must be
  // distinguished from relocatable. Relocatable means the generated code
  // containing the jump/call may move. Patchable means that the
  // targeted address may be changed later.

  // Non-patchable versions.
  // - used only for relocInfo::runtime_call_type and relocInfo::none
  // - may use relative or absolute format (do not use relocInfo::none
  //   if the generated code may move)
  // - the implementation takes into account the switch to THUMB mode if the
  //   destination is a THUMB address
  // - the implementation supports far targets
  //
  // To reduce regression risk, scratch still defaults to noreg on
  // arm32. This results in patchable instructions. However, if
  // patching really matters, the call sites should be modified to
  // use patchable_call or patchable_jump. If patching is not required
  // and a register can be clobbered, it should be explicitly
  // specified to allow future optimizations.
  void jump(address target,
            relocInfo::relocType rtype = relocInfo::runtime_call_type,
            Register scratch = noreg, AsmCondition cond = al);

  void call(address target,
            RelocationHolder rspec, AsmCondition cond = al);

  void call(address target,
            relocInfo::relocType rtype = relocInfo::runtime_call_type,
            AsmCondition cond = al) {
    call(target, Relocation::spec_simple(rtype), cond);
  }

  void jump(AddressLiteral dest) {
    jump(dest.target(), dest.reloc());
  }
  void jump(address dest, relocInfo::relocType rtype, AsmCondition cond) {
    jump(dest, rtype, Rtemp, cond);
  }

  void call(AddressLiteral dest) {
    call(dest.target(), dest.reloc());
  }

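  // Illustrative sketch (an assumption): a non-patchable runtime call and its
  // tail-call variant, where 'stub_entry' is a hypothetical stub address:
  //   masm->call(stub_entry, relocInfo::runtime_call_type);
  //   masm->jump(stub_entry, relocInfo::runtime_call_type, Rtemp);  // clobbers Rtemp
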
  // Patchable version:
  // - set_destination can be used to atomically change the target
  //
  // The targets for patchable_jump and patchable_call must be in the
  // code cache.
  // [ including possible extensions of the code cache, like AOT code ]
  //
  // To reduce regression risk, scratch still defaults to noreg on
  // arm32. If a register can be clobbered, it should be explicitly
  // specified to allow future optimizations.
  void patchable_jump(address target,
                      relocInfo::relocType rtype = relocInfo::runtime_call_type,
                      Register scratch = noreg, AsmCondition cond = al);

  // patchable_call may scratch Rtemp
  int patchable_call(address target,
                     RelocationHolder const& rspec,
                     bool c2 = false);

  int patchable_call(address target,
                     relocInfo::relocType rtype,
                     bool c2 = false) {
    return patchable_call(target, Relocation::spec_simple(rtype), c2);
  }

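  // Illustrative sketch (an assumption): a patchable call to a hypothetical
  // code-cache stub; unlike call() above, the emitted call site may later be
  // re-targeted atomically (see set_destination in the comment above):
  //   masm->patchable_call(stub_entry, relocInfo::static_call_type);
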
  static bool _reachable_from_cache(address target);
  static bool _cache_fully_reachable();
  bool cache_fully_reachable();
  bool reachable_from_cache(address target);

  void zero_extend(Register rd, Register rn, int bits);
  void sign_extend(Register rd, Register rn, int bits);

  inline void zap_high_non_significant_bits(Register r) {
  }

  void cmpoop(Register obj1, Register obj2);

  void long_move(Register rd_lo, Register rd_hi,
                 Register rn_lo, Register rn_hi,
                 AsmCondition cond = al);
  void long_shift(Register rd_lo, Register rd_hi,
                  Register rn_lo, Register rn_hi,
                  AsmShift shift, Register count);
  void long_shift(Register rd_lo, Register rd_hi,
                  Register rn_lo, Register rn_hi,
                  AsmShift shift, int count);

  void atomic_cas(Register tmpreg1, Register tmpreg2, Register oldval, Register newval, Register base, int offset);
  void atomic_cas_bool(Register oldval, Register newval, Register base, int offset, Register tmpreg);
  void atomic_cas64(Register temp_lo, Register temp_hi, Register temp_result, Register oldval_lo, Register oldval_hi, Register newval_lo, Register newval_hi, Register base, int offset);

  void cas_for_lock_acquire(Register oldval, Register newval, Register base, Register tmp, Label& slow_case, bool allow_fallthrough_on_failure = false, bool one_shot = false);
  void cas_for_lock_release(Register oldval, Register newval, Register base, Register tmp, Label& slow_case, bool allow_fallthrough_on_failure = false, bool one_shot = false);

#ifndef PRODUCT
  // Preserves flags and all registers.
  // On SMP the updated value might not be visible to external observers without a synchronization barrier.
  void cond_atomic_inc32(AsmCondition cond, int* counter_addr);
#endif // !PRODUCT

  // unconditional non-atomic increment
  void inc_counter(address counter_addr, Register tmpreg1, Register tmpreg2);
  void inc_counter(int* counter_addr, Register tmpreg1, Register tmpreg2) {
    inc_counter((address) counter_addr, tmpreg1, tmpreg2);
  }

  void pd_patch_instruction(address branch, address target, const char* file, int line);

  // Loading and storing values by size and signed-ness;
  // size must not exceed wordSize (i.e. 8-byte values are not supported on 32-bit ARM);
  // each of these calls generates exactly one load or store instruction,
  // so src can be a pre- or post-indexed address.
  // 32-bit ARM variants also support conditional execution.
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, AsmCondition cond = al);
  void store_sized_value(Register src, Address dst, size_t size_in_bytes, AsmCondition cond = al);

  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register temp_reg1,
                               Register temp_reg2,
                               Label& L_no_such_interface);

  // Compare char[] arrays aligned to 4 bytes.
  void char_arrays_equals(Register ary1, Register ary2,
                          Register limit, Register result,
                          Register chr1, Register chr2, Label& Ldone);


  void floating_cmp(Register dst);

  // improved x86 portability (minimizing source code changes)

  void ldr_literal(Register rd, AddressLiteral addr) {
    relocate(addr.rspec());
    ldr(rd, Address(PC, addr.target() - pc() - 8));
  }

  void lea(Register Rd, AddressLiteral addr) {
    // Never dereferenced, as on x86 (lval status ignored)
    mov_address(Rd, addr.target(), addr.rspec());
  }

  void restore_default_fp_mode();

#ifdef COMPILER2
  void fast_lock(Register obj, Register box, Register scratch, Register scratch2);
  void fast_unlock(Register obj, Register box, Register scratch, Register scratch2);
#endif

};


// The purpose of this class is to build several code fragments of the same size
// in order to allow fast table branches.

class FixedSizeCodeBlock {
 public:
  FixedSizeCodeBlock(MacroAssembler* masm, int size_in_instrs, bool enabled);
  ~FixedSizeCodeBlock();

 private:
  MacroAssembler* _masm;
  address _start;
  int _size_in_instrs;
  bool _enabled;
};


#endif // CPU_ARM_VM_MACROASSEMBLER_ARM_HPP