/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_VM_MACROASSEMBLER_X86_HPP
#define CPU_X86_VM_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "utilities/macros.hpp"
#include "runtime/rtmLocking.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1;      // as_Address()

 protected:

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr);

  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
#define COMMA ,

  virtual void call_VM_leaf_base(
    address entry_point,               // the entry point
    int     number_of_arguments        // the number of arguments to pop after the call
  );

  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg), then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg), then rsp will be used instead.
  virtual void call_VM_base(           // returns the register containing the thread upon return
    Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,              // the thread if computed before; use noreg otherwise
    Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,              // the entry point
    int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions          // whether to check for pending exceptions after return
  );

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // helpers for FPU flag access
  // tmp is a temporary register, if none is available use noreg
  void save_rax   (Register tmp);
  void restore_rax(Register tmp);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
        op == 0xE9 /* jmp */ ||
        op == 0xEB /* short jmp */ ||
        (op & 0xF0) == 0x70 /* short jcc */ ||
        op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */ ||
        op == 0xC7 && branch[1] == 0xF8 /* xbegin */,
        "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = target - (address) &disp[1];
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
      *disp = imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7) ? 2 : 1];
      int imm32 = target - (address) &disp[1];
      *disp = imm32;
    }
  }

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
  void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);

  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }

  void incrementl(AddressLiteral dst);
  void incrementl(ArrayAddress dst);

  void incrementq(AddressLiteral dst);

  // Alignment
  void align(int modulus);
  void align(int modulus, int target);

  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);


  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is setup correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
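  //
  // A minimal illustrative sketch (an assumption, not taken from this header): from
  // generated interpreter or stub code, a one-argument call into the VM might look like
  //
  //   __ call_VM(rax, CAST_FROM_FN_PTR(address, SomeRuntime::some_entry), rbx);
  //
  // where SomeRuntime::some_entry stands for a hypothetical runtime entry point, rbx
  // carries the single (non-thread) argument, and the oop result, if any, ends up in rax.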
  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result  (Register oop_result, Register thread);
  void get_vm_result_2(Register metadata_result, Register thread);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register thread,
                           Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  // thread in the default location (r15_thread on 64bit)
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  void reset_last_Java_frame(Register thread, bool clear_fp, bool clear_pc);

  // thread in the default location (r15_thread on 64bit)
  void reset_last_Java_frame(bool clear_fp, bool clear_pc);

  // Stores
  void store_check(Register obj);                // store check for obj - register is destroyed afterwards
  void store_check(Register obj, Address dst);   // same as above, dst is exact store location (reg. is destroyed)

#if INCLUDE_ALL_GCS

  void g1_write_barrier_pre(Register obj,
                            Register pre_val,
                            Register thread,
                            Register tmp,
                            bool tosca_live,
                            bool expand_call);

  void g1_write_barrier_post(Register store_addr,
                             Register new_val,
                             Register thread,
                             Register tmp,
                             Register tmp2);

#endif // INCLUDE_ALL_GCS

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // C++ bool manipulation

  void movbool(Register dst, Address src);
  void movbool(Address dst, bool boolconst);
  void movbool(Address dst, Register src);
  void testbool(Register dst);

  // oop manipulations
  void load_klass(Register dst, Register src);
  void store_klass(Register dst, Register src);

  void load_heap_oop(Register dst, Address src);
  void load_heap_oop_not_null(Register dst, Address src);
  void store_heap_oop(Address dst, Register src);
  void cmp_heap_oop(Register src1, Address src2, Register tmp = noreg);

  // Used for storing NULL. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

  void load_prototype_header(Register dst, Register src);

#ifdef _LP64
  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like NULL) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register r);
  void decode_heap_oop(Register r);
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_oop(Address dst, jobject obj);
  void cmp_narrow_oop(Register dst, jobject obj);
  void cmp_narrow_oop(Address dst, jobject obj);

  void encode_klass_not_null(Register r);
  void decode_klass_not_null(Register r);
  void encode_klass_not_null(Register dst, Register src);
  void decode_klass_not_null(Register dst, Register src);
  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_klass(Address dst, Klass* k);
  void cmp_narrow_klass(Register dst, Klass* k);
  void cmp_narrow_klass(Address dst, Klass* k);

  // Returns the byte size of the instructions generated by decode_klass_not_null()
  // when compressed klass pointers are being used.
  static int instr_size_for_decode_klass_not_null();

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

#endif // _LP64

  // Int division/remainder for Java
  // (as idivl, but checks for special case as described in JVM spec.)
  // returns idivl instruction offset for implicit exception handling
  int corrected_idivl(Register reg);

  // Long division/remainder for Java
  // (as idivq, but checks for special case as described in JVM spec.)
  // returns idivq instruction offset for implicit exception handling
  int corrected_idivq(Register reg);

  void int3();

  // Long operation macros for a 32bit cpu
  // Long negation for Java
  void lneg(Register hi, Register lo);

  // Long multiplication for Java
  // (destroys contents of eax, ebx, ecx and edx)
  void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y

  // Long shifts for Java
  // (semantics as described in JVM spec.)
  void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
  void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)

  // Long compare for Java
  // (semantics as described in JVM spec.)
  void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)


  // misc

  // Sign extension
  void sign_extend_short(Register reg);
  void sign_extend_byte(Register reg);

  // Division by power of 2, rounding towards 0
  void division_with_shift(Register reg, int shift_value);

  // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
  //
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
  void fcmp(Register tmp);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp(Register tmp, int index, bool pop_left, bool pop_right);

  // Floating-point comparison for Java
  // Compares the top-most stack entries on the FPU stack and stores the result in dst.
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // (semantics as described in JVM spec.)
  void fcmp2int(Register dst, bool unordered_is_less);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);

  // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
  // tmp is a temporary register, if none is available use noreg
  void fremr(Register tmp);


  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);

  // Inlined sin/cos generator for Java; must not use CPU instruction
  // directly on Intel as it does not have high enough precision
  // outside of the range [-pi/4, pi/4]. The extra argument indicates the
  // number of FPU stack slots in use; all but the topmost will
  // require saving if a slow case is necessary. Assumes argument is
  // on FP TOS; result is on FP TOS. No cpu registers are changed by
  // this code.
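  //
  // Illustrative sketch (an assumption, not taken from this header): with the argument
  // already on the FPU TOS, a caller might emit
  //
  //   __ trigfunc('s');
  //
  // where 's', 'c' and 't' are expected to select sin, cos and tan respectively.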
  void trigfunc(char trig, int num_fpu_regs_in_use = 1);

  // branch to L if FPU flag C2 is set/not set
  // tmp is a temporary register, if none is available use noreg
  void jC2 (Register tmp, Label& L);
  void jnC2(Register tmp, Label& L);

  // Pop ST (ffree & fincstp combined)
  void fpop();

  // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_float(Address src);

  // Store float value to 'address'. If UseSSE >= 1, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_float(Address dst);

  // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_double(Address src);

  // Store double value to 'address'. If UseSSE >= 2, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_double(Address dst);

  // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
  void push_fTOS();

  // pops double TOS element from CPU stack and pushes on FPU stack
  void pop_fTOS();

  void empty_FPU_stack();

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  // Round up to a power of two
  void round_to(Register reg, int modulus);

  // Callee saved registers handling
  void push_callee_saved_registers();
  void pop_callee_saved_registers();

  // allocation
  void eden_allocate(
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if known at compile time
    Register t1,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void tlab_allocate(
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if known at compile time
    Register t1,                       // temp register
    Register t2,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  Register tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); // returns TLS address
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);

  void incr_allocated_bytes(Register thread,
                            Register var_size_in_bytes, int con_size_in_bytes,
                            Register t1 = noreg);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);

  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  //----
  void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0

  // Debugging

  // only if +VerifyOops
  // TODO: Make these macros with file and line like sparc version!
  void verify_oop(Register reg, const char* s = "broken oop");
  void verify_oop_addr(Address addr, const char* s = "broken oop addr");

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");

  // Verify or restore cpu control state after JNI call
  void restore_cpu_control_state_after_jni();

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested()                                { stop("untested"); }

  void unimplemented(const char* what = "")      { char* b = new char[1024]; jio_snprintf(b, 1024, "unimplemented: %s", what); stop(b); }

  void should_not_reach_here()                   { stop("should not reach here"); }

  void print_CPU_state();

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with negative offset");
    movl(Address(rsp, (-offset)), rax);
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages. Also, clobbers tmp
  void bang_stack_size(Register size, Register tmp);

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset);

  // Support for serializing memory accesses between threads
  void serialize_memory(Register thread, Register tmp);

  void verify_tlab();

  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg must be rax, and is killed.
  // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
  // be killed; if not supplied, push/pop will be used internally to
  // allocate a temporary (inefficient, avoid if possible).
  // Optional slow case is for implementations (interpreter and C1) which branch to
  // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
  // Returns offset of first potentially-faulting instruction for null
  // check info (currently consumed only by C1). If
  // swap_reg_contains_mark is true then returns -1 as it is assumed
  // the calling code has already passed any potential faults.
  int biased_locking_enter(Register lock_reg, Register obj_reg,
                           Register swap_reg, Register tmp_reg,
                           bool swap_reg_contains_mark,
                           Label& done, Label* slow_case = NULL,
                           BiasedLockingCounters* counters = NULL);
  void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
#ifdef COMPILER2
  // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
  // See full description in macroAssembler_x86.cpp.
  void fast_lock(Register obj, Register box, Register tmp,
                 Register scr, Register cx1, Register cx2,
                 BiasedLockingCounters* counters,
                 RTMLockingCounters* rtm_counters,
                 RTMLockingCounters* stack_rtm_counters,
                 Metadata* method_data,
                 bool use_rtm, bool profile_rtm);
  void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
#if INCLUDE_RTM_OPT
  void rtm_counters_update(Register abort_status, Register rtm_counters);
  void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
  void rtm_abort_ratio_calculation(Register tmp, Register rtm_counters_reg,
                                   RTMLockingCounters* rtm_counters,
                                   Metadata* method_data);
  void rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
                     RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
  void rtm_retry_lock_on_abort(Register retry_count, Register abort_status, Label& retryLabel);
  void rtm_retry_lock_on_busy(Register retry_count, Register box, Register tmp, Register scr, Label& retryLabel);
  void rtm_stack_locking(Register obj, Register tmp, Register scr,
                         Register retry_on_abort_count,
                         RTMLockingCounters* stack_rtm_counters,
                         Metadata* method_data, bool profile_rtm,
                         Label& DONE_LABEL, Label& IsInflated);
  void rtm_inflated_locking(Register obj, Register box, Register tmp,
                            Register scr, Register retry_on_busy_count,
                            Register retry_on_abort_count,
                            RTMLockingCounters* rtm_counters,
                            Metadata* method_data, bool profile_rtm,
                            Label& DONE_LABEL);
#endif
#endif

  Condition negate_condition(Condition cond);

  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.

  // Arithmetics


  void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, (int) src.as_constant());
    else                   addptr(dst, src.as_register());
  }

  void andptr(Register dst, int32_t src);
  void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }

  void cmp8(AddressLiteral src1, int imm);

  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2);

  void cmp32(Register src1, Address src2);

#ifndef _LP64
  void cmpklass(Address dst, Metadata* obj);
  void cmpklass(Register dst, Metadata* obj);
  void cmpoop(Address dst, jobject obj);
  void cmpoop(Register dst, jobject obj);
#endif // _LP64

  // NOTE src2 must be the lval. This is NOT a mem-mem compare
  void cmpptr(Address src1, AddressLiteral src2);

  void cmpptr(Register src1, AddressLiteral src2);

  void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src);

  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr);


  void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }


  void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }

  void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }

  void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
  void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }

  void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }

  void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4 byte immediate value even if it fits into 8bit
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst, src.as_register());
  }

  void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
  void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }

  void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
  void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }

  void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }



  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr);
  // Unconditional atomic increment.
  void atomic_incl(Address counter_addr);
  void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1);
#ifdef _LP64
  void atomic_incq(Address counter_addr);
  void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1);
#endif
  void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; }
  void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }

  void lea(Register dst, AddressLiteral adr);
  void lea(Address dst, AddressLiteral adr);
  void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::testl;
  void testl(Register dst, AddressLiteral src);

  void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }

  void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Register src2);

  void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
  void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }

  // Calls

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register entry);

  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry. This is because this is more natural
  // for jumps/calls.
  void call(AddressLiteral entry);

  // Emit the CompiledIC call idiom
  void ic_call(address entry, jint method_index = 0);

  // Jumps

  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst. This is because this is more natural
  // for jumps/calls.
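  //
  // Illustrative sketch (an assumption, not taken from this header): a jump to an
  // external runtime stub might be written as
  //
  //   __ jump(RuntimeAddress(stub_entry));
  //
  // where RuntimeAddress is the AddressLiteral flavor used for code addresses and
  // stub_entry is a hypothetical 'address' value.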
  void jump(AddressLiteral dst);
  void jump_cc(Condition cc, AddressLiteral dst);

  // 32bit can do a case table jump in one instruction but we no longer allow the base
  // to be installed in the Address class. This jump will transfer to the address
  // contained in the location described by entry (not the address of entry).
  void jump(ArrayAddress entry);

  // Floating

  void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, AddressLiteral src);
  void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }

  void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, AddressLiteral src);

  void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, AddressLiteral src);

  void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, AddressLiteral src);

  void fadd_s(Address src)        { Assembler::fadd_s(src); }
  void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }

  void fldcw(Address src) { Assembler::fldcw(src); }
  void fldcw(AddressLiteral src);

  void fld_s(int index)   { Assembler::fld_s(index); }
  void fld_s(Address src) { Assembler::fld_s(src); }
  void fld_s(AddressLiteral src);

  void fld_d(Address src) { Assembler::fld_d(src); }
  void fld_d(AddressLiteral src);

  void fld_x(Address src) { Assembler::fld_x(src); }
  void fld_x(AddressLiteral src);

  void fmul_s(Address src)        { Assembler::fmul_s(src); }
  void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }

  void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
  void ldmxcsr(AddressLiteral src);

  void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1 LP64_ONLY(COMMA Register tmp2));

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx NOT_LP64(COMMA Register tmp) LP64_ONLY(COMMA Register tmp1)
                LP64_ONLY(COMMA Register tmp2) LP64_ONLY(COMMA Register tmp3) LP64_ONLY(COMMA Register tmp4));

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx LP64_ONLY(COMMA Register rcx), Register rdx
                LP64_ONLY(COMMA Register tmp1) LP64_ONLY(COMMA Register tmp2)
                LP64_ONLY(COMMA Register tmp3) LP64_ONLY(COMMA Register tmp4));

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx NOT_LP64(COMMA Register tmp)
                LP64_ONLY(COMMA Register r8) LP64_ONLY(COMMA Register r9)
                LP64_ONLY(COMMA Register r10) LP64_ONLY(COMMA Register r11));

#ifndef _LP64
  void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);
  void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
                         Register esi, Register edi, Register ebp, Register esp);
#endif

  void increase_precision();
  void restore_precision();

 private:

  // call runtime as a fallback for trig functions and pow/exp.
  void fp_runtime_fallback(address runtime_entry, int nb_args, int num_fpu_regs_in_use);

  // these are private because users should be doing movflt/movdbl

  void movss(Address dst, XMMRegister src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, Address src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, AddressLiteral src);

  void movlpd(XMMRegister dst, Address src)    { Assembler::movlpd(dst, src); }
  void movlpd(XMMRegister dst, AddressLiteral src);

 public:

  void addsd(XMMRegister dst, XMMRegister src)    { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, Address src)        { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, AddressLiteral src);

  void addss(XMMRegister dst, XMMRegister src)    { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, Address src)        { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, AddressLiteral src);

  void addpd(XMMRegister dst, XMMRegister src)    { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, Address src)        { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, AddressLiteral src);

  void divsd(XMMRegister dst, XMMRegister src)    { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, Address src)        { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, AddressLiteral src);

  void divss(XMMRegister dst, XMMRegister src)    { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, Address src)        { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, AddressLiteral src);

  // Move Unaligned Double Quadword
  void movdqu(Address dst, XMMRegister src);
  void movdqu(XMMRegister dst, Address src);
  void movdqu(XMMRegister dst, XMMRegister src);
  void movdqu(XMMRegister dst, AddressLiteral src);
  // AVX Unaligned forms
  void vmovdqu(Address dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, Address src);
  void vmovdqu(XMMRegister dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, AddressLiteral src);

  // Move Aligned Double Quadword
  void movdqa(XMMRegister dst, Address src)       { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, XMMRegister src)   { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, AddressLiteral src);

  void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
  void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, AddressLiteral src);

  void mulpd(XMMRegister dst, XMMRegister src)    { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, Address src)        { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, AddressLiteral src);

  void mulsd(XMMRegister dst, XMMRegister src)    { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, Address src)        { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, AddressLiteral src);

  void mulss(XMMRegister dst, XMMRegister src)    { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, Address src)        { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, AddressLiteral src);

  // Carry-Less Multiplication Quadword
  void pclmulldq(XMMRegister dst, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::pclmulqdq(dst, src, 0x00);
  }
  void pclmulhdq(XMMRegister dst, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::pclmulqdq(dst, src, 0x11);
  }

  void pcmpeqb(XMMRegister dst, XMMRegister src);
  void pcmpeqw(XMMRegister dst, XMMRegister src);

  void pcmpestri(XMMRegister dst, Address src, int imm8);
  void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);

  void pmovzxbw(XMMRegister dst, XMMRegister src);
  void pmovzxbw(XMMRegister dst, Address src);

  void pmovmskb(Register dst, XMMRegister src);

  void ptest(XMMRegister dst, XMMRegister src);

  void sqrtsd(XMMRegister dst, XMMRegister src)    { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, Address src)        { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, AddressLiteral src);

  void sqrtss(XMMRegister dst, XMMRegister src)    { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, Address src)        { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, AddressLiteral src);

  void subsd(XMMRegister dst, XMMRegister src)    { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, Address src)        { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, AddressLiteral src);

  void subss(XMMRegister dst, XMMRegister src)    { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, Address src)        { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, AddressLiteral src);

  void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, Address src)     { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, AddressLiteral src);

  void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, Address src)     { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, AddressLiteral src);

  // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
  void xorpd(XMMRegister dst, XMMRegister src);
  void xorpd(XMMRegister dst, Address src)     { Assembler::xorpd(dst, src); }
  void xorpd(XMMRegister dst, AddressLiteral src);

  // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
  void xorps(XMMRegister dst, XMMRegister src);
  void xorps(XMMRegister dst, Address src)     { Assembler::xorps(dst, src); }
  void xorps(XMMRegister dst, AddressLiteral src);

  // Shuffle Bytes
  void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, Address src)     { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, AddressLiteral src);
  // AVX 3-operands instructions

  void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
  void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);

  void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpbroadcastw(XMMRegister dst, XMMRegister src);

  void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
  void vpmovmskb(Register dst, XMMRegister src);

  void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vptest(XMMRegister dst, XMMRegister src);

  void punpcklbw(XMMRegister dst, XMMRegister src);
  void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }

  void pshuflw(XMMRegister dst, XMMRegister src, int mode);
  void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }

  void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);

  void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);
  void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src);
  void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  // AVX Vector instructions

  void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);

  void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);

  void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }

  // Simple version for AVX2 256bit vectors
  void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); }
  void vpxor(XMMRegister dst, Address src)     { Assembler::vpxor(dst, dst, src, true); }

  // Move packed integer values from low 128 bit to high 128 bit in 256 bit vector.
  void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    if (UseAVX > 1) // vinserti128h is available only in AVX2
      Assembler::vinserti128h(dst, nds, src);
    else
      Assembler::vinsertf128h(dst, nds, src);
  }

  // Carry-Less Multiplication Quadword
  void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x00);
  }
  void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x11);
  }

  // Data

  void cmov32( Condition cc, Register dst, Address  src);
  void cmov32( Condition cc, Register dst, Register src);

  void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }

  void cmovptr(Condition cc, Register dst, Address  src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
  void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }

  void movoop(Register dst, jobject obj);
  void movoop(Address dst, jobject obj);

  void mov_metadata(Register dst, Metadata* obj);
  void mov_metadata(Address dst, Metadata* obj);

  void movptr(ArrayAddress dst, Register src);
  // can this do an lea?
  void movptr(Register dst, ArrayAddress src);

  void movptr(Register dst, Address src);

#ifdef _LP64
  void movptr(Register dst, AddressLiteral src, Register scratch=rscratch1);
#else
  void movptr(Register dst, AddressLiteral src, Register scratch=noreg); // Scratch reg is ignored in 32-bit
#endif

  void movptr(Register dst, intptr_t src);
  void movptr(Register dst, Register src);
  void movptr(Address dst, intptr_t src);

  void movptr(Address dst, Register src);

  void movptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) movptr(dst, src.as_constant());
    else                   movptr(dst, src.as_register());
  }

#ifdef _LP64
  // Generally the next two are only used for moving NULL
  // Although there are situations in initializing the mark word where
  // they could be used. They are dangerous.

  // They only exist on LP64 so that int32_t and intptr_t are not the same
  // and we have ambiguous declarations.

  void movptr(Address dst, int32_t imm32);
  void movptr(Register dst, int32_t imm32);
#endif // _LP64

  // to avoid hiding movl
  void mov32(AddressLiteral dst, Register src);
  void mov32(Register dst, AddressLiteral src);

  // to avoid hiding movb
  void movbyte(ArrayAddress dst, int src);

  // Import other mov() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
1264 // Import other mov() methods from the parent class, or else they will be
1265 // hidden by the following overriding declarations.
1266 using Assembler::movdl;
1267 using Assembler::movq;
1268 void movdl(XMMRegister dst, AddressLiteral src);
1269 void movq(XMMRegister dst, AddressLiteral src);
1270
1271 // Can push value or effective address
1272 void pushptr(AddressLiteral src);
1273
1274 void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
1275 void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
1276
1277 void pushoop(jobject obj);
1278 void pushklass(Metadata* obj);
1279
1280 // Sign-extend, as needed, an int (l) to a pointer-sized element.
1281 void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
1282 void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
1283
1284 // C2 compiled method's prolog code.
1285 void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b);
1286
1287 // Clear memory of size 'cnt' qwords, starting at 'base'.
1288 void clear_mem(Register base, Register cnt, Register rtmp);
1289
1290 #ifdef COMPILER2
1291 void string_indexof_char(Register str1, Register cnt1, Register ch, Register result,
1292                          XMMRegister vec1, XMMRegister vec2, XMMRegister vec3, Register tmp);
1293
1294 // IndexOf strings.
1295 // Small strings are loaded through the stack if they cross a page boundary.
1296 void string_indexof(Register str1, Register str2,
1297                     Register cnt1, Register cnt2,
1298                     int int_cnt2, Register result,
1299                     XMMRegister vec, Register tmp,
1300                     int ae);
1301
1302 // IndexOf for constant substrings with size >= 8 elements
1303 // which don't need to be loaded through the stack.
1304 void string_indexofC8(Register str1, Register str2,
1305                       Register cnt1, Register cnt2,
1306                       int int_cnt2, Register result,
1307                       XMMRegister vec, Register tmp,
1308                       int ae);
1309
1310 // Smallest code: we don't need to load through the stack;
1311 // just check the string tail.
1312
1313 // Helper function for string_compare.
1314 void load_next_elements(Register elem1, Register elem2, Register str1, Register str2,
1315                         Address::ScaleFactor scale, Address::ScaleFactor scale1,
1316                         Address::ScaleFactor scale2, Register index, int ae);
1317 // Compare strings.
1318 void string_compare(Register str1, Register str2,
1319                     Register cnt1, Register cnt2, Register result,
1320                     XMMRegister vec1, int ae);
1321
1322 // Search for a non-ASCII character (negative byte value) in a byte array;
1323 // return true if any is found and false otherwise.
1324 void has_negatives(Register ary1, Register len,
1325                    Register result, Register tmp1,
1326                    XMMRegister vec1, XMMRegister vec2);
1327
1328 // Compare char[] or byte[] arrays.
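  // Editor's sketch (illustrative, not part of the original header): a caller
  // might wire the comparison below as, e.g.,
  //
  //   __ arrays_equals(true /* is_array_equ */, rsi, rdi, rcx, rax, rdx,
  //                    xmm0, xmm1, false /* byte elements */);
  //
  // with 'result' (rax here) expected to receive 1 when the arrays are equal
  // and 0 otherwise. The actual register assignments are dictated by the
  // caller; see macroAssembler_x86.cpp for the generated sequence.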
1329 void arrays_equals(bool is_array_equ, Register ary1, Register ary2, 1330 Register limit, Register result, Register chr, 1331 XMMRegister vec1, XMMRegister vec2, bool is_char); 1332 1333 #endif 1334 1335 // Fill primitive arrays 1336 void generate_fill(BasicType t, bool aligned, 1337 Register to, Register value, Register count, 1338 Register rtmp, XMMRegister xtmp); 1339 1340 void encode_iso_array(Register src, Register dst, Register len, 1341 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3, 1342 XMMRegister tmp4, Register tmp5, Register result); 1343 1344 #ifdef _LP64 1345 void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2); 1346 void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart, 1347 Register y, Register y_idx, Register z, 1348 Register carry, Register product, 1349 Register idx, Register kdx); 1350 void multiply_add_128_x_128(Register x_xstart, Register y, Register z, 1351 Register yz_idx, Register idx, 1352 Register carry, Register product, int offset); 1353 void multiply_128_x_128_bmi2_loop(Register y, Register z, 1354 Register carry, Register carry2, 1355 Register idx, Register jdx, 1356 Register yz_idx1, Register yz_idx2, 1357 Register tmp, Register tmp3, Register tmp4); 1358 void multiply_128_x_128_loop(Register x_xstart, Register y, Register z, 1359 Register yz_idx, Register idx, Register jdx, 1360 Register carry, Register product, 1361 Register carry2); 1362 void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen, 1363 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5); 1364 void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3, 1365 Register tmp4, Register tmp5, Register rdxReg, Register raxReg); 1366 void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, 1367 Register tmp2); 1368 void multiply_add_64(Register sum, Register op1, Register op2, Register carry, 1369 Register rdxReg, Register raxReg); 1370 void add_one_64(Register z, Register zlen, Register carry, Register tmp1); 1371 void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, 1372 Register tmp3, Register tmp4); 1373 void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, 1374 Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg); 1375 1376 void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1, 1377 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, 1378 Register raxReg); 1379 void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1, 1380 Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, 1381 Register raxReg); 1382 void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale, 1383 Register result, Register tmp1, Register tmp2, 1384 XMMRegister vec1, XMMRegister vec2, XMMRegister vec3); 1385 #endif 1386 1387 // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic. 
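  // Editor's note (illustrative, not from the original header): the
  // byte-at-a-time helper below corresponds to the standard table-driven
  // CRC32 update step,
  //
  //   crc = table[(crc ^ byte) & 0xFF] ^ (crc >> 8);
  //
  // while kernel_crc32 and the 128-bit folding helpers further down process
  // the bulk of the buffer in wider, CLMUL-assisted steps.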
void update_byte_crc32(Register crc, Register val, Register table);
1389 void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
1390 // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic.
1391 // Note on the naming convention:
1392 // Prefix w = register only used on a Westmere+ architecture
1393 // Prefix n = register only used on a Nehalem architecture
1394 #ifdef _LP64
1395 void crc32c_ipl_alg4(Register in_out, uint32_t n,
1396                      Register tmp1, Register tmp2, Register tmp3);
1397 #else
1398 void crc32c_ipl_alg4(Register in_out, uint32_t n,
1399                      Register tmp1, Register tmp2, Register tmp3,
1400                      XMMRegister xtmp1, XMMRegister xtmp2);
1401 #endif
1402 void crc32c_pclmulqdq(XMMRegister w_xtmp1,
1403                       Register in_out,
1404                       uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
1405                       XMMRegister w_xtmp2,
1406                       Register tmp1,
1407                       Register n_tmp2, Register n_tmp3);
1408 void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
1409                      XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1410                      Register tmp1, Register tmp2,
1411                      Register n_tmp3);
1412 void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
1413                        Register in_out1, Register in_out2, Register in_out3,
1414                        Register tmp1, Register tmp2, Register tmp3,
1415                        XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1416                        Register tmp4, Register tmp5,
1417                        Register n_tmp6);
1418 void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
1419                           Register tmp1, Register tmp2, Register tmp3,
1420                           Register tmp4, Register tmp5, Register tmp6,
1421                           XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
1422                           bool is_pclmulqdq_supported);
1423 // Fold a 128-bit data chunk.
1424 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
1425 void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
1426 // Fold 8-bit data.
1427 void fold_8bit_crc32(Register crc, Register table, Register tmp);
1428 void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
1429
1430 // Compress a char[] array to a byte[].
1431 void char_array_compress(Register src, Register dst, Register len,
1432                          XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
1433                          XMMRegister tmp4, Register tmp5, Register result);
1434
1435 // Inflate a byte[] array to a char[].
1436 void byte_array_inflate(Register src, Register dst, Register len,
1437                         XMMRegister tmp1, Register tmp2);
1438
1439 };
1440
1441 /**
1442  * class SkipIfEqual:
1443  *
1444  * Instantiating this class emits assembly code that jumps around any code
1445  * generated between the creation of the instance and its automatic
1446  * destruction at the end of the scope block, depending on the value of the
1447  * flag passed to the constructor; the flag is checked at run-time.
1448  */
1449 class SkipIfEqual {
1450  private:
1451   MacroAssembler* _masm;
1452   Label _label;
1453
1454  public:
1455   SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
1456   ~SkipIfEqual();
1457 };
1458
1459 #endif // CPU_X86_VM_MACROASSEMBLER_X86_HPP
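// Editor's usage sketch for SkipIfEqual (illustrative only; the flag and the
// runtime entry named below are hypothetical):
//
//   {
//     SkipIfEqual skip(masm, &SomeDiagnosticFlag, false);
//     // Code emitted here is jumped over at run-time whenever
//     // SomeDiagnosticFlag == false.
//     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, some_probe_function)));
//   } // ~SkipIfEqual binds the skip label, completing the conditional branch.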