/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_VM_MACROASSEMBLER_X86_HPP
#define CPU_X86_VM_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "utilities/macros.hpp"
#include "runtime/rtmLocking.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1;      // as_Address()

 protected:

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr);

  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  virtual void call_VM_leaf_base(
    address entry_point,               // the entry point
    int     number_of_arguments        // the number of arguments to pop after the call
  );

  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
  virtual void call_VM_base(           // returns the register containing the thread upon return
    Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,              // the thread if computed before     ; use noreg otherwise
    Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,              // the entry point
    int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions          // whether to check for pending exceptions after return
  );

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // helpers for FPU flag access
  // tmp is a temporary register, if none is available use noreg
  void save_rax   (Register tmp);
  void restore_rax(Register tmp);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
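
  // Note (added commentary, not in the original header): the implicit scheme
  // above relies on the fact that a load such as "mov rax, [reg + 8]" with
  // reg == NULL faults at address 8, which still lies inside the protected
  // first page, so the OS signal doubles as the null check. Only offsets
  // outside that protected range (see needs_explicit_null_check()) require
  // an explicit compare-and-branch.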

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
           op == 0xE9 /* jmp */ ||
           op == 0xEB /* short jmp */ ||
           (op & 0xF0) == 0x70 /* short jcc */ ||
           op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */ ||
           op == 0xC7 && branch[1] == 0xF8 /* xbegin */,
           "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = target - (address) &disp[1];
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
      *disp = imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7) ? 2 : 1];
      int imm32 = target - (address) &disp[1];
      *disp = imm32;
    }
  }
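
  // Added illustration (not part of the original header) of the rel32 case
  // above: for a 5-byte "jmp rel32" located at 0x1000 that should reach
  // 0x2000, disp points at branch + 1 and the stored displacement is
  //   imm32 = target - (address)&disp[1] = 0x2000 - 0x1005 = 0xffb,
  // i.e. the offset is measured from the first byte after the displacement
  // field, which is where the CPU resumes after decoding the instruction.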

  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
  void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);

  // special instructions for EVEX
  void setvectmask(Register dst, Register src);
  void restorevectmask();

  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }

  void incrementl(AddressLiteral dst);
  void incrementl(ArrayAddress dst);

  void incrementq(AddressLiteral dst);

  // Alignment
  void align(int modulus);
  void align(int modulus, int target);

  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e., a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);


  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
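
  // Illustrative usage sketch (added; not in the original header). The register
  // choices and the runtime entries InterpreterRuntime::foo / SharedRuntime::foo_leaf
  // are hypothetical:
  //
  //   // calls foo(JavaThread*, oopDesc*) with rbx as the Java argument, leaves
  //   // the oop result in rax, and checks for pending exceptions afterwards:
  //   __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::foo), rbx);
  //
  //   // leaf call: no thread/oop-result handling and no exception check:
  //   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::foo_leaf), rbx);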

  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result  (Register oop_result, Register thread);
  void get_vm_result_2(Register metadata_result, Register thread);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf0(address entry_point);
  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);

  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register thread,
                           Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  // thread in the default location (r15_thread on 64bit)
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  void reset_last_Java_frame(Register thread, bool clear_fp);

  // thread in the default location (r15_thread on 64bit)
  void reset_last_Java_frame(bool clear_fp);

  // Stores
  void store_check(Register obj);                // store check for obj - register is destroyed afterwards
  void store_check(Register obj, Address dst);   // same as above, dst is exact store location (reg. is destroyed)

#if INCLUDE_ALL_GCS

  void g1_write_barrier_pre(Register obj,
                            Register pre_val,
                            Register thread,
                            Register tmp,
                            bool tosca_live,
                            bool expand_call);

  void g1_write_barrier_post(Register store_addr,
                             Register new_val,
                             Register thread,
                             Register tmp,
                             Register tmp2);

#endif // INCLUDE_ALL_GCS

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // C++ bool manipulation

  void movbool(Register dst, Address src);
  void movbool(Address dst, bool boolconst);
  void movbool(Address dst, Register src);
  void testbool(Register dst);

  void load_mirror(Register mirror, Register method);

  // oop manipulations
  void load_klass(Register dst, Register src);
  void store_klass(Register dst, Register src);

  void load_heap_oop(Register dst, Address src);
  void load_heap_oop_not_null(Register dst, Address src);
  void store_heap_oop(Address dst, Register src);
  void cmp_heap_oop(Register src1, Address src2, Register tmp = noreg);

  // Used for storing NULL. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

  void load_prototype_header(Register dst, Register src);

#ifdef _LP64
  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like NULL) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register r);
  void decode_heap_oop(Register r);
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_oop(Address dst, jobject obj);
  void cmp_narrow_oop(Register dst, jobject obj);
  void cmp_narrow_oop(Address dst, jobject obj);

  void encode_klass_not_null(Register r);
  void decode_klass_not_null(Register r);
  void encode_klass_not_null(Register dst, Register src);
  void decode_klass_not_null(Register dst, Register src);
  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_klass(Address dst, Klass* k);
  void cmp_narrow_klass(Register dst, Klass* k);
  void cmp_narrow_klass(Address dst, Klass* k);

  // Returns the byte size of the instructions generated by decode_klass_not_null()
  // when compressed klass pointers are being used.
  static int instr_size_for_decode_klass_not_null();

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

#endif // _LP64

  // Int division/remainder for Java
  // (as idivl, but checks for special case as described in JVM spec.)
  // returns idivl instruction offset for implicit exception handling
  int corrected_idivl(Register reg);

  // Long division/remainder for Java
  // (as idivq, but checks for special case as described in JVM spec.)
  // returns idivq instruction offset for implicit exception handling
  int corrected_idivq(Register reg);
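
  // Added note (not in the original header): the "special case" referred to
  // above is MIN_VALUE / -1. A plain idivl/idivq raises #DE on that input
  // because the quotient overflows, while the JVM spec requires the result
  // MIN_VALUE with remainder 0, so the corrected versions test for it and
  // skip the divide. The returned offset is that of the divide instruction
  // itself, so a SIGFPE from division by zero can be mapped back to the
  // bytecode for implicit exception handling.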

  void int3();

  // Long operation macros for a 32bit cpu
  // Long negation for Java
  void lneg(Register hi, Register lo);

  // Long multiplication for Java
  // (destroys contents of eax, ebx, ecx and edx)
  void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y

  // Long shifts for Java
  // (semantics as described in JVM spec.)
  void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
  void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)

  // Long compare for Java
  // (semantics as described in JVM spec.)
  void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)


  // misc

  // Sign extension
  void sign_extend_short(Register reg);
  void sign_extend_byte(Register reg);

  // Division by power of 2, rounding towards 0
  void division_with_shift(Register reg, int shift_value);

  // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
  //
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
  void fcmp(Register tmp);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp(Register tmp, int index, bool pop_left, bool pop_right);

  // Floating-point comparison for Java
  // Compares the top-most stack entries on the FPU stack and stores the result in dst.
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // (semantics as described in JVM spec.)
  void fcmp2int(Register dst, bool unordered_is_less);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);

  // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
  // tmp is a temporary register, if none is available use noreg
  void fremr(Register tmp);


  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);

  // branch to L if FPU flag C2 is set/not set
  // tmp is a temporary register, if none is available use noreg
  void jC2 (Register tmp, Label& L);
  void jnC2(Register tmp, Label& L);

  // Pop ST (ffree & fincstp combined)
  void fpop();

  // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_float(Address src);

  // Store float value to 'address'. If UseSSE >= 1, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_float(Address dst);

  // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_double(Address src);

  // Store double value to 'address'. If UseSSE >= 2, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_double(Address dst);

  // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
  void push_fTOS();

  // pops double TOS element from CPU stack and pushes on FPU stack
  void pop_fTOS();

  void empty_FPU_stack();

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  // Round up to a power of two
  void round_to(Register reg, int modulus);

  // Callee saved registers handling
  void push_callee_saved_registers();
  void pop_callee_saved_registers();

  // allocation
  void eden_allocate(
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if known at compile time
    Register t1,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void tlab_allocate(
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if known at compile time
    Register t1,                       // temp register
    Register t2,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  Register tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); // returns TLS address
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);

  void incr_allocated_bytes(Register thread,
                            Register var_size_in_bytes, int con_size_in_bytes,
                            Register t1 = noreg);

  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);
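
  // Added sketch (not in the original header) of how the two halves are
  // typically wired together; the combined helper above does essentially
  // this, though the real macroAssembler_x86.cpp code may differ in detail:
  //
  //   Label L_failure;
  //   check_klass_subtype_fast_path(sub, super, tmp, &L_success, &L_failure, NULL);
  //   check_klass_subtype_slow_path(sub, super, tmp, noreg, &L_success, NULL);
  //   bind(L_failure);   // "falls through on failure"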

  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  //----
  void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0

  // Debugging

  // only if +VerifyOops
  // TODO: Make these macros with file and line like sparc version!
  void verify_oop(Register reg, const char* s = "broken oop");
  void verify_oop_addr(Address addr, const char* s = "broken oop addr");

  void in_heap_check(Register raddr, Label& done);
  void shenandoah_cset_check(Register raddr, Register tmp1, Register tmp2, Label& done);

  void _shenandoah_store_addr_check(Register dst, const char* msg, const char* file, int line);
  void _shenandoah_store_addr_check(Address dst, const char* msg, const char* file, int line);
#define shenandoah_store_addr_check(reg) _shenandoah_store_addr_check(reg, "oop not safe for writing", __FILE__, __LINE__)

  void _shenandoah_store_check(Address addr, Register value, const char* msg, const char* file, int line);
  void _shenandoah_store_check(Register addr, Register value, const char* msg, const char* file, int line);
#define shenandoah_store_check(addr, value) _shenandoah_store_check(addr, value, "oop not safe for writing", __FILE__, __LINE__)

  void _shenandoah_lock_check(Register dst, const char* msg, const char* file, int line);
#define shenandoah_lock_check(reg) _shenandoah_lock_check(reg, "lock/oop not safe for writing", __FILE__, __LINE__)

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");

  // Verify or restore cpu control state after JNI call
  void restore_cpu_control_state_after_jni();

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested()                                { stop("untested"); }

  void unimplemented(const char* what = "")      { char* b = new char[1024]; jio_snprintf(b, 1024, "unimplemented: %s", what); stop(b); }

  void should_not_reach_here()                   { stop("should not reach here"); }

  void print_CPU_state();

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with negative offset");
    movl(Address(rsp, (-offset)), rax);
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages. Also, clobbers tmp
  void bang_stack_size(Register size, Register tmp);

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset);

  // Support for serializing memory accesses between threads
  void serialize_memory(Register thread, Register tmp);

  void verify_tlab();

  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg must be rax, and is killed.
  // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
  // be killed; if not supplied, push/pop will be used internally to
  // allocate a temporary (inefficient, avoid if possible).
  // Optional slow case is for implementations (interpreter and C1) which branch to
  // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
  // Returns offset of first potentially-faulting instruction for null
  // check info (currently consumed only by C1). If
  // swap_reg_contains_mark is true then returns -1 as it is assumed
  // the calling code has already passed any potential faults.
  int biased_locking_enter(Register lock_reg, Register obj_reg,
                           Register swap_reg, Register tmp_reg,
                           bool swap_reg_contains_mark,
                           Label& done, Label* slow_case = NULL,
                           BiasedLockingCounters* counters = NULL);
  void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
#ifdef COMPILER2
  // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
  // See full description in macroAssembler_x86.cpp.
  void fast_lock(Register obj, Register box, Register tmp,
                 Register scr, Register cx1, Register cx2,
                 BiasedLockingCounters* counters,
                 RTMLockingCounters* rtm_counters,
                 RTMLockingCounters* stack_rtm_counters,
                 Metadata* method_data,
                 bool use_rtm, bool profile_rtm);
  void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
#if INCLUDE_RTM_OPT
  void rtm_counters_update(Register abort_status, Register rtm_counters);
  void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
  void rtm_abort_ratio_calculation(Register tmp, Register rtm_counters_reg,
                                   RTMLockingCounters* rtm_counters,
                                   Metadata* method_data);
  void rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
                     RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
  void rtm_retry_lock_on_abort(Register retry_count, Register abort_status, Label& retryLabel);
  void rtm_retry_lock_on_busy(Register retry_count, Register box, Register tmp, Register scr, Label& retryLabel);
  void rtm_stack_locking(Register obj, Register tmp, Register scr,
                         Register retry_on_abort_count,
                         RTMLockingCounters* stack_rtm_counters,
                         Metadata* method_data, bool profile_rtm,
                         Label& DONE_LABEL, Label& IsInflated);
  void rtm_inflated_locking(Register obj, Register box, Register tmp,
                            Register scr, Register retry_on_busy_count,
                            Register retry_on_abort_count,
                            RTMLockingCounters* rtm_counters,
                            Metadata* method_data, bool profile_rtm,
                            Label& DONE_LABEL);
#endif
#endif

  Condition negate_condition(Condition cond);

  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call.

  // Arithmetics


  void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, (int) src.as_constant());
    else                   addptr(dst, src.as_register());
  }

  void andptr(Register dst, int32_t src);
  void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }

  void cmp8(AddressLiteral src1, int imm);

  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2);

  void cmp32(Register src1, Address src2);

#ifndef _LP64
  void cmpklass(Address dst, Metadata* obj);
  void cmpklass(Register dst, Metadata* obj);
  void cmpoop(Address dst, jobject obj);
  void cmpoop(Register dst, jobject obj);
#endif // _LP64

  // NOTE src2 must be the lval. This is NOT a mem-mem compare.
  void cmpptr(Address src1, AddressLiteral src2);

  void cmpptr(Register src1, AddressLiteral src2);

  void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src);

  void cmpxchgptr(Register reg, Address adr);

  // Special Shenandoah CAS implementation that handles false negatives
  // due to concurrent evacuation.
  void cmpxchg_oop_shenandoah(Register res, Address addr, Register oldval, Register newval,
                              bool exchange,
                              Register tmp1, Register tmp2);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr);


  void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }


  void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }

  void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }

  void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
  void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }

  void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }

  void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4 byte immediate value even if it fits into 8bit
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst, src.as_register());
  }

  void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
  void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }

  void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
  void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }

  void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }



  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr);
  // Unconditional atomic increment.
  void atomic_incl(Address counter_addr);
  void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1);
#ifdef _LP64
  void atomic_incq(Address counter_addr);
  void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1);
#endif
  void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; }
  void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }

  void lea(Register dst, AddressLiteral adr);
  void lea(Address dst, AddressLiteral adr);
  void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::testl;
  void testl(Register dst, AddressLiteral src);

  void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }

  void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Register src2);

  void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
  void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }

  // Calls

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register entry);

  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry. This is because this is more natural
  // for jumps/calls.
  void call(AddressLiteral entry);

  // Emit the CompiledIC call idiom
  void ic_call(address entry, jint method_index = 0);

  // Jumps

  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst. This is because this is more natural
  // for jumps/calls.
  void jump(AddressLiteral dst);
  void jump_cc(Condition cc, AddressLiteral dst);

  // 32bit can do a case table jump in one instruction but we no longer allow the base
  // to be installed in the Address class. This jump will transfer to the address
  // contained in the location described by entry (not the address of entry).
  void jump(ArrayAddress entry);

  // Floating

  void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, AddressLiteral src);
  void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }

  void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, AddressLiteral src);

  void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, AddressLiteral src);

  void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, AddressLiteral src);

  void fadd_s(Address src)        { Assembler::fadd_s(src); }
  void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }

  void fldcw(Address src) { Assembler::fldcw(src); }
  void fldcw(AddressLiteral src);

  void fld_s(int index)   { Assembler::fld_s(index); }
  void fld_s(Address src) { Assembler::fld_s(src); }
  void fld_s(AddressLiteral src);

  void fld_d(Address src) { Assembler::fld_d(src); }
  void fld_d(AddressLiteral src);

  void fld_x(Address src) { Assembler::fld_x(src); }
  void fld_x(AddressLiteral src);

  void fmul_s(Address src)        { Assembler::fmul_s(src); }
  void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }

  void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
  void ldmxcsr(AddressLiteral src);

#ifdef _LP64
 private:
  void sha256_AVX2_one_round_compute(
    Register  reg_old_h,
    Register  reg_a,
    Register  reg_b,
    Register  reg_c,
    Register  reg_d,
    Register  reg_e,
    Register  reg_f,
    Register  reg_g,
    Register  reg_h,
    int iter);
  void sha256_AVX2_four_rounds_compute_first(int start);
  void sha256_AVX2_four_rounds_compute_last(int start);
  void sha256_AVX2_one_round_and_sched(
    XMMRegister xmm_0,     /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
    XMMRegister xmm_1,     /* ymm5 */    /* full cycle is 16 iterations */
    XMMRegister xmm_2,     /* ymm6 */
    XMMRegister xmm_3,     /* ymm7 */
    Register    reg_a,     /* == eax on 0 iteration, then rotate 8 registers right on each next iteration */
    Register    reg_b,     /* ebx */     /* full cycle is 8 iterations */
    Register    reg_c,     /* edi */
    Register    reg_d,     /* esi */
    Register    reg_e,     /* r8d */
    Register    reg_f,     /* r9d */
    Register    reg_g,     /* r10d */
    Register    reg_h,     /* r11d */
    int iter);

  void addm(int disp, Register r1, Register r2);

 public:
  void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
#endif

  void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
                 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
                 Register buf, Register state, Register ofs, Register limit, Register rsp,
                 bool multi_block);

#ifdef _LP64
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
#else
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block);
#endif

  void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

#ifdef _LP64
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1, Register tmp2);

  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register r11);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp1, Register tmp2, Register tmp3, Register tmp4);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rcx, Register rdx, Register tmp1, Register tmp2,
                Register tmp3, Register tmp4);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1,
                Register tmp2, Register tmp3, Register tmp4);
  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1,
                Register tmp2, Register tmp3, Register tmp4);
#else
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1);

  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register tmp);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rdx);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

  void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
                         Register esi, Register edi, Register ebp, Register esp);

  void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);
#endif

  void increase_precision();
  void restore_precision();

 private:

  // these are private because users should be doing movflt/movdbl

  void movss(Address dst, XMMRegister src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, Address src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, AddressLiteral src);

  void movlpd(XMMRegister dst, Address src)    { Assembler::movlpd(dst, src); }
  void movlpd(XMMRegister dst, AddressLiteral src);

 public:

  void addsd(XMMRegister dst, XMMRegister src)    { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, Address src)        { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, AddressLiteral src);

  void addss(XMMRegister dst, XMMRegister src)    { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, Address src)        { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, AddressLiteral src);

  void addpd(XMMRegister dst, XMMRegister src)    { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, Address src)        { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, AddressLiteral src);

  void divsd(XMMRegister dst, XMMRegister src)    { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, Address src)        { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, AddressLiteral src);

  void divss(XMMRegister dst, XMMRegister src)    { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, Address src)        { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, AddressLiteral src);

  // Move Unaligned Double Quadword
  void movdqu(Address dst, XMMRegister src);
  void movdqu(XMMRegister dst, Address src);
  void movdqu(XMMRegister dst, XMMRegister src);
  void movdqu(XMMRegister dst, AddressLiteral src);
  // AVX Unaligned forms
  void vmovdqu(Address dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, Address src);
  void vmovdqu(XMMRegister dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, AddressLiteral src);

  // Move Aligned Double Quadword
  void movdqa(XMMRegister dst, Address src)       { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, XMMRegister src)   { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, AddressLiteral src);

  void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
  void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, AddressLiteral src);

  void mulpd(XMMRegister dst, XMMRegister src)    { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, Address src)        { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, AddressLiteral src);

  void mulsd(XMMRegister dst, XMMRegister src)    { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, Address src)        { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, AddressLiteral src);

  void mulss(XMMRegister dst, XMMRegister src)    { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, Address src)        { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, AddressLiteral src);

  // Carry-Less Multiplication Quadword
  void pclmulldq(XMMRegister dst, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::pclmulqdq(dst, src, 0x00);
  }
  void pclmulhdq(XMMRegister dst, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::pclmulqdq(dst, src, 0x11);
  }

  void pcmpeqb(XMMRegister dst, XMMRegister src);
  void pcmpeqw(XMMRegister dst, XMMRegister src);

  void pcmpestri(XMMRegister dst, Address src, int imm8);
  void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);

  void pmovzxbw(XMMRegister dst, XMMRegister src);
  void pmovzxbw(XMMRegister dst, Address src);

  void pmovmskb(Register dst, XMMRegister src);

  void ptest(XMMRegister dst, XMMRegister src);

  void sqrtsd(XMMRegister dst, XMMRegister src)    { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, Address src)        { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, AddressLiteral src);

  void sqrtss(XMMRegister dst, XMMRegister src)    { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, Address src)        { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, AddressLiteral src);

  void subsd(XMMRegister dst, XMMRegister src)    { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, Address src)        { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, AddressLiteral src);

  void subss(XMMRegister dst, XMMRegister src)    { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, Address src)        { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, AddressLiteral src);

  void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, Address src)     { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, AddressLiteral src);

  void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, Address src)     { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, AddressLiteral src);

  // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
  void xorpd(XMMRegister dst, XMMRegister src);
  void xorpd(XMMRegister dst, Address src)     { Assembler::xorpd(dst, src); }
  void xorpd(XMMRegister dst, AddressLiteral src);

  // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
  void xorps(XMMRegister dst, XMMRegister src);
  void xorps(XMMRegister dst, Address src)     { Assembler::xorps(dst, src); }
  void xorps(XMMRegister dst, AddressLiteral src);

  // Shuffle Bytes
  void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, Address src)     { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, AddressLiteral src);
  // AVX 3-operands instructions

  void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
  void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);

  void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpbroadcastw(XMMRegister dst, XMMRegister src);

  void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
  void vpmovmskb(Register dst, XMMRegister src);

  void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vptest(XMMRegister dst, XMMRegister src);

  void punpcklbw(XMMRegister dst, XMMRegister src);
  void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }

  void pshuflw(XMMRegister dst, XMMRegister src, int mode);
  void pshuflw(XMMRegister dst, Address src, int mode) { Assembler::pshuflw(dst, src, mode); }

  void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandpd(dst, nds, src, vector_len); }
  void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);

  void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vandps(dst, nds, src, vector_len); }
  void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);

  void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src);
  void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  // AVX Vector instructions

  void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);

  void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len);

  void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
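
  // Added note (not in the original header): falling back to vxorpd above is
  // safe because XOR is a pure bitwise operation, so the packed-double form
  // produces exactly the same 256-bit result as vpxor; AVX1 merely lacks the
  // integer-domain encoding, at worst costing a domain-crossing penalty.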
  // Simple version for AVX2 256bit vectors
  void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); }
  void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); }

  void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2) {
      Assembler::vinserti32x4(dst, dst, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
    if (UseAVX > 2) {
      Assembler::vinserti32x4(dst, dst, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }

  void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }
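  // Illustrative usage sketch (not part of the original header; assumes the
  // customary '__' MacroAssembler shorthand): the wrappers above pick the widest
  // encoding the CPU supports -- the 32x4 forms when UseAVX > 2, the AVX2 integer
  // forms when UseAVX > 1, and the AVX1 float forms otherwise -- so callers can
  // move a 128-bit lane without checking UseAVX themselves:
  //
  //   __ vextracti128(xmm1, xmm0, 1);   // copy the upper 128-bit lane of ymm0 into xmm1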
  // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
  void vinserti128_high(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 1);
  }
  void vinserti128_high(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 1);
  }
  void vextracti128_high(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }
  void vextracti128_high(Address dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }

  void vinsertf128_high(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vinsertf32x4(dst, dst, src, 1);
    } else {
      Assembler::vinsertf128(dst, dst, src, 1);
    }
  }

  void vinsertf128_high(XMMRegister dst, Address src) {
    if (UseAVX > 2) {
      Assembler::vinsertf32x4(dst, dst, src, 1);
    } else {
      Assembler::vinsertf128(dst, dst, src, 1);
    }
  }

  void vextractf128_high(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vextractf32x4(dst, src, 1);
    } else {
      Assembler::vextractf128(dst, src, 1);
    }
  }

  void vextractf128_high(Address dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vextractf32x4(dst, src, 1);
    } else {
      Assembler::vextractf128(dst, src, 1);
    }
  }

  // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
  void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }
  void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 1);
  }
  void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vextractf64x4_high(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }

  // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
  void vinserti128_low(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 0);
  }
  void vinserti128_low(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 0);
  }
  void vextracti128_low(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }
  void vextracti128_low(Address dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }

  void vinsertf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vinsertf128_low(XMMRegister dst, Address src) {
    if (UseAVX > 2) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vextractf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  void vextractf128_low(Address dst, XMMRegister src) {
    if (UseAVX > 2) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
  void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }
  void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 0);
  }
  void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vextractf64x4_low(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }

  // Carry-Less Multiplication Quadword
  void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x00);
  }
  void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x11);
  }
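  // Illustrative note (not part of the original header): vpclmulqdq's immediate
  // selects which 64-bit half of each source enters the carry-less multiply, so
  // 0x00 multiplies the two low quadwords and 0x11 the two high quadwords, each
  // yielding a 128-bit product. A hedged usage sketch, assuming the customary
  // '__' MacroAssembler shorthand:
  //
  //   __ vpclmulldq(xmm1, xmm2, xmm3);   // xmm1 = clmul(xmm2[63:0],   xmm3[63:0])
  //   __ vpclmulhdq(xmm4, xmm2, xmm3);   // xmm4 = clmul(xmm2[127:64], xmm3[127:64])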
  // Data

  void cmov32( Condition cc, Register dst, Address src);
  void cmov32( Condition cc, Register dst, Register src);

  void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }

  void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
  void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }

  void movoop(Register dst, jobject obj);
  void movoop(Address dst, jobject obj);

  void mov_metadata(Register dst, Metadata* obj);
  void mov_metadata(Address dst, Metadata* obj);

  void movptr(ArrayAddress dst, Register src);
  // can this do an lea?
  void movptr(Register dst, ArrayAddress src);

  void movptr(Register dst, Address src);

#ifdef _LP64
  void movptr(Register dst, AddressLiteral src, Register scratch=rscratch1);
#else
  void movptr(Register dst, AddressLiteral src, Register scratch=noreg); // Scratch reg is ignored in 32-bit
#endif

  void movptr(Register dst, intptr_t src);
  void movptr(Register dst, Register src);
  void movptr(Address dst, intptr_t src);

  void movptr(Address dst, Register src);

  void movptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) movptr(dst, src.as_constant());
    else                   movptr(dst, src.as_register());
  }

#ifdef _LP64
  // Generally the next two are only used for moving NULL, although there are
  // situations in initializing the mark word where they could be used.
  // They are dangerous.

  // They only exist on LP64, where int32_t and intptr_t are distinct types;
  // on 32-bit the declarations would be ambiguous.

  void movptr(Address dst, int32_t imm32);
  void movptr(Register dst, int32_t imm32);
#endif // _LP64

  // to avoid hiding movl
  void mov32(AddressLiteral dst, Register src);
  void mov32(Register dst, AddressLiteral src);

  // to avoid hiding movb
  void movbyte(ArrayAddress dst, int src);

  // Import other mov() methods from the parent class or else
  // they will be hidden by the following overriding declarations.
  using Assembler::movdl;
  using Assembler::movq;
  void movdl(XMMRegister dst, AddressLiteral src);
  void movq(XMMRegister dst, AddressLiteral src);

  // Can push value or effective address
  void pushptr(AddressLiteral src);

  void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
  void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }

  void pushoop(jobject obj);
  void pushklass(Metadata* obj);

  // sign-extend a 32-bit ('l') value into a pointer-sized element as needed
  void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
  void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
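  // Illustrative sketch (not part of the original header; assumes the customary
  // '__' MacroAssembler shorthand): the *ptr and movl2ptr helpers above expand
  // via LP64_ONLY/NOT_LP64 to the pointer-width instruction of the target, so
  // shared stub code can be written once for both word sizes:
  //
  //   __ movptr(rax, Address(rbx, 0));   // movq on x86_64, movl on x86_32
  //   __ movl2ptr(rcx, rdx);             // movslq (sign-extend) on x86_64, movl on x86_32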
  // C2 compiled method's prolog code.
  void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b);

  // clear memory of size 'cnt' qwords, starting at 'base';
  // if 'is_large' is set, do not try to produce a short loop
  void clear_mem(Register base, Register cnt, Register rtmp, bool is_large);

#ifdef COMPILER2
  void string_indexof_char(Register str1, Register cnt1, Register ch, Register result,
                           XMMRegister vec1, XMMRegister vec2, XMMRegister vec3, Register tmp);

  // IndexOf strings.
  // Small strings are loaded through the stack if they cross a page boundary.
  void string_indexof(Register str1, Register str2,
                      Register cnt1, Register cnt2,
                      int int_cnt2, Register result,
                      XMMRegister vec, Register tmp,
                      int ae);

  // IndexOf for constant substrings with size >= 8 elements
  // which don't need to be loaded through the stack.
  void string_indexofC8(Register str1, Register str2,
                        Register cnt1, Register cnt2,
                        int int_cnt2, Register result,
                        XMMRegister vec, Register tmp,
                        int ae);

  // Smallest code: we don't need to load through the stack,
  // check string tail.

  // helper function for string_compare
  void load_next_elements(Register elem1, Register elem2, Register str1, Register str2,
                          Address::ScaleFactor scale, Address::ScaleFactor scale1,
                          Address::ScaleFactor scale2, Register index, int ae);
  // Compare strings.
  void string_compare(Register str1, Register str2,
                      Register cnt1, Register cnt2, Register result,
                      XMMRegister vec1, int ae);

  // Search for a non-ASCII character (negative byte value) in a byte array;
  // return true if one is found, false otherwise.
  void has_negatives(Register ary1, Register len,
                     Register result, Register tmp1,
                     XMMRegister vec1, XMMRegister vec2);

  // Compare char[] or byte[] arrays.
  void arrays_equals(bool is_array_equ, Register ary1, Register ary2,
                     Register limit, Register result, Register chr,
                     XMMRegister vec1, XMMRegister vec2, bool is_char);

#endif

  // Fill primitive arrays
  void generate_fill(BasicType t, bool aligned,
                     Register to, Register value, Register count,
                     Register rtmp, XMMRegister xtmp);

  void encode_iso_array(Register src, Register dst, Register len,
                        XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                        XMMRegister tmp4, Register tmp5, Register result);

#ifdef _LP64
  void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
  void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
                              Register yz_idx, Register idx,
                              Register carry, Register product, int offset);
  void multiply_128_x_128_bmi2_loop(Register y, Register z,
                                    Register carry, Register carry2,
                                    Register idx, Register jdx,
                                    Register yz_idx1, Register yz_idx2,
                                    Register tmp, Register tmp3, Register tmp4);
  void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
                               Register yz_idx, Register idx, Register jdx,
                               Register carry, Register product,
                               Register carry2);
  void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
                       Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
  void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
                     Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
  void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
                            Register tmp2);
  void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
                       Register rdxReg, Register raxReg);
  void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
  void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                   Register tmp3, Register tmp4);
  void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                     Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);

  void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
                             Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
                             Register raxReg);
  void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
               Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
               Register raxReg);
  void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
                           Register result, Register tmp1, Register tmp2,
                           XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
#endif

  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
  void update_byte_crc32(Register crc, Register val, Register table);
  void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);
  // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic.
  // Note on the naming convention:
  // Prefix w = register only used on a Westmere+ architecture
  // Prefix n = register only used on a Nehalem architecture
#ifdef _LP64
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3);
#else
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3,
                       XMMRegister xtmp1, XMMRegister xtmp2);
#endif
  void crc32c_pclmulqdq(XMMRegister w_xtmp1,
                        Register in_out,
                        uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
                        XMMRegister w_xtmp2,
                        Register tmp1,
                        Register n_tmp2, Register n_tmp3);
  void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
                       XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                       Register tmp1, Register tmp2,
                       Register n_tmp3);
  void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
                         Register in_out1, Register in_out2, Register in_out3,
                         Register tmp1, Register tmp2, Register tmp3,
                         XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                         Register tmp4, Register tmp5,
                         Register n_tmp6);
  void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
                            Register tmp1, Register tmp2, Register tmp3,
                            Register tmp4, Register tmp5, Register tmp6,
                            XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                            bool is_pclmulqdq_supported);
  // Fold 128-bit data chunk
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
  // Fold 8-bit data
  void fold_8bit_crc32(Register crc, Register table, Register tmp);
  void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);

  // Compress char[] array to byte[].
  void char_array_compress(Register src, Register dst, Register len,
                           XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                           XMMRegister tmp4, Register tmp5, Register result);

  // Inflate byte[] array to char[].
  void byte_array_inflate(Register src, Register dst, Register len,
                          XMMRegister tmp1, Register tmp2);

  void save_vector_registers();
  void restore_vector_registers();
};

/**
 * class SkipIfEqual:
 *
 * Instantiating this class emits code that conditionally jumps around any code
 * generated between the creation of the instance and its automatic destruction
 * at the end of the enclosing scope. Whether the jump is taken depends on the
 * run-time value of the flag passed to the constructor.
 */
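/*
 * Illustrative usage sketch (not part of the original header; 'masm' and the
 * hypothetical 'SomeBoolFlag' are assumptions for the example):
 *
 *   {
 *     SkipIfEqual skip(masm, &SomeBoolFlag, false);
 *     // ... code emitted here executes only when SomeBoolFlag != false;
 *     // otherwise the constructor's flag check jumps over it, and the
 *     // destructor binds the jump's target label.
 *   }
 */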
class SkipIfEqual {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
   SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
   ~SkipIfEqual();
};

#endif // CPU_X86_VM_MACROASSEMBLER_X86_HPP