/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_MACROASSEMBLER_X86_HPP
#define CPU_X86_MACROASSEMBLER_X86_HPP

#include "asm/assembler.hpp"
#include "utilities/macros.hpp"
#include "runtime/rtmLocking.hpp"

// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
// on arguments should also go in here.

class MacroAssembler: public Assembler {
  friend class LIR_Assembler;
  friend class Runtime1;      // as_Address()

 public:
  // Support for VM calls
  //
  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).

  virtual void call_VM_leaf_base(
    address entry_point,               // the entry point
    int     number_of_arguments        // the number of arguments to pop after the call
  );

 protected:
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg) then rdi will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
  virtual void call_VM_base(           // returns the register containing the thread upon return
    Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,              // the thread if computed before; use noreg otherwise
    Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,              // the entry point
    int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions          // whether to check for pending exceptions after return
  );

  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // helpers for FPU flag access
  // tmp is a temporary register, if none is available use noreg
  void save_rax   (Register tmp);
  void restore_rax(Register tmp);

 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  Address as_Address(AddressLiteral adr);
  Address as_Address(ArrayAddress adr);

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).

  void null_check(Register reg, int offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);
  static bool uses_implicit_null_check(void* address);

  // Required platform-specific helpers for Label::patch_instructions.
  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
  void pd_patch_instruction(address branch, address target, const char* file, int line) {
    unsigned char op = branch[0];
    assert(op == 0xE8 /* call */ ||
           op == 0xE9 /* jmp */ ||
           op == 0xEB /* short jmp */ ||
           (op & 0xF0) == 0x70 /* short jcc */ ||
           op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */ ||
           op == 0xC7 && branch[1] == 0xF8 /* xbegin */,
           "Invalid opcode at patch point");

    if (op == 0xEB || (op & 0xF0) == 0x70) {
      // short offset operators (jmp and jcc)
      char* disp = (char*) &branch[1];
      int imm8 = target - (address) &disp[1];
      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
                file == NULL ? "<NULL>" : file, line);
      *disp = imm8;
    } else {
      int* disp = (int*) &branch[(op == 0x0F || op == 0xC7) ? 2 : 1];
      int imm32 = target - (address) &disp[1];
      *disp = imm32;
    }
  }
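
  // Illustrative sketch (not part of the build): patching a short conditional
  // branch with pd_patch_instruction. For a 2-byte "jne" (opcode 0x75, disp8)
  // located at 'branch', the 8-bit displacement is relative to the end of the
  // instruction, so the stored byte is target - (branch + 2):
  //
  //   address branch = ...;   // points at the 0x75 opcode byte
  //   address target = ...;   // resolved label address
  //   // inside pd_patch_instruction:
  //   //   disp = &branch[1]          (the displacement byte)
  //   //   imm8 = target - &disp[1]   == target - (branch + 2)
  //
  // The longer forms (call, jmp, jcc, xbegin) take a 32-bit displacement
  // computed the same way, relative to the first byte after the instruction.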
  // The following 4 methods return the offset of the appropriate move instruction

  // Support for fast byte/short loading with zero extension (depending on particular CPU)
  int load_unsigned_byte(Register dst, Address src);
  int load_unsigned_short(Register dst, Address src);

  // Support for fast byte/short loading with sign extension (depending on particular CPU)
  int load_signed_byte(Register dst, Address src);
  int load_signed_short(Register dst, Address src);

  // Support for sign-extension (hi:lo = extend_sign(lo))
  void extend_sign(Register hi, Register lo);

  // Load and store values by size and signed-ness
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg);
  void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg);

  // Support for inc/dec with optimal instruction selection depending on value

  void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
  void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }

  void decrementl(Address dst, int value = 1);
  void decrementl(Register reg, int value = 1);

  void decrementq(Register reg, int value = 1);
  void decrementq(Address dst, int value = 1);

  void incrementl(Address dst, int value = 1);
  void incrementl(Register reg, int value = 1);

  void incrementq(Register reg, int value = 1);
  void incrementq(Address dst, int value = 1);

  // Support optimal SSE move instructions.
  void movflt(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
    else                       { movss (dst, src); return; }
  }
  void movflt(XMMRegister dst, Address src) { movss(dst, src); }
  void movflt(XMMRegister dst, AddressLiteral src);
  void movflt(Address dst, XMMRegister src) { movss(dst, src); }

  void movdbl(XMMRegister dst, XMMRegister src) {
    if (dst->encoding() == src->encoding()) return;
    if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
    else                       { movsd (dst, src); return; }
  }

  void movdbl(XMMRegister dst, AddressLiteral src);

  void movdbl(XMMRegister dst, Address src) {
    if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
    else                         { movlpd(dst, src); return; }
  }
  void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
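
  // Illustrative note (hypothetical caller names): the movflt/movdbl wrappers
  // above pick the cheapest encoding for the current CPU flags rather than a
  // fixed instruction:
  //
  //   masm->movdbl(xmm0, field_addr);   // movsd if UseXmmLoadAndClearUpper,
  //                                     // else movlpd (upper half untouched)
  //   masm->movdbl(xmm1, xmm0);         // movapd if UseXmmRegToRegMoveAll,
  //                                     // else movsd
  //
  // 'masm' and 'field_addr' are placeholders; the selection logic is exactly
  // the inline code above.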
  void incrementl(AddressLiteral dst);
  void incrementl(ArrayAddress dst);

  void incrementq(AddressLiteral dst);

  // Alignment
  void align(int modulus);
  void align(int modulus, int target);

  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  void fat_nop();

  // Stack frame creation/removal
  void enter();
  void leave();

  // Support for getting the JavaThread pointer (i.e.; a reference to thread-local information)
  // The pointer will be loaded into the thread register.
  void get_thread(Register thread);


  // Support for VM calls
  //
  // It is imperative that all calls into the VM are handled via the call_VM macros.
  // They make sure that the stack linkage is set up correctly. call_VM's correspond
  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.


  void call_VM(Register oop_result,
               address entry_point,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  // Overloadings with last_Java_sp
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               int number_of_arguments = 0,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2,
               bool check_exceptions = true);
  void call_VM(Register oop_result,
               Register last_java_sp,
               address entry_point,
               Register arg_1, Register arg_2, Register arg_3,
               bool check_exceptions = true);

  void get_vm_result  (Register oop_result, Register thread);
  void get_vm_result_2(Register metadata_result, Register thread);

  // These always tightly bind to MacroAssembler::call_VM_base
  // bypassing the virtual implementation
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true);

  void call_VM_leaf0(address entry_point);
  void call_VM_leaf(address entry_point,
                    int number_of_arguments = 0);
  void call_VM_leaf(address entry_point,
                    Register arg_1);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point,
                    Register arg_1, Register arg_2, Register arg_3);

  // These always tightly bind to MacroAssembler::call_VM_leaf_base
  // bypassing the virtual implementation
  void super_call_VM_leaf(address entry_point);
  void super_call_VM_leaf(address entry_point, Register arg_1);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
  void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4);
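
  // Illustrative sketch (hypothetical entry point name): a typical call into
  // the VM passes the entry point via CAST_FROM_FN_PTR and receives any oop
  // result in a nominated register. The wrappers set up and tear down
  // last_Java_frame and (optionally) check for pending exceptions:
  //
  //   // rax := result of a runtime entry taking one register argument
  //   call_VM(rax,
  //           CAST_FROM_FN_PTR(address, SomeRuntime::some_entry),  // illustrative
  //           rbx /* arg_1 */);
  //
  // call_VM_leaf is the same idea for LEAF entries: no thread-state
  // transition, no oop result, no exception check.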
  // last Java Frame (fills frame anchor)
  void set_last_Java_frame(Register thread,
                           Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  // thread in the default location (r15_thread on 64bit)
  void set_last_Java_frame(Register last_java_sp,
                           Register last_java_fp,
                           address last_java_pc);

  void reset_last_Java_frame(Register thread, bool clear_fp);

  // thread in the default location (r15_thread on 64bit)
  void reset_last_Java_frame(bool clear_fp);

  // jobjects
  void clear_jweak_tag(Register possibly_jweak);
  void resolve_jobject(Register value, Register thread, Register tmp);

  // C 'boolean' to Java boolean: x == 0 ? 0 : 1
  void c2bool(Register x);

  // C++ bool manipulation

  void movbool(Register dst, Address src);
  void movbool(Address dst, bool boolconst);
  void movbool(Address dst, Register src);
  void testbool(Register dst);

  void resolve_oop_handle(Register result, Register tmp = rscratch2);
  void resolve_weak_handle(Register result, Register tmp);
  void load_mirror(Register mirror, Register method, Register tmp = rscratch2);
  void load_method_holder_cld(Register rresult, Register rmethod);

  void load_method_holder(Register holder, Register method);

  // oop manipulations
  void load_klass(Register dst, Register src, Register tmp);
  void store_klass(Register dst, Register src, Register tmp);

  void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                      Register tmp1, Register thread_tmp);
  void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
                       Register tmp1, Register tmp2);

  // Resolves obj access. Result is placed in the same register.
  // All other registers are preserved.
  void resolve(DecoratorSet decorators, Register obj);

  void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
                     Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
                              Register thread_tmp = noreg, DecoratorSet decorators = 0);
  void store_heap_oop(Address dst, Register src, Register tmp1 = noreg,
                      Register tmp2 = noreg, DecoratorSet decorators = 0);
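
  // Illustrative sketch (hypothetical field offset): load_heap_oop and
  // store_heap_oop route the access through the active GC's barrier set and
  // handle narrow-oop encoding, so callers never emit raw moves for oop fields:
  //
  //   // dst := obj->field   ('field_off' is a placeholder offset)
  //   load_heap_oop(dst, Address(obj, field_off));
  //   // obj->field := val, with any required pre/post write barriers
  //   store_heap_oop(Address(obj, field_off), val);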
  // Used for storing NULL. All other oop constants should be
  // stored using routines that take a jobject.
  void store_heap_oop_null(Address dst);

  void load_prototype_header(Register dst, Register src, Register tmp);

#ifdef _LP64
  void store_klass_gap(Register dst, Register src);

  // This dummy is to prevent a call to store_heap_oop from
  // converting a zero (like NULL) into a Register by giving
  // the compiler two choices it can't resolve

  void store_heap_oop(Address dst, void* dummy);

  void encode_heap_oop(Register r);
  void decode_heap_oop(Register r);
  void encode_heap_oop_not_null(Register r);
  void decode_heap_oop_not_null(Register r);
  void encode_heap_oop_not_null(Register dst, Register src);
  void decode_heap_oop_not_null(Register dst, Register src);

  void set_narrow_oop(Register dst, jobject obj);
  void set_narrow_oop(Address dst, jobject obj);
  void cmp_narrow_oop(Register dst, jobject obj);
  void cmp_narrow_oop(Address dst, jobject obj);

  void encode_klass_not_null(Register r, Register tmp);
  void decode_klass_not_null(Register r, Register tmp);
  void encode_and_move_klass_not_null(Register dst, Register src);
  void decode_and_move_klass_not_null(Register dst, Register src);
  void set_narrow_klass(Register dst, Klass* k);
  void set_narrow_klass(Address dst, Klass* k);
  void cmp_narrow_klass(Register dst, Klass* k);
  void cmp_narrow_klass(Address dst, Klass* k);

  // if heap base register is used - reinit it with the correct value
  void reinit_heapbase();

  DEBUG_ONLY(void verify_heapbase(const char* msg);)

#endif // _LP64
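
  // Background sketch (the general compressed-oops scheme, stated here as an
  // assumption about the helpers above): encode_heap_oop maps a 64-bit oop to
  // a 32-bit narrow oop and decode_heap_oop inverts it,
  //
  //   narrow = (oop - heap_base) >> shift;   // shift == 0 or log2(obj alignment)
  //   oop    = heap_base + ((uint64_t)narrow << shift);
  //
  // with the _not_null variants skipping the NULL special case. reinit_heapbase
  // reloads the register that caches heap_base after it has been clobbered.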
  // Int division/remainder for Java
  // (as idivl, but checks for special case as described in JVM spec.)
  // returns idivl instruction offset for implicit exception handling
  int corrected_idivl(Register reg);

  // Long division/remainder for Java
  // (as idivq, but checks for special case as described in JVM spec.)
  // returns idivq instruction offset for implicit exception handling
  int corrected_idivq(Register reg);

  void int3();

  // Long operation macros for a 32bit cpu
  // Long negation for Java
  void lneg(Register hi, Register lo);

  // Long multiplication for Java
  // (destroys contents of eax, ebx, ecx and edx)
  void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y

  // Long shifts for Java
  // (semantics as described in JVM spec.)
  void lshl(Register hi, Register lo);                               // hi:lo << (rcx & 0x3f)
  void lshr(Register hi, Register lo, bool sign_extension = false);  // hi:lo >> (rcx & 0x3f)

  // Long compare for Java
  // (semantics as described in JVM spec.)
  void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)


  // misc

  // Sign extension
  void sign_extend_short(Register reg);
  void sign_extend_byte(Register reg);

  // Division by power of 2, rounding towards 0
  void division_with_shift(Register reg, int shift_value);

#ifndef _LP64
  // Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
  //
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code)
  void fcmp(Register tmp);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp(Register tmp, int index, bool pop_left, bool pop_right);

  // Floating-point comparison for Java
  // Compares the top-most stack entries on the FPU stack and stores the result in dst.
  // The arguments are in reversed order on the stack (i.e., top of stack is first argument).
  // (semantics as described in JVM spec.)
  void fcmp2int(Register dst, bool unordered_is_less);
  // Variant of the above which allows y to be further down the stack
  // and which only pops x and y if specified. If pop_right is
  // specified then pop_left must also be specified.
  void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right);

  // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards)
  // tmp is a temporary register, if none is available use noreg
  void fremr(Register tmp);

  // only if +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
#endif // !_LP64

  // dst = c = a * b + c
  void fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);
  void fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c);

  void vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len);
  void vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);
  void vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len);


  // same as fcmp2int, but using SSE2
  void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);
  void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less);

  // branch to L if FPU flag C2 is set/not set
  // tmp is a temporary register, if none is available use noreg
  void jC2 (Register tmp, Label& L);
  void jnC2(Register tmp, Label& L);

  // Load float value from 'address'. If UseSSE >= 1, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_float(Address src);

  // Store float value to 'address'. If UseSSE >= 1, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_float(Address dst);

  // Load double value from 'address'. If UseSSE >= 2, the value is loaded into
  // register xmm0. Otherwise, the value is loaded onto the FPU stack.
  void load_double(Address src);

  // Store double value to 'address'. If UseSSE >= 2, the value is stored
  // from register xmm0. Otherwise, the value is stored from the FPU stack.
  void store_double(Address dst);
#ifndef _LP64
  // Pop ST (ffree & fincstp combined)
  void fpop();

  void empty_FPU_stack();
#endif // !_LP64

  void push_IU_state();
  void pop_IU_state();

  void push_FPU_state();
  void pop_FPU_state();

  void push_CPU_state();
  void pop_CPU_state();

  // Round up to a power of two
  void round_to(Register reg, int modulus);

  // Callee saved registers handling
  void push_callee_saved_registers();
  void pop_callee_saved_registers();

  // allocation
  void eden_allocate(
    Register thread,                   // Current thread
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if known at compile time
    Register t1,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void tlab_allocate(
    Register thread,                   // Current thread
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if known at compile time
    Register t1,                       // temp register
    Register t2,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
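
  // Illustrative sketch (hypothetical registers, labels, and size constant):
  // a fast-path fixed-size allocation bumps the TLAB top and falls back to a
  // runtime call when the TLAB cannot satisfy the request:
  //
  //   Label slow_case, done;
  //   tlab_allocate(r15_thread, rax /* obj */, noreg /* var size */,
  //                 instance_size_in_bytes, rbx /* t1 */, rcx /* t2 */,
  //                 slow_case);
  //   ... initialize header/fields of rax ...
  //   jmp(done);
  //   bind(slow_case);
  //   ... call into the VM to allocate ...
  //   bind(done);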
  // interface method calling
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register scan_temp,
                               Label& no_such_interface,
                               bool return_method = true);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg and temp2_reg can be noreg, if no temps are available.
  // Updates the sub's secondary super cache as necessary.
  // If set_cond_codes, condition codes will be Z on success, NZ on failure.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     bool set_cond_codes = false);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success);
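
  // Illustrative sketch (hypothetical register assignment): wiring the
  // two-phase subtype check by hand, as a type-check stub would. The fast path
  // answers yes/no/maybe; passing NULL for L_slow_path makes the "maybe" case
  // fall through into the slow path:
  //
  //   Label L_success, L_failure;
  //   check_klass_subtype_fast_path(rsi /* sub */, rax /* super */, rcx /* temp */,
  //                                 &L_success, &L_failure,
  //                                 NULL /* fall through to slow path */);
  //   check_klass_subtype_slow_path(rsi, rax, rcx, rdi,
  //                                 &L_success, &L_failure);
  //
  // For the common case, check_klass_subtype above bundles both phases and
  // simply falls through on failure.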
  void clinit_barrier(Register klass,
                      Register thread,
                      Label* L_fast_path = NULL,
                      Label* L_slow_path = NULL);

  // method handles (JSR 292)
  Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0);

  //----
  void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0

  // Debugging

  // only if +VerifyOops
  void _verify_oop(Register reg, const char* s, const char* file, int line);
  void _verify_oop_addr(Address addr, const char* s, const char* file, int line);

  // TODO: verify method and klass metadata (compare against vptr?)
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_oop(reg) _verify_oop(reg, "broken oop " #reg, __FILE__, __LINE__)
#define verify_oop_msg(reg, msg) _verify_oop(reg, "broken oop " #reg ", " #msg, __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr(addr, "broken oop addr " #addr, __FILE__, __LINE__)
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // Verify or restore cpu control state after JNI call
  void restore_cpu_control_state_after_jni();

  // prints msg, dumps registers and stops execution
  void stop(const char* msg);

  // prints msg and continues
  void warn(const char* msg);

  // dumps registers and other state
  void print_state();

  static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
  static void debug64(char* msg, int64_t pc, int64_t regs[]);
  static void print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip);
  static void print_state64(int64_t pc, int64_t regs[]);

  void os_breakpoint();

  void untested() { stop("untested"); }

  void unimplemented(const char* what = "");

  void should_not_reach_here() { stop("should not reach here"); }

  void print_CPU_state();

  // Stack overflow checking
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with negative offset");
    movl(Address(rsp, (-offset)), rax);
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages. Also, clobbers tmp
  void bang_stack_size(Register size, Register tmp);

  // Check for reserved stack access in method being exited (for JIT)
  void reserved_stack_check();

  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset);

  // If thread_reg is != noreg the code assumes the register passed contains
  // the thread (required on 64 bit).
  void safepoint_poll(Label& slow_path, Register thread_reg, Register temp_reg);

  void verify_tlab();

  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg must be rax, and is killed.
  // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
  // be killed; if not supplied, push/pop will be used internally to
  // allocate a temporary (inefficient, avoid if possible).
  // Optional slow case is for implementations (interpreter and C1) which branch to
  // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
  // Returns offset of first potentially-faulting instruction for null
  // check info (currently consumed only by C1). If
  // swap_reg_contains_mark is true then returns -1 as it is assumed
  // the calling code has already passed any potential faults.
  int biased_locking_enter(Register lock_reg, Register obj_reg,
                           Register swap_reg, Register tmp_reg,
                           Register tmp_reg2, bool swap_reg_contains_mark,
                           Label& done, Label* slow_case = NULL,
                           BiasedLockingCounters* counters = NULL);
  void biased_locking_exit(Register obj_reg, Register temp_reg, Label& done);

  Condition negate_condition(Condition cond);

  // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
  // operands. In general the names are modified to avoid hiding the instruction in Assembler
  // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
  // here in MacroAssembler. The major exception to this rule is call

  // Arithmetics


  void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
  void addptr(Address dst, Register src);

  void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
  void addptr(Register dst, int32_t src);
  void addptr(Register dst, Register src);
  void addptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) addptr(dst, (int) src.as_constant());
    else                   addptr(dst, src.as_register());
  }
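
  // Illustrative note: the *ptr wrappers expand to the pointer-width flavor
  // of the underlying instruction at compile time, so shared code stays
  // word-size-agnostic. For example,
  //
  //   addptr(rbx, 8);   // emits addq(rbx, 8) in LP64 builds,
  //                     // addl(rbx, 8) in 32-bit builds
  //
  // via the LP64_ONLY/NOT_LP64 macros from utilities/macros.hpp.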
  void andptr(Register dst, int32_t src);
  void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }

  void cmp8(AddressLiteral src1, int imm);

  // renamed to drag out the casting of address to int32_t/intptr_t
  void cmp32(Register src1, int32_t imm);

  void cmp32(AddressLiteral src1, int32_t imm);
  // compare reg - mem, or reg - &mem
  void cmp32(Register src1, AddressLiteral src2);

  void cmp32(Register src1, Address src2);

#ifndef _LP64
  void cmpklass(Address dst, Metadata* obj);
  void cmpklass(Register dst, Metadata* obj);
  void cmpoop(Address dst, jobject obj);
  void cmpoop_raw(Address dst, jobject obj);
#endif // !_LP64

  void cmpoop(Register src1, Register src2);
  void cmpoop(Register src1, Address src2);
  void cmpoop(Register dst, jobject obj);
  void cmpoop_raw(Register dst, jobject obj);

  // NOTE src2 must be the lval. This is NOT a mem-mem compare
  void cmpptr(Address src1, AddressLiteral src2);

  void cmpptr(Register src1, AddressLiteral src2);

  void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
  void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }

  // cmp64 to avoid hiding cmpq
  void cmp64(Register src1, AddressLiteral src);

  void cmpxchgptr(Register reg, Address adr);

  void locked_cmpxchgptr(Register reg, AddressLiteral adr);


  void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
  void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }


  void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }

  void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }

  void shlptr(Register dst, int32_t shift);
  void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }

  void shrptr(Register dst, int32_t shift);
  void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }

  void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
  void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }

  void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }

  void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
  void subptr(Register dst, int32_t src);
  // Force generation of a 4 byte immediate value even if it fits into 8bit
  void subptr_imm32(Register dst, int32_t src);
  void subptr(Register dst, Register src);
  void subptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) subptr(dst, (int) src.as_constant());
    else                   subptr(dst, src.as_register());
  }

  void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
  void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }

  void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
  void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }

  void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }



  // Helper functions for statistics gathering.
  // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
  void cond_inc32(Condition cond, AddressLiteral counter_addr);
  // Unconditional atomic increment.
  void atomic_incl(Address counter_addr);
  void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1);
#ifdef _LP64
  void atomic_incq(Address counter_addr);
  void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1);
#endif
  void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; }
  void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }
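
  // Illustrative sketch (hypothetical counter variable): bumping a global
  // statistics counter from generated code. ExternalAddress wraps the
  // counter's C++ address as an AddressLiteral:
  //
  //   static int some_counter;   // hypothetical VM-side counter
  //   ...
  //   cond_inc32(equal, ExternalAddress((address)&some_counter));  // flags preserved
  //   atomic_incl(ExternalAddress((address)&some_counter));        // unconditional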
  void lea(Register dst, AddressLiteral adr);
  void lea(Address dst, AddressLiteral adr);
  void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }

  void leal32(Register dst, Address src) { leal(dst, src); }

  // Import other testl() methods from the parent class or else
  // they will be hidden by the following overriding declaration.
  using Assembler::testl;
  void testl(Register dst, AddressLiteral src);

  void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
  void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }

  void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
  void testptr(Register src1, Address src2) { LP64_ONLY(testq(src1, src2)) NOT_LP64(testl(src1, src2)); }
  void testptr(Register src1, Register src2);

  void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
  void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }

  // Calls

  void call(Label& L, relocInfo::relocType rtype);
  void call(Register entry);

  // NOTE: this call transfers to the effective address of entry NOT
  // the address contained by entry. This is because this is more natural
  // for jumps/calls.
  void call(AddressLiteral entry);

  // Emit the CompiledIC call idiom
  void ic_call(address entry, jint method_index = 0);

  // Jumps

  // NOTE: these jumps transfer to the effective address of dst NOT
  // the address contained by dst. This is because this is more natural
  // for jumps/calls.
  void jump(AddressLiteral dst);
  void jump_cc(Condition cc, AddressLiteral dst);
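
  // Illustrative sketch: AddressLiteral-based control transfer to a runtime
  // stub. RuntimeAddress is the AddressLiteral flavor used for stub entry
  // points (the stub accessor named below is hypothetical):
  //
  //   jump(RuntimeAddress(StubRoutines::some_stub_entry()));
  //   jump_cc(Assembler::notZero, RuntimeAddress(StubRoutines::some_stub_entry()));
  //
  // Both transfer to the literal address itself, not to a value loaded from
  // it; jump(ArrayAddress) below is the indirect variant.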
  // 32bit can do a case table jump in one instruction but we no longer allow the base
  // to be installed in the Address class. This jump will transfer to the address
  // contained in the location described by entry (not the address of entry)
  void jump(ArrayAddress entry);

  // Floating

  void andpd(XMMRegister dst, Address src)     { Assembler::andpd(dst, src); }
  void andpd(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
  void andpd(XMMRegister dst, XMMRegister src) { Assembler::andpd(dst, src); }

  void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, Address src)     { Assembler::andps(dst, src); }
  void andps(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, Address src)     { Assembler::comiss(dst, src); }
  void comiss(XMMRegister dst, AddressLiteral src);

  void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, Address src)     { Assembler::comisd(dst, src); }
  void comisd(XMMRegister dst, AddressLiteral src);

#ifndef _LP64
  void fadd_s(Address src)        { Assembler::fadd_s(src); }
  void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }

  void fldcw(Address src) { Assembler::fldcw(src); }
  void fldcw(AddressLiteral src);

  void fld_s(int index)   { Assembler::fld_s(index); }
  void fld_s(Address src) { Assembler::fld_s(src); }
  void fld_s(AddressLiteral src);

  void fld_d(Address src) { Assembler::fld_d(src); }
  void fld_d(AddressLiteral src);

  void fld_x(Address src) { Assembler::fld_x(src); }
  void fld_x(AddressLiteral src);

  void fmul_s(Address src)        { Assembler::fmul_s(src); }
  void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
#endif // !_LP64

  void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
  void ldmxcsr(AddressLiteral src);
#ifdef _LP64
 private:
  void sha256_AVX2_one_round_compute(
    Register reg_old_h,
    Register reg_a,
    Register reg_b,
    Register reg_c,
    Register reg_d,
    Register reg_e,
    Register reg_f,
    Register reg_g,
    Register reg_h,
    int iter);
  void sha256_AVX2_four_rounds_compute_first(int start);
  void sha256_AVX2_four_rounds_compute_last(int start);
  void sha256_AVX2_one_round_and_sched(
    XMMRegister xmm_0,   /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
    XMMRegister xmm_1,   /* ymm5 */  /* full cycle is 16 iterations */
    XMMRegister xmm_2,   /* ymm6 */
    XMMRegister xmm_3,   /* ymm7 */
    Register    reg_a,   /* == eax on 0 iteration, then rotate 8 registers right on each next iteration */
    Register    reg_b,   /* ebx */   /* full cycle is 8 iterations */
    Register    reg_c,   /* edi */
    Register    reg_d,   /* esi */
    Register    reg_e,   /* r8d */
    Register    reg_f,   /* r9d */
    Register    reg_g,   /* r10d */
    Register    reg_h,   /* r11d */
    int iter);

  void addm(int disp, Register r1, Register r2);
  void gfmul(XMMRegister tmp0, XMMRegister t);
  void schoolbookAAD(int i, Register subkeyH, XMMRegister data, XMMRegister tmp0,
                     XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3);
  void generateHtbl_one_block(Register htbl);
  void generateHtbl_eight_blocks(Register htbl);

 public:
  void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
  void avx_ghash(Register state, Register htbl, Register data, Register blocks);
#endif

#ifdef _LP64
 private:
  void sha512_AVX2_one_round_compute(Register old_h, Register a, Register b, Register c, Register d,
                                     Register e, Register f, Register g, Register h, int iteration);

  void sha512_AVX2_one_round_and_schedule(XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                                          Register a, Register b, Register c, Register d, Register e, Register f,
                                          Register g, Register h, int iteration);

  void addmq(int disp, Register r1, Register r2);
 public:
  void sha512_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp, bool multi_block,
                   XMMRegister shuf_mask);
 private:
  void roundEnc(XMMRegister key, int rnum);
  void lastroundEnc(XMMRegister key, int rnum);
  void roundDec(XMMRegister key, int rnum);
  void lastroundDec(XMMRegister key, int rnum);
  void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask);

 public:
  void aesecb_encrypt(Register source_addr, Register dest_addr, Register key, Register len);
  void aesecb_decrypt(Register source_addr, Register dest_addr, Register key, Register len);
  void aesctr_encrypt(Register src_addr, Register dest_addr, Register key, Register counter,
                      Register len_reg, Register used, Register used_addr, Register saved_encCounter_start);

#endif

  void fast_md5(Register buf, Address state, Address ofs, Address limit,
                bool multi_block);

  void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
                 XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
                 Register buf, Register state, Register ofs, Register limit, Register rsp,
                 bool multi_block);

#ifdef _LP64
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block, XMMRegister shuf_mask);
#else
  void fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
                   XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
                   Register buf, Register state, Register ofs, Register limit, Register rsp,
                   bool multi_block);
#endif
  void fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

#ifdef _LP64
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1, Register tmp2);

  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register r11);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp1, Register tmp2, Register tmp3, Register tmp4);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rcx, Register rdx, Register tmp1, Register tmp2,
                Register tmp3, Register tmp4);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1,
                Register tmp2, Register tmp3, Register tmp4);
  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1,
                Register tmp2, Register tmp3, Register tmp4);
#else
  void fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp1);

  void fast_log10(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                  XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                  Register rax, Register rcx, Register rdx, Register tmp);

  void fast_pow(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4,
                XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register rax, Register rcx,
                Register rdx, Register tmp);

  void fast_sin(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rbx, Register rdx);

  void fast_cos(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);

  void libm_sincos_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void libm_reduce_pi04l(Register eax, Register ecx, Register edx, Register ebx,
                         Register esi, Register edi, Register ebp, Register esp);

  void libm_tancot_huge(XMMRegister xmm0, XMMRegister xmm1, Register eax, Register ecx,
                        Register edx, Register ebx, Register esi, Register edi,
                        Register ebp, Register esp);

  void fast_tan(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3,
                XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7,
                Register rax, Register rcx, Register rdx, Register tmp);
#endif
 private:

  // these are private because users should be doing movflt/movdbl

  void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
  void movss(Address dst, XMMRegister src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, Address src)     { Assembler::movss(dst, src); }
  void movss(XMMRegister dst, AddressLiteral src);

  void movlpd(XMMRegister dst, Address src)    { Assembler::movlpd(dst, src); }
  void movlpd(XMMRegister dst, AddressLiteral src);

 public:

  void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, Address src)     { Assembler::addsd(dst, src); }
  void addsd(XMMRegister dst, AddressLiteral src);

  void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, Address src)     { Assembler::addss(dst, src); }
  void addss(XMMRegister dst, AddressLiteral src);

  void addpd(XMMRegister dst, XMMRegister src) { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, Address src)     { Assembler::addpd(dst, src); }
  void addpd(XMMRegister dst, AddressLiteral src);

  void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, Address src)     { Assembler::divsd(dst, src); }
  void divsd(XMMRegister dst, AddressLiteral src);

  void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, Address src)     { Assembler::divss(dst, src); }
  void divss(XMMRegister dst, AddressLiteral src);

  // Move Unaligned Double Quadword
  void movdqu(Address dst, XMMRegister src);
  void movdqu(XMMRegister dst, Address src);
  void movdqu(XMMRegister dst, XMMRegister src);
  void movdqu(XMMRegister dst, AddressLiteral src, Register scratchReg = rscratch1);
  // AVX Unaligned forms
  void vmovdqu(Address dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, Address src);
  void vmovdqu(XMMRegister dst, XMMRegister src);
  void vmovdqu(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
  void evmovdquq(XMMRegister dst, Address src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(Address dst, XMMRegister src, int vector_len) { Assembler::evmovdquq(dst, src, vector_len); }
  void evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch);

  // Move Aligned Double Quadword
  void movdqa(XMMRegister dst, Address src)     { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); }
  void movdqa(XMMRegister dst, AddressLiteral src);

  void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
  void movsd(Address dst, XMMRegister src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, Address src)     { Assembler::movsd(dst, src); }
  void movsd(XMMRegister dst, AddressLiteral src);

  void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, Address src)     { Assembler::mulpd(dst, src); }
  void mulpd(XMMRegister dst, AddressLiteral src);

  void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, Address src)     { Assembler::mulsd(dst, src); }
  void mulsd(XMMRegister dst, AddressLiteral src);

  void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, Address src)     { Assembler::mulss(dst, src); }
  void mulss(XMMRegister dst, AddressLiteral src);

  // Carry-Less Multiplication Quadword
  void pclmulldq(XMMRegister dst, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::pclmulqdq(dst, src, 0x00);
  }
  void pclmulhdq(XMMRegister dst, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::pclmulqdq(dst, src, 0x11);
  }
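
  // Illustrative note (instruction-set background, not code from this file):
  // the pclmulqdq imm8 selects which 64-bit half of each operand is
  // multiplied (bit 0 for the first operand, bit 4 for the second), so 0x00
  // is low*low and 0x11 is high*high as wrapped above. A full 128x128-bit
  // carry-less product therefore also needs the two cross products
  // (imm8 0x01 and 0x10), as the GHASH/CRC stubs do.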
  void pcmpeqb(XMMRegister dst, XMMRegister src);
  void pcmpeqw(XMMRegister dst, XMMRegister src);

  void pcmpestri(XMMRegister dst, Address src, int imm8);
  void pcmpestri(XMMRegister dst, XMMRegister src, int imm8);

  void pmovzxbw(XMMRegister dst, XMMRegister src);
  void pmovzxbw(XMMRegister dst, Address src);

  void pmovmskb(Register dst, XMMRegister src);

  void ptest(XMMRegister dst, XMMRegister src);

  void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, Address src)     { Assembler::sqrtsd(dst, src); }
  void sqrtsd(XMMRegister dst, AddressLiteral src);

  void roundsd(XMMRegister dst, XMMRegister src, int32_t rmode) { Assembler::roundsd(dst, src, rmode); }
  void roundsd(XMMRegister dst, Address src, int32_t rmode)     { Assembler::roundsd(dst, src, rmode); }
  void roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register scratch_reg);

  void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, Address src)     { Assembler::sqrtss(dst, src); }
  void sqrtss(XMMRegister dst, AddressLiteral src);

  void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, Address src)     { Assembler::subsd(dst, src); }
  void subsd(XMMRegister dst, AddressLiteral src);

  void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, Address src)     { Assembler::subss(dst, src); }
  void subss(XMMRegister dst, AddressLiteral src);

  void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, Address src)     { Assembler::ucomiss(dst, src); }
  void ucomiss(XMMRegister dst, AddressLiteral src);

  void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, Address src)     { Assembler::ucomisd(dst, src); }
  void ucomisd(XMMRegister dst, AddressLiteral src);

  // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
  void xorpd(XMMRegister dst, XMMRegister src);
  void xorpd(XMMRegister dst, Address src)     { Assembler::xorpd(dst, src); }
  void xorpd(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
  void xorps(XMMRegister dst, XMMRegister src);
  void xorps(XMMRegister dst, Address src)     { Assembler::xorps(dst, src); }
  void xorps(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);

  // Shuffle Bytes
  void pshufb(XMMRegister dst, XMMRegister src) { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, Address src)     { Assembler::pshufb(dst, src); }
  void pshufb(XMMRegister dst, AddressLiteral src);
  // AVX 3-operands instructions

  void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddsd(dst, nds, src); }
  void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, Address src)     { Assembler::vaddss(dst, nds, src); }
  void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);
  void vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len);

  void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpaddd(dst, nds, src, vector_len); }
  void vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vpaddd(dst, nds, src, vector_len); }
  void vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch);

  void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len)     { Assembler::vpand(dst, nds, src, vector_len); }
  void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
  void vpbroadcastw(XMMRegister dst, Address src, int vector_len) { Assembler::vpbroadcastw(dst, src, vector_len); }

  void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

  void vpmovzxbw(XMMRegister dst, Address src, int vector_len);
  void vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpmovzxbw(dst, src, vector_len); }

  void vpmovmskb(Register dst, XMMRegister src);

  void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
  void vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len);

  void vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len);
  void vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len);

  void vptest(XMMRegister dst, XMMRegister src);

  void punpcklbw(XMMRegister dst, XMMRegister src);
  void punpcklbw(XMMRegister dst, Address src) { Assembler::punpcklbw(dst, src); }

  void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); }
  void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); }
  void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); }
  void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); }
  void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); }
  void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); }
  void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  void vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src);
  void vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src);

  // AVX Vector instructions

  void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorpd(dst, nds, src, vector_len); }
  void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vxorps(dst, nds, src, vector_len); }
  void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
    if (UseAVX > 1 || (vector_len < 1)) // vpxor 256 bit is available only in AVX2
      Assembler::vpxor(dst, nds, src, vector_len);
    else
      Assembler::vxorpd(dst, nds, src, vector_len);
  }
  void vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);

  // Simple version for AVX2 256bit vectors
  void vpxor(XMMRegister dst, XMMRegister src) { Assembler::vpxor(dst, dst, src, true); }
  void vpxor(XMMRegister dst, Address src) { Assembler::vpxor(dst, dst, src, true); }
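
  // Rationale (sketch): 256-bit integer vpxor requires AVX2, so the dispatch
  // above falls back to vxorpd on AVX1-only hardware; the result is bitwise
  // identical, only the instruction's execution domain differs. In the simple
  // versions, 'true' converts to vector_len 1, i.e. a 256-bit (YMM) operation.
  // A common zeroing idiom:
  //
  //   __ vpxor(xmm0, xmm0);  // ymm0 = ymm0 ^ ymm0 = 0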

  void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinserti32x4(dst, dst, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinserti32x4(dst, dst, src, imm8);
    } else if (UseAVX > 1) {
      // vinserti128 is available only in AVX2
      Assembler::vinserti128(dst, nds, src, imm8);
    } else {
      Assembler::vinsertf128(dst, nds, src, imm8);
    }
  }

  void vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }

  void vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextracti32x4(dst, src, imm8);
    } else if (UseAVX > 1) {
      // vextracti128 is available only in AVX2
      Assembler::vextracti128(dst, src, imm8);
    } else {
      Assembler::vextractf128(dst, src, imm8);
    }
  }
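
  // Dispatch summary (sketch): imm8 selects the 128-bit lane. On AVX-512
  // parts without the VL extension the EVEX-encoded 32x4 forms are used; with
  // AVX2, the native 128-bit integer insert/extract; with AVX1 only, the
  // floating-point-domain vinsertf128/vextractf128, which move the same bits.
  // For example:
  //
  //   __ vinserti128(xmm0, xmm0, xmm1, 1);  // ymm0[255:128] = xmm1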

  // 128bit copy to/from high 128 bits of 256bit (YMM) vector registers
  void vinserti128_high(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 1);
  }
  void vinserti128_high(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 1);
  }
  void vextracti128_high(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }
  void vextracti128_high(Address dst, XMMRegister src) {
    vextracti128(dst, src, 1);
  }

  void vinsertf128_high(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 1);
    } else {
      Assembler::vinsertf128(dst, dst, src, 1);
    }
  }

  void vinsertf128_high(XMMRegister dst, Address src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 1);
    } else {
      Assembler::vinsertf128(dst, dst, src, 1);
    }
  }

  void vextractf128_high(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 1);
    } else {
      Assembler::vextractf128(dst, src, 1);
    }
  }

  void vextractf128_high(Address dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 1);
    } else {
      Assembler::vextractf128(dst, src, 1);
    }
  }

  // 256bit copy to/from high 256 bits of 512bit (ZMM) vector registers
  void vinserti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }
  void vextracti64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 1);
  }
  void vextractf64x4_high(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vextractf64x4_high(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 1);
  }
  void vinsertf64x4_high(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 1);
  }

  // 128bit copy to/from low 128 bits of 256bit (YMM) vector registers
  void vinserti128_low(XMMRegister dst, XMMRegister src) {
    vinserti128(dst, dst, src, 0);
  }
  void vinserti128_low(XMMRegister dst, Address src) {
    vinserti128(dst, dst, src, 0);
  }
  void vextracti128_low(XMMRegister dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }
  void vextracti128_low(Address dst, XMMRegister src) {
    vextracti128(dst, src, 0);
  }

  void vinsertf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vinsertf128_low(XMMRegister dst, Address src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vinsertf32x4(dst, dst, src, 0);
    } else {
      Assembler::vinsertf128(dst, dst, src, 0);
    }
  }

  void vextractf128_low(XMMRegister dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  void vextractf128_low(Address dst, XMMRegister src) {
    if (UseAVX > 2 && VM_Version::supports_avx512novl()) {
      Assembler::vextractf32x4(dst, src, 0);
    } else {
      Assembler::vextractf128(dst, src, 0);
    }
  }

  // 256bit copy to/from low 256 bits of 512bit (ZMM) vector registers
  void vinserti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinserti64x4(dst, dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }
  void vextracti64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextracti64x4(dst, src, 0);
  }
  void vextractf64x4_low(XMMRegister dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vextractf64x4_low(Address dst, XMMRegister src) {
    Assembler::vextractf64x4(dst, src, 0);
  }
  void vinsertf64x4_low(XMMRegister dst, Address src) {
    Assembler::vinsertf64x4(dst, dst, src, 0);
  }
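
  // For example (sketch), moving a YMM register's halves around:
  //
  //   __ vextracti128_high(xmm1, xmm0);  // xmm1 = ymm0[255:128]
  //   __ vinserti128_low(xmm0, xmm1);    // ymm0[127:0] = xmm1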

  // Carry-Less Multiplication Quadword
  void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x00);
  }
  void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x11);
  }
  void vpclmullqhqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x10 - multiply nds[0:63] and src[64:127]
    Assembler::vpclmulqdq(dst, nds, src, 0x10);
  }
  void vpclmulhqlqdq(XMMRegister dst, XMMRegister nds, XMMRegister src) {
    // 0x01 - multiply nds[64:127] and src[0:63]
    Assembler::vpclmulqdq(dst, nds, src, 0x01);
  }

  void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    // 0x00 - multiply lower 64 bits [0:63]
    Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
  }
  void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
    // 0x11 - multiply upper 64 bits [64:127]
    Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
  }
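
  // The PCLMULQDQ immediate selects which 64-bit halves are multiplied:
  // imm8 bit 0 picks the quadword of the first source (0 = low, 1 = high),
  // imm8 bit 4 picks the quadword of the second source. Hence:
  //
  //   0x00 -> nds[63:0]   * src[63:0]
  //   0x11 -> nds[127:64] * src[127:64]
  //   0x10 -> nds[63:0]   * src[127:64]
  //   0x01 -> nds[127:64] * src[63:0]
  //
  // Each product is a carry-less (GF(2)[x]) multiply written to the full dst.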

  // Data

  void cmov32( Condition cc, Register dst, Address src);
  void cmov32( Condition cc, Register dst, Register src);

  void cmov(   Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); }

  void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }
  void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); }

  void movoop(Register dst, jobject obj);
  void movoop(Address dst, jobject obj);

  void mov_metadata(Register dst, Metadata* obj);
  void mov_metadata(Address dst, Metadata* obj);

  void movptr(ArrayAddress dst, Register src);
  // can this do an lea?
  void movptr(Register dst, ArrayAddress src);

  void movptr(Register dst, Address src);

#ifdef _LP64
  void movptr(Register dst, AddressLiteral src, Register scratch = rscratch1);
#else
  void movptr(Register dst, AddressLiteral src, Register scratch = noreg); // Scratch reg is ignored in 32-bit
#endif

  void movptr(Register dst, intptr_t src);
  void movptr(Register dst, Register src);
  void movptr(Address dst, intptr_t src);

  void movptr(Address dst, Register src);

  void movptr(Register dst, RegisterOrConstant src) {
    if (src.is_constant()) movptr(dst, src.as_constant());
    else                   movptr(dst, src.as_register());
  }

#ifdef _LP64
  // Generally the next two are only used for moving NULL, although there are
  // situations in initializing the mark word where they could be used.
  // They are dangerous.

  // They exist only on LP64, where int32_t and intptr_t are distinct types;
  // on 32-bit platforms the declarations would be ambiguous.

  void movptr(Address dst, int32_t imm32);
  void movptr(Register dst, int32_t imm32);
#endif // _LP64

  // to avoid hiding movl
  void mov32(AddressLiteral dst, Register src);
  void mov32(Register dst, AddressLiteral src);

  // to avoid hiding movb
  void movbyte(ArrayAddress dst, int src);

  // Import other mov() methods from the parent class, or else
  // they will be hidden by the following overriding declarations.
  using Assembler::movdl;
  using Assembler::movq;
  void movdl(XMMRegister dst, AddressLiteral src);
  void movq(XMMRegister dst, AddressLiteral src);

  // Can push value or effective address
  void pushptr(AddressLiteral src);

  void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
  void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }

  void pushoop(jobject obj);
  void pushklass(Metadata* obj);

  // sign-extend a 32-bit ('l') value to a pointer-sized element as needed
  void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
  void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
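
  // The LP64_ONLY/NOT_LP64 pattern above expands to the 64-bit instruction on
  // x86_64 and the 32-bit one on x86_32, so shared code can manipulate
  // pointer-sized values without #ifdefs. For example (sketch):
  //
  //   __ movl2ptr(rbx, rcx);  // movslq(rbx, rcx) on LP64, movl on 32-bit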

 public:
  // C2 compiled method's prolog code.
  void verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b, bool is_stub);

  // clear memory of size 'cnt' qwords, starting at 'base';
  // if 'is_large' is set, do not try to produce a short loop
  void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large);

  // clear memory of size 'cnt' qwords, starting at 'base', using XMM/YMM registers
  void xmm_clear_mem(Register base, Register cnt, XMMRegister xtmp);

  // Fill primitive arrays
  void generate_fill(BasicType t, bool aligned,
                     Register to, Register value, Register count,
                     Register rtmp, XMMRegister xtmp);

  void encode_iso_array(Register src, Register dst, Register len,
                        XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                        XMMRegister tmp4, Register tmp5, Register result);

#ifdef _LP64
  void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
  void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
                              Register yz_idx, Register idx,
                              Register carry, Register product, int offset);
  void multiply_128_x_128_bmi2_loop(Register y, Register z,
                                    Register carry, Register carry2,
                                    Register idx, Register jdx,
                                    Register yz_idx1, Register yz_idx2,
                                    Register tmp, Register tmp3, Register tmp4);
  void multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
                               Register yz_idx, Register idx, Register jdx,
                               Register carry, Register product,
                               Register carry2);
  void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
                       Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
  void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
                     Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
  void multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry,
                            Register tmp2);
  void multiply_add_64(Register sum, Register op1, Register op2, Register carry,
                       Register rdxReg, Register raxReg);
  void add_one_64(Register z, Register zlen, Register carry, Register tmp1);
  void lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                   Register tmp3, Register tmp4);
  void square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2,
                     Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg);

  void mul_add_128_x_32_loop(Register out, Register in, Register offset, Register len, Register tmp1,
                             Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
                             Register raxReg);
  void mul_add(Register out, Register in, Register offset, Register len, Register k, Register tmp1,
               Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg,
               Register raxReg);
  void vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
                           Register result, Register tmp1, Register tmp2,
                           XMMRegister vec1, XMMRegister vec2, XMMRegister vec3);
#endif

  // CRC32 code for java.util.zip.CRC32::updateBytes() intrinsic.
  void update_byte_crc32(Register crc, Register val, Register table);
  void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp);

#ifdef _LP64
  void kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2);
  void kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register key, Register pos,
                                Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop,
                                Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup);
#endif // _LP64

  // CRC32C code for java.util.zip.CRC32C::updateBytes() intrinsic
  // Note on the naming convention:
  //   Prefix w = register only used on a Westmere+ architecture
  //   Prefix n = register only used on a Nehalem architecture
#ifdef _LP64
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3);
#else
  void crc32c_ipl_alg4(Register in_out, uint32_t n,
                       Register tmp1, Register tmp2, Register tmp3,
                       XMMRegister xtmp1, XMMRegister xtmp2);
#endif
  void crc32c_pclmulqdq(XMMRegister w_xtmp1,
                        Register in_out,
                        uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
                        XMMRegister w_xtmp2,
                        Register tmp1,
                        Register n_tmp2, Register n_tmp3);
  void crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
                       XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                       Register tmp1, Register tmp2,
                       Register n_tmp3);
  void crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported,
                         Register in_out1, Register in_out2, Register in_out3,
                         Register tmp1, Register tmp2, Register tmp3,
                         XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                         Register tmp4, Register tmp5,
                         Register n_tmp6);
  void crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2,
                            Register tmp1, Register tmp2, Register tmp3,
                            Register tmp4, Register tmp5, Register tmp6,
                            XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                            bool is_pclmulqdq_supported);
  // Fold 128-bit data chunk
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
  void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf);
#ifdef _LP64
  // Fold 512-bit data chunk
  void fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, Register pos, int offset);
#endif // _LP64
  // Fold 8-bit data
  void fold_8bit_crc32(Register crc, Register table, Register tmp);
  void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);

  // Compress char[] array to byte[].
  void char_array_compress(Register src, Register dst, Register len,
                           XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
                           XMMRegister tmp4, Register tmp5, Register result);

  // Inflate byte[] array to char[].
  void byte_array_inflate(Register src, Register dst, Register len,
                          XMMRegister tmp1, Register tmp2);

#ifdef _LP64
  void convert_f2i(Register dst, XMMRegister src);
  void convert_d2i(Register dst, XMMRegister src);
  void convert_f2l(Register dst, XMMRegister src);
  void convert_d2l(Register dst, XMMRegister src);

  void cache_wb(Address line);
  void cache_wbsync(bool is_pre);
#endif // _LP64

  void vallones(XMMRegister dst, int vector_len);
};

/**
 * class SkipIfEqual:
 *
 * Instantiating this class emits assembly code that conditionally jumps
 * around any code emitted between the creation of the instance and its
 * automatic destruction at the end of the scope block. The flag passed to
 * the constructor is checked at run time; when its value equals 'value',
 * the guarded code is skipped.
 */
class SkipIfEqual {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
  SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
  ~SkipIfEqual();
};
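
// Illustrative usage sketch (assuming a MacroAssembler* masm and a run-time
// bool flag such as DTraceMethodProbes): the guarded instructions are always
// emitted, but are branched over whenever *flag_addr == value at run time.
//
//   {
//     SkipIfEqual skip(masm, &DTraceMethodProbes, false);
//     // code emitted here executes only when DTraceMethodProbes is true
//   }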

#endif // CPU_X86_MACROASSEMBLER_X86_HPP