/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
#include "crc32c.h"
#ifdef COMPILER2
#include "opto/intrinsicnode.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
bool AbstractAssembler::pd_check_instruction_mark() { return true; }
#endif

static Assembler::Condition reverse[] = {
    Assembler::noOverflow   /* overflow      = 0x0 */ ,
    Assembler::overflow     /* noOverflow    = 0x1 */ ,
    Assembler::aboveEqual   /* carrySet      = 0x2, below        = 0x2 */ ,
    Assembler::below        /* aboveEqual    = 0x3, carryClear   = 0x3 */ ,
    Assembler::notZero      /* zero          = 0x4, equal        = 0x4 */ ,
    Assembler::zero         /* notZero       = 0x5, notEqual     = 0x5 */ ,
    Assembler::above        /* belowEqual    = 0x6 */ ,
    Assembler::belowEqual   /* above         = 0x7 */ ,
    Assembler::positive     /* negative      = 0x8 */ ,
    Assembler::negative     /* positive      = 0x9 */ ,
    Assembler::noParity     /* parity        = 0xa */ ,
    Assembler::parity       /* noParity      = 0xb */ ,
    Assembler::greaterEqual /* less          = 0xc */ ,
    Assembler::less         /* greaterEqual  = 0xd */ ,
    Assembler::greater      /* lessEqual     = 0xe */ ,
    Assembler::lessEqual    /* greater       = 0xf, */

};


// Implementation of MacroAssembler
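// Illustrative note (not VM code): reverse[] maps each Assembler::Condition
// encoding to its logical negation, which on x86 is always the encoding with
// the low bit flipped (overflow 0x0 <-> noOverflow 0x1, zero 0x4 <-> notZero
// 0x5, and so on). A minimal usage sketch:
//
//   Assembler::Condition negate(Assembler::Condition cond) {
//     return reverse[cond];  // e.g. reverse[Assembler::zero] == Assembler::notZero
//   }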
// First come all the versions that are distinct for 32/64 bit,
// unless the difference is trivial (1 line or so).

#ifndef _LP64

// 32bit versions

Address MacroAssembler::as_Address(AddressLiteral adr) {
  return Address(adr.target(), adr.rspec());
}

Address MacroAssembler::as_Address(ArrayAddress adr) {
  return Address::make_array(adr);
}

void MacroAssembler::call_VM_leaf_base(address entry_point,
                                       int number_of_arguments) {
  call(RuntimeAddress(entry_point));
  increment(rsp, number_of_arguments * wordSize);
}

void MacroAssembler::cmpklass(Address src1, Metadata* obj) {
  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpklass(Register src1, Metadata* obj) {
  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpoop(Address src1, jobject obj) {
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpoop(Register src1, jobject obj) {
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::extend_sign(Register hi, Register lo) {
  // According to Intel Doc. AP-526, "Integer Divide", p.18.
  if (VM_Version::is_P6() && hi == rdx && lo == rax) {
    cdql();
  } else {
    movl(hi, lo);
    sarl(hi, 31);
  }
}

void MacroAssembler::jC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::parity, L);
}

void MacroAssembler::jnC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::noParity, L);
}

// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
void MacroAssembler::jump(ArrayAddress entry) {
  jmp(as_Address(entry));
}

// Note: y_lo will be destroyed
void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  // Long compare for Java (semantics as described in JVM spec.)
  Label high, low, done;

  cmpl(x_hi, y_hi);
  jcc(Assembler::less, low);
  jcc(Assembler::greater, high);
  // x_hi is the return register
  xorl(x_hi, x_hi);
  cmpl(x_lo, y_lo);
  jcc(Assembler::below, low);
  jcc(Assembler::equal, done);

  bind(high);
  xorl(x_hi, x_hi);
  increment(x_hi);
  jmp(done);

  bind(low);
  xorl(x_hi, x_hi);
  decrementl(x_hi);

  bind(done);
}
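// For reference, lcmp2int above computes the same result as this C sketch of
// the Java lcmp bytecode (illustrative only, not VM code), with each long
// split into a (hi, lo) register pair:
//
//   int lcmp(int64_t x, int64_t y) {
//     return (x < y) ? -1 : ((x > y) ? 1 : 0);
//   }
//
// The high words are compared signed (less/greater) and the low words
// unsigned (below), which is exactly the two-step triage emitted above.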
void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal32(dst, (int32_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr) {
  // leal(dst, as_Address(adr));
  // see note in movl as to why we must use a move
  mov_literal32(dst, (int32_t) adr.target(), adr.rspec());
}

void MacroAssembler::leave() {
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
  // Multiplication of two Java long values stored on the stack
  // as illustrated below. Result is in rdx:rax.
  //
  // rsp ---> [  ??  ] \               \
  //            ....    | y_rsp_offset  |
  //          [ y_lo ] /  (in bytes)    | x_rsp_offset
  //          [ y_hi ]                  | (in bytes)
  //            ....                    |
  //          [ x_lo ]                 /
  //          [ x_hi ]
  //            ....
  //
  // Basic idea: lo(result) = lo(x_lo * y_lo)
  //             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
  Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
  Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
  Label quick;
  // load x_hi, y_hi and check if quick
  // multiplication is possible
  movl(rbx, x_hi);
  movl(rcx, y_hi);
  movl(rax, rbx);
  orl(rbx, rcx);                // rbx, = 0 <=> x_hi = 0 and y_hi = 0
  jcc(Assembler::zero, quick);  // if rbx, = 0 do quick multiply
  // do full multiplication
  // 1st step
  mull(y_lo);                   // x_hi * y_lo
  movl(rbx, rax);               // save lo(x_hi * y_lo) in rbx,
  // 2nd step
  movl(rax, x_lo);
  mull(rcx);                    // x_lo * y_hi
  addl(rbx, rax);               // add lo(x_lo * y_hi) to rbx,
  // 3rd step
  bind(quick);                  // note: rbx, = 0 if quick multiply!
  movl(rax, x_lo);
  mull(y_lo);                   // x_lo * y_lo
  addl(rdx, rbx);               // correct hi(x_lo * y_lo)
}

void MacroAssembler::lneg(Register hi, Register lo) {
  negl(lo);
  adcl(hi, 0);
  negl(hi);
}

void MacroAssembler::lshl(Register hi, Register lo) {
  // Java shift left long support (semantics as described in JVM spec., p.305)
  // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
  // shift value is in rcx !
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;  // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);            // s := s & 0x3f (s < 0x40)
  cmpl(s, n);               // if (s < n)
  jcc(Assembler::less, L);  // else (s >= n)
  movl(hi, lo);             // x := x << n
  xorl(lo, lo);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                  // s (mod n) < n
  shldl(hi, lo);            // x := x << s
  shll(lo);
}


void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
  // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
  // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;  // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);            // s := s & 0x3f (s < 0x40)
  cmpl(s, n);               // if (s < n)
  jcc(Assembler::less, L);  // else (s >= n)
  movl(lo, hi);             // x := x >> n
  if (sign_extension) sarl(hi, 31);
  else                xorl(hi, hi);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                  // s (mod n) < n
  shrdl(lo, hi);            // x := x >> s
  if (sign_extension) sarl(hi);
  else                shrl(hi);
}
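// Illustrative sketch (not VM code) of what lshl computes for a 64-bit value
// held in a (hi, lo) pair, after the count s has been masked to 0..63:
//
//   if (s >= 32)    { hi = lo << (s - 32); lo = 0; }
//   else if (s > 0) { hi = (hi << s) | (lo >> (32 - s)); lo <<= s; }
//
// No explicit subtraction of n is emitted because the hardware shifts use the
// count in %cl modulo 32; shldl/shrdl supply the "funnel" step that moves
// bits across the word boundary. lshr is the mirror image, with sarl/shrl
// selecting arithmetic versus logical treatment of the sign bit.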
void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj) {
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::movptr(Register dst, AddressLiteral src, Register scratch) {
  // scratch register is not used,
  // it is defined to match parameters of 64-bit version of this method.
  if (src.is_lval()) {
    mov_literal32(dst, (intptr_t)src.target(), src.rspec());
  } else {
    movl(dst, as_Address(src));
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src) {
  movl(as_Address(dst), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movl(dst, as_Address(src));
}

// src should NEVER be a real pointer. Use AddressLiteral for true pointers
void MacroAssembler::movptr(Address dst, intptr_t src) {
  movl(dst, src);
}


void MacroAssembler::pop_callee_saved_registers() {
  pop(rcx);
  pop(rdx);
  pop(rdi);
  pop(rsi);
}

void MacroAssembler::pop_fTOS() {
  fld_d(Address(rsp, 0));
  addl(rsp, 2 * wordSize);
}

void MacroAssembler::push_callee_saved_registers() {
  push(rsi);
  push(rdi);
  push(rdx);
  push(rcx);
}

void MacroAssembler::push_fTOS() {
  subl(rsp, 2 * wordSize);
  fstp_d(Address(rsp, 0));
}


void MacroAssembler::pushoop(jobject obj) {
  push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::pushklass(Metadata* obj) {
  push_literal32((int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::pushptr(AddressLiteral src) {
  if (src.is_lval()) {
    push_literal32((int32_t)src.target(), src.rspec());
  } else {
    pushl(as_Address(src));
  }
}

void MacroAssembler::set_word_if_not_zero(Register dst) {
  xorl(dst, dst);
  set_byte_if_not_zero(dst);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif
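// Note on the pass_argN helpers above: on x86_32 the C arguments travel on
// the stack, so callers must invoke pass_arg3..pass_arg0 in that (reverse)
// order to build a cdecl frame. A hypothetical two-argument leaf call would
// be emitted as:
//
//   pass_arg1(this, arg_1);   // pushed first, ends up above arg_0
//   pass_arg0(this, arg_0);   // pushed last, at the top of the stack
//   call_VM_leaf_base(entry_point, 2);  // pops both after the call returns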
void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
  // In order to get locks to work, we need to fake an in_VM state
  JavaThread* thread = JavaThread::current();
  JavaThreadState saved_state = thread->thread_state();
  thread->set_thread_state(_thread_in_vm);
  if (ShowMessageBoxOnError) {
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
      BREAKPOINT;
    }
  } else {
    ttyLocker ttyl;
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
  }
  // Don't assert holding the ttyLock
  assert(false, "DEBUG MESSAGE: %s", msg);
  ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
}

void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) {
  ttyLocker ttyl;
  FlagSetting fs(Debugging, true);
  tty->print_cr("eip = 0x%08x", eip);
#ifndef PRODUCT
  if ((WizardMode || Verbose) && PrintMiscellaneous) {
    tty->cr();
    findpc(eip);
    tty->cr();
  }
#endif
#define PRINT_REG(rax) \
  { tty->print("%s = ", #rax); os::print_location(tty, rax); }
  PRINT_REG(rax);
  PRINT_REG(rbx);
  PRINT_REG(rcx);
  PRINT_REG(rdx);
  PRINT_REG(rdi);
  PRINT_REG(rsi);
  PRINT_REG(rbp);
  PRINT_REG(rsp);
#undef PRINT_REG
  // Print some words near top of stack.
  int* dump_sp = (int*) rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 16; row++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 8; col++) {
      tty->print(" 0x%08x", *dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)eip-64, (address)eip);
  tty->print_cr("--------");
  Disassembler::decode((address)eip, (address)eip+32);
}

void MacroAssembler::stop(const char* msg) {
  ExternalAddress message((address)msg);
  // push address of message
  pushptr(message.addr());
  { Label L; call(L, relocInfo::none); bind(L); }  // push eip
  pusha();                                         // push registers
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push_CPU_state();

  ExternalAddress message((address) msg);
  // push address of message
  pushptr(message.addr());

  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
  addl(rsp, wordSize);  // discard argument
  pop_CPU_state();
}

void MacroAssembler::print_state() {
  { Label L; call(L, relocInfo::none); bind(L); }  // push eip
  pusha();                                         // push registers

  push_CPU_state();
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32)));
  pop_CPU_state();

  popa();
  addl(rsp, wordSize);
}

#else // _LP64

// 64 bit versions

Address MacroAssembler::as_Address(AddressLiteral adr) {
  // amd64 always does this as a pc-rel
  // we can be absolute or disp based on the instruction type
  // jmp/call are displacements others are absolute
  assert(!adr.is_lval(), "must be rval");
  assert(reachable(adr), "must be");
  return Address((int32_t)(intptr_t)(adr.target() - pc()), adr.target(), adr.reloc());
}
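// Illustrative note: a RIP-relative operand on x86_64 carries only a signed
// 32-bit displacement, so the conversion above is legal only when
//
//   adr.target() - pc()   fits in an int32_t
//
// which is what the reachable(adr) assert guards. Code that may refer to
// targets outside that +/-2GB window instead materializes the full 64-bit
// address in rscratch1 with lea() and uses an indirect Address, as the
// various AddressLiteral overloads below do.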
Address MacroAssembler::as_Address(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  lea(rscratch1, base);
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(rscratch1, index._index, index._scale, index._disp);
  return array;
}

void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
  Label L, E;

#ifdef _WIN64
  // Windows always allocates space for its register args
  assert(num_args <= 4, "only register arguments supported");
  subq(rsp, frame::arg_reg_save_area_bytes);
#endif

  // Align stack if necessary
  testl(rsp, 15);
  jcc(Assembler::zero, L);

  subq(rsp, 8);
  {
    call(RuntimeAddress(entry_point));
  }
  addq(rsp, 8);
  jmp(E);

  bind(L);
  {
    call(RuntimeAddress(entry_point));
  }

  bind(E);

#ifdef _WIN64
  // restore stack pointer
  addq(rsp, frame::arg_reg_save_area_bytes);
#endif

}

void MacroAssembler::cmp64(Register src1, AddressLiteral src2) {
  assert(!src2.is_lval(), "should use cmpptr");

  if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch1, src2);
    Assembler::cmpq(src1, Address(rscratch1, 0));
  }
}

int MacroAssembler::corrected_idivq(Register reg) {
  // Full implementation of Java ldiv and lrem; checks for special
  // case as described in JVM spec., p.243 & p.271. The function
  // returns the (pc) offset of the idivl instruction - may be needed
  // for implicit exceptions.
  //
  //         normal case                           special case
  //
  // input : rax: dividend                         min_long
  //         reg: divisor   (may not be eax/edx)   -1
  //
  // output: rax: quotient  (= rax idiv reg)       min_long
  //         rdx: remainder (= rax irem reg)       0
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  static const int64_t min_long = 0x8000000000000000;
  Label normal_case, special_case;

  // check for special case
  cmp64(rax, ExternalAddress((address) &min_long));
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where
                  // remainder = 0)
  cmpq(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdqq();
  int idivq_offset = offset();
  idivq(reg);

  // normal and special case exit
  bind(special_case);

  return idivq_offset;
}

void MacroAssembler::decrementq(Register reg, int value) {
  if (value == min_jint) { subq(reg, value); return; }
  if (value <  0) { incrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(reg) ; return; }
  /* else */      { subq(reg, value)       ; return; }
}

void MacroAssembler::decrementq(Address dst, int value) {
  if (value == min_jint) { subq(dst, value); return; }
  if (value <  0) { incrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(dst) ; return; }
  /* else */      { subq(dst, value)       ; return; }
}

void MacroAssembler::incrementq(AddressLiteral dst) {
  if (reachable(dst)) {
    incrementq(as_Address(dst));
  } else {
    lea(rscratch1, dst);
    incrementq(Address(rscratch1, 0));
  }
}

void MacroAssembler::incrementq(Register reg, int value) {
  if (value == min_jint) { addq(reg, value); return; }
  if (value <  0) { decrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(reg) ; return; }
  /* else */      { addq(reg, value)       ; return; }
}
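// Note on the increment/decrement helpers: value == min_jint must go straight
// to addq/subq because negating min_jint overflows int, so the usual
// "negate and forward to the opposite helper" trick is unsafe for it. A
// single incq/decq is emitted only when UseIncDec is set: inc/dec leave CF
// untouched, which can cause partial-flag merge stalls on some
// microarchitectures, so the flag lets those chips prefer addq/subq.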
void MacroAssembler::incrementq(Address dst, int value) {
  if (value == min_jint) { addq(dst, value); return; }
  if (value <  0) { decrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(dst) ; return; }
  /* else */      { addq(dst, value)       ; return; }
}

// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
void MacroAssembler::jump(ArrayAddress entry) {
  lea(rscratch1, entry.base());
  Address dispatch = entry.index();
  assert(dispatch._base == noreg, "must be");
  dispatch._base = rscratch1;
  jmp(dispatch);
}

void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  cmpq(x_lo, y_lo);
}

void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal64(dst, (intptr_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr) {
  mov_literal64(rscratch1, (intptr_t)adr.target(), adr.rspec());
  movptr(dst, rscratch1);
}

void MacroAssembler::leave() {
  // %%% is this really better? Why not on 32bit too?
  emit_int8((unsigned char)0xC9); // LEAVE
}

void MacroAssembler::lneg(Register hi, Register lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  negq(lo);
}

void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj) {
  mov_literal64(rscratch1, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  movq(dst, rscratch1);
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
  mov_literal64(rscratch1, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
  movq(dst, rscratch1);
}

void MacroAssembler::movptr(Register dst, AddressLiteral src, Register scratch) {
  if (src.is_lval()) {
    mov_literal64(dst, (intptr_t)src.target(), src.rspec());
  } else {
    if (reachable(src)) {
      movq(dst, as_Address(src));
    } else {
      lea(scratch, src);
      movq(dst, Address(scratch, 0));
    }
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src) {
  movq(as_Address(dst), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movq(dst, as_Address(src));
}
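// Illustrative note: x86_64 has no instruction form that stores a 64-bit
// immediate directly to memory, so the intptr_t overload below must stage
// the constant in rscratch1 (mov64 followed by movq). Only a sign-extended
// 32-bit immediate can be stored in a single instruction, which is what the
// int32_t overloads exploit, mostly for writing NULL.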
// src should NEVER be a real pointer. Use AddressLiteral for true pointers
void MacroAssembler::movptr(Address dst, intptr_t src) {
  mov64(rscratch1, src);
  movq(dst, rscratch1);
}

// These are mostly for initializing NULL
void MacroAssembler::movptr(Address dst, int32_t src) {
  movslq(dst, src);
}

void MacroAssembler::movptr(Register dst, int32_t src) {
  mov64(dst, (intptr_t)src);
}

void MacroAssembler::pushoop(jobject obj) {
  movoop(rscratch1, obj);
  push(rscratch1);
}

void MacroAssembler::pushklass(Metadata* obj) {
  mov_metadata(rscratch1, obj);
  push(rscratch1);
}

void MacroAssembler::pushptr(AddressLiteral src) {
  lea(rscratch1, src);
  if (src.is_lval()) {
    push(rscratch1);
  } else {
    pushq(Address(rscratch1, 0));
  }
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
  }

  // Always clear the pc because it could have been set by make_walkable()
  movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
  vzeroupper();
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc) {
  vzeroupper();
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()),
           last_java_fp);
  }

  // last_java_pc is optional
  if (last_java_pc != NULL) {
    Address java_pc(r15_thread,
                    JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
    lea(rscratch1, InternalAddress(last_java_pc));
    movptr(java_pc, rscratch1);
  }

  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg ) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg ) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg ) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg ) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::stop(const char* msg) {
  address rip = pc();
  pusha(); // get regs on stack
  lea(c_rarg0, ExternalAddress((address) msg));
  lea(c_rarg1, InternalAddress(rip));
  movq(c_rarg2, rsp); // pass pointer to regs array
  andq(rsp, -16);     // align stack as required by ABI
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call
  push_CPU_state();   // keeps alignment at 16 bytes
  lea(c_rarg0, ExternalAddress((address) msg));
  lea(rax, ExternalAddress(CAST_FROM_FN_PTR(address, warning)));
  call(rax);
  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
}
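// Illustrative note: stop(), warn() and print_state() all clear the low bits
// of rsp with andq(rsp, -16) before calling into C, because both the SysV
// and Win64 ABIs require 16-byte stack alignment at a call site. warn() and
// print_state() save rbp first so the original, possibly unaligned rsp can
// be restored afterwards; stop() never returns (it ends in hlt()), so it is
// free to clobber rsp.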
void MacroAssembler::print_state() {
  address rip = pc();
  pusha();            // get regs on stack
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call
  push_CPU_state();   // keeps alignment at 16 bytes

  lea(c_rarg0, InternalAddress(rip));
  lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
  call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);

  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
  popa();
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  // In order to get locks to work, we need to fake an in_VM state
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // XXX correct this offset for amd64
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state64(pc, regs);
      BREAKPOINT;
      assert(false, "start up GDB");
    }
    ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
  } else {
    ttyLocker ttyl;
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
                    msg);
    assert(false, "DEBUG MESSAGE: %s", msg);
  }
}

void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
  ttyLocker ttyl;
  FlagSetting fs(Debugging, true);
  tty->print_cr("rip = 0x%016lx", (intptr_t)pc);
#ifndef PRODUCT
  tty->cr();
  findpc(pc);
  tty->cr();
#endif
#define PRINT_REG(rax, value) \
  { tty->print("%s = ", #rax); os::print_location(tty, value); }
  PRINT_REG(rax, regs[15]);
  PRINT_REG(rbx, regs[12]);
  PRINT_REG(rcx, regs[14]);
  PRINT_REG(rdx, regs[13]);
  PRINT_REG(rdi, regs[8]);
  PRINT_REG(rsi, regs[9]);
  PRINT_REG(rbp, regs[10]);
  PRINT_REG(rsp, regs[11]);
  PRINT_REG(r8 , regs[7]);
  PRINT_REG(r9 , regs[6]);
  PRINT_REG(r10, regs[5]);
  PRINT_REG(r11, regs[4]);
  PRINT_REG(r12, regs[3]);
  PRINT_REG(r13, regs[2]);
  PRINT_REG(r14, regs[1]);
  PRINT_REG(r15, regs[0]);
#undef PRINT_REG
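  // Note: the regs[] indices above mirror the order in which stop()'s
  // pusha() saved the general registers: rax is pushed first and therefore
  // sits at the highest index (15), while r15 is pushed last and lands at
  // index 0.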
  // Print some words near top of stack.
  int64_t* rsp = (int64_t*) regs[11];
  int64_t* dump_sp = rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 25; row++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 4; col++) {
      tty->print(" 0x%016lx", (intptr_t)*dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)pc-64, (address)pc);
  tty->print_cr("--------");
  Disassembler::decode((address)pc, (address)pc+32);
}

#endif // _LP64

// Now versions that are common to 32/64 bit

void MacroAssembler::addptr(Register dst, int32_t imm32) {
  LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
}

void MacroAssembler::addptr(Register dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}

void MacroAssembler::addptr(Address dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}

void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::addsd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::addsd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::addss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    addss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    addss(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::addpd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::addpd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::align(int modulus) {
  align(modulus, offset());
}

void MacroAssembler::align(int modulus, int target) {
  if (target % modulus != 0) {
    nop(modulus - (target % modulus));
  }
}

void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::andpd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::andpd(dst, Address(rscratch1, 0));
  }
}
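// Illustrative sketch (an assumption about typical use, not VM code): the
// andpd/andps forms are the kind of thing an abs() implementation uses to
// clear IEEE sign bits with a 16-byte-aligned constant, e.g. for doubles:
//
//   static const uint64_t double_sign_mask[2] = { 0x7FFFFFFFFFFFFFFFULL,
//                                                 0x7FFFFFFFFFFFFFFFULL };
//   // andpd(dst, ExternalAddress((address)double_sign_mask)) yields |x| in
//   // both lanes; the assert above enforces the SSE 16-byte alignment rule.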
void MacroAssembler::andps(XMMRegister dst, AddressLiteral src) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::andps(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::andps(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::andptr(Register dst, int32_t imm32) {
  LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
}

void MacroAssembler::atomic_incl(Address counter_addr) {
  if (os::is_MP())
    lock();
  incrementl(counter_addr);
}

void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register scr) {
  if (reachable(counter_addr)) {
    atomic_incl(as_Address(counter_addr));
  } else {
    lea(scr, counter_addr);
    atomic_incl(Address(scr, 0));
  }
}

#ifdef _LP64
void MacroAssembler::atomic_incq(Address counter_addr) {
  if (os::is_MP())
    lock();
  incrementq(counter_addr);
}

void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register scr) {
  if (reachable(counter_addr)) {
    atomic_incq(as_Address(counter_addr));
  } else {
    lea(scr, counter_addr);
    atomic_incq(Address(scr, 0));
  }
}
#endif
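// Note on the atomic_inc helpers above: the lock prefix is emitted only on
// multiprocessor systems (os::is_MP()). On a uniprocessor a single
// read-modify-write increment instruction cannot be observed half-done by
// any other code on that CPU, so the prefix would only cost cycles.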
// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages. This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  movptr(tmp, rsp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  bind(loop);
  movl(Address(tmp, (-os::vm_page_size())), size );
  subptr(tmp, os::vm_page_size());
  subl(size, os::vm_page_size());
  jcc(Assembler::greater, loop);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again. (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.) Skip this address by starting at i=1, and
  // touch a few more pages below. N.B. It is important to touch all
  // the way down including all pages in the shadow zone.
  for (int i = 1; i < ((int)JavaThread::stack_shadow_zone_size() / os::vm_page_size()); i++) {
    // this could be any sized move but this can be a debugging crumb
    // so the bigger the better.
    movptr(Address(tmp, (-i*os::vm_page_size())), size );
  }
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;
  Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
  NOT_LP64(get_thread(rsi);)

  cmpptr(rsp, Address(thread, JavaThread::reserved_stack_activation_offset()));
  jcc(Assembler::below, no_reserved_zone_enabling);

  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), thread);
  jump(RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

int MacroAssembler::biased_locking_enter(Register lock_reg,
                                         Register obj_reg,
                                         Register swap_reg,
                                         Register tmp_reg,
                                         bool swap_reg_contains_mark,
                                         Label& done,
                                         Label* slow_case,
                                         BiasedLockingCounters* counters) {
  assert(UseBiasedLocking, "why call this otherwise?");
  assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
  assert(tmp_reg != noreg, "tmp_reg must be supplied");
  assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
  NOT_LP64( Address saved_mark_addr(lock_reg, 0); )

  if (PrintBiasedLockingStatistics && counters == NULL) {
    counters = BiasedLocking::counters();
  }
  // Biased locking
  // See whether the lock is currently biased toward our thread and
  // whether the epoch is still valid
  // Note that the runtime guarantees sufficient alignment of JavaThread
  // pointers to allow age to be placed into low bits
  // First check to see whether biasing is even enabled for this object
  Label cas_label;
  int null_check_offset = -1;
  if (!swap_reg_contains_mark) {
    null_check_offset = offset();
    movptr(swap_reg, mark_addr);
  }
  movptr(tmp_reg, swap_reg);
  andptr(tmp_reg, markOopDesc::biased_lock_mask_in_place);
  cmpptr(tmp_reg, markOopDesc::biased_lock_pattern);
  jcc(Assembler::notEqual, cas_label);
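  // For reference (a descriptive note, not VM code), the low mark-word bits
  // tested above are laid out as:
  //
  //   [ ... | epoch(2) | age(4) | biased_lock(1) | lock(2) ]
  //
  //   biased_lock_pattern = 101: biasable or biased object
  //   unlocked_value      = 001: neutral, eligible for stack-locking
  //   monitor_value       =  10: inflated, mark points at the ObjectMonitor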
  // The bias pattern is present in the object's header. Need to check
  // whether the bias owner and the epoch are both still current.
#ifndef _LP64
  // Note that because there is no current thread register on x86_32 we
  // need to store off the mark word we read out of the object to
  // avoid reloading it and needing to recheck invariants below. This
  // store is unfortunate but it makes the overall code shorter and
  // simpler.
  movptr(saved_mark_addr, swap_reg);
#endif
  if (swap_reg_contains_mark) {
    null_check_offset = offset();
  }
  load_prototype_header(tmp_reg, obj_reg);
#ifdef _LP64
  orptr(tmp_reg, r15_thread);
  xorptr(tmp_reg, swap_reg);
  Register header_reg = tmp_reg;
#else
  xorptr(tmp_reg, swap_reg);
  get_thread(swap_reg);
  xorptr(swap_reg, tmp_reg);
  Register header_reg = swap_reg;
#endif
  andptr(header_reg, ~((int) markOopDesc::age_mask_in_place));
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address) counters->biased_lock_entry_count_addr()));
  }
  jcc(Assembler::equal, done);

  Label try_revoke_bias;
  Label try_rebias;

  // At this point we know that the header has the bias pattern and
  // that we are not the bias owner in the current epoch. We need to
  // figure out more details about the state of the header in order to
  // know what operations can be legally performed on the object's
  // header.

  // If the low three bits in the xor result aren't clear, that means
  // the prototype header is no longer biased and we have to revoke
  // the bias on this object.
  testptr(header_reg, markOopDesc::biased_lock_mask_in_place);
  jccb(Assembler::notZero, try_revoke_bias);

  // Biasing is still enabled for this data type. See whether the
  // epoch of the current bias is still valid, meaning that the epoch
  // bits of the mark word are equal to the epoch bits of the
  // prototype header. (Note that the prototype header's epoch bits
  // only change at a safepoint.) If not, attempt to rebias the object
  // toward the current thread. Note that we must be absolutely sure
  // that the current epoch is invalid in order to do this because
  // otherwise the manipulations it performs on the mark word are
  // illegal.
  testptr(header_reg, markOopDesc::epoch_mask_in_place);
  jccb(Assembler::notZero, try_rebias);

  // The epoch of the current bias is still valid but we know nothing
  // about the owner; it might be set or it might be clear. Try to
  // acquire the bias of the object using an atomic operation. If this
  // fails we will go in to the runtime to revoke the object's bias.
  // Note that we first construct the presumed unbiased header so we
  // don't accidentally blow away another thread's valid bias.
  NOT_LP64( movptr(swap_reg, saved_mark_addr); )
  andptr(swap_reg,
         markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
#ifdef _LP64
  movptr(tmp_reg, swap_reg);
  orptr(tmp_reg, r15_thread);
#else
  get_thread(tmp_reg);
  orptr(tmp_reg, swap_reg);
#endif
  if (os::is_MP()) {
    lock();
  }
  cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
  // If the biasing toward our thread failed, this means that
  // another thread succeeded in biasing it toward itself and we
  // need to revoke that bias. The revocation will occur in the
  // interpreter runtime in the slow case.
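  // Note: cond_inc32 below preserves EFLAGS around the counted increment,
  // so the zero/notZero tests that follow still observe the result of the
  // cmpxchg above: equal means the bias was acquired, notZero routes to
  // the slow case.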
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
  }
  if (slow_case != NULL) {
    jcc(Assembler::notZero, *slow_case);
  }
  jmp(done);

  bind(try_rebias);
  // At this point we know the epoch has expired, meaning that the
  // current "bias owner", if any, is actually invalid. Under these
  // circumstances _only_, we are allowed to use the current header's
  // value as the comparison value when doing the cas to acquire the
  // bias in the current epoch. In other words, we allow transfer of
  // the bias from one thread to another directly in this situation.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  load_prototype_header(tmp_reg, obj_reg);
#ifdef _LP64
  orptr(tmp_reg, r15_thread);
#else
  get_thread(swap_reg);
  orptr(tmp_reg, swap_reg);
  movptr(swap_reg, saved_mark_addr);
#endif
  if (os::is_MP()) {
    lock();
  }
  cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
  // If the biasing toward our thread failed, then another thread
  // succeeded in biasing it toward itself and we need to revoke that
  // bias. The revocation will occur in the runtime in the slow case.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
  }
  if (slow_case != NULL) {
    jcc(Assembler::notZero, *slow_case);
  }
  jmp(done);

  bind(try_revoke_bias);
  // The prototype mark in the klass doesn't have the bias bit set any
  // more, indicating that objects of this data type are not supposed
  // to be biased any more. We are going to try to reset the mark of
  // this object to the prototype value and fall through to the
  // CAS-based locking scheme. Note that if our CAS fails, it means
  // that another thread raced us for the privilege of revoking the
  // bias of this particular object, so it's okay to continue in the
  // normal locking code.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  NOT_LP64( movptr(swap_reg, saved_mark_addr); )
  load_prototype_header(tmp_reg, obj_reg);
  if (os::is_MP()) {
    lock();
  }
  cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
  // Fall through to the normal CAS-based lock, because no matter what
  // the result of the above CAS, some thread must have succeeded in
  // removing the bias bit from the object's header.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
  }

  bind(cas_label);

  return null_check_offset;
}
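// Illustrative note: callers of biased_locking_enter() feed the returned
// null_check_offset into the implicit-exception machinery, because the
// first load of the mark word doubles as the null check of obj_reg; no
// explicit compare against NULL is emitted on this path.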
void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
  assert(UseBiasedLocking, "why call this otherwise?");

  // Check for biased locking unlock case, which is a no-op
  // Note: we do not have to check the thread ID for two reasons.
  // First, the interpreter checks for IllegalMonitorStateException at
  // a higher level. Second, if the bias was revoked while we held the
  // lock, the object could not be rebiased toward another thread, so
  // the bias bit would be clear.
  movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
  andptr(temp_reg, markOopDesc::biased_lock_mask_in_place);
  cmpptr(temp_reg, markOopDesc::biased_lock_pattern);
  jcc(Assembler::equal, done);
}

#ifdef COMPILER2

#if INCLUDE_RTM_OPT

// Update rtm_counters based on abort status
// input: abort_status
//        rtm_counters (RTMLockingCounters*)
// flags are killed
void MacroAssembler::rtm_counters_update(Register abort_status, Register rtm_counters) {

  atomic_incptr(Address(rtm_counters, RTMLockingCounters::abort_count_offset()));
  if (PrintPreciseRTMLockingStatistics) {
    for (int i = 0; i < RTMLockingCounters::ABORT_STATUS_LIMIT; i++) {
      Label check_abort;
      testl(abort_status, (1<<i));
      jccb(Assembler::equal, check_abort);
      atomic_incptr(Address(rtm_counters, RTMLockingCounters::abortX_count_offset() + (i * sizeof(uintx))));
      bind(check_abort);
    }
  }
}

// Branch if (random & (count-1) != 0), count is 2^n
// tmp, scr and flags are killed
void MacroAssembler::branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel) {
  assert(tmp == rax, "");
  assert(scr == rdx, "");
  rdtsc(); // modifies EDX:EAX
  andptr(tmp, count-1);
  jccb(Assembler::notZero, brLabel);
}
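// For reference, the abort-ratio test emitted below evaluates, in integer
// arithmetic (a descriptive note, not VM code):
//
//   aborted = abort_count * 100
//   total   = total_count * RTMTotalCountIncrRate
//   if (aborted >= total * RTMAbortRatio)   // RTMAbortRatio is a percentage
//     set the NoRTM bit in the MDO
//
// and independently flips the method to "always RTM" once total_count
// crosses RTMLockingThreshold / RTMTotalCountIncrRate with a low abort ratio.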
// Perform abort ratio calculation, set no_rtm bit if high ratio
// input:  rtm_counters_Reg (RTMLockingCounters* address)
// tmpReg, rtm_counters_Reg and flags are killed
void MacroAssembler::rtm_abort_ratio_calculation(Register tmpReg,
                                                 Register rtm_counters_Reg,
                                                 RTMLockingCounters* rtm_counters,
                                                 Metadata* method_data) {
  Label L_done, L_check_always_rtm1, L_check_always_rtm2;

  if (RTMLockingCalculationDelay > 0) {
    // Delay calculation
    movptr(tmpReg, ExternalAddress((address) RTMLockingCounters::rtm_calculation_flag_addr()), tmpReg);
    testptr(tmpReg, tmpReg);
    jccb(Assembler::equal, L_done);
  }
  // Abort ratio calculation only if abort_count > RTMAbortThreshold
  //   Aborted transactions = abort_count * 100
  //   All transactions = total_count *  RTMTotalCountIncrRate
  //   Set no_rtm bit if (Aborted transactions >= All transactions * RTMAbortRatio)

  movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::abort_count_offset()));
  cmpptr(tmpReg, RTMAbortThreshold);
  jccb(Assembler::below, L_check_always_rtm2);
  imulptr(tmpReg, tmpReg, 100);

  Register scrReg = rtm_counters_Reg;
  movptr(scrReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset()));
  imulptr(scrReg, scrReg, RTMTotalCountIncrRate);
  imulptr(scrReg, scrReg, RTMAbortRatio);
  cmpptr(tmpReg, scrReg);
  jccb(Assembler::below, L_check_always_rtm1);
  if (method_data != NULL) {
    // set rtm_state to "no rtm" in MDO
    mov_metadata(tmpReg, method_data);
    if (os::is_MP()) {
      lock();
    }
    orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), NoRTM);
  }
  jmpb(L_done);
  bind(L_check_always_rtm1);
  // Reload RTMLockingCounters* address
  lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters));
  bind(L_check_always_rtm2);
  movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset()));
  cmpptr(tmpReg, RTMLockingThreshold / RTMTotalCountIncrRate);
  jccb(Assembler::below, L_done);
  if (method_data != NULL) {
    // set rtm_state to "always rtm" in MDO
    mov_metadata(tmpReg, method_data);
    if (os::is_MP()) {
      lock();
    }
    orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), UseRTM);
  }
  bind(L_done);
}

// Update counters and perform abort ratio calculation
// input:  abort_status_Reg
// rtm_counters_Reg, flags are killed
void MacroAssembler::rtm_profiling(Register abort_status_Reg,
                                   Register rtm_counters_Reg,
                                   RTMLockingCounters* rtm_counters,
                                   Metadata* method_data,
                                   bool profile_rtm) {

  assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
  // update rtm counters based on rax value at abort
  // reads abort_status_Reg, updates flags
  lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters));
  rtm_counters_update(abort_status_Reg, rtm_counters_Reg);
  if (profile_rtm) {
    // Save abort status because abort_status_Reg is used by following code.
    if (RTMRetryCount > 0) {
      push(abort_status_Reg);
    }
    assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
    rtm_abort_ratio_calculation(abort_status_Reg, rtm_counters_Reg, rtm_counters, method_data);
    // restore abort status
    if (RTMRetryCount > 0) {
      pop(abort_status_Reg);
    }
  }
}

// Retry on abort if abort's status is 0x6: can retry (0x2) | memory conflict (0x4)
// inputs: retry_count_Reg
//       : abort_status_Reg
// output: retry_count_Reg decremented by 1
// flags are killed
void MacroAssembler::rtm_retry_lock_on_abort(Register retry_count_Reg, Register abort_status_Reg, Label& retryLabel) {
  Label doneRetry;
  assert(abort_status_Reg == rax, "");
  // The abort reason bits are in eax (see all states in rtmLocking.hpp)
  // 0x6 = conflict on which we can retry (0x2) | memory conflict (0x4)
  // if reason is in 0x6 and retry count != 0 then retry
  andptr(abort_status_Reg, 0x6);
  jccb(Assembler::zero, doneRetry);
  testl(retry_count_Reg, retry_count_Reg);
  jccb(Assembler::zero, doneRetry);
  pause();
  decrementl(retry_count_Reg);
  jmp(retryLabel);
  bind(doneRetry);
}

// Spin and retry if lock is busy,
// inputs: box_Reg (monitor address)
//       : retry_count_Reg
// output: retry_count_Reg decremented by 1
//       : clear z flag if retry count exceeded
// tmp_Reg, scr_Reg, flags are killed
void MacroAssembler::rtm_retry_lock_on_busy(Register retry_count_Reg, Register box_Reg,
                                            Register tmp_Reg, Register scr_Reg, Label& retryLabel) {
  Label SpinLoop, SpinExit, doneRetry;
  int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);

  testl(retry_count_Reg, retry_count_Reg);
  jccb(Assembler::zero, doneRetry);
  decrementl(retry_count_Reg);
  movptr(scr_Reg, RTMSpinLoopCount);

  bind(SpinLoop);
  pause();
  decrementl(scr_Reg);
  jccb(Assembler::lessEqual, SpinExit);
  movptr(tmp_Reg, Address(box_Reg, owner_offset));
  testptr(tmp_Reg, tmp_Reg);
  jccb(Assembler::notZero, SpinLoop);

  bind(SpinExit);
  jmp(retryLabel);
  bind(doneRetry);
  incrementl(retry_count_Reg); // clear z flag
}
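// Illustrative note on the RTM primitives used below: xbegin(L) starts a
// transactional region and, on abort, execution resumes at L with an abort
// status in EAX; xend() commits the region; xabort(imm) aborts it
// deliberately. The 0x2 ("retry possible") and 0x4 ("memory conflict")
// status bits are the ones rtm_retry_lock_on_abort tests above.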
// Use RTM for normal stack locks
// Input: objReg (object to lock)
void MacroAssembler::rtm_stack_locking(Register objReg, Register tmpReg, Register scrReg,
                                       Register retry_on_abort_count_Reg,
                                       RTMLockingCounters* stack_rtm_counters,
                                       Metadata* method_data, bool profile_rtm,
                                       Label& DONE_LABEL, Label& IsInflated) {
  assert(UseRTMForStackLocks, "why call this otherwise?");
  assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
  assert(tmpReg == rax, "");
  assert(scrReg == rdx, "");
  Label L_rtm_retry, L_decrement_retry, L_on_abort;

  if (RTMRetryCount > 0) {
    movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
    bind(L_rtm_retry);
  }
  movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));
  testptr(tmpReg, markOopDesc::monitor_value);  // inflated vs stack-locked|neutral|biased
  jcc(Assembler::notZero, IsInflated);

  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
    Label L_noincrement;
    if (RTMTotalCountIncrRate > 1) {
      // tmpReg, scrReg and flags are killed
      branch_on_random_using_rdtsc(tmpReg, scrReg, RTMTotalCountIncrRate, L_noincrement);
    }
    assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM");
    atomic_incptr(ExternalAddress((address)stack_rtm_counters->total_count_addr()), scrReg);
    bind(L_noincrement);
  }
  xbegin(L_on_abort);
  movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));  // fetch markword
  andptr(tmpReg, markOopDesc::biased_lock_mask_in_place);            // look at 3 lock bits
  cmpptr(tmpReg, markOopDesc::unlocked_value);                       // bits = 001 unlocked
  jcc(Assembler::equal, DONE_LABEL);                                 // all done if unlocked

  Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
  if (UseRTMXendForLockBusy) {
    xend();
    movptr(abort_status_Reg, 0x2);   // Set the abort status to 2 (so we can retry)
    jmp(L_decrement_retry);
  }
  else {
    xabort(0);
  }
  bind(L_on_abort);
  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
    rtm_profiling(abort_status_Reg, scrReg, stack_rtm_counters, method_data, profile_rtm);
  }
  bind(L_decrement_retry);
  if (RTMRetryCount > 0) {
    // retry on lock abort if abort status is 'can retry' (0x2) or 'memory conflict' (0x4)
    rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry);
  }
}
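// Note on the stack-lock fast path above: inside a successful transaction
// the mark word is only *read* and checked for the unlocked (001) pattern;
// no store is performed at all. Any competing thread that writes the mark
// word aborts the transaction, which is what makes the elided lock safe.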
// Use RTM for inflating locks
// inputs: objReg (object to lock)
//         boxReg (on-stack box address (displaced header location) - KILLED)
//         tmpReg (ObjectMonitor address + markOopDesc::monitor_value)
void MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, Register tmpReg,
                                          Register scrReg, Register retry_on_busy_count_Reg,
                                          Register retry_on_abort_count_Reg,
                                          RTMLockingCounters* rtm_counters,
                                          Metadata* method_data, bool profile_rtm,
                                          Label& DONE_LABEL) {
  assert(UseRTMLocking, "why call this otherwise?");
  assert(tmpReg == rax, "");
  assert(scrReg == rdx, "");
  Label L_rtm_retry, L_decrement_retry, L_on_abort;
  int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);

  // Without cast to int32_t a movptr will destroy r10 which is typically obj
  movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
  movptr(boxReg, tmpReg); // Save ObjectMonitor address

  if (RTMRetryCount > 0) {
    movl(retry_on_busy_count_Reg, RTMRetryCount);  // Retry on lock busy
    movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
    bind(L_rtm_retry);
  }
  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
    Label L_noincrement;
    if (RTMTotalCountIncrRate > 1) {
      // tmpReg, scrReg and flags are killed
      branch_on_random_using_rdtsc(tmpReg, scrReg, RTMTotalCountIncrRate, L_noincrement);
    }
    assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
    atomic_incptr(ExternalAddress((address)rtm_counters->total_count_addr()), scrReg);
    bind(L_noincrement);
  }
  xbegin(L_on_abort);
  movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));
  movptr(tmpReg, Address(tmpReg, owner_offset));
  testptr(tmpReg, tmpReg);
  jcc(Assembler::zero, DONE_LABEL);
  if (UseRTMXendForLockBusy) {
    xend();
    jmp(L_decrement_retry);
  }
  else {
    xabort(0);
  }
  bind(L_on_abort);
  Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
    rtm_profiling(abort_status_Reg, scrReg, rtm_counters, method_data, profile_rtm);
  }
  if (RTMRetryCount > 0) {
    // retry on lock abort if abort status is 'can retry' (0x2) or 'memory conflict' (0x4)
    rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry);
  }

  movptr(tmpReg, Address(boxReg, owner_offset)) ;
  testptr(tmpReg, tmpReg) ;
  jccb(Assembler::notZero, L_decrement_retry) ;

  // Appears unlocked - try to swing _owner from null to non-null.
  // Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand.
#ifdef _LP64
  Register threadReg = r15_thread;
#else
  get_thread(scrReg);
  Register threadReg = scrReg;
#endif
  if (os::is_MP()) {
    lock();
  }
  cmpxchgptr(threadReg, Address(boxReg, owner_offset)); // Updates tmpReg

  if (RTMRetryCount > 0) {
    // success done else retry
    jccb(Assembler::equal, DONE_LABEL) ;
    bind(L_decrement_retry);
    // Spin and retry if lock is busy.
    rtm_retry_lock_on_busy(retry_on_busy_count_Reg, boxReg, tmpReg, scrReg, L_rtm_retry);
  }
  else {
    bind(L_decrement_retry);
  }
}

#endif //  INCLUDE_RTM_OPT

// Fast_Lock and Fast_Unlock used by C2

// Because the transitions from emitted code to the runtime
// monitorenter/exit helper stubs are so slow it's critical that
// we inline both the stack-locking fast-path and the inflated fast path.
//
// See also: cmpFastLock and cmpFastUnlock.
//
// What follows is a specialized inline transliteration of the code
// in slow_enter() and slow_exit(). If we're concerned about I$ bloat
// another option would be to emit TrySlowEnter and TrySlowExit methods
// at startup-time. These methods would accept arguments as
// (rax,=Obj, rbx=Self, rcx=box, rdx=Scratch) and return success-failure
// indications in the icc.ZFlag. Fast_Lock and Fast_Unlock would simply
// marshal the arguments and emit calls to TrySlowEnter and TrySlowExit.
// In practice, however, the # of lock sites is bounded and is usually small.
// Besides the call overhead, TrySlowEnter and TrySlowExit might suffer
// if the processor uses simple bimodal branch predictors keyed by EIP,
// since the helper routines would be called from multiple synchronization
// sites.
//
// An even better approach would be to write "MonitorEnter()" and "MonitorExit()"
// in java - using j.u.c and unsafe - and just bind the lock and unlock sites
// to those specialized methods. That'd give us a mostly platform-independent
// implementation that the JITs could optimize and inline at their pleasure.
1646 // Done correctly, the only time we'd need to cross to native code would be 1647 // to park() or unpark() threads. We'd also need a few more unsafe operators 1648 // to (a) prevent compiler-JIT reordering of non-volatile accesses, and 1649 // (b) provide explicit barriers or fence operations. 1650 //
1651 // TODO: 1652 //
1653 // * Arrange for C2 to pass "Self" into Fast_Lock and Fast_Unlock in one of the registers (scr). 1654 // This avoids manifesting the Self pointer in the Fast_Lock and Fast_Unlock terminals. 1655 // Given TLAB allocation, Self is usually manifested in a register, so passing it into 1656 // the lock operators would typically be faster than reifying Self. 1657 //
1658 // * Ideally I'd define the primitives as: 1659 // fast_lock (nax Obj, nax box, EAX tmp, nax scr) where box, tmp and scr are KILLED. 1660 // fast_unlock (nax Obj, EAX box, nax tmp) where box and tmp are KILLED 1661 // Unfortunately ADLC bugs prevent us from expressing the ideal form. 1662 // Instead, we're stuck with the rather awkward and brittle register assignments below. 1663 // Furthermore the register assignments are overconstrained, possibly resulting in 1664 // sub-optimal code near the synchronization site. 1665 //
1666 // * Eliminate the sp-proximity tests and just use "== Self" tests instead. 1667 // Alternately, use a better sp-proximity test. 1668 //
1669 // * Currently ObjectMonitor._Owner can hold either an sp value or a (THREAD *) value. 1670 // Either one is sufficient to uniquely identify a thread. 1671 // TODO: eliminate use of sp in _owner and use get_thread(tr) instead. 1672 //
1673 // * Intrinsify notify() and notifyAll() for the common cases where the 1674 // object is locked by the calling thread but the waitlist is empty. 1675 // This avoids the expensive JNI calls to JVM_Notify() and JVM_NotifyAll(). 1676 //
1677 // * Use jccb and jmpb instead of jcc and jmp to improve code density. 1678 // But beware of excessive branch density on AMD Opterons. 1679 //
1680 // * Both Fast_Lock and Fast_Unlock set the ICC.ZF to indicate success 1681 // or failure of the fast-path. If the fast-path fails then we pass 1682 // control to the slow-path, typically in C. In Fast_Lock and 1683 // Fast_Unlock we often branch to DONE_LABEL, just to find that C2 1684 // will emit a conditional branch immediately after the node. 1685 // So we have branches to branches and lots of ICC.ZF games. 1686 // Instead, it might be better to have C2 pass a "FailureLabel" 1687 // into Fast_Lock and Fast_Unlock. In the case of success, control 1688 // will drop through the node. ICC.ZF is undefined at exit.
1689 // In the case of failure, the node will branch directly to the 1690 // FailureLabel 1691 1692 1693 // obj: object to lock 1694 // box: on-stack box address (displaced header location) - KILLED 1695 // rax,: tmp -- KILLED 1696 // scr: tmp -- KILLED 1697 void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg, 1698 Register scrReg, Register cx1Reg, Register cx2Reg, 1699 BiasedLockingCounters* counters, 1700 RTMLockingCounters* rtm_counters, 1701 RTMLockingCounters* stack_rtm_counters, 1702 Metadata* method_data, 1703 bool use_rtm, bool profile_rtm) { 1704 // Ensure the register assignments are disjoint 1705 assert(tmpReg == rax, ""); 1706 1707 if (use_rtm) { 1708 assert_different_registers(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg); 1709 } else { 1710 assert(cx1Reg == noreg, ""); 1711 assert(cx2Reg == noreg, ""); 1712 assert_different_registers(objReg, boxReg, tmpReg, scrReg); 1713 } 1714 1715 if (counters != NULL) { 1716 atomic_incl(ExternalAddress((address)counters->total_entry_count_addr()), scrReg); 1717 } 1718 if (EmitSync & 1) { 1719 // set box->dhw = markOopDesc::unused_mark() 1720 // Force all sync thru slow-path: slow_enter() and slow_exit() 1721 movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())); 1722 cmpptr (rsp, (int32_t)NULL_WORD); 1723 } else { 1724 // Possible cases that we'll encounter in fast_lock 1725 // ------------------------------------------------ 1726 // * Inflated 1727 // -- unlocked 1728 // -- Locked 1729 // = by self 1730 // = by other 1731 // * biased 1732 // -- by Self 1733 // -- by other 1734 // * neutral 1735 // * stack-locked 1736 // -- by self 1737 // = sp-proximity test hits 1738 // = sp-proximity test generates false-negative 1739 // -- by other 1740 // 1741 1742 Label IsInflated, DONE_LABEL; 1743 1744 // it's stack-locked, biased or neutral 1745 // TODO: optimize away redundant LDs of obj->mark and improve the markword triage 1746 // order to reduce the number of conditional branches in the most common cases. 1747 // Beware -- there's a subtle invariant that fetch of the markword 1748 // at [FETCH], below, will never observe a biased encoding (*101b). 1749 // If this invariant is not held we risk exclusion (safety) failure. 1750 if (UseBiasedLocking && !UseOptoBiasInlining) { 1751 biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, counters); 1752 } 1753 1754 #if INCLUDE_RTM_OPT 1755 if (UseRTMForStackLocks && use_rtm) { 1756 rtm_stack_locking(objReg, tmpReg, scrReg, cx2Reg, 1757 stack_rtm_counters, method_data, profile_rtm, 1758 DONE_LABEL, IsInflated); 1759 } 1760 #endif // INCLUDE_RTM_OPT 1761 1762 movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // [FETCH] 1763 testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased 1764 jccb(Assembler::notZero, IsInflated); 1765 1766 // Attempt stack-locking ... 1767 orptr (tmpReg, markOopDesc::unlocked_value); 1768 movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS 1769 if (os::is_MP()) { 1770 lock(); 1771 } 1772 cmpxchgptr(boxReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Updates tmpReg 1773 if (counters != NULL) { 1774 cond_inc32(Assembler::equal, 1775 ExternalAddress((address)counters->fast_path_entry_count_addr())); 1776 } 1777 jcc(Assembler::equal, DONE_LABEL); // Success 1778 1779 // Recursive locking. 1780 // The object is stack-locked: markword contains stack pointer to BasicLock. 
Locked by current thread if difference with current SP is less than one page. 1782 subptr(tmpReg, rsp);
1783 // The next instruction sets ZFlag == 1 (success) if the difference is less than one page. 1784 andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - os::vm_page_size())) ); 1785 movptr(Address(boxReg, 0), tmpReg);
1786 if (counters != NULL) { 1787 cond_inc32(Assembler::equal, 1788 ExternalAddress((address)counters->fast_path_entry_count_addr())); 1789 }
1790 jmp(DONE_LABEL); 1791
1792 bind(IsInflated); 1793 // The object is inflated. tmpReg contains the ObjectMonitor* + markOopDesc::monitor_value 1794
1795 #if INCLUDE_RTM_OPT 1796 // Use the same RTM locking code in 32- and 64-bit VM. 1797 if (use_rtm) { 1798 rtm_inflated_locking(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg, 1799 rtm_counters, method_data, profile_rtm, DONE_LABEL); 1800 } else { 1801 #endif // INCLUDE_RTM_OPT 1802
1803 #ifndef _LP64 1804 // The object is inflated. 1805
1806 // boxReg refers to the on-stack BasicLock in the current frame. 1807 // We'd like to write: 1808 // set box->_displaced_header = markOopDesc::unused_mark(). Any non-0 value suffices. 1809 // This is convenient but results in a ST-before-CAS penalty. The following CAS suffers 1810 // additional latency as we have another ST in the store buffer that must drain. 1811
1812 if (EmitSync & 8192) { 1813 movptr(Address(boxReg, 0), 3); // results in ST-before-CAS penalty 1814 get_thread (scrReg); 1815 movptr(boxReg, tmpReg); // consider: LEA box, [tmp-2] 1816 movptr(tmpReg, NULL_WORD); // consider: xor vs mov 1817 if (os::is_MP()) { 1818 lock(); 1819 }
1820 cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 1821 } else
1822 if ((EmitSync & 128) == 0) { // avoid ST-before-CAS 1823 // register juggle because we need tmpReg for cmpxchgptr below 1824 movptr(scrReg, boxReg); 1825 movptr(boxReg, tmpReg); // consider: LEA box, [tmp-2] 1826
1827 // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes 1828 if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) { 1829 // prefetchw [eax + Offset(_owner)-2] 1830 prefetchw(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 1831 } 1832
1833 if ((EmitSync & 64) == 0) { 1834 // Optimistic form: consider XORL tmpReg,tmpReg 1835 movptr(tmpReg, NULL_WORD); 1836 } else { 1837 // Can suffer RTS->RTO upgrades on shared or cold $ lines 1838 // Test-And-CAS instead of CAS 1839 movptr(tmpReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); // rax, = m->_owner 1840 testptr(tmpReg, tmpReg); // Locked ? 1841 jccb (Assembler::notZero, DONE_LABEL); 1842 } 1843
1844 // Appears unlocked - try to swing _owner from null to non-null. 1845 // Ideally, I'd manifest "Self" with get_thread and then attempt 1846 // to CAS the register containing Self into m->Owner. 1847 // But we don't have enough registers, so instead we can either try to CAS 1848 // rsp or the address of the box (in scr) into &m->owner. If the CAS succeeds 1849 // we later store "Self" into m->Owner. Transiently storing a stack address 1850 // (rsp or the address of the box) into m->owner is harmless. 1851 // Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand.
1852 if (os::is_MP()) { 1853 lock(); 1854 }
1855 cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 1856 movptr(Address(scrReg, 0), 3); // box->_displaced_header = 3
1857 // If we weren't able to swing _owner from NULL to the BasicLock 1858 // then take the slow path.
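// The CAS above left its verdict in ZF: not-zero (i.e. ZF == 0) means we failed
// to install the BasicLock, and we exit through DONE_LABEL with ZF == 0 (failure).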
1859 jccb (Assembler::notZero, DONE_LABEL); 1860 // update _owner from BasicLock to thread 1861 get_thread (scrReg); // beware: clobbers ICCs 1862 movptr(Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), scrReg); 1863 xorptr(boxReg, boxReg); // set icc.ZFlag = 1 to indicate success 1864 1865 // If the CAS fails we can either retry or pass control to the slow-path. 1866 // We use the latter tactic. 1867 // Pass the CAS result in the icc.ZFlag into DONE_LABEL 1868 // If the CAS was successful ... 1869 // Self has acquired the lock 1870 // Invariant: m->_recursions should already be 0, so we don't need to explicitly set it. 1871 // Intentional fall-through into DONE_LABEL ... 1872 } else { 1873 movptr(Address(boxReg, 0), intptr_t(markOopDesc::unused_mark())); // results in ST-before-CAS penalty 1874 movptr(boxReg, tmpReg); 1875 1876 // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes 1877 if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) { 1878 // prefetchw [eax + Offset(_owner)-2] 1879 prefetchw(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 1880 } 1881 1882 if ((EmitSync & 64) == 0) { 1883 // Optimistic form 1884 xorptr (tmpReg, tmpReg); 1885 } else { 1886 // Can suffer RTS->RTO upgrades on shared or cold $ lines 1887 movptr(tmpReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); // rax, = m->_owner 1888 testptr(tmpReg, tmpReg); // Locked ? 1889 jccb (Assembler::notZero, DONE_LABEL); 1890 } 1891 1892 // Appears unlocked - try to swing _owner from null to non-null. 1893 // Use either "Self" (in scr) or rsp as thread identity in _owner. 1894 // Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand. 1895 get_thread (scrReg); 1896 if (os::is_MP()) { 1897 lock(); 1898 } 1899 cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 1900 1901 // If the CAS fails we can either retry or pass control to the slow-path. 1902 // We use the latter tactic. 1903 // Pass the CAS result in the icc.ZFlag into DONE_LABEL 1904 // If the CAS was successful ... 1905 // Self has acquired the lock 1906 // Invariant: m->_recursions should already be 0, so we don't need to explicitly set it. 1907 // Intentional fall-through into DONE_LABEL ... 1908 } 1909 #else // _LP64 1910 // It's inflated 1911 movq(scrReg, tmpReg); 1912 xorq(tmpReg, tmpReg); 1913 1914 if (os::is_MP()) { 1915 lock(); 1916 } 1917 cmpxchgptr(r15_thread, Address(scrReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 1918 // Unconditionally set box->_displaced_header = markOopDesc::unused_mark(). 1919 // Without cast to int32_t movptr will destroy r10 which is typically obj. 1920 movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())); 1921 // Intentional fall-through into DONE_LABEL ... 1922 // Propagate ICC.ZF from CAS above into DONE_LABEL. 1923 #endif // _LP64 1924 #if INCLUDE_RTM_OPT 1925 } // use_rtm() 1926 #endif 1927 // DONE_LABEL is a hot target - we'd really like to place it at the 1928 // start of cache line by padding with NOPs. 1929 // See the AMD and Intel software optimization manuals for the 1930 // most efficient "long" NOP encodings. 1931 // Unfortunately none of our alignment mechanisms suffice. 1932 bind(DONE_LABEL); 1933 1934 // At DONE_LABEL the icc ZFlag is set as follows ... 1935 // Fast_Unlock uses the same protocol. 
ZFlag == 1 -> Success 1937 // ZFlag == 0 -> Failure - force control through the slow-path 1938 } 1939 } 1940
1941 // obj: object to unlock 1942 // box: box address (displaced header location), killed. Must be EAX. 1943 // tmp: killed, cannot be obj nor box. 1944 //
1945 // Some commentary on balanced locking: 1946 //
1947 // Fast_Lock and Fast_Unlock are emitted only for provably balanced lock sites. 1948 // Methods that don't have provably balanced locking are forced to run in the 1949 // interpreter - such methods won't be compiled to use fast_lock and fast_unlock. 1950 // The interpreter provides two properties: 1951 // I1: At return-time the interpreter automatically and quietly unlocks any 1952 // objects acquired by the current activation (frame). Recall that the 1953 // interpreter maintains an on-stack list of locks currently held by 1954 // a frame. 1955 // I2: If a method attempts to unlock an object that is not held by 1956 // the frame, the interpreter throws IMSX. 1957 //
1958 // Let's say A(), which has provably balanced locking, acquires O and then calls B(). 1959 // B() doesn't have provably balanced locking so it runs in the interpreter. 1960 // Control returns to A() and A() unlocks O. By I1 and I2, above, we know that O 1961 // is still locked by A(). 1962 //
1963 // The only other source of unbalanced locking would be JNI. The "Java Native Interface: 1964 // Programmer's Guide and Specification" claims that an object locked by jni_monitorenter 1965 // should not be unlocked by "normal" java-level locking and vice-versa. The specification 1966 // doesn't specify what will occur if a program engages in such mixed-mode locking, however. 1967 // Arguably, given that the spec legislates the JNI case as undefined, our implementation 1968 // could reasonably *avoid* checking the owner in Fast_Unlock(). 1969 // In the interest of performance we elide the m->Owner==Self check in unlock. 1970 // A perfectly viable alternative is to elide the owner check except when 1971 // Xcheck:jni is enabled. 1972
1973 void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpReg, bool use_rtm) { 1974 assert(boxReg == rax, ""); 1975 assert_different_registers(objReg, boxReg, tmpReg); 1976
1977 if (EmitSync & 4) { 1978 // Disable - inhibit all inlining. Force control through the slow-path 1979 cmpptr (rsp, 0); 1980 } else { 1981 Label DONE_LABEL, Stacked, CheckSucc; 1982
1983 // Critically, the biased locking test must have precedence over 1984 // and appear before the (box->dhw == 0) recursive stack-lock test. 1985 if (UseBiasedLocking && !UseOptoBiasInlining) { 1986 biased_locking_exit(objReg, tmpReg, DONE_LABEL); 1987 } 1988
1989 #if INCLUDE_RTM_OPT 1990 if (UseRTMForStackLocks && use_rtm) { 1991 assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking"); 1992 Label L_regular_unlock; 1993 movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // fetch markword 1994 andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits 1995 cmpptr(tmpReg, markOopDesc::unlocked_value); // bits = 001 unlocked 1996 jccb(Assembler::notEqual, L_regular_unlock); // if !HLE RegularLock 1997 xend(); // otherwise end... 1998 jmp(DONE_LABEL); // ...
and we're done 1999 bind(L_regular_unlock); 2000 } 2001 #endif 2002 2003 cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD); // Examine the displaced header 2004 jcc (Assembler::zero, DONE_LABEL); // 0 indicates recursive stack-lock 2005 movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Examine the object's markword 2006 testptr(tmpReg, markOopDesc::monitor_value); // Inflated? 2007 jccb (Assembler::zero, Stacked); 2008 2009 // It's inflated. 2010 #if INCLUDE_RTM_OPT 2011 if (use_rtm) { 2012 Label L_regular_inflated_unlock; 2013 int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner); 2014 movptr(boxReg, Address(tmpReg, owner_offset)); 2015 testptr(boxReg, boxReg); 2016 jccb(Assembler::notZero, L_regular_inflated_unlock); 2017 xend(); 2018 jmpb(DONE_LABEL); 2019 bind(L_regular_inflated_unlock); 2020 } 2021 #endif 2022 2023 // Despite our balanced locking property we still check that m->_owner == Self 2024 // as java routines or native JNI code called by this thread might 2025 // have released the lock. 2026 // Refer to the comments in synchronizer.cpp for how we might encode extra 2027 // state in _succ so we can avoid fetching EntryList|cxq. 2028 // 2029 // I'd like to add more cases in fast_lock() and fast_unlock() -- 2030 // such as recursive enter and exit -- but we have to be wary of 2031 // I$ bloat, T$ effects and BP$ effects. 2032 // 2033 // If there's no contention try a 1-0 exit. That is, exit without 2034 // a costly MEMBAR or CAS. See synchronizer.cpp for details on how 2035 // we detect and recover from the race that the 1-0 exit admits. 2036 // 2037 // Conceptually Fast_Unlock() must execute a STST|LDST "release" barrier 2038 // before it STs null into _owner, releasing the lock. Updates 2039 // to data protected by the critical section must be visible before 2040 // we drop the lock (and thus before any other thread could acquire 2041 // the lock and observe the fields protected by the lock). 2042 // IA32's memory-model is SPO, so STs are ordered with respect to 2043 // each other and there's no need for an explicit barrier (fence). 2044 // See also http://gee.cs.oswego.edu/dl/jmm/cookbook.html. 2045 #ifndef _LP64 2046 get_thread (boxReg); 2047 if ((EmitSync & 4096) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) { 2048 // prefetchw [ebx + Offset(_owner)-2] 2049 prefetchw(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 2050 } 2051 2052 // Note that we could employ various encoding schemes to reduce 2053 // the number of loads below (currently 4) to just 2 or 3. 2054 // Refer to the comments in synchronizer.cpp. 2055 // In practice the chain of fetches doesn't seem to impact performance, however. 2056 xorptr(boxReg, boxReg); 2057 if ((EmitSync & 65536) == 0 && (EmitSync & 256)) { 2058 // Attempt to reduce branch density - AMD's branch predictor. 
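// OR _recursions, EntryList and cxq together so that a single test-and-branch
// covers all three must-be-zero exit conditions at once.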
orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions))); 2060 orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList))); 2061 orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq))); 2062 jccb (Assembler::notZero, DONE_LABEL); 2063 movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD); 2064 jmpb (DONE_LABEL); 2065 } else {
2066 orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions))); 2067 jccb (Assembler::notZero, DONE_LABEL); 2068 movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList))); 2069 orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq))); 2070 jccb (Assembler::notZero, CheckSucc); 2071 movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD); 2072 jmpb (DONE_LABEL); 2073 } 2074
2075 // The following code fragment (EmitSync & 65536) improves the performance of 2076 // contended applications and contended synchronization microbenchmarks. 2077 // Unfortunately the emission of the code - even though not executed - causes regressions 2078 // in scimark and jetstream, evidently because of $ effects. Replacing the code 2079 // with an equal number of never-executed NOPs results in the same regression. 2080 // We leave it off by default. 2081
2082 if ((EmitSync & 65536) != 0) { 2083 Label LSuccess, LGoSlowPath ; 2084
2085 bind (CheckSucc); 2086
2087 // Optional pre-test ... it's safe to elide this 2088 cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD); 2089 jccb(Assembler::zero, LGoSlowPath); 2090
2091 // We have a classic Dekker-style idiom: 2092 // ST m->_owner = 0 ; MEMBAR; LD m->_succ 2093 // There are a number of ways to implement the barrier: 2094 // (1) lock:andl &m->_owner, 0 2095 // is fast, but masm doesn't currently support the "ANDL M,IMM32" form. 2096 // LOCK: ANDL [ebx+Offset(_Owner)-2], 0 2097 // Encodes as 81 31 OFF32 IMM32 or 83 63 OFF8 IMM8 2098 // (2) If supported, an explicit MFENCE is appealing. 2099 // In older IA32 processors MFENCE is slower than lock:add or xchg 2100 // particularly if the write-buffer is full as might be the case 2101 // if stores closely precede the fence or fence-equivalent instruction. 2102 // See https://blogs.oracle.com/dave/entry/instruction_selection_for_volatile_fences 2103 // as the situation has changed with Nehalem and Shanghai. 2104 // (3) In lieu of an explicit fence, use lock:addl to the top-of-stack 2105 // The $lines underlying the top-of-stack should be in M-state. 2106 // The locked add instruction is serializing, of course. 2107 // (4) Use xchg, which is serializing 2108 // mov boxReg, 0; xchgl boxReg, [tmpReg + Offset(_owner)-2] also works 2109 // (5) ST m->_owner = 0 and then execute lock:orl &m->_succ, 0. 2110 // The integer condition codes will tell us if succ was 0. 2111 // Since _succ and _owner should reside in the same $line and 2112 // we just stored into _owner, it's likely that the $line 2113 // remains in M-state for the lock:orl. 2114 //
2115 // We currently use (3), although it's likely that switching to (2) 2116 // is correct for the future.
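// A minimal sketch of idiom (3) as emitted below (illustrative pseudo-code
// only, not additional generated code):
//   m->_owner = NULL;             // ST: release the lock
//   lock; addl [esp], 0           // MEMBAR: serializing locked add to top-of-stack
//   if (m->_succ != NULL) done;   // LD: a successor exists, succession is assured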
2117 2118 movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
2119 if (os::is_MP()) { 2120 lock(); addptr(Address(rsp, 0), 0); 2121 }
2122 // Ratify _succ remains non-null 2123 cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), 0); 2124 jccb (Assembler::notZero, LSuccess); 2125
2126 xorptr(boxReg, boxReg); // box is really EAX 2127 if (os::is_MP()) { lock(); } 2128 cmpxchgptr(rsp, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2129 // There's no successor so we tried to regrab the lock with the 2130 // placeholder value. If that didn't work, then another thread 2131 // grabbed the lock so we're done (and exit was a success). 2132 jccb (Assembler::notEqual, LSuccess);
2133 // Since we're low on registers we installed rsp as a placeholder in _owner. 2134 // Now install Self over rsp. This is safe as we're transitioning from 2135 // non-null to non-null. 2136 get_thread (boxReg); 2137 movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), boxReg);
2138 // Intentional fall-through into LGoSlowPath ... 2139
2140 bind (LGoSlowPath); 2141 orptr(boxReg, 1); // set ICC.ZF=0 to indicate failure 2142 jmpb (DONE_LABEL); 2143
2144 bind (LSuccess); 2145 xorptr(boxReg, boxReg); // set ICC.ZF=1 to indicate success 2146 jmpb (DONE_LABEL); 2147 } 2148
2149 bind (Stacked); 2150 // It's not inflated and it's not recursively stack-locked and it's not biased. 2151 // It must be stack-locked. 2152 // Try to reset the header to displaced header. 2153 // The "box" value on the stack is stable, so we can reload 2154 // and be assured we observe the same value as above. 2155 movptr(tmpReg, Address(boxReg, 0));
2156 if (os::is_MP()) { 2157 lock(); 2158 }
2159 cmpxchgptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Uses RAX which is box 2160 // Intentional fall-through into DONE_LABEL 2161
2162 // DONE_LABEL is a hot target - we'd really like to place it at the 2163 // start of cache line by padding with NOPs. 2164 // See the AMD and Intel software optimization manuals for the 2165 // most efficient "long" NOP encodings. 2166 // Unfortunately none of our alignment mechanisms suffice.
2167 if ((EmitSync & 65536) == 0) { 2168 bind (CheckSucc); 2169 }
2170 #else // _LP64 2171 // It's inflated 2172 if (EmitSync & 1024) { 2173 // Emit code to check that _owner == Self 2174 // We could fold the _owner test into subsequent code more efficiently 2175 // than using a stand-alone check, but since _owner checking is off by 2176 // default we don't bother. We also might consider predicating the 2177 // _owner==Self check on Xcheck:jni or running on a debug build. 2178 movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 2179 xorptr(boxReg, r15_thread); 2180 } else { 2181 xorptr(boxReg, boxReg); 2182 }
2183 orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions))); 2184 jccb (Assembler::notZero, DONE_LABEL); 2185 movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq))); 2186 orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList))); 2187 jccb (Assembler::notZero, CheckSucc); 2188 movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t)NULL_WORD); 2189 jmpb (DONE_LABEL); 2190
2191 if ((EmitSync & 65536) == 0) { 2192 // Try to avoid passing control into the slow_path ...
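// If a successor is visible we can release the lock outright; only when the
// successor vanishes in the window below do we fall back to the CAS re-acquire.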
Label LSuccess, LGoSlowPath ; 2194 bind (CheckSucc); 2195
2196 // The following optional optimization can be elided if necessary 2197 // Effectively: if (succ == null) goto SlowPath 2198 // The code reduces the window for a race, however, 2199 // and thus benefits performance. 2200 cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD); 2201 jccb (Assembler::zero, LGoSlowPath); 2202
2203 xorptr(boxReg, boxReg);
2204 if ((EmitSync & 16) && os::is_MP()) { 2205 xchgptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); 2206 } else { 2207 movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t)NULL_WORD);
2208 if (os::is_MP()) { 2209 // Memory barrier/fence 2210 // Dekker pivot point -- fulcrum : ST Owner; MEMBAR; LD Succ 2211 // Instead of MFENCE we use a dummy locked add of 0 to the top-of-stack. 2212 // This is faster on Nehalem and AMD Shanghai/Barcelona. 2213 // See https://blogs.oracle.com/dave/entry/instruction_selection_for_volatile_fences 2214 // We might also restructure (ST Owner=0;barrier;LD _Succ) to 2215 // (mov box,0; xchgq box, &m->Owner; LD _succ). 2216 lock(); addl(Address(rsp, 0), 0); 2217 }
2218 }
2219 cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD); 2220 jccb (Assembler::notZero, LSuccess); 2221
2222 // Rare inopportune interleaving - race. 2223 // The successor vanished in the small window above. 2224 // The lock is contended -- (cxq|EntryList) != null -- and there's no apparent successor. 2225 // We need to ensure progress and succession. 2226 // Try to reacquire the lock. 2227 // If that fails then the new owner is responsible for succession and this 2228 // thread needs to take no further action and can exit via the fast path (success). 2229 // If the re-acquire succeeds then pass control into the slow path. 2230 // As implemented, this latter mode is horrible because we generate more 2231 // coherence traffic on the lock *and* artificially extend the critical section 2232 // length by virtue of passing control into the slow path. 2233
2234 // box is really RAX -- the following CMPXCHG depends on that binding 2235 // cmpxchg R,[M] is equivalent to rax = CAS(M,rax,R) 2236 if (os::is_MP()) { lock(); } 2237 cmpxchgptr(r15_thread, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2238 // There's no successor so we tried to regrab the lock. 2239 // If that didn't work, then another thread grabbed the 2240 // lock so we're done (and exit was a success). 2241 jccb (Assembler::notEqual, LSuccess); 2242 // Intentional fall-through into slow-path 2243
2244 bind (LGoSlowPath); 2245 orl (boxReg, 1); // set ICC.ZF=0 to indicate failure 2246 jmpb (DONE_LABEL); 2247
2248 bind (LSuccess); 2249 testl (boxReg, 0); // set ICC.ZF=1 to indicate success 2250 jmpb (DONE_LABEL); 2251 } 2252
2253 bind (Stacked); 2254 movptr(tmpReg, Address (boxReg, 0)); // re-fetch 2255 if (os::is_MP()) { lock(); } 2256 cmpxchgptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Uses RAX which is box 2257
2258 if (EmitSync & 65536) { 2259 bind (CheckSucc); 2260 }
2261 #endif 2262 bind(DONE_LABEL); 2263 } 2264 } 2265 #endif // COMPILER2 2266
2267 void MacroAssembler::c2bool(Register x) { 2268 // implements x == 0 ? 0 : 1 2269 // note: must only look at least-significant byte of x 2270 // since C-style booleans are stored in one byte 2271 // only!
(was bug) 2272 andl(x, 0xFF); 2273 setb(Assembler::notZero, x); 2274 } 2275
2276 // Wouldn't be needed if the AddressLiteral version had a new name 2277 void MacroAssembler::call(Label& L, relocInfo::relocType rtype) { 2278 Assembler::call(L, rtype); 2279 } 2280
2281 void MacroAssembler::call(Register entry) { 2282 Assembler::call(entry); 2283 } 2284
2285 void MacroAssembler::call(AddressLiteral entry) { 2286 if (reachable(entry)) { 2287 Assembler::call_literal(entry.target(), entry.rspec()); 2288 } else { 2289 lea(rscratch1, entry); 2290 Assembler::call(rscratch1); 2291 } 2292 } 2293
2294 void MacroAssembler::ic_call(address entry, jint method_index) { 2295 RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index); 2296 movptr(rax, (intptr_t)Universe::non_oop_word()); 2297 call(AddressLiteral(entry, rh)); 2298 } 2299
2300 // Implementation of call_VM versions 2301
2302 void MacroAssembler::call_VM(Register oop_result, 2303 address entry_point, 2304 bool check_exceptions) { 2305 Label C, E; 2306 call(C, relocInfo::none); 2307 jmp(E); 2308
2309 bind(C); 2310 call_VM_helper(oop_result, entry_point, 0, check_exceptions); 2311 ret(0); 2312
2313 bind(E); 2314 } 2315
2316 void MacroAssembler::call_VM(Register oop_result, 2317 address entry_point, 2318 Register arg_1, 2319 bool check_exceptions) { 2320 Label C, E; 2321 call(C, relocInfo::none); 2322 jmp(E); 2323
2324 bind(C); 2325 pass_arg1(this, arg_1); 2326 call_VM_helper(oop_result, entry_point, 1, check_exceptions); 2327 ret(0); 2328
2329 bind(E); 2330 } 2331
2332 void MacroAssembler::call_VM(Register oop_result, 2333 address entry_point, 2334 Register arg_1, 2335 Register arg_2, 2336 bool check_exceptions) { 2337 Label C, E; 2338 call(C, relocInfo::none); 2339 jmp(E); 2340
2341 bind(C); 2342
2343 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); 2344
2345 pass_arg2(this, arg_2); 2346 pass_arg1(this, arg_1); 2347 call_VM_helper(oop_result, entry_point, 2, check_exceptions); 2348 ret(0); 2349
2350 bind(E); 2351 } 2352
2353 void MacroAssembler::call_VM(Register oop_result, 2354 address entry_point, 2355 Register arg_1, 2356 Register arg_2, 2357 Register arg_3, 2358 bool check_exceptions) { 2359 Label C, E; 2360 call(C, relocInfo::none); 2361 jmp(E); 2362
2363 bind(C); 2364
2365 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg")); 2366 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg")); 2367 pass_arg3(this, arg_3); 2368
2369 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); 2370 pass_arg2(this, arg_2); 2371
2372 pass_arg1(this, arg_1); 2373 call_VM_helper(oop_result, entry_point, 3, check_exceptions); 2374 ret(0); 2375
2376 bind(E); 2377 } 2378
2379 void MacroAssembler::call_VM(Register oop_result, 2380 Register last_java_sp, 2381 address entry_point, 2382 int number_of_arguments, 2383 bool check_exceptions) { 2384 Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg); 2385 call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions); 2386 } 2387
2388 void MacroAssembler::call_VM(Register oop_result, 2389 Register last_java_sp, 2390 address entry_point, 2391 Register arg_1, 2392 bool check_exceptions) { 2393 pass_arg1(this, arg_1); 2394 call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions); 2395 } 2396
2397 void MacroAssembler::call_VM(Register oop_result, 2398 Register last_java_sp, 2399 address entry_point, 2400 Register arg_1, 2401 Register arg_2, 2402 bool check_exceptions) { 2403
2404 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); 2405 pass_arg2(this, arg_2); 2406
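// Note: arguments are passed in reverse order; the assert above ensures the
// pass_arg2 just emitted cannot clobber a register still holding arg_1.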
pass_arg1(this, arg_1); 2407 call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions); 2408 } 2409 2410 void MacroAssembler::call_VM(Register oop_result, 2411 Register last_java_sp, 2412 address entry_point, 2413 Register arg_1, 2414 Register arg_2, 2415 Register arg_3, 2416 bool check_exceptions) { 2417 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg")); 2418 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg")); 2419 pass_arg3(this, arg_3); 2420 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); 2421 pass_arg2(this, arg_2); 2422 pass_arg1(this, arg_1); 2423 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions); 2424 } 2425 2426 void MacroAssembler::super_call_VM(Register oop_result, 2427 Register last_java_sp, 2428 address entry_point, 2429 int number_of_arguments, 2430 bool check_exceptions) { 2431 Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg); 2432 MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions); 2433 } 2434 2435 void MacroAssembler::super_call_VM(Register oop_result, 2436 Register last_java_sp, 2437 address entry_point, 2438 Register arg_1, 2439 bool check_exceptions) { 2440 pass_arg1(this, arg_1); 2441 super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions); 2442 } 2443 2444 void MacroAssembler::super_call_VM(Register oop_result, 2445 Register last_java_sp, 2446 address entry_point, 2447 Register arg_1, 2448 Register arg_2, 2449 bool check_exceptions) { 2450 2451 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); 2452 pass_arg2(this, arg_2); 2453 pass_arg1(this, arg_1); 2454 super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions); 2455 } 2456 2457 void MacroAssembler::super_call_VM(Register oop_result, 2458 Register last_java_sp, 2459 address entry_point, 2460 Register arg_1, 2461 Register arg_2, 2462 Register arg_3, 2463 bool check_exceptions) { 2464 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg")); 2465 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg")); 2466 pass_arg3(this, arg_3); 2467 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); 2468 pass_arg2(this, arg_2); 2469 pass_arg1(this, arg_1); 2470 super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions); 2471 } 2472 2473 void MacroAssembler::call_VM_base(Register oop_result, 2474 Register java_thread, 2475 Register last_java_sp, 2476 address entry_point, 2477 int number_of_arguments, 2478 bool check_exceptions) { 2479 // determine java_thread register 2480 if (!java_thread->is_valid()) { 2481 #ifdef _LP64 2482 java_thread = r15_thread; 2483 #else 2484 java_thread = rdi; 2485 get_thread(java_thread); 2486 #endif // LP64 2487 } 2488 // determine last_java_sp register 2489 if (!last_java_sp->is_valid()) { 2490 last_java_sp = rsp; 2491 } 2492 // debugging support 2493 assert(number_of_arguments >= 0 , "cannot have negative number of arguments"); 2494 LP64_ONLY(assert(java_thread == r15_thread, "unexpected register")); 2495 #ifdef ASSERT 2496 // TraceBytecodes does not use r12 but saves it over the call, so don't verify 2497 // r12 is the heapbase. 
LP64_ONLY(if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");) 2499 #endif // ASSERT 2500
2501 assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result"); 2502 assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp"); 2503
2504 // push java thread (becomes first argument of C function) 2505
2506 NOT_LP64(push(java_thread); number_of_arguments++); 2507 LP64_ONLY(mov(c_rarg0, r15_thread)); 2508
2509 // set last Java frame before call 2510 assert(last_java_sp != rbp, "can't use ebp/rbp"); 2511
2512 // Only interpreter should have to set fp 2513 set_last_Java_frame(java_thread, last_java_sp, rbp, NULL); 2514
2515 // do the call, remove parameters 2516 MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments); 2517
2518 // restore the thread (cannot use the pushed argument since arguments 2519 // may be overwritten by C code generated by an optimizing compiler); 2520 // however we can use the register value directly if it is callee saved.
2521 if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) { 2522 // rdi & rsi (also r15) are callee saved -> nothing to do 2523 #ifdef ASSERT 2524 guarantee(java_thread != rax, "change this code"); 2525 push(rax); 2526 { Label L; 2527 get_thread(rax); 2528 cmpptr(java_thread, rax); 2529 jcc(Assembler::equal, L); 2530 STOP("MacroAssembler::call_VM_base: rdi not callee saved?"); 2531 bind(L); 2532 }
2533 pop(rax); 2534 #endif
2535 } else { 2536 get_thread(java_thread); 2537 }
2538 // reset last Java frame 2539 // Only interpreter should have to clear fp 2540 reset_last_Java_frame(java_thread, true); 2541
2542 // C++ interp handles this in the interpreter 2543 check_and_handle_popframe(java_thread); 2544 check_and_handle_earlyret(java_thread); 2545
2546 if (check_exceptions) { 2547 // check for pending exceptions (java_thread is set upon return) 2548 cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
2549 #ifndef _LP64 2550 jump_cc(Assembler::notEqual, 2551 RuntimeAddress(StubRoutines::forward_exception_entry()));
2552 #else 2553 // This used to conditionally jump to forward_exception; however, if we 2554 // relocate, it is possible that the branch will not reach. So we must jump 2555 // around so we can always reach it. 2556
2557 Label ok; 2558 jcc(Assembler::equal, ok); 2559 jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 2560 bind(ok); 2561 #endif // LP64
2562 } 2563
2564 // get oop result if there is one and reset the value in the thread 2565 if (oop_result->is_valid()) { 2566 get_vm_result(oop_result, java_thread); 2567 }
2568 } 2569
2570 void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) { 2571
2572 // Calculate the value for last_Java_sp. This is somewhat subtle: 2573 // call_VM does an intermediate call 2574 // which places a return address on the stack just under the 2575 // stack pointer as the user finished with it. This allows 2576 // us to retrieve last_Java_pc from last_Java_sp[-1]. 2577 // On 32bit we then have to push additional args on the stack to accomplish 2578 // the actual requested call. On 64bit call_VM can only use register args 2579 // so the only extra space is the return address that call_VM created. 2580 // This hopefully explains the calculations here.
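// Illustrative stack picture at this point (a sketch, not generated code):
//
//   64-bit: rsp -> [ return address pushed by the intermediate call ]
//           last_Java_sp = rsp + wordSize, so last_Java_pc = last_Java_sp[-1]
//   32-bit: rsp -> [ pushed args ... ][ return address ]
//           last_Java_sp = rsp + (1 + number_of_arguments) * wordSize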
2581 2582 #ifdef _LP64 2583 // We've pushed one address, correct last_Java_sp 2584 lea(rax, Address(rsp, wordSize)); 2585 #else 2586 lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize)); 2587 #endif // LP64 2588 2589 call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions); 2590 2591 } 2592 2593 // Use this method when MacroAssembler version of call_VM_leaf_base() should be called from Interpreter. 2594 void MacroAssembler::call_VM_leaf0(address entry_point) { 2595 MacroAssembler::call_VM_leaf_base(entry_point, 0); 2596 } 2597 2598 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) { 2599 call_VM_leaf_base(entry_point, number_of_arguments); 2600 } 2601 2602 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) { 2603 pass_arg0(this, arg_0); 2604 call_VM_leaf(entry_point, 1); 2605 } 2606 2607 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 2608 2609 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg")); 2610 pass_arg1(this, arg_1); 2611 pass_arg0(this, arg_0); 2612 call_VM_leaf(entry_point, 2); 2613 } 2614 2615 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { 2616 LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg")); 2617 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); 2618 pass_arg2(this, arg_2); 2619 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg")); 2620 pass_arg1(this, arg_1); 2621 pass_arg0(this, arg_0); 2622 call_VM_leaf(entry_point, 3); 2623 } 2624 2625 void MacroAssembler::super_call_VM_leaf(address entry_point) { 2626 MacroAssembler::call_VM_leaf_base(entry_point, 1); 2627 } 2628 2629 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) { 2630 pass_arg0(this, arg_0); 2631 MacroAssembler::call_VM_leaf_base(entry_point, 1); 2632 } 2633 2634 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 2635 2636 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg")); 2637 pass_arg1(this, arg_1); 2638 pass_arg0(this, arg_0); 2639 MacroAssembler::call_VM_leaf_base(entry_point, 2); 2640 } 2641 2642 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { 2643 LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg")); 2644 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); 2645 pass_arg2(this, arg_2); 2646 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg")); 2647 pass_arg1(this, arg_1); 2648 pass_arg0(this, arg_0); 2649 MacroAssembler::call_VM_leaf_base(entry_point, 3); 2650 } 2651 2652 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) { 2653 LP64_ONLY(assert(arg_0 != c_rarg3, "smashed arg")); 2654 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg")); 2655 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg")); 2656 pass_arg3(this, arg_3); 2657 LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg")); 2658 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); 2659 pass_arg2(this, arg_2); 2660 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg")); 2661 pass_arg1(this, arg_1); 2662 pass_arg0(this, arg_0); 2663 MacroAssembler::call_VM_leaf_base(entry_point, 4); 2664 } 2665 2666 void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) { 2667 movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset())); 2668 movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD); 2669 verify_oop(oop_result, "broken oop in 
call_VM_base"); 2670 } 2671 2672 void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) { 2673 movptr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset())); 2674 movptr(Address(java_thread, JavaThread::vm_result_2_offset()), NULL_WORD); 2675 } 2676 2677 void MacroAssembler::check_and_handle_earlyret(Register java_thread) { 2678 } 2679 2680 void MacroAssembler::check_and_handle_popframe(Register java_thread) { 2681 } 2682 2683 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm) { 2684 if (reachable(src1)) { 2685 cmpl(as_Address(src1), imm); 2686 } else { 2687 lea(rscratch1, src1); 2688 cmpl(Address(rscratch1, 0), imm); 2689 } 2690 } 2691 2692 void MacroAssembler::cmp32(Register src1, AddressLiteral src2) { 2693 assert(!src2.is_lval(), "use cmpptr"); 2694 if (reachable(src2)) { 2695 cmpl(src1, as_Address(src2)); 2696 } else { 2697 lea(rscratch1, src2); 2698 cmpl(src1, Address(rscratch1, 0)); 2699 } 2700 } 2701 2702 void MacroAssembler::cmp32(Register src1, int32_t imm) { 2703 Assembler::cmpl(src1, imm); 2704 } 2705 2706 void MacroAssembler::cmp32(Register src1, Address src2) { 2707 Assembler::cmpl(src1, src2); 2708 } 2709 2710 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { 2711 ucomisd(opr1, opr2); 2712 2713 Label L; 2714 if (unordered_is_less) { 2715 movl(dst, -1); 2716 jcc(Assembler::parity, L); 2717 jcc(Assembler::below , L); 2718 movl(dst, 0); 2719 jcc(Assembler::equal , L); 2720 increment(dst); 2721 } else { // unordered is greater 2722 movl(dst, 1); 2723 jcc(Assembler::parity, L); 2724 jcc(Assembler::above , L); 2725 movl(dst, 0); 2726 jcc(Assembler::equal , L); 2727 decrementl(dst); 2728 } 2729 bind(L); 2730 } 2731 2732 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { 2733 ucomiss(opr1, opr2); 2734 2735 Label L; 2736 if (unordered_is_less) { 2737 movl(dst, -1); 2738 jcc(Assembler::parity, L); 2739 jcc(Assembler::below , L); 2740 movl(dst, 0); 2741 jcc(Assembler::equal , L); 2742 increment(dst); 2743 } else { // unordered is greater 2744 movl(dst, 1); 2745 jcc(Assembler::parity, L); 2746 jcc(Assembler::above , L); 2747 movl(dst, 0); 2748 jcc(Assembler::equal , L); 2749 decrementl(dst); 2750 } 2751 bind(L); 2752 } 2753 2754 2755 void MacroAssembler::cmp8(AddressLiteral src1, int imm) { 2756 if (reachable(src1)) { 2757 cmpb(as_Address(src1), imm); 2758 } else { 2759 lea(rscratch1, src1); 2760 cmpb(Address(rscratch1, 0), imm); 2761 } 2762 } 2763 2764 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) { 2765 #ifdef _LP64 2766 if (src2.is_lval()) { 2767 movptr(rscratch1, src2); 2768 Assembler::cmpq(src1, rscratch1); 2769 } else if (reachable(src2)) { 2770 cmpq(src1, as_Address(src2)); 2771 } else { 2772 lea(rscratch1, src2); 2773 Assembler::cmpq(src1, Address(rscratch1, 0)); 2774 } 2775 #else 2776 if (src2.is_lval()) { 2777 cmp_literal32(src1, (int32_t) src2.target(), src2.rspec()); 2778 } else { 2779 cmpl(src1, as_Address(src2)); 2780 } 2781 #endif // _LP64 2782 } 2783 2784 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) { 2785 assert(src2.is_lval(), "not a mem-mem compare"); 2786 #ifdef _LP64 2787 // moves src2's literal address 2788 movptr(rscratch1, src2); 2789 Assembler::cmpq(src1, rscratch1); 2790 #else 2791 cmp_literal32(src1, (int32_t) src2.target(), src2.rspec()); 2792 #endif // _LP64 2793 } 2794 2795 void MacroAssembler::cmpoop(Register src1, Register src2) { 2796 cmpptr(src1, 
src2); 2797 } 2798 2799 void MacroAssembler::cmpoop(Register src1, Address src2) { 2800 cmpptr(src1, src2); 2801 } 2802 2803 #ifdef _LP64 2804 void MacroAssembler::cmpoop(Register src1, jobject src2) { 2805 movoop(rscratch1, src2); 2806 cmpptr(src1, rscratch1); 2807 } 2808 #endif 2809 2810 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) { 2811 if (reachable(adr)) { 2812 if (os::is_MP()) 2813 lock(); 2814 cmpxchgptr(reg, as_Address(adr)); 2815 } else { 2816 lea(rscratch1, adr); 2817 if (os::is_MP()) 2818 lock(); 2819 cmpxchgptr(reg, Address(rscratch1, 0)); 2820 } 2821 } 2822 2823 void MacroAssembler::cmpxchgptr(Register reg, Address adr) { 2824 LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr)); 2825 } 2826 2827 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) { 2828 if (reachable(src)) { 2829 Assembler::comisd(dst, as_Address(src)); 2830 } else { 2831 lea(rscratch1, src); 2832 Assembler::comisd(dst, Address(rscratch1, 0)); 2833 } 2834 } 2835 2836 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) { 2837 if (reachable(src)) { 2838 Assembler::comiss(dst, as_Address(src)); 2839 } else { 2840 lea(rscratch1, src); 2841 Assembler::comiss(dst, Address(rscratch1, 0)); 2842 } 2843 } 2844 2845 2846 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) { 2847 Condition negated_cond = negate_condition(cond); 2848 Label L; 2849 jcc(negated_cond, L); 2850 pushf(); // Preserve flags 2851 atomic_incl(counter_addr); 2852 popf(); 2853 bind(L); 2854 } 2855 2856 int MacroAssembler::corrected_idivl(Register reg) { 2857 // Full implementation of Java idiv and irem; checks for 2858 // special case as described in JVM spec., p.243 & p.271. 2859 // The function returns the (pc) offset of the idivl 2860 // instruction - may be needed for implicit exceptions. 
2861 // 2862 // normal case special case 2863 // 2864 // input : rax,: dividend min_int 2865 // reg: divisor (may not be rax,/rdx) -1 2866 // 2867 // output: rax,: quotient (= rax, idiv reg) min_int 2868 // rdx: remainder (= rax, irem reg) 0 2869 assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register"); 2870 const int min_int = 0x80000000; 2871 Label normal_case, special_case; 2872 2873 // check for special case 2874 cmpl(rax, min_int); 2875 jcc(Assembler::notEqual, normal_case); 2876 xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0) 2877 cmpl(reg, -1); 2878 jcc(Assembler::equal, special_case); 2879 2880 // handle normal case 2881 bind(normal_case); 2882 cdql(); 2883 int idivl_offset = offset(); 2884 idivl(reg); 2885 2886 // normal and special case exit 2887 bind(special_case); 2888 2889 return idivl_offset; 2890 } 2891 2892 2893 2894 void MacroAssembler::decrementl(Register reg, int value) { 2895 if (value == min_jint) {subl(reg, value) ; return; } 2896 if (value < 0) { incrementl(reg, -value); return; } 2897 if (value == 0) { ; return; } 2898 if (value == 1 && UseIncDec) { decl(reg) ; return; } 2899 /* else */ { subl(reg, value) ; return; } 2900 } 2901 2902 void MacroAssembler::decrementl(Address dst, int value) { 2903 if (value == min_jint) {subl(dst, value) ; return; } 2904 if (value < 0) { incrementl(dst, -value); return; } 2905 if (value == 0) { ; return; } 2906 if (value == 1 && UseIncDec) { decl(dst) ; return; } 2907 /* else */ { subl(dst, value) ; return; } 2908 } 2909 2910 void MacroAssembler::division_with_shift (Register reg, int shift_value) { 2911 assert (shift_value > 0, "illegal shift value"); 2912 Label _is_positive; 2913 testl (reg, reg); 2914 jcc (Assembler::positive, _is_positive); 2915 int offset = (1 << shift_value) - 1 ; 2916 2917 if (offset == 1) { 2918 incrementl(reg); 2919 } else { 2920 addl(reg, offset); 2921 } 2922 2923 bind (_is_positive); 2924 sarl(reg, shift_value); 2925 } 2926 2927 void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src) { 2928 if (reachable(src)) { 2929 Assembler::divsd(dst, as_Address(src)); 2930 } else { 2931 lea(rscratch1, src); 2932 Assembler::divsd(dst, Address(rscratch1, 0)); 2933 } 2934 } 2935 2936 void MacroAssembler::divss(XMMRegister dst, AddressLiteral src) { 2937 if (reachable(src)) { 2938 Assembler::divss(dst, as_Address(src)); 2939 } else { 2940 lea(rscratch1, src); 2941 Assembler::divss(dst, Address(rscratch1, 0)); 2942 } 2943 } 2944 2945 // !defined(COMPILER2) is because of stupid core builds 2946 #if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2) || INCLUDE_JVMCI 2947 void MacroAssembler::empty_FPU_stack() { 2948 if (VM_Version::supports_mmx()) { 2949 emms(); 2950 } else { 2951 for (int i = 8; i-- > 0; ) ffree(i); 2952 } 2953 } 2954 #endif // !LP64 || C1 || !C2 || INCLUDE_JVMCI 2955 2956 2957 // Defines obj, preserves var_size_in_bytes 2958 void MacroAssembler::eden_allocate(Register obj, 2959 Register var_size_in_bytes, 2960 int con_size_in_bytes, 2961 Register t1, 2962 Label& slow_case) { 2963 assert(obj == rax, "obj must be in rax, for cmpxchg"); 2964 assert_different_registers(obj, var_size_in_bytes, t1); 2965 if (!Universe::heap()->supports_inline_contig_alloc()) { 2966 jmp(slow_case); 2967 } else { 2968 Register end = t1; 2969 Label retry; 2970 bind(retry); 2971 ExternalAddress heap_top((address) Universe::heap()->top_addr()); 2972 movptr(obj, heap_top); 2973 if (var_size_in_bytes == noreg) { 2974 lea(end, Address(obj, con_size_in_bytes)); 2975 } else { 
lea(end, Address(obj, var_size_in_bytes, Address::times_1)); 2977 }
2978 // if end < obj then we wrapped around => object too long => slow case 2979 cmpptr(end, obj); 2980 jcc(Assembler::below, slow_case); 2981 cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr())); 2982 jcc(Assembler::above, slow_case);
2983 // Compare obj with the top addr, and if still equal, store the new top addr 2984 // (in end) at the address of the top addr pointer. Sets ZF if it was equal, 2985 // and clears it otherwise. Use lock prefix for atomicity on MPs. 2986 locked_cmpxchgptr(end, heap_top); 2987 jcc(Assembler::notEqual, retry);
2988 } 2989 } 2990
2991 void MacroAssembler::enter() { 2992 push(rbp); 2993 mov(rbp, rsp); 2994 } 2995
2996 // A 5 byte nop that is safe for patching (see patch_verified_entry) 2997 void MacroAssembler::fat_nop() { 2998 if (UseAddressNop) { 2999 addr_nop_5(); 3000 } else { 3001 emit_int8(0x26); // es: 3002 emit_int8(0x2e); // cs: 3003 emit_int8(0x64); // fs: 3004 emit_int8(0x65); // gs: 3005 emit_int8((unsigned char)0x90); 3006 }
3007 } 3008
3009 void MacroAssembler::fcmp(Register tmp) { 3010 fcmp(tmp, 1, true, true); 3011 } 3012
3013 void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) { 3014 assert(!pop_right || pop_left, "usage error");
3015 if (VM_Version::supports_cmov()) { 3016 assert(tmp == noreg, "unneeded temp"); 3017 if (pop_left) { 3018 fucomip(index); 3019 } else { 3020 fucomi(index); 3021 }
3022 if (pop_right) { 3023 fpop(); 3024 }
3025 } else { 3026 assert(tmp != noreg, "need temp"); 3027 if (pop_left) { 3028 if (pop_right) { 3029 fcompp(); 3030 } else { 3031 fcomp(index); 3032 }
3033 } else { 3034 fcom(index); 3035 }
3036 // convert FPU condition into eflags condition via rax, 3037 save_rax(tmp); 3038 fwait(); fnstsw_ax(); 3039 sahf(); 3040 restore_rax(tmp); 3041 }
3042 // condition codes set as follows: 3043 //
3044 // CF (corresponds to C0) if x < y 3045 // PF (corresponds to C2) if unordered 3046 // ZF (corresponds to C3) if x = y 3047 } 3048
3049 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) { 3050 fcmp2int(dst, unordered_is_less, 1, true, true); 3051 } 3052
3053 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) { 3054 fcmp(VM_Version::supports_cmov() ?
noreg : dst, index, pop_left, pop_right); 3055 Label L; 3056 if (unordered_is_less) { 3057 movl(dst, -1); 3058 jcc(Assembler::parity, L); 3059 jcc(Assembler::below , L); 3060 movl(dst, 0); 3061 jcc(Assembler::equal , L); 3062 increment(dst); 3063 } else { // unordered is greater 3064 movl(dst, 1); 3065 jcc(Assembler::parity, L); 3066 jcc(Assembler::above , L); 3067 movl(dst, 0); 3068 jcc(Assembler::equal , L); 3069 decrementl(dst); 3070 } 3071 bind(L); 3072 } 3073 3074 void MacroAssembler::fld_d(AddressLiteral src) { 3075 fld_d(as_Address(src)); 3076 } 3077 3078 void MacroAssembler::fld_s(AddressLiteral src) { 3079 fld_s(as_Address(src)); 3080 } 3081 3082 void MacroAssembler::fld_x(AddressLiteral src) { 3083 Assembler::fld_x(as_Address(src)); 3084 } 3085 3086 void MacroAssembler::fldcw(AddressLiteral src) { 3087 Assembler::fldcw(as_Address(src)); 3088 } 3089 3090 void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src) { 3091 if (reachable(src)) { 3092 Assembler::mulpd(dst, as_Address(src)); 3093 } else { 3094 lea(rscratch1, src); 3095 Assembler::mulpd(dst, Address(rscratch1, 0)); 3096 } 3097 } 3098 3099 void MacroAssembler::increase_precision() { 3100 subptr(rsp, BytesPerWord); 3101 fnstcw(Address(rsp, 0)); 3102 movl(rax, Address(rsp, 0)); 3103 orl(rax, 0x300); 3104 push(rax); 3105 fldcw(Address(rsp, 0)); 3106 pop(rax); 3107 } 3108 3109 void MacroAssembler::restore_precision() { 3110 fldcw(Address(rsp, 0)); 3111 addptr(rsp, BytesPerWord); 3112 } 3113 3114 void MacroAssembler::fpop() { 3115 ffree(); 3116 fincstp(); 3117 } 3118 3119 void MacroAssembler::load_float(Address src) { 3120 if (UseSSE >= 1) { 3121 movflt(xmm0, src); 3122 } else { 3123 LP64_ONLY(ShouldNotReachHere()); 3124 NOT_LP64(fld_s(src)); 3125 } 3126 } 3127 3128 void MacroAssembler::store_float(Address dst) { 3129 if (UseSSE >= 1) { 3130 movflt(dst, xmm0); 3131 } else { 3132 LP64_ONLY(ShouldNotReachHere()); 3133 NOT_LP64(fstp_s(dst)); 3134 } 3135 } 3136 3137 void MacroAssembler::load_double(Address src) { 3138 if (UseSSE >= 2) { 3139 movdbl(xmm0, src); 3140 } else { 3141 LP64_ONLY(ShouldNotReachHere()); 3142 NOT_LP64(fld_d(src)); 3143 } 3144 } 3145 3146 void MacroAssembler::store_double(Address dst) { 3147 if (UseSSE >= 2) { 3148 movdbl(dst, xmm0); 3149 } else { 3150 LP64_ONLY(ShouldNotReachHere()); 3151 NOT_LP64(fstp_d(dst)); 3152 } 3153 } 3154 3155 void MacroAssembler::fremr(Register tmp) { 3156 save_rax(tmp); 3157 { Label L; 3158 bind(L); 3159 fprem(); 3160 fwait(); fnstsw_ax(); 3161 #ifdef _LP64 3162 testl(rax, 0x400); 3163 jcc(Assembler::notEqual, L); 3164 #else 3165 sahf(); 3166 jcc(Assembler::parity, L); 3167 #endif // _LP64 3168 } 3169 restore_rax(tmp); 3170 // Result is in ST0. 
3171 // Note: fxch & fpop to get rid of ST1 3172 // (otherwise FPU stack could overflow eventually) 3173 fxch(1); 3174 fpop(); 3175 } 3176 3177 // dst = c = a * b + c 3178 void MacroAssembler::fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) { 3179 Assembler::vfmadd231sd(c, a, b); 3180 if (dst != c) { 3181 movdbl(dst, c); 3182 } 3183 } 3184 3185 // dst = c = a * b + c 3186 void MacroAssembler::fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) { 3187 Assembler::vfmadd231ss(c, a, b); 3188 if (dst != c) { 3189 movflt(dst, c); 3190 } 3191 } 3192 3193 // dst = c = a * b + c 3194 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) { 3195 Assembler::vfmadd231pd(c, a, b, vector_len); 3196 if (dst != c) { 3197 vmovdqu(dst, c); 3198 } 3199 } 3200 3201 // dst = c = a * b + c 3202 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) { 3203 Assembler::vfmadd231ps(c, a, b, vector_len); 3204 if (dst != c) { 3205 vmovdqu(dst, c); 3206 } 3207 } 3208 3209 // dst = c = a * b + c 3210 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) { 3211 Assembler::vfmadd231pd(c, a, b, vector_len); 3212 if (dst != c) { 3213 vmovdqu(dst, c); 3214 } 3215 } 3216 3217 // dst = c = a * b + c 3218 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) { 3219 Assembler::vfmadd231ps(c, a, b, vector_len); 3220 if (dst != c) { 3221 vmovdqu(dst, c); 3222 } 3223 } 3224 3225 void MacroAssembler::incrementl(AddressLiteral dst) { 3226 if (reachable(dst)) { 3227 incrementl(as_Address(dst)); 3228 } else { 3229 lea(rscratch1, dst); 3230 incrementl(Address(rscratch1, 0)); 3231 } 3232 } 3233 3234 void MacroAssembler::incrementl(ArrayAddress dst) { 3235 incrementl(as_Address(dst)); 3236 } 3237 3238 void MacroAssembler::incrementl(Register reg, int value) { 3239 if (value == min_jint) {addl(reg, value) ; return; } 3240 if (value < 0) { decrementl(reg, -value); return; } 3241 if (value == 0) { ; return; } 3242 if (value == 1 && UseIncDec) { incl(reg) ; return; } 3243 /* else */ { addl(reg, value) ; return; } 3244 } 3245 3246 void MacroAssembler::incrementl(Address dst, int value) { 3247 if (value == min_jint) {addl(dst, value) ; return; } 3248 if (value < 0) { decrementl(dst, -value); return; } 3249 if (value == 0) { ; return; } 3250 if (value == 1 && UseIncDec) { incl(dst) ; return; } 3251 /* else */ { addl(dst, value) ; return; } 3252 } 3253 3254 void MacroAssembler::jump(AddressLiteral dst) { 3255 if (reachable(dst)) { 3256 jmp_literal(dst.target(), dst.rspec()); 3257 } else { 3258 lea(rscratch1, dst); 3259 jmp(rscratch1); 3260 } 3261 } 3262 3263 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) { 3264 if (reachable(dst)) { 3265 InstructionMark im(this); 3266 relocate(dst.reloc()); 3267 const int short_size = 2; 3268 const int long_size = 6; 3269 int offs = (intptr_t)dst.target() - ((intptr_t)pc()); 3270 if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) { 3271 // 0111 tttn #8-bit disp 3272 emit_int8(0x70 | cc); 3273 emit_int8((offs - short_size) & 0xFF); 3274 } else { 3275 // 0000 1111 1000 tttn #32-bit disp 3276 emit_int8(0x0F); 3277 emit_int8((unsigned char)(0x80 | cc)); 3278 emit_int32(offs - long_size); 3279 } 3280 } else { 3281 #ifdef ASSERT 3282 warning("reversing conditional branch"); 3283 #endif /* ASSERT */ 3284 Label skip; 3285 jccb(reverse[cc], skip); 3286 lea(rscratch1, 
void MacroAssembler::jump(AddressLiteral dst) {
  if (reachable(dst)) {
    jmp_literal(dst.target(), dst.rspec());
  } else {
    lea(rscratch1, dst);
    jmp(rscratch1);
  }
}

void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
  if (reachable(dst)) {
    InstructionMark im(this);
    relocate(dst.reloc());
    const int short_size = 2;
    const int long_size = 6;
    int offs = (intptr_t)dst.target() - ((intptr_t)pc());
    if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
      // 0111 tttn #8-bit disp
      emit_int8(0x70 | cc);
      emit_int8((offs - short_size) & 0xFF);
    } else {
      // 0000 1111 1000 tttn #32-bit disp
      emit_int8(0x0F);
      emit_int8((unsigned char)(0x80 | cc));
      emit_int32(offs - long_size);
    }
  } else {
#ifdef ASSERT
    warning("reversing conditional branch");
#endif /* ASSERT */
    Label skip;
    jccb(reverse[cc], skip);
    lea(rscratch1, dst);
    Assembler::jmp(rscratch1);
    bind(skip);
  }
}

void MacroAssembler::ldmxcsr(AddressLiteral src) {
  if (reachable(src)) {
    Assembler::ldmxcsr(as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::ldmxcsr(Address(rscratch1, 0));
  }
}

int MacroAssembler::load_signed_byte(Register dst, Address src) {
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
    off = offset();
    movsbl(dst, src); // movsxb
  } else {
    off = load_unsigned_byte(dst, src);
    shll(dst, 24);
    sarl(dst, 24);
  }
  return off;
}

// Note: load_signed_short used to be called load_signed_word.
// Although the 'w' in x86 opcodes refers to the term "word" in the assembler
// manual, which means 16 bits, that usage is found nowhere in HotSpot code.
// The term "word" in HotSpot means a 32- or 64-bit machine word.
int MacroAssembler::load_signed_short(Register dst, Address src) {
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
    // This is dubious to me since it seems safe to do a signed 16 => 64 bit
    // version but this is what 64bit has always done. This seems to imply
    // that users are only using 32bits worth.
    off = offset();
    movswl(dst, src); // movsxw
  } else {
    off = load_unsigned_short(dst, src);
    shll(dst, 16);
    sarl(dst, 16);
  }
  return off;
}
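// Illustrative note (not from the original source): for a reachable target
// with no relocation, jump_cc() emits the short form when the displacement
// fits in 8 bits, otherwise the near form. Using Assembler::zero (cc = 0x4)
// as an example, the two encodings are:
//
//   0x74 <disp8>         // 0111 tttn, 2 bytes
//   0x0F 0x84 <disp32>   // 0000 1111 1000 tttn, 6 bytes
//
// For an unreachable target the condition is inverted through the reverse[]
// table and a short conditional skip around an indirect jmp via rscratch1 is
// emitted instead.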
int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
  // According to Intel Doc. AP-526, "Zero-Extension of Short", p. 16,
  // and "3.9 Partial Register Penalties", p. 22.
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
    off = offset();
    movzbl(dst, src); // movzxb
  } else {
    xorl(dst, dst);
    off = offset();
    movb(dst, src);
  }
  return off;
}

// Note: load_unsigned_short used to be called load_unsigned_word.
int MacroAssembler::load_unsigned_short(Register dst, Address src) {
  // According to Intel Doc. AP-526, "Zero-Extension of Short", p. 16,
  // and "3.9 Partial Register Penalties", p. 22.
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
    off = offset();
    movzwl(dst, src); // movzxw
  } else {
    xorl(dst, dst);
    off = offset();
    movw(dst, src);
  }
  return off;
}
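// Illustrative note (not from the original source): on pre-P6 cores a byte or
// word load that merges into a register recently written at full width incurs
// a partial-register stall, so the helpers above clear the destination first:
//
//   xorl(dst, dst);   // break the dependence on the old full-width value
//   movb(dst, src);   // then merge in the low byte
//
// On P6 and later (and always on 64-bit), movzbl/movzwl zero-extend in a
// single instruction and the xor is unnecessary.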
void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
  switch (size_in_bytes) {
#ifndef _LP64
  case 8:
    assert(dst2 != noreg, "second dest register required");
    movl(dst,  src);
    movl(dst2, src.plus_disp(BytesPerInt));
    break;
#else
  case 8: movq(dst, src); break;
#endif
  case 4: movl(dst, src); break;
  case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
  case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
  default: ShouldNotReachHere();
  }
}

void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
  switch (size_in_bytes) {
#ifndef _LP64
  case 8:
    assert(src2 != noreg, "second source register required");
    movl(dst,                        src);
    movl(dst.plus_disp(BytesPerInt), src2);
    break;
#else
  case 8: movq(dst, src); break;
#endif
  case 4: movl(dst, src); break;
  case 2: movw(dst, src); break;
  case 1: movb(dst, src); break;
  default: ShouldNotReachHere();
  }
}
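// Illustrative note (not from the original source): load_sized_value() is a
// size-dispatched load. A minimal usage sketch, where field_addr is a
// placeholder Address and dst2 is only consulted for the 8-byte case on
// 32-bit:
//
//   load_sized_value(rax, field_addr, 2, /*is_signed*/ true, noreg);  // movswl
//   NOT_LP64(load_sized_value(rax, field_addr, 8, false, rdx));       // two movl,
//                                                                     // rdx:rax pair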
void MacroAssembler::mov32(AddressLiteral dst, Register src) {
  if (reachable(dst)) {
    movl(as_Address(dst), src);
  } else {
    lea(rscratch1, dst);
    movl(Address(rscratch1, 0), src);
  }
}

void MacroAssembler::mov32(Register dst, AddressLiteral src) {
  if (reachable(src)) {
    movl(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    movl(dst, Address(rscratch1, 0));
  }
}

// C++ bool manipulation

void MacroAssembler::movbool(Register dst, Address src) {
  if (sizeof(bool) == 1)
    movb(dst, src);
  else if (sizeof(bool) == 2)
    movw(dst, src);
  else if (sizeof(bool) == 4)
    movl(dst, src);
  else
    // unsupported
    ShouldNotReachHere();
}

void MacroAssembler::movbool(Address dst, bool boolconst) {
  if (sizeof(bool) == 1)
    movb(dst, (int) boolconst);
  else if (sizeof(bool) == 2)
    movw(dst, (int) boolconst);
  else if (sizeof(bool) == 4)
    movl(dst, (int) boolconst);
  else
    // unsupported
    ShouldNotReachHere();
}

void MacroAssembler::movbool(Address dst, Register src) {
  if (sizeof(bool) == 1)
    movb(dst, src);
  else if (sizeof(bool) == 2)
    movw(dst, src);
  else if (sizeof(bool) == 4)
    movl(dst, src);
  else
    // unsupported
    ShouldNotReachHere();
}

void MacroAssembler::movbyte(ArrayAddress dst, int src) {
  movb(as_Address(dst), src);
}

void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    movdl(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    movdl(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::movq(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    movq(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    movq(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::setvectmask(Register dst, Register src) {
  Assembler::movl(dst, 1);
  Assembler::shlxl(dst, dst, src);
  Assembler::decl(dst);
  Assembler::kmovdl(k1, dst);
  Assembler::movl(dst, src);
}

void MacroAssembler::restorevectmask() {
  Assembler::knotwl(k1, k0);
}
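// Illustrative note (not from the original source): setvectmask() materializes
// an opmask with the low 'src' bits set into k1, via (1 << src) - 1. A worked
// example, assuming src holds 16:
//
//   movl(dst, 1);          // dst = 1
//   shlxl(dst, dst, src);  // dst = 1 << 16 = 0x10000
//   decl(dst);             // dst = 0xFFFF  (16 lanes enabled)
//   kmovdl(k1, dst);       // k1  = 0xFFFF
//
// restorevectmask() rewrites k1 as the complement of k0 (knotwl), which
// yields an all-ones mask when k0 holds zero.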
void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    if (UseXmmLoadAndClearUpper) {
      movsd (dst, as_Address(src));
    } else {
      movlpd(dst, as_Address(src));
    }
  } else {
    lea(rscratch1, src);
    if (UseXmmLoadAndClearUpper) {
      movsd (dst, Address(rscratch1, 0));
    } else {
      movlpd(dst, Address(rscratch1, 0));
    }
  }
}

void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    movss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    movss(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::movptr(Register dst, Register src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}

void MacroAssembler::movptr(Register dst, Address src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}

// src should NEVER be a real pointer. Use AddressLiteral for true pointers
void MacroAssembler::movptr(Register dst, intptr_t src) {
  LP64_ONLY(mov64(dst, src)) NOT_LP64(movl(dst, src));
}

void MacroAssembler::movptr(Address dst, Register src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}

void MacroAssembler::movdqu(Address dst, XMMRegister src) {
  if (UseAVX > 2 && !VM_Version::supports_avx512vl() && (src->encoding() > 15)) {
    Assembler::vextractf32x4(dst, src, 0);
  } else {
    Assembler::movdqu(dst, src);
  }
}

void MacroAssembler::movdqu(XMMRegister dst, Address src) {
  if (UseAVX > 2 && !VM_Version::supports_avx512vl() && (dst->encoding() > 15)) {
    Assembler::vinsertf32x4(dst, dst, src, 0);
  } else {
    Assembler::movdqu(dst, src);
  }
}

void MacroAssembler::movdqu(XMMRegister dst, XMMRegister src) {
  if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
    Assembler::evmovdqul(dst, src, Assembler::AVX_512bit);
  } else {
    Assembler::movdqu(dst, src);
  }
}

void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src, Register scratchReg) {
  if (reachable(src)) {
    movdqu(dst, as_Address(src));
  } else {
    lea(scratchReg, src);
    movdqu(dst, Address(scratchReg, 0));
  }
}
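// Illustrative note (not from the original source): without AVX512VL the
// legacy SSE/AVX encodings of movdqu cannot name xmm16..xmm31, so the
// wrappers above fall back to EVEX-encoded instructions whenever a high
// register is involved. A sketch of the store case, assuming UseAVX > 2,
// no AVX512VL, and src == xmm20:
//
//   movdqu(Address(rsp, 0), xmm20);  // emits vextractf32x4(mem, xmm20, 0)
//                                    // instead of an un-encodable movdqu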
void MacroAssembler::vmovdqu(Address dst, XMMRegister src) {
  if (UseAVX > 2 && !VM_Version::supports_avx512vl() && (src->encoding() > 15)) {
    vextractf64x4_low(dst, src);
  } else {
    Assembler::vmovdqu(dst, src);
  }
}

void MacroAssembler::vmovdqu(XMMRegister dst, Address src) {
  if (UseAVX > 2 && !VM_Version::supports_avx512vl() && (dst->encoding() > 15)) {
    vinsertf64x4_low(dst, src);
  } else {
    Assembler::vmovdqu(dst, src);
  }
}

void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) {
  if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
    Assembler::evmovdqul(dst, src, Assembler::AVX_512bit);
  } else {
    Assembler::vmovdqu(dst, src);
  }
}

void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    vmovdqu(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    vmovdqu(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::movdqa(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::movdqa(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::movsd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::movsd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::movss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::movss(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::mulsd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::mulsd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::mulss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::mulss(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check(offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any (non-CC) registers
    // NOTE: cmpl is plenty here to provoke a segv
    cmpptr(rax, Address(reg, 0));
    // Note: should probably use testl(rax, Address(reg, 0));
    //       may be shorter code (however, this version of
    //       testl needs to be implemented first)
  } else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}
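// Illustrative note (not from the original source): most null checks are
// implicit -- the subsequent access at [reg + offset] raises the OS trap.
// null_check() only emits code when the offset could fall outside the
// protected page, e.g. (some_huge_offset is a placeholder):
//
//   null_check(rbx, 8);                 // small offset: no code emitted
//   null_check(rbx, some_huge_offset);  // emits cmpptr(rax, Address(rbx, 0))
//                                       // to force the fault at a known spot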
void MacroAssembler::test_klass_is_value(Register klass, Register temp_reg, Label& is_value) {
  movl(temp_reg, Address(klass, Klass::access_flags_offset()));
  testl(temp_reg, JVM_ACC_VALUE);
  jcc(Assembler::notZero, is_value);
}

void MacroAssembler::test_field_is_flattenable(Register flags, Register temp_reg, Label& is_flattenable) {
  movl(temp_reg, flags);
  shrl(temp_reg, ConstantPoolCacheEntry::is_flattenable_field_shift);
  andl(temp_reg, 0x1);
  testl(temp_reg, temp_reg);
  jcc(Assembler::notZero, is_flattenable);
}

void MacroAssembler::test_field_is_not_flattenable(Register flags, Register temp_reg, Label& notFlattenable) {
  movl(temp_reg, flags);
  shrl(temp_reg, ConstantPoolCacheEntry::is_flattenable_field_shift);
  andl(temp_reg, 0x1);
  testl(temp_reg, temp_reg);
  jcc(Assembler::zero, notFlattenable);
}

void MacroAssembler::test_field_is_flattened(Register flags, Register temp_reg, Label& is_flattened) {
  movl(temp_reg, flags);
  shrl(temp_reg, ConstantPoolCacheEntry::is_flattened_field_shift);
  andl(temp_reg, 0x1);
  testl(temp_reg, temp_reg);
  jcc(Assembler::notZero, is_flattened);
}

void MacroAssembler::test_flat_array_klass(Register klass, Register temp_reg,
                                           Label& is_flat_array) {
  movl(temp_reg, Address(klass, Klass::layout_helper_offset()));
  sarl(temp_reg, Klass::_lh_array_tag_shift);
  cmpl(temp_reg, Klass::_lh_array_tag_vt_value);
  jcc(Assembler::equal, is_flat_array);
}

void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg,
                                         Label& is_flat_array) {
  load_klass(temp_reg, oop);
  test_flat_array_klass(temp_reg, temp_reg, is_flat_array);
}

void MacroAssembler::test_value_is_not_buffered(Register value, Register temp_reg, Label& not_buffered) {
  ExternalAddress VTBuffer_top(VTBuffer::top_addr());
  ExternalAddress VTBuffer_end(VTBuffer::end_addr());

  // Test below is ordered based on the relative positions of
  // the Java heap and the VTBuffer to execute a single test for heap-allocated values

  if (VTBuffer::base() < Universe::heap()->base()) {
    lea(temp_reg, VTBuffer_end);
    cmpptr(value, temp_reg);
    jcc(Assembler::greaterEqual, not_buffered);
    lea(temp_reg, VTBuffer_top);
    cmpptr(value, temp_reg);
    jcc(Assembler::less, not_buffered);
  } else {
    lea(temp_reg, VTBuffer_top);
    cmpptr(value, temp_reg);
    jcc(Assembler::less, not_buffered);
    lea(temp_reg, VTBuffer_end);
    cmpptr(value, temp_reg);
    jcc(Assembler::greaterEqual, not_buffered);
  }
}

void MacroAssembler::test_oop_is_value(Register oop, Register temp, Label* is_value, Label* is_not_value) {
  const int mask = Universe::oop_metadata_valuetype_mask();
  assert((is_value != NULL) || (is_not_value != NULL), "Need a label to jump to");
  assert((is_value == NULL) ^ (is_not_value == NULL), "Need one label");
#ifdef _LP64
  if (UseCompressedClassPointers) {
    movl(temp, Address(oop, oopDesc::klass_offset_in_bytes()));
  } else
#endif
    movptr(temp, Address(oop, oopDesc::klass_offset_in_bytes()));

  andl(temp, mask);
  testl(temp, temp);
  if (is_not_value != NULL) {
    jcc(Assembler::zero, *is_not_value);
  } else {
    jcc(Assembler::notZero, *is_value);
  }
}
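// Illustrative note (not from the original source): test_value_is_not_buffered()
// exploits the fact that the Java heap and the VTBuffer occupy disjoint address
// ranges, so one comparison per end suffices. Assuming the buffer sits below
// the heap:
//
//   value >= VTBuffer_end  ==>  not buffered (this one branch filters the heap)
//   value <  VTBuffer_top  ==>  below the buffer, also not buffered
//
// Only values inside [VTBuffer_top, VTBuffer_end) fall through as buffered.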
void MacroAssembler::os_breakpoint() {
  // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
  // (e.g., MSVC can't call ps() otherwise)
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
}

void MacroAssembler::unimplemented(const char* what) {
  const char* buf = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("unimplemented: %s", what);
    buf = code_string(ss.as_string());
  }
  stop(buf);
}

#ifdef _LP64
#define XSTATE_BV 0x200
#endif

void MacroAssembler::pop_CPU_state() {
  pop_FPU_state();
  pop_IU_state();
}

void MacroAssembler::pop_FPU_state() {
#ifndef _LP64
  frstor(Address(rsp, 0));
#else
  fxrstor(Address(rsp, 0));
#endif
  addptr(rsp, FPUStateSizeInWords * wordSize);
}

void MacroAssembler::pop_IU_state() {
  popa();
  LP64_ONLY(addq(rsp, 8));
  popf();
}

// Save Integer and Float state
// Warning: Stack must be 16 byte aligned (64bit)
void MacroAssembler::push_CPU_state() {
  push_IU_state();
  push_FPU_state();
}

void MacroAssembler::push_FPU_state() {
  subptr(rsp, FPUStateSizeInWords * wordSize);
#ifndef _LP64
  fnsave(Address(rsp, 0));
  fwait();
#else
  fxsave(Address(rsp, 0));
#endif // LP64
}

void MacroAssembler::push_IU_state() {
  // Push flags first because pusha kills them
  pushf();
  // Make sure rsp stays 16-byte aligned
  LP64_ONLY(subq(rsp, 8));
  pusha();
}

void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rdi;
    get_thread(java_thread);
  }
  // we must set sp to zero to clear frame
  movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
  if (clear_fp) {
    movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
  }

  // Always clear the pc because it could have been set by make_walkable()
  movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);

  vzeroupper();
}

void MacroAssembler::restore_rax(Register tmp) {
  if (tmp == noreg) pop(rax);
  else if (tmp != rax) mov(rax, tmp);
}

void MacroAssembler::round_to(Register reg, int modulus) {
  addptr(reg, modulus - 1);
  andptr(reg, -modulus);
}

void MacroAssembler::save_rax(Register tmp) {
  if (tmp == noreg) push(rax);
  else if (tmp != rax) mov(tmp, rax);
}
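// Illustrative note (not from the original source): push_CPU_state() lays the
// save area out as IU state first, then FPU state on top, and pop_CPU_state()
// unwinds in the opposite order. A minimal sketch of the 64-bit stack shape
// after push_CPU_state(), lowest address first:
//
//   [rsp + 0 ..)   fxsave image (FPUStateSizeInWords * wordSize bytes)
//   [ ... )        pusha'd integer registers
//   [ ... )        8-byte alignment pad
//   [ ... )        pushf'd flags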
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
void MacroAssembler::serialize_memory(Register thread, Register tmp) {
  movl(tmp, thread);
  shrl(tmp, os::get_serialize_page_shift_count());
  andl(tmp, (os::vm_page_size() - sizeof(int)));

  Address index(noreg, tmp, Address::times_1);
  ExternalAddress page(os::get_memory_serialize_page());

  // Size of store must match masking code above
  movl(as_Address(ArrayAddress(page, index)), tmp);
}

void MacroAssembler::safepoint_poll(Label& slow_path, Register thread_reg, Register temp_reg) {
  if (SafepointMechanism::uses_thread_local_poll()) {
#ifdef _LP64
    assert(thread_reg == r15_thread, "should be");
#else
    if (thread_reg == noreg) {
      thread_reg = temp_reg;
      get_thread(thread_reg);
    }
#endif
    testb(Address(thread_reg, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
    jcc(Assembler::notZero, slow_path); // handshake bit set implies poll
  } else {
    cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
          SafepointSynchronize::_not_synchronized);
    jcc(Assembler::notEqual, slow_path);
  }
}
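// Illustrative notes (not from the original source):
// - serialize_memory(): with a page of size P and shift count s, the slot
//   written is
//     tmp = (thread >> s) & (P - sizeof(int));
//   so distinct threads tend to hash to distinct int-sized slots, keeping
//   the serializing stores off each other's cache lines.
// - safepoint_poll(): under thread-local polling this reduces to a single
//   testb of the poll bit in the thread's polling word; under global polling
//   it is a cmp32 against SafepointSynchronize::address_of_state().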
// Calls to C land
//
// When entering C land, the rbp and rsp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
void MacroAssembler::set_last_Java_frame(Register java_thread,
                                         Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc) {
  vzeroupper();
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rdi;
    get_thread(java_thread);
  }
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }

  // last_java_fp is optional

  if (last_java_fp->is_valid()) {
    movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
  }

  // last_java_pc is optional

  if (last_java_pc != NULL) {
    lea(Address(java_thread,
                JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()),
        InternalAddress(last_java_pc));
  }
  movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
}

void MacroAssembler::shlptr(Register dst, int imm8) {
  LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8));
}

void MacroAssembler::shrptr(Register dst, int imm8) {
  LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8));
}

void MacroAssembler::sign_extend_byte(Register reg) {
  if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) {
    movsbl(reg, reg); // movsxb
  } else {
    shll(reg, 24);
    sarl(reg, 24);
  }
}

void MacroAssembler::sign_extend_short(Register reg) {
  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
    movswl(reg, reg); // movsxw
  } else {
    shll(reg, 16);
    sarl(reg, 16);
  }
}

void MacroAssembler::testl(Register dst, AddressLiteral src) {
  assert(reachable(src), "Address should be reachable");
  testl(dst, as_Address(src));
}
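// Illustrative note (not from the original source): a typical native-call
// sequence brackets the call with the anchor helpers above and earlier in
// this file (entry and pc_after_call are placeholders):
//
//   set_last_Java_frame(noreg, noreg, noreg, pc_after_call);  // record anchor
//   call(RuntimeAddress(entry));                              // enter C land
//   reset_last_Java_frame(noreg, /*clear_fp*/ true);          // tear down anchor
//
// set_last_Java_frame() stores last_Java_sp only after fp and pc are in
// place, so a stack walker never observes a half-built anchor.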
void MacroAssembler::pcmpeqb(XMMRegister dst, XMMRegister src) {
  int dst_enc = dst->encoding();
  int src_enc = src->encoding();
  if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
    Assembler::pcmpeqb(dst, src);
  } else if ((dst_enc < 16) && (src_enc < 16)) {
    Assembler::pcmpeqb(dst, src);
  } else if (src_enc < 16) {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, dst, Assembler::AVX_512bit);
    Assembler::pcmpeqb(xmm0, src);
    movdqu(dst, xmm0);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  } else if (dst_enc < 16) {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, src, Assembler::AVX_512bit);
    Assembler::pcmpeqb(dst, xmm0);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  } else {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
    movdqu(xmm0, src);
    movdqu(xmm1, dst);
    Assembler::pcmpeqb(xmm1, xmm0);
    movdqu(dst, xmm1);
    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  }
}

void MacroAssembler::pcmpeqw(XMMRegister dst, XMMRegister src) {
  int dst_enc = dst->encoding();
  int src_enc = src->encoding();
  if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
    Assembler::pcmpeqw(dst, src);
  } else if ((dst_enc < 16) && (src_enc < 16)) {
    Assembler::pcmpeqw(dst, src);
  } else if (src_enc < 16) {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, dst, Assembler::AVX_512bit);
    Assembler::pcmpeqw(xmm0, src);
    movdqu(dst, xmm0);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  } else if (dst_enc < 16) {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, src, Assembler::AVX_512bit);
    Assembler::pcmpeqw(dst, xmm0);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  } else {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
    movdqu(xmm0, src);
    movdqu(xmm1, dst);
    Assembler::pcmpeqw(xmm1, xmm0);
    movdqu(dst, xmm1);
    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  }
}

void MacroAssembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
  int dst_enc = dst->encoding();
  if (dst_enc < 16) {
    Assembler::pcmpestri(dst, src, imm8);
  } else {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, dst, Assembler::AVX_512bit);
    Assembler::pcmpestri(xmm0, src, imm8);
    movdqu(dst, xmm0);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  }
}
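// Illustrative note (not from the original source): the wrappers above and
// below all share one pattern for running a legacy-encoded SSE instruction
// when an operand lives in the AVX-512 upper bank (encoding > 15) and neither
// plain AVX nor AVX512BW applies:
//
//   subptr(rsp, 64);                               // one ZMM spill slot
//   evmovdqul(Address(rsp, 0), xmm0, AVX_512bit);  // save xmm0 (EVEX reaches
//                                                  // all 32 registers)
//   evmovdqul(xmm0, high_reg, AVX_512bit);         // stage the operand down
//   /* legacy op on xmm0 */                        // now encodable
//   evmovdqul(xmm0, Address(rsp, 0), AVX_512bit);  // restore xmm0
//   addptr(rsp, 64);
//
// When both operands are high, xmm1 is spilled as a second staging register.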
void MacroAssembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
  int dst_enc = dst->encoding();
  int src_enc = src->encoding();
  if ((dst_enc < 16) && (src_enc < 16)) {
    Assembler::pcmpestri(dst, src, imm8);
  } else if (src_enc < 16) {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, dst, Assembler::AVX_512bit);
    Assembler::pcmpestri(xmm0, src, imm8);
    movdqu(dst, xmm0);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  } else if (dst_enc < 16) {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, src, Assembler::AVX_512bit);
    Assembler::pcmpestri(dst, xmm0, imm8);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  } else {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
    movdqu(xmm0, src);
    movdqu(xmm1, dst);
    Assembler::pcmpestri(xmm1, xmm0, imm8);
    movdqu(dst, xmm1);
    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  }
}

void MacroAssembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
  int dst_enc = dst->encoding();
  int src_enc = src->encoding();
  if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
    Assembler::pmovzxbw(dst, src);
  } else if ((dst_enc < 16) && (src_enc < 16)) {
    Assembler::pmovzxbw(dst, src);
  } else if (src_enc < 16) {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, dst, Assembler::AVX_512bit);
    Assembler::pmovzxbw(xmm0, src);
    movdqu(dst, xmm0);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  } else if (dst_enc < 16) {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, src, Assembler::AVX_512bit);
    Assembler::pmovzxbw(dst, xmm0);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  } else {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
    movdqu(xmm0, src);
    movdqu(xmm1, dst);
    Assembler::pmovzxbw(xmm1, xmm0);
    movdqu(dst, xmm1);
    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  }
}

void MacroAssembler::pmovzxbw(XMMRegister dst, Address src) {
  int dst_enc = dst->encoding();
  if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
    Assembler::pmovzxbw(dst, src);
  } else if (dst_enc < 16) {
    Assembler::pmovzxbw(dst, src);
  } else {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, dst, Assembler::AVX_512bit);
    Assembler::pmovzxbw(xmm0, src);
    movdqu(dst, xmm0);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  }
}

void MacroAssembler::pmovmskb(Register dst, XMMRegister src) {
  int src_enc = src->encoding();
  if (src_enc < 16) {
    Assembler::pmovmskb(dst, src);
  } else {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, src, Assembler::AVX_512bit);
    Assembler::pmovmskb(dst, xmm0);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  }
}
void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) {
  int dst_enc = dst->encoding();
  int src_enc = src->encoding();
  if ((dst_enc < 16) && (src_enc < 16)) {
    Assembler::ptest(dst, src);
  } else if (src_enc < 16) {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, dst, Assembler::AVX_512bit);
    Assembler::ptest(xmm0, src);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  } else if (dst_enc < 16) {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, src, Assembler::AVX_512bit);
    Assembler::ptest(dst, xmm0);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  } else {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
    movdqu(xmm0, src);
    movdqu(xmm1, dst);
    Assembler::ptest(xmm1, xmm0);
    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  }
}

void MacroAssembler::sqrtsd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::sqrtsd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::sqrtsd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::sqrtss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::sqrtss(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::subsd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::subsd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::subss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::subss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::subss(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::ucomisd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::ucomisd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::ucomiss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::ucomiss(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) {
  // Used in sign-bit flipping with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::xorpd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::xorpd(dst, Address(rscratch1, 0));
  }
}
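// Illustrative note (not from the original source): xorpd/xorps with an
// AddressLiteral are used for sign manipulation against constant masks; XOR
// with a mask whose only set bit is the sign bit negates a double:
//
//   0x8000000000000000 ^ bits(x)  ==  bits(-x)
//
// The legacy SSE memory form requires the 16-byte alignment asserted above;
// with AVX the unaligned encoding lifts that requirement.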
void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) {
  if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) {
    Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
  } else {
    Assembler::xorpd(dst, src);
  }
}

void MacroAssembler::xorps(XMMRegister dst, XMMRegister src) {
  if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) {
    Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
  } else {
    Assembler::xorps(dst, src);
  }
}

void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
  // Used in sign-bit flipping with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::xorps(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::xorps(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src) {
  // Used with 16-byte aligned constants (e.g. shuffle masks).
  bool aligned_adr = (((intptr_t)src.target() & 15) == 0);
  assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::pshufb(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::pshufb(dst, Address(rscratch1, 0));
  }
}

// AVX 3-operands instructions

void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vaddsd(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vaddsd(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vaddss(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vaddss(dst, nds, Address(rscratch1, 0));
  }
}
void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len) {
  int dst_enc = dst->encoding();
  int nds_enc = nds->encoding();
  int src_enc = src->encoding();
  if ((dst_enc < 16) && (nds_enc < 16)) {
    vandps(dst, nds, negate_field, vector_len);
  } else if ((src_enc < 16) && (dst_enc < 16)) {
    evmovdqul(src, nds, Assembler::AVX_512bit);
    vandps(dst, src, negate_field, vector_len);
  } else if (src_enc < 16) {
    evmovdqul(src, nds, Assembler::AVX_512bit);
    vandps(src, src, negate_field, vector_len);
    evmovdqul(dst, src, Assembler::AVX_512bit);
  } else if (dst_enc < 16) {
    evmovdqul(src, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, nds, Assembler::AVX_512bit);
    vandps(dst, xmm0, negate_field, vector_len);
    evmovdqul(xmm0, src, Assembler::AVX_512bit);
  } else {
    if (src_enc != dst_enc) {
      evmovdqul(src, xmm0, Assembler::AVX_512bit);
      evmovdqul(xmm0, nds, Assembler::AVX_512bit);
      vandps(xmm0, xmm0, negate_field, vector_len);
      evmovdqul(dst, xmm0, Assembler::AVX_512bit);
      evmovdqul(xmm0, src, Assembler::AVX_512bit);
    } else {
      subptr(rsp, 64);
      evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
      evmovdqul(xmm0, nds, Assembler::AVX_512bit);
      vandps(xmm0, xmm0, negate_field, vector_len);
      evmovdqul(dst, xmm0, Assembler::AVX_512bit);
      evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
      addptr(rsp, 64);
    }
  }
}

void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len) {
  int dst_enc = dst->encoding();
  int nds_enc = nds->encoding();
  int src_enc = src->encoding();
  if ((dst_enc < 16) && (nds_enc < 16)) {
    vandpd(dst, nds, negate_field, vector_len);
  } else if ((src_enc < 16) && (dst_enc < 16)) {
    evmovdqul(src, nds, Assembler::AVX_512bit);
    vandpd(dst, src, negate_field, vector_len);
  } else if (src_enc < 16) {
    evmovdqul(src, nds, Assembler::AVX_512bit);
    vandpd(src, src, negate_field, vector_len);
    evmovdqul(dst, src, Assembler::AVX_512bit);
  } else if (dst_enc < 16) {
    evmovdqul(src, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, nds, Assembler::AVX_512bit);
    vandpd(dst, xmm0, negate_field, vector_len);
    evmovdqul(xmm0, src, Assembler::AVX_512bit);
  } else {
    if (src_enc != dst_enc) {
      evmovdqul(src, xmm0, Assembler::AVX_512bit);
      evmovdqul(xmm0, nds, Assembler::AVX_512bit);
      vandpd(xmm0, xmm0, negate_field, vector_len);
      evmovdqul(dst, xmm0, Assembler::AVX_512bit);
      evmovdqul(xmm0, src, Assembler::AVX_512bit);
    } else {
      subptr(rsp, 64);
      evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
      evmovdqul(xmm0, nds, Assembler::AVX_512bit);
      vandpd(xmm0, xmm0, negate_field, vector_len);
      evmovdqul(dst, xmm0, Assembler::AVX_512bit);
      evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
      addptr(rsp, 64);
    }
  }
}
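// Illustrative note (not from the original source): vabsss/vabssd implement
// floating-point |x| by AND-ing with a "negate_field" mask that clears only
// the sign bit, e.g. for a float lane:
//
//   0x7FFFFFFF & bits(x)  ==  bits(fabsf(x))
//
// The register juggling around vandps/vandpd is the same upper-bank staging
// described earlier, with src and nds pressed into service as scratch.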
void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  int dst_enc = dst->encoding();
  int nds_enc = nds->encoding();
  int src_enc = src->encoding();
  if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
    Assembler::vpaddb(dst, nds, src, vector_len);
  } else if ((dst_enc < 16) && (src_enc < 16)) {
    Assembler::vpaddb(dst, dst, src, vector_len);
  } else if ((dst_enc < 16) && (nds_enc < 16)) {
    // use nds as scratch for src
    evmovdqul(nds, src, Assembler::AVX_512bit);
    Assembler::vpaddb(dst, dst, nds, vector_len);
  } else if ((src_enc < 16) && (nds_enc < 16)) {
    // use nds as scratch for dst
    evmovdqul(nds, dst, Assembler::AVX_512bit);
    Assembler::vpaddb(nds, nds, src, vector_len);
    evmovdqul(dst, nds, Assembler::AVX_512bit);
  } else if (dst_enc < 16) {
    // use nds as scratch for xmm0 to hold src
    evmovdqul(nds, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, src, Assembler::AVX_512bit);
    Assembler::vpaddb(dst, dst, xmm0, vector_len);
    evmovdqul(xmm0, nds, Assembler::AVX_512bit);
  } else {
    // worst-case scenario, all regs are in the upper bank
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
    evmovdqul(nds, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm1, src, Assembler::AVX_512bit);
    evmovdqul(xmm0, dst, Assembler::AVX_512bit);
    Assembler::vpaddb(xmm0, xmm0, xmm1, vector_len);
    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, nds, Assembler::AVX_512bit);
    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  }
}

void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  int dst_enc = dst->encoding();
  int nds_enc = nds->encoding();
  if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
    Assembler::vpaddb(dst, nds, src, vector_len);
  } else if (dst_enc < 16) {
    Assembler::vpaddb(dst, dst, src, vector_len);
  } else if (nds_enc < 16) {
    // implies dst_enc in upper bank with src as scratch
    evmovdqul(nds, dst, Assembler::AVX_512bit);
    Assembler::vpaddb(nds, nds, src, vector_len);
    evmovdqul(dst, nds, Assembler::AVX_512bit);
  } else {
    // worst-case scenario, all regs in upper bank
    evmovdqul(nds, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, dst, Assembler::AVX_512bit);
    Assembler::vpaddb(xmm0, xmm0, src, vector_len);
    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, nds, Assembler::AVX_512bit);
  }
}

void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  int dst_enc = dst->encoding();
  int nds_enc = nds->encoding();
  int src_enc = src->encoding();
  if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
    Assembler::vpaddw(dst, nds, src, vector_len);
  } else if ((dst_enc < 16) && (src_enc < 16)) {
    Assembler::vpaddw(dst, dst, src, vector_len);
  } else if ((dst_enc < 16) && (nds_enc < 16)) {
    // use nds as scratch for src
    evmovdqul(nds, src, Assembler::AVX_512bit);
    Assembler::vpaddw(dst, dst, nds, vector_len);
  } else if ((src_enc < 16) && (nds_enc < 16)) {
    // use nds as scratch for dst
    evmovdqul(nds, dst, Assembler::AVX_512bit);
    Assembler::vpaddw(nds, nds, src, vector_len);
    evmovdqul(dst, nds, Assembler::AVX_512bit);
  } else if (dst_enc < 16) {
    // use nds as scratch for xmm0 to hold src
    evmovdqul(nds, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, src, Assembler::AVX_512bit);
    Assembler::vpaddw(dst, dst, xmm0, vector_len);
    evmovdqul(xmm0, nds, Assembler::AVX_512bit);
  } else {
    // worst-case scenario, all regs are in the upper bank
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
    evmovdqul(nds, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm1, src, Assembler::AVX_512bit);
    evmovdqul(xmm0, dst, Assembler::AVX_512bit);
    Assembler::vpaddw(xmm0, xmm0, xmm1, vector_len);
    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, nds, Assembler::AVX_512bit);
    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  }
}

void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  int dst_enc = dst->encoding();
  int nds_enc = nds->encoding();
  if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
    Assembler::vpaddw(dst, nds, src, vector_len);
  } else if (dst_enc < 16) {
    Assembler::vpaddw(dst, dst, src, vector_len);
  } else if (nds_enc < 16) {
    // implies dst_enc in upper bank with src as scratch
    evmovdqul(nds, dst, Assembler::AVX_512bit);
    Assembler::vpaddw(nds, nds, src, vector_len);
    evmovdqul(dst, nds, Assembler::AVX_512bit);
  } else {
    // worst-case scenario, all regs in upper bank
    evmovdqul(nds, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, dst, Assembler::AVX_512bit);
    Assembler::vpaddw(xmm0, xmm0, src, vector_len);
    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, nds, Assembler::AVX_512bit);
  }
}

void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len) {
  if (reachable(src)) {
    Assembler::vpand(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch1, src);
    Assembler::vpand(dst, nds, Address(rscratch1, 0), vector_len);
  }
}
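// Illustrative note (not from the original source): in the helpers above, nds
// is architecturally a source operand, but on these fallback paths it appears
// to be treated as clobberable, so the code reuses it as scratch -- e.g.
// "use nds as scratch for src" copies src down into nds and then issues the
// legacy two-operand form:
//
//   evmovdqul(nds, src, Assembler::AVX_512bit);
//   Assembler::vpaddb(dst, dst, nds, vector_len);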
void MacroAssembler::vpbroadcastw(XMMRegister dst, XMMRegister src) {
  int dst_enc = dst->encoding();
  int src_enc = src->encoding();
  if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
    Assembler::vpbroadcastw(dst, src);
  } else if ((dst_enc < 16) && (src_enc < 16)) {
    Assembler::vpbroadcastw(dst, src);
  } else if (src_enc < 16) {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, dst, Assembler::AVX_512bit);
    Assembler::vpbroadcastw(xmm0, src);
    movdqu(dst, xmm0);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  } else if (dst_enc < 16) {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, src, Assembler::AVX_512bit);
    Assembler::vpbroadcastw(dst, xmm0);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  } else {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
    movdqu(xmm0, src);
    movdqu(xmm1, dst);
    Assembler::vpbroadcastw(xmm1, xmm0);
    movdqu(dst, xmm1);
    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  }
}

void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  int dst_enc = dst->encoding();
  int nds_enc = nds->encoding();
  int src_enc = src->encoding();
  assert(dst_enc == nds_enc, "");
  if ((dst_enc < 16) && (src_enc < 16)) {
    Assembler::vpcmpeqb(dst, nds, src, vector_len);
  } else if (src_enc < 16) {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, dst, Assembler::AVX_512bit);
    Assembler::vpcmpeqb(xmm0, xmm0, src, vector_len);
    movdqu(dst, xmm0);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  } else if (dst_enc < 16) {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, src, Assembler::AVX_512bit);
    Assembler::vpcmpeqb(dst, dst, xmm0, vector_len);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  } else {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
    movdqu(xmm0, src);
    movdqu(xmm1, dst);
    Assembler::vpcmpeqb(xmm1, xmm1, xmm0, vector_len);
    movdqu(dst, xmm1);
    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  }
}

void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  int dst_enc = dst->encoding();
  int nds_enc = nds->encoding();
  int src_enc = src->encoding();
  assert(dst_enc == nds_enc, "");
  if ((dst_enc < 16) && (src_enc < 16)) {
    Assembler::vpcmpeqw(dst, nds, src, vector_len);
  } else if (src_enc < 16) {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, dst, Assembler::AVX_512bit);
    Assembler::vpcmpeqw(xmm0, xmm0, src, vector_len);
    movdqu(dst, xmm0);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  } else if (dst_enc < 16) {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    evmovdqul(xmm0, src, Assembler::AVX_512bit);
    Assembler::vpcmpeqw(dst, dst, xmm0, vector_len);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  } else {
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
    movdqu(xmm0, src);
    movdqu(xmm1, dst);
    Assembler::vpcmpeqw(xmm1, xmm1, xmm0, vector_len);
    movdqu(dst, xmm1);
    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
    addptr(rsp, 64);
  }
}
vector_len); 4869 evmovdqul(xmm1, dst, Assembler::AVX_512bit); 4870 evmovdqul(dst, xmm0, Assembler::AVX_512bit); 4871 evmovdqul(xmm0, nds, Assembler::AVX_512bit); 4872 evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit); 4873 addptr(rsp, 64); 4874 } 4875 } 4876 4877 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 4878 int dst_enc = dst->encoding(); 4879 int nds_enc = nds->encoding(); 4880 if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) { 4881 Assembler::vpsraw(dst, nds, shift, vector_len); 4882 } else if (dst_enc < 16) { 4883 Assembler::vpsraw(dst, dst, shift, vector_len); 4884 } else if (nds_enc < 16) { 4885 // use nds as scratch 4886 evmovdqul(nds, dst, Assembler::AVX_512bit); 4887 Assembler::vpsraw(nds, nds, shift, vector_len); 4888 evmovdqul(dst, nds, Assembler::AVX_512bit); 4889 } else { 4890 // use nds as scratch for xmm0 4891 evmovdqul(nds, xmm0, Assembler::AVX_512bit); 4892 evmovdqul(xmm0, dst, Assembler::AVX_512bit); 4893 Assembler::vpsraw(xmm0, xmm0, shift, vector_len); 4894 evmovdqul(xmm0, nds, Assembler::AVX_512bit); 4895 } 4896 } 4897 4898 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 4899 int dst_enc = dst->encoding(); 4900 int nds_enc = nds->encoding(); 4901 int shift_enc = shift->encoding(); 4902 if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) { 4903 Assembler::vpsrlw(dst, nds, shift, vector_len); 4904 } else if ((dst_enc < 16) && (shift_enc < 16)) { 4905 Assembler::vpsrlw(dst, dst, shift, vector_len); 4906 } else if ((dst_enc < 16) && (nds_enc < 16)) { 4907 // use nds_enc as scratch with shift 4908 evmovdqul(nds, shift, Assembler::AVX_512bit); 4909 Assembler::vpsrlw(dst, dst, nds, vector_len); 4910 } else if ((shift_enc < 16) && (nds_enc < 16)) { 4911 // use nds as scratch with dst 4912 evmovdqul(nds, dst, Assembler::AVX_512bit); 4913 Assembler::vpsrlw(nds, nds, shift, vector_len); 4914 evmovdqul(dst, nds, Assembler::AVX_512bit); 4915 } else if (dst_enc < 16) { 4916 // use nds to save a copy of xmm0 and hold shift 4917 evmovdqul(nds, xmm0, Assembler::AVX_512bit); 4918 evmovdqul(xmm0, shift, Assembler::AVX_512bit); 4919 Assembler::vpsrlw(dst, dst, xmm0, vector_len); 4920 evmovdqul(xmm0, nds, Assembler::AVX_512bit); 4921 } else if (nds_enc < 16) { 4922 // use nds as dest as temps 4923 evmovdqul(nds, dst, Assembler::AVX_512bit); 4924 evmovdqul(dst, xmm0, Assembler::AVX_512bit); 4925 evmovdqul(xmm0, shift, Assembler::AVX_512bit); 4926 Assembler::vpsrlw(nds, nds, xmm0, vector_len); 4927 evmovdqul(xmm0, dst, Assembler::AVX_512bit); 4928 evmovdqul(dst, nds, Assembler::AVX_512bit); 4929 } else { 4930 // worse case scenario, all regs are in the upper bank 4931 subptr(rsp, 64); 4932 evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit); 4933 evmovdqul(nds, xmm0, Assembler::AVX_512bit); 4934 evmovdqul(xmm1, shift, Assembler::AVX_512bit); 4935 evmovdqul(xmm0, dst, Assembler::AVX_512bit); 4936 Assembler::vpsllw(xmm0, xmm0, xmm1, vector_len); 4937 evmovdqul(xmm1, dst, Assembler::AVX_512bit); 4938 evmovdqul(dst, xmm0, Assembler::AVX_512bit); 4939 evmovdqul(xmm0, nds, Assembler::AVX_512bit); 4940 evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit); 4941 addptr(rsp, 64); 4942 } 4943 } 4944 4945 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 4946 int dst_enc = dst->encoding(); 4947 int nds_enc = nds->encoding(); 4948 if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) { 4949 
Assembler::vpsrlw(dst, nds, shift, vector_len); 4950 } else if (dst_enc < 16) { 4951 Assembler::vpsrlw(dst, dst, shift, vector_len); 4952 } else if (nds_enc < 16) { 4953 // use nds as scratch 4954 evmovdqul(nds, dst, Assembler::AVX_512bit); 4955 Assembler::vpsrlw(nds, nds, shift, vector_len); 4956 evmovdqul(dst, nds, Assembler::AVX_512bit); 4957 } else { 4958 // use nds as scratch for xmm0 4959 evmovdqul(nds, xmm0, Assembler::AVX_512bit); 4960 evmovdqul(xmm0, dst, Assembler::AVX_512bit); 4961 Assembler::vpsrlw(xmm0, xmm0, shift, vector_len); 4962 evmovdqul(xmm0, nds, Assembler::AVX_512bit); 4963 } 4964 } 4965 4966 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 4967 int dst_enc = dst->encoding(); 4968 int nds_enc = nds->encoding(); 4969 int shift_enc = shift->encoding(); 4970 if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) { 4971 Assembler::vpsllw(dst, nds, shift, vector_len); 4972 } else if ((dst_enc < 16) && (shift_enc < 16)) { 4973 Assembler::vpsllw(dst, dst, shift, vector_len); 4974 } else if ((dst_enc < 16) && (nds_enc < 16)) { 4975 // use nds_enc as scratch with shift 4976 evmovdqul(nds, shift, Assembler::AVX_512bit); 4977 Assembler::vpsllw(dst, dst, nds, vector_len); 4978 } else if ((shift_enc < 16) && (nds_enc < 16)) { 4979 // use nds as scratch with dst 4980 evmovdqul(nds, dst, Assembler::AVX_512bit); 4981 Assembler::vpsllw(nds, nds, shift, vector_len); 4982 evmovdqul(dst, nds, Assembler::AVX_512bit); 4983 } else if (dst_enc < 16) { 4984 // use nds to save a copy of xmm0 and hold shift 4985 evmovdqul(nds, xmm0, Assembler::AVX_512bit); 4986 evmovdqul(xmm0, shift, Assembler::AVX_512bit); 4987 Assembler::vpsllw(dst, dst, xmm0, vector_len); 4988 evmovdqul(xmm0, nds, Assembler::AVX_512bit); 4989 } else if (nds_enc < 16) { 4990 // use nds as dest as temps 4991 evmovdqul(nds, dst, Assembler::AVX_512bit); 4992 evmovdqul(dst, xmm0, Assembler::AVX_512bit); 4993 evmovdqul(xmm0, shift, Assembler::AVX_512bit); 4994 Assembler::vpsllw(nds, nds, xmm0, vector_len); 4995 evmovdqul(xmm0, dst, Assembler::AVX_512bit); 4996 evmovdqul(dst, nds, Assembler::AVX_512bit); 4997 } else { 4998 // worse case scenario, all regs are in the upper bank 4999 subptr(rsp, 64); 5000 evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit); 5001 evmovdqul(nds, xmm0, Assembler::AVX_512bit); 5002 evmovdqul(xmm1, shift, Assembler::AVX_512bit); 5003 evmovdqul(xmm0, dst, Assembler::AVX_512bit); 5004 Assembler::vpsllw(xmm0, xmm0, xmm1, vector_len); 5005 evmovdqul(xmm1, dst, Assembler::AVX_512bit); 5006 evmovdqul(dst, xmm0, Assembler::AVX_512bit); 5007 evmovdqul(xmm0, nds, Assembler::AVX_512bit); 5008 evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit); 5009 addptr(rsp, 64); 5010 } 5011 } 5012 5013 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 5014 int dst_enc = dst->encoding(); 5015 int nds_enc = nds->encoding(); 5016 if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) { 5017 Assembler::vpsllw(dst, nds, shift, vector_len); 5018 } else if (dst_enc < 16) { 5019 Assembler::vpsllw(dst, dst, shift, vector_len); 5020 } else if (nds_enc < 16) { 5021 // use nds as scratch 5022 evmovdqul(nds, dst, Assembler::AVX_512bit); 5023 Assembler::vpsllw(nds, nds, shift, vector_len); 5024 evmovdqul(dst, nds, Assembler::AVX_512bit); 5025 } else { 5026 // use nds as scratch for xmm0 5027 evmovdqul(nds, xmm0, Assembler::AVX_512bit); 5028 evmovdqul(xmm0, dst, Assembler::AVX_512bit); 5029 Assembler::vpsllw(xmm0, 
xmm0, shift, vector_len); 5030 evmovdqul(xmm0, nds, Assembler::AVX_512bit); 5031 } 5032 } 5033 5034 void MacroAssembler::vptest(XMMRegister dst, XMMRegister src) { 5035 int dst_enc = dst->encoding(); 5036 int src_enc = src->encoding(); 5037 if ((dst_enc < 16) && (src_enc < 16)) { 5038 Assembler::vptest(dst, src); 5039 } else if (src_enc < 16) { 5040 subptr(rsp, 64); 5041 evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); 5042 evmovdqul(xmm0, dst, Assembler::AVX_512bit); 5043 Assembler::vptest(xmm0, src); 5044 evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); 5045 addptr(rsp, 64); 5046 } else if (dst_enc < 16) { 5047 subptr(rsp, 64); 5048 evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); 5049 evmovdqul(xmm0, src, Assembler::AVX_512bit); 5050 Assembler::vptest(dst, xmm0); 5051 evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); 5052 addptr(rsp, 64); 5053 } else { 5054 subptr(rsp, 64); 5055 evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); 5056 subptr(rsp, 64); 5057 evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit); 5058 movdqu(xmm0, src); 5059 movdqu(xmm1, dst); 5060 Assembler::vptest(xmm1, xmm0); 5061 evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit); 5062 addptr(rsp, 64); 5063 evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); 5064 addptr(rsp, 64); 5065 } 5066 } 5067 5068 // This instruction exists within macros, ergo we cannot control its input 5069 // when emitted through those patterns. 5070 void MacroAssembler::punpcklbw(XMMRegister dst, XMMRegister src) { 5071 if (VM_Version::supports_avx512nobw()) { 5072 int dst_enc = dst->encoding(); 5073 int src_enc = src->encoding(); 5074 if (dst_enc == src_enc) { 5075 if (dst_enc < 16) { 5076 Assembler::punpcklbw(dst, src); 5077 } else { 5078 subptr(rsp, 64); 5079 evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); 5080 evmovdqul(xmm0, dst, Assembler::AVX_512bit); 5081 Assembler::punpcklbw(xmm0, xmm0); 5082 evmovdqul(dst, xmm0, Assembler::AVX_512bit); 5083 evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); 5084 addptr(rsp, 64); 5085 } 5086 } else { 5087 if ((src_enc < 16) && (dst_enc < 16)) { 5088 Assembler::punpcklbw(dst, src); 5089 } else if (src_enc < 16) { 5090 subptr(rsp, 64); 5091 evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); 5092 evmovdqul(xmm0, dst, Assembler::AVX_512bit); 5093 Assembler::punpcklbw(xmm0, src); 5094 evmovdqul(dst, xmm0, Assembler::AVX_512bit); 5095 evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); 5096 addptr(rsp, 64); 5097 } else if (dst_enc < 16) { 5098 subptr(rsp, 64); 5099 evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); 5100 evmovdqul(xmm0, src, Assembler::AVX_512bit); 5101 Assembler::punpcklbw(dst, xmm0); 5102 evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); 5103 addptr(rsp, 64); 5104 } else { 5105 subptr(rsp, 64); 5106 evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); 5107 subptr(rsp, 64); 5108 evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit); 5109 evmovdqul(xmm0, dst, Assembler::AVX_512bit); 5110 evmovdqul(xmm1, src, Assembler::AVX_512bit); 5111 Assembler::punpcklbw(xmm0, xmm1); 5112 evmovdqul(dst, xmm0, Assembler::AVX_512bit); 5113 evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit); 5114 addptr(rsp, 64); 5115 evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); 5116 addptr(rsp, 64); 5117 } 5118 } 5119 } else { 5120 Assembler::punpcklbw(dst, src); 5121 } 5122 } 5123 5124 void MacroAssembler::pshufd(XMMRegister dst, Address src, int mode) { 5125 if (VM_Version::supports_avx512vl()) { 5126 
Assembler::pshufd(dst, src, mode); 5127 } else { 5128 int dst_enc = dst->encoding(); 5129 if (dst_enc < 16) { 5130 Assembler::pshufd(dst, src, mode); 5131 } else { 5132 subptr(rsp, 64); 5133 evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); 5134 Assembler::pshufd(xmm0, src, mode); 5135 evmovdqul(dst, xmm0, Assembler::AVX_512bit); 5136 evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); 5137 addptr(rsp, 64); 5138 } 5139 } 5140 } 5141 5142 // This instruction exists within macros, ergo we cannot control its input 5143 // when emitted through those patterns. 5144 void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) { 5145 if (VM_Version::supports_avx512nobw()) { 5146 int dst_enc = dst->encoding(); 5147 int src_enc = src->encoding(); 5148 if (dst_enc == src_enc) { 5149 if (dst_enc < 16) { 5150 Assembler::pshuflw(dst, src, mode); 5151 } else { 5152 subptr(rsp, 64); 5153 evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); 5154 evmovdqul(xmm0, dst, Assembler::AVX_512bit); 5155 Assembler::pshuflw(xmm0, xmm0, mode); 5156 evmovdqul(dst, xmm0, Assembler::AVX_512bit); 5157 evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); 5158 addptr(rsp, 64); 5159 } 5160 } else { 5161 if ((src_enc < 16) && (dst_enc < 16)) { 5162 Assembler::pshuflw(dst, src, mode); 5163 } else if (src_enc < 16) { 5164 subptr(rsp, 64); 5165 evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); 5166 evmovdqul(xmm0, dst, Assembler::AVX_512bit); 5167 Assembler::pshuflw(xmm0, src, mode); 5168 evmovdqul(dst, xmm0, Assembler::AVX_512bit); 5169 evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); 5170 addptr(rsp, 64); 5171 } else if (dst_enc < 16) { 5172 subptr(rsp, 64); 5173 evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); 5174 evmovdqul(xmm0, src, Assembler::AVX_512bit); 5175 Assembler::pshuflw(dst, xmm0, mode); 5176 evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); 5177 addptr(rsp, 64); 5178 } else { 5179 subptr(rsp, 64); 5180 evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); 5181 subptr(rsp, 64); 5182 evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit); 5183 evmovdqul(xmm0, dst, Assembler::AVX_512bit); 5184 evmovdqul(xmm1, src, Assembler::AVX_512bit); 5185 Assembler::pshuflw(xmm0, xmm1, mode); 5186 evmovdqul(dst, xmm0, Assembler::AVX_512bit); 5187 evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit); 5188 addptr(rsp, 64); 5189 evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); 5190 addptr(rsp, 64); 5191 } 5192 } 5193 } else { 5194 Assembler::pshuflw(dst, src, mode); 5195 } 5196 } 5197 5198 void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len) { 5199 if (reachable(src)) { 5200 vandpd(dst, nds, as_Address(src), vector_len); 5201 } else { 5202 lea(rscratch1, src); 5203 vandpd(dst, nds, Address(rscratch1, 0), vector_len); 5204 } 5205 } 5206 5207 void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len) { 5208 if (reachable(src)) { 5209 vandps(dst, nds, as_Address(src), vector_len); 5210 } else { 5211 lea(rscratch1, src); 5212 vandps(dst, nds, Address(rscratch1, 0), vector_len); 5213 } 5214 } 5215 5216 void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) { 5217 if (reachable(src)) { 5218 vdivsd(dst, nds, as_Address(src)); 5219 } else { 5220 lea(rscratch1, src); 5221 vdivsd(dst, nds, Address(rscratch1, 0)); 5222 } 5223 } 5224 5225 void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src) { 5226 if (reachable(src)) { 5227 
vdivss(dst, nds, as_Address(src)); 5228 } else { 5229 lea(rscratch1, src); 5230 vdivss(dst, nds, Address(rscratch1, 0)); 5231 } 5232 } 5233 5234 void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) { 5235 if (reachable(src)) { 5236 vmulsd(dst, nds, as_Address(src)); 5237 } else { 5238 lea(rscratch1, src); 5239 vmulsd(dst, nds, Address(rscratch1, 0)); 5240 } 5241 } 5242 5243 void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src) { 5244 if (reachable(src)) { 5245 vmulss(dst, nds, as_Address(src)); 5246 } else { 5247 lea(rscratch1, src); 5248 vmulss(dst, nds, Address(rscratch1, 0)); 5249 } 5250 } 5251 5252 void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) { 5253 if (reachable(src)) { 5254 vsubsd(dst, nds, as_Address(src)); 5255 } else { 5256 lea(rscratch1, src); 5257 vsubsd(dst, nds, Address(rscratch1, 0)); 5258 } 5259 } 5260 5261 void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src) { 5262 if (reachable(src)) { 5263 vsubss(dst, nds, as_Address(src)); 5264 } else { 5265 lea(rscratch1, src); 5266 vsubss(dst, nds, Address(rscratch1, 0)); 5267 } 5268 } 5269 5270 void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src) { 5271 int nds_enc = nds->encoding(); 5272 int dst_enc = dst->encoding(); 5273 bool dst_upper_bank = (dst_enc > 15); 5274 bool nds_upper_bank = (nds_enc > 15); 5275 if (VM_Version::supports_avx512novl() && 5276 (nds_upper_bank || dst_upper_bank)) { 5277 if (dst_upper_bank) { 5278 subptr(rsp, 64); 5279 evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); 5280 movflt(xmm0, nds); 5281 vxorps(xmm0, xmm0, src, Assembler::AVX_128bit); 5282 movflt(dst, xmm0); 5283 evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); 5284 addptr(rsp, 64); 5285 } else { 5286 movflt(dst, nds); 5287 vxorps(dst, dst, src, Assembler::AVX_128bit); 5288 } 5289 } else { 5290 vxorps(dst, nds, src, Assembler::AVX_128bit); 5291 } 5292 } 5293 5294 void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src) { 5295 int nds_enc = nds->encoding(); 5296 int dst_enc = dst->encoding(); 5297 bool dst_upper_bank = (dst_enc > 15); 5298 bool nds_upper_bank = (nds_enc > 15); 5299 if (VM_Version::supports_avx512novl() && 5300 (nds_upper_bank || dst_upper_bank)) { 5301 if (dst_upper_bank) { 5302 subptr(rsp, 64); 5303 evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit); 5304 movdbl(xmm0, nds); 5305 vxorpd(xmm0, xmm0, src, Assembler::AVX_128bit); 5306 movdbl(dst, xmm0); 5307 evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit); 5308 addptr(rsp, 64); 5309 } else { 5310 movdbl(dst, nds); 5311 vxorpd(dst, dst, src, Assembler::AVX_128bit); 5312 } 5313 } else { 5314 vxorpd(dst, nds, src, Assembler::AVX_128bit); 5315 } 5316 } 5317 5318 void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len) { 5319 if (reachable(src)) { 5320 vxorpd(dst, nds, as_Address(src), vector_len); 5321 } else { 5322 lea(rscratch1, src); 5323 vxorpd(dst, nds, Address(rscratch1, 0), vector_len); 5324 } 5325 } 5326 5327 void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len) { 5328 if (reachable(src)) { 5329 vxorps(dst, nds, as_Address(src), vector_len); 5330 } else { 5331 lea(rscratch1, src); 5332 vxorps(dst, nds, Address(rscratch1, 0), vector_len); 5333 } 5334 } 5335 5336 5337 void MacroAssembler::resolve_jobject(Register value, 5338 Register thread, 5339 Register tmp) { 5340 
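  // A jobject is a JNIHandle; a jweak is distinguished from a strong handle by a
  // low tag bit (clear_jweak_tag below asserts ~weak_tag_mask == -2, i.e. the tag
  // is bit 0). A sketch of the decoding performed here, assuming that one-bit
  // tagging scheme:
  //
  //   if (value == NULL)          return NULL;                        // use NULL as-is
  //   if ((uintptr_t)value & 1)   return *(oop*)((char*)value - 1);   // jweak (plus G1 pre-barrier)
  //   return *(oop*)value;                                            // strong jobject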
assert_different_registers(value, thread, tmp); 5341 Label done, not_weak; 5342 testptr(value, value); 5343 jcc(Assembler::zero, done); // Use NULL as-is. 5344 testptr(value, JNIHandles::weak_tag_mask); // Test for jweak tag. 5345 jcc(Assembler::zero, not_weak); 5346 // Resolve jweak. 5347 movptr(value, Address(value, -JNIHandles::weak_tag_value)); 5348 verify_oop(value); 5349 #if INCLUDE_ALL_GCS 5350 if (UseG1GC) { 5351 g1_write_barrier_pre(noreg /* obj */, 5352 value /* pre_val */, 5353 thread /* thread */, 5354 tmp /* tmp */, 5355 true /* tosca_live */, 5356 true /* expand_call */); 5357 } 5358 #endif // INCLUDE_ALL_GCS 5359 jmp(done); 5360 bind(not_weak); 5361 // Resolve (untagged) jobject. 5362 movptr(value, Address(value, 0)); 5363 verify_oop(value); 5364 bind(done); 5365 } 5366 5367 void MacroAssembler::clear_jweak_tag(Register possibly_jweak) { 5368 const int32_t inverted_jweak_mask = ~static_cast<int32_t>(JNIHandles::weak_tag_mask); 5369 STATIC_ASSERT(inverted_jweak_mask == -2); // otherwise check this code 5370 // The inverted mask is sign-extended 5371 andptr(possibly_jweak, inverted_jweak_mask); 5372 } 5373 5374 ////////////////////////////////////////////////////////////////////////////////// 5375 #if INCLUDE_ALL_GCS 5376 5377 void MacroAssembler::g1_write_barrier_pre(Register obj, 5378 Register pre_val, 5379 Register thread, 5380 Register tmp, 5381 bool tosca_live, 5382 bool expand_call) { 5383 5384 // If expand_call is true then we expand the call_VM_leaf macro 5385 // directly to skip generating the check by 5386 // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp. 5387 5388 #ifdef _LP64 5389 assert(thread == r15_thread, "must be"); 5390 #endif // _LP64 5391 5392 Label done; 5393 Label runtime; 5394 5395 assert(pre_val != noreg, "check this code"); 5396 5397 if (obj != noreg) { 5398 assert_different_registers(obj, pre_val, tmp); 5399 assert(pre_val != rax, "check this code"); 5400 } 5401 5402 Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() + 5403 SATBMarkQueue::byte_offset_of_active())); 5404 Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() + 5405 SATBMarkQueue::byte_offset_of_index())); 5406 Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() + 5407 SATBMarkQueue::byte_offset_of_buf())); 5408 5409 5410 // Is marking active? 5411 if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) { 5412 cmpl(in_progress, 0); 5413 } else { 5414 assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption"); 5415 cmpb(in_progress, 0); 5416 } 5417 jcc(Assembler::equal, done); 5418 5419 // Do we need to load the previous value? 5420 if (obj != noreg) { 5421 load_heap_oop(pre_val, Address(obj, 0)); 5422 } 5423 5424 // Is the previous value null? 5425 cmpptr(pre_val, (int32_t) NULL_WORD); 5426 jcc(Assembler::equal, done); 5427 5428 // Can we store original value in the thread's buffer? 5429 // Is index == 0? 5430 // (The index field is typed as size_t.) 5431 5432 movptr(tmp, index); // tmp := *index_adr 5433 cmpptr(tmp, 0); // tmp == 0? 
5434   jcc(Assembler::equal, runtime);         // If yes, goto runtime
5435
5436   subptr(tmp, wordSize);                  // tmp := tmp - wordSize
5437   movptr(index, tmp);                     // *index_adr := tmp
5438   addptr(tmp, buffer);                    // tmp := tmp + *buffer_adr
5439
5440   // Record the previous value
5441   movptr(Address(tmp, 0), pre_val);
5442   jmp(done);
5443
5444   bind(runtime);
5445   // save the live input values
5446   if (tosca_live) push(rax);
5447
5448   if (obj != noreg && obj != rax)
5449     push(obj);
5450
5451   if (pre_val != rax)
5452     push(pre_val);
5453
5454   // Calling the runtime using the regular call_VM_leaf mechanism generates
5455   // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
5456   // that checks that the *(ebp+frame::interpreter_frame_last_sp) == NULL.
5457   //
5458   // If we are generating the pre-barrier without a frame (e.g. in the
5459   // intrinsified Reference.get() routine) then ebp might be pointing to
5460   // the caller frame and so this check will most likely fail at runtime.
5461   //
5462   // Expanding the call directly bypasses the generation of the check.
5463   // So when we do not have a full interpreter frame on the stack,
5464   // expand_call should be passed true.
5465
5466   NOT_LP64( push(thread); )
5467
5468   if (expand_call) {
5469     LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
5470     pass_arg1(this, thread);
5471     pass_arg0(this, pre_val);
5472     MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
5473   } else {
5474     call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
5475   }
5476
5477   NOT_LP64( pop(thread); )
5478
5479   // restore the live input values
5480   if (pre_val != rax)
5481     pop(pre_val);
5482
5483   if (obj != noreg && obj != rax)
5484     pop(obj);
5485
5486   if (tosca_live) pop(rax);
5487
5488   bind(done);
5489 }
5490
5491 void MacroAssembler::g1_write_barrier_post(Register store_addr,
5492                                            Register new_val,
5493                                            Register thread,
5494                                            Register tmp,
5495                                            Register tmp2) {
5496 #ifdef _LP64
5497   assert(thread == r15_thread, "must be");
5498 #endif // _LP64
5499
5500   Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
5501                                        DirtyCardQueue::byte_offset_of_index()));
5502   Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
5503                                   DirtyCardQueue::byte_offset_of_buf()));
5504
5505   CardTableModRefBS* ctbs =
5506     barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
5507   CardTable* ct = ctbs->card_table();
5508   assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
5509
5510   Label done;
5511   Label runtime;
5512
5513   // Does the store cross heap regions?
5514
5515   movptr(tmp, store_addr);
5516   xorptr(tmp, new_val);
5517   shrptr(tmp, HeapRegion::LogOfHRGrainBytes);
5518   jcc(Assembler::equal, done);
5519
5520   // crosses regions, storing NULL?
5521
5522   cmpptr(new_val, (int32_t) NULL_WORD);
5523   jcc(Assembler::equal, done);
5524
5525   // storing region crossing non-NULL, is card already dirty?
5526
5527   const Register card_addr = tmp;
5528   const Register cardtable = tmp2;
5529
5530   movptr(card_addr, store_addr);
5531   shrptr(card_addr, CardTable::card_shift);
5532   // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
5533   // a valid address and therefore is not properly handled by the relocation code.
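  // What the next two instructions compute, as a C expression (a sketch; with
  // the usual 512-byte cards, CardTable::card_shift is 9):
  //
  //   jbyte* card_addr = ct->byte_map_base() + ((uintptr_t)store_addr >> CardTable::card_shift);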
5534 movptr(cardtable, (intptr_t)ct->byte_map_base()); 5535 addptr(card_addr, cardtable); 5536 5537 cmpb(Address(card_addr, 0), (int)G1CardTable::g1_young_card_val()); 5538 jcc(Assembler::equal, done); 5539 5540 membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); 5541 cmpb(Address(card_addr, 0), (int)CardTable::dirty_card_val()); 5542 jcc(Assembler::equal, done); 5543 5544 5545 // storing a region crossing, non-NULL oop, card is clean. 5546 // dirty card and log. 5547 5548 movb(Address(card_addr, 0), (int)CardTable::dirty_card_val()); 5549 5550 cmpl(queue_index, 0); 5551 jcc(Assembler::equal, runtime); 5552 subl(queue_index, wordSize); 5553 movptr(tmp2, buffer); 5554 #ifdef _LP64 5555 movslq(rscratch1, queue_index); 5556 addq(tmp2, rscratch1); 5557 movq(Address(tmp2, 0), card_addr); 5558 #else 5559 addl(tmp2, queue_index); 5560 movl(Address(tmp2, 0), card_addr); 5561 #endif 5562 jmp(done); 5563 5564 bind(runtime); 5565 // save the live input values 5566 push(store_addr); 5567 push(new_val); 5568 #ifdef _LP64 5569 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, r15_thread); 5570 #else 5571 push(thread); 5572 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread); 5573 pop(thread); 5574 #endif 5575 pop(new_val); 5576 pop(store_addr); 5577 5578 bind(done); 5579 } 5580 5581 #endif // INCLUDE_ALL_GCS 5582 ////////////////////////////////////////////////////////////////////////////////// 5583 5584 5585 void MacroAssembler::store_check(Register obj, Address dst) { 5586 store_check(obj); 5587 } 5588 5589 void MacroAssembler::store_check(Register obj) { 5590 // Does a store check for the oop in register obj. The content of 5591 // register obj is destroyed afterwards. 5592 BarrierSet* bs = Universe::heap()->barrier_set(); 5593 assert(bs->kind() == BarrierSet::CardTableModRef, 5594 "Wrong barrier set kind"); 5595 5596 CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); 5597 CardTable* ct = ctbs->card_table(); 5598 assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); 5599 5600 shrptr(obj, CardTable::card_shift); 5601 5602 Address card_addr; 5603 5604 // The calculation for byte_map_base is as follows: 5605 // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift); 5606 // So this essentially converts an address to a displacement and it will 5607 // never need to be relocated. On 64bit however the value may be too 5608 // large for a 32bit displacement. 5609 intptr_t disp = (intptr_t) ct->byte_map_base(); 5610 if (is_simm32(disp)) { 5611 card_addr = Address(noreg, obj, Address::times_1, disp); 5612 } else { 5613 // By doing it as an ExternalAddress 'disp' could be converted to a rip-relative 5614 // displacement and done in a single instruction given favorable mapping and a 5615 // smarter version of as_Address. However, 'ExternalAddress' generates a relocation 5616 // entry and that entry is not properly handled by the relocation code. 
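    // The card mark below is then a single byte store, sketched in pseudo-assembly
    // (disp is byte_map_base; obj already holds the card index from the shift above;
    // the second form needs a scratch register to materialize the 64-bit base):
    //
    //   movb [disp + obj*1], dirty_card_val   ; when disp fits in a signed 32-bit displacement
    //   movb [base + obj*1], dirty_card_val   ; otherwise, with base loaded from byte_map_base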
AddressLiteral cardtable((address)ct->byte_map_base(), relocInfo::none);
5618     Address index(noreg, obj, Address::times_1);
5619     card_addr = as_Address(ArrayAddress(cardtable, index));
5620   }
5621
5622   int dirty = CardTable::dirty_card_val();
5623   if (UseCondCardMark) {
5624     Label L_already_dirty;
5625     if (UseConcMarkSweepGC) {
5626       membar(Assembler::StoreLoad);
5627     }
5628     cmpb(card_addr, dirty);
5629     jcc(Assembler::equal, L_already_dirty);
5630     movb(card_addr, dirty);
5631     bind(L_already_dirty);
5632   } else {
5633     movb(card_addr, dirty);
5634   }
5635 }
5636
5637 void MacroAssembler::subptr(Register dst, int32_t imm32) {
5638   LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
5639 }
5640
5641 // Force generation of a 4-byte immediate value even if it fits into 8 bits
5642 void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
5643   LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32));
5644 }
5645
5646 void MacroAssembler::subptr(Register dst, Register src) {
5647   LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
5648 }
5649
5650 // C++ bool manipulation
5651 void MacroAssembler::testbool(Register dst) {
5652   if (sizeof(bool) == 1)
5653     testb(dst, 0xff);
5654   else if (sizeof(bool) == 2) {
5655     // testw implementation needed for two-byte bools
5656     ShouldNotReachHere();
5657   } else if (sizeof(bool) == 4)
5658     testl(dst, dst);
5659   else
5660     // unsupported
5661     ShouldNotReachHere();
5662 }
5663
5664 void MacroAssembler::testptr(Register dst, Register src) {
5665   LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
5666 }
5667
5668 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
5669 void MacroAssembler::tlab_allocate(Register obj,
5670                                    Register var_size_in_bytes,
5671                                    int con_size_in_bytes,
5672                                    Register t1,
5673                                    Register t2,
5674                                    Label& slow_case) {
5675   assert_different_registers(obj, t1, t2);
5676   assert_different_registers(obj, var_size_in_bytes, t1);
5677   Register end = t2;
5678   Register thread = NOT_LP64(t1) LP64_ONLY(r15_thread);
5679
5680   verify_tlab();
5681
5682   NOT_LP64(get_thread(thread));
5683
5684   movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
5685   if (var_size_in_bytes == noreg) {
5686     lea(end, Address(obj, con_size_in_bytes));
5687   } else {
5688     lea(end, Address(obj, var_size_in_bytes, Address::times_1));
5689   }
5690   cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
5691   jcc(Assembler::above, slow_case);
5692
5693   // update the tlab top pointer
5694   movptr(Address(thread, JavaThread::tlab_top_offset()), end);
5695
5696   // recover var_size_in_bytes if necessary
5697   if (var_size_in_bytes == end) {
5698     subptr(var_size_in_bytes, obj);
5699   }
5700   verify_tlab();
5701 }
5702
5703 // Preserves the contents of address; destroys the contents of length_in_bytes and temp.
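// That is, it zeroes the length_in_bytes bytes starting at address + offset_in_bytes.
// Hypothetical usage sketch for clearing the body of a freshly allocated object with
// a 16-byte header, body size in rdx (actual callers live in the allocation paths):
//
//   zero_memory(obj, rdx, 16, rcx);   // clears [obj + 16, obj + 16 + rdx)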
5704 void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) {
5705   assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different");
5706   assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord");
5707   Label done;
5708
5709   testptr(length_in_bytes, length_in_bytes);
5710   jcc(Assembler::zero, done);
5711
5712   // divide the byte count by 8; on 32-bit, initialize an odd topmost word first (see below)
5713   // note: for the remaining code to work, index must be a multiple of BytesPerWord
5714 #ifdef ASSERT
5715   {
5716     Label L;
5717     testptr(length_in_bytes, BytesPerWord - 1);
5718     jcc(Assembler::zero, L);
5719     stop("length must be a multiple of BytesPerWord");
5720     bind(L);
5721   }
5722 #endif
5723   Register index = length_in_bytes;
5724   xorptr(temp, temp);    // use _zero reg to clear memory (shorter code)
5725   if (UseIncDec) {
5726     shrptr(index, 3);    // divide by 8 and set the carry flag if bit 2 was set
5727   } else {
5728     shrptr(index, 2);    // use 2 instructions to avoid partial flag stall
5729     shrptr(index, 1);
5730   }
5731 #ifndef _LP64
5732   // index might not have been a multiple of 8 (i.e., bit 2 was set)
5733   {
5734     Label even;
5735     // note: if index was a multiple of 8, then it cannot
5736     // be 0 now otherwise it must have been 0 before
5737     // => if it is even, we don't need to check for 0 again
5738     jcc(Assembler::carryClear, even);
5739     // clear topmost word (no jump would be needed if conditional assignment worked here)
5740     movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp);
5741     // index could be 0 now, must check again
5742     jcc(Assembler::zero, done);
5743     bind(even);
5744   }
5745 #endif // !_LP64
5746   // initialize remaining object fields: index is a multiple of 2 now
5747   {
5748     Label loop;
5749     bind(loop);
5750     movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
5751     NOT_LP64(movptr(Address(address, index, Address::times_8, offset_in_bytes - 2*BytesPerWord), temp);)
5752     decrement(index);
5753     jcc(Assembler::notZero, loop);
5754   }
5755
5756   bind(done);
5757 }
5758
5759 void MacroAssembler::incr_allocated_bytes(Register thread,
5760                                           Register var_size_in_bytes,
5761                                           int con_size_in_bytes,
5762                                           Register t1) {
5763   if (!thread->is_valid()) {
5764 #ifdef _LP64
5765     thread = r15_thread;
5766 #else
5767     assert(t1->is_valid(), "need temp reg");
5768     thread = t1;
5769     get_thread(thread);
5770 #endif
5771   }
5772
5773 #ifdef _LP64
5774   if (var_size_in_bytes->is_valid()) {
5775     addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
5776   } else {
5777     addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
5778   }
5779 #else
5780   if (var_size_in_bytes->is_valid()) {
5781     addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
5782   } else {
5783     addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
5784   }
5785   adcl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())+4), 0);
5786 #endif
5787 }
5788
5789 // Look up the method for a megamorphic invokeinterface call.
5790 // The target method is determined by <intf_klass, itable_index>.
5791 // The receiver klass is in recv_klass.
5792 // On success, the result will be in method_result, and execution falls through.
5793 // On failure, execution transfers to the given label.
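// The itable scanned here sits at the end of the vtable and is laid out roughly as
// (a sketch; field names abbreviated):
//
//   itableOffsetEntry { Klass* interface; int offset; }   // repeated, NULL-terminated
//   itableMethodEntry { Method* method; }                 // one block of these per interface
//
// The loop below walks the offset entries looking for intf_klass; the matching
// entry's offset locates that interface's method block, indexed by itable_index.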
5794 void MacroAssembler::lookup_interface_method(Register recv_klass, 5795 Register intf_klass, 5796 RegisterOrConstant itable_index, 5797 Register method_result, 5798 Register scan_temp, 5799 Label& L_no_such_interface, 5800 bool return_method) { 5801 assert_different_registers(recv_klass, intf_klass, scan_temp); 5802 assert_different_registers(method_result, intf_klass, scan_temp); 5803 assert(recv_klass != method_result || !return_method, 5804 "recv_klass can be destroyed when method isn't needed"); 5805 5806 assert(itable_index.is_constant() || itable_index.as_register() == method_result, 5807 "caller must use same register for non-constant itable index as for method"); 5808 5809 // Compute start of first itableOffsetEntry (which is at the end of the vtable) 5810 int vtable_base = in_bytes(Klass::vtable_start_offset()); 5811 int itentry_off = itableMethodEntry::method_offset_in_bytes(); 5812 int scan_step = itableOffsetEntry::size() * wordSize; 5813 int vte_size = vtableEntry::size_in_bytes(); 5814 Address::ScaleFactor times_vte_scale = Address::times_ptr; 5815 assert(vte_size == wordSize, "else adjust times_vte_scale"); 5816 5817 movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset())); 5818 5819 // %%% Could store the aligned, prescaled offset in the klassoop. 5820 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base)); 5821 5822 if (return_method) { 5823 // Adjust recv_klass by scaled itable_index, so we can free itable_index. 5824 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); 5825 lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off)); 5826 } 5827 5828 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) { 5829 // if (scan->interface() == intf) { 5830 // result = (klass + scan->offset() + itable_index); 5831 // } 5832 // } 5833 Label search, found_method; 5834 5835 for (int peel = 1; peel >= 0; peel--) { 5836 movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes())); 5837 cmpptr(intf_klass, method_result); 5838 5839 if (peel) { 5840 jccb(Assembler::equal, found_method); 5841 } else { 5842 jccb(Assembler::notEqual, search); 5843 // (invert the test to fall through to found_method...) 5844 } 5845 5846 if (!peel) break; 5847 5848 bind(search); 5849 5850 // Check that the previous entry is non-null. A null entry means that 5851 // the receiver class doesn't implement the interface, and wasn't the 5852 // same as when the caller was compiled. 5853 testptr(method_result, method_result); 5854 jcc(Assembler::zero, L_no_such_interface); 5855 addptr(scan_temp, scan_step); 5856 } 5857 5858 bind(found_method); 5859 5860 if (return_method) { 5861 // Got a hit. 
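    // scan_temp now points at the matching itableOffsetEntry; its offset field is the
    // byte offset (from recv_klass) of this interface's itableMethodEntry block, and
    // recv_klass was pre-biased by the scaled itable_index above, so a single indexed
    // load yields the Method*.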
5862 movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes())); 5863 movptr(method_result, Address(recv_klass, scan_temp, Address::times_1)); 5864 } 5865 } 5866 5867 5868 // virtual method calling 5869 void MacroAssembler::lookup_virtual_method(Register recv_klass, 5870 RegisterOrConstant vtable_index, 5871 Register method_result) { 5872 const int base = in_bytes(Klass::vtable_start_offset()); 5873 assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below"); 5874 Address vtable_entry_addr(recv_klass, 5875 vtable_index, Address::times_ptr, 5876 base + vtableEntry::method_offset_in_bytes()); 5877 movptr(method_result, vtable_entry_addr); 5878 } 5879 5880 5881 void MacroAssembler::check_klass_subtype(Register sub_klass, 5882 Register super_klass, 5883 Register temp_reg, 5884 Label& L_success) { 5885 Label L_failure; 5886 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, NULL); 5887 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL); 5888 bind(L_failure); 5889 } 5890 5891 5892 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 5893 Register super_klass, 5894 Register temp_reg, 5895 Label* L_success, 5896 Label* L_failure, 5897 Label* L_slow_path, 5898 RegisterOrConstant super_check_offset) { 5899 assert_different_registers(sub_klass, super_klass, temp_reg); 5900 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 5901 if (super_check_offset.is_register()) { 5902 assert_different_registers(sub_klass, super_klass, 5903 super_check_offset.as_register()); 5904 } else if (must_load_sco) { 5905 assert(temp_reg != noreg, "supply either a temp or a register offset"); 5906 } 5907 5908 Label L_fallthrough; 5909 int label_nulls = 0; 5910 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 5911 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 5912 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; } 5913 assert(label_nulls <= 1, "at most one NULL in the batch"); 5914 5915 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 5916 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 5917 Address super_check_offset_addr(super_klass, sco_offset); 5918 5919 // Hacked jcc, which "knows" that L_fallthrough, at least, is in 5920 // range of a jccb. If this routine grows larger, reconsider at 5921 // least some of these. 5922 #define local_jcc(assembler_cond, label) \ 5923 if (&(label) == &L_fallthrough) jccb(assembler_cond, label); \ 5924 else jcc( assembler_cond, label) /*omit semi*/ 5925 5926 // Hacked jmp, which may only be used just before L_fallthrough. 5927 #define final_jmp(label) \ 5928 if (&(label) == &L_fallthrough) { /*do nothing*/ } \ 5929 else jmp(label) /*omit semi*/ 5930 5931 // If the pointers are equal, we are done (e.g., String[] elements). 5932 // This self-check enables sharing of secondary supertype arrays among 5933 // non-primary types such as array-of-interface. Otherwise, each such 5934 // type would need its own customized SSA. 5935 // We move this check to the front of the fast path because many 5936 // type checks are in fact trivially successful in this manner, 5937 // so we get a nicely predicted branch right at the start of the check. 5938 cmpptr(sub_klass, super_klass); 5939 local_jcc(Assembler::equal, *L_success); 5940 5941 // Check the supertype display: 5942 if (must_load_sco) { 5943 // Positive movl does right thing on LP64. 
5944 movl(temp_reg, super_check_offset_addr); 5945 super_check_offset = RegisterOrConstant(temp_reg); 5946 } 5947 Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0); 5948 cmpptr(super_klass, super_check_addr); // load displayed supertype 5949 5950 // This check has worked decisively for primary supers. 5951 // Secondary supers are sought in the super_cache ('super_cache_addr'). 5952 // (Secondary supers are interfaces and very deeply nested subtypes.) 5953 // This works in the same check above because of a tricky aliasing 5954 // between the super_cache and the primary super display elements. 5955 // (The 'super_check_addr' can address either, as the case requires.) 5956 // Note that the cache is updated below if it does not help us find 5957 // what we need immediately. 5958 // So if it was a primary super, we can just fail immediately. 5959 // Otherwise, it's the slow path for us (no success at this point). 5960 5961 if (super_check_offset.is_register()) { 5962 local_jcc(Assembler::equal, *L_success); 5963 cmpl(super_check_offset.as_register(), sc_offset); 5964 if (L_failure == &L_fallthrough) { 5965 local_jcc(Assembler::equal, *L_slow_path); 5966 } else { 5967 local_jcc(Assembler::notEqual, *L_failure); 5968 final_jmp(*L_slow_path); 5969 } 5970 } else if (super_check_offset.as_constant() == sc_offset) { 5971 // Need a slow path; fast failure is impossible. 5972 if (L_slow_path == &L_fallthrough) { 5973 local_jcc(Assembler::equal, *L_success); 5974 } else { 5975 local_jcc(Assembler::notEqual, *L_slow_path); 5976 final_jmp(*L_success); 5977 } 5978 } else { 5979 // No slow path; it's a fast decision. 5980 if (L_failure == &L_fallthrough) { 5981 local_jcc(Assembler::equal, *L_success); 5982 } else { 5983 local_jcc(Assembler::notEqual, *L_failure); 5984 final_jmp(*L_success); 5985 } 5986 } 5987 5988 bind(L_fallthrough); 5989 5990 #undef local_jcc 5991 #undef final_jmp 5992 } 5993 5994 5995 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 5996 Register super_klass, 5997 Register temp_reg, 5998 Register temp2_reg, 5999 Label* L_success, 6000 Label* L_failure, 6001 bool set_cond_codes) { 6002 assert_different_registers(sub_klass, super_klass, temp_reg); 6003 if (temp2_reg != noreg) 6004 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg); 6005 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg) 6006 6007 Label L_fallthrough; 6008 int label_nulls = 0; 6009 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; } 6010 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; } 6011 assert(label_nulls <= 1, "at most one NULL in the batch"); 6012 6013 // a couple of useful fields in sub_klass: 6014 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 6015 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 6016 Address secondary_supers_addr(sub_klass, ss_offset); 6017 Address super_cache_addr( sub_klass, sc_offset); 6018 6019 // Do a linear scan of the secondary super-klass chain. 6020 // This code is rarely used, so simplicity is a virtue here. 6021 // The repne_scan instruction uses fixed registers, which we must spill. 6022 // Don't worry too much about pre-existing connections with the input regs. 6023 6024 assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super) 6025 assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter) 6026 6027 // Get super_klass value into rax (even if it was in rdi or rcx). 
6028 bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false; 6029 if (super_klass != rax || UseCompressedOops) { 6030 if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; } 6031 mov(rax, super_klass); 6032 } 6033 if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; } 6034 if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; } 6035 6036 #ifndef PRODUCT 6037 int* pst_counter = &SharedRuntime::_partial_subtype_ctr; 6038 ExternalAddress pst_counter_addr((address) pst_counter); 6039 NOT_LP64( incrementl(pst_counter_addr) ); 6040 LP64_ONLY( lea(rcx, pst_counter_addr) ); 6041 LP64_ONLY( incrementl(Address(rcx, 0)) ); 6042 #endif //PRODUCT 6043 6044 // We will consult the secondary-super array. 6045 movptr(rdi, secondary_supers_addr); 6046 // Load the array length. (Positive movl does right thing on LP64.) 6047 movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes())); 6048 // Skip to start of data. 6049 addptr(rdi, Array<Klass*>::base_offset_in_bytes()); 6050 6051 // Scan RCX words at [RDI] for an occurrence of RAX. 6052 // Set NZ/Z based on last compare. 6053 // Z flag value will not be set by 'repne' if RCX == 0 since 'repne' does 6054 // not change flags (only scas instruction which is repeated sets flags). 6055 // Set Z = 0 (not equal) before 'repne' to indicate that class was not found. 6056 6057 testptr(rax,rax); // Set Z = 0 6058 repne_scan(); 6059 6060 // Unspill the temp. registers: 6061 if (pushed_rdi) pop(rdi); 6062 if (pushed_rcx) pop(rcx); 6063 if (pushed_rax) pop(rax); 6064 6065 if (set_cond_codes) { 6066 // Special hack for the AD files: rdi is guaranteed non-zero. 6067 assert(!pushed_rdi, "rdi must be left non-NULL"); 6068 // Also, the condition codes are properly set Z/NZ on succeed/failure. 6069 } 6070 6071 if (L_failure == &L_fallthrough) 6072 jccb(Assembler::notEqual, *L_failure); 6073 else jcc(Assembler::notEqual, *L_failure); 6074 6075 // Success. Cache the super we found and proceed in triumph. 6076 movptr(super_cache_addr, super_klass); 6077 6078 if (L_success != &L_fallthrough) { 6079 jmp(*L_success); 6080 } 6081 6082 #undef IS_A_TEMP 6083 6084 bind(L_fallthrough); 6085 } 6086 6087 6088 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) { 6089 if (VM_Version::supports_cmov()) { 6090 cmovl(cc, dst, src); 6091 } else { 6092 Label L; 6093 jccb(negate_condition(cc), L); 6094 movl(dst, src); 6095 bind(L); 6096 } 6097 } 6098 6099 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) { 6100 if (VM_Version::supports_cmov()) { 6101 cmovl(cc, dst, src); 6102 } else { 6103 Label L; 6104 jccb(negate_condition(cc), L); 6105 movl(dst, src); 6106 bind(L); 6107 } 6108 } 6109 6110 void MacroAssembler::verify_oop(Register reg, const char* s) { 6111 if (!VerifyOops || VerifyAdapterSharing) { 6112 // Below address of the code string confuses VerifyAdapterSharing 6113 // because it may differ between otherwise equivalent adapters. 
6114 return; 6115 } 6116 6117 // Pass register number to verify_oop_subroutine 6118 const char* b = NULL; 6119 { 6120 ResourceMark rm; 6121 stringStream ss; 6122 ss.print("verify_oop: %s: %s", reg->name(), s); 6123 b = code_string(ss.as_string()); 6124 } 6125 BLOCK_COMMENT("verify_oop {"); 6126 #ifdef _LP64 6127 push(rscratch1); // save r10, trashed by movptr() 6128 #endif 6129 push(rax); // save rax, 6130 push(reg); // pass register argument 6131 ExternalAddress buffer((address) b); 6132 // avoid using pushptr, as it modifies scratch registers 6133 // and our contract is not to modify anything 6134 movptr(rax, buffer.addr()); 6135 push(rax); 6136 // call indirectly to solve generation ordering problem 6137 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 6138 call(rax); 6139 // Caller pops the arguments (oop, message) and restores rax, r10 6140 BLOCK_COMMENT("} verify_oop"); 6141 } 6142 6143 6144 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, 6145 Register tmp, 6146 int offset) { 6147 intptr_t value = *delayed_value_addr; 6148 if (value != 0) 6149 return RegisterOrConstant(value + offset); 6150 6151 // load indirectly to solve generation ordering problem 6152 movptr(tmp, ExternalAddress((address) delayed_value_addr)); 6153 6154 #ifdef ASSERT 6155 { Label L; 6156 testptr(tmp, tmp); 6157 if (WizardMode) { 6158 const char* buf = NULL; 6159 { 6160 ResourceMark rm; 6161 stringStream ss; 6162 ss.print("DelayedValue=" INTPTR_FORMAT, delayed_value_addr[1]); 6163 buf = code_string(ss.as_string()); 6164 } 6165 jcc(Assembler::notZero, L); 6166 STOP(buf); 6167 } else { 6168 jccb(Assembler::notZero, L); 6169 hlt(); 6170 } 6171 bind(L); 6172 } 6173 #endif 6174 6175 if (offset != 0) 6176 addptr(tmp, offset); 6177 6178 return RegisterOrConstant(tmp); 6179 } 6180 6181 6182 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 6183 int extra_slot_offset) { 6184 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 6185 int stackElementSize = Interpreter::stackElementSize; 6186 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0); 6187 #ifdef ASSERT 6188 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1); 6189 assert(offset1 - offset == stackElementSize, "correct arithmetic"); 6190 #endif 6191 Register scale_reg = noreg; 6192 Address::ScaleFactor scale_factor = Address::no_scale; 6193 if (arg_slot.is_constant()) { 6194 offset += arg_slot.as_constant() * stackElementSize; 6195 } else { 6196 scale_reg = arg_slot.as_register(); 6197 scale_factor = Address::times(stackElementSize); 6198 } 6199 offset += wordSize; // return PC is on stack 6200 return Address(rsp, scale_reg, scale_factor, offset); 6201 } 6202 6203 6204 void MacroAssembler::verify_oop_addr(Address addr, const char* s) { 6205 if (!VerifyOops || VerifyAdapterSharing) { 6206 // Below address of the code string confuses VerifyAdapterSharing 6207 // because it may differ between otherwise equivalent adapters. 
6208 return; 6209 } 6210 6211 // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord); 6212 // Pass register number to verify_oop_subroutine 6213 const char* b = NULL; 6214 { 6215 ResourceMark rm; 6216 stringStream ss; 6217 ss.print("verify_oop_addr: %s", s); 6218 b = code_string(ss.as_string()); 6219 } 6220 #ifdef _LP64 6221 push(rscratch1); // save r10, trashed by movptr() 6222 #endif 6223 push(rax); // save rax, 6224 // addr may contain rsp so we will have to adjust it based on the push 6225 // we just did (and on 64 bit we do two pushes) 6226 // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which 6227 // stores rax into addr which is backwards of what was intended. 6228 if (addr.uses(rsp)) { 6229 lea(rax, addr); 6230 pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord)); 6231 } else { 6232 pushptr(addr); 6233 } 6234 6235 ExternalAddress buffer((address) b); 6236 // pass msg argument 6237 // avoid using pushptr, as it modifies scratch registers 6238 // and our contract is not to modify anything 6239 movptr(rax, buffer.addr()); 6240 push(rax); 6241 6242 // call indirectly to solve generation ordering problem 6243 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 6244 call(rax); 6245 // Caller pops the arguments (addr, message) and restores rax, r10. 6246 } 6247 6248 void MacroAssembler::verify_tlab() { 6249 #ifdef ASSERT 6250 if (UseTLAB && VerifyOops) { 6251 Label next, ok; 6252 Register t1 = rsi; 6253 Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread); 6254 6255 push(t1); 6256 NOT_LP64(push(thread_reg)); 6257 NOT_LP64(get_thread(thread_reg)); 6258 6259 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); 6260 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset()))); 6261 jcc(Assembler::aboveEqual, next); 6262 STOP("assert(top >= start)"); 6263 should_not_reach_here(); 6264 6265 bind(next); 6266 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset()))); 6267 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); 6268 jcc(Assembler::aboveEqual, ok); 6269 STOP("assert(top <= end)"); 6270 should_not_reach_here(); 6271 6272 bind(ok); 6273 NOT_LP64(pop(thread_reg)); 6274 pop(t1); 6275 } 6276 #endif 6277 } 6278 6279 class ControlWord { 6280 public: 6281 int32_t _value; 6282 6283 int rounding_control() const { return (_value >> 10) & 3 ; } 6284 int precision_control() const { return (_value >> 8) & 3 ; } 6285 bool precision() const { return ((_value >> 5) & 1) != 0; } 6286 bool underflow() const { return ((_value >> 4) & 1) != 0; } 6287 bool overflow() const { return ((_value >> 3) & 1) != 0; } 6288 bool zero_divide() const { return ((_value >> 2) & 1) != 0; } 6289 bool denormalized() const { return ((_value >> 1) & 1) != 0; } 6290 bool invalid() const { return ((_value >> 0) & 1) != 0; } 6291 6292 void print() const { 6293 // rounding control 6294 const char* rc; 6295 switch (rounding_control()) { 6296 case 0: rc = "round near"; break; 6297 case 1: rc = "round down"; break; 6298 case 2: rc = "round up "; break; 6299 case 3: rc = "chop "; break; 6300 }; 6301 // precision control 6302 const char* pc; 6303 switch (precision_control()) { 6304 case 0: pc = "24 bits "; break; 6305 case 1: pc = "reserved"; break; 6306 case 2: pc = "53 bits "; break; 6307 case 3: pc = "64 bits "; break; 6308 }; 6309 // flags 6310 char f[9]; 6311 f[0] = ' '; 6312 f[1] = ' '; 6313 f[2] = (precision ()) ? 'P' : 'p'; 6314 f[3] = (underflow ()) ? 
'U' : 'u'; 6315 f[4] = (overflow ()) ? 'O' : 'o'; 6316 f[5] = (zero_divide ()) ? 'Z' : 'z'; 6317 f[6] = (denormalized()) ? 'D' : 'd'; 6318 f[7] = (invalid ()) ? 'I' : 'i'; 6319 f[8] = '\x0'; 6320 // output 6321 printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc); 6322 } 6323 6324 }; 6325 6326 class StatusWord { 6327 public: 6328 int32_t _value; 6329 6330 bool busy() const { return ((_value >> 15) & 1) != 0; } 6331 bool C3() const { return ((_value >> 14) & 1) != 0; } 6332 bool C2() const { return ((_value >> 10) & 1) != 0; } 6333 bool C1() const { return ((_value >> 9) & 1) != 0; } 6334 bool C0() const { return ((_value >> 8) & 1) != 0; } 6335 int top() const { return (_value >> 11) & 7 ; } 6336 bool error_status() const { return ((_value >> 7) & 1) != 0; } 6337 bool stack_fault() const { return ((_value >> 6) & 1) != 0; } 6338 bool precision() const { return ((_value >> 5) & 1) != 0; } 6339 bool underflow() const { return ((_value >> 4) & 1) != 0; } 6340 bool overflow() const { return ((_value >> 3) & 1) != 0; } 6341 bool zero_divide() const { return ((_value >> 2) & 1) != 0; } 6342 bool denormalized() const { return ((_value >> 1) & 1) != 0; } 6343 bool invalid() const { return ((_value >> 0) & 1) != 0; } 6344 6345 void print() const { 6346 // condition codes 6347 char c[5]; 6348 c[0] = (C3()) ? '3' : '-'; 6349 c[1] = (C2()) ? '2' : '-'; 6350 c[2] = (C1()) ? '1' : '-'; 6351 c[3] = (C0()) ? '0' : '-'; 6352 c[4] = '\x0'; 6353 // flags 6354 char f[9]; 6355 f[0] = (error_status()) ? 'E' : '-'; 6356 f[1] = (stack_fault ()) ? 'S' : '-'; 6357 f[2] = (precision ()) ? 'P' : '-'; 6358 f[3] = (underflow ()) ? 'U' : '-'; 6359 f[4] = (overflow ()) ? 'O' : '-'; 6360 f[5] = (zero_divide ()) ? 'Z' : '-'; 6361 f[6] = (denormalized()) ? 'D' : '-'; 6362 f[7] = (invalid ()) ? 'I' : '-'; 6363 f[8] = '\x0'; 6364 // output 6365 printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top()); 6366 } 6367 6368 }; 6369 6370 class TagWord { 6371 public: 6372 int32_t _value; 6373 6374 int tag_at(int i) const { return (_value >> (i*2)) & 3; } 6375 6376 void print() const { 6377 printf("%04x", _value & 0xFFFF); 6378 } 6379 6380 }; 6381 6382 class FPU_Register { 6383 public: 6384 int32_t _m0; 6385 int32_t _m1; 6386 int16_t _ex; 6387 6388 bool is_indefinite() const { 6389 return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0; 6390 } 6391 6392 void print() const { 6393 char sign = (_ex < 0) ? '-' : '+'; 6394 const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? 
"NaN" : " "; 6395 printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind); 6396 }; 6397 6398 }; 6399 6400 class FPU_State { 6401 public: 6402 enum { 6403 register_size = 10, 6404 number_of_registers = 8, 6405 register_mask = 7 6406 }; 6407 6408 ControlWord _control_word; 6409 StatusWord _status_word; 6410 TagWord _tag_word; 6411 int32_t _error_offset; 6412 int32_t _error_selector; 6413 int32_t _data_offset; 6414 int32_t _data_selector; 6415 int8_t _register[register_size * number_of_registers]; 6416 6417 int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); } 6418 FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; } 6419 6420 const char* tag_as_string(int tag) const { 6421 switch (tag) { 6422 case 0: return "valid"; 6423 case 1: return "zero"; 6424 case 2: return "special"; 6425 case 3: return "empty"; 6426 } 6427 ShouldNotReachHere(); 6428 return NULL; 6429 } 6430 6431 void print() const { 6432 // print computation registers 6433 { int t = _status_word.top(); 6434 for (int i = 0; i < number_of_registers; i++) { 6435 int j = (i - t) & register_mask; 6436 printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j); 6437 st(j)->print(); 6438 printf(" %s\n", tag_as_string(_tag_word.tag_at(i))); 6439 } 6440 } 6441 printf("\n"); 6442 // print control registers 6443 printf("ctrl = "); _control_word.print(); printf("\n"); 6444 printf("stat = "); _status_word .print(); printf("\n"); 6445 printf("tags = "); _tag_word .print(); printf("\n"); 6446 } 6447 6448 }; 6449 6450 class Flag_Register { 6451 public: 6452 int32_t _value; 6453 6454 bool overflow() const { return ((_value >> 11) & 1) != 0; } 6455 bool direction() const { return ((_value >> 10) & 1) != 0; } 6456 bool sign() const { return ((_value >> 7) & 1) != 0; } 6457 bool zero() const { return ((_value >> 6) & 1) != 0; } 6458 bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; } 6459 bool parity() const { return ((_value >> 2) & 1) != 0; } 6460 bool carry() const { return ((_value >> 0) & 1) != 0; } 6461 6462 void print() const { 6463 // flags 6464 char f[8]; 6465 f[0] = (overflow ()) ? 'O' : '-'; 6466 f[1] = (direction ()) ? 'D' : '-'; 6467 f[2] = (sign ()) ? 'S' : '-'; 6468 f[3] = (zero ()) ? 'Z' : '-'; 6469 f[4] = (auxiliary_carry()) ? 'A' : '-'; 6470 f[5] = (parity ()) ? 'P' : '-'; 6471 f[6] = (carry ()) ? 
'C' : '-';
6472     f[7] = '\x0';
6473     // output
6474     printf("%08x flags = %s", _value, f);
6475   }
6476
6477 };
6478
6479 class IU_Register {
6480  public:
6481   int32_t _value;
6482
6483   void print() const {
6484     printf("%08x %11d", _value, _value);
6485   }
6486
6487 };
6488
6489 class IU_State {
6490  public:
6491   Flag_Register _eflags;
6492   IU_Register _rdi;
6493   IU_Register _rsi;
6494   IU_Register _rbp;
6495   IU_Register _rsp;
6496   IU_Register _rbx;
6497   IU_Register _rdx;
6498   IU_Register _rcx;
6499   IU_Register _rax;
6500
6501   void print() const {
6502     // computation registers
6503     printf("rax = "); _rax.print(); printf("\n");
6504     printf("rbx = "); _rbx.print(); printf("\n");
6505     printf("rcx = "); _rcx.print(); printf("\n");
6506     printf("rdx = "); _rdx.print(); printf("\n");
6507     printf("rdi = "); _rdi.print(); printf("\n");
6508     printf("rsi = "); _rsi.print(); printf("\n");
6509     printf("rbp = "); _rbp.print(); printf("\n");
6510     printf("rsp = "); _rsp.print(); printf("\n");
6511     printf("\n");
6512     // control registers
6513     printf("flgs = "); _eflags.print(); printf("\n");
6514   }
6515 };
6516
6517
6518 class CPU_State {
6519  public:
6520   FPU_State _fpu_state;
6521   IU_State  _iu_state;
6522
6523   void print() const {
6524     printf("--------------------------------------------------\n");
6525     _iu_state .print();
6526     printf("\n");
6527     _fpu_state.print();
6528     printf("--------------------------------------------------\n");
6529   }
6530
6531 };
6532
6533
6534 static void _print_CPU_state(CPU_State* state) {
6535   state->print();
6536 }
6537
6538
6539 void MacroAssembler::print_CPU_state() {
6540   push_CPU_state();
6541   push(rsp);                // pass CPU state
6542   call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
6543   addptr(rsp, wordSize);    // discard argument
6544   pop_CPU_state();
6545 }
6546
6547
6548 static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) {
6549   static int counter = 0;
6550   FPU_State* fs = &state->_fpu_state;
6551   counter++;
6552   // For leaf calls, only verify that the top few elements remain empty.
6553   // We only need 1 empty at the top for C2 code.
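  // Reading the checks below: each x87 register has a 2-bit tag in the tag
  // word (see tag_as_string() above: 0 = valid, 1 = zero, 2 = special,
  // 3 = empty), and tag_for_st(i) indexes it relative to the current
  // top-of-stack. Roughly:
  //
  //   ST(i) is empty  <==>  _tag_word.tag_at((top + i) & 7) == 3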
6554   if (stack_depth < 0) {
6555     if (fs->tag_for_st(7) != 3) {
6556       printf("FPR7 not empty\n");
6557       state->print();
6558       assert(false, "error");
6559       return false;
6560     }
6561     return true;                // All other stack states do not matter
6562   }
6563
6564   assert((fs->_control_word._value & 0xffff) == StubRoutines::_fpu_cntrl_wrd_std,
6565          "bad FPU control word");
6566
6567   // compute stack depth
6568   int i = 0;
6569   while (i < FPU_State::number_of_registers && fs->tag_for_st(i) < 3) i++;
6570   int d = i;
6571   while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++;
6572   // verify findings
6573   if (i != FPU_State::number_of_registers) {
6574     // stack not contiguous
6575     printf("%s: stack not contiguous at ST%d\n", s, i);
6576     state->print();
6577     assert(false, "error");
6578     return false;
6579   }
6580   // check if computed stack depth corresponds to expected stack depth
6581   if (stack_depth < 0) {
6582     // expected stack depth is -stack_depth or less
6583     if (d > -stack_depth) {
6584       // too many elements on the stack
6585       printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d);
6586       state->print();
6587       assert(false, "error");
6588       return false;
6589     }
6590   } else {
6591     // expected stack depth is stack_depth
6592     if (d != stack_depth) {
6593       // wrong stack depth
6594       printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d);
6595       state->print();
6596       assert(false, "error");
6597       return false;
6598     }
6599   }
6600   // everything is cool
6601   return true;
6602 }
6603
6604
6605 void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
6606   if (!VerifyFPU) return;
6607   push_CPU_state();
6608   push(rsp);                // pass CPU state
6609   ExternalAddress msg((address) s);
6610   // pass message string s
6611   pushptr(msg.addr());
6612   push(stack_depth);        // pass stack depth
6613   call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
6614   addptr(rsp, 3 * wordSize);   // discard arguments
6615   // check for error
6616   { Label L;
6617     testl(rax, rax);
6618     jcc(Assembler::notZero, L);
6619     int3();                  // break if error condition
6620     bind(L);
6621   }
6622   pop_CPU_state();
6623 }
6624
6625 void MacroAssembler::restore_cpu_control_state_after_jni() {
6626   // Either restore the MXCSR register after returning from the JNI call
6627   // or verify that it wasn't changed (with -Xcheck:jni flag).
6628   if (VM_Version::supports_sse()) {
6629     if (RestoreMXCSROnJNICalls) {
6630       ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
6631     } else if (CheckJNICalls) {
6632       call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
6633     }
6634   }
6635   // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
6636   vzeroupper();
6637   // Reset k1 to 0xffff.
6638   if (VM_Version::supports_evex()) {
6639     push(rcx);
6640     movl(rcx, 0xffff);
6641     kmovwl(k1, rcx);
6642     pop(rcx);
6643   }
6644
6645 #ifndef _LP64
6646   // Either restore the x87 floating point control word after returning
6647   // from the JNI call or verify that it wasn't changed.
6648   if (CheckJNICalls) {
6649     call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
6650   }
6651 #endif // _LP64
6652 }
6653
6654 // ((OopHandle)result).resolve();
6655 void MacroAssembler::resolve_oop_handle(Register result) {
6656   // OopHandle::resolve is an indirection.
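  // In effect this is a single dependent load, result = *(oop*)result,
  // emitted as the movptr below.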
6657   movptr(result, Address(result, 0));
6658 }
6659
6660 void MacroAssembler::load_mirror(Register mirror, Register method) {
6661   // get mirror
6662   const int mirror_offset = in_bytes(Klass::java_mirror_offset());
6663   movptr(mirror, Address(method, Method::const_offset()));
6664   movptr(mirror, Address(mirror, ConstMethod::constants_offset()));
6665   movptr(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes()));
6666   movptr(mirror, Address(mirror, mirror_offset));
6667   resolve_oop_handle(mirror);
6668 }
6669
6670 void MacroAssembler::load_klass(Register dst, Register src) {
6671 #ifdef _LP64
6672   if (UseCompressedClassPointers) {
6673     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
6674     decode_klass_not_null(dst);
6675   } else
6676 #endif
6677     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
6678 }
6679
6680 void MacroAssembler::load_prototype_header(Register dst, Register src) {
6681   load_klass(dst, src);
6682   movptr(dst, Address(dst, Klass::prototype_header_offset()));
6683 }
6684
6685 void MacroAssembler::store_klass(Register dst, Register src) {
6686 #ifdef _LP64
6687   if (UseCompressedClassPointers) {
6688     encode_klass_not_null(src);
6689     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
6690   } else
6691 #endif
6692     movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
6693 }
6694
6695 void MacroAssembler::load_heap_oop(Register dst, Address src) {
6696 #ifdef _LP64
6697   // FIXME: Must change all places where we try to load the klass.
6698   if (UseCompressedOops) {
6699     movl(dst, src);
6700     decode_heap_oop(dst);
6701   } else
6702 #endif
6703     movptr(dst, src);
6704 }
6705
6706 // Doesn't do verification, generates fixed size code
6707 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src) {
6708 #ifdef _LP64
6709   if (UseCompressedOops) {
6710     movl(dst, src);
6711     decode_heap_oop_not_null(dst);
6712   } else
6713 #endif
6714     movptr(dst, src);
6715 }
6716
6717 void MacroAssembler::store_heap_oop(Address dst, Register src) {
6718 #ifdef _LP64
6719   if (UseCompressedOops) {
6720     assert(!dst.uses(src), "not enough registers");
6721     encode_heap_oop(src);
6722     movl(dst, src);
6723   } else
6724 #endif
6725     movptr(dst, src);
6726 }
6727
6728 void MacroAssembler::cmp_heap_oop(Register src1, Address src2, Register tmp) {
6729   assert_different_registers(src1, tmp);
6730 #ifdef _LP64
6731   if (UseCompressedOops) {
6732     bool did_push = false;
6733     if (tmp == noreg) {
6734       tmp = rax;
6735       push(tmp);
6736       did_push = true;
6737       assert(!src2.uses(rsp), "can't push");
6738     }
6739     load_heap_oop(tmp, src2);
6740     cmpptr(src1, tmp);
6741     if (did_push) pop(tmp);
6742   } else
6743 #endif
6744     cmpptr(src1, src2);
6745 }
6746
6747 // Used for storing NULLs.
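// With compressed oops a null reference is simply a 32-bit zero, so a plain
// movl suffices; the uncompressed 64-bit case stores a sign-extended 32-bit
// zero to cover the full pointer slot. Conceptually:
//
//   *dst = UseCompressedOops ? (narrowOop)0 : (oop)NULL;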
6748 void MacroAssembler::store_heap_oop_null(Address dst) { 6749 #ifdef _LP64 6750 if (UseCompressedOops) { 6751 movl(dst, (int32_t)NULL_WORD); 6752 } else { 6753 movslq(dst, (int32_t)NULL_WORD); 6754 } 6755 #else 6756 movl(dst, (int32_t)NULL_WORD); 6757 #endif 6758 } 6759 6760 #ifdef _LP64 6761 void MacroAssembler::store_klass_gap(Register dst, Register src) { 6762 if (UseCompressedClassPointers) { 6763 // Store to klass gap in destination 6764 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src); 6765 } 6766 } 6767 6768 #ifdef ASSERT 6769 void MacroAssembler::verify_heapbase(const char* msg) { 6770 assert (UseCompressedOops, "should be compressed"); 6771 assert (Universe::heap() != NULL, "java heap should be initialized"); 6772 if (CheckCompressedOops) { 6773 Label ok; 6774 push(rscratch1); // cmpptr trashes rscratch1 6775 cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr())); 6776 jcc(Assembler::equal, ok); 6777 STOP(msg); 6778 bind(ok); 6779 pop(rscratch1); 6780 } 6781 } 6782 #endif 6783 6784 // Algorithm must match oop.inline.hpp encode_heap_oop. 6785 void MacroAssembler::encode_heap_oop(Register r) { 6786 #ifdef ASSERT 6787 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?"); 6788 #endif 6789 verify_oop(r, "broken oop in encode_heap_oop"); 6790 if (Universe::narrow_oop_base() == NULL) { 6791 if (Universe::narrow_oop_shift() != 0) { 6792 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 6793 shrq(r, LogMinObjAlignmentInBytes); 6794 } 6795 return; 6796 } 6797 testq(r, r); 6798 cmovq(Assembler::equal, r, r12_heapbase); 6799 subq(r, r12_heapbase); 6800 shrq(r, LogMinObjAlignmentInBytes); 6801 } 6802 6803 void MacroAssembler::encode_heap_oop_not_null(Register r) { 6804 #ifdef ASSERT 6805 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 6806 if (CheckCompressedOops) { 6807 Label ok; 6808 testq(r, r); 6809 jcc(Assembler::notEqual, ok); 6810 STOP("null oop passed to encode_heap_oop_not_null"); 6811 bind(ok); 6812 } 6813 #endif 6814 verify_oop(r, "broken oop in encode_heap_oop_not_null"); 6815 if (Universe::narrow_oop_base() != NULL) { 6816 subq(r, r12_heapbase); 6817 } 6818 if (Universe::narrow_oop_shift() != 0) { 6819 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 6820 shrq(r, LogMinObjAlignmentInBytes); 6821 } 6822 } 6823 6824 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 6825 #ifdef ASSERT 6826 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 6827 if (CheckCompressedOops) { 6828 Label ok; 6829 testq(src, src); 6830 jcc(Assembler::notEqual, ok); 6831 STOP("null oop passed to encode_heap_oop_not_null2"); 6832 bind(ok); 6833 } 6834 #endif 6835 verify_oop(src, "broken oop in encode_heap_oop_not_null2"); 6836 if (dst != src) { 6837 movq(dst, src); 6838 } 6839 if (Universe::narrow_oop_base() != NULL) { 6840 subq(dst, r12_heapbase); 6841 } 6842 if (Universe::narrow_oop_shift() != 0) { 6843 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 6844 shrq(dst, LogMinObjAlignmentInBytes); 6845 } 6846 } 6847 6848 void MacroAssembler::decode_heap_oop(Register r) { 6849 #ifdef ASSERT 6850 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 6851 #endif 6852 if (Universe::narrow_oop_base() == NULL) { 6853 if (Universe::narrow_oop_shift() != 0) { 6854 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), 
"decode alg wrong"); 6855 shlq(r, LogMinObjAlignmentInBytes); 6856 } 6857 } else { 6858 Label done; 6859 shlq(r, LogMinObjAlignmentInBytes); 6860 jccb(Assembler::equal, done); 6861 addq(r, r12_heapbase); 6862 bind(done); 6863 } 6864 verify_oop(r, "broken oop in decode_heap_oop"); 6865 } 6866 6867 void MacroAssembler::decode_heap_oop_not_null(Register r) { 6868 // Note: it will change flags 6869 assert (UseCompressedOops, "should only be used for compressed headers"); 6870 assert (Universe::heap() != NULL, "java heap should be initialized"); 6871 // Cannot assert, unverified entry point counts instructions (see .ad file) 6872 // vtableStubs also counts instructions in pd_code_size_limit. 6873 // Also do not verify_oop as this is called by verify_oop. 6874 if (Universe::narrow_oop_shift() != 0) { 6875 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 6876 shlq(r, LogMinObjAlignmentInBytes); 6877 if (Universe::narrow_oop_base() != NULL) { 6878 addq(r, r12_heapbase); 6879 } 6880 } else { 6881 assert (Universe::narrow_oop_base() == NULL, "sanity"); 6882 } 6883 } 6884 6885 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 6886 // Note: it will change flags 6887 assert (UseCompressedOops, "should only be used for compressed headers"); 6888 assert (Universe::heap() != NULL, "java heap should be initialized"); 6889 // Cannot assert, unverified entry point counts instructions (see .ad file) 6890 // vtableStubs also counts instructions in pd_code_size_limit. 6891 // Also do not verify_oop as this is called by verify_oop. 6892 if (Universe::narrow_oop_shift() != 0) { 6893 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong"); 6894 if (LogMinObjAlignmentInBytes == Address::times_8) { 6895 leaq(dst, Address(r12_heapbase, src, Address::times_8, 0)); 6896 } else { 6897 if (dst != src) { 6898 movq(dst, src); 6899 } 6900 shlq(dst, LogMinObjAlignmentInBytes); 6901 if (Universe::narrow_oop_base() != NULL) { 6902 addq(dst, r12_heapbase); 6903 } 6904 } 6905 } else { 6906 assert (Universe::narrow_oop_base() == NULL, "sanity"); 6907 if (dst != src) { 6908 movq(dst, src); 6909 } 6910 } 6911 } 6912 6913 void MacroAssembler::encode_klass_not_null(Register r) { 6914 if (Universe::narrow_klass_base() != NULL) { 6915 // Use r12 as a scratch register in which to temporarily load the narrow_klass_base. 
6916 assert(r != r12_heapbase, "Encoding a klass in r12"); 6917 mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base()); 6918 subq(r, r12_heapbase); 6919 } 6920 if (Universe::narrow_klass_shift() != 0) { 6921 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 6922 shrq(r, LogKlassAlignmentInBytes); 6923 } 6924 if (Universe::narrow_klass_base() != NULL) { 6925 reinit_heapbase(); 6926 } 6927 } 6928 6929 void MacroAssembler::encode_klass_not_null(Register dst, Register src) { 6930 if (dst == src) { 6931 encode_klass_not_null(src); 6932 } else { 6933 if (Universe::narrow_klass_base() != NULL) { 6934 mov64(dst, (int64_t)Universe::narrow_klass_base()); 6935 negq(dst); 6936 addq(dst, src); 6937 } else { 6938 movptr(dst, src); 6939 } 6940 if (Universe::narrow_klass_shift() != 0) { 6941 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 6942 shrq(dst, LogKlassAlignmentInBytes); 6943 } 6944 } 6945 } 6946 6947 // Function instr_size_for_decode_klass_not_null() counts the instructions 6948 // generated by decode_klass_not_null(register r) and reinit_heapbase(), 6949 // when (Universe::heap() != NULL). Hence, if the instructions they 6950 // generate change, then this method needs to be updated. 6951 int MacroAssembler::instr_size_for_decode_klass_not_null() { 6952 assert (UseCompressedClassPointers, "only for compressed klass ptrs"); 6953 if (Universe::narrow_klass_base() != NULL) { 6954 // mov64 + addq + shlq? + mov64 (for reinit_heapbase()). 6955 return (Universe::narrow_klass_shift() == 0 ? 20 : 24); 6956 } else { 6957 // longest load decode klass function, mov64, leaq 6958 return 16; 6959 } 6960 } 6961 6962 // !!! If the instructions that get generated here change then function 6963 // instr_size_for_decode_klass_not_null() needs to get updated. 6964 void MacroAssembler::decode_klass_not_null(Register r) { 6965 // Note: it will change flags 6966 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6967 assert(r != r12_heapbase, "Decoding a klass in r12"); 6968 // Cannot assert, unverified entry point counts instructions (see .ad file) 6969 // vtableStubs also counts instructions in pd_code_size_limit. 6970 // Also do not verify_oop as this is called by verify_oop. 6971 if (Universe::narrow_klass_shift() != 0) { 6972 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 6973 shlq(r, LogKlassAlignmentInBytes); 6974 } 6975 // Use r12 as a scratch register in which to temporarily load the narrow_klass_base. 6976 if (Universe::narrow_klass_base() != NULL) { 6977 mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base()); 6978 addq(r, r12_heapbase); 6979 reinit_heapbase(); 6980 } 6981 } 6982 6983 void MacroAssembler::decode_klass_not_null(Register dst, Register src) { 6984 // Note: it will change flags 6985 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6986 if (dst == src) { 6987 decode_klass_not_null(dst); 6988 } else { 6989 // Cannot assert, unverified entry point counts instructions (see .ad file) 6990 // vtableStubs also counts instructions in pd_code_size_limit. 6991 // Also do not verify_oop as this is called by verify_oop. 
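    // The sequence below computes dst = narrow_klass_base + (src << shift);
    // when the shift is times_8 it folds into a single leaq off the base
    // that was first loaded into dst.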
6992 mov64(dst, (int64_t)Universe::narrow_klass_base()); 6993 if (Universe::narrow_klass_shift() != 0) { 6994 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); 6995 assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?"); 6996 leaq(dst, Address(dst, src, Address::times_8, 0)); 6997 } else { 6998 addq(dst, src); 6999 } 7000 } 7001 } 7002 7003 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 7004 assert (UseCompressedOops, "should only be used for compressed headers"); 7005 assert (Universe::heap() != NULL, "java heap should be initialized"); 7006 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); 7007 int oop_index = oop_recorder()->find_index(obj); 7008 RelocationHolder rspec = oop_Relocation::spec(oop_index); 7009 mov_narrow_oop(dst, oop_index, rspec); 7010 } 7011 7012 void MacroAssembler::set_narrow_oop(Address dst, jobject obj) { 7013 assert (UseCompressedOops, "should only be used for compressed headers"); 7014 assert (Universe::heap() != NULL, "java heap should be initialized"); 7015 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); 7016 int oop_index = oop_recorder()->find_index(obj); 7017 RelocationHolder rspec = oop_Relocation::spec(oop_index); 7018 mov_narrow_oop(dst, oop_index, rspec); 7019 } 7020 7021 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 7022 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 7023 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); 7024 int klass_index = oop_recorder()->find_index(k); 7025 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 7026 mov_narrow_oop(dst, Klass::encode_klass(k), rspec); 7027 } 7028 7029 void MacroAssembler::set_narrow_klass(Address dst, Klass* k) { 7030 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 7031 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); 7032 int klass_index = oop_recorder()->find_index(k); 7033 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 7034 mov_narrow_oop(dst, Klass::encode_klass(k), rspec); 7035 } 7036 7037 void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) { 7038 assert (UseCompressedOops, "should only be used for compressed headers"); 7039 assert (Universe::heap() != NULL, "java heap should be initialized"); 7040 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); 7041 int oop_index = oop_recorder()->find_index(obj); 7042 RelocationHolder rspec = oop_Relocation::spec(oop_index); 7043 Assembler::cmp_narrow_oop(dst, oop_index, rspec); 7044 } 7045 7046 void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) { 7047 assert (UseCompressedOops, "should only be used for compressed headers"); 7048 assert (Universe::heap() != NULL, "java heap should be initialized"); 7049 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); 7050 int oop_index = oop_recorder()->find_index(obj); 7051 RelocationHolder rspec = oop_Relocation::spec(oop_index); 7052 Assembler::cmp_narrow_oop(dst, oop_index, rspec); 7053 } 7054 7055 void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) { 7056 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 7057 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder"); 7058 int klass_index = oop_recorder()->find_index(k); 7059 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 7060 
Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec);
7061 }
7062
7063 void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
7064   assert (UseCompressedClassPointers, "should only be used for compressed headers");
7065   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
7066   int klass_index = oop_recorder()->find_index(k);
7067   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
7068   Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec);
7069 }
7070
7071 void MacroAssembler::reinit_heapbase() {
7072   if (UseCompressedOops || UseCompressedClassPointers) {
7073     if (Universe::heap() != NULL) {
7074       if (Universe::narrow_oop_base() == NULL) {
7075         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
7076       } else {
7077         mov64(r12_heapbase, (int64_t)Universe::narrow_ptrs_base());
7078       }
7079     } else {
7080       movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
7081     }
7082   }
7083 }
7084
7085 #endif // _LP64
7086
7087 // C2 compiled method's prolog code.
7088 void MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b) {
7089
7090   // WARNING: Initial instruction MUST be 5 bytes or longer so that
7091   // NativeJump::patch_verified_entry will be able to patch out the entry
7092   // code safely. The push to verify stack depth is ok at 5 bytes,
7093   // the frame allocation can be either 3 or 6 bytes. So if we don't do
7094   // stack bang then we must use the 6 byte frame allocation even if
7095   // we have no frame. :-(
7096   assert(stack_bang_size >= framesize || stack_bang_size <= 0, "stack bang size incorrect");
7097
7098   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
7099   // Remove word for return addr
7100   framesize -= wordSize;
7101   stack_bang_size -= wordSize;
7102
7103   // Calls to C2R adapters often do not accept exceptional returns.
7104   // We require that their callers must bang for them. But be careful, because
7105   // some VM calls (such as call site linkage) can use several kilobytes of
7106   // stack. But the stack safety zone should account for that.
7107   // See bugs 4446381, 4468289, 4497237.
7108   if (stack_bang_size > 0) {
7109     generate_stack_overflow_check(stack_bang_size);
7110
7111     // We always push rbp so that on return to the interpreter rbp will be
7112     // restored correctly and we can correct the stack.
7113     push(rbp);
7114     // Save caller's stack pointer into RBP if the frame pointer is preserved.
7115     if (PreserveFramePointer) {
7116       mov(rbp, rsp);
7117     }
7118     // Remove word for ebp
7119     framesize -= wordSize;
7120
7121     // Create frame
7122     if (framesize) {
7123       subptr(rsp, framesize);
7124     }
7125   } else {
7126     // Create frame (force generation of a 4 byte immediate value)
7127     subptr_imm32(rsp, framesize);
7128
7129     // Save RBP register now.
7130     framesize -= wordSize;
7131     movptr(Address(rsp, framesize), rbp);
7132     // Save caller's stack pointer into RBP if the frame pointer is preserved.
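    // In this branch rbp was stored with a mov rather than a push, so it is
    // rebuilt as rsp + framesize, i.e. pointing at the spilled rbp slot
    // written above, just as a conventional push(rbp); mov(rbp, rsp)
    // prologue would leave it.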
7133 if (PreserveFramePointer) { 7134 movptr(rbp, rsp); 7135 if (framesize > 0) { 7136 addptr(rbp, framesize); 7137 } 7138 } 7139 } 7140 7141 if (VerifyStackAtCalls) { // Majik cookie to verify stack depth 7142 framesize -= wordSize; 7143 movptr(Address(rsp, framesize), (int32_t)0xbadb100d); 7144 } 7145 7146 #ifndef _LP64 7147 // If method sets FPU control word do it now 7148 if (fp_mode_24b) { 7149 fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24())); 7150 } 7151 if (UseSSE >= 2 && VerifyFPU) { 7152 verify_FPU(0, "FPU stack must be clean on entry"); 7153 } 7154 #endif 7155 7156 #ifdef ASSERT 7157 if (VerifyStackAtCalls) { 7158 Label L; 7159 push(rax); 7160 mov(rax, rsp); 7161 andptr(rax, StackAlignmentInBytes-1); 7162 cmpptr(rax, StackAlignmentInBytes-wordSize); 7163 pop(rax); 7164 jcc(Assembler::equal, L); 7165 STOP("Stack is not properly aligned!"); 7166 bind(L); 7167 } 7168 #endif 7169 7170 } 7171 7172 void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp, bool is_large) { 7173 // cnt - number of qwords (8-byte words). 7174 // base - start address, qword aligned. 7175 // is_large - if optimizers know cnt is larger than InitArrayShortSize 7176 assert(base==rdi, "base register must be edi for rep stos"); 7177 assert(tmp==rax, "tmp register must be eax for rep stos"); 7178 assert(cnt==rcx, "cnt register must be ecx for rep stos"); 7179 assert(InitArrayShortSize % BytesPerLong == 0, 7180 "InitArrayShortSize should be the multiple of BytesPerLong"); 7181 7182 Label DONE; 7183 7184 xorptr(tmp, tmp); 7185 7186 if (!is_large) { 7187 Label LOOP, LONG; 7188 cmpptr(cnt, InitArrayShortSize/BytesPerLong); 7189 jccb(Assembler::greater, LONG); 7190 7191 NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM 7192 7193 decrement(cnt); 7194 jccb(Assembler::negative, DONE); // Zero length 7195 7196 // Use individual pointer-sized stores for small counts: 7197 BIND(LOOP); 7198 movptr(Address(base, cnt, Address::times_ptr), tmp); 7199 decrement(cnt); 7200 jccb(Assembler::greaterEqual, LOOP); 7201 jmpb(DONE); 7202 7203 BIND(LONG); 7204 } 7205 7206 // Use longer rep-prefixed ops for non-small counts: 7207 if (UseFastStosb) { 7208 shlptr(cnt, 3); // convert to number of bytes 7209 rep_stosb(); 7210 } else { 7211 NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM 7212 rep_stos(); 7213 } 7214 7215 BIND(DONE); 7216 } 7217 7218 #ifdef COMPILER2 7219 7220 // IndexOf for constant substrings with size >= 8 chars 7221 // which don't need to be loaded through stack. 7222 void MacroAssembler::string_indexofC8(Register str1, Register str2, 7223 Register cnt1, Register cnt2, 7224 int int_cnt2, Register result, 7225 XMMRegister vec, Register tmp, 7226 int ae) { 7227 ShortBranchVerifier sbv(this); 7228 assert(UseSSE42Intrinsics, "SSE4.2 intrinsics are required"); 7229 assert(ae != StrIntrinsicNode::LU, "Invalid encoding"); 7230 7231 // This method uses the pcmpestri instruction with bound registers 7232 // inputs: 7233 // xmm - substring 7234 // rax - substring length (elements count) 7235 // mem - scanned string 7236 // rdx - string length (elements count) 7237 // 0xd - mode: 1100 (substring search) + 01 (unsigned shorts) 7238 // 0xc - mode: 1100 (substring search) + 00 (unsigned bytes) 7239 // outputs: 7240 // rcx - matched index in string 7241 assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri"); 7242 int mode = (ae == StrIntrinsicNode::LL) ? 0x0c : 0x0d; // bytes or shorts 7243 int stride = (ae == StrIntrinsicNode::LL) ? 
16 : 8; //UU, UL -> 8
7244   Address::ScaleFactor scale1 = (ae == StrIntrinsicNode::LL) ? Address::times_1 : Address::times_2;
7245   Address::ScaleFactor scale2 = (ae == StrIntrinsicNode::UL) ? Address::times_1 : scale1;
7246
7247   Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR,
7248         RET_FOUND, RET_NOT_FOUND, EXIT, FOUND_SUBSTR,
7249         MATCH_SUBSTR_HEAD, RELOAD_STR, FOUND_CANDIDATE;
7250
7251   // Note, inline_string_indexOf() generates checks:
7252   // if (substr.count > string.count) return -1;
7253   // if (substr.count == 0) return 0;
7254   assert(int_cnt2 >= stride, "this code is used only for cnt2 >= 8 chars");
7255
7256   // Load substring.
7257   if (ae == StrIntrinsicNode::UL) {
7258     pmovzxbw(vec, Address(str2, 0));
7259   } else {
7260     movdqu(vec, Address(str2, 0));
7261   }
7262   movl(cnt2, int_cnt2);
7263   movptr(result, str1); // string addr
7264
7265   if (int_cnt2 > stride) {
7266     jmpb(SCAN_TO_SUBSTR);
7267
7268     // Reload substr for rescan, this code
7269     // is executed only for large substrings (> 8 chars)
7270     bind(RELOAD_SUBSTR);
7271     if (ae == StrIntrinsicNode::UL) {
7272       pmovzxbw(vec, Address(str2, 0));
7273     } else {
7274       movdqu(vec, Address(str2, 0));
7275     }
7276     negptr(cnt2); // Jumped here with negative cnt2, convert to positive
7277
7278     bind(RELOAD_STR);
7279     // We came here after the beginning of the substring was
7280     // matched but the rest of it was not so we need to search
7281     // again. Start from the next element after the previous match.
7282
7283     // cnt2 is the number of substring elements remaining and
7284     // cnt1 is the number of string elements remaining when cmp failed.
7285     // Restored cnt1 = cnt1 - cnt2 + int_cnt2
7286     subl(cnt1, cnt2);
7287     addl(cnt1, int_cnt2);
7288     movl(cnt2, int_cnt2); // Now restore cnt2
7289
7290     decrementl(cnt1);     // Shift to next element
7291     cmpl(cnt1, cnt2);
7292     jcc(Assembler::negative, RET_NOT_FOUND);  // Fewer elements left than the substring
7293
7294     addptr(result, (1<<scale1));
7295
7296   } // (int_cnt2 > 8)
7297
7298   // Scan string for start of substr in 16-byte vectors
7299   bind(SCAN_TO_SUBSTR);
7300   pcmpestri(vec, Address(result, 0), mode);
7301   jccb(Assembler::below, FOUND_CANDIDATE);   // CF == 1
7302   subl(cnt1, stride);
7303   jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
7304   cmpl(cnt1, cnt2);
7305   jccb(Assembler::negative, RET_NOT_FOUND);  // Fewer elements left than the substring
7306   addptr(result, 16);
7307   jmpb(SCAN_TO_SUBSTR);
7308
7309   // Found a potential substr
7310   bind(FOUND_CANDIDATE);
7311   // Matched whole vector if first element matched (tmp(rcx) == 0).
7312   if (int_cnt2 == stride) {
7313     jccb(Assembler::overflow, RET_FOUND);    // OF == 1
7314   } else { // int_cnt2 > 8
7315     jccb(Assembler::overflow, FOUND_SUBSTR);
7316   }
7317   // After pcmpestri tmp(rcx) contains matched element index
7318   // Compute start addr of substr
7319   lea(result, Address(result, tmp, scale1));
7320
7321   // Make sure string is still long enough
7322   subl(cnt1, tmp);
7323   cmpl(cnt1, cnt2);
7324   if (int_cnt2 == stride) {
7325     jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
7326   } else { // int_cnt2 > 8
7327     jccb(Assembler::greaterEqual, MATCH_SUBSTR_HEAD);
7328   }
7329   // Fewer elements left than the substring.
7330
7331   bind(RET_NOT_FOUND);
7332   movl(result, -1);
7333   jmp(EXIT);
7334
7335   if (int_cnt2 > stride) {
7336     // This code is optimized for the case when the whole substring
7337     // is matched if its head is matched.
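    // Reminder of the pcmpestri flag protocol used throughout this loop
    // (equal-ordered mode): CF set means some candidate matched and
    // tmp(rcx) holds its element index, while OF reflects a match starting
    // at element 0, i.e. the whole loaded vector matched the substring head.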
7338 bind(MATCH_SUBSTR_HEAD); 7339 pcmpestri(vec, Address(result, 0), mode); 7340 // Reload only string if does not match 7341 jcc(Assembler::noOverflow, RELOAD_STR); // OF == 0 7342 7343 Label CONT_SCAN_SUBSTR; 7344 // Compare the rest of substring (> 8 chars). 7345 bind(FOUND_SUBSTR); 7346 // First 8 chars are already matched. 7347 negptr(cnt2); 7348 addptr(cnt2, stride); 7349 7350 bind(SCAN_SUBSTR); 7351 subl(cnt1, stride); 7352 cmpl(cnt2, -stride); // Do not read beyond substring 7353 jccb(Assembler::lessEqual, CONT_SCAN_SUBSTR); 7354 // Back-up strings to avoid reading beyond substring: 7355 // cnt1 = cnt1 - cnt2 + 8 7356 addl(cnt1, cnt2); // cnt2 is negative 7357 addl(cnt1, stride); 7358 movl(cnt2, stride); negptr(cnt2); 7359 bind(CONT_SCAN_SUBSTR); 7360 if (int_cnt2 < (int)G) { 7361 int tail_off1 = int_cnt2<<scale1; 7362 int tail_off2 = int_cnt2<<scale2; 7363 if (ae == StrIntrinsicNode::UL) { 7364 pmovzxbw(vec, Address(str2, cnt2, scale2, tail_off2)); 7365 } else { 7366 movdqu(vec, Address(str2, cnt2, scale2, tail_off2)); 7367 } 7368 pcmpestri(vec, Address(result, cnt2, scale1, tail_off1), mode); 7369 } else { 7370 // calculate index in register to avoid integer overflow (int_cnt2*2) 7371 movl(tmp, int_cnt2); 7372 addptr(tmp, cnt2); 7373 if (ae == StrIntrinsicNode::UL) { 7374 pmovzxbw(vec, Address(str2, tmp, scale2, 0)); 7375 } else { 7376 movdqu(vec, Address(str2, tmp, scale2, 0)); 7377 } 7378 pcmpestri(vec, Address(result, tmp, scale1, 0), mode); 7379 } 7380 // Need to reload strings pointers if not matched whole vector 7381 jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0 7382 addptr(cnt2, stride); 7383 jcc(Assembler::negative, SCAN_SUBSTR); 7384 // Fall through if found full substring 7385 7386 } // (int_cnt2 > 8) 7387 7388 bind(RET_FOUND); 7389 // Found result if we matched full small substring. 7390 // Compute substr offset 7391 subptr(result, str1); 7392 if (ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::UL) { 7393 shrl(result, 1); // index 7394 } 7395 bind(EXIT); 7396 7397 } // string_indexofC8 7398 7399 // Small strings are loaded through stack if they cross page boundary. 7400 void MacroAssembler::string_indexof(Register str1, Register str2, 7401 Register cnt1, Register cnt2, 7402 int int_cnt2, Register result, 7403 XMMRegister vec, Register tmp, 7404 int ae) { 7405 ShortBranchVerifier sbv(this); 7406 assert(UseSSE42Intrinsics, "SSE4.2 intrinsics are required"); 7407 assert(ae != StrIntrinsicNode::LU, "Invalid encoding"); 7408 7409 // 7410 // int_cnt2 is length of small (< 8 chars) constant substring 7411 // or (-1) for non constant substring in which case its length 7412 // is in cnt2 register. 7413 // 7414 // Note, inline_string_indexOf() generates checks: 7415 // if (substr.count > string.count) return -1; 7416 // if (substr.count == 0) return 0; 7417 // 7418 int stride = (ae == StrIntrinsicNode::LL) ? 16 : 8; //UU, UL -> 8 7419 assert(int_cnt2 == -1 || (0 < int_cnt2 && int_cnt2 < stride), "should be != 0"); 7420 // This method uses the pcmpestri instruction with bound registers 7421 // inputs: 7422 // xmm - substring 7423 // rax - substring length (elements count) 7424 // mem - scanned string 7425 // rdx - string length (elements count) 7426 // 0xd - mode: 1100 (substring search) + 01 (unsigned shorts) 7427 // 0xc - mode: 1100 (substring search) + 00 (unsigned bytes) 7428 // outputs: 7429 // rcx - matched index in string 7430 assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri"); 7431 int mode = (ae == StrIntrinsicNode::LL) ? 
0x0c : 0x0d; // bytes or shorts
7432   Address::ScaleFactor scale1 = (ae == StrIntrinsicNode::LL) ? Address::times_1 : Address::times_2;
7433   Address::ScaleFactor scale2 = (ae == StrIntrinsicNode::UL) ? Address::times_1 : scale1;
7434
7435   Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR, ADJUST_STR,
7436         RET_FOUND, RET_NOT_FOUND, CLEANUP, FOUND_SUBSTR,
7437         FOUND_CANDIDATE;
7438
7439   { //========================================================
7440     // We don't know where these strings are located
7441     // and we can't read beyond them. Load them through stack.
7442     Label BIG_STRINGS, CHECK_STR, COPY_SUBSTR, COPY_STR;
7443
7444     movptr(tmp, rsp); // save old SP
7445
7446     if (int_cnt2 > 0) {     // small (< 8 chars) constant substring
7447       if (int_cnt2 == (1>>scale2)) { // One byte
7448         assert((ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UL), "Only possible for latin1 encoding");
7449         load_unsigned_byte(result, Address(str2, 0));
7450         movdl(vec, result); // move 32 bits
7451       } else if (ae == StrIntrinsicNode::LL && int_cnt2 == 3) { // Three bytes
7452         // Not enough header space in 32-bit VM: 12+3 = 15.
7453         movl(result, Address(str2, -1));
7454         shrl(result, 8);
7455         movdl(vec, result); // move 32 bits
7456       } else if (ae != StrIntrinsicNode::UL && int_cnt2 == (2>>scale2)) { // One char
7457         load_unsigned_short(result, Address(str2, 0));
7458         movdl(vec, result); // move 32 bits
7459       } else if (ae != StrIntrinsicNode::UL && int_cnt2 == (4>>scale2)) { // Two chars
7460         movdl(vec, Address(str2, 0)); // move 32 bits
7461       } else if (ae != StrIntrinsicNode::UL && int_cnt2 == (8>>scale2)) { // Four chars
7462         movq(vec, Address(str2, 0));  // move 64 bits
7463       } else { // cnt2 = { 3, 5, 6, 7 } || (ae == StrIntrinsicNode::UL && cnt2 = {2, ..., 7})
7464         // Array header size is 12 bytes in 32-bit VM
7465         // + 6 bytes for 3 chars == 18 bytes,
7466         // enough space to load vec and shift.
7467         assert(HeapWordSize*TypeArrayKlass::header_size() >= 12,"sanity");
7468         if (ae == StrIntrinsicNode::UL) {
7469           int tail_off = int_cnt2-8;
7470           pmovzxbw(vec, Address(str2, tail_off));
7471           psrldq(vec, -2*tail_off);
7472         }
7473         else {
7474           int tail_off = int_cnt2*(1<<scale2);
7475           movdqu(vec, Address(str2, tail_off-16));
7476           psrldq(vec, 16-tail_off);
7477         }
7478       }
7479     } else { // not constant substring
7480       cmpl(cnt2, stride);
7481       jccb(Assembler::aboveEqual, BIG_STRINGS); // Both strings are big enough
7482
7483       // We can read beyond the string if str+16 does not cross a page boundary
7484       // since heaps are aligned and mapped by pages.
7485       assert(os::vm_page_size() < (int)G, "default page should be small");
7486       movl(result, str2); // We need only low 32 bits
7487       andl(result, (os::vm_page_size()-1));
7488       cmpl(result, (os::vm_page_size()-16));
7489       jccb(Assembler::belowEqual, CHECK_STR);
7490
7491       // Move small strings to stack to allow load 16 bytes into vec.
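      // (The branch above falls through only when (str2 & (page_size-1)) is
      // within the last 15 bytes of a page, i.e. a 16-byte load at str2
      // might touch the next, possibly unmapped, page.)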
7492 subptr(rsp, 16); 7493 int stk_offset = wordSize-(1<<scale2); 7494 push(cnt2); 7495 7496 bind(COPY_SUBSTR); 7497 if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UL) { 7498 load_unsigned_byte(result, Address(str2, cnt2, scale2, -1)); 7499 movb(Address(rsp, cnt2, scale2, stk_offset), result); 7500 } else if (ae == StrIntrinsicNode::UU) { 7501 load_unsigned_short(result, Address(str2, cnt2, scale2, -2)); 7502 movw(Address(rsp, cnt2, scale2, stk_offset), result); 7503 } 7504 decrement(cnt2); 7505 jccb(Assembler::notZero, COPY_SUBSTR); 7506 7507 pop(cnt2); 7508 movptr(str2, rsp); // New substring address 7509 } // non constant 7510 7511 bind(CHECK_STR); 7512 cmpl(cnt1, stride); 7513 jccb(Assembler::aboveEqual, BIG_STRINGS); 7514 7515 // Check cross page boundary. 7516 movl(result, str1); // We need only low 32 bits 7517 andl(result, (os::vm_page_size()-1)); 7518 cmpl(result, (os::vm_page_size()-16)); 7519 jccb(Assembler::belowEqual, BIG_STRINGS); 7520 7521 subptr(rsp, 16); 7522 int stk_offset = -(1<<scale1); 7523 if (int_cnt2 < 0) { // not constant 7524 push(cnt2); 7525 stk_offset += wordSize; 7526 } 7527 movl(cnt2, cnt1); 7528 7529 bind(COPY_STR); 7530 if (ae == StrIntrinsicNode::LL) { 7531 load_unsigned_byte(result, Address(str1, cnt2, scale1, -1)); 7532 movb(Address(rsp, cnt2, scale1, stk_offset), result); 7533 } else { 7534 load_unsigned_short(result, Address(str1, cnt2, scale1, -2)); 7535 movw(Address(rsp, cnt2, scale1, stk_offset), result); 7536 } 7537 decrement(cnt2); 7538 jccb(Assembler::notZero, COPY_STR); 7539 7540 if (int_cnt2 < 0) { // not constant 7541 pop(cnt2); 7542 } 7543 movptr(str1, rsp); // New string address 7544 7545 bind(BIG_STRINGS); 7546 // Load substring. 7547 if (int_cnt2 < 0) { // -1 7548 if (ae == StrIntrinsicNode::UL) { 7549 pmovzxbw(vec, Address(str2, 0)); 7550 } else { 7551 movdqu(vec, Address(str2, 0)); 7552 } 7553 push(cnt2); // substr count 7554 push(str2); // substr addr 7555 push(str1); // string addr 7556 } else { 7557 // Small (< 8 chars) constant substrings are loaded already. 7558 movl(cnt2, int_cnt2); 7559 } 7560 push(tmp); // original SP 7561 7562 } // Finished loading 7563 7564 //======================================================== 7565 // Start search 7566 // 7567 7568 movptr(result, str1); // string addr 7569 7570 if (int_cnt2 < 0) { // Only for non constant substring 7571 jmpb(SCAN_TO_SUBSTR); 7572 7573 // SP saved at sp+0 7574 // String saved at sp+1*wordSize 7575 // Substr saved at sp+2*wordSize 7576 // Substr count saved at sp+3*wordSize 7577 7578 // Reload substr for rescan, this code 7579 // is executed only for large substrings (> 8 chars) 7580 bind(RELOAD_SUBSTR); 7581 movptr(str2, Address(rsp, 2*wordSize)); 7582 movl(cnt2, Address(rsp, 3*wordSize)); 7583 if (ae == StrIntrinsicNode::UL) { 7584 pmovzxbw(vec, Address(str2, 0)); 7585 } else { 7586 movdqu(vec, Address(str2, 0)); 7587 } 7588 // We came here after the beginning of the substring was 7589 // matched but the rest of it was not so we need to search 7590 // again. Start from the next element after the previous match. 
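    // At this point result still holds the candidate start while str1 was
    // advanced during the partial match; the subtraction below turns str1
    // into the (positive) element distance scanned past the candidate,
    // which is added back to cnt1 to restore the count remaining at that
    // candidate before stepping past it.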
7591     subptr(str1, result); // Restore counter
7592     if (ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::UL) {
7593       shrl(str1, 1);
7594     }
7595     addl(cnt1, str1);
7596     decrementl(cnt1);   // Shift to next element
7597     cmpl(cnt1, cnt2);
7598     jcc(Assembler::negative, RET_NOT_FOUND);  // Fewer elements left than the substring
7599
7600     addptr(result, (1<<scale1));
7601   } // non constant
7602
7603   // Scan string for start of substr in 16-byte vectors
7604   bind(SCAN_TO_SUBSTR);
7605   assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
7606   pcmpestri(vec, Address(result, 0), mode);
7607   jccb(Assembler::below, FOUND_CANDIDATE);   // CF == 1
7608   subl(cnt1, stride);
7609   jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
7610   cmpl(cnt1, cnt2);
7611   jccb(Assembler::negative, RET_NOT_FOUND);  // Fewer elements left than the substring
7612   addptr(result, 16);
7613
7614   bind(ADJUST_STR);
7615   cmpl(cnt1, stride); // Do not read beyond string
7616   jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
7617   // Back-up string to avoid reading beyond string.
7618   lea(result, Address(result, cnt1, scale1, -16));
7619   movl(cnt1, stride);
7620   jmpb(SCAN_TO_SUBSTR);
7621
7622   // Found a potential substr
7623   bind(FOUND_CANDIDATE);
7624   // After pcmpestri tmp(rcx) contains matched element index
7625
7626   // Make sure string is still long enough
7627   subl(cnt1, tmp);
7628   cmpl(cnt1, cnt2);
7629   jccb(Assembler::greaterEqual, FOUND_SUBSTR);
7630   // Fewer elements left than the substring.
7631
7632   bind(RET_NOT_FOUND);
7633   movl(result, -1);
7634   jmpb(CLEANUP);
7635
7636   bind(FOUND_SUBSTR);
7637   // Compute start addr of substr
7638   lea(result, Address(result, tmp, scale1));
7639   if (int_cnt2 > 0) { // Constant substring
7640     // Repeat search for small substring (< 8 chars)
7641     // from new point without reloading substring.
7642     // Have to check that we don't read beyond string.
7643     cmpl(tmp, stride-int_cnt2);
7644     jccb(Assembler::greater, ADJUST_STR);
7645     // Fall through if matched whole substring.
7646   } else { // non constant
7647     assert(int_cnt2 == -1, "should be != 0");
7648
7649     addl(tmp, cnt2);
7650     // Found result if we matched whole substring.
7651     cmpl(tmp, stride);
7652     jccb(Assembler::lessEqual, RET_FOUND);
7653
7654     // Repeat search for small substring (<= 8 chars)
7655     // from new point 'str1' without reloading substring.
7656     cmpl(cnt2, stride);
7657     // Have to check that we don't read beyond string.
7658     jccb(Assembler::lessEqual, ADJUST_STR);
7659
7660     Label CHECK_NEXT, CONT_SCAN_SUBSTR, RET_FOUND_LONG;
7661     // Compare the rest of substring (> 8 chars).
7662     movptr(str1, result);
7663
7664     cmpl(tmp, cnt2);
7665     // First 8 chars are already matched.
7666     jccb(Assembler::equal, CHECK_NEXT);
7667
7668     bind(SCAN_SUBSTR);
7669     pcmpestri(vec, Address(str1, 0), mode);
7670     // Need to reload string pointers if we did not match the whole vector
7671     jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
7672
7673     bind(CHECK_NEXT);
7674     subl(cnt2, stride);
7675     jccb(Assembler::lessEqual, RET_FOUND_LONG); // Found full substring
7676     addptr(str1, 16);
7677     if (ae == StrIntrinsicNode::UL) {
7678       addptr(str2, 8);
7679     } else {
7680       addptr(str2, 16);
7681     }
7682     subl(cnt1, stride);
7683     cmpl(cnt2, stride); // Do not read beyond substring
7684     jccb(Assembler::greaterEqual, CONT_SCAN_SUBSTR);
7685     // Back-up strings to avoid reading beyond substring.
7686 7687 if (ae == StrIntrinsicNode::UL) { 7688 lea(str2, Address(str2, cnt2, scale2, -8)); 7689 lea(str1, Address(str1, cnt2, scale1, -16)); 7690 } else { 7691 lea(str2, Address(str2, cnt2, scale2, -16)); 7692 lea(str1, Address(str1, cnt2, scale1, -16)); 7693 } 7694 subl(cnt1, cnt2); 7695 movl(cnt2, stride); 7696 addl(cnt1, stride); 7697 bind(CONT_SCAN_SUBSTR); 7698 if (ae == StrIntrinsicNode::UL) { 7699 pmovzxbw(vec, Address(str2, 0)); 7700 } else { 7701 movdqu(vec, Address(str2, 0)); 7702 } 7703 jmp(SCAN_SUBSTR); 7704 7705 bind(RET_FOUND_LONG); 7706 movptr(str1, Address(rsp, wordSize)); 7707 } // non constant 7708 7709 bind(RET_FOUND); 7710 // Compute substr offset 7711 subptr(result, str1); 7712 if (ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::UL) { 7713 shrl(result, 1); // index 7714 } 7715 bind(CLEANUP); 7716 pop(rsp); // restore SP 7717 7718 } // string_indexof 7719 7720 void MacroAssembler::string_indexof_char(Register str1, Register cnt1, Register ch, Register result, 7721 XMMRegister vec1, XMMRegister vec2, XMMRegister vec3, Register tmp) { 7722 ShortBranchVerifier sbv(this); 7723 assert(UseSSE42Intrinsics, "SSE4.2 intrinsics are required"); 7724 7725 int stride = 8; 7726 7727 Label FOUND_CHAR, SCAN_TO_CHAR, SCAN_TO_CHAR_LOOP, 7728 SCAN_TO_8_CHAR, SCAN_TO_8_CHAR_LOOP, SCAN_TO_16_CHAR_LOOP, 7729 RET_NOT_FOUND, SCAN_TO_8_CHAR_INIT, 7730 FOUND_SEQ_CHAR, DONE_LABEL; 7731 7732 movptr(result, str1); 7733 if (UseAVX >= 2) { 7734 cmpl(cnt1, stride); 7735 jcc(Assembler::less, SCAN_TO_CHAR_LOOP); 7736 cmpl(cnt1, 2*stride); 7737 jcc(Assembler::less, SCAN_TO_8_CHAR_INIT); 7738 movdl(vec1, ch); 7739 vpbroadcastw(vec1, vec1); 7740 vpxor(vec2, vec2); 7741 movl(tmp, cnt1); 7742 andl(tmp, 0xFFFFFFF0); //vector count (in chars) 7743 andl(cnt1,0x0000000F); //tail count (in chars) 7744 7745 bind(SCAN_TO_16_CHAR_LOOP); 7746 vmovdqu(vec3, Address(result, 0)); 7747 vpcmpeqw(vec3, vec3, vec1, 1); 7748 vptest(vec2, vec3); 7749 jcc(Assembler::carryClear, FOUND_CHAR); 7750 addptr(result, 32); 7751 subl(tmp, 2*stride); 7752 jccb(Assembler::notZero, SCAN_TO_16_CHAR_LOOP); 7753 jmp(SCAN_TO_8_CHAR); 7754 bind(SCAN_TO_8_CHAR_INIT); 7755 movdl(vec1, ch); 7756 pshuflw(vec1, vec1, 0x00); 7757 pshufd(vec1, vec1, 0); 7758 pxor(vec2, vec2); 7759 } 7760 bind(SCAN_TO_8_CHAR); 7761 cmpl(cnt1, stride); 7762 if (UseAVX >= 2) { 7763 jcc(Assembler::less, SCAN_TO_CHAR); 7764 } else { 7765 jcc(Assembler::less, SCAN_TO_CHAR_LOOP); 7766 movdl(vec1, ch); 7767 pshuflw(vec1, vec1, 0x00); 7768 pshufd(vec1, vec1, 0); 7769 pxor(vec2, vec2); 7770 } 7771 movl(tmp, cnt1); 7772 andl(tmp, 0xFFFFFFF8); //vector count (in chars) 7773 andl(cnt1,0x00000007); //tail count (in chars) 7774 7775 bind(SCAN_TO_8_CHAR_LOOP); 7776 movdqu(vec3, Address(result, 0)); 7777 pcmpeqw(vec3, vec1); 7778 ptest(vec2, vec3); 7779 jcc(Assembler::carryClear, FOUND_CHAR); 7780 addptr(result, 16); 7781 subl(tmp, stride); 7782 jccb(Assembler::notZero, SCAN_TO_8_CHAR_LOOP); 7783 bind(SCAN_TO_CHAR); 7784 testl(cnt1, cnt1); 7785 jcc(Assembler::zero, RET_NOT_FOUND); 7786 bind(SCAN_TO_CHAR_LOOP); 7787 load_unsigned_short(tmp, Address(result, 0)); 7788 cmpl(ch, tmp); 7789 jccb(Assembler::equal, FOUND_SEQ_CHAR); 7790 addptr(result, 2); 7791 subl(cnt1, 1); 7792 jccb(Assembler::zero, RET_NOT_FOUND); 7793 jmp(SCAN_TO_CHAR_LOOP); 7794 7795 bind(RET_NOT_FOUND); 7796 movl(result, -1); 7797 jmpb(DONE_LABEL); 7798 7799 bind(FOUND_CHAR); 7800 if (UseAVX >= 2) { 7801 vpmovmskb(tmp, vec3); 7802 } else { 7803 pmovmskb(tmp, vec3); 7804 } 7805 bsfl(ch, tmp); 7806 addl(result, 
ch); 7807 7808 bind(FOUND_SEQ_CHAR); 7809 subptr(result, str1); 7810 shrl(result, 1); 7811 7812 bind(DONE_LABEL); 7813 } // string_indexof_char 7814 7815 // helper function for string_compare 7816 void MacroAssembler::load_next_elements(Register elem1, Register elem2, Register str1, Register str2, 7817 Address::ScaleFactor scale, Address::ScaleFactor scale1, 7818 Address::ScaleFactor scale2, Register index, int ae) { 7819 if (ae == StrIntrinsicNode::LL) { 7820 load_unsigned_byte(elem1, Address(str1, index, scale, 0)); 7821 load_unsigned_byte(elem2, Address(str2, index, scale, 0)); 7822 } else if (ae == StrIntrinsicNode::UU) { 7823 load_unsigned_short(elem1, Address(str1, index, scale, 0)); 7824 load_unsigned_short(elem2, Address(str2, index, scale, 0)); 7825 } else { 7826 load_unsigned_byte(elem1, Address(str1, index, scale1, 0)); 7827 load_unsigned_short(elem2, Address(str2, index, scale2, 0)); 7828 } 7829 } 7830 7831 // Compare strings, used for char[] and byte[]. 7832 void MacroAssembler::string_compare(Register str1, Register str2, 7833 Register cnt1, Register cnt2, Register result, 7834 XMMRegister vec1, int ae) { 7835 ShortBranchVerifier sbv(this); 7836 Label LENGTH_DIFF_LABEL, POP_LABEL, DONE_LABEL, WHILE_HEAD_LABEL; 7837 Label COMPARE_WIDE_VECTORS_LOOP_FAILED; // used only _LP64 && AVX3 7838 int stride, stride2, adr_stride, adr_stride1, adr_stride2; 7839 int stride2x2 = 0x40; 7840 Address::ScaleFactor scale = Address::no_scale; 7841 Address::ScaleFactor scale1 = Address::no_scale; 7842 Address::ScaleFactor scale2 = Address::no_scale; 7843 7844 if (ae != StrIntrinsicNode::LL) { 7845 stride2x2 = 0x20; 7846 } 7847 7848 if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) { 7849 shrl(cnt2, 1); 7850 } 7851 // Compute the minimum of the string lengths and the 7852 // difference of the string lengths (stack). 7853 // Do the conditional move stuff 7854 movl(result, cnt1); 7855 subl(cnt1, cnt2); 7856 push(cnt1); 7857 cmov32(Assembler::lessEqual, cnt2, result); // cnt2 = min(cnt1, cnt2) 7858 7859 // Is the minimum length zero? 
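  // (At this point cnt2 = min(cnt1, cnt2) from the cmov above, and the
  // signed difference cnt1 - cnt2 sits on the stack until LENGTH_DIFF_LABEL.)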
7860 testl(cnt2, cnt2); 7861 jcc(Assembler::zero, LENGTH_DIFF_LABEL); 7862 if (ae == StrIntrinsicNode::LL) { 7863 // Load first bytes 7864 load_unsigned_byte(result, Address(str1, 0)); // result = str1[0] 7865 load_unsigned_byte(cnt1, Address(str2, 0)); // cnt1 = str2[0] 7866 } else if (ae == StrIntrinsicNode::UU) { 7867 // Load first characters 7868 load_unsigned_short(result, Address(str1, 0)); 7869 load_unsigned_short(cnt1, Address(str2, 0)); 7870 } else { 7871 load_unsigned_byte(result, Address(str1, 0)); 7872 load_unsigned_short(cnt1, Address(str2, 0)); 7873 } 7874 subl(result, cnt1); 7875 jcc(Assembler::notZero, POP_LABEL); 7876 7877 if (ae == StrIntrinsicNode::UU) { 7878 // Divide length by 2 to get number of chars 7879 shrl(cnt2, 1); 7880 } 7881 cmpl(cnt2, 1); 7882 jcc(Assembler::equal, LENGTH_DIFF_LABEL); 7883 7884 // Check if the strings start at the same location and setup scale and stride 7885 if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) { 7886 cmpptr(str1, str2); 7887 jcc(Assembler::equal, LENGTH_DIFF_LABEL); 7888 if (ae == StrIntrinsicNode::LL) { 7889 scale = Address::times_1; 7890 stride = 16; 7891 } else { 7892 scale = Address::times_2; 7893 stride = 8; 7894 } 7895 } else { 7896 scale1 = Address::times_1; 7897 scale2 = Address::times_2; 7898 // scale not used 7899 stride = 8; 7900 } 7901 7902 if (UseAVX >= 2 && UseSSE42Intrinsics) { 7903 Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_WIDE_TAIL, COMPARE_SMALL_STR; 7904 Label COMPARE_WIDE_VECTORS_LOOP, COMPARE_16_CHARS, COMPARE_INDEX_CHAR; 7905 Label COMPARE_WIDE_VECTORS_LOOP_AVX2; 7906 Label COMPARE_TAIL_LONG; 7907 Label COMPARE_WIDE_VECTORS_LOOP_AVX3; // used only _LP64 && AVX3 7908 7909 int pcmpmask = 0x19; 7910 if (ae == StrIntrinsicNode::LL) { 7911 pcmpmask &= ~0x01; 7912 } 7913 7914 // Setup to compare 16-chars (32-bytes) vectors, 7915 // start from first character again because it has aligned address. 7916 if (ae == StrIntrinsicNode::LL) { 7917 stride2 = 32; 7918 } else { 7919 stride2 = 16; 7920 } 7921 if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) { 7922 adr_stride = stride << scale; 7923 } else { 7924 adr_stride1 = 8; //stride << scale1; 7925 adr_stride2 = 16; //stride << scale2; 7926 } 7927 7928 assert(result == rax && cnt2 == rdx && cnt1 == rcx, "pcmpestri"); 7929 // rax and rdx are used by pcmpestri as elements counters 7930 movl(result, cnt2); 7931 andl(cnt2, ~(stride2-1)); // cnt2 holds the vector count 7932 jcc(Assembler::zero, COMPARE_TAIL_LONG); 7933 7934 // fast path : compare first 2 8-char vectors. 
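    // pcmpmask 0x19 is 11000b (equal-each compare with negated result)
    // + 01 (unsigned shorts), matching the table given for the SSE4.2 path
    // below; for LL, bit 0 was cleared above to select unsigned bytes.
    // After pcmpestri, CF set means some element pair differed and
    // cnt1(rcx) holds the first mismatch index.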
7935 bind(COMPARE_16_CHARS); 7936 if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) { 7937 movdqu(vec1, Address(str1, 0)); 7938 } else { 7939 pmovzxbw(vec1, Address(str1, 0)); 7940 } 7941 pcmpestri(vec1, Address(str2, 0), pcmpmask); 7942 jccb(Assembler::below, COMPARE_INDEX_CHAR); 7943 7944 if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) { 7945 movdqu(vec1, Address(str1, adr_stride)); 7946 pcmpestri(vec1, Address(str2, adr_stride), pcmpmask); 7947 } else { 7948 pmovzxbw(vec1, Address(str1, adr_stride1)); 7949 pcmpestri(vec1, Address(str2, adr_stride2), pcmpmask); 7950 } 7951 jccb(Assembler::aboveEqual, COMPARE_WIDE_VECTORS); 7952 addl(cnt1, stride); 7953 7954 // Compare the characters at index in cnt1 7955 bind(COMPARE_INDEX_CHAR); // cnt1 has the offset of the mismatching character 7956 load_next_elements(result, cnt2, str1, str2, scale, scale1, scale2, cnt1, ae); 7957 subl(result, cnt2); 7958 jmp(POP_LABEL); 7959 7960 // Setup the registers to start vector comparison loop 7961 bind(COMPARE_WIDE_VECTORS); 7962 if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) { 7963 lea(str1, Address(str1, result, scale)); 7964 lea(str2, Address(str2, result, scale)); 7965 } else { 7966 lea(str1, Address(str1, result, scale1)); 7967 lea(str2, Address(str2, result, scale2)); 7968 } 7969 subl(result, stride2); 7970 subl(cnt2, stride2); 7971 jcc(Assembler::zero, COMPARE_WIDE_TAIL); 7972 negptr(result); 7973 7974 // In a loop, compare 16-chars (32-bytes) at once using (vpxor+vptest) 7975 bind(COMPARE_WIDE_VECTORS_LOOP); 7976 7977 #ifdef _LP64 7978 if (VM_Version::supports_avx512vlbw()) { // trying 64 bytes fast loop 7979 cmpl(cnt2, stride2x2); 7980 jccb(Assembler::below, COMPARE_WIDE_VECTORS_LOOP_AVX2); 7981 testl(cnt2, stride2x2-1); // cnt2 holds the vector count 7982 jccb(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP_AVX2); // means we cannot subtract by 0x40 7983 7984 bind(COMPARE_WIDE_VECTORS_LOOP_AVX3); // the hottest loop 7985 if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) { 7986 evmovdquq(vec1, Address(str1, result, scale), Assembler::AVX_512bit); 7987 evpcmpeqb(k7, vec1, Address(str2, result, scale), Assembler::AVX_512bit); // k7 == 11..11, if operands equal, otherwise k7 has some 0 7988 } else { 7989 vpmovzxbw(vec1, Address(str1, result, scale1), Assembler::AVX_512bit); 7990 evpcmpeqb(k7, vec1, Address(str2, result, scale2), Assembler::AVX_512bit); // k7 == 11..11, if operands equal, otherwise k7 has some 0 7991 } 7992 kortestql(k7, k7); 7993 jcc(Assembler::aboveEqual, COMPARE_WIDE_VECTORS_LOOP_FAILED); // miscompare 7994 addptr(result, stride2x2); // update since we already compared at this addr 7995 subl(cnt2, stride2x2); // and sub the size too 7996 jccb(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP_AVX3); 7997 7998 vpxor(vec1, vec1); 7999 jmpb(COMPARE_WIDE_TAIL); 8000 }//if (VM_Version::supports_avx512vlbw()) 8001 #endif // _LP64 8002 8003 8004 bind(COMPARE_WIDE_VECTORS_LOOP_AVX2); 8005 if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) { 8006 vmovdqu(vec1, Address(str1, result, scale)); 8007 vpxor(vec1, Address(str2, result, scale)); 8008 } else { 8009 vpmovzxbw(vec1, Address(str1, result, scale1), Assembler::AVX_256bit); 8010 vpxor(vec1, Address(str2, result, scale2)); 8011 } 8012 vptest(vec1, vec1); 8013 jcc(Assembler::notZero, VECTOR_NOT_EQUAL); 8014 addptr(result, stride2); 8015 subl(cnt2, stride2); 8016 jcc(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP); 8017 // clean upper bits of YMM registers 8018 vpxor(vec1, vec1); 8019 
8020     // compare wide vectors tail
8021     bind(COMPARE_WIDE_TAIL);
8022     testptr(result, result);
8023     jcc(Assembler::zero, LENGTH_DIFF_LABEL);
8024
8025     movl(result, stride2);
8026     movl(cnt2, result);
8027     negptr(result);
8028     jmp(COMPARE_WIDE_VECTORS_LOOP_AVX2);
8029
8030     // Identifies the mismatching (higher or lower) 16 bytes in the 32-byte vectors.
8031     bind(VECTOR_NOT_EQUAL);
8032     // clean upper bits of YMM registers
8033     vpxor(vec1, vec1);
8034     if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
8035       lea(str1, Address(str1, result, scale));
8036       lea(str2, Address(str2, result, scale));
8037     } else {
8038       lea(str1, Address(str1, result, scale1));
8039       lea(str2, Address(str2, result, scale2));
8040     }
8041     jmp(COMPARE_16_CHARS);
8042
8043     // Compare tail chars, length between 1 and 15 chars
8044     bind(COMPARE_TAIL_LONG);
8045     movl(cnt2, result);
8046     cmpl(cnt2, stride);
8047     jcc(Assembler::less, COMPARE_SMALL_STR);
8048
8049     if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
8050       movdqu(vec1, Address(str1, 0));
8051     } else {
8052       pmovzxbw(vec1, Address(str1, 0));
8053     }
8054     pcmpestri(vec1, Address(str2, 0), pcmpmask);
8055     jcc(Assembler::below, COMPARE_INDEX_CHAR);
8056     subptr(cnt2, stride);
8057     jcc(Assembler::zero, LENGTH_DIFF_LABEL);
8058     if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
8059       lea(str1, Address(str1, result, scale));
8060       lea(str2, Address(str2, result, scale));
8061     } else {
8062       lea(str1, Address(str1, result, scale1));
8063       lea(str2, Address(str2, result, scale2));
8064     }
8065     negptr(cnt2);
8066     jmpb(WHILE_HEAD_LABEL);
8067
8068     bind(COMPARE_SMALL_STR);
8069   } else if (UseSSE42Intrinsics) {
8070     Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_TAIL;
8071     int pcmpmask = 0x19;
8072     // Setup to compare 8-char (16-byte) vectors,
8073     // start from first character again because it has aligned address.
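    // Each iteration of the loop below compares one 16-byte vector: a
    // straight movdqu when both strings use the same encoding, or pmovzxbw
    // to widen Latin-1 bytes to chars when the encodings differ.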
    // Set up to compare 8-char (16-byte) vectors,
    // start from first character again because it has aligned address.
    movl(result, cnt2);
    andl(cnt2, ~(stride - 1)); // cnt2 holds the vector count
    if (ae == StrIntrinsicNode::LL) {
      pcmpmask &= ~0x01;
    }
    jcc(Assembler::zero, COMPARE_TAIL);
    if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
      lea(str1, Address(str1, result, scale));
      lea(str2, Address(str2, result, scale));
    } else {
      lea(str1, Address(str1, result, scale1));
      lea(str2, Address(str2, result, scale2));
    }
    negptr(result);

    // pcmpestri
    //   inputs:
    //     vec1 - substring
    //     rax  - negative string length (elements count)
    //     mem  - scanned string
    //     rdx  - string length (elements count)
    //     pcmpmask - cmp mode: 11000 (string compare with negated result)
    //                + 00 (unsigned bytes) or + 01 (unsigned shorts)
    //   outputs:
    //     rcx - first mismatched element index
    assert(result == rax && cnt2 == rdx && cnt1 == rcx, "pcmpestri");

    bind(COMPARE_WIDE_VECTORS);
    if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
      movdqu(vec1, Address(str1, result, scale));
      pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
    } else {
      pmovzxbw(vec1, Address(str1, result, scale1));
      pcmpestri(vec1, Address(str2, result, scale2), pcmpmask);
    }
    // After pcmpestri cnt1(rcx) contains the mismatched element index

    jccb(Assembler::below, VECTOR_NOT_EQUAL);  // CF==1
    addptr(result, stride);
    subptr(cnt2, stride);
    jccb(Assembler::notZero, COMPARE_WIDE_VECTORS);

    // compare wide vectors tail
    testptr(result, result);
    jcc(Assembler::zero, LENGTH_DIFF_LABEL);

    movl(cnt2, stride);
    movl(result, stride);
    negptr(result);
    if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
      movdqu(vec1, Address(str1, result, scale));
      pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
    } else {
      pmovzxbw(vec1, Address(str1, result, scale1));
      pcmpestri(vec1, Address(str2, result, scale2), pcmpmask);
    }
    jccb(Assembler::aboveEqual, LENGTH_DIFF_LABEL);

    // Mismatched characters in the vectors
    bind(VECTOR_NOT_EQUAL);
    addptr(cnt1, result);
    load_next_elements(result, cnt2, str1, str2, scale, scale1, scale2, cnt1, ae);
    subl(result, cnt2);
    jmpb(POP_LABEL);

    bind(COMPARE_TAIL); // limit is zero
    movl(cnt2, result);
    // Fallthru to tail compare
  }
  // Shift str2 and str1 to the end of the arrays, negate min
  if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
    lea(str1, Address(str1, cnt2, scale));
    lea(str2, Address(str2, cnt2, scale));
  } else {
    lea(str1, Address(str1, cnt2, scale1));
    lea(str2, Address(str2, cnt2, scale2));
  }
  decrementl(cnt2);  // first character was compared already
  negptr(cnt2);

  // Compare the rest of the elements
  bind(WHILE_HEAD_LABEL);
  load_next_elements(result, cnt1, str1, str2, scale, scale1, scale2, cnt2, ae);
  subl(result, cnt1);
  jccb(Assembler::notZero, POP_LABEL);
  increment(cnt2);
  jccb(Assembler::notZero, WHILE_HEAD_LABEL);
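  // For reference, the overall contract being implemented is the familiar
  // String.compareTo-style loop (an illustrative sketch, not the exact
  // library source):
  //   int min = Math.min(len1, len2);
  //   for (int i = 0; i < min; i++) {
  //     if (s1[i] != s2[i]) return s1[i] - s2[i];
  //   }
  //   return len1 - len2;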
  // Strings are equal up to min length. Return the length difference.
  bind(LENGTH_DIFF_LABEL);
  pop(result);
  if (ae == StrIntrinsicNode::UU) {
    // Divide diff by 2 to get number of chars
    sarl(result, 1);
  }
  jmpb(DONE_LABEL);

#ifdef _LP64
  if (VM_Version::supports_avx512vlbw()) {

    bind(COMPARE_WIDE_VECTORS_LOOP_FAILED);

    kmovql(cnt1, k7);
    notq(cnt1);
    bsfq(cnt2, cnt1);
    if (ae != StrIntrinsicNode::LL) {
      // Divide diff by 2 to get number of chars
      sarl(cnt2, 1);
    }
    addq(result, cnt2);
    if (ae == StrIntrinsicNode::LL) {
      load_unsigned_byte(cnt1, Address(str2, result));
      load_unsigned_byte(result, Address(str1, result));
    } else if (ae == StrIntrinsicNode::UU) {
      load_unsigned_short(cnt1, Address(str2, result, scale));
      load_unsigned_short(result, Address(str1, result, scale));
    } else {
      load_unsigned_short(cnt1, Address(str2, result, scale2));
      load_unsigned_byte(result, Address(str1, result, scale1));
    }
    subl(result, cnt1);
    jmpb(POP_LABEL);
  }//if (VM_Version::supports_avx512vlbw())
#endif // _LP64

  // Discard the stored length difference
  bind(POP_LABEL);
  pop(cnt1);

  // That's it
  bind(DONE_LABEL);
  if (ae == StrIntrinsicNode::UL) {
    negl(result);
  }
}

// Search for a non-ASCII character (negative byte value) in a byte array,
// return true if it has any and false otherwise.
//   ..\jdk\src\java.base\share\classes\java\lang\StringCoding.java
//   @HotSpotIntrinsicCandidate
//   private static boolean hasNegatives(byte[] ba, int off, int len) {
//     for (int i = off; i < off + len; i++) {
//       if (ba[i] < 0) {
//         return true;
//       }
//     }
//     return false;
//   }
void MacroAssembler::has_negatives(Register ary1, Register len,
                                   Register result, Register tmp1,
                                   XMMRegister vec1, XMMRegister vec2) {
  // rsi: byte array
  // rcx: len
  // rax: result
  ShortBranchVerifier sbv(this);
  assert_different_registers(ary1, len, result, tmp1);
  assert_different_registers(vec1, vec2);
  Label TRUE_LABEL, FALSE_LABEL, DONE, COMPARE_CHAR, COMPARE_VECTORS, COMPARE_BYTE;

  // len == 0
  testl(len, len);
  jcc(Assembler::zero, FALSE_LABEL);

  if ((UseAVX > 2) && // AVX512
      VM_Version::supports_avx512vlbw() &&
      VM_Version::supports_bmi2()) {

    set_vector_masking();  // opening of the stub context for programming mask registers

    Label test_64_loop, test_tail;
    Register tmp3_aliased = len;

    movl(tmp1, len);
    vpxor(vec2, vec2, vec2, Assembler::AVX_512bit);

    andl(tmp1, 64 - 1);   // tail count (in chars) 0x3F
    andl(len, ~(64 - 1)); // vector count (in chars)
    jccb(Assembler::zero, test_tail);

    lea(ary1, Address(ary1, len, Address::times_1));
    negptr(len);

    bind(test_64_loop);
    // Check whether our 64 byte-sized elements contain negatives
    evpcmpgtb(k2, vec2, Address(ary1, len, Address::times_1), Assembler::AVX_512bit);
    kortestql(k2, k2);
    jcc(Assembler::notZero, TRUE_LABEL);

    addptr(len, 64);
    jccb(Assembler::notZero, test_64_loop);

    bind(test_tail);
    // bail out when there is nothing to be done
    testl(tmp1, -1);
    jcc(Assembler::zero, FALSE_LABEL);

    // Save k1
    kmovql(k3, k1);

    // ~(~0 << len) applied up to two times (for 32-bit scenario)
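    // For example (illustrative): tmp1 == 3 gives ~(~0 << 3) == 0b111, a
    // mask selecting exactly the three remaining tail bytes for the masked
    // compare below.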
#ifdef _LP64
    mov64(tmp3_aliased, 0xFFFFFFFFFFFFFFFF);
    shlxq(tmp3_aliased, tmp3_aliased, tmp1);
    notq(tmp3_aliased);
    kmovql(k1, tmp3_aliased);
#else
    Label k_init;
    jmp(k_init);

    // We cannot read 64 bits from a general-purpose register on 32-bit,
    // so we move the data required to compose the 64 1's into the
    // instruction stream. We emit a 64-byte-wide series of the elements
    // 0..63, which is later used as a compare target against the tail
    // count held in the tmp1 register. The result is a k1 register holding
    // tmp1 consecutive 1's, counting from the least significant bit.
    address tmp = pc();
    emit_int64(0x0706050403020100);
    emit_int64(0x0F0E0D0C0B0A0908);
    emit_int64(0x1716151413121110);
    emit_int64(0x1F1E1D1C1B1A1918);
    emit_int64(0x2726252423222120);
    emit_int64(0x2F2E2D2C2B2A2928);
    emit_int64(0x3736353433323130);
    emit_int64(0x3F3E3D3C3B3A3938);

    bind(k_init);
    lea(len, InternalAddress(tmp));
    // create mask to test for negative byte inside a vector
    evpbroadcastb(vec1, tmp1, Assembler::AVX_512bit);
    evpcmpgtb(k1, vec1, Address(len, 0), Assembler::AVX_512bit);
#endif
    evpcmpgtb(k2, k1, vec2, Address(ary1, 0), Assembler::AVX_512bit);
    ktestq(k2, k1);
    // Restore k1
    kmovql(k1, k3);
    jcc(Assembler::notZero, TRUE_LABEL);

    jmp(FALSE_LABEL);

    clear_vector_masking(); // closing of the stub context for programming mask registers
  } else {
    movl(result, len); // copy

    if (UseAVX == 2 && UseSSE >= 2) {
      // With AVX2, use 32-byte vector compare
      Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;

      // Compare 32-byte vectors
      andl(result, 0x0000001f); // tail count (in bytes)
      andl(len, 0xffffffe0);    // vector count (in bytes)
      jccb(Assembler::zero, COMPARE_TAIL);

      lea(ary1, Address(ary1, len, Address::times_1));
      negptr(len);

      movl(tmp1, 0x80808080); // create mask to test for Unicode chars in vector
      movdl(vec2, tmp1);
      vpbroadcastd(vec2, vec2);
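      // Why 0x80808080 works (an explanatory note): a byte is negative
      // exactly when its sign bit (bit 7) is set, so testing each byte lane
      // against 0x80 detects a negative byte. E.g. 0x7F & 0x80 == 0, while
      // 0x80..0xFF all have 0x80 set. vptest below leaves ZF set only when
      // no lane has that bit set.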
      bind(COMPARE_WIDE_VECTORS);
      vmovdqu(vec1, Address(ary1, len, Address::times_1));
      vptest(vec1, vec2);
      jccb(Assembler::notZero, TRUE_LABEL);
      addptr(len, 32);
      jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);

      testl(result, result);
      jccb(Assembler::zero, FALSE_LABEL);

      vmovdqu(vec1, Address(ary1, result, Address::times_1, -32));
      vptest(vec1, vec2);
      jccb(Assembler::notZero, TRUE_LABEL);
      jmpb(FALSE_LABEL);

      bind(COMPARE_TAIL); // len is zero
      movl(len, result);
      // Fallthru to tail compare
    } else if (UseSSE42Intrinsics) {
      // With SSE4.2, use double quad vector compare
      Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;

      // Compare 16-byte vectors
      andl(result, 0x0000000f); // tail count (in bytes)
      andl(len, 0xfffffff0);    // vector count (in bytes)
      jccb(Assembler::zero, COMPARE_TAIL);

      lea(ary1, Address(ary1, len, Address::times_1));
      negptr(len);

      movl(tmp1, 0x80808080);
      movdl(vec2, tmp1);
      pshufd(vec2, vec2, 0);

      bind(COMPARE_WIDE_VECTORS);
      movdqu(vec1, Address(ary1, len, Address::times_1));
      ptest(vec1, vec2);
      jccb(Assembler::notZero, TRUE_LABEL);
      addptr(len, 16);
      jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);

      testl(result, result);
      jccb(Assembler::zero, FALSE_LABEL);

      movdqu(vec1, Address(ary1, result, Address::times_1, -16));
      ptest(vec1, vec2);
      jccb(Assembler::notZero, TRUE_LABEL);
      jmpb(FALSE_LABEL);

      bind(COMPARE_TAIL); // len is zero
      movl(len, result);
      // Fallthru to tail compare
    }
  }
  // Compare 4-byte vectors
  andl(len, 0xfffffffc); // vector count (in bytes)
  jccb(Assembler::zero, COMPARE_CHAR);

  lea(ary1, Address(ary1, len, Address::times_1));
  negptr(len);

  bind(COMPARE_VECTORS);
  movl(tmp1, Address(ary1, len, Address::times_1));
  andl(tmp1, 0x80808080);
  jccb(Assembler::notZero, TRUE_LABEL);
  addptr(len, 4);
  jcc(Assembler::notZero, COMPARE_VECTORS);

  // Compare trailing char (final 2 bytes), if any
  bind(COMPARE_CHAR);
  testl(result, 0x2); // tail char
  jccb(Assembler::zero, COMPARE_BYTE);
  load_unsigned_short(tmp1, Address(ary1, 0));
  andl(tmp1, 0x00008080);
  jccb(Assembler::notZero, TRUE_LABEL);
  subptr(result, 2);
  lea(ary1, Address(ary1, 2));

  bind(COMPARE_BYTE);
  testl(result, 0x1); // tail byte
  jccb(Assembler::zero, FALSE_LABEL);
  load_unsigned_byte(tmp1, Address(ary1, 0));
  andl(tmp1, 0x00000080);
  jccb(Assembler::notEqual, TRUE_LABEL);
  jmpb(FALSE_LABEL);

  bind(TRUE_LABEL);
  movl(result, 1); // return true
  jmpb(DONE);

  bind(FALSE_LABEL);
  xorl(result, result); // return false

  // That's it
  bind(DONE);
  if (UseAVX >= 2 && UseSSE >= 2) {
    // clean upper bits of YMM registers
    vpxor(vec1, vec1);
    vpxor(vec2, vec2);
  }
}

// Compare char[] or byte[] arrays aligned to 4 bytes or substrings.
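// An illustrative Java-level sketch of what this generates when is_array_equ
// is set (our sketch, not the exact library source):
//   if (a == b) return true;
//   if (a == null || b == null || a.length != b.length) return false;
//   for (int i = 0; i < a.length; i++) {
//     if (a[i] != b[i]) return false;
//   }
//   return true;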
void MacroAssembler::arrays_equals(bool is_array_equ, Register ary1, Register ary2,
                                   Register limit, Register result, Register chr,
                                   XMMRegister vec1, XMMRegister vec2, bool is_char) {
  ShortBranchVerifier sbv(this);
  Label TRUE_LABEL, FALSE_LABEL, DONE, COMPARE_VECTORS, COMPARE_CHAR, COMPARE_BYTE;

  int length_offset = arrayOopDesc::length_offset_in_bytes();
  int base_offset   = arrayOopDesc::base_offset_in_bytes(is_char ? T_CHAR : T_BYTE);

  if (is_array_equ) {
    // Check the input args
    cmpoop(ary1, ary2);
    jcc(Assembler::equal, TRUE_LABEL);

    // Need additional checks for arrays_equals.
    testptr(ary1, ary1);
    jcc(Assembler::zero, FALSE_LABEL);
    testptr(ary2, ary2);
    jcc(Assembler::zero, FALSE_LABEL);

    // Check the lengths
    movl(limit, Address(ary1, length_offset));
    cmpl(limit, Address(ary2, length_offset));
    jcc(Assembler::notEqual, FALSE_LABEL);
  }

  // count == 0
  testl(limit, limit);
  jcc(Assembler::zero, TRUE_LABEL);

  if (is_array_equ) {
    // Load array address
    lea(ary1, Address(ary1, base_offset));
    lea(ary2, Address(ary2, base_offset));
  }

  if (is_array_equ && is_char) {
    // arrays_equals when used for char[].
    shll(limit, 1); // byte count != 0
  }
  movl(result, limit); // copy

  if (UseAVX >= 2) {
    // With AVX2, use 32-byte vector compare
    Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;

    // Compare 32-byte vectors
    andl(result, 0x0000001f); // tail count (in bytes)
    andl(limit, 0xffffffe0);  // vector count (in bytes)
    jcc(Assembler::zero, COMPARE_TAIL);

    lea(ary1, Address(ary1, limit, Address::times_1));
    lea(ary2, Address(ary2, limit, Address::times_1));
    negptr(limit);

    bind(COMPARE_WIDE_VECTORS);

#ifdef _LP64
    if (VM_Version::supports_avx512vlbw()) { // trying 64 bytes fast loop
      Label COMPARE_WIDE_VECTORS_LOOP_AVX2, COMPARE_WIDE_VECTORS_LOOP_AVX3;

      cmpl(limit, -64);
      jccb(Assembler::greater, COMPARE_WIDE_VECTORS_LOOP_AVX2);

      bind(COMPARE_WIDE_VECTORS_LOOP_AVX3); // the hottest loop

      evmovdquq(vec1, Address(ary1, limit, Address::times_1), Assembler::AVX_512bit);
      evpcmpeqb(k7, vec1, Address(ary2, limit, Address::times_1), Assembler::AVX_512bit);
      kortestql(k7, k7);
      jcc(Assembler::aboveEqual, FALSE_LABEL);     // miscompare
      addptr(limit, 64);  // update since we already compared at this addr
      cmpl(limit, -64);
      jccb(Assembler::lessEqual, COMPARE_WIDE_VECTORS_LOOP_AVX3);

      // At this point we may still need to compare -limit+result bytes.
      // We could execute the next two instructions and just continue via
      // the non-wide path:
      //   cmpl(limit, 0);
      //   jcc(Assembler::equal, COMPARE_TAIL); // true
      // But since we stopped at the points ary{1,2}+limit, which are
      // not farther than 64 bytes from the ends of arrays ary{1,2}+result
      // (|limit| <= 32 and result < 32),
      // we may just compare the last 64 bytes.
      //
      addptr(result, -64);   // it is safe, because we just came from this area
      evmovdquq(vec1, Address(ary1, result, Address::times_1), Assembler::AVX_512bit);
      evpcmpeqb(k7, vec1, Address(ary2, result, Address::times_1), Assembler::AVX_512bit);
      kortestql(k7, k7);
      jcc(Assembler::aboveEqual, FALSE_LABEL);     // miscompare

      jmp(TRUE_LABEL);

      bind(COMPARE_WIDE_VECTORS_LOOP_AVX2);

    }//if (VM_Version::supports_avx512vlbw())
#endif //_LP64

    vmovdqu(vec1, Address(ary1, limit, Address::times_1));
    vmovdqu(vec2, Address(ary2, limit, Address::times_1));
    vpxor(vec1, vec2);

    vptest(vec1, vec1);
    jcc(Assembler::notZero, FALSE_LABEL);
    addptr(limit, 32);
    jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);

    testl(result, result);
    jcc(Assembler::zero, TRUE_LABEL);

    vmovdqu(vec1, Address(ary1, result, Address::times_1, -32));
    vmovdqu(vec2, Address(ary2, result, Address::times_1, -32));
    vpxor(vec1, vec2);

    vptest(vec1, vec1);
    jccb(Assembler::notZero, FALSE_LABEL);
    jmpb(TRUE_LABEL);

    bind(COMPARE_TAIL); // limit is zero
    movl(limit, result);
    // Fallthru to tail compare
  } else if (UseSSE42Intrinsics) {
    // With SSE4.2, use double quad vector compare
    Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;

    // Compare 16-byte vectors
    andl(result, 0x0000000f); // tail count (in bytes)
    andl(limit, 0xfffffff0);  // vector count (in bytes)
    jcc(Assembler::zero, COMPARE_TAIL);

    lea(ary1, Address(ary1, limit, Address::times_1));
    lea(ary2, Address(ary2, limit, Address::times_1));
    negptr(limit);

    bind(COMPARE_WIDE_VECTORS);
    movdqu(vec1, Address(ary1, limit, Address::times_1));
    movdqu(vec2, Address(ary2, limit, Address::times_1));
    pxor(vec1, vec2);

    ptest(vec1, vec1);
    jcc(Assembler::notZero, FALSE_LABEL);
    addptr(limit, 16);
    jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);

    testl(result, result);
    jcc(Assembler::zero, TRUE_LABEL);

    movdqu(vec1, Address(ary1, result, Address::times_1, -16));
    movdqu(vec2, Address(ary2, result, Address::times_1, -16));
    pxor(vec1, vec2);

    ptest(vec1, vec1);
    jccb(Assembler::notZero, FALSE_LABEL);
    jmpb(TRUE_LABEL);

    bind(COMPARE_TAIL); // limit is zero
    movl(limit, result);
    // Fallthru to tail compare
  }

  // Compare 4-byte vectors
  andl(limit, 0xfffffffc); // vector count (in bytes)
  jccb(Assembler::zero, COMPARE_CHAR);

  lea(ary1, Address(ary1, limit, Address::times_1));
  lea(ary2, Address(ary2, limit, Address::times_1));
  negptr(limit);

  bind(COMPARE_VECTORS);
  movl(chr, Address(ary1, limit, Address::times_1));
  cmpl(chr, Address(ary2, limit, Address::times_1));
  jccb(Assembler::notEqual, FALSE_LABEL);
  addptr(limit, 4);
  jcc(Assembler::notZero, COMPARE_VECTORS);

  // Compare trailing char (final 2 bytes), if any
  bind(COMPARE_CHAR);
  testl(result, 0x2); // tail char
  jccb(Assembler::zero, COMPARE_BYTE);
  load_unsigned_short(chr, Address(ary1, 0));
  load_unsigned_short(limit, Address(ary2, 0));
  cmpl(chr, limit);
  jccb(Assembler::notEqual, FALSE_LABEL);

  if (is_array_equ && is_char) {
    bind(COMPARE_BYTE);
  } else {
    lea(ary1, Address(ary1, 2));
    lea(ary2, Address(ary2, 2));

    bind(COMPARE_BYTE);
    testl(result, 0x1); // tail byte
    jccb(Assembler::zero, TRUE_LABEL);
    load_unsigned_byte(chr, Address(ary1, 0));
    load_unsigned_byte(limit, Address(ary2, 0));
    cmpl(chr, limit);
    jccb(Assembler::notEqual, FALSE_LABEL);
  }
  bind(TRUE_LABEL);
  movl(result, 1);      // return true
  jmpb(DONE);

  bind(FALSE_LABEL);
  xorl(result, result); // return false

  // That's it
  bind(DONE);
  if (UseAVX >= 2) {
    // clean upper bits of YMM registers
    vpxor(vec1, vec1);
    vpxor(vec2, vec2);
  }
}

#endif

void MacroAssembler::generate_fill(BasicType t, bool aligned,
                                   Register to, Register value, Register count,
                                   Register rtmp, XMMRegister xtmp) {
  ShortBranchVerifier sbv(this);
  assert_different_registers(to, value, count, rtmp);
  Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
  Label L_fill_2_bytes, L_fill_4_bytes;

  int shift = -1;
  switch (t) {
    case T_BYTE:
      shift = 2;
      break;
    case T_SHORT:
      shift = 1;
      break;
    case T_INT:
      shift = 0;
      break;
    default: ShouldNotReachHere();
  }

  if (t == T_BYTE) {
    andl(value, 0xff);
    movl(rtmp, value);
    shll(rtmp, 8);
    orl(value, rtmp);
  }
  if (t == T_SHORT) {
    andl(value, 0xffff);
  }
  if (t == T_BYTE || t == T_SHORT) {
    movl(rtmp, value);
    shll(rtmp, 16);
    orl(value, rtmp);
  }
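  // Replication example (illustrative): for T_BYTE, value 0xAB becomes
  // 0xABAB after the 8-bit shift/or above, then 0xABABABAB after the
  // 16-bit shift/or, so a single 32-bit store fills four elements at once.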
  cmpl(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
  jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
  if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
    // align source address at 4 bytes address boundary
    if (t == T_BYTE) {
      // One byte misalignment happens only for byte arrays
      testptr(to, 1);
      jccb(Assembler::zero, L_skip_align1);
      movb(Address(to, 0), value);
      increment(to);
      decrement(count);
      BIND(L_skip_align1);
    }
    // Two bytes misalignment happens only for byte and short (char) arrays
    testptr(to, 2);
    jccb(Assembler::zero, L_skip_align2);
    movw(Address(to, 0), value);
    addptr(to, 2);
    subl(count, 1<<(shift-1));
    BIND(L_skip_align2);
  }
  if (UseSSE < 2) {
    Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
    // Fill 32-byte chunks
    subl(count, 8 << shift);
    jcc(Assembler::less, L_check_fill_8_bytes);
    align(16);

    BIND(L_fill_32_bytes_loop);

    for (int i = 0; i < 32; i += 4) {
      movl(Address(to, i), value);
    }

    addptr(to, 32);
    subl(count, 8 << shift);
    jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
    BIND(L_check_fill_8_bytes);
    addl(count, 8 << shift);
    jccb(Assembler::zero, L_exit);
    jmpb(L_fill_8_bytes);

    //
    // length is too short, just fill qwords
    //
    BIND(L_fill_8_bytes_loop);
    movl(Address(to, 0), value);
    movl(Address(to, 4), value);
    addptr(to, 8);
    BIND(L_fill_8_bytes);
    subl(count, 1 << (shift + 1));
    jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
    // fall through to fill 4 bytes
  } else {
    Label L_fill_32_bytes;
    if (!UseUnalignedLoadStores) {
      // align to 8 bytes, we know we are 4 byte aligned to start
      testptr(to, 4);
      jccb(Assembler::zero, L_fill_32_bytes);
      movl(Address(to, 0), value);
      addptr(to, 4);
      subl(count, 1<<shift);
    }
    BIND(L_fill_32_bytes);
    {
      assert( UseSSE >= 2, "supported cpu only" );
      Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
      if (UseAVX > 2) {
        movl(rtmp, 0xffff);
        kmovwl(k1, rtmp);
      }
      movdl(xtmp, value);
      if (UseAVX > 2 && UseUnalignedLoadStores) {
        // Fill 64-byte chunks
        Label L_fill_64_bytes_loop, L_check_fill_32_bytes;
        evpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);

        subl(count, 16 << shift);
        jcc(Assembler::less, L_check_fill_32_bytes);
        align(16);

        BIND(L_fill_64_bytes_loop);
        evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit);
        addptr(to, 64);
        subl(count, 16 << shift);
        jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);

        BIND(L_check_fill_32_bytes);
        addl(count, 8 << shift);
        jccb(Assembler::less, L_check_fill_8_bytes);
        vmovdqu(Address(to, 0), xtmp);
        addptr(to, 32);
        subl(count, 8 << shift);

        BIND(L_check_fill_8_bytes);
      } else if (UseAVX == 2 && UseUnalignedLoadStores) {
        // Fill 64-byte chunks
        Label L_fill_64_bytes_loop, L_check_fill_32_bytes;
        vpbroadcastd(xtmp, xtmp);

        subl(count, 16 << shift);
        jcc(Assembler::less, L_check_fill_32_bytes);
        align(16);

        BIND(L_fill_64_bytes_loop);
        vmovdqu(Address(to, 0), xtmp);
        vmovdqu(Address(to, 32), xtmp);
        addptr(to, 64);
        subl(count, 16 << shift);
        jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);

        BIND(L_check_fill_32_bytes);
        addl(count, 8 << shift);
        jccb(Assembler::less, L_check_fill_8_bytes);
        vmovdqu(Address(to, 0), xtmp);
        addptr(to, 32);
        subl(count, 8 << shift);

        BIND(L_check_fill_8_bytes);
        // clean upper bits of YMM registers
        movdl(xtmp, value);
        pshufd(xtmp, xtmp, 0);
      } else {
        // Fill 32-byte chunks
        pshufd(xtmp, xtmp, 0);

        subl(count, 8 << shift);
        jcc(Assembler::less, L_check_fill_8_bytes);
        align(16);

        BIND(L_fill_32_bytes_loop);

        if (UseUnalignedLoadStores) {
          movdqu(Address(to, 0), xtmp);
          movdqu(Address(to, 16), xtmp);
        } else {
          movq(Address(to, 0), xtmp);
          movq(Address(to, 8), xtmp);
          movq(Address(to, 16), xtmp);
          movq(Address(to, 24), xtmp);
        }

        addptr(to, 32);
        subl(count, 8 << shift);
        jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);

        BIND(L_check_fill_8_bytes);
      }
      addl(count, 8 << shift);
      jccb(Assembler::zero, L_exit);
      jmpb(L_fill_8_bytes);

      //
      // length is too short, just fill qwords
      //
      BIND(L_fill_8_bytes_loop);
      movq(Address(to, 0), xtmp);
      addptr(to, 8);
      BIND(L_fill_8_bytes);
      subl(count, 1 << (shift + 1));
      jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
    }
  }
  // fill trailing 4 bytes
  BIND(L_fill_4_bytes);
  testl(count, 1<<shift);
  jccb(Assembler::zero, L_fill_2_bytes);
  movl(Address(to, 0), value);
  if (t == T_BYTE || t == T_SHORT) {
    addptr(to, 4);
    BIND(L_fill_2_bytes);
    // fill trailing 2 bytes
    testl(count, 1<<(shift-1));
    jccb(Assembler::zero, L_fill_byte);
    movw(Address(to, 0), value);
    if (t == T_BYTE) {
      addptr(to, 2);
      BIND(L_fill_byte);
      // fill trailing byte
      testl(count, 1);
      jccb(Assembler::zero, L_exit);
      movb(Address(to, 0), value);
    } else {
      BIND(L_fill_byte);
    }
  } else {
    BIND(L_fill_2_bytes);
  }
  BIND(L_exit);
}
// encode char[] to byte[] in ISO_8859_1
//   @HotSpotIntrinsicCandidate
//   private static int implEncodeISOArray(byte[] sa, int sp,
//                                         byte[] da, int dp, int len) {
//     int i = 0;
//     for (; i < len; i++) {
//       char c = StringUTF16.getChar(sa, sp++);
//       if (c > '\u00FF')
//         break;
//       da[dp++] = (byte)c;
//     }
//     return i;
//   }
void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
                                      XMMRegister tmp1Reg, XMMRegister tmp2Reg,
                                      XMMRegister tmp3Reg, XMMRegister tmp4Reg,
                                      Register tmp5, Register result) {

  // rsi: src
  // rdi: dst
  // rdx: len
  // rcx: tmp5
  // rax: result
  ShortBranchVerifier sbv(this);
  assert_different_registers(src, dst, len, tmp5, result);
  Label L_done, L_copy_1_char, L_copy_1_char_exit;

  // set result
  xorl(result, result);
  // check for zero length
  testl(len, len);
  jcc(Assembler::zero, L_done);

  movl(result, len);

  // Set up pointers
  lea(src, Address(src, len, Address::times_2)); // char[]
  lea(dst, Address(dst, len, Address::times_1)); // byte[]
  negptr(len);

  if (UseSSE42Intrinsics || UseAVX >= 2) {
    Label L_chars_8_check, L_copy_8_chars, L_copy_8_chars_exit;
    Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit;

    if (UseAVX >= 2) {
      Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit;
      movl(tmp5, 0xff00ff00);   // create mask to test for Unicode chars in vector
      movdl(tmp1Reg, tmp5);
      vpbroadcastd(tmp1Reg, tmp1Reg);
      jmp(L_chars_32_check);

      bind(L_copy_32_chars);
      vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64));
      vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32));
      vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
      vptest(tmp2Reg, tmp1Reg);       // check for Unicode chars in vector
      jccb(Assembler::notZero, L_copy_32_chars_exit);
      vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
      vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector_len */ 1);
      vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg);

      bind(L_chars_32_check);
      addptr(len, 32);
      jcc(Assembler::lessEqual, L_copy_32_chars);

      bind(L_copy_32_chars_exit);
      subptr(len, 16);
      jccb(Assembler::greater, L_copy_16_chars_exit);

    } else if (UseSSE42Intrinsics) {
      movl(tmp5, 0xff00ff00);   // create mask to test for Unicode chars in vector
      movdl(tmp1Reg, tmp5);
      pshufd(tmp1Reg, tmp1Reg, 0);
      jmpb(L_chars_16_check);
    }

    bind(L_copy_16_chars);
    if (UseAVX >= 2) {
      vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32));
      vptest(tmp2Reg, tmp1Reg);
      jcc(Assembler::notZero, L_copy_16_chars_exit);
      vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1);
      vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1);
    } else {
      if (UseAVX > 0) {
        movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
        movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
        vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 0);
      } else {
        movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
        por(tmp2Reg, tmp3Reg);
        movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
        por(tmp2Reg, tmp4Reg);
      }
      ptest(tmp2Reg, tmp1Reg);       // check for Unicode chars in vector
      jccb(Assembler::notZero, L_copy_16_chars_exit);
      packuswb(tmp3Reg, tmp4Reg);
    }
    movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg);

    bind(L_chars_16_check);
    addptr(len, 16);
    jcc(Assembler::lessEqual, L_copy_16_chars);

    bind(L_copy_16_chars_exit);
    if (UseAVX >= 2) {
      // clean upper bits of YMM registers
      vpxor(tmp2Reg, tmp2Reg);
      vpxor(tmp3Reg, tmp3Reg);
      vpxor(tmp4Reg, tmp4Reg);
      movdl(tmp1Reg, tmp5);
      pshufd(tmp1Reg, tmp1Reg, 0);
    }
    subptr(len, 8);
    jccb(Assembler::greater, L_copy_8_chars_exit);

    bind(L_copy_8_chars);
    movdqu(tmp3Reg, Address(src, len, Address::times_2, -16));
    ptest(tmp3Reg, tmp1Reg);
    jccb(Assembler::notZero, L_copy_8_chars_exit);
    packuswb(tmp3Reg, tmp1Reg);
    movq(Address(dst, len, Address::times_1, -8), tmp3Reg);
    addptr(len, 8);
    jccb(Assembler::lessEqual, L_copy_8_chars);

    bind(L_copy_8_chars_exit);
    subptr(len, 8);
    jccb(Assembler::zero, L_done);
  }

  bind(L_copy_1_char);
  load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0));
  testl(tmp5, 0xff00);      // check if Unicode char
  jccb(Assembler::notZero, L_copy_1_char_exit);
  movb(Address(dst, len, Address::times_1, 0), tmp5);
  addptr(len, 1);
  jccb(Assembler::less, L_copy_1_char);

  bind(L_copy_1_char_exit);
  addptr(result, len); // len is negative count of not processed elements

  bind(L_done);
}

#ifdef _LP64
/**
 * Helper for multiply_to_len().
 */
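// (Explanatory note.) This helper performs a 128-bit accumulate:
//   (dest_hi : dest_lo) += src1 + src2
// Each addq can carry out of the low word; the matching adcq folds that
// carry into the high word.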
void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
  addq(dest_lo, src1);
  adcq(dest_hi, 0);
  addq(dest_lo, src2);
  adcq(dest_hi, 0);
}

/**
 * Multiply 64 bit by 64 bit first loop.
 */
void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                                           Register y, Register y_idx, Register z,
                                           Register carry, Register product,
                                           Register idx, Register kdx) {
  //
  //  jlong carry, x[], y[], z[];
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    huge_128 product = y[idx] * x[xstart] + carry;
  //    z[kdx] = (jlong)product;
  //    carry  = (jlong)(product >>> 64);
  //  }
  //  z[xstart] = carry;
  //
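  // Note on the movq/rorq pairs below (explanatory): the BigInteger
  // magnitude is an int[] with the most significant int first. Loading
  // 8 bytes on little-endian x86 yields (x[i+1] << 32) | x[i]; rotating
  // by 32 swaps the halves into the numerically correct 64-bit value
  // (x[i] << 32) | x[i+1].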
  Label L_first_loop, L_first_loop_exit;
  Label L_one_x, L_one_y, L_multiply;

  decrementl(xstart);
  jcc(Assembler::negative, L_one_x);

  movq(x_xstart, Address(x, xstart, Address::times_4, 0));
  rorq(x_xstart, 32); // convert big-endian to little-endian

  bind(L_first_loop);
  decrementl(idx);
  jcc(Assembler::negative, L_first_loop_exit);
  decrementl(idx);
  jcc(Assembler::negative, L_one_y);
  movq(y_idx, Address(y, idx, Address::times_4, 0));
  rorq(y_idx, 32); // convert big-endian to little-endian
  bind(L_multiply);
  movq(product, x_xstart);
  mulq(y_idx); // product(rax) * y_idx -> rdx:rax
  addq(product, carry);
  adcq(rdx, 0);
  subl(kdx, 2);
  movl(Address(z, kdx, Address::times_4, 4), product);
  shrq(product, 32);
  movl(Address(z, kdx, Address::times_4, 0), product);
  movq(carry, rdx);
  jmp(L_first_loop);

  bind(L_one_y);
  movl(y_idx, Address(y, 0));
  jmp(L_multiply);

  bind(L_one_x);
  movl(x_xstart, Address(x, 0));
  jmp(L_first_loop);

  bind(L_first_loop_exit);
}

/**
 * Multiply 64 bit by 64 bit and add 128 bit.
 */
void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z,
                                            Register yz_idx, Register idx,
                                            Register carry, Register product, int offset) {
  //  huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
  //  z[kdx] = (jlong)product;

  movq(yz_idx, Address(y, idx, Address::times_4, offset));
  rorq(yz_idx, 32); // convert big-endian to little-endian
  movq(product, x_xstart);
  mulq(yz_idx);     // product(rax) * yz_idx -> rdx:product(rax)
  movq(yz_idx, Address(z, idx, Address::times_4, offset));
  rorq(yz_idx, 32); // convert big-endian to little-endian

  add2_with_carry(rdx, product, carry, yz_idx);

  movl(Address(z, idx, Address::times_4, offset+4), product);
  shrq(product, 32);
  movl(Address(z, idx, Address::times_4, offset), product);
}

/**
 * Multiply 128 bit by 128 bit. Unrolled inner loop.
 */
void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
                                             Register yz_idx, Register idx, Register jdx,
                                             Register carry, Register product,
                                             Register carry2) {
  //   jlong carry, x[], y[], z[];
  //   int kdx = ystart+1;
  //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
  //     huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
  //     z[kdx+idx+1] = (jlong)product;
  //     jlong carry2  = (jlong)(product >>> 64);
  //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
  //     z[kdx+idx] = (jlong)product;
  //     carry  = (jlong)(product >>> 64);
  //   }
  //   idx += 2;
  //   if (idx > 0) {
  //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
  //     z[kdx+idx] = (jlong)product;
  //     carry  = (jlong)(product >>> 64);
  //   }
  //

  Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;

  movl(jdx, idx);
  andl(jdx, 0xFFFFFFFC);
  shrl(jdx, 2);

  bind(L_third_loop);
  subl(jdx, 1);
  jcc(Assembler::negative, L_third_loop_exit);
  subl(idx, 4);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
  movq(carry2, rdx);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
  movq(carry, rdx);
  jmp(L_third_loop);

  bind (L_third_loop_exit);

  andl (idx, 0x3);
  jcc(Assembler::zero, L_post_third_loop_done);

  Label L_check_1;
  subl(idx, 2);
  jcc(Assembler::negative, L_check_1);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
  movq(carry, rdx);

  bind (L_check_1);
  addl (idx, 0x2);
  andl (idx, 0x1);
  subl(idx, 1);
  jcc(Assembler::negative, L_post_third_loop_done);

  movl(yz_idx, Address(y, idx, Address::times_4, 0));
  movq(product, x_xstart);
  mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax)
  movl(yz_idx, Address(z, idx, Address::times_4, 0));

  add2_with_carry(rdx, product, yz_idx, carry);

  movl(Address(z, idx, Address::times_4, 0), product);
  shrq(product, 32);

  shlq(rdx, 32);
  orq(product, rdx);
  movq(carry, product);

  bind(L_post_third_loop_done);
}

/**
 * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop.
 */
void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z,
                                                  Register carry, Register carry2,
                                                  Register idx, Register jdx,
                                                  Register yz_idx1, Register yz_idx2,
                                                  Register tmp, Register tmp3, Register tmp4) {
  assert(UseBMI2Instructions, "should be used only when BMI2 is available");

  //   jlong carry, x[], y[], z[];
  //   int kdx = ystart+1;
  //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
  //     huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry;
  //     jlong carry2  = (jlong)(tmp3 >>> 64);
  //     huge_128 tmp4 = (y[idx] * rdx) + z[kdx+idx] + carry2;
  //     carry  = (jlong)(tmp4 >>> 64);
  //     z[kdx+idx+1] = (jlong)tmp3;
  //     z[kdx+idx] = (jlong)tmp4;
  //   }
  //   idx += 2;
  //   if (idx > 0) {
  //     yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry;
  //     z[kdx+idx] = (jlong)yz_idx1;
  //     carry  = (jlong)(yz_idx1 >>> 64);
  //   }
  //

  Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;

  movl(jdx, idx);
  andl(jdx, 0xFFFFFFFC);
  shrl(jdx, 2);

  bind(L_third_loop);
  subl(jdx, 1);
  jcc(Assembler::negative, L_third_loop_exit);
  subl(idx, 4);

  movq(yz_idx1, Address(y, idx, Address::times_4,  8));
  rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
  movq(yz_idx2, Address(y, idx, Address::times_4,  0));
  rorxq(yz_idx2, yz_idx2, 32);

  mulxq(tmp4, tmp3, yz_idx1);  //  yz_idx1 * rdx -> tmp4:tmp3
  mulxq(carry2, tmp, yz_idx2); //  yz_idx2 * rdx -> carry2:tmp

  movq(yz_idx1, Address(z, idx, Address::times_4,  8));
  rorxq(yz_idx1, yz_idx1, 32);
  movq(yz_idx2, Address(z, idx, Address::times_4,  0));
  rorxq(yz_idx2, yz_idx2, 32);
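  // (Explanatory note.) On ADX-capable CPUs the block below keeps two
  // independent carry chains in flight: adcxq reads and writes only CF,
  // and adoxq only OF, so the two accumulations do not serialize on a
  // single carry flag the way a plain adc sequence would.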
  if (VM_Version::supports_adx()) {
    adcxq(tmp3, carry);
    adoxq(tmp3, yz_idx1);

    adcxq(tmp4, tmp);
    adoxq(tmp4, yz_idx2);

    movl(carry, 0); // does not affect flags
    adcxq(carry2, carry);
    adoxq(carry2, carry);
  } else {
    add2_with_carry(tmp4, tmp3, carry, yz_idx1);
    add2_with_carry(carry2, tmp4, tmp, yz_idx2);
  }
  movq(carry, carry2);

  movl(Address(z, idx, Address::times_4, 12), tmp3);
  shrq(tmp3, 32);
  movl(Address(z, idx, Address::times_4,  8), tmp3);

  movl(Address(z, idx, Address::times_4,  4), tmp4);
  shrq(tmp4, 32);
  movl(Address(z, idx, Address::times_4,  0), tmp4);

  jmp(L_third_loop);

  bind (L_third_loop_exit);

  andl (idx, 0x3);
  jcc(Assembler::zero, L_post_third_loop_done);

  Label L_check_1;
  subl(idx, 2);
  jcc(Assembler::negative, L_check_1);

  movq(yz_idx1, Address(y, idx, Address::times_4,  0));
  rorxq(yz_idx1, yz_idx1, 32);
  mulxq(tmp4, tmp3, yz_idx1); //  yz_idx1 * rdx -> tmp4:tmp3
  movq(yz_idx2, Address(z, idx, Address::times_4,  0));
  rorxq(yz_idx2, yz_idx2, 32);

  add2_with_carry(tmp4, tmp3, carry, yz_idx2);

  movl(Address(z, idx, Address::times_4,  4), tmp3);
  shrq(tmp3, 32);
  movl(Address(z, idx, Address::times_4,  0), tmp3);
  movq(carry, tmp4);

  bind (L_check_1);
  addl (idx, 0x2);
  andl (idx, 0x1);
  subl(idx, 1);
  jcc(Assembler::negative, L_post_third_loop_done);
  movl(tmp4, Address(y, idx, Address::times_4,  0));
  mulxq(carry2, tmp3, tmp4);  //  tmp4 * rdx -> carry2:tmp3
  movl(tmp4, Address(z, idx, Address::times_4,  0));

  add2_with_carry(carry2, tmp3, tmp4, carry);

  movl(Address(z, idx, Address::times_4,  0), tmp3);
  shrq(tmp3, 32);

  shlq(carry2, 32);
  orq(tmp3, carry2);
  movq(carry, tmp3);

  bind(L_post_third_loop_done);
}

/**
 * Code for BigInteger::multiplyToLen() intrinsic.
 *
 * rdi: x
 * rax: xlen
 * rsi: y
 * rcx: ylen
 * r8:  z
 * r11: zlen
 * r12: tmp1
 * r13: tmp2
 * r14: tmp3
 * r15: tmp4
 * rbx: tmp5
 *
 */
void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
                                     Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) {
  ShortBranchVerifier sbv(this);
  assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx);

  push(tmp1);
  push(tmp2);
  push(tmp3);
  push(tmp4);
  push(tmp5);

  push(xlen);
  push(zlen);

  const Register idx = tmp1;
  const Register kdx = tmp2;
  const Register xstart = tmp3;

  const Register y_idx = tmp4;
  const Register carry = tmp5;
  const Register product  = xlen;
  const Register x_xstart = zlen;  // reuse register

  // First Loop.
  //
  //  final static long LONG_MASK = 0xffffffffL;
  //  int xstart = xlen - 1;
  //  int ystart = ylen - 1;
  //  long carry = 0;
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
  //    z[kdx] = (int)product;
  //    carry = product >>> 32;
  //  }
  //  z[xstart] = (int)carry;
  //

  movl(idx, ylen);      // idx = ylen;
  movl(kdx, zlen);      // kdx = xlen+ylen;
  xorq(carry, carry);   // carry = 0;

  Label L_done;

  movl(xstart, xlen);
  decrementl(xstart);
  jcc(Assembler::negative, L_done);

  multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);

  Label L_second_loop;
  testl(kdx, kdx);
  jcc(Assembler::zero, L_second_loop);

  Label L_carry;
  subl(kdx, 1);
  jcc(Assembler::zero, L_carry);

  movl(Address(z, kdx, Address::times_4,  0), carry);
  shrq(carry, 32);
  subl(kdx, 1);

  bind(L_carry);
  movl(Address(z, kdx, Address::times_4,  0), carry);

  // Second and third (nested) loops.
  //
  // for (int i = xstart-1; i >= 0; i--) { // Second loop
  //   carry = 0;
  //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
  //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
  //                    (z[k] & LONG_MASK) + carry;
  //     z[k] = (int)product;
  //     carry = product >>> 32;
  //   }
  //   z[i] = (int)carry;
  // }
  //
  // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx

  const Register jdx = tmp1;

  bind(L_second_loop);
  xorl(carry, carry);    // carry = 0;
  movl(jdx, ylen);       // j = ystart+1

  subl(xstart, 1);       // i = xstart-1;
  jcc(Assembler::negative, L_done);

  push (z);

  Label L_last_x;
  lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j
  subl(xstart, 1);       // i = xstart-1;
  jcc(Assembler::negative, L_last_x);

  if (UseBMI2Instructions) {
    movq(rdx, Address(x, xstart, Address::times_4,  0));
    rorxq(rdx, rdx, 32); // convert big-endian to little-endian
  } else {
    movq(x_xstart, Address(x, xstart, Address::times_4,  0));
    rorq(x_xstart, 32);  // convert big-endian to little-endian
  }

  Label L_third_loop_prologue;
  bind(L_third_loop_prologue);

  push (x);
  push (xstart);
  push (ylen);


  if (UseBMI2Instructions) {
    multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4);
  } else { // !UseBMI2Instructions
    multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
  }

  pop(ylen);
  pop(xlen);
  pop(x);
  pop(z);

  movl(tmp3, xlen);
  addl(tmp3, 1);
  movl(Address(z, tmp3, Address::times_4,  0), carry);
  subl(tmp3, 1);
  jccb(Assembler::negative, L_done);

  shrq(carry, 32);
  movl(Address(z, tmp3, Address::times_4,  0), carry);
  jmp(L_second_loop);

  // Next infrequent code is moved outside loops.
  bind(L_last_x);
  if (UseBMI2Instructions) {
    movl(rdx, Address(x,  0));
  } else {
    movl(x_xstart, Address(x,  0));
  }
  jmp(L_third_loop_prologue);

  bind(L_done);

  pop(zlen);
  pop(xlen);

  pop(tmp5);
  pop(tmp4);
  pop(tmp3);
  pop(tmp2);
  pop(tmp1);
}
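// An illustrative Java-level sketch of the contract implemented by
// vectorized_mismatch below (modeled on ArraysSupport.vectorizedMismatch-
// style semantics; the sketch is ours, not the exact library source):
//   for (int i = 0; i < length; i++) {
//     if (a[i] != b[i]) return i;  // index of first mismatching element
//   }
//   return -1;                     // ranges match in full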
void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
                                         Register result, Register tmp1, Register tmp2, XMMRegister rymm0, XMMRegister rymm1, XMMRegister rymm2){
  assert(UseSSE42Intrinsics, "SSE4.2 must be enabled.");
  Label VECTOR64_LOOP, VECTOR64_TAIL, VECTOR64_NOT_EQUAL, VECTOR32_TAIL;
  Label VECTOR32_LOOP, VECTOR16_LOOP, VECTOR8_LOOP, VECTOR4_LOOP;
  Label VECTOR16_TAIL, VECTOR8_TAIL, VECTOR4_TAIL;
  Label VECTOR32_NOT_EQUAL, VECTOR16_NOT_EQUAL, VECTOR8_NOT_EQUAL, VECTOR4_NOT_EQUAL;
  Label SAME_TILL_END, DONE;
  Label BYTES_LOOP, BYTES_TAIL, BYTES_NOT_EQUAL;

  // scale is in rcx on both Win64 and Unix
  ShortBranchVerifier sbv(this);

  shlq(length);
  xorq(result, result);

  if ((UseAVX > 2) &&
      VM_Version::supports_avx512vlbw()) {
    set_vector_masking();  // opening of the stub context for programming mask registers
    cmpq(length, 64);
    jcc(Assembler::less, VECTOR32_TAIL);
    movq(tmp1, length);
    andq(tmp1, 0x3F);      // tail count
    andq(length, ~(0x3F)); // vector count

    bind(VECTOR64_LOOP);
    // AVX512 code to compare 64 byte vectors.
    evmovdqub(rymm0, Address(obja, result), Assembler::AVX_512bit);
    evpcmpeqb(k7, rymm0, Address(objb, result), Assembler::AVX_512bit);
    kortestql(k7, k7);
    jcc(Assembler::aboveEqual, VECTOR64_NOT_EQUAL);     // mismatch
    addq(result, 64);
    subq(length, 64);
    jccb(Assembler::notZero, VECTOR64_LOOP);

    //bind(VECTOR64_TAIL);
    testq(tmp1, tmp1);
    jcc(Assembler::zero, SAME_TILL_END);

    bind(VECTOR64_TAIL);
    // AVX512 code to compare up to 63 byte vectors.
    // Save k1
    kmovql(k3, k1);
    mov64(tmp2, 0xFFFFFFFFFFFFFFFF);
    shlxq(tmp2, tmp2, tmp1);
    notq(tmp2);
    kmovql(k1, tmp2);

    evmovdqub(rymm0, k1, Address(obja, result), Assembler::AVX_512bit);
    evpcmpeqb(k7, k1, rymm0, Address(objb, result), Assembler::AVX_512bit);

    ktestql(k7, k1);
    // Restore k1
    kmovql(k1, k3);
    jcc(Assembler::below, SAME_TILL_END);     // not mismatch

    bind(VECTOR64_NOT_EQUAL);
    kmovql(tmp1, k7);
    notq(tmp1);
    tzcntq(tmp1, tmp1);
    addq(result, tmp1);
    shrq(result);
    jmp(DONE);
    bind(VECTOR32_TAIL);
    clear_vector_masking();   // closing of the stub context for programming mask registers
  }

  cmpq(length, 8);
  jcc(Assembler::equal, VECTOR8_LOOP);
  jcc(Assembler::less, VECTOR4_TAIL);

  if (UseAVX >= 2) {

    cmpq(length, 16);
    jcc(Assembler::equal, VECTOR16_LOOP);
    jcc(Assembler::less, VECTOR8_LOOP);

    cmpq(length, 32);
    jccb(Assembler::less, VECTOR16_TAIL);

    subq(length, 32);
    bind(VECTOR32_LOOP);
    vmovdqu(rymm0, Address(obja, result));
    vmovdqu(rymm1, Address(objb, result));
    vpxor(rymm2, rymm0, rymm1, Assembler::AVX_256bit);
    vptest(rymm2, rymm2);
    jcc(Assembler::notZero, VECTOR32_NOT_EQUAL); // mismatch found
    addq(result, 32);
    subq(length, 32);
    jccb(Assembler::greaterEqual, VECTOR32_LOOP);
    addq(length, 32);
    jcc(Assembler::equal, SAME_TILL_END);
    // falling through if less than 32 bytes left // close the branch here.
    bind(VECTOR16_TAIL);
    cmpq(length, 16);
    jccb(Assembler::less, VECTOR8_TAIL);
    bind(VECTOR16_LOOP);
    movdqu(rymm0, Address(obja, result));
    movdqu(rymm1, Address(objb, result));
    vpxor(rymm2, rymm0, rymm1, Assembler::AVX_128bit);
    ptest(rymm2, rymm2);
    jcc(Assembler::notZero, VECTOR16_NOT_EQUAL); // mismatch found
    addq(result, 16);
    subq(length, 16);
    jcc(Assembler::equal, SAME_TILL_END);
    // falling through if less than 16 bytes left
  } else { // regular intrinsics

    cmpq(length, 16);
    jccb(Assembler::less, VECTOR8_TAIL);

    subq(length, 16);
    bind(VECTOR16_LOOP);
    movdqu(rymm0, Address(obja, result));
    movdqu(rymm1, Address(objb, result));
    pxor(rymm0, rymm1);
    ptest(rymm0, rymm0);
    jcc(Assembler::notZero, VECTOR16_NOT_EQUAL); // mismatch found
    addq(result, 16);
    subq(length, 16);
    jccb(Assembler::greaterEqual, VECTOR16_LOOP);
    addq(length, 16);
    jcc(Assembler::equal, SAME_TILL_END);
    // falling through if less than 16 bytes left
  }

  bind(VECTOR8_TAIL);
  cmpq(length, 8);
  jccb(Assembler::less, VECTOR4_TAIL);
  bind(VECTOR8_LOOP);
  movq(tmp1, Address(obja, result));
  movq(tmp2, Address(objb, result));
  xorq(tmp1, tmp2);
  testq(tmp1, tmp1);
  jcc(Assembler::notZero, VECTOR8_NOT_EQUAL); // mismatch found
  addq(result, 8);
  subq(length, 8);
  jcc(Assembler::equal, SAME_TILL_END);
  // falling through if less than 8 bytes left

  bind(VECTOR4_TAIL);
  cmpq(length, 4);
  jccb(Assembler::less, BYTES_TAIL);
  bind(VECTOR4_LOOP);
  movl(tmp1, Address(obja, result));
  xorl(tmp1, Address(objb, result));
  testl(tmp1, tmp1);
  jcc(Assembler::notZero, VECTOR4_NOT_EQUAL); // mismatch found
  addq(result, 4);
  subq(length, 4);
  jcc(Assembler::equal, SAME_TILL_END);
  // falling through if less than 4 bytes left

  bind(BYTES_TAIL);
  bind(BYTES_LOOP);
  load_unsigned_byte(tmp1, Address(obja, result));
  load_unsigned_byte(tmp2, Address(objb, result));
  xorl(tmp1, tmp2);
  testl(tmp1, tmp1);
  jccb(Assembler::notZero, BYTES_NOT_EQUAL); // mismatch found
  decq(length);
  jccb(Assembler::zero, SAME_TILL_END);
  incq(result);
  load_unsigned_byte(tmp1, Address(obja, result));
  load_unsigned_byte(tmp2, Address(objb, result));
  xorl(tmp1, tmp2);
  testl(tmp1, tmp1);
  jccb(Assembler::notZero, BYTES_NOT_EQUAL); // mismatch found
  decq(length);
  jccb(Assembler::zero, SAME_TILL_END);
  incq(result);
  load_unsigned_byte(tmp1, Address(obja, result));
  load_unsigned_byte(tmp2, Address(objb, result));
  xorl(tmp1, tmp2);
  testl(tmp1, tmp1);
  jccb(Assembler::notZero, BYTES_NOT_EQUAL); // mismatch found
  jmpb(SAME_TILL_END);

  if (UseAVX >= 2) {
    bind(VECTOR32_NOT_EQUAL);
    vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_256bit);
    vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_256bit);
    vpxor(rymm0, rymm0, rymm2, Assembler::AVX_256bit);
    vpmovmskb(tmp1, rymm0);
    bsfq(tmp1, tmp1);
    addq(result, tmp1);
    shrq(result);
    jmpb(DONE);
  }

  bind(VECTOR16_NOT_EQUAL);
  if (UseAVX >= 2) {
    vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_128bit);
    vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_128bit);
    pxor(rymm0, rymm2);
  } else {
    pcmpeqb(rymm2, rymm2);
    pxor(rymm0, rymm1);
    pcmpeqb(rymm0, rymm1);
    pxor(rymm0, rymm2);
  }
  pmovmskb(tmp1, rymm0);
  bsfq(tmp1, tmp1);
  addq(result, tmp1);
  shrq(result);
  jmpb(DONE);

  bind(VECTOR8_NOT_EQUAL);
  bind(VECTOR4_NOT_EQUAL);
  bsfq(tmp1, tmp1);
  shrq(tmp1, 3);
  addq(result, tmp1);
  bind(BYTES_NOT_EQUAL);
  shrq(result);
  jmpb(DONE);

  bind(SAME_TILL_END);
  mov64(result, -1);

  bind(DONE);
}

// Helper functions for square_to_len()

/**
 * Store the squares of x[], right shifted one bit (divided by 2) into z[]
 * Preserves x and z and modifies rest of the registers.
 */
void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
  // Perform square and right shift by 1
  // Handle odd xlen case first, then for even xlen do the following
  //  jlong carry = 0;
  //  for (int j=0, i=0; j < xlen; j+=2, i+=4) {
  //    huge_128 product = x[j:j+1] * x[j:j+1];
  //    z[i:i+1] = (carry << 63) | (jlong)(product >>> 65);
  //    z[i+2:i+3] = (jlong)(product >>> 1);
  //    carry = (jlong)product;
  //  }

  xorq(tmp5, tmp5);     // carry
  xorq(rdxReg, rdxReg);
  xorl(tmp1, tmp1);     // index for x
  xorl(tmp4, tmp4);     // index for z

  Label L_first_loop, L_first_loop_exit;

  testl(xlen, 1);
  jccb(Assembler::zero, L_first_loop); // jump if xlen is even

  // Square and right shift by 1 the odd element using 32 bit multiply
  movl(raxReg, Address(x, tmp1, Address::times_4, 0));
  imulq(raxReg, raxReg);
  shrq(raxReg, 1);
  adcq(tmp5, 0);
  movq(Address(z, tmp4, Address::times_4, 0), raxReg);
  incrementl(tmp1);
  addl(tmp4, 2);

  // Square and right shift by 1 the rest using 64 bit multiply
  bind(L_first_loop);
  cmpptr(tmp1, xlen);
  jccb(Assembler::equal, L_first_loop_exit);

  // Square
  movq(raxReg, Address(x, tmp1, Address::times_4,  0));
  rorq(raxReg, 32);    // convert big-endian to little-endian
  mulq(raxReg);        // 64-bit multiply rax * rax -> rdx:rax

  // Right shift by 1 and save carry
  shrq(tmp5, 1);       // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1
  rcrq(rdxReg, 1);
  rcrq(raxReg, 1);
  adcq(tmp5, 0);

  // Store result in z
  movq(Address(z, tmp4, Address::times_4, 0), rdxReg);
  movq(Address(z, tmp4, Address::times_4, 8), raxReg);

  // Update indices for x and z
  addl(tmp1, 2);
  addl(tmp4, 4);
  jmp(L_first_loop);

  bind(L_first_loop_exit);
}


/**
 * Perform the following multiply add operation using BMI2 instructions
 * carry:sum = sum + op1*op2 + carry
 * op2 should be in rdx
 * op2 is preserved, all other registers are modified
 */
void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) {
  // assert op2 is rdx
  mulxq(tmp2, op1, op1); // op1 * op2 -> tmp2:op1
  addq(sum, carry);
  adcq(tmp2, 0);
  addq(sum, op1);
  adcq(tmp2, 0);
  movq(carry, tmp2);
}

/**
 * Perform the following multiply add operation:
 * carry:sum = sum + op1*op2 + carry
 * Preserves op1, op2 and modifies rest of registers
 */
void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) {
  //  rdx:rax = op1 * op2
  movq(raxReg, op2);
  mulq(op1);

  //  rdx:rax = sum + carry + rdx:rax
  addq(sum, carry);
  adcq(rdxReg, 0);
  addq(sum, raxReg);
  adcq(rdxReg, 0);

  // carry:sum = rdx:sum
  movq(carry, rdxReg);
}

/**
 * Add 64 bit long carry into z[] with carry propagation.
 * Preserves z and carry register values and modifies rest of registers.
 *
 */
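// Sketch of the propagation performed below (illustrative): add 'carry' as
// a 64-bit value at the tail of z[], then, while the addition keeps
// carrying out, step back two ints at a time (toward the more significant
// words, which sit at lower indices in the big-endian int[]) and add 1.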
void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) {
  Label L_fourth_loop, L_fourth_loop_exit;

  movl(tmp1, 1);
  subl(zlen, 2);
  addq(Address(z, zlen, Address::times_4, 0), carry);

  bind(L_fourth_loop);
  jccb(Assembler::carryClear, L_fourth_loop_exit);
  subl(zlen, 2);
  jccb(Assembler::negative, L_fourth_loop_exit);
  addq(Address(z, zlen, Address::times_4, 0), tmp1);
  jmp(L_fourth_loop);
  bind(L_fourth_loop_exit);
}

/**
 * Shift z[] left by 1 bit.
 * Preserves x, len, z and zlen registers and modifies rest of the registers.
 *
 */
void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) {

  Label L_fifth_loop, L_fifth_loop_exit;

  // Fifth loop
  // Perform primitiveLeftShift(z, zlen, 1)

  const Register prev_carry = tmp1;
  const Register new_carry = tmp4;
  const Register value = tmp2;
  const Register zidx = tmp3;

  // int zidx, carry;
  // long value;
  // carry = 0;
  // for (zidx = zlen-2; zidx >= 0; zidx -= 2) {
  //    (carry:value)  = (z[i] << 1) | carry ;
  //    z[i] = value;
  // }

  movl(zidx, zlen);
  xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register

  bind(L_fifth_loop);
  decl(zidx);  // Use decl to preserve carry flag
  decl(zidx);
  jccb(Assembler::negative, L_fifth_loop_exit);

  if (UseBMI2Instructions) {
    movq(value, Address(z, zidx, Address::times_4, 0));
    rclq(value, 1);
    rorxq(value, value, 32);
    movq(Address(z, zidx, Address::times_4,  0), value);  // Store back in big endian form
  }
  else {
    // clear new_carry
    xorl(new_carry, new_carry);

    // Shift z[i] by 1, or in previous carry and save new carry
    movq(value, Address(z, zidx, Address::times_4, 0));
    shlq(value, 1);
    adcl(new_carry, 0);

    orq(value, prev_carry);
    rorq(value, 0x20);
    movq(Address(z, zidx, Address::times_4,  0), value);  // Store back in big endian form

    // Set previous carry = new carry
    movl(prev_carry, new_carry);
  }
  jmp(L_fifth_loop);

  bind(L_fifth_loop_exit);
}


/**
 * Code for BigInteger::squareToLen() intrinsic
 *
 * rdi: x
 * rsi: len
 * r8:  z
 * rcx: zlen
 * r12: tmp1
 * r13: tmp2
 * r14: tmp3
 * r15: tmp4
 * rbx: tmp5
 *
 */
void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {

  Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, fifth_loop, fifth_loop_exit, L_last_x, L_multiply;
  push(tmp1);
  push(tmp2);
  push(tmp3);
  push(tmp4);
  push(tmp5);

  // First loop
  // Store the squares, right shifted one bit (i.e., divided by 2).
  square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg);

  // Add in off-diagonal sums.
  //
  // Second, third (nested) and fourth loops.
9920 // zlen +=2;
9921 // for (int xidx=len-2,zidx=zlen-4; xidx > 0; xidx-=2,zidx-=4) {
9922 // carry = 0;
9923 // long op2 = x[xidx:xidx+1];
9924 // for (int j=xidx-2,k=zidx; j >= 0; j-=2) {
9925 // k -= 2;
9926 // long op1 = x[j:j+1];
9927 // long sum = z[k:k+1];
9928 // carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs);
9929 // z[k:k+1] = sum;
9930 // }
9931 // add_one_64(z, k, carry, tmp_regs);
9932 // }
9933
9934 const Register carry = tmp5;
9935 const Register sum = tmp3;
9936 const Register op1 = tmp4;
9937 Register op2 = tmp2;
9938
9939 push(zlen);
9940 push(len);
9941 addl(zlen, 2);
9942 bind(L_second_loop);
9943 xorq(carry, carry);
9944 subl(zlen, 4);
9945 subl(len, 2);
9946 push(zlen);
9947 push(len);
9948 cmpl(len, 0);
9949 jccb(Assembler::lessEqual, L_second_loop_exit);
9950
9951 // Multiply an array by one 64 bit long.
9952 if (UseBMI2Instructions) {
9953 op2 = rdxReg;
9954 movq(op2, Address(x, len, Address::times_4, 0));
9955 rorxq(op2, op2, 32);
9956 }
9957 else {
9958 movq(op2, Address(x, len, Address::times_4, 0));
9959 rorq(op2, 32);
9960 }
9961
9962 bind(L_third_loop);
9963 decrementl(len);
9964 jccb(Assembler::negative, L_third_loop_exit);
9965 decrementl(len);
9966 jccb(Assembler::negative, L_last_x);
9967
9968 movq(op1, Address(x, len, Address::times_4, 0));
9969 rorq(op1, 32);
9970
9971 bind(L_multiply);
9972 subl(zlen, 2);
9973 movq(sum, Address(z, zlen, Address::times_4, 0));
9974
9975 // Multiply 64 bit by 64 bit; add the low 64 bits of the product into sum and keep the high 64 bits as carry.
9976 if (UseBMI2Instructions) {
9977 multiply_add_64_bmi2(sum, op1, op2, carry, tmp2);
9978 }
9979 else {
9980 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
9981 }
9982
9983 movq(Address(z, zlen, Address::times_4, 0), sum);
9984
9985 jmp(L_third_loop);
9986 bind(L_third_loop_exit);
9987
9988 // Fourth loop
9989 // Add 64 bit long carry into z with carry propagation.
9990 // Uses the adjusted zlen.
9991 add_one_64(z, zlen, carry, tmp1);
9992
9993 pop(len);
9994 pop(zlen);
9995 jmp(L_second_loop);
9996
9997 // Infrequently executed code is moved outside the loops.
9998 bind(L_last_x);
9999 movl(op1, Address(x, 0));
10000 jmp(L_multiply);
10001
10002 bind(L_second_loop_exit);
10003 pop(len);
10004 pop(zlen);
10005 pop(len);
10006 pop(zlen);
10007
10008 // Fifth loop
10009 // Shift z left 1 bit.
10010 lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4);
10011
10012 // z[zlen-1] |= x[len-1] & 1;
10013 movl(tmp3, Address(x, len, Address::times_4, -4));
10014 andl(tmp3, 1);
10015 orl(Address(z, zlen, Address::times_4, -4), tmp3);
10016
10017 pop(tmp5);
10018 pop(tmp4);
10019 pop(tmp3);
10020 pop(tmp2);
10021 pop(tmp1);
10022 }
10023
10024 /**
10025 * Helper function for mul_add()
10026 * Multiply in[] by int k and add to out[] starting at offset offs, using a
10027 * 128 bit by 32 bit multiply; the carry is returned in tmp5.
10028 * Only the quad-int-aligned (multiple of four ints) prefix of in[] is processed here.
10029 * k is in rdxReg when BMI2 instructions are used; otherwise it is in tmp2.
10030 * This function preserves the out, in and k registers.
10031 * len and offset point to the appropriate index in "in" and "out" respectively.
10032 * tmp5 holds the carry.
10033 * All other registers are temporary and are modified.
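*
* A rough sketch of the loop below, in the Java-style pseudocode used
* elsewhere in this file (endianness fixups and register scheduling omitted):
*   for (int i = len/4; --i >= 0; ) {
*     len -= 4; offset -= 4;
*     carry:out[offset+2:offset+3] = out[offset+2:offset+3] + k*in[len+2:len+3] + carry;
*     carry:out[offset:offset+1]   = out[offset:offset+1]   + k*in[len:len+1]   + carry;
*   }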
10034 * 10035 */ 10036 void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in, 10037 Register offset, Register len, Register tmp1, Register tmp2, Register tmp3, 10038 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 10039 10040 Label L_first_loop, L_first_loop_exit; 10041 10042 movl(tmp1, len); 10043 shrl(tmp1, 2); 10044 10045 bind(L_first_loop); 10046 subl(tmp1, 1); 10047 jccb(Assembler::negative, L_first_loop_exit); 10048 10049 subl(len, 4); 10050 subl(offset, 4); 10051 10052 Register op2 = tmp2; 10053 const Register sum = tmp3; 10054 const Register op1 = tmp4; 10055 const Register carry = tmp5; 10056 10057 if (UseBMI2Instructions) { 10058 op2 = rdxReg; 10059 } 10060 10061 movq(op1, Address(in, len, Address::times_4, 8)); 10062 rorq(op1, 32); 10063 movq(sum, Address(out, offset, Address::times_4, 8)); 10064 rorq(sum, 32); 10065 if (UseBMI2Instructions) { 10066 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 10067 } 10068 else { 10069 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 10070 } 10071 // Store back in big endian from little endian 10072 rorq(sum, 0x20); 10073 movq(Address(out, offset, Address::times_4, 8), sum); 10074 10075 movq(op1, Address(in, len, Address::times_4, 0)); 10076 rorq(op1, 32); 10077 movq(sum, Address(out, offset, Address::times_4, 0)); 10078 rorq(sum, 32); 10079 if (UseBMI2Instructions) { 10080 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 10081 } 10082 else { 10083 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 10084 } 10085 // Store back in big endian from little endian 10086 rorq(sum, 0x20); 10087 movq(Address(out, offset, Address::times_4, 0), sum); 10088 10089 jmp(L_first_loop); 10090 bind(L_first_loop_exit); 10091 } 10092 10093 /** 10094 * Code for BigInteger::mulAdd() intrinsic 10095 * 10096 * rdi: out 10097 * rsi: in 10098 * r11: offs (out.length - offset) 10099 * rcx: len 10100 * r8: k 10101 * r12: tmp1 10102 * r13: tmp2 10103 * r14: tmp3 10104 * r15: tmp4 10105 * rbx: tmp5 10106 * Multiply the in[] by word k and add to out[], return the carry in rax 10107 */ 10108 void MacroAssembler::mul_add(Register out, Register in, Register offs, 10109 Register len, Register k, Register tmp1, Register tmp2, Register tmp3, 10110 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 10111 10112 Label L_carry, L_last_in, L_done; 10113 10114 // carry = 0; 10115 // for (int j=len-1; j >= 0; j--) { 10116 // long product = (in[j] & LONG_MASK) * kLong + 10117 // (out[offs] & LONG_MASK) + carry; 10118 // out[offs--] = (int)product; 10119 // carry = product >>> 32; 10120 // } 10121 // 10122 push(tmp1); 10123 push(tmp2); 10124 push(tmp3); 10125 push(tmp4); 10126 push(tmp5); 10127 10128 Register op2 = tmp2; 10129 const Register sum = tmp3; 10130 const Register op1 = tmp4; 10131 const Register carry = tmp5; 10132 10133 if (UseBMI2Instructions) { 10134 op2 = rdxReg; 10135 movl(op2, k); 10136 } 10137 else { 10138 movl(op2, k); 10139 } 10140 10141 xorq(carry, carry); 10142 10143 //First loop 10144 10145 //Multiply in[] by k in a 4 way unrolled loop using 128 bit by 32 bit multiply 10146 //The carry is in tmp5 10147 mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg); 10148 10149 //Multiply the trailing in[] entry using 64 bit by 32 bit, if any 10150 decrementl(len); 10151 jccb(Assembler::negative, L_carry); 10152 decrementl(len); 10153 jccb(Assembler::negative, L_last_in); 10154 10155 movq(op1, Address(in, len, Address::times_4, 0)); 10156 rorq(op1, 32); 10157 10158 subl(offs, 
2); 10159 movq(sum, Address(out, offs, Address::times_4, 0)); 10160 rorq(sum, 32); 10161 10162 if (UseBMI2Instructions) { 10163 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 10164 } 10165 else { 10166 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 10167 } 10168 10169 // Store back in big endian from little endian 10170 rorq(sum, 0x20); 10171 movq(Address(out, offs, Address::times_4, 0), sum); 10172 10173 testl(len, len); 10174 jccb(Assembler::zero, L_carry); 10175 10176 //Multiply the last in[] entry, if any 10177 bind(L_last_in); 10178 movl(op1, Address(in, 0)); 10179 movl(sum, Address(out, offs, Address::times_4, -4)); 10180 10181 movl(raxReg, k); 10182 mull(op1); //tmp4 * eax -> edx:eax 10183 addl(sum, carry); 10184 adcl(rdxReg, 0); 10185 addl(sum, raxReg); 10186 adcl(rdxReg, 0); 10187 movl(carry, rdxReg); 10188 10189 movl(Address(out, offs, Address::times_4, -4), sum); 10190 10191 bind(L_carry); 10192 //return tmp5/carry as carry in rax 10193 movl(rax, carry); 10194 10195 bind(L_done); 10196 pop(tmp5); 10197 pop(tmp4); 10198 pop(tmp3); 10199 pop(tmp2); 10200 pop(tmp1); 10201 } 10202 #endif 10203 10204 /** 10205 * Emits code to update CRC-32 with a byte value according to constants in table 10206 * 10207 * @param [in,out]crc Register containing the crc. 10208 * @param [in]val Register containing the byte to fold into the CRC. 10209 * @param [in]table Register containing the table of crc constants. 10210 * 10211 * uint32_t crc; 10212 * val = crc_table[(val ^ crc) & 0xFF]; 10213 * crc = val ^ (crc >> 8); 10214 * 10215 */ 10216 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 10217 xorl(val, crc); 10218 andl(val, 0xFF); 10219 shrl(crc, 8); // unsigned shift 10220 xorl(crc, Address(table, val, Address::times_4, 0)); 10221 } 10222 10223 /** 10224 * Fold 128-bit data chunk 10225 */ 10226 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) { 10227 if (UseAVX > 0) { 10228 vpclmulhdq(xtmp, xK, xcrc); // [123:64] 10229 vpclmulldq(xcrc, xK, xcrc); // [63:0] 10230 vpxor(xcrc, xcrc, Address(buf, offset), 0 /* vector_len */); 10231 pxor(xcrc, xtmp); 10232 } else { 10233 movdqa(xtmp, xcrc); 10234 pclmulhdq(xtmp, xK); // [123:64] 10235 pclmulldq(xcrc, xK); // [63:0] 10236 pxor(xcrc, xtmp); 10237 movdqu(xtmp, Address(buf, offset)); 10238 pxor(xcrc, xtmp); 10239 } 10240 } 10241 10242 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) { 10243 if (UseAVX > 0) { 10244 vpclmulhdq(xtmp, xK, xcrc); 10245 vpclmulldq(xcrc, xK, xcrc); 10246 pxor(xcrc, xbuf); 10247 pxor(xcrc, xtmp); 10248 } else { 10249 movdqa(xtmp, xcrc); 10250 pclmulhdq(xtmp, xK); 10251 pclmulldq(xcrc, xK); 10252 pxor(xcrc, xbuf); 10253 pxor(xcrc, xtmp); 10254 } 10255 } 10256 10257 /** 10258 * 8-bit folds to compute 32-bit CRC 10259 * 10260 * uint64_t xcrc; 10261 * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8); 10262 */ 10263 void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) { 10264 movdl(tmp, xcrc); 10265 andl(tmp, 0xFF); 10266 movdl(xtmp, Address(table, tmp, Address::times_4, 0)); 10267 psrldq(xcrc, 1); // unsigned shift one byte 10268 pxor(xcrc, xtmp); 10269 } 10270 10271 /** 10272 * uint32_t crc; 10273 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8); 10274 */ 10275 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 10276 movl(tmp, crc); 10277 andl(tmp, 0xFF); 10278 shrl(crc, 8); 10279 xorl(crc, 
Address(table, tmp, Address::times_4, 0)); 10280 } 10281 10282 /** 10283 * @param crc register containing existing CRC (32-bit) 10284 * @param buf register pointing to input byte buffer (byte*) 10285 * @param len register containing number of bytes 10286 * @param table register that will contain address of CRC table 10287 * @param tmp scratch register 10288 */ 10289 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) { 10290 assert_different_registers(crc, buf, len, table, tmp, rax); 10291 10292 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned; 10293 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop; 10294 10295 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 10296 // context for the registers used, where all instructions below are using 128-bit mode 10297 // On EVEX without VL and BW, these instructions will all be AVX. 10298 if (VM_Version::supports_avx512vlbw()) { 10299 movl(tmp, 0xffff); 10300 kmovwl(k1, tmp); 10301 } 10302 10303 lea(table, ExternalAddress(StubRoutines::crc_table_addr())); 10304 notl(crc); // ~crc 10305 cmpl(len, 16); 10306 jcc(Assembler::less, L_tail); 10307 10308 // Align buffer to 16 bytes 10309 movl(tmp, buf); 10310 andl(tmp, 0xF); 10311 jccb(Assembler::zero, L_aligned); 10312 subl(tmp, 16); 10313 addl(len, tmp); 10314 10315 align(4); 10316 BIND(L_align_loop); 10317 movsbl(rax, Address(buf, 0)); // load byte with sign extension 10318 update_byte_crc32(crc, rax, table); 10319 increment(buf); 10320 incrementl(tmp); 10321 jccb(Assembler::less, L_align_loop); 10322 10323 BIND(L_aligned); 10324 movl(tmp, len); // save 10325 shrl(len, 4); 10326 jcc(Assembler::zero, L_tail_restore); 10327 10328 // Fold crc into first bytes of vector 10329 movdqa(xmm1, Address(buf, 0)); 10330 movdl(rax, xmm1); 10331 xorl(crc, rax); 10332 if (VM_Version::supports_sse4_1()) { 10333 pinsrd(xmm1, crc, 0); 10334 } else { 10335 pinsrw(xmm1, crc, 0); 10336 shrl(crc, 16); 10337 pinsrw(xmm1, crc, 1); 10338 } 10339 addptr(buf, 16); 10340 subl(len, 4); // len > 0 10341 jcc(Assembler::less, L_fold_tail); 10342 10343 movdqa(xmm2, Address(buf, 0)); 10344 movdqa(xmm3, Address(buf, 16)); 10345 movdqa(xmm4, Address(buf, 32)); 10346 addptr(buf, 48); 10347 subl(len, 3); 10348 jcc(Assembler::lessEqual, L_fold_512b); 10349 10350 // Fold total 512 bits of polynomial on each iteration, 10351 // 128 bits per each of 4 parallel streams. 10352 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32)); 10353 10354 align(32); 10355 BIND(L_fold_512b_loop); 10356 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); 10357 fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16); 10358 fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32); 10359 fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48); 10360 addptr(buf, 64); 10361 subl(len, 4); 10362 jcc(Assembler::greater, L_fold_512b_loop); 10363 10364 // Fold 512 bits to 128 bits. 
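// Each fold below carry-less multiplies the running accumulator xmm1 by the
// fold constant loaded into xmm0 and xors in the next stream's accumulator,
// collapsing xmm1..xmm4 into a single 128-bit remainder in xmm1.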
10365 BIND(L_fold_512b); 10366 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16)); 10367 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2); 10368 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3); 10369 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4); 10370 10371 // Fold the rest of 128 bits data chunks 10372 BIND(L_fold_tail); 10373 addl(len, 3); 10374 jccb(Assembler::lessEqual, L_fold_128b); 10375 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16)); 10376 10377 BIND(L_fold_tail_loop); 10378 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); 10379 addptr(buf, 16); 10380 decrementl(len); 10381 jccb(Assembler::greater, L_fold_tail_loop); 10382 10383 // Fold 128 bits in xmm1 down into 32 bits in crc register. 10384 BIND(L_fold_128b); 10385 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr())); 10386 if (UseAVX > 0) { 10387 vpclmulqdq(xmm2, xmm0, xmm1, 0x1); 10388 vpand(xmm3, xmm0, xmm2, 0 /* vector_len */); 10389 vpclmulqdq(xmm0, xmm0, xmm3, 0x1); 10390 } else { 10391 movdqa(xmm2, xmm0); 10392 pclmulqdq(xmm2, xmm1, 0x1); 10393 movdqa(xmm3, xmm0); 10394 pand(xmm3, xmm2); 10395 pclmulqdq(xmm0, xmm3, 0x1); 10396 } 10397 psrldq(xmm1, 8); 10398 psrldq(xmm2, 4); 10399 pxor(xmm0, xmm1); 10400 pxor(xmm0, xmm2); 10401 10402 // 8 8-bit folds to compute 32-bit CRC. 10403 for (int j = 0; j < 4; j++) { 10404 fold_8bit_crc32(xmm0, table, xmm1, rax); 10405 } 10406 movdl(crc, xmm0); // mov 32 bits to general register 10407 for (int j = 0; j < 4; j++) { 10408 fold_8bit_crc32(crc, table, rax); 10409 } 10410 10411 BIND(L_tail_restore); 10412 movl(len, tmp); // restore 10413 BIND(L_tail); 10414 andl(len, 0xf); 10415 jccb(Assembler::zero, L_exit); 10416 10417 // Fold the rest of bytes 10418 align(4); 10419 BIND(L_tail_loop); 10420 movsbl(rax, Address(buf, 0)); // load byte with sign extension 10421 update_byte_crc32(crc, rax, table); 10422 increment(buf); 10423 decrementl(len); 10424 jccb(Assembler::greater, L_tail_loop); 10425 10426 BIND(L_exit); 10427 notl(crc); // ~c 10428 } 10429 10430 #ifdef _LP64 10431 // S. Gueron / Information Processing Letters 112 (2012) 184 10432 // Algorithm 4: Computing carry-less multiplication using a precomputed lookup table. 10433 // Input: A 32 bit value B = [byte3, byte2, byte1, byte0]. 
10434 // Output: the 64-bit carry-less product of B * CONST
10435 void MacroAssembler::crc32c_ipl_alg4(Register in, uint32_t n,
10436 Register tmp1, Register tmp2, Register tmp3) {
10437 lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr()));
10438 if (n > 0) {
10439 addq(tmp3, n * 256 * 8);
10440 }
10441 // Q1 = TABLEExt[n][B & 0xFF];
10442 movl(tmp1, in);
10443 andl(tmp1, 0x000000FF);
10444 shll(tmp1, 3);
10445 addq(tmp1, tmp3);
10446 movq(tmp1, Address(tmp1, 0));
10447
10448 // Q2 = TABLEExt[n][B >> 8 & 0xFF];
10449 movl(tmp2, in);
10450 shrl(tmp2, 8);
10451 andl(tmp2, 0x000000FF);
10452 shll(tmp2, 3);
10453 addq(tmp2, tmp3);
10454 movq(tmp2, Address(tmp2, 0));
10455
10456 shlq(tmp2, 8);
10457 xorq(tmp1, tmp2);
10458
10459 // Q3 = TABLEExt[n][B >> 16 & 0xFF];
10460 movl(tmp2, in);
10461 shrl(tmp2, 16);
10462 andl(tmp2, 0x000000FF);
10463 shll(tmp2, 3);
10464 addq(tmp2, tmp3);
10465 movq(tmp2, Address(tmp2, 0));
10466
10467 shlq(tmp2, 16);
10468 xorq(tmp1, tmp2);
10469
10470 // Q4 = TABLEExt[n][B >> 24 & 0xFF];
10471 shrl(in, 24);
10472 andl(in, 0x000000FF);
10473 shll(in, 3);
10474 addq(in, tmp3);
10475 movq(in, Address(in, 0));
10476
10477 shlq(in, 24);
10478 xorq(in, tmp1);
10479 // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24;
10480 }
10481
10482 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1,
10483 Register in_out,
10484 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
10485 XMMRegister w_xtmp2,
10486 Register tmp1,
10487 Register n_tmp2, Register n_tmp3) {
10488 if (is_pclmulqdq_supported) {
10489 movdl(w_xtmp1, in_out); // modified blindly
10490
10491 movl(tmp1, const_or_pre_comp_const_index);
10492 movdl(w_xtmp2, tmp1);
10493 pclmulqdq(w_xtmp1, w_xtmp2, 0);
10494
10495 movdq(in_out, w_xtmp1);
10496 } else {
10497 crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3);
10498 }
10499 }
10500
10501 // Recombination Alternative 2: No bit-reflections
10502 // T1 = (CRC_A * U1) << 1
10503 // T2 = (CRC_B * U2) << 1
10504 // C1 = T1 >> 32
10505 // C2 = T2 >> 32
10506 // T1 = T1 & 0xFFFFFFFF
10507 // T2 = T2 & 0xFFFFFFFF
10508 // T1 = CRC32(0, T1)
10509 // T2 = CRC32(0, T2)
10510 // C1 = C1 ^ T1
10511 // C2 = C2 ^ T2
10512 // CRC = C1 ^ C2 ^ CRC_C
10513 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
10514 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
10515 Register tmp1, Register tmp2,
10516 Register n_tmp3) {
10517 crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
10518 crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
10519 shlq(in_out, 1);
10520 movl(tmp1, in_out);
10521 shrq(in_out, 32);
10522 xorl(tmp2, tmp2);
10523 crc32(tmp2, tmp1, 4);
10524 xorl(in_out, tmp2); // we don't care about upper 32 bit contents here
10525 shlq(in1, 1);
10526 movl(tmp1, in1);
10527 shrq(in1, 32);
10528 xorl(tmp2, tmp2);
10529 crc32(tmp2, tmp1, 4);
10530 xorl(in1, tmp2);
10531 xorl(in_out, in1);
10532 xorl(in_out, in2);
10533 }
10534
10535 // Set N to a predefined value
10536 // Subtract it from the length of the buffer
10537 // execute in a loop:
10538 // CRC_A = 0xFFFFFFFF, CRC_B = 0, CRC_C = 0
10539 // for i = 1 to N do
10540 // CRC_A = CRC32(CRC_A, A[i])
10541 // CRC_B = CRC32(CRC_B, B[i])
10542 // CRC_C = CRC32(CRC_C, C[i])
10543 // end for
10544 //
Recombine 10545 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 10546 Register in_out1, Register in_out2, Register in_out3, 10547 Register tmp1, Register tmp2, Register tmp3, 10548 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 10549 Register tmp4, Register tmp5, 10550 Register n_tmp6) { 10551 Label L_processPartitions; 10552 Label L_processPartition; 10553 Label L_exit; 10554 10555 bind(L_processPartitions); 10556 cmpl(in_out1, 3 * size); 10557 jcc(Assembler::less, L_exit); 10558 xorl(tmp1, tmp1); 10559 xorl(tmp2, tmp2); 10560 movq(tmp3, in_out2); 10561 addq(tmp3, size); 10562 10563 bind(L_processPartition); 10564 crc32(in_out3, Address(in_out2, 0), 8); 10565 crc32(tmp1, Address(in_out2, size), 8); 10566 crc32(tmp2, Address(in_out2, size * 2), 8); 10567 addq(in_out2, 8); 10568 cmpq(in_out2, tmp3); 10569 jcc(Assembler::less, L_processPartition); 10570 crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2, 10571 w_xtmp1, w_xtmp2, w_xtmp3, 10572 tmp4, tmp5, 10573 n_tmp6); 10574 addq(in_out2, 2 * size); 10575 subl(in_out1, 3 * size); 10576 jmp(L_processPartitions); 10577 10578 bind(L_exit); 10579 } 10580 #else 10581 void MacroAssembler::crc32c_ipl_alg4(Register in_out, uint32_t n, 10582 Register tmp1, Register tmp2, Register tmp3, 10583 XMMRegister xtmp1, XMMRegister xtmp2) { 10584 lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr())); 10585 if (n > 0) { 10586 addl(tmp3, n * 256 * 8); 10587 } 10588 // Q1 = TABLEExt[n][B & 0xFF]; 10589 movl(tmp1, in_out); 10590 andl(tmp1, 0x000000FF); 10591 shll(tmp1, 3); 10592 addl(tmp1, tmp3); 10593 movq(xtmp1, Address(tmp1, 0)); 10594 10595 // Q2 = TABLEExt[n][B >> 8 & 0xFF]; 10596 movl(tmp2, in_out); 10597 shrl(tmp2, 8); 10598 andl(tmp2, 0x000000FF); 10599 shll(tmp2, 3); 10600 addl(tmp2, tmp3); 10601 movq(xtmp2, Address(tmp2, 0)); 10602 10603 psllq(xtmp2, 8); 10604 pxor(xtmp1, xtmp2); 10605 10606 // Q3 = TABLEExt[n][B >> 16 & 0xFF]; 10607 movl(tmp2, in_out); 10608 shrl(tmp2, 16); 10609 andl(tmp2, 0x000000FF); 10610 shll(tmp2, 3); 10611 addl(tmp2, tmp3); 10612 movq(xtmp2, Address(tmp2, 0)); 10613 10614 psllq(xtmp2, 16); 10615 pxor(xtmp1, xtmp2); 10616 10617 // Q4 = TABLEExt[n][B >> 24 & 0xFF]; 10618 shrl(in_out, 24); 10619 andl(in_out, 0x000000FF); 10620 shll(in_out, 3); 10621 addl(in_out, tmp3); 10622 movq(xtmp2, Address(in_out, 0)); 10623 10624 psllq(xtmp2, 24); 10625 pxor(xtmp1, xtmp2); // Result in CXMM 10626 // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24; 10627 } 10628 10629 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1, 10630 Register in_out, 10631 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 10632 XMMRegister w_xtmp2, 10633 Register tmp1, 10634 Register n_tmp2, Register n_tmp3) { 10635 if (is_pclmulqdq_supported) { 10636 movdl(w_xtmp1, in_out); 10637 10638 movl(tmp1, const_or_pre_comp_const_index); 10639 movdl(w_xtmp2, tmp1); 10640 pclmulqdq(w_xtmp1, w_xtmp2, 0); 10641 // Keep result in XMM since GPR is 32 bit in length 10642 } else { 10643 crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3, w_xtmp1, w_xtmp2); 10644 } 10645 } 10646 10647 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 10648 XMMRegister w_xtmp1, XMMRegister w_xtmp2, 
XMMRegister w_xtmp3, 10649 Register tmp1, Register tmp2, 10650 Register n_tmp3) { 10651 crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 10652 crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 10653 10654 psllq(w_xtmp1, 1); 10655 movdl(tmp1, w_xtmp1); 10656 psrlq(w_xtmp1, 32); 10657 movdl(in_out, w_xtmp1); 10658 10659 xorl(tmp2, tmp2); 10660 crc32(tmp2, tmp1, 4); 10661 xorl(in_out, tmp2); 10662 10663 psllq(w_xtmp2, 1); 10664 movdl(tmp1, w_xtmp2); 10665 psrlq(w_xtmp2, 32); 10666 movdl(in1, w_xtmp2); 10667 10668 xorl(tmp2, tmp2); 10669 crc32(tmp2, tmp1, 4); 10670 xorl(in1, tmp2); 10671 xorl(in_out, in1); 10672 xorl(in_out, in2); 10673 } 10674 10675 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 10676 Register in_out1, Register in_out2, Register in_out3, 10677 Register tmp1, Register tmp2, Register tmp3, 10678 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 10679 Register tmp4, Register tmp5, 10680 Register n_tmp6) { 10681 Label L_processPartitions; 10682 Label L_processPartition; 10683 Label L_exit; 10684 10685 bind(L_processPartitions); 10686 cmpl(in_out1, 3 * size); 10687 jcc(Assembler::less, L_exit); 10688 xorl(tmp1, tmp1); 10689 xorl(tmp2, tmp2); 10690 movl(tmp3, in_out2); 10691 addl(tmp3, size); 10692 10693 bind(L_processPartition); 10694 crc32(in_out3, Address(in_out2, 0), 4); 10695 crc32(tmp1, Address(in_out2, size), 4); 10696 crc32(tmp2, Address(in_out2, size*2), 4); 10697 crc32(in_out3, Address(in_out2, 0+4), 4); 10698 crc32(tmp1, Address(in_out2, size+4), 4); 10699 crc32(tmp2, Address(in_out2, size*2+4), 4); 10700 addl(in_out2, 8); 10701 cmpl(in_out2, tmp3); 10702 jcc(Assembler::less, L_processPartition); 10703 10704 push(tmp3); 10705 push(in_out1); 10706 push(in_out2); 10707 tmp4 = tmp3; 10708 tmp5 = in_out1; 10709 n_tmp6 = in_out2; 10710 10711 crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2, 10712 w_xtmp1, w_xtmp2, w_xtmp3, 10713 tmp4, tmp5, 10714 n_tmp6); 10715 10716 pop(in_out2); 10717 pop(in_out1); 10718 pop(tmp3); 10719 10720 addl(in_out2, 2 * size); 10721 subl(in_out1, 3 * size); 10722 jmp(L_processPartitions); 10723 10724 bind(L_exit); 10725 } 10726 #endif //LP64 10727 10728 #ifdef _LP64 10729 // Algorithm 2: Pipelined usage of the CRC32 instruction. 10730 // Input: A buffer I of L bytes. 10731 // Output: the CRC32C value of the buffer. 10732 // Notations: 10733 // Write L = 24N + r, with N = floor (L/24). 10734 // r = L mod 24 (0 <= r < 24). 10735 // Consider I as the concatenation of A|B|C|R, where A, B, C, each, 10736 // N quadwords, and R consists of r bytes. 
10737 // A[j] = I [8j+7:8j], j= 0, 1, ..., N-1 10738 // B[j] = I [N + 8j+7:N + 8j], j= 0, 1, ..., N-1 10739 // C[j] = I [2N + 8j+7:2N + 8j], j= 0, 1, ..., N-1 10740 // if r > 0 R[j] = I [3N +j], j= 0, 1, ...,r-1 10741 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 10742 Register tmp1, Register tmp2, Register tmp3, 10743 Register tmp4, Register tmp5, Register tmp6, 10744 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 10745 bool is_pclmulqdq_supported) { 10746 uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS]; 10747 Label L_wordByWord; 10748 Label L_byteByByteProlog; 10749 Label L_byteByByte; 10750 Label L_exit; 10751 10752 if (is_pclmulqdq_supported ) { 10753 const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr; 10754 const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr+1); 10755 10756 const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2); 10757 const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3); 10758 10759 const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4); 10760 const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5); 10761 assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1 ) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\""); 10762 } else { 10763 const_or_pre_comp_const_index[0] = 1; 10764 const_or_pre_comp_const_index[1] = 0; 10765 10766 const_or_pre_comp_const_index[2] = 3; 10767 const_or_pre_comp_const_index[3] = 2; 10768 10769 const_or_pre_comp_const_index[4] = 5; 10770 const_or_pre_comp_const_index[5] = 4; 10771 } 10772 crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported, 10773 in2, in1, in_out, 10774 tmp1, tmp2, tmp3, 10775 w_xtmp1, w_xtmp2, w_xtmp3, 10776 tmp4, tmp5, 10777 tmp6); 10778 crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported, 10779 in2, in1, in_out, 10780 tmp1, tmp2, tmp3, 10781 w_xtmp1, w_xtmp2, w_xtmp3, 10782 tmp4, tmp5, 10783 tmp6); 10784 crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported, 10785 in2, in1, in_out, 10786 tmp1, tmp2, tmp3, 10787 w_xtmp1, w_xtmp2, w_xtmp3, 10788 tmp4, tmp5, 10789 tmp6); 10790 movl(tmp1, in2); 10791 andl(tmp1, 0x00000007); 10792 negl(tmp1); 10793 addl(tmp1, in2); 10794 addq(tmp1, in1); 10795 10796 BIND(L_wordByWord); 10797 cmpq(in1, tmp1); 10798 jcc(Assembler::greaterEqual, L_byteByByteProlog); 10799 crc32(in_out, Address(in1, 0), 4); 10800 addq(in1, 4); 10801 jmp(L_wordByWord); 10802 10803 BIND(L_byteByByteProlog); 10804 andl(in2, 0x00000007); 10805 movl(tmp2, 1); 10806 10807 BIND(L_byteByByte); 10808 cmpl(tmp2, in2); 10809 jccb(Assembler::greater, L_exit); 10810 crc32(in_out, Address(in1, 0), 1); 10811 incq(in1); 10812 incl(tmp2); 10813 jmp(L_byteByByte); 10814 10815 BIND(L_exit); 10816 } 10817 #else 10818 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 10819 Register tmp1, Register tmp2, Register tmp3, 10820 Register tmp4, Register tmp5, Register tmp6, 10821 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 10822 bool is_pclmulqdq_supported) { 10823 uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS]; 10824 Label L_wordByWord; 10825 Label 
L_byteByByteProlog; 10826 Label L_byteByByte; 10827 Label L_exit; 10828 10829 if (is_pclmulqdq_supported) { 10830 const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr; 10831 const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 1); 10832 10833 const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2); 10834 const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3); 10835 10836 const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4); 10837 const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5); 10838 } else { 10839 const_or_pre_comp_const_index[0] = 1; 10840 const_or_pre_comp_const_index[1] = 0; 10841 10842 const_or_pre_comp_const_index[2] = 3; 10843 const_or_pre_comp_const_index[3] = 2; 10844 10845 const_or_pre_comp_const_index[4] = 5; 10846 const_or_pre_comp_const_index[5] = 4; 10847 } 10848 crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported, 10849 in2, in1, in_out, 10850 tmp1, tmp2, tmp3, 10851 w_xtmp1, w_xtmp2, w_xtmp3, 10852 tmp4, tmp5, 10853 tmp6); 10854 crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported, 10855 in2, in1, in_out, 10856 tmp1, tmp2, tmp3, 10857 w_xtmp1, w_xtmp2, w_xtmp3, 10858 tmp4, tmp5, 10859 tmp6); 10860 crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported, 10861 in2, in1, in_out, 10862 tmp1, tmp2, tmp3, 10863 w_xtmp1, w_xtmp2, w_xtmp3, 10864 tmp4, tmp5, 10865 tmp6); 10866 movl(tmp1, in2); 10867 andl(tmp1, 0x00000007); 10868 negl(tmp1); 10869 addl(tmp1, in2); 10870 addl(tmp1, in1); 10871 10872 BIND(L_wordByWord); 10873 cmpl(in1, tmp1); 10874 jcc(Assembler::greaterEqual, L_byteByByteProlog); 10875 crc32(in_out, Address(in1,0), 4); 10876 addl(in1, 4); 10877 jmp(L_wordByWord); 10878 10879 BIND(L_byteByByteProlog); 10880 andl(in2, 0x00000007); 10881 movl(tmp2, 1); 10882 10883 BIND(L_byteByByte); 10884 cmpl(tmp2, in2); 10885 jccb(Assembler::greater, L_exit); 10886 movb(tmp1, Address(in1, 0)); 10887 crc32(in_out, tmp1, 1); 10888 incl(in1); 10889 incl(tmp2); 10890 jmp(L_byteByByte); 10891 10892 BIND(L_exit); 10893 } 10894 #endif // LP64 10895 #undef BIND 10896 #undef BLOCK_COMMENT 10897 10898 // Compress char[] array to byte[]. 
10899 // ..\jdk\src\java.base\share\classes\java\lang\StringUTF16.java
10900 // @HotSpotIntrinsicCandidate
10901 // private static int compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) {
10902 // for (int i = 0; i < len; i++) {
10903 // int c = src[srcOff++];
10904 // if (c >>> 8 != 0) {
10905 // return 0;
10906 // }
10907 // dst[dstOff++] = (byte)c;
10908 // }
10909 // return len;
10910 // }
10911 void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
10912 XMMRegister tmp1Reg, XMMRegister tmp2Reg,
10913 XMMRegister tmp3Reg, XMMRegister tmp4Reg,
10914 Register tmp5, Register result) {
10915 Label copy_chars_loop, return_length, return_zero, done, below_threshold;
10916
10917 // rsi: src
10918 // rdi: dst
10919 // rdx: len
10920 // rcx: tmp5
10921 // rax: result
10922
10923 // rsi holds start addr of source char[] to be compressed
10924 // rdi holds start addr of destination byte[]
10925 // rdx holds length
10926
10927 assert(len != result, "");
10928
10929 // save length for return
10930 push(len);
10931
10932 if ((UseAVX > 2) && // AVX512
10933 VM_Version::supports_avx512vlbw() &&
10934 VM_Version::supports_bmi2()) {
10935
10936 set_vector_masking(); // opening of the stub context for programming mask registers
10937
10938 Label copy_32_loop, copy_loop_tail, restore_k1_return_zero;
10939
10940 // alignment
10941 Label post_alignment;
10942
10943 // if the length of the string is less than 32, handle it the
10944 // old-fashioned way
10945 testl(len, -32);
10946 jcc(Assembler::zero, below_threshold);
10947
10948 // First check whether a character is compressible (<= 0xFF).
10949 // Create mask to test for Unicode chars inside zmm vector
10950 movl(result, 0x00FF);
10951 evpbroadcastw(tmp2Reg, result, Assembler::AVX_512bit);
10952
10953 // Save k1
10954 kmovql(k3, k1);
10955
10956 testl(len, -64);
10957 jcc(Assembler::zero, post_alignment);
10958
10959 movl(tmp5, dst);
10960 andl(tmp5, (32 - 1));
10961 negl(tmp5);
10962 andl(tmp5, (32 - 1));
10963
10964 // bail out when there is nothing to be done
10965 testl(tmp5, 0xFFFFFFFF);
10966 jcc(Assembler::zero, post_alignment);
10967
10968 // ~(~0 << len), where len is the # of remaining elements to process
10969 movl(result, 0xFFFFFFFF);
10970 shlxl(result, result, tmp5);
10971 notl(result);
10972 kmovdl(k1, result);
10973
10974 evmovdquw(tmp1Reg, k1, Address(src, 0), Assembler::AVX_512bit);
10975 evpcmpuw(k2, k1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit);
10976 ktestd(k2, k1);
10977 jcc(Assembler::carryClear, restore_k1_return_zero);
10978
10979 evpmovwb(Address(dst, 0), k1, tmp1Reg, Assembler::AVX_512bit);
10980
10981 addptr(src, tmp5);
10982 addptr(src, tmp5);
10983 addptr(dst, tmp5);
10984 subl(len, tmp5);
10985
10986 bind(post_alignment);
10987 // end of alignment
10988
10989 movl(tmp5, len);
10990 andl(tmp5, (32 - 1)); // tail count (in chars)
10991 andl(len, ~(32 - 1)); // vector count (in chars)
10992 jcc(Assembler::zero, copy_loop_tail);
10993
10994 lea(src, Address(src, len, Address::times_2));
10995 lea(dst, Address(dst, len, Address::times_1));
10996 negptr(len);
10997
10998 bind(copy_32_loop);
10999 evmovdquw(tmp1Reg, Address(src, len, Address::times_2), Assembler::AVX_512bit);
11000 evpcmpuw(k2, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit);
11001 kortestdl(k2, k2);
11002 jcc(Assembler::carryClear, restore_k1_return_zero);
11003
11004 // All elements in the currently processed chunk are valid candidates for
11005 // compression. Write the truncated byte elements to memory.
11006 evpmovwb(Address(dst, len, Address::times_1), tmp1Reg, Assembler::AVX_512bit);
11007 addptr(len, 32);
11008 jcc(Assembler::notZero, copy_32_loop);
11009
11010 bind(copy_loop_tail);
11011 // bail out when there is nothing to be done
11012 testl(tmp5, 0xFFFFFFFF);
11013 // Restore k1
11014 kmovql(k1, k3);
11015 jcc(Assembler::zero, return_length);
11016
11017 movl(len, tmp5);
11018
11019 // ~(~0 << len), where len is the # of remaining elements to process
11020 movl(result, 0xFFFFFFFF);
11021 shlxl(result, result, len);
11022 notl(result);
11023
11024 kmovdl(k1, result);
11025
11026 evmovdquw(tmp1Reg, k1, Address(src, 0), Assembler::AVX_512bit);
11027 evpcmpuw(k2, k1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit);
11028 ktestd(k2, k1);
11029 jcc(Assembler::carryClear, restore_k1_return_zero);
11030
11031 evpmovwb(Address(dst, 0), k1, tmp1Reg, Assembler::AVX_512bit);
11032 // Restore k1
11033 kmovql(k1, k3);
11034 jmp(return_length);
11035
11036 bind(restore_k1_return_zero);
11037 // Restore k1
11038 kmovql(k1, k3);
11039 jmp(return_zero);
11040
11041 clear_vector_masking(); // closing of the stub context for programming mask registers
11042 }
11043 if (UseSSE42Intrinsics) {
11044 Label copy_32_loop, copy_16, copy_tail;
11045
11046 bind(below_threshold);
11047
11048 movl(result, len);
11049
11050 movl(tmp5, 0xff00ff00); // create mask to test for Unicode chars in vectors
11051
11052 // vectored compression
11053 andl(len, 0xfffffff0); // vector count (in chars)
11054 andl(result, 0x0000000f); // tail count (in chars)
11055 testl(len, len);
11056 jccb(Assembler::zero, copy_16);
11057
11058 // compress 16 chars per iter
11059 movdl(tmp1Reg, tmp5);
11060 pshufd(tmp1Reg, tmp1Reg, 0); // store Unicode mask in tmp1Reg
11061 pxor(tmp4Reg, tmp4Reg);
11062
11063 lea(src, Address(src, len, Address::times_2));
11064 lea(dst, Address(dst, len, Address::times_1));
11065 negptr(len);
11066
11067 bind(copy_32_loop);
11068 movdqu(tmp2Reg, Address(src, len, Address::times_2)); // load 1st 8 characters
11069 por(tmp4Reg, tmp2Reg);
11070 movdqu(tmp3Reg, Address(src, len, Address::times_2, 16)); // load next 8 characters
11071 por(tmp4Reg, tmp3Reg);
11072 ptest(tmp4Reg, tmp1Reg); // check for Unicode chars in next vector
11073 jcc(Assembler::notZero, return_zero);
11074 packuswb(tmp2Reg, tmp3Reg); // only ASCII chars; compress each to 1 byte
11075 movdqu(Address(dst, len, Address::times_1), tmp2Reg);
11076 addptr(len, 16);
11077 jcc(Assembler::notZero, copy_32_loop);
11078
11079 // compress next vector of 8 chars (if any)
11080 bind(copy_16);
11081 movl(len, result);
11082 andl(len, 0xfffffff8); // vector count (in chars)
11083 andl(result, 0x00000007); // tail count (in chars)
11084 testl(len, len);
11085 jccb(Assembler::zero, copy_tail);
11086
11087 movdl(tmp1Reg, tmp5);
11088 pshufd(tmp1Reg, tmp1Reg, 0); // store Unicode mask in tmp1Reg
11089 pxor(tmp3Reg, tmp3Reg);
11090
11091 movdqu(tmp2Reg, Address(src, 0));
11092 ptest(tmp2Reg, tmp1Reg); // check for Unicode chars in vector
11093 jccb(Assembler::notZero, return_zero);
11094 packuswb(tmp2Reg, tmp3Reg); // only LATIN1 chars; compress each to 1 byte
11095 movq(Address(dst, 0), tmp2Reg);
11096 addptr(src, 16);
11097 addptr(dst, 8);
11098
11099 bind(copy_tail);
11100 movl(len, result);
11101 }
11102 // compress 1 char per iter
11103 testl(len, len);
11104 jccb(Assembler::zero, return_length);
11105 lea(src, Address(src, len, Address::times_2));
11106 lea(dst, Address(dst, len, Address::times_1));
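// Bias src/dst to the end of their arrays and count len up from -len to zero,
// so the scalar loop below advances through both arrays with a single
// increment of len.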
11107 negptr(len);
11108
11109 bind(copy_chars_loop);
11110 load_unsigned_short(result, Address(src, len, Address::times_2));
11111 testl(result, 0xff00); // check if Unicode char
11112 jccb(Assembler::notZero, return_zero);
11113 movb(Address(dst, len, Address::times_1), result); // ASCII char; compress to 1 byte
11114 increment(len);
11115 jcc(Assembler::notZero, copy_chars_loop);
11116
11117 // if compression succeeded, return length
11118 bind(return_length);
11119 pop(result);
11120 jmpb(done);
11121
11122 // if compression failed, return 0
11123 bind(return_zero);
11124 xorl(result, result);
11125 addptr(rsp, wordSize);
11126
11127 bind(done);
11128 }
11129
11130 // Inflate byte[] array to char[].
11131 // ..\jdk\src\java.base\share\classes\java\lang\StringLatin1.java
11132 // @HotSpotIntrinsicCandidate
11133 // private static void inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len) {
11134 // for (int i = 0; i < len; i++) {
11135 // dst[dstOff++] = (char)(src[srcOff++] & 0xff);
11136 // }
11137 // }
11138 void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len,
11139 XMMRegister tmp1, Register tmp2) {
11140 Label copy_chars_loop, done, below_threshold;
11141 // rsi: src
11142 // rdi: dst
11143 // rdx: len
11144 // rcx: tmp2
11145
11146 // rsi holds start addr of source byte[] to be inflated
11147 // rdi holds start addr of destination char[]
11148 // rdx holds length
11149 assert_different_registers(src, dst, len, tmp2);
11150
11151 if ((UseAVX > 2) && // AVX512
11152 VM_Version::supports_avx512vlbw() &&
11153 VM_Version::supports_bmi2()) {
11154
11155 set_vector_masking(); // opening of the stub context for programming mask registers
11156
11157 Label copy_32_loop, copy_tail;
11158 Register tmp3_aliased = len;
11159
11160 // if the length of the string is less than 16, handle it the
11161 // old-fashioned way
11162 testl(len, -16);
11163 jcc(Assembler::zero, below_threshold);
11164
11165 // Pre-calculate the tail and vector counts here so that the main loop
11166 // needs only a single arithmetic operation per iteration
11167 movl(tmp2, len);
11168 andl(tmp2, (32 - 1)); // tail count (in chars), 32 element wide loop
11169 andl(len, -32); // vector count
11170 jccb(Assembler::zero, copy_tail);
11171
11172 lea(src, Address(src, len, Address::times_1));
11173 lea(dst, Address(dst, len, Address::times_2));
11174 negptr(len);
11175
11176
11177 // inflate 32 chars per iter
11178 bind(copy_32_loop);
11179 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_512bit);
11180 evmovdquw(Address(dst, len, Address::times_2), tmp1, Assembler::AVX_512bit);
11181 addptr(len, 32);
11182 jcc(Assembler::notZero, copy_32_loop);
11183
11184 bind(copy_tail);
11185 // bail out when there is nothing to be done
11186 testl(tmp2, -1); // we don't destroy the contents of tmp2 here
11187 jcc(Assembler::zero, done);
11188
11189 // Save k1
11190 kmovql(k2, k1);
11191
11192 // ~(~0 << length), where length is the # of remaining elements to process
11193 movl(tmp3_aliased, -1);
11194 shlxl(tmp3_aliased, tmp3_aliased, tmp2);
11195 notl(tmp3_aliased);
11196 kmovdl(k1, tmp3_aliased);
11197 evpmovzxbw(tmp1, k1, Address(src, 0), Assembler::AVX_512bit);
11198 evmovdquw(Address(dst, 0), k1, tmp1, Assembler::AVX_512bit);
11199
11200 // Restore k1
11201 kmovql(k1, k2);
11202 jmp(done);
11203
11204 clear_vector_masking(); // closing of the stub context for programming mask registers
11205 }
11206 if (UseSSE42Intrinsics) {
11207 Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail;
11208
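// A sketch of the tiered strategy below (labels declared above):
//   copy_16_loop: AVX2 path, inflates 16 chars per iteration (vpmovzxbw);
//   copy_8_loop:  SSE4.2-only path, inflates 8 chars per iteration;
//   copy_new_tail / copy_tail: one 8-char and then one 4-char step for the
//     remainder, with copy_bytes falling through to the scalar loop.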
11209 movl(tmp2, len); 11210 11211 if (UseAVX > 1) { 11212 andl(tmp2, (16 - 1)); 11213 andl(len, -16); 11214 jccb(Assembler::zero, copy_new_tail); 11215 } else { 11216 andl(tmp2, 0x00000007); // tail count (in chars) 11217 andl(len, 0xfffffff8); // vector count (in chars) 11218 jccb(Assembler::zero, copy_tail); 11219 } 11220 11221 // vectored inflation 11222 lea(src, Address(src, len, Address::times_1)); 11223 lea(dst, Address(dst, len, Address::times_2)); 11224 negptr(len); 11225 11226 if (UseAVX > 1) { 11227 bind(copy_16_loop); 11228 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_256bit); 11229 vmovdqu(Address(dst, len, Address::times_2), tmp1); 11230 addptr(len, 16); 11231 jcc(Assembler::notZero, copy_16_loop); 11232 11233 bind(below_threshold); 11234 bind(copy_new_tail); 11235 if ((UseAVX > 2) && 11236 VM_Version::supports_avx512vlbw() && 11237 VM_Version::supports_bmi2()) { 11238 movl(tmp2, len); 11239 } else { 11240 movl(len, tmp2); 11241 } 11242 andl(tmp2, 0x00000007); 11243 andl(len, 0xFFFFFFF8); 11244 jccb(Assembler::zero, copy_tail); 11245 11246 pmovzxbw(tmp1, Address(src, 0)); 11247 movdqu(Address(dst, 0), tmp1); 11248 addptr(src, 8); 11249 addptr(dst, 2 * 8); 11250 11251 jmp(copy_tail, true); 11252 } 11253 11254 // inflate 8 chars per iter 11255 bind(copy_8_loop); 11256 pmovzxbw(tmp1, Address(src, len, Address::times_1)); // unpack to 8 words 11257 movdqu(Address(dst, len, Address::times_2), tmp1); 11258 addptr(len, 8); 11259 jcc(Assembler::notZero, copy_8_loop); 11260 11261 bind(copy_tail); 11262 movl(len, tmp2); 11263 11264 cmpl(len, 4); 11265 jccb(Assembler::less, copy_bytes); 11266 11267 movdl(tmp1, Address(src, 0)); // load 4 byte chars 11268 pmovzxbw(tmp1, tmp1); 11269 movq(Address(dst, 0), tmp1); 11270 subptr(len, 4); 11271 addptr(src, 4); 11272 addptr(dst, 8); 11273 11274 bind(copy_bytes); 11275 } 11276 testl(len, len); 11277 jccb(Assembler::zero, done); 11278 lea(src, Address(src, len, Address::times_1)); 11279 lea(dst, Address(dst, len, Address::times_2)); 11280 negptr(len); 11281 11282 // inflate 1 char per iter 11283 bind(copy_chars_loop); 11284 load_unsigned_byte(tmp2, Address(src, len, Address::times_1)); // load byte char 11285 movw(Address(dst, len, Address::times_2), tmp2); // inflate byte char to word 11286 increment(len); 11287 jcc(Assembler::notZero, copy_chars_loop); 11288 11289 bind(done); 11290 } 11291 11292 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) { 11293 switch (cond) { 11294 // Note some conditions are synonyms for others 11295 case Assembler::zero: return Assembler::notZero; 11296 case Assembler::notZero: return Assembler::zero; 11297 case Assembler::less: return Assembler::greaterEqual; 11298 case Assembler::lessEqual: return Assembler::greater; 11299 case Assembler::greater: return Assembler::lessEqual; 11300 case Assembler::greaterEqual: return Assembler::less; 11301 case Assembler::below: return Assembler::aboveEqual; 11302 case Assembler::belowEqual: return Assembler::above; 11303 case Assembler::above: return Assembler::belowEqual; 11304 case Assembler::aboveEqual: return Assembler::below; 11305 case Assembler::overflow: return Assembler::noOverflow; 11306 case Assembler::noOverflow: return Assembler::overflow; 11307 case Assembler::negative: return Assembler::positive; 11308 case Assembler::positive: return Assembler::negative; 11309 case Assembler::parity: return Assembler::noParity; 11310 case Assembler::noParity: return Assembler::parity; 11311 } 11312 ShouldNotReachHere(); return 
Assembler::overflow; 11313 } 11314 11315 SkipIfEqual::SkipIfEqual( 11316 MacroAssembler* masm, const bool* flag_addr, bool value) { 11317 _masm = masm; 11318 _masm->cmp8(ExternalAddress((address)flag_addr), value); 11319 _masm->jcc(Assembler::equal, _label); 11320 } 11321 11322 SkipIfEqual::~SkipIfEqual() { 11323 _masm->bind(_label); 11324 } 11325 11326 // 32-bit Windows has its own fast-path implementation 11327 // of get_thread 11328 #if !defined(WIN32) || defined(_LP64) 11329 11330 // This is simply a call to Thread::current() 11331 void MacroAssembler::get_thread(Register thread) { 11332 if (thread != rax) { 11333 push(rax); 11334 } 11335 LP64_ONLY(push(rdi);) 11336 LP64_ONLY(push(rsi);) 11337 push(rdx); 11338 push(rcx); 11339 #ifdef _LP64 11340 push(r8); 11341 push(r9); 11342 push(r10); 11343 push(r11); 11344 #endif 11345 11346 MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0); 11347 11348 #ifdef _LP64 11349 pop(r11); 11350 pop(r10); 11351 pop(r9); 11352 pop(r8); 11353 #endif 11354 pop(rcx); 11355 pop(rdx); 11356 LP64_ONLY(pop(rsi);) 11357 LP64_ONLY(pop(rdi);) 11358 if (thread != rax) { 11359 mov(thread, rax); 11360 pop(rax); 11361 } 11362 } 11363 11364 #endif