/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"
#include "utilities/macros.hpp"
#include "crc32c.h"
#ifdef COMPILER2
#include "opto/intrinsicnode.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
bool AbstractAssembler::pd_check_instruction_mark() { return true; }
#endif

static Assembler::Condition reverse[] = {
    Assembler::noOverflow   /* overflow      = 0x0 */ ,
    Assembler::overflow     /* noOverflow    = 0x1 */ ,
    Assembler::aboveEqual   /* carrySet      = 0x2, below         = 0x2 */ ,
    Assembler::below        /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,
    Assembler::notZero      /* zero          = 0x4, equal         = 0x4 */ ,
    Assembler::zero         /* notZero       = 0x5, notEqual      = 0x5 */ ,
    Assembler::above        /* belowEqual    = 0x6 */ ,
    Assembler::belowEqual   /* above         = 0x7 */ ,
    Assembler::positive     /* negative      = 0x8 */ ,
    Assembler::negative     /* positive      = 0x9 */ ,
    Assembler::noParity     /* parity        = 0xa */ ,
    Assembler::parity       /* noParity      = 0xb */ ,
    Assembler::greaterEqual /* less          = 0xc */ ,
    Assembler::less         /* greaterEqual  = 0xd */ ,
    Assembler::greater      /* lessEqual     = 0xe */ ,
    Assembler::lessEqual    /* greater       = 0xf */

};


// Implementation of MacroAssembler

// First all the versions that have distinct versions depending on 32/64 bit
// Unless the difference is trivial (1 line or so).

#ifndef _LP64

// 32bit versions

Address MacroAssembler::as_Address(AddressLiteral adr) {
  return Address(adr.target(), adr.rspec());
}

Address MacroAssembler::as_Address(ArrayAddress adr) {
  return Address::make_array(adr);
}

void MacroAssembler::call_VM_leaf_base(address entry_point,
                                       int number_of_arguments) {
  call(RuntimeAddress(entry_point));
  increment(rsp, number_of_arguments * wordSize);
}

void MacroAssembler::cmpklass(Address src1, Metadata* obj) {
  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpklass(Register src1, Metadata* obj) {
  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpoop_raw(Address src1, jobject obj) {
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpoop_raw(Register src1, jobject obj) {
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpoop(Address src1, jobject obj) {
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->obj_equals(this, src1, obj);
}

void MacroAssembler::cmpoop(Register src1, jobject obj) {
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->obj_equals(this, src1, obj);
}

void MacroAssembler::extend_sign(Register hi, Register lo) {
  // According to Intel Doc. AP-526, "Integer Divide", p.18.
  if (VM_Version::is_P6() && hi == rdx && lo == rax) {
    cdql();
  } else {
    movl(hi, lo);
    sarl(hi, 31);
  }
}

void MacroAssembler::jC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::parity, L);
}

void MacroAssembler::jnC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::noParity, L);
}

// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
void MacroAssembler::jump(ArrayAddress entry) {
  jmp(as_Address(entry));
}

// Note: y_lo will be destroyed
void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  // Long compare for Java (semantics as described in JVM spec.)
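  // Added sketch (illustrative, not upstream text): with x = (x_hi:x_lo) and
  // y = (y_hi:y_lo) viewed as int64 values, the code below computes in x_hi
  // roughly:
  //
  //   if (x < y) return -1;   // 'low'  path
  //   if (x > y) return  1;   // 'high' path
  //   return 0;               // equal: fall through with x_hi == 0
  //
  // The high words compare signed; only when they are equal are the low
  // words compared unsigned (Assembler::below).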
  Label high, low, done;

  cmpl(x_hi, y_hi);
  jcc(Assembler::less, low);
  jcc(Assembler::greater, high);
  // x_hi is the return register
  xorl(x_hi, x_hi);
  cmpl(x_lo, y_lo);
  jcc(Assembler::below, low);
  jcc(Assembler::equal, done);

  bind(high);
  xorl(x_hi, x_hi);
  increment(x_hi);
  jmp(done);

  bind(low);
  xorl(x_hi, x_hi);
  decrementl(x_hi);

  bind(done);
}

void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal32(dst, (int32_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr) {
  // leal(dst, as_Address(adr));
  // see note in movl as to why we must use a move
  mov_literal32(dst, (int32_t)adr.target(), adr.rspec());
}

void MacroAssembler::leave() {
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
  // Multiplication of two Java long values stored on the stack
  // as illustrated below. Result is in rdx:rax.
  //
  // rsp ---> [  ??  ] \               \
  //            ....    | y_rsp_offset  |
  //          [ y_lo ] /  (in bytes)    | x_rsp_offset
  //          [ y_hi ]                  | (in bytes)
  //            ....                    |
  //          [ x_lo ]                 /
  //          [ x_hi ]
  //            ....
  //
  // Basic idea: lo(result) = lo(x_lo * y_lo)
  //             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
  Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
  Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
  Label quick;
  // load x_hi, y_hi and check if quick
  // multiplication is possible
  movl(rbx, x_hi);
  movl(rcx, y_hi);
  movl(rax, rbx);
  orl(rbx, rcx);                                 // rbx = 0 <=> x_hi = 0 and y_hi = 0
  jcc(Assembler::zero, quick);                   // if rbx = 0 do quick multiply
  // do full multiplication
  // 1st step
  mull(y_lo);                                    // x_hi * y_lo
  movl(rbx, rax);                                // save lo(x_hi * y_lo) in rbx
  // 2nd step
  movl(rax, x_lo);
  mull(rcx);                                     // x_lo * y_hi
  addl(rbx, rax);                                // add lo(x_lo * y_hi) to rbx
  // 3rd step
  bind(quick);                                   // note: rbx = 0 if quick multiply!
  movl(rax, x_lo);
  mull(y_lo);                                    // x_lo * y_lo
  addl(rdx, rbx);                                // correct hi(x_lo * y_lo)
}

void MacroAssembler::lneg(Register hi, Register lo) {
  negl(lo);
  adcl(hi, 0);
  negl(hi);
}

void MacroAssembler::lshl(Register hi, Register lo) {
  // Java shift left long support (semantics as described in JVM spec., p.305)
  // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
  // shift value is in rcx !
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;                        // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                                 // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                                    // if (s < n)
  jcc(Assembler::less, L);                       // else (s >= n)
  movl(hi, lo);                                  // x := x << n
  xorl(lo, lo);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
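  // Worked example (added, not upstream text): for a shift count s = 40 the
  // jcc above is not taken, so hi/lo are first shifted by one full word
  // (hi := lo, lo := 0); the shld/shl pair below then shifts by
  // 40 mod 32 = 8, for a total left shift of 40 bits.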
  bind(L);                                       // s (mod n) < n
  shldl(hi, lo);                                 // x := x << s
  shll(lo);
}


void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
  // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
  // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;                        // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                                 // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                                    // if (s < n)
  jcc(Assembler::less, L);                       // else (s >= n)
  movl(lo, hi);                                  // x := x >> n
  if (sign_extension) sarl(hi, 31);
  else                xorl(hi, hi);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                                       // s (mod n) < n
  shrdl(lo, hi);                                 // x := x >> s
  if (sign_extension) sarl(hi);
  else                shrl(hi);
}

void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj) {
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::movptr(Register dst, AddressLiteral src, Register scratch) {
  // scratch register is not used,
  // it is defined to match parameters of 64-bit version of this method.
  if (src.is_lval()) {
    mov_literal32(dst, (intptr_t)src.target(), src.rspec());
  } else {
    movl(dst, as_Address(src));
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src) {
  movl(as_Address(dst), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movl(dst, as_Address(src));
}

// src should NEVER be a real pointer. Use AddressLiteral for true pointers
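// Added rationale (not upstream text): a raw pointer embedded as a plain
// immediate carries no relocation record, so GC and code relocation could
// never update it; AddressLiteral supplies the relocation spec along with
// the target.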
void MacroAssembler::movptr(Address dst, intptr_t src) {
  movl(dst, src);
}


void MacroAssembler::pop_callee_saved_registers() {
  pop(rcx);
  pop(rdx);
  pop(rdi);
  pop(rsi);
}

void MacroAssembler::pop_fTOS() {
  fld_d(Address(rsp, 0));
  addl(rsp, 2 * wordSize);
}

void MacroAssembler::push_callee_saved_registers() {
  push(rsi);
  push(rdi);
  push(rdx);
  push(rcx);
}

void MacroAssembler::push_fTOS() {
  subl(rsp, 2 * wordSize);
  fstp_d(Address(rsp, 0));
}


void MacroAssembler::pushoop(jobject obj) {
  push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::pushklass(Metadata* obj) {
  push_literal32((int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::pushptr(AddressLiteral src) {
  if (src.is_lval()) {
    push_literal32((int32_t)src.target(), src.rspec());
  } else {
    pushl(as_Address(src));
  }
}

void MacroAssembler::set_word_if_not_zero(Register dst) {
  xorl(dst, dst);
  set_byte_if_not_zero(dst);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
  // In order to get locks to work, we need to fake an in_VM state
  JavaThread* thread = JavaThread::current();
  JavaThreadState saved_state = thread->thread_state();
  thread->set_thread_state(_thread_in_vm);
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
      BREAKPOINT;
    }
  }
  fatal("DEBUG MESSAGE: %s", msg);
}

void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) {
  ttyLocker ttyl;
  FlagSetting fs(Debugging, true);
  tty->print_cr("eip = 0x%08x", eip);
#ifndef PRODUCT
  if ((WizardMode || Verbose) && PrintMiscellaneous) {
    tty->cr();
    findpc(eip);
    tty->cr();
  }
#endif
#define PRINT_REG(rax) \
  { tty->print("%s = ", #rax); os::print_location(tty, rax); }
  PRINT_REG(rax);
  PRINT_REG(rbx);
  PRINT_REG(rcx);
  PRINT_REG(rdx);
  PRINT_REG(rdi);
  PRINT_REG(rsi);
  PRINT_REG(rbp);
  PRINT_REG(rsp);
#undef PRINT_REG
  // Print some words near top of stack.
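  // Layout note (added): the first 8 words are printed one per line and
  // annotated via os::print_location(); the next 16 rows dump 8 raw words
  // each, so 136 stack words are shown in total.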
  int* dump_sp = (int*) rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 16; row++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 8; col++) {
      tty->print(" 0x%08x", *dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)eip-64, (address)eip);
  tty->print_cr("--------");
  Disassembler::decode((address)eip, (address)eip+32);
}

void MacroAssembler::stop(const char* msg) {
  ExternalAddress message((address)msg);
  // push address of message
  pushptr(message.addr());
  { Label L; call(L, relocInfo::none); bind(L); }     // push eip
  pusha();                                            // push registers
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push_CPU_state();

  ExternalAddress message((address) msg);
  // push address of message
  pushptr(message.addr());

  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
  addl(rsp, wordSize);       // discard argument
  pop_CPU_state();
}

void MacroAssembler::print_state() {
  { Label L; call(L, relocInfo::none); bind(L); }     // push eip
  pusha();                                            // push registers

  push_CPU_state();
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32)));
  pop_CPU_state();

  popa();
  addl(rsp, wordSize);
}

#else // _LP64

// 64 bit versions

Address MacroAssembler::as_Address(AddressLiteral adr) {
  // amd64 always does this as a pc-rel
  // we can be absolute or disp based on the instruction type
  // jmp/call are displacements others are absolute
  assert(!adr.is_lval(), "must be rval");
  assert(reachable(adr), "must be");
  return Address((int32_t)(intptr_t)(adr.target() - pc()), adr.target(), adr.reloc());
}

Address MacroAssembler::as_Address(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  lea(rscratch1, base);
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(rscratch1, index._index, index._scale, index._disp);
  return array;
}

void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
  Label L, E;

#ifdef _WIN64
  // Windows always allocates space for its register args
  assert(num_args <= 4, "only register arguments supported");
  subq(rsp, frame::arg_reg_save_area_bytes);
#endif

  // Align stack if necessary
  testl(rsp, 15);
  jcc(Assembler::zero, L);

  subq(rsp, 8);
  {
    call(RuntimeAddress(entry_point));
  }
  addq(rsp, 8);
  jmp(E);

  bind(L);
  {
    call(RuntimeAddress(entry_point));
  }

  bind(E);

#ifdef _WIN64
  // restore stack pointer
  addq(rsp, frame::arg_reg_save_area_bytes);
#endif

}

void MacroAssembler::cmp64(Register src1, AddressLiteral src2) {
  assert(!src2.is_lval(), "should use cmpptr");

  if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch1, src2);
    Assembler::cmpq(src1, Address(rscratch1, 0));
  }
}

int MacroAssembler::corrected_idivq(Register reg) {
  // Full implementation of Java ldiv and lrem; checks for special
  // case as described in JVM spec., p.243 & p.271.  The function
  // returns the (pc) offset of the idivq instruction - may be needed
  // for implicit exceptions.
  //
  //         normal case                           special case
  //
  // input : rax: dividend                         min_long
  //         reg: divisor   (may not be eax/edx)   -1
  //
  // output: rax: quotient  (= rax idiv reg)       min_long
  //         rdx: remainder (= rax irem reg)       0
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  static const int64_t min_long = 0x8000000000000000;
  Label normal_case, special_case;

  // check for special case
  cmp64(rax, ExternalAddress((address) &min_long));
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where
                  // remainder = 0)
  cmpq(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdqq();
  int idivq_offset = offset();
  idivq(reg);

  // normal and special case exit
  bind(special_case);

  return idivq_offset;
}

void MacroAssembler::decrementq(Register reg, int value) {
  if (value == min_jint) { subq(reg, value); return; }
  if (value <  0) { incrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(reg) ; return; }
  /* else */      { subq(reg, value)       ; return; }
}

void MacroAssembler::decrementq(Address dst, int value) {
  if (value == min_jint) { subq(dst, value); return; }
  if (value <  0) { incrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(dst) ; return; }
  /* else */      { subq(dst, value)       ; return; }
}

void MacroAssembler::incrementq(AddressLiteral dst) {
  if (reachable(dst)) {
    incrementq(as_Address(dst));
  } else {
    lea(rscratch1, dst);
    incrementq(Address(rscratch1, 0));
  }
}

void MacroAssembler::incrementq(Register reg, int value) {
  if (value == min_jint) { addq(reg, value); return; }
  if (value <  0) { decrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(reg) ; return; }
  /* else */      { addq(reg, value)       ; return; }
}

void MacroAssembler::incrementq(Address dst, int value) {
  if (value == min_jint) { addq(dst, value); return; }
  if (value <  0) { decrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(dst) ; return; }
  /* else */      { addq(dst, value)       ; return; }
}

// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
void MacroAssembler::jump(ArrayAddress entry) {
  lea(rscratch1, entry.base());
  Address dispatch = entry.index();
  assert(dispatch._base == noreg, "must be");
  dispatch._base = rscratch1;
  jmp(dispatch);
}

void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  cmpq(x_lo, y_lo);
}

void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal64(dst, (intptr_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr) {
  mov_literal64(rscratch1, (intptr_t)adr.target(), adr.rspec());
  movptr(dst, rscratch1);
}

void MacroAssembler::leave() {
  // %%% is this really better? Why not on 32bit too?
  emit_int8((unsigned char)0xC9); // LEAVE
}

void MacroAssembler::lneg(Register hi, Register lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  negq(lo);
}

void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj) {
  mov_literal64(rscratch1, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  movq(dst, rscratch1);
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
  mov_literal64(rscratch1, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
  movq(dst, rscratch1);
}

void MacroAssembler::movptr(Register dst, AddressLiteral src, Register scratch) {
  if (src.is_lval()) {
    mov_literal64(dst, (intptr_t)src.target(), src.rspec());
  } else {
    if (reachable(src)) {
      movq(dst, as_Address(src));
    } else {
      lea(scratch, src);
      movq(dst, Address(scratch, 0));
    }
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src) {
  movq(as_Address(dst), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movq(dst, as_Address(src));
}

// src should NEVER be a real pointer. Use AddressLiteral for true pointers
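// Added note: x86-64 has no instruction form that stores a full 64-bit
// immediate directly to memory, hence the bounce through rscratch1 below.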
void MacroAssembler::movptr(Address dst, intptr_t src) {
  mov64(rscratch1, src);
  movq(dst, rscratch1);
}

// These are mostly for initializing NULL
void MacroAssembler::movptr(Address dst, int32_t src) {
  movslq(dst, src);
}

void MacroAssembler::movptr(Register dst, int32_t src) {
  mov64(dst, (intptr_t)src);
}

void MacroAssembler::pushoop(jobject obj) {
  movoop(rscratch1, obj);
  push(rscratch1);
}

void MacroAssembler::pushklass(Metadata* obj) {
  mov_metadata(rscratch1, obj);
  push(rscratch1);
}

void MacroAssembler::pushptr(AddressLiteral src) {
  lea(rscratch1, src);
  if (src.is_lval()) {
    push(rscratch1);
  } else {
    pushq(Address(rscratch1, 0));
  }
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
  }

  // Always clear the pc because it could have been set by make_walkable()
  movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
  vzeroupper();
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc) {
  vzeroupper();
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()),
           last_java_fp);
  }

  // last_java_pc is optional
  if (last_java_pc != NULL) {
    Address java_pc(r15_thread,
                    JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
    lea(rscratch1, InternalAddress(last_java_pc));
    movptr(java_pc, rscratch1);
  }

  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::stop(const char* msg) {
  if (ShowMessageBoxOnError) {
    address rip = pc();
    pusha(); // get regs on stack
    lea(c_rarg1, InternalAddress(rip));
    movq(c_rarg2, rsp); // pass pointer to regs array
  }
  lea(c_rarg0, ExternalAddress((address) msg));
  andq(rsp, -16); // align stack as required by ABI
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call
  push_CPU_state();   // keeps alignment at 16 bytes
  lea(c_rarg0, ExternalAddress((address) msg));
  lea(rax, ExternalAddress(CAST_FROM_FN_PTR(address, warning)));
  call(rax);
  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::print_state() {
  address rip = pc();
  pusha();            // get regs on stack
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call
  push_CPU_state();   // keeps alignment at 16 bytes

  lea(c_rarg0, InternalAddress(rip));
  lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
  call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);

  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
  popa();
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  // In order to get locks to work, we need to fake an in_VM state
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // XXX correct this offset for amd64
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state64(pc, regs);
      BREAKPOINT;
    }
  }
  fatal("DEBUG MESSAGE: %s", msg);
}

void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
  ttyLocker ttyl;
  FlagSetting fs(Debugging, true);
  tty->print_cr("rip = 0x%016lx", (intptr_t)pc);
#ifndef PRODUCT
  tty->cr();
  findpc(pc);
  tty->cr();
#endif
#define PRINT_REG(rax, value) \
  { tty->print("%s = ", #rax); os::print_location(tty, value); }
  PRINT_REG(rax, regs[15]);
  PRINT_REG(rbx, regs[12]);
  PRINT_REG(rcx, regs[14]);
  PRINT_REG(rdx, regs[13]);
  PRINT_REG(rdi, regs[8]);
  PRINT_REG(rsi, regs[9]);
  PRINT_REG(rbp, regs[10]);
  PRINT_REG(rsp, regs[11]);
  PRINT_REG(r8 , regs[7]);
  PRINT_REG(r9 , regs[6]);
  PRINT_REG(r10, regs[5]);
  PRINT_REG(r11, regs[4]);
  PRINT_REG(r12, regs[3]);
  PRINT_REG(r13, regs[2]);
  PRINT_REG(r14, regs[1]);
  PRINT_REG(r15, regs[0]);
#undef PRINT_REG
  // Print some words near top of stack.
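  // Layout note (added): regs[] is the pusha() save area -- rax was pushed
  // first, so it sits at the highest index (regs[15]) and r15, pushed last,
  // at regs[0]; regs[11] below is the saved rsp used as the dump base.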
  int64_t* rsp = (int64_t*) regs[11];
  int64_t* dump_sp = rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 25; row++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 4; col++) {
      tty->print(" 0x%016lx", (intptr_t)*dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)pc-64, (address)pc);
  tty->print_cr("--------");
  Disassembler::decode((address)pc, (address)pc+32);
}

#endif // _LP64

// Now versions that are common to 32/64 bit

void MacroAssembler::addptr(Register dst, int32_t imm32) {
  LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
}

void MacroAssembler::addptr(Register dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}

void MacroAssembler::addptr(Address dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}

void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::addsd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::addsd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::addss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    addss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    addss(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::addpd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::addpd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::align(int modulus) {
  align(modulus, offset());
}

void MacroAssembler::align(int modulus, int target) {
  if (target % modulus != 0) {
    nop(modulus - (target % modulus));
  }
}

void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register scratch_reg) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::andpd(dst, as_Address(src));
  } else {
    lea(scratch_reg, src);
    Assembler::andpd(dst, Address(scratch_reg, 0));
  }
}

void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register scratch_reg) {
  // Used in sign-masking with aligned address.
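  // Added note (illustrative): a typical mask clears the sign bit of each
  // lane (float abs) or flips it (negate). Legacy SSE instructions require
  // the 16-byte-aligned memory operand asserted below; VEX-encoded AVX
  // forms do not, hence the UseAVX escape.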
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::andps(dst, as_Address(src));
  } else {
    lea(scratch_reg, src);
    Assembler::andps(dst, Address(scratch_reg, 0));
  }
}

void MacroAssembler::andptr(Register dst, int32_t imm32) {
  LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
}

void MacroAssembler::atomic_incl(Address counter_addr) {
  lock();
  incrementl(counter_addr);
}

void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register scr) {
  if (reachable(counter_addr)) {
    atomic_incl(as_Address(counter_addr));
  } else {
    lea(scr, counter_addr);
    atomic_incl(Address(scr, 0));
  }
}

#ifdef _LP64
void MacroAssembler::atomic_incq(Address counter_addr) {
  lock();
  incrementq(counter_addr);
}

void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register scr) {
  if (reachable(counter_addr)) {
    atomic_incq(as_Address(counter_addr));
  } else {
    lea(scr, counter_addr);
    atomic_incq(Address(scr, 0));
  }
}
#endif

// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages.  This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  movptr(tmp, rsp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  bind(loop);
  movl(Address(tmp, (-os::vm_page_size())), size );
  subptr(tmp, os::vm_page_size());
  subl(size, os::vm_page_size());
  jcc(Assembler::greater, loop);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again.  (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.)  Skip this address by starting at i=1, and
  // touch a few more pages below.  N.B.  It is important to touch all
  // the way down including all pages in the shadow zone.
  for (int i = 1; i < ((int)JavaThread::stack_shadow_zone_size() / os::vm_page_size()); i++) {
    // this could be any sized move but this can be a debugging crumb
    // so the bigger the better.
    movptr(Address(tmp, (-i*os::vm_page_size())), size );
  }
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;
  Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
  NOT_LP64(get_thread(rsi);)

  cmpptr(rsp, Address(thread, JavaThread::reserved_stack_activation_offset()));
  jcc(Assembler::below, no_reserved_zone_enabling);

  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), thread);
  jump(RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

int MacroAssembler::biased_locking_enter(Register lock_reg,
                                         Register obj_reg,
                                         Register swap_reg,
                                         Register tmp_reg,
                                         bool swap_reg_contains_mark,
                                         Label& done,
                                         Label* slow_case,
                                         BiasedLockingCounters* counters) {
  assert(UseBiasedLocking, "why call this otherwise?");
  assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
  assert(tmp_reg != noreg, "tmp_reg must be supplied");
  assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
  assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits, "biased locking makes assumptions about bit layout");
  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
  NOT_LP64( Address saved_mark_addr(lock_reg, 0); )

  if (PrintBiasedLockingStatistics && counters == NULL) {
    counters = BiasedLocking::counters();
  }
  // Biased locking
  // See whether the lock is currently biased toward our thread and
  // whether the epoch is still valid
  // Note that the runtime guarantees sufficient alignment of JavaThread
  // pointers to allow age to be placed into low bits
  // First check to see whether biasing is even enabled for this object
  Label cas_label;
  int null_check_offset = -1;
  if (!swap_reg_contains_mark) {
    null_check_offset = offset();
    movptr(swap_reg, mark_addr);
  }
  movptr(tmp_reg, swap_reg);
  andptr(tmp_reg, markWord::biased_lock_mask_in_place);
  cmpptr(tmp_reg, markWord::biased_lock_pattern);
  jcc(Assembler::notEqual, cas_label);
  // The bias pattern is present in the object's header. Need to check
  // whether the bias owner and the epoch are both still current.
#ifndef _LP64
  // Note that because there is no current thread register on x86_32 we
  // need to store off the mark word we read out of the object to
  // avoid reloading it and needing to recheck invariants below. This
  // store is unfortunate but it makes the overall code shorter and
  // simpler.
  movptr(saved_mark_addr, swap_reg);
#endif
  if (swap_reg_contains_mark) {
    null_check_offset = offset();
  }
  load_prototype_header(tmp_reg, obj_reg);
#ifdef _LP64
  orptr(tmp_reg, r15_thread);
  xorptr(tmp_reg, swap_reg);
  Register header_reg = tmp_reg;
#else
  xorptr(tmp_reg, swap_reg);
  get_thread(swap_reg);
  xorptr(swap_reg, tmp_reg);
  Register header_reg = swap_reg;
#endif
  andptr(header_reg, ~((int) markWord::age_mask_in_place));
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address) counters->biased_lock_entry_count_addr()));
  }
  jcc(Assembler::equal, done);

  Label try_revoke_bias;
  Label try_rebias;

  // At this point we know that the header has the bias pattern and
  // that we are not the bias owner in the current epoch. We need to
  // figure out more details about the state of the header in order to
  // know what operations can be legally performed on the object's
  // header.

  // If the low three bits in the xor result aren't clear, that means
  // the prototype header is no longer biased and we have to revoke
  // the bias on this object.
  testptr(header_reg, markWord::biased_lock_mask_in_place);
  jccb(Assembler::notZero, try_revoke_bias);

  // Biasing is still enabled for this data type. See whether the
  // epoch of the current bias is still valid, meaning that the epoch
  // bits of the mark word are equal to the epoch bits of the
  // prototype header. (Note that the prototype header's epoch bits
  // only change at a safepoint.) If not, attempt to rebias the object
  // toward the current thread. Note that we must be absolutely sure
  // that the current epoch is invalid in order to do this because
  // otherwise the manipulations it performs on the mark word are
  // illegal.
  testptr(header_reg, markWord::epoch_mask_in_place);
  jccb(Assembler::notZero, try_rebias);

  // The epoch of the current bias is still valid but we know nothing
  // about the owner; it might be set or it might be clear. Try to
  // acquire the bias of the object using an atomic operation. If this
  // fails we will go in to the runtime to revoke the object's bias.
  // Note that we first construct the presumed unbiased header so we
  // don't accidentally blow away another thread's valid bias.
  NOT_LP64( movptr(swap_reg, saved_mark_addr); )
  andptr(swap_reg,
         markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place);
#ifdef _LP64
  movptr(tmp_reg, swap_reg);
  orptr(tmp_reg, r15_thread);
#else
  get_thread(tmp_reg);
  orptr(tmp_reg, swap_reg);
#endif
  lock();
  cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
  // If the biasing toward our thread failed, this means that
  // another thread succeeded in biasing it toward itself and we
  // need to revoke that bias. The revocation will occur in the
  // interpreter runtime in the slow case.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
  }
  if (slow_case != NULL) {
    jcc(Assembler::notZero, *slow_case);
  }
  jmp(done);

  bind(try_rebias);
  // At this point we know the epoch has expired, meaning that the
  // current "bias owner", if any, is actually invalid. Under these
  // circumstances _only_, we are allowed to use the current header's
  // value as the comparison value when doing the cas to acquire the
  // bias in the current epoch. In other words, we allow transfer of
  // the bias from one thread to another directly in this situation.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  load_prototype_header(tmp_reg, obj_reg);
#ifdef _LP64
  orptr(tmp_reg, r15_thread);
#else
  get_thread(swap_reg);
  orptr(tmp_reg, swap_reg);
  movptr(swap_reg, saved_mark_addr);
#endif
  lock();
  cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
  // If the biasing toward our thread failed, then another thread
  // succeeded in biasing it toward itself and we need to revoke that
  // bias. The revocation will occur in the runtime in the slow case.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
  }
  if (slow_case != NULL) {
    jcc(Assembler::notZero, *slow_case);
  }
  jmp(done);

  bind(try_revoke_bias);
  // The prototype mark in the klass doesn't have the bias bit set any
  // more, indicating that objects of this data type are not supposed
  // to be biased any more. We are going to try to reset the mark of
  // this object to the prototype value and fall through to the
  // CAS-based locking scheme. Note that if our CAS fails, it means
  // that another thread raced us for the privilege of revoking the
  // bias of this particular object, so it's okay to continue in the
  // normal locking code.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  NOT_LP64( movptr(swap_reg, saved_mark_addr); )
  load_prototype_header(tmp_reg, obj_reg);
  lock();
  cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
  // Fall through to the normal CAS-based lock, because no matter what
  // the result of the above CAS, some thread must have succeeded in
  // removing the bias bit from the object's header.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
  }

  bind(cas_label);

  return null_check_offset;
}

void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
  assert(UseBiasedLocking, "why call this otherwise?");

  // Check for biased locking unlock case, which is a no-op
  // Note: we do not have to check the thread ID for two reasons.
  // First, the interpreter checks for IllegalMonitorStateException at
  // a higher level. Second, if the bias was revoked while we held the
  // lock, the object could not be rebiased toward another thread, so
  // the bias bit would be clear.
  movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
  andptr(temp_reg, markWord::biased_lock_mask_in_place);
  cmpptr(temp_reg, markWord::biased_lock_pattern);
  jcc(Assembler::equal, done);
}

#ifdef COMPILER2

// Increment the ObjectMonitor's ref_count for safety or force a branch
// to 'done' with ICC.ZF=0 to indicate failure/take the slow path.
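// In outline (added paraphrase, not upstream text): bump ref_count first so
// a concurrent async deflater will skip this monitor, then re-check that
// deflation had not already started (owner == DEFLATER_MARKER,
// ref_count <= 0, or a recycled 'object' field); on any of those, undo the
// increment and branch to 'done' with ICC.ZF=0.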
void MacroAssembler::inc_om_ref_count(Register obj_reg, Register om_reg, Register tmp_reg, Label& done) {
  atomic_incl(Address(om_reg, OM_OFFSET_NO_MONITOR_VALUE_TAG(ref_count)));

  Label LGoSlowPath;
  if (AsyncDeflateIdleMonitors) {
    // Race here if monitor is not owned! The above ref_count bump
    // will cause subsequent async deflation to skip it. However,
    // previous or concurrent async deflation is a race.

    // First check: if the owner field == DEFLATER_MARKER:
    movptr(tmp_reg, Address(om_reg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
    // DEFLATER_MARKER == reinterpret_cast<void*>(-1) so the compiler
    // doesn't like to use the define here:
    cmpptr(tmp_reg, -1);
    // If marked for async deflation, then take the slow path. This is a
    // simpler check than what ObjectMonitorHandle::save_om_ptr() does
    // so ObjectMonitor::install_displaced_markword_in_object() doesn't
    // have to be implemented in macro assembler.
    jccb(Assembler::equal, LGoSlowPath);

    // Second check: if ref_count field <= 0:
    movptr(tmp_reg, Address(om_reg, OM_OFFSET_NO_MONITOR_VALUE_TAG(ref_count)));
    cmpptr(tmp_reg, 0);
    // If async deflation is in the process of bailing out, but has not
    // yet restored the ref_count field, then we take the slow path. We
    // want a stable ref_count value for the fast path.
    jccb(Assembler::lessEqual, LGoSlowPath);

    // Final check: if object field == obj_reg:
    cmpptr(obj_reg, Address(om_reg, OM_OFFSET_NO_MONITOR_VALUE_TAG(object)));
    // If the ObjectMonitor has been deflated and recycled, then take
    // the slow path.
    jccb(Assembler::notEqual, LGoSlowPath);
  }

  Label LRetToCaller;
  // We leave the ref_count incremented to protect the caller's code
  // paths against async deflation.
  jmpb(LRetToCaller);

  bind(LGoSlowPath);
  lock();
  decrementl(Address(om_reg, OM_OFFSET_NO_MONITOR_VALUE_TAG(ref_count)));
  // Jump to 'done' with ICC.ZF=0 to indicate failure/take the slow path.
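  // (Added note: or-ing the nonzero immediate below always yields a nonzero
  // result, which is what clears ZF.)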
  orl(tmp_reg, 1);
  jmp(done);

  bind(LRetToCaller);
}

#if INCLUDE_RTM_OPT

// Update rtm_counters based on abort status
// input: abort_status
//        rtm_counters (RTMLockingCounters*)
// flags are killed
void MacroAssembler::rtm_counters_update(Register abort_status, Register rtm_counters) {

  atomic_incptr(Address(rtm_counters, RTMLockingCounters::abort_count_offset()));
  if (PrintPreciseRTMLockingStatistics) {
    for (int i = 0; i < RTMLockingCounters::ABORT_STATUS_LIMIT; i++) {
      Label check_abort;
      testl(abort_status, (1<<i));
      jccb(Assembler::equal, check_abort);
      atomic_incptr(Address(rtm_counters, RTMLockingCounters::abortX_count_offset() + (i * sizeof(uintx))));
      bind(check_abort);
    }
  }
}

// Branch if (random & (count-1) != 0), count is 2^n
// tmp, scr and flags are killed
void MacroAssembler::branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel) {
  assert(tmp == rax, "");
  assert(scr == rdx, "");
  rdtsc(); // modifies EDX:EAX
  andptr(tmp, count-1);
  jccb(Assembler::notZero, brLabel);
}

// Perform abort ratio calculation, set no_rtm bit if high ratio
// input:  rtm_counters_Reg (RTMLockingCounters* address)
// tmpReg, rtm_counters_Reg and flags are killed
void MacroAssembler::rtm_abort_ratio_calculation(Register tmpReg,
                                                 Register rtm_counters_Reg,
                                                 RTMLockingCounters* rtm_counters,
                                                 Metadata* method_data) {
  Label L_done, L_check_always_rtm1, L_check_always_rtm2;

  if (RTMLockingCalculationDelay > 0) {
    // Delay calculation
    movptr(tmpReg, ExternalAddress((address) RTMLockingCounters::rtm_calculation_flag_addr()), tmpReg);
    testptr(tmpReg, tmpReg);
    jccb(Assembler::equal, L_done);
  }
  // Abort ratio calculation only if abort_count > RTMAbortThreshold
  //   Aborted transactions = abort_count * 100
  //   All transactions = total_count * RTMTotalCountIncrRate
  //   Set no_rtm bit if (Aborted transactions >= All transactions * RTMAbortRatio)

  movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::abort_count_offset()));
  cmpptr(tmpReg, RTMAbortThreshold);
  jccb(Assembler::below, L_check_always_rtm2);
  imulptr(tmpReg, tmpReg, 100);

  Register scrReg = rtm_counters_Reg;
  movptr(scrReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset()));
  imulptr(scrReg, scrReg, RTMTotalCountIncrRate);
  imulptr(scrReg, scrReg, RTMAbortRatio);
  cmpptr(tmpReg, scrReg);
  jccb(Assembler::below, L_check_always_rtm1);
  if (method_data != NULL) {
    // set rtm_state to "no rtm" in MDO
    mov_metadata(tmpReg, method_data);
    lock();
    orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), NoRTM);
  }
  jmpb(L_done);
  bind(L_check_always_rtm1);
  // Reload RTMLockingCounters* address
  lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters));
  bind(L_check_always_rtm2);
  movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset()));
  cmpptr(tmpReg, RTMLockingThreshold / RTMTotalCountIncrRate);
  jccb(Assembler::below, L_done);
  if (method_data != NULL) {
    // set rtm_state to "always rtm" in MDO
    mov_metadata(tmpReg, method_data);
    lock();
    orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), UseRTM);
  }
  bind(L_done);
}

// Update counters and perform abort ratio calculation
// input:  abort_status_Reg
// rtm_counters_Reg, flags are killed
void MacroAssembler::rtm_profiling(Register abort_status_Reg,
                                   Register rtm_counters_Reg,
                                   RTMLockingCounters* rtm_counters,
                                   Metadata* method_data,
                                   bool profile_rtm) {

  assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
  // update rtm counters based on rax value at abort
  // reads abort_status_Reg, updates flags
  lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters));
  rtm_counters_update(abort_status_Reg, rtm_counters_Reg);
  if (profile_rtm) {
    // Save abort status because abort_status_Reg is used by following code.
    if (RTMRetryCount > 0) {
      push(abort_status_Reg);
    }
    assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
    rtm_abort_ratio_calculation(abort_status_Reg, rtm_counters_Reg, rtm_counters, method_data);
    // restore abort status
    if (RTMRetryCount > 0) {
      pop(abort_status_Reg);
    }
  }
}

// Retry on abort if abort's status is 0x6: can retry (0x2) | memory conflict (0x4)
// inputs: retry_count_Reg
//       : abort_status_Reg
// output: retry_count_Reg decremented by 1
// flags are killed
void MacroAssembler::rtm_retry_lock_on_abort(Register retry_count_Reg, Register abort_status_Reg, Label& retryLabel) {
  Label doneRetry;
  assert(abort_status_Reg == rax, "");
  // The abort reason bits are in eax (see all states in rtmLocking.hpp)
  // 0x6 = conflict on which we can retry (0x2) | memory conflict (0x4)
  // if reason is in 0x6 and retry count != 0 then retry
  andptr(abort_status_Reg, 0x6);
  jccb(Assembler::zero, doneRetry);
  testl(retry_count_Reg, retry_count_Reg);
  jccb(Assembler::zero, doneRetry);
  pause();
  decrementl(retry_count_Reg);
  jmp(retryLabel);
  bind(doneRetry);
}

// Spin and retry if lock is busy,
// inputs: box_Reg (monitor address)
//       : retry_count_Reg
// output: retry_count_Reg decremented by 1
//       : clear z flag if retry count exceeded
// tmp_Reg, scr_Reg, flags are killed
void MacroAssembler::rtm_retry_lock_on_busy(Register retry_count_Reg, Register box_Reg,
                                            Register tmp_Reg, Register scr_Reg, Label& retryLabel) {
  Label SpinLoop, SpinExit, doneRetry;
  int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);

  testl(retry_count_Reg, retry_count_Reg);
  jccb(Assembler::zero, doneRetry);
  decrementl(retry_count_Reg);
  movptr(scr_Reg, RTMSpinLoopCount);

  bind(SpinLoop);
  pause();
  decrementl(scr_Reg);
  jccb(Assembler::lessEqual, SpinExit);
  movptr(tmp_Reg, Address(box_Reg, owner_offset));
  testptr(tmp_Reg, tmp_Reg);
  jccb(Assembler::notZero, SpinLoop);

  bind(SpinExit);
  jmp(retryLabel);
  bind(doneRetry);
  incrementl(retry_count_Reg); // clear z flag
}

// Use RTM for normal stack locks
// Input: objReg (object to lock)
void MacroAssembler::rtm_stack_locking(Register objReg, Register tmpReg, Register scrReg,
                                       Register retry_on_abort_count_Reg,
                                       RTMLockingCounters* stack_rtm_counters,
                                       Metadata* method_data, bool profile_rtm,
                                       Label& DONE_LABEL, Label& IsInflated) {
  assert(UseRTMForStackLocks, "why call this otherwise?");
  assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
  assert(tmpReg == rax, "");
  assert(scrReg == rdx, "");
  Label L_rtm_retry, L_decrement_retry, L_on_abort;
  if (RTMRetryCount > 0) {
    movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
    bind(L_rtm_retry);
  }
  movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));
  testptr(tmpReg, markWord::monitor_value);  // inflated vs stack-locked|neutral|biased
  jcc(Assembler::notZero, IsInflated);

  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
    Label L_noincrement;
    if (RTMTotalCountIncrRate > 1) {
      // tmpReg, scrReg and flags are killed
      branch_on_random_using_rdtsc(tmpReg, scrReg, RTMTotalCountIncrRate, L_noincrement);
    }
    assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM");
    atomic_incptr(ExternalAddress((address)stack_rtm_counters->total_count_addr()), scrReg);
    bind(L_noincrement);
  }
  xbegin(L_on_abort);
  movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));  // fetch markword
  andptr(tmpReg, markWord::biased_lock_mask_in_place);               // look at 3 lock bits
  cmpptr(tmpReg, markWord::unlocked_value);                          // bits = 001 unlocked
  jcc(Assembler::equal, DONE_LABEL);                                 // all done if unlocked

  Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
  if (UseRTMXendForLockBusy) {
    xend();
    movptr(abort_status_Reg, 0x2);    // Set the abort status to 2 (so we can retry)
    jmp(L_decrement_retry);
  }
  else {
    xabort(0);
  }
  bind(L_on_abort);
  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
    rtm_profiling(abort_status_Reg, scrReg, stack_rtm_counters, method_data, profile_rtm);
  }
  bind(L_decrement_retry);
  if (RTMRetryCount > 0) {
    // retry on lock abort if abort status is 'can retry' (0x2) or 'memory conflict' (0x4)
    rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry);
  }
}

// Use RTM for inflating locks
// inputs: objReg (object to lock)
//         boxReg (on-stack box address (displaced header location) - KILLED)
//         tmpReg (ObjectMonitor address + markWord::monitor_value)
void MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, Register tmpReg,
                                          Register scrReg, Register retry_on_busy_count_Reg,
                                          Register retry_on_abort_count_Reg,
                                          RTMLockingCounters* rtm_counters,
                                          Metadata* method_data, bool profile_rtm,
                                          Label& DONE_LABEL) {
  assert(UseRTMLocking, "why call this otherwise?");
  assert(tmpReg == rax, "");
  assert(scrReg == rdx, "");
  Label L_rtm_retry, L_decrement_retry, L_on_abort, L_local_done;
  int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);

  // Without cast to int32_t this style of movptr will destroy r10 which is typically obj.
  movptr(Address(boxReg, 0), (int32_t)intptr_t(markWord::unused_mark().value()));

  if (!HandshakeAfterDeflateIdleMonitors) {
    // Increment the ObjectMonitor's ref_count for safety or force the
    // enter slow path via DONE_LABEL.
    // In rtm_inflated_locking(), initially tmpReg contains the object's
    // mark word which, in this case, is the (ObjectMonitor* | monitor_value).
    // Also this code uses scrReg as its temporary register.
    inc_om_ref_count(objReg, tmpReg /* om_reg */, scrReg /* tmp_reg */, DONE_LABEL);
  }

  movptr(boxReg, tmpReg); // Save ObjectMonitor address

  if (RTMRetryCount > 0) {
    movl(retry_on_busy_count_Reg, RTMRetryCount);  // Retry on lock busy
    movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
    bind(L_rtm_retry);
  }
  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
    Label L_noincrement;
    if (RTMTotalCountIncrRate > 1) {
      // tmpReg, scrReg and flags are killed
      branch_on_random_using_rdtsc(tmpReg, scrReg, RTMTotalCountIncrRate, L_noincrement);
    }
    assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
    atomic_incptr(ExternalAddress((address)rtm_counters->total_count_addr()), scrReg);
    bind(L_noincrement);
  }
  xbegin(L_on_abort);
  movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));
  movptr(tmpReg, Address(tmpReg, owner_offset));
  testptr(tmpReg, tmpReg);
  jcc(Assembler::zero, L_local_done);
  if (UseRTMXendForLockBusy) {
    xend();
    jmp(L_decrement_retry);
  }
  else {
    xabort(0);
  }
  bind(L_on_abort);
  Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
    rtm_profiling(abort_status_Reg, scrReg, rtm_counters, method_data, profile_rtm);
  }
  if (RTMRetryCount > 0) {
    // retry on lock abort if abort status is 'can retry' (0x2) or 'memory conflict' (0x4)
    rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry);
  }

  movptr(tmpReg, Address(boxReg, owner_offset));
  testptr(tmpReg, tmpReg);
  jccb(Assembler::notZero, L_decrement_retry);

  // Appears unlocked - try to swing _owner from null to non-null.
  // Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand.
#ifdef _LP64
  Register threadReg = r15_thread;
#else
  get_thread(scrReg);
  Register threadReg = scrReg;
#endif
  lock();
  cmpxchgptr(threadReg, Address(boxReg, owner_offset)); // Updates tmpReg

  if (RTMRetryCount > 0) {
    // success done else retry
    jccb(Assembler::equal, L_local_done);
    bind(L_decrement_retry);
    // Spin and retry if lock is busy.
    rtm_retry_lock_on_busy(retry_on_busy_count_Reg, boxReg, tmpReg, scrReg, L_rtm_retry);
  }
  else {
    bind(L_decrement_retry);
  }

  // rtm_inflated_locking() exit paths come here except for a failed
  // inc_om_ref_count() which goes directly to DONE_LABEL.
  bind(L_local_done);
  if (!HandshakeAfterDeflateIdleMonitors) {
    pushf(); // Preserve flags.
    // Decrement the ObjectMonitor's ref_count.
    lock();
    decrementl(Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(ref_count)));
    popf(); // Restore flags so we have the proper ICC.ZF value.
  }

  jmp(DONE_LABEL);
}

#endif // INCLUDE_RTM_OPT

// fast_lock and fast_unlock used by C2

// Because the transitions from emitted code to the runtime
// monitorenter/exit helper stubs are so slow it's critical that
// we inline both the stack-locking fast path and the inflated fast path.
//
// See also: cmpFastLock and cmpFastUnlock.
//
// What follows is a specialized inline transliteration of the code
// in enter() and exit(). If we're concerned about I$ bloat another
If we're concerned about I$ bloat, another
1690 // option would be to emit TrySlowEnter and TrySlowExit methods
1691 // at startup-time. These methods would accept arguments as
1692 // (rax=Obj, rbx=Self, rcx=box, rdx=Scratch) and return success-failure
1693 // indications in the icc.ZFlag. fast_lock and fast_unlock would simply
1694 // marshal the arguments and emit calls to TrySlowEnter and TrySlowExit.
1695 // In practice, however, the # of lock sites is bounded and is usually small.
1696 // Besides the call overhead, TrySlowEnter and TrySlowExit might suffer
1697 // if the processor uses simple bimodal branch predictors keyed by EIP,
1698 // since the helper routines would be called from multiple synchronization
1699 // sites.
1700 //
1701 // An even better approach would be to write "MonitorEnter()" and "MonitorExit()"
1702 // in java - using j.u.c and unsafe - and just bind the lock and unlock sites
1703 // to those specialized methods. That'd give us a mostly platform-independent
1704 // implementation that the JITs could optimize and inline at their pleasure.
1705 // Done correctly, the only time we'd need to cross to native code would be
1706 // to park() or unpark() threads. We'd also need a few more unsafe operators
1707 // to (a) prevent compiler-JIT reordering of non-volatile accesses, and
1708 // (b) provide explicit barriers or fence operations.
1709 //
1710 // TODO:
1711 //
1712 // * Arrange for C2 to pass "Self" into fast_lock and fast_unlock in one of the registers (scr).
1713 // This avoids manifesting the Self pointer in the fast_lock and fast_unlock terminals.
1714 // Given TLAB allocation, Self is usually manifested in a register, so passing it into
1715 // the lock operators would typically be faster than reifying Self.
1716 //
1717 // * Ideally I'd define the primitives as:
1718 // fast_lock (nax Obj, nax box, EAX tmp, nax scr) where box, tmp and scr are KILLED.
1719 // fast_unlock (nax Obj, EAX box, nax tmp) where box and tmp are KILLED
1720 // Unfortunately ADLC bugs prevent us from expressing the ideal form.
1721 // Instead, we're stuck with the rather awkward and brittle register assignments below.
1722 // Furthermore the register assignments are overconstrained, possibly resulting in
1723 // sub-optimal code near the synchronization site.
1724 //
1725 // * Eliminate the sp-proximity tests and just use "== Self" tests instead.
1726 // Alternately, use a better sp-proximity test.
1727 //
1728 // * Currently ObjectMonitor._Owner can hold either an sp value or a (THREAD *) value.
1729 // Either one is sufficient to uniquely identify a thread.
1730 // TODO: eliminate use of sp in _owner and use get_thread(tr) instead.
1731 //
1732 // * Intrinsify notify() and notifyAll() for the common cases where the
1733 // object is locked by the calling thread but the waitlist is empty.
1734 // Avoid the expensive JNI calls to JVM_Notify() and JVM_NotifyAll().
1735 //
1736 // * Use jccb and jmpb instead of jcc and jmp to improve code density.
1737 // But beware of excessive branch density on AMD Opterons.
1738 //
1739 // * Both fast_lock and fast_unlock set the ICC.ZF to indicate success
1740 // or failure of the fast path. If the fast path fails then we pass
1741 // control to the slow path, typically in C. In fast_lock and
1742 // fast_unlock we often branch to DONE_LABEL, just to find that C2
1743 // will emit a conditional branch immediately after the node.
1744 // So we have branches to branches and lots of ICC.ZF games.
1745 // Instead, it might be better to have C2 pass a "FailureLabel" 1746 // into fast_lock and fast_unlock. In the case of success, control 1747 // will drop through the node. ICC.ZF is undefined at exit. 1748 // In the case of failure, the node will branch directly to the 1749 // FailureLabel 1750 1751 1752 // obj: object to lock 1753 // box: on-stack box address (displaced header location) - KILLED 1754 // rax,: tmp -- KILLED 1755 // scr: tmp -- KILLED 1756 void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg, 1757 Register scrReg, Register cx1Reg, Register cx2Reg, 1758 BiasedLockingCounters* counters, 1759 RTMLockingCounters* rtm_counters, 1760 RTMLockingCounters* stack_rtm_counters, 1761 Metadata* method_data, 1762 bool use_rtm, bool profile_rtm) { 1763 // Ensure the register assignments are disjoint 1764 assert(tmpReg == rax, ""); 1765 1766 if (use_rtm) { 1767 assert_different_registers(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg); 1768 } else { 1769 assert(cx1Reg == noreg, ""); 1770 assert(cx2Reg == noreg, ""); 1771 assert_different_registers(objReg, boxReg, tmpReg, scrReg); 1772 } 1773 1774 if (counters != NULL) { 1775 atomic_incl(ExternalAddress((address)counters->total_entry_count_addr()), scrReg); 1776 } 1777 1778 // Possible cases that we'll encounter in fast_lock 1779 // ------------------------------------------------ 1780 // * Inflated 1781 // -- unlocked 1782 // -- Locked 1783 // = by self 1784 // = by other 1785 // * biased 1786 // -- by Self 1787 // -- by other 1788 // * neutral 1789 // * stack-locked 1790 // -- by self 1791 // = sp-proximity test hits 1792 // = sp-proximity test generates false-negative 1793 // -- by other 1794 // 1795 1796 Label IsInflated, DONE_LABEL; 1797 1798 // it's stack-locked, biased or neutral 1799 // TODO: optimize away redundant LDs of obj->mark and improve the markword triage 1800 // order to reduce the number of conditional branches in the most common cases. 1801 // Beware -- there's a subtle invariant that fetch of the markword 1802 // at [FETCH], below, will never observe a biased encoding (*101b). 1803 // If this invariant is not held we risk exclusion (safety) failure. 1804 if (UseBiasedLocking && !UseOptoBiasInlining) { 1805 biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, counters); 1806 } 1807 1808 #if INCLUDE_RTM_OPT 1809 if (UseRTMForStackLocks && use_rtm) { 1810 rtm_stack_locking(objReg, tmpReg, scrReg, cx2Reg, 1811 stack_rtm_counters, method_data, profile_rtm, 1812 DONE_LABEL, IsInflated); 1813 } 1814 #endif // INCLUDE_RTM_OPT 1815 1816 movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // [FETCH] 1817 testptr(tmpReg, markWord::monitor_value); // inflated vs stack-locked|neutral|biased 1818 jccb(Assembler::notZero, IsInflated); 1819 1820 // Attempt stack-locking ... 1821 orptr (tmpReg, markWord::unlocked_value); 1822 movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS 1823 lock(); 1824 cmpxchgptr(boxReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Updates tmpReg 1825 if (counters != NULL) { 1826 cond_inc32(Assembler::equal, 1827 ExternalAddress((address)counters->fast_path_entry_count_addr())); 1828 } 1829 jcc(Assembler::equal, DONE_LABEL); // Success 1830 1831 // Recursive locking. 1832 // The object is stack-locked: markword contains stack pointer to BasicLock. 1833 // Locked by current thread if difference with current SP is less than one page. 
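// At this point the CAS above has failed, so tmpReg (rax) holds the current
// mark word, which for a stack-lock is the address of the owning BasicLock.
// Subtracting rsp and masking below leaves zero (ZF == 1) exactly when that
// BasicLock lies within one page of the current SP; the zero stored into the
// box then marks this as a recursive stack-lock (fast_unlock treats a zero
// displaced header as the recursive case).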
1834 subptr(tmpReg, rsp);
1835 // Next instruction sets ZFlag == 1 (Success) if difference is less than one page.
1836 andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - os::vm_page_size())) );
1837 movptr(Address(boxReg, 0), tmpReg);
1838 if (counters != NULL) {
1839 cond_inc32(Assembler::equal,
1840 ExternalAddress((address)counters->fast_path_entry_count_addr()));
1841 }
1842 jmp(DONE_LABEL);
1843
1844 bind(IsInflated);
1845 // The object is inflated. tmpReg contains the ObjectMonitor* + markWord::monitor_value
1846
1847 #if INCLUDE_RTM_OPT
1848 // Use the same RTM locking code in 32- and 64-bit VM.
1849 if (use_rtm) {
1850 rtm_inflated_locking(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg,
1851 rtm_counters, method_data, profile_rtm, DONE_LABEL);
1852 } else {
1853 #endif // INCLUDE_RTM_OPT
1854
1855 #ifndef _LP64
1856 // The object is inflated.
1857
1858 // boxReg refers to the on-stack BasicLock in the current frame.
1859 // We'd like to write:
1860 // set box->_displaced_header = markWord::unused_mark(). Any non-0 value suffices.
1861 // This is convenient but results in a ST-before-CAS penalty. The following CAS suffers
1862 // additional latency as we have another ST in the store buffer that must drain.
1863
1864 // avoid ST-before-CAS
1865 // register juggle because we need tmpReg for cmpxchgptr below
1866 movptr(scrReg, boxReg);
1867 movptr(boxReg, tmpReg); // consider: LEA box, [tmp-2]
1868
1869 // Optimistic form: consider XORL tmpReg,tmpReg
1870 movptr(tmpReg, NULL_WORD);
1871
1872 // Appears unlocked - try to swing _owner from null to non-null.
1873 // Ideally, I'd manifest "Self" with get_thread and then attempt
1874 // to CAS the register containing Self into m->Owner.
1875 // But we don't have enough registers, so instead we can either try to CAS
1876 // rsp or the address of the box (in scr) into &m->owner. If the CAS succeeds
1877 // we later store "Self" into m->Owner. Transiently storing a stack address
1878 // (rsp or the address of the box) into m->owner is harmless.
1879 // Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand.
1880 lock();
1881 cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
1882 movptr(Address(scrReg, 0), 3); // box->_displaced_header = 3
1883 // If we weren't able to swing _owner from NULL to the BasicLock
1884 // then take the slow path.
1885 jccb (Assembler::notZero, DONE_LABEL);
1886 // update _owner from BasicLock to thread
1887 get_thread (scrReg); // beware: clobbers ICCs
1888 movptr(Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), scrReg);
1889 xorptr(boxReg, boxReg); // set icc.ZFlag = 1 to indicate success
1890
1891 // If the CAS fails we can either retry or pass control to the slow path.
1892 // We use the latter tactic.
1893 // Pass the CAS result in the icc.ZFlag into DONE_LABEL
1894 // If the CAS was successful ...
1895 // Self has acquired the lock
1896 // Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
1897 // Intentional fall-through into DONE_LABEL ...
1898 #else // _LP64
1899 // It's inflated and we use scrReg for ObjectMonitor* in this section.
1900 movq(scrReg, tmpReg);
1901
1902 // Unconditionally set box->_displaced_header = markWord::unused_mark().
1903 // Without cast to int32_t this style of movptr will destroy r10 which is typically obj.
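// (unused_mark() here is just a convenient non-zero sentinel; as the 32-bit
// comments above note, any non-0 value for the displaced header suffices.)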
1904 movptr(Address(boxReg, 0), (int32_t)intptr_t(markWord::unused_mark().value()));
1905
1906 if (!HandshakeAfterDeflateIdleMonitors) {
1907 // Increment the ObjectMonitor's ref_count for safety or force the
1908 // enter slow path via DONE_LABEL.
1909 // In fast_lock(), scrReg contains the object's mark word which,
1910 // in this case, is the (ObjectMonitor* | monitor_value). Also this
1911 // code uses tmpReg as its temporary register.
1912 inc_om_ref_count(objReg, scrReg /* om_reg */, tmpReg /* tmp_reg */, DONE_LABEL);
1913 }
1914
1915 xorq(tmpReg, tmpReg);
1916 lock();
1917 cmpxchgptr(r15_thread, Address(scrReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
1918 // Intentional fall-through into DONE_LABEL ...
1919 // Propagate ICC.ZF from CAS above into DONE_LABEL.
1920
1921 if (!HandshakeAfterDeflateIdleMonitors) {
1922 pushf(); // Preserve flags.
1923 // Decrement the ObjectMonitor's ref_count.
1924 lock();
1925 decrementl(Address(scrReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(ref_count)));
1926 popf(); // Restore flags so we have the proper ICC.ZF value.
1927 }
1928 #endif // _LP64
1929 #if INCLUDE_RTM_OPT
1930 } // use_rtm()
1931 #endif
1932 // DONE_LABEL is a hot target - we'd really like to place it at the
1933 // start of a cache line by padding with NOPs.
1934 // See the AMD and Intel software optimization manuals for the
1935 // most efficient "long" NOP encodings.
1936 // Unfortunately none of our alignment mechanisms suffice.
1937 bind(DONE_LABEL);
1938
1939 // At DONE_LABEL the icc ZFlag is set as follows ...
1940 // fast_unlock uses the same protocol.
1941 // ZFlag == 1 -> Success
1942 // ZFlag == 0 -> Failure - force control through the slow path
1943 }
1944
1945 // obj: object to unlock
1946 // box: box address (displaced header location), killed. Must be EAX.
1947 // tmp: killed, cannot be obj nor box.
1948 //
1949 // Some commentary on balanced locking:
1950 //
1951 // fast_lock and fast_unlock are emitted only for provably balanced lock sites.
1952 // Methods that don't have provably balanced locking are forced to run in the
1953 // interpreter - such methods won't be compiled to use fast_lock and fast_unlock.
1954 // The interpreter provides two properties:
1955 // I1: At return-time the interpreter automatically and quietly unlocks any
1956 // objects acquired by the current activation (frame). Recall that the
1957 // interpreter maintains an on-stack list of locks currently held by
1958 // a frame.
1959 // I2: If a method attempts to unlock an object that is not held by
1960 // the frame, the interpreter throws IMSX.
1961 //
1962 // Let's say A(), which has provably balanced locking, acquires O and then calls B().
1963 // B() doesn't have provably balanced locking so it runs in the interpreter.
1964 // Control returns to A() and A() unlocks O. By I1 and I2, above, we know that O
1965 // is still locked by A().
1966 //
1967 // The only other source of unbalanced locking would be JNI. The "Java Native Interface:
1968 // Programmer's Guide and Specification" claims that an object locked by jni_monitorenter
1969 // should not be unlocked by "normal" java-level locking and vice-versa. The specification
1970 // doesn't specify what will occur if a program engages in such mixed-mode locking, however.
1971 // Arguably, given that the spec legislates the JNI case as undefined, our implementation
1972 // could reasonably *avoid* checking owner in fast_unlock().
1973 // In the interest of performance we elide the m->Owner==Self check in unlock.
1974 // A perfectly viable alternative is to elide the owner check except when
1975 // Xcheck:jni is enabled.
1976
1977 void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpReg, bool use_rtm) {
1978 assert(boxReg == rax, "");
1979 assert_different_registers(objReg, boxReg, tmpReg);
1980
1981 Label DONE_LABEL, Stacked, CheckSucc;
1982
1983 // Critically, the biased locking test must have precedence over
1984 // and appear before the (box->dhw == 0) recursive stack-lock test.
1985 if (UseBiasedLocking && !UseOptoBiasInlining) {
1986 biased_locking_exit(objReg, tmpReg, DONE_LABEL);
1987 }
1988
1989 #if INCLUDE_RTM_OPT
1990 if (UseRTMForStackLocks && use_rtm) {
1991 assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
1992 Label L_regular_unlock;
1993 movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // fetch markword
1994 andptr(tmpReg, markWord::biased_lock_mask_in_place); // look at 3 lock bits
1995 cmpptr(tmpReg, markWord::unlocked_value); // bits = 001 unlocked
1996 jccb(Assembler::notEqual, L_regular_unlock); // if !HLE RegularLock
1997 xend(); // otherwise end...
1998 jmp(DONE_LABEL); // ... and we're done
1999 bind(L_regular_unlock);
2000 }
2001 #endif
2002
2003 cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD); // Examine the displaced header
2004 jcc (Assembler::zero, DONE_LABEL); // 0 indicates recursive stack-lock
2005 movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Examine the object's markword
2006 testptr(tmpReg, markWord::monitor_value); // Inflated?
2007 jcc (Assembler::zero, Stacked);
2008
2009 // It's inflated.
2010 #if INCLUDE_RTM_OPT
2011 if (use_rtm) {
2012 Label L_regular_inflated_unlock;
2013 int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);
2014 movptr(boxReg, Address(tmpReg, owner_offset));
2015 testptr(boxReg, boxReg);
2016 jccb(Assembler::notZero, L_regular_inflated_unlock);
2017 xend();
2018 jmp(DONE_LABEL);
2019 bind(L_regular_inflated_unlock);
2020 }
2021 #endif
2022
2023 // Despite our balanced locking property we still check that m->_owner == Self
2024 // as java routines or native JNI code called by this thread might
2025 // have released the lock.
2026 // Refer to the comments in synchronizer.cpp for how we might encode extra
2027 // state in _succ so we can avoid fetching EntryList|cxq.
2028 //
2029 // I'd like to add more cases in fast_lock() and fast_unlock() --
2030 // such as recursive enter and exit -- but we have to be wary of
2031 // I$ bloat, T$ effects and BP$ effects.
2032 //
2033 // If there's no contention try a 1-0 exit. That is, exit without
2034 // a costly MEMBAR or CAS. See synchronizer.cpp for details on how
2035 // we detect and recover from the race that the 1-0 exit admits.
2036 //
2037 // Conceptually fast_unlock() must execute a STST|LDST "release" barrier
2038 // before it STs null into _owner, releasing the lock. Updates
2039 // to data protected by the critical section must be visible before
2040 // we drop the lock (and thus before any other thread could acquire
2041 // the lock and observe the fields protected by the lock).
2042 // IA32's memory model is TSO (total store order), so STs are ordered with respect to
2043 // each other and there's no need for an explicit barrier (fence).
2044 // See also http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
2045 #ifndef _LP64
2046 get_thread (boxReg);
2047
2048 // Note that we could employ various encoding schemes to reduce
2049 // the number of loads below (currently 4) to just 2 or 3.
2050 // Refer to the comments in synchronizer.cpp.
2051 // In practice the chain of fetches doesn't seem to impact performance, however.
2052 xorptr(boxReg, boxReg);
2053 orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
2054 jccb (Assembler::notZero, DONE_LABEL);
2055 movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
2056 orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
2057 jccb (Assembler::notZero, CheckSucc);
2058 movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
2059 jmpb (DONE_LABEL);
2060
2061 bind (Stacked);
2062 // It's not inflated and it's not recursively stack-locked and it's not biased.
2063 // It must be stack-locked.
2064 // Try to reset the header to displaced header.
2065 // The "box" value on the stack is stable, so we can reload
2066 // and be assured we observe the same value as above.
2067 movptr(tmpReg, Address(boxReg, 0));
2068 lock();
2069 cmpxchgptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Uses RAX which is box
2070 // Intentional fall-through into DONE_LABEL
2071
2072 // DONE_LABEL is a hot target - we'd really like to place it at the
2073 // start of a cache line by padding with NOPs.
2074 // See the AMD and Intel software optimization manuals for the
2075 // most efficient "long" NOP encodings.
2076 // Unfortunately none of our alignment mechanisms suffice.
2077 bind (CheckSucc);
2078 #else // _LP64
2079 // It's inflated
2080
2081 if (!HandshakeAfterDeflateIdleMonitors) {
2082 // Increment the ObjectMonitor's ref_count for safety or force the
2083 // exit slow path via DONE_LABEL.
2084 // In fast_unlock(), tmpReg contains the object's mark word which,
2085 // in this case, is the (ObjectMonitor* | monitor_value). Also this
2086 // code uses boxReg as its temporary register.
2087 inc_om_ref_count(objReg, tmpReg /* om_reg */, boxReg /* tmp_reg */, DONE_LABEL);
2088 }
2089
2090 // Try to avoid passing control into the slow path ...
2091 Label LSuccess, LGoSlowPath;
2092 xorptr(boxReg, boxReg);
2093 orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
2094 jccb(Assembler::notZero, LGoSlowPath);
2095 movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
2096 orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
2097 jccb (Assembler::notZero, CheckSucc);
2098 // Without cast to int32_t this style of movptr will destroy r10 which is typically obj.
2099 movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t)NULL_WORD);
2100 jmpb(LSuccess);
2101
2102 bind (CheckSucc);
2103
2104 // The following optional optimization can be elided if necessary.
2105 // Effectively: if (succ == null) goto slow path
2106 // The code reduces the window for a race, however,
2107 // and thus benefits performance.
2108 cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
2109 jccb (Assembler::zero, LGoSlowPath);
2110
2111 xorptr(boxReg, boxReg);
2112 // Without cast to int32_t this style of movptr will destroy r10 which is typically obj.
2113 movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t)NULL_WORD);
2114
2115 // Memory barrier/fence
2116 // Dekker pivot point -- fulcrum : ST Owner; MEMBAR; LD Succ
2117 // Instead of MFENCE we use a dummy locked add of 0 to the top-of-stack.
2118 // This is faster on Nehalem and AMD Shanghai/Barcelona.
2119 // See https://blogs.oracle.com/dave/entry/instruction_selection_for_volatile_fences
2120 // We might also restructure (ST Owner=0;barrier;LD _Succ) to
2121 // (mov box,0; xchgq box, &m->Owner; LD _succ) .
2122 lock(); addl(Address(rsp, 0), 0);
2123
2124 cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
2125 jccb (Assembler::notZero, LSuccess);
2126
2127 // Rare inopportune interleaving - race.
2128 // The successor vanished in the small window above.
2129 // The lock is contended -- (cxq|EntryList) != null -- and there's no apparent successor.
2130 // We need to ensure progress and succession.
2131 // Try to reacquire the lock.
2132 // If that fails then the new owner is responsible for succession and this
2133 // thread needs to take no further action and can exit via the fast path (success).
2134 // If the re-acquire succeeds then pass control into the slow path.
2135 // As implemented, this latter mode is horrible because we generate more
2136 // coherence traffic on the lock *and* artificially extend the critical section
2137 // length by virtue of passing control into the slow path.
2138
2139 // box is really RAX -- the following CMPXCHG depends on that binding
2140 // cmpxchg R,[M] is equivalent to rax = CAS(M,rax,R)
2141 lock();
2142 cmpxchgptr(r15_thread, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
2143 // There's no successor so we tried to regrab the lock.
2144 // If that didn't work, then another thread grabbed the
2145 // lock so we're done (and exit was a success).
2146 jccb (Assembler::notEqual, LSuccess);
2147 // Intentional fall-through into slow path
2148
2149 bind (LGoSlowPath);
2150 if (!HandshakeAfterDeflateIdleMonitors) {
2151 lock();
2152 decrementl(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(ref_count)));
2153 }
2154 orl (boxReg, 1); // set ICC.ZF=0 to indicate failure
2155 jmpb (DONE_LABEL);
2156
2157 bind (LSuccess);
2158 if (!HandshakeAfterDeflateIdleMonitors) {
2159 lock();
2160 decrementl(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(ref_count)));
2161 }
2162 testl (boxReg, 0); // set ICC.ZF=1 to indicate success
2163 jmpb (DONE_LABEL);
2164
2165 bind (Stacked);
2166 movptr(tmpReg, Address (boxReg, 0)); // re-fetch
2167 lock();
2168 cmpxchgptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Uses RAX which is box
2169
2170 #endif
2171 bind(DONE_LABEL);
2172 }
2173 #endif // COMPILER2
2174
2175 void MacroAssembler::c2bool(Register x) {
2176 // implements x == 0 ? 0 : 1
2177 // note: must only look at least-significant byte of x
2178 // since C-style booleans are stored in one byte
2179 // only!
(was bug) 2180 andl(x, 0xFF); 2181 setb(Assembler::notZero, x); 2182 } 2183 2184 // Wouldn't need if AddressLiteral version had new name 2185 void MacroAssembler::call(Label& L, relocInfo::relocType rtype) { 2186 Assembler::call(L, rtype); 2187 } 2188 2189 void MacroAssembler::call(Register entry) { 2190 Assembler::call(entry); 2191 } 2192 2193 void MacroAssembler::call(AddressLiteral entry) { 2194 if (reachable(entry)) { 2195 Assembler::call_literal(entry.target(), entry.rspec()); 2196 } else { 2197 lea(rscratch1, entry); 2198 Assembler::call(rscratch1); 2199 } 2200 } 2201 2202 void MacroAssembler::ic_call(address entry, jint method_index) { 2203 RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index); 2204 movptr(rax, (intptr_t)Universe::non_oop_word()); 2205 call(AddressLiteral(entry, rh)); 2206 } 2207 2208 // Implementation of call_VM versions 2209 2210 void MacroAssembler::call_VM(Register oop_result, 2211 address entry_point, 2212 bool check_exceptions) { 2213 Label C, E; 2214 call(C, relocInfo::none); 2215 jmp(E); 2216 2217 bind(C); 2218 call_VM_helper(oop_result, entry_point, 0, check_exceptions); 2219 ret(0); 2220 2221 bind(E); 2222 } 2223 2224 void MacroAssembler::call_VM(Register oop_result, 2225 address entry_point, 2226 Register arg_1, 2227 bool check_exceptions) { 2228 Label C, E; 2229 call(C, relocInfo::none); 2230 jmp(E); 2231 2232 bind(C); 2233 pass_arg1(this, arg_1); 2234 call_VM_helper(oop_result, entry_point, 1, check_exceptions); 2235 ret(0); 2236 2237 bind(E); 2238 } 2239 2240 void MacroAssembler::call_VM(Register oop_result, 2241 address entry_point, 2242 Register arg_1, 2243 Register arg_2, 2244 bool check_exceptions) { 2245 Label C, E; 2246 call(C, relocInfo::none); 2247 jmp(E); 2248 2249 bind(C); 2250 2251 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); 2252 2253 pass_arg2(this, arg_2); 2254 pass_arg1(this, arg_1); 2255 call_VM_helper(oop_result, entry_point, 2, check_exceptions); 2256 ret(0); 2257 2258 bind(E); 2259 } 2260 2261 void MacroAssembler::call_VM(Register oop_result, 2262 address entry_point, 2263 Register arg_1, 2264 Register arg_2, 2265 Register arg_3, 2266 bool check_exceptions) { 2267 Label C, E; 2268 call(C, relocInfo::none); 2269 jmp(E); 2270 2271 bind(C); 2272 2273 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg")); 2274 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg")); 2275 pass_arg3(this, arg_3); 2276 2277 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); 2278 pass_arg2(this, arg_2); 2279 2280 pass_arg1(this, arg_1); 2281 call_VM_helper(oop_result, entry_point, 3, check_exceptions); 2282 ret(0); 2283 2284 bind(E); 2285 } 2286 2287 void MacroAssembler::call_VM(Register oop_result, 2288 Register last_java_sp, 2289 address entry_point, 2290 int number_of_arguments, 2291 bool check_exceptions) { 2292 Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg); 2293 call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions); 2294 } 2295 2296 void MacroAssembler::call_VM(Register oop_result, 2297 Register last_java_sp, 2298 address entry_point, 2299 Register arg_1, 2300 bool check_exceptions) { 2301 pass_arg1(this, arg_1); 2302 call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions); 2303 } 2304 2305 void MacroAssembler::call_VM(Register oop_result, 2306 Register last_java_sp, 2307 address entry_point, 2308 Register arg_1, 2309 Register arg_2, 2310 bool check_exceptions) { 2311 2312 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); 2313 pass_arg2(this, arg_2); 2314 
pass_arg1(this, arg_1); 2315 call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions); 2316 } 2317 2318 void MacroAssembler::call_VM(Register oop_result, 2319 Register last_java_sp, 2320 address entry_point, 2321 Register arg_1, 2322 Register arg_2, 2323 Register arg_3, 2324 bool check_exceptions) { 2325 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg")); 2326 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg")); 2327 pass_arg3(this, arg_3); 2328 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); 2329 pass_arg2(this, arg_2); 2330 pass_arg1(this, arg_1); 2331 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions); 2332 } 2333 2334 void MacroAssembler::super_call_VM(Register oop_result, 2335 Register last_java_sp, 2336 address entry_point, 2337 int number_of_arguments, 2338 bool check_exceptions) { 2339 Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg); 2340 MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions); 2341 } 2342 2343 void MacroAssembler::super_call_VM(Register oop_result, 2344 Register last_java_sp, 2345 address entry_point, 2346 Register arg_1, 2347 bool check_exceptions) { 2348 pass_arg1(this, arg_1); 2349 super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions); 2350 } 2351 2352 void MacroAssembler::super_call_VM(Register oop_result, 2353 Register last_java_sp, 2354 address entry_point, 2355 Register arg_1, 2356 Register arg_2, 2357 bool check_exceptions) { 2358 2359 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); 2360 pass_arg2(this, arg_2); 2361 pass_arg1(this, arg_1); 2362 super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions); 2363 } 2364 2365 void MacroAssembler::super_call_VM(Register oop_result, 2366 Register last_java_sp, 2367 address entry_point, 2368 Register arg_1, 2369 Register arg_2, 2370 Register arg_3, 2371 bool check_exceptions) { 2372 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg")); 2373 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg")); 2374 pass_arg3(this, arg_3); 2375 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); 2376 pass_arg2(this, arg_2); 2377 pass_arg1(this, arg_1); 2378 super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions); 2379 } 2380 2381 void MacroAssembler::call_VM_base(Register oop_result, 2382 Register java_thread, 2383 Register last_java_sp, 2384 address entry_point, 2385 int number_of_arguments, 2386 bool check_exceptions) { 2387 // determine java_thread register 2388 if (!java_thread->is_valid()) { 2389 #ifdef _LP64 2390 java_thread = r15_thread; 2391 #else 2392 java_thread = rdi; 2393 get_thread(java_thread); 2394 #endif // LP64 2395 } 2396 // determine last_java_sp register 2397 if (!last_java_sp->is_valid()) { 2398 last_java_sp = rsp; 2399 } 2400 // debugging support 2401 assert(number_of_arguments >= 0 , "cannot have negative number of arguments"); 2402 LP64_ONLY(assert(java_thread == r15_thread, "unexpected register")); 2403 #ifdef ASSERT 2404 // TraceBytecodes does not use r12 but saves it over the call, so don't verify 2405 // r12 is the heapbase. 
2406 LP64_ONLY(if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
2407 #endif // ASSERT
2408
2409 assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
2410 assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
2411
2412 // push java thread (becomes first argument of C function)
2413
2414 NOT_LP64(push(java_thread); number_of_arguments++);
2415 LP64_ONLY(mov(c_rarg0, r15_thread));
2416
2417 // set last Java frame before call
2418 assert(last_java_sp != rbp, "can't use ebp/rbp");
2419
2420 // Only interpreter should have to set fp
2421 set_last_Java_frame(java_thread, last_java_sp, rbp, NULL);
2422
2423 // do the call, remove parameters
2424 MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
2425
2426 // restore the thread (cannot use the pushed argument since arguments
2427 // may be overwritten by C code generated by an optimizing compiler);
2428 // however we can use the register value directly if it is callee saved.
2429 if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
2430 // rdi & rsi (also r15) are callee saved -> nothing to do
2431 #ifdef ASSERT
2432 guarantee(java_thread != rax, "change this code");
2433 push(rax);
2434 { Label L;
2435 get_thread(rax);
2436 cmpptr(java_thread, rax);
2437 jcc(Assembler::equal, L);
2438 STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
2439 bind(L);
2440 }
2441 pop(rax);
2442 #endif
2443 } else {
2444 get_thread(java_thread);
2445 }
2446 // reset last Java frame
2447 // Only interpreter should have to clear fp
2448 reset_last_Java_frame(java_thread, true);
2449
2450 // C++ interp handles this in the interpreter
2451 check_and_handle_popframe(java_thread);
2452 check_and_handle_earlyret(java_thread);
2453
2454 if (check_exceptions) {
2455 // check for pending exceptions (java_thread is set upon return)
2456 cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
2457 #ifndef _LP64
2458 jump_cc(Assembler::notEqual,
2459 RuntimeAddress(StubRoutines::forward_exception_entry()));
2460 #else
2461 // This used to conditionally jump to forward_exception; however, if we
2462 // relocate, it is possible that the branch will not reach. So we must
2463 // jump around it so we can always reach the target.
2464
2465 Label ok;
2466 jcc(Assembler::equal, ok);
2467 jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2468 bind(ok);
2469 #endif // LP64
2470 }
2471
2472 // get oop result if there is one and reset the value in the thread
2473 if (oop_result->is_valid()) {
2474 get_vm_result(oop_result, java_thread);
2475 }
2476 }
2477
2478 void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
2479
2480 // Calculating the value for last_Java_sp is
2481 // somewhat subtle. call_VM does an intermediate call
2482 // which places a return address on the stack just under the
2483 // stack pointer, as if the user had finished with it. This allows
2484 // us to retrieve last_Java_pc from last_Java_sp[-1].
2485 // On 32bit we then have to push additional args on the stack to accomplish
2486 // the actual requested call. On 64bit call_VM can only use register args,
2487 // so the only extra space is the return address that call_VM created.
2488 // This hopefully explains the calculations here.
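// To illustrate (a sketch of the 64-bit case, not emitted code):
//
//   rsp            -> [ return address of the intermediate call ]  (== last_Java_sp[-1], i.e. last_Java_pc)
//   rsp + wordSize -> caller's frame                               (== last_Java_sp)
//
// so the lea below computes last_Java_sp = rsp + wordSize.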
2489 2490 #ifdef _LP64 2491 // We've pushed one address, correct last_Java_sp 2492 lea(rax, Address(rsp, wordSize)); 2493 #else 2494 lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize)); 2495 #endif // LP64 2496 2497 call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions); 2498 2499 } 2500 2501 // Use this method when MacroAssembler version of call_VM_leaf_base() should be called from Interpreter. 2502 void MacroAssembler::call_VM_leaf0(address entry_point) { 2503 MacroAssembler::call_VM_leaf_base(entry_point, 0); 2504 } 2505 2506 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) { 2507 call_VM_leaf_base(entry_point, number_of_arguments); 2508 } 2509 2510 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) { 2511 pass_arg0(this, arg_0); 2512 call_VM_leaf(entry_point, 1); 2513 } 2514 2515 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 2516 2517 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg")); 2518 pass_arg1(this, arg_1); 2519 pass_arg0(this, arg_0); 2520 call_VM_leaf(entry_point, 2); 2521 } 2522 2523 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { 2524 LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg")); 2525 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); 2526 pass_arg2(this, arg_2); 2527 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg")); 2528 pass_arg1(this, arg_1); 2529 pass_arg0(this, arg_0); 2530 call_VM_leaf(entry_point, 3); 2531 } 2532 2533 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) { 2534 pass_arg0(this, arg_0); 2535 MacroAssembler::call_VM_leaf_base(entry_point, 1); 2536 } 2537 2538 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 2539 2540 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg")); 2541 pass_arg1(this, arg_1); 2542 pass_arg0(this, arg_0); 2543 MacroAssembler::call_VM_leaf_base(entry_point, 2); 2544 } 2545 2546 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { 2547 LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg")); 2548 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); 2549 pass_arg2(this, arg_2); 2550 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg")); 2551 pass_arg1(this, arg_1); 2552 pass_arg0(this, arg_0); 2553 MacroAssembler::call_VM_leaf_base(entry_point, 3); 2554 } 2555 2556 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) { 2557 LP64_ONLY(assert(arg_0 != c_rarg3, "smashed arg")); 2558 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg")); 2559 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg")); 2560 pass_arg3(this, arg_3); 2561 LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg")); 2562 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg")); 2563 pass_arg2(this, arg_2); 2564 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg")); 2565 pass_arg1(this, arg_1); 2566 pass_arg0(this, arg_0); 2567 MacroAssembler::call_VM_leaf_base(entry_point, 4); 2568 } 2569 2570 void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) { 2571 movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset())); 2572 movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD); 2573 verify_oop(oop_result, "broken oop in call_VM_base"); 2574 } 2575 2576 void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) { 2577 
movptr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset())); 2578 movptr(Address(java_thread, JavaThread::vm_result_2_offset()), NULL_WORD); 2579 } 2580 2581 void MacroAssembler::check_and_handle_earlyret(Register java_thread) { 2582 } 2583 2584 void MacroAssembler::check_and_handle_popframe(Register java_thread) { 2585 } 2586 2587 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm) { 2588 if (reachable(src1)) { 2589 cmpl(as_Address(src1), imm); 2590 } else { 2591 lea(rscratch1, src1); 2592 cmpl(Address(rscratch1, 0), imm); 2593 } 2594 } 2595 2596 void MacroAssembler::cmp32(Register src1, AddressLiteral src2) { 2597 assert(!src2.is_lval(), "use cmpptr"); 2598 if (reachable(src2)) { 2599 cmpl(src1, as_Address(src2)); 2600 } else { 2601 lea(rscratch1, src2); 2602 cmpl(src1, Address(rscratch1, 0)); 2603 } 2604 } 2605 2606 void MacroAssembler::cmp32(Register src1, int32_t imm) { 2607 Assembler::cmpl(src1, imm); 2608 } 2609 2610 void MacroAssembler::cmp32(Register src1, Address src2) { 2611 Assembler::cmpl(src1, src2); 2612 } 2613 2614 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { 2615 ucomisd(opr1, opr2); 2616 2617 Label L; 2618 if (unordered_is_less) { 2619 movl(dst, -1); 2620 jcc(Assembler::parity, L); 2621 jcc(Assembler::below , L); 2622 movl(dst, 0); 2623 jcc(Assembler::equal , L); 2624 increment(dst); 2625 } else { // unordered is greater 2626 movl(dst, 1); 2627 jcc(Assembler::parity, L); 2628 jcc(Assembler::above , L); 2629 movl(dst, 0); 2630 jcc(Assembler::equal , L); 2631 decrementl(dst); 2632 } 2633 bind(L); 2634 } 2635 2636 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { 2637 ucomiss(opr1, opr2); 2638 2639 Label L; 2640 if (unordered_is_less) { 2641 movl(dst, -1); 2642 jcc(Assembler::parity, L); 2643 jcc(Assembler::below , L); 2644 movl(dst, 0); 2645 jcc(Assembler::equal , L); 2646 increment(dst); 2647 } else { // unordered is greater 2648 movl(dst, 1); 2649 jcc(Assembler::parity, L); 2650 jcc(Assembler::above , L); 2651 movl(dst, 0); 2652 jcc(Assembler::equal , L); 2653 decrementl(dst); 2654 } 2655 bind(L); 2656 } 2657 2658 2659 void MacroAssembler::cmp8(AddressLiteral src1, int imm) { 2660 if (reachable(src1)) { 2661 cmpb(as_Address(src1), imm); 2662 } else { 2663 lea(rscratch1, src1); 2664 cmpb(Address(rscratch1, 0), imm); 2665 } 2666 } 2667 2668 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) { 2669 #ifdef _LP64 2670 if (src2.is_lval()) { 2671 movptr(rscratch1, src2); 2672 Assembler::cmpq(src1, rscratch1); 2673 } else if (reachable(src2)) { 2674 cmpq(src1, as_Address(src2)); 2675 } else { 2676 lea(rscratch1, src2); 2677 Assembler::cmpq(src1, Address(rscratch1, 0)); 2678 } 2679 #else 2680 if (src2.is_lval()) { 2681 cmp_literal32(src1, (int32_t) src2.target(), src2.rspec()); 2682 } else { 2683 cmpl(src1, as_Address(src2)); 2684 } 2685 #endif // _LP64 2686 } 2687 2688 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) { 2689 assert(src2.is_lval(), "not a mem-mem compare"); 2690 #ifdef _LP64 2691 // moves src2's literal address 2692 movptr(rscratch1, src2); 2693 Assembler::cmpq(src1, rscratch1); 2694 #else 2695 cmp_literal32(src1, (int32_t) src2.target(), src2.rspec()); 2696 #endif // _LP64 2697 } 2698 2699 void MacroAssembler::cmpoop(Register src1, Register src2) { 2700 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2701 bs->obj_equals(this, src1, src2); 2702 } 2703 2704 void 
MacroAssembler::cmpoop(Register src1, Address src2) { 2705 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2706 bs->obj_equals(this, src1, src2); 2707 } 2708 2709 #ifdef _LP64 2710 void MacroAssembler::cmpoop(Register src1, jobject src2) { 2711 movoop(rscratch1, src2); 2712 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2713 bs->obj_equals(this, src1, rscratch1); 2714 } 2715 #endif 2716 2717 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) { 2718 if (reachable(adr)) { 2719 lock(); 2720 cmpxchgptr(reg, as_Address(adr)); 2721 } else { 2722 lea(rscratch1, adr); 2723 lock(); 2724 cmpxchgptr(reg, Address(rscratch1, 0)); 2725 } 2726 } 2727 2728 void MacroAssembler::cmpxchgptr(Register reg, Address adr) { 2729 LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr)); 2730 } 2731 2732 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) { 2733 if (reachable(src)) { 2734 Assembler::comisd(dst, as_Address(src)); 2735 } else { 2736 lea(rscratch1, src); 2737 Assembler::comisd(dst, Address(rscratch1, 0)); 2738 } 2739 } 2740 2741 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) { 2742 if (reachable(src)) { 2743 Assembler::comiss(dst, as_Address(src)); 2744 } else { 2745 lea(rscratch1, src); 2746 Assembler::comiss(dst, Address(rscratch1, 0)); 2747 } 2748 } 2749 2750 2751 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) { 2752 Condition negated_cond = negate_condition(cond); 2753 Label L; 2754 jcc(negated_cond, L); 2755 pushf(); // Preserve flags 2756 atomic_incl(counter_addr); 2757 popf(); 2758 bind(L); 2759 } 2760 2761 int MacroAssembler::corrected_idivl(Register reg) { 2762 // Full implementation of Java idiv and irem; checks for 2763 // special case as described in JVM spec., p.243 & p.271. 2764 // The function returns the (pc) offset of the idivl 2765 // instruction - may be needed for implicit exceptions. 
2766 // 2767 // normal case special case 2768 // 2769 // input : rax,: dividend min_int 2770 // reg: divisor (may not be rax,/rdx) -1 2771 // 2772 // output: rax,: quotient (= rax, idiv reg) min_int 2773 // rdx: remainder (= rax, irem reg) 0 2774 assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register"); 2775 const int min_int = 0x80000000; 2776 Label normal_case, special_case; 2777 2778 // check for special case 2779 cmpl(rax, min_int); 2780 jcc(Assembler::notEqual, normal_case); 2781 xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0) 2782 cmpl(reg, -1); 2783 jcc(Assembler::equal, special_case); 2784 2785 // handle normal case 2786 bind(normal_case); 2787 cdql(); 2788 int idivl_offset = offset(); 2789 idivl(reg); 2790 2791 // normal and special case exit 2792 bind(special_case); 2793 2794 return idivl_offset; 2795 } 2796 2797 2798 2799 void MacroAssembler::decrementl(Register reg, int value) { 2800 if (value == min_jint) {subl(reg, value) ; return; } 2801 if (value < 0) { incrementl(reg, -value); return; } 2802 if (value == 0) { ; return; } 2803 if (value == 1 && UseIncDec) { decl(reg) ; return; } 2804 /* else */ { subl(reg, value) ; return; } 2805 } 2806 2807 void MacroAssembler::decrementl(Address dst, int value) { 2808 if (value == min_jint) {subl(dst, value) ; return; } 2809 if (value < 0) { incrementl(dst, -value); return; } 2810 if (value == 0) { ; return; } 2811 if (value == 1 && UseIncDec) { decl(dst) ; return; } 2812 /* else */ { subl(dst, value) ; return; } 2813 } 2814 2815 void MacroAssembler::division_with_shift (Register reg, int shift_value) { 2816 assert (shift_value > 0, "illegal shift value"); 2817 Label _is_positive; 2818 testl (reg, reg); 2819 jcc (Assembler::positive, _is_positive); 2820 int offset = (1 << shift_value) - 1 ; 2821 2822 if (offset == 1) { 2823 incrementl(reg); 2824 } else { 2825 addl(reg, offset); 2826 } 2827 2828 bind (_is_positive); 2829 sarl(reg, shift_value); 2830 } 2831 2832 void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src) { 2833 if (reachable(src)) { 2834 Assembler::divsd(dst, as_Address(src)); 2835 } else { 2836 lea(rscratch1, src); 2837 Assembler::divsd(dst, Address(rscratch1, 0)); 2838 } 2839 } 2840 2841 void MacroAssembler::divss(XMMRegister dst, AddressLiteral src) { 2842 if (reachable(src)) { 2843 Assembler::divss(dst, as_Address(src)); 2844 } else { 2845 lea(rscratch1, src); 2846 Assembler::divss(dst, Address(rscratch1, 0)); 2847 } 2848 } 2849 2850 // !defined(COMPILER2) is because of stupid core builds 2851 #if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2) || INCLUDE_JVMCI 2852 void MacroAssembler::empty_FPU_stack() { 2853 if (VM_Version::supports_mmx()) { 2854 emms(); 2855 } else { 2856 for (int i = 8; i-- > 0; ) ffree(i); 2857 } 2858 } 2859 #endif // !LP64 || C1 || !C2 || INCLUDE_JVMCI 2860 2861 2862 void MacroAssembler::enter() { 2863 push(rbp); 2864 mov(rbp, rsp); 2865 } 2866 2867 // A 5 byte nop that is safe for patching (see patch_verified_entry) 2868 void MacroAssembler::fat_nop() { 2869 if (UseAddressNop) { 2870 addr_nop_5(); 2871 } else { 2872 emit_int8(0x26); // es: 2873 emit_int8(0x2e); // cs: 2874 emit_int8(0x64); // fs: 2875 emit_int8(0x65); // gs: 2876 emit_int8((unsigned char)0x90); 2877 } 2878 } 2879 2880 void MacroAssembler::fcmp(Register tmp) { 2881 fcmp(tmp, 1, true, true); 2882 } 2883 2884 void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) { 2885 assert(!pop_right || pop_left, "usage error"); 2886 if 
(VM_Version::supports_cmov()) { 2887 assert(tmp == noreg, "unneeded temp"); 2888 if (pop_left) { 2889 fucomip(index); 2890 } else { 2891 fucomi(index); 2892 } 2893 if (pop_right) { 2894 fpop(); 2895 } 2896 } else { 2897 assert(tmp != noreg, "need temp"); 2898 if (pop_left) { 2899 if (pop_right) { 2900 fcompp(); 2901 } else { 2902 fcomp(index); 2903 } 2904 } else { 2905 fcom(index); 2906 } 2907 // convert FPU condition into eflags condition via rax, 2908 save_rax(tmp); 2909 fwait(); fnstsw_ax(); 2910 sahf(); 2911 restore_rax(tmp); 2912 } 2913 // condition codes set as follows: 2914 // 2915 // CF (corresponds to C0) if x < y 2916 // PF (corresponds to C2) if unordered 2917 // ZF (corresponds to C3) if x = y 2918 } 2919 2920 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) { 2921 fcmp2int(dst, unordered_is_less, 1, true, true); 2922 } 2923 2924 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) { 2925 fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right); 2926 Label L; 2927 if (unordered_is_less) { 2928 movl(dst, -1); 2929 jcc(Assembler::parity, L); 2930 jcc(Assembler::below , L); 2931 movl(dst, 0); 2932 jcc(Assembler::equal , L); 2933 increment(dst); 2934 } else { // unordered is greater 2935 movl(dst, 1); 2936 jcc(Assembler::parity, L); 2937 jcc(Assembler::above , L); 2938 movl(dst, 0); 2939 jcc(Assembler::equal , L); 2940 decrementl(dst); 2941 } 2942 bind(L); 2943 } 2944 2945 void MacroAssembler::fld_d(AddressLiteral src) { 2946 fld_d(as_Address(src)); 2947 } 2948 2949 void MacroAssembler::fld_s(AddressLiteral src) { 2950 fld_s(as_Address(src)); 2951 } 2952 2953 void MacroAssembler::fld_x(AddressLiteral src) { 2954 Assembler::fld_x(as_Address(src)); 2955 } 2956 2957 void MacroAssembler::fldcw(AddressLiteral src) { 2958 Assembler::fldcw(as_Address(src)); 2959 } 2960 2961 void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src) { 2962 if (reachable(src)) { 2963 Assembler::mulpd(dst, as_Address(src)); 2964 } else { 2965 lea(rscratch1, src); 2966 Assembler::mulpd(dst, Address(rscratch1, 0)); 2967 } 2968 } 2969 2970 void MacroAssembler::increase_precision() { 2971 subptr(rsp, BytesPerWord); 2972 fnstcw(Address(rsp, 0)); 2973 movl(rax, Address(rsp, 0)); 2974 orl(rax, 0x300); 2975 push(rax); 2976 fldcw(Address(rsp, 0)); 2977 pop(rax); 2978 } 2979 2980 void MacroAssembler::restore_precision() { 2981 fldcw(Address(rsp, 0)); 2982 addptr(rsp, BytesPerWord); 2983 } 2984 2985 void MacroAssembler::fpop() { 2986 ffree(); 2987 fincstp(); 2988 } 2989 2990 void MacroAssembler::load_float(Address src) { 2991 if (UseSSE >= 1) { 2992 movflt(xmm0, src); 2993 } else { 2994 LP64_ONLY(ShouldNotReachHere()); 2995 NOT_LP64(fld_s(src)); 2996 } 2997 } 2998 2999 void MacroAssembler::store_float(Address dst) { 3000 if (UseSSE >= 1) { 3001 movflt(dst, xmm0); 3002 } else { 3003 LP64_ONLY(ShouldNotReachHere()); 3004 NOT_LP64(fstp_s(dst)); 3005 } 3006 } 3007 3008 void MacroAssembler::load_double(Address src) { 3009 if (UseSSE >= 2) { 3010 movdbl(xmm0, src); 3011 } else { 3012 LP64_ONLY(ShouldNotReachHere()); 3013 NOT_LP64(fld_d(src)); 3014 } 3015 } 3016 3017 void MacroAssembler::store_double(Address dst) { 3018 if (UseSSE >= 2) { 3019 movdbl(dst, xmm0); 3020 } else { 3021 LP64_ONLY(ShouldNotReachHere()); 3022 NOT_LP64(fstp_d(dst)); 3023 } 3024 } 3025 3026 void MacroAssembler::fremr(Register tmp) { 3027 save_rax(tmp); 3028 { Label L; 3029 bind(L); 3030 fprem(); 3031 fwait(); fnstsw_ax(); 3032 #ifdef _LP64 3033 
testl(rax, 0x400); 3034 jcc(Assembler::notEqual, L); 3035 #else 3036 sahf(); 3037 jcc(Assembler::parity, L); 3038 #endif // _LP64 3039 } 3040 restore_rax(tmp); 3041 // Result is in ST0. 3042 // Note: fxch & fpop to get rid of ST1 3043 // (otherwise FPU stack could overflow eventually) 3044 fxch(1); 3045 fpop(); 3046 } 3047 3048 // dst = c = a * b + c 3049 void MacroAssembler::fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) { 3050 Assembler::vfmadd231sd(c, a, b); 3051 if (dst != c) { 3052 movdbl(dst, c); 3053 } 3054 } 3055 3056 // dst = c = a * b + c 3057 void MacroAssembler::fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) { 3058 Assembler::vfmadd231ss(c, a, b); 3059 if (dst != c) { 3060 movflt(dst, c); 3061 } 3062 } 3063 3064 // dst = c = a * b + c 3065 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) { 3066 Assembler::vfmadd231pd(c, a, b, vector_len); 3067 if (dst != c) { 3068 vmovdqu(dst, c); 3069 } 3070 } 3071 3072 // dst = c = a * b + c 3073 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) { 3074 Assembler::vfmadd231ps(c, a, b, vector_len); 3075 if (dst != c) { 3076 vmovdqu(dst, c); 3077 } 3078 } 3079 3080 // dst = c = a * b + c 3081 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) { 3082 Assembler::vfmadd231pd(c, a, b, vector_len); 3083 if (dst != c) { 3084 vmovdqu(dst, c); 3085 } 3086 } 3087 3088 // dst = c = a * b + c 3089 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) { 3090 Assembler::vfmadd231ps(c, a, b, vector_len); 3091 if (dst != c) { 3092 vmovdqu(dst, c); 3093 } 3094 } 3095 3096 void MacroAssembler::incrementl(AddressLiteral dst) { 3097 if (reachable(dst)) { 3098 incrementl(as_Address(dst)); 3099 } else { 3100 lea(rscratch1, dst); 3101 incrementl(Address(rscratch1, 0)); 3102 } 3103 } 3104 3105 void MacroAssembler::incrementl(ArrayAddress dst) { 3106 incrementl(as_Address(dst)); 3107 } 3108 3109 void MacroAssembler::incrementl(Register reg, int value) { 3110 if (value == min_jint) {addl(reg, value) ; return; } 3111 if (value < 0) { decrementl(reg, -value); return; } 3112 if (value == 0) { ; return; } 3113 if (value == 1 && UseIncDec) { incl(reg) ; return; } 3114 /* else */ { addl(reg, value) ; return; } 3115 } 3116 3117 void MacroAssembler::incrementl(Address dst, int value) { 3118 if (value == min_jint) {addl(dst, value) ; return; } 3119 if (value < 0) { decrementl(dst, -value); return; } 3120 if (value == 0) { ; return; } 3121 if (value == 1 && UseIncDec) { incl(dst) ; return; } 3122 /* else */ { addl(dst, value) ; return; } 3123 } 3124 3125 void MacroAssembler::jump(AddressLiteral dst) { 3126 if (reachable(dst)) { 3127 jmp_literal(dst.target(), dst.rspec()); 3128 } else { 3129 lea(rscratch1, dst); 3130 jmp(rscratch1); 3131 } 3132 } 3133 3134 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) { 3135 if (reachable(dst)) { 3136 InstructionMark im(this); 3137 relocate(dst.reloc()); 3138 const int short_size = 2; 3139 const int long_size = 6; 3140 int offs = (intptr_t)dst.target() - ((intptr_t)pc()); 3141 if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) { 3142 // 0111 tttn #8-bit disp 3143 emit_int8(0x70 | cc); 3144 emit_int8((offs - short_size) & 0xFF); 3145 } else { 3146 // 0000 1111 1000 tttn #32-bit disp 3147 emit_int8(0x0F); 3148 emit_int8((unsigned char)(0x80 | cc)); 3149 emit_int32(offs - 
long_size);
3150 }
3151 } else {
3152 #ifdef ASSERT
3153 warning("reversing conditional branch");
3154 #endif /* ASSERT */
3155 Label skip;
3156 jccb(reverse[cc], skip);
3157 lea(rscratch1, dst);
3158 Assembler::jmp(rscratch1);
3159 bind(skip);
3160 }
3161 }
3162
3163 void MacroAssembler::ldmxcsr(AddressLiteral src) {
3164 if (reachable(src)) {
3165 Assembler::ldmxcsr(as_Address(src));
3166 } else {
3167 lea(rscratch1, src);
3168 Assembler::ldmxcsr(Address(rscratch1, 0));
3169 }
3170 }
3171
3172 int MacroAssembler::load_signed_byte(Register dst, Address src) {
3173 int off;
3174 if (LP64_ONLY(true ||) VM_Version::is_P6()) {
3175 off = offset();
3176 movsbl(dst, src); // movsxb
3177 } else {
3178 off = load_unsigned_byte(dst, src);
3179 shll(dst, 24);
3180 sarl(dst, 24);
3181 }
3182 return off;
3183 }
3184
3185 // Note: load_signed_short used to be called load_signed_word.
3186 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler
3187 // manual, which means 16 bits, that usage is found nowhere in HotSpot code.
3188 // The term "word" in HotSpot means a 32- or 64-bit machine word.
3189 int MacroAssembler::load_signed_short(Register dst, Address src) {
3190 int off;
3191 if (LP64_ONLY(true ||) VM_Version::is_P6()) {
3192 // This is dubious to me since it seems safe to do a signed 16 => 64 bit
3193 // version but this is what 64bit has always done. This seems to imply
3194 // that users are only using 32bits worth.
3195 off = offset();
3196 movswl(dst, src); // movsxw
3197 } else {
3198 off = load_unsigned_short(dst, src);
3199 shll(dst, 16);
3200 sarl(dst, 16);
3201 }
3202 return off;
3203 }
3204
3205 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
3206 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
3207 // and "3.9 Partial Register Penalties", p. 22.
3208 int off;
3209 if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) {
3210 off = offset();
3211 movzbl(dst, src); // movzxb
3212 } else {
3213 xorl(dst, dst);
3214 off = offset();
3215 movb(dst, src);
3216 }
3217 return off;
3218 }
3219
3220 // Note: load_unsigned_short used to be called load_unsigned_word.
3221 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
3222 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
3223 // and "3.9 Partial Register Penalties", p. 22.
3224 int off;
3225 if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
3226 off = offset();
3227 movzwl(dst, src); // movzxw
3228 } else {
3229 xorl(dst, dst);
3230 off = offset();
3231 movw(dst, src);
3232 }
3233 return off;
3234 }
3235
3236 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
3237 switch (size_in_bytes) {
3238 #ifndef _LP64
3239 case 8:
3240 assert(dst2 != noreg, "second dest register required");
3241 movl(dst, src);
3242 movl(dst2, src.plus_disp(BytesPerInt));
3243 break;
3244 #else
3245 case 8: movq(dst, src); break;
3246 #endif
3247 case 4: movl(dst, src); break;
3248 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
3249 case 1: is_signed ?
load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 3250 default: ShouldNotReachHere(); 3251 } 3252 } 3253 3254 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) { 3255 switch (size_in_bytes) { 3256 #ifndef _LP64 3257 case 8: 3258 assert(src2 != noreg, "second source register required"); 3259 movl(dst, src); 3260 movl(dst.plus_disp(BytesPerInt), src2); 3261 break; 3262 #else 3263 case 8: movq(dst, src); break; 3264 #endif 3265 case 4: movl(dst, src); break; 3266 case 2: movw(dst, src); break; 3267 case 1: movb(dst, src); break; 3268 default: ShouldNotReachHere(); 3269 } 3270 } 3271 3272 void MacroAssembler::mov32(AddressLiteral dst, Register src) { 3273 if (reachable(dst)) { 3274 movl(as_Address(dst), src); 3275 } else { 3276 lea(rscratch1, dst); 3277 movl(Address(rscratch1, 0), src); 3278 } 3279 } 3280 3281 void MacroAssembler::mov32(Register dst, AddressLiteral src) { 3282 if (reachable(src)) { 3283 movl(dst, as_Address(src)); 3284 } else { 3285 lea(rscratch1, src); 3286 movl(dst, Address(rscratch1, 0)); 3287 } 3288 } 3289 3290 // C++ bool manipulation 3291 3292 void MacroAssembler::movbool(Register dst, Address src) { 3293 if(sizeof(bool) == 1) 3294 movb(dst, src); 3295 else if(sizeof(bool) == 2) 3296 movw(dst, src); 3297 else if(sizeof(bool) == 4) 3298 movl(dst, src); 3299 else 3300 // unsupported 3301 ShouldNotReachHere(); 3302 } 3303 3304 void MacroAssembler::movbool(Address dst, bool boolconst) { 3305 if(sizeof(bool) == 1) 3306 movb(dst, (int) boolconst); 3307 else if(sizeof(bool) == 2) 3308 movw(dst, (int) boolconst); 3309 else if(sizeof(bool) == 4) 3310 movl(dst, (int) boolconst); 3311 else 3312 // unsupported 3313 ShouldNotReachHere(); 3314 } 3315 3316 void MacroAssembler::movbool(Address dst, Register src) { 3317 if(sizeof(bool) == 1) 3318 movb(dst, src); 3319 else if(sizeof(bool) == 2) 3320 movw(dst, src); 3321 else if(sizeof(bool) == 4) 3322 movl(dst, src); 3323 else 3324 // unsupported 3325 ShouldNotReachHere(); 3326 } 3327 3328 void MacroAssembler::movbyte(ArrayAddress dst, int src) { 3329 movb(as_Address(dst), src); 3330 } 3331 3332 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src) { 3333 if (reachable(src)) { 3334 movdl(dst, as_Address(src)); 3335 } else { 3336 lea(rscratch1, src); 3337 movdl(dst, Address(rscratch1, 0)); 3338 } 3339 } 3340 3341 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src) { 3342 if (reachable(src)) { 3343 movq(dst, as_Address(src)); 3344 } else { 3345 lea(rscratch1, src); 3346 movq(dst, Address(rscratch1, 0)); 3347 } 3348 } 3349 3350 #ifdef COMPILER2 3351 void MacroAssembler::setvectmask(Register dst, Register src) { 3352 guarantee(PostLoopMultiversioning, "must be"); 3353 Assembler::movl(dst, 1); 3354 Assembler::shlxl(dst, dst, src); 3355 Assembler::decl(dst); 3356 Assembler::kmovdl(k1, dst); 3357 Assembler::movl(dst, src); 3358 } 3359 3360 void MacroAssembler::restorevectmask() { 3361 guarantee(PostLoopMultiversioning, "must be"); 3362 Assembler::knotwl(k1, k0); 3363 } 3364 #endif // COMPILER2 3365 3366 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) { 3367 if (reachable(src)) { 3368 if (UseXmmLoadAndClearUpper) { 3369 movsd (dst, as_Address(src)); 3370 } else { 3371 movlpd(dst, as_Address(src)); 3372 } 3373 } else { 3374 lea(rscratch1, src); 3375 if (UseXmmLoadAndClearUpper) { 3376 movsd (dst, Address(rscratch1, 0)); 3377 } else { 3378 movlpd(dst, Address(rscratch1, 0)); 3379 } 3380 } 3381 } 3382 3383 void 
void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    movss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    movss(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::movptr(Register dst, Register src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}

void MacroAssembler::movptr(Register dst, Address src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}

// src should NEVER be a real pointer. Use AddressLiteral for true pointers
void MacroAssembler::movptr(Register dst, intptr_t src) {
  LP64_ONLY(mov64(dst, src)) NOT_LP64(movl(dst, src));
}

void MacroAssembler::movptr(Address dst, Register src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}

void MacroAssembler::movdqu(Address dst, XMMRegister src) {
  assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()), "XMM register should be 0-15");
  Assembler::movdqu(dst, src);
}

void MacroAssembler::movdqu(XMMRegister dst, Address src) {
  assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()), "XMM register should be 0-15");
  Assembler::movdqu(dst, src);
}

void MacroAssembler::movdqu(XMMRegister dst, XMMRegister src) {
  assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()), "XMM register should be 0-15");
  Assembler::movdqu(dst, src);
}

void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src, Register scratchReg) {
  if (reachable(src)) {
    movdqu(dst, as_Address(src));
  } else {
    lea(scratchReg, src);
    movdqu(dst, Address(scratchReg, 0));
  }
}

void MacroAssembler::vmovdqu(Address dst, XMMRegister src) {
  assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()), "XMM register should be 0-15");
  Assembler::vmovdqu(dst, src);
}

void MacroAssembler::vmovdqu(XMMRegister dst, Address src) {
  assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()), "XMM register should be 0-15");
  Assembler::vmovdqu(dst, src);
}

void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) {
  assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()), "XMM register should be 0-15");
  Assembler::vmovdqu(dst, src);
}

void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, Register scratch_reg) {
  if (reachable(src)) {
    vmovdqu(dst, as_Address(src));
  } else {
    lea(scratch_reg, src);
    vmovdqu(dst, Address(scratch_reg, 0));
  }
}

void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
  if (reachable(src)) {
    Assembler::evmovdquq(dst, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::evmovdquq(dst, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::movdqa(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::movdqa(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::movsd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::movsd(dst, Address(rscratch1, 0));
  }
}
void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::movss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::movss(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::mulsd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::mulsd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::mulss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::mulss(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check(offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any (non-CC) registers
    // NOTE: cmpl is plenty here to provoke a segv
    cmpptr(rax, Address(reg, 0));
    // Note: should probably use testl(rax, Address(reg, 0));
    //       may be shorter code (however, this version of
    //       testl needs to be implemented first)
  } else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}

void MacroAssembler::os_breakpoint() {
  // instead of directly emitting a breakpoint, call os:breakpoint for better debugability
  // (e.g., MSVC can't call ps() otherwise)
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
}

void MacroAssembler::unimplemented(const char* what) {
  const char* buf = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("unimplemented: %s", what);
    buf = code_string(ss.as_string());
  }
  stop(buf);
}

#ifdef _LP64
#define XSTATE_BV 0x200
#endif

void MacroAssembler::pop_CPU_state() {
  pop_FPU_state();
  pop_IU_state();
}

void MacroAssembler::pop_FPU_state() {
#ifndef _LP64
  frstor(Address(rsp, 0));
#else
  fxrstor(Address(rsp, 0));
#endif
  addptr(rsp, FPUStateSizeInWords * wordSize);
}

void MacroAssembler::pop_IU_state() {
  popa();
  LP64_ONLY(addq(rsp, 8));
  popf();
}

// Save Integer and Float state
// Warning: Stack must be 16 byte aligned (64bit)
void MacroAssembler::push_CPU_state() {
  push_IU_state();
  push_FPU_state();
}

void MacroAssembler::push_FPU_state() {
  subptr(rsp, FPUStateSizeInWords * wordSize);
#ifndef _LP64
  fnsave(Address(rsp, 0));
  fwait();
#else
  fxsave(Address(rsp, 0));
#endif // LP64
}

void MacroAssembler::push_IU_state() {
  // Push flags first because pusha kills them
  pushf();
  // Make sure rsp stays 16-byte aligned
  LP64_ONLY(subq(rsp, 8));
  pusha();
}
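// Alignment sketch (assuming MacroAssembler::pusha saves 16 GP registers on
// 64-bit): if rsp was 16-byte aligned on entry to push_IU_state, pushf
// leaves it 8 mod 16, the explicit subq(rsp, 8) restores 0 mod 16, and
// pusha's 16 * 8 = 128 pushed bytes preserve that:
//
//   8 (pushf) + 8 (subq) + 128 (pusha) = 144 bytes, and 144 % 16 == 0
//
// pop_IU_state undoes the same three steps in reverse order.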
void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rdi;
    get_thread(java_thread);
  }
  // we must set sp to zero to clear frame
  movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
  if (clear_fp) {
    movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
  }

  // Always clear the pc because it could have been set by make_walkable()
  movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);

  vzeroupper();
}

void MacroAssembler::restore_rax(Register tmp) {
  if (tmp == noreg) pop(rax);
  else if (tmp != rax) mov(rax, tmp);
}

void MacroAssembler::round_to(Register reg, int modulus) {
  addptr(reg, modulus - 1);
  andptr(reg, -modulus);
}

void MacroAssembler::save_rax(Register tmp) {
  if (tmp == noreg) push(rax);
  else if (tmp != rax) mov(tmp, rax);
}

void MacroAssembler::safepoint_poll(Label& slow_path, Register thread_reg, Register temp_reg) {
  if (SafepointMechanism::uses_thread_local_poll()) {
#ifdef _LP64
    assert(thread_reg == r15_thread, "should be");
#else
    if (thread_reg == noreg) {
      thread_reg = temp_reg;
      get_thread(thread_reg);
    }
#endif
    testb(Address(thread_reg, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
    jcc(Assembler::notZero, slow_path); // handshake bit set implies poll
  } else {
    cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
          SafepointSynchronize::_not_synchronized);
    jcc(Assembler::notEqual, slow_path);
  }
}

// Calls to C land
//
// When entering C land, the rbp & rsp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
void MacroAssembler::set_last_Java_frame(Register java_thread,
                                         Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc) {
  vzeroupper();
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rdi;
    get_thread(java_thread);
  }
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }

  // last_java_fp is optional

  if (last_java_fp->is_valid()) {
    movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
  }

  // last_java_pc is optional

  if (last_java_pc != NULL) {
    lea(Address(java_thread,
                JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()),
        InternalAddress(last_java_pc));
  }
  movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
}

void MacroAssembler::shlptr(Register dst, int imm8) {
  LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8));
}

void MacroAssembler::shrptr(Register dst, int imm8) {
  LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8));
}

void MacroAssembler::sign_extend_byte(Register reg) {
  if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) {
    movsbl(reg, reg); // movsxb
  } else {
    shll(reg, 24);
    sarl(reg, 24);
  }
}

void MacroAssembler::sign_extend_short(Register reg) {
  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
    movswl(reg, reg); // movsxw
  } else {
    shll(reg, 16);
    sarl(reg, 16);
  }
}

void MacroAssembler::testl(Register dst, AddressLiteral src) {
  assert(reachable(src), "Address should be reachable");
  testl(dst, as_Address(src));
}
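// Encoding note for the SIMD wrappers that follow: xmm16-xmm31 exist only
// under AVX-512, and legacy/VEX-encoded instructions cannot name them; an
// EVEX encoding is required, which for 128/256-bit operands needs AVX512VL
// (or the matching VLBW/VLDQ variant for the instruction). Hence the
// recurring assert pattern, sketched roughly as:
//
//   assert(dst->encoding() < 16 || VM_Version::supports_avx512vl(),
//          "XMM register should be 0-15");
//
// i.e. either stay within xmm0-xmm15 or prove the EVEX form is available.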
void MacroAssembler::pcmpeqb(XMMRegister dst, XMMRegister src) {
  assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::pcmpeqb(dst, src);
}

void MacroAssembler::pcmpeqw(XMMRegister dst, XMMRegister src) {
  assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::pcmpeqw(dst, src);
}

void MacroAssembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
  assert((dst->encoding() < 16), "XMM register should be 0-15");
  Assembler::pcmpestri(dst, src, imm8);
}

void MacroAssembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
  assert((dst->encoding() < 16 && src->encoding() < 16), "XMM register should be 0-15");
  Assembler::pcmpestri(dst, src, imm8);
}

void MacroAssembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
  assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::pmovzxbw(dst, src);
}

void MacroAssembler::pmovzxbw(XMMRegister dst, Address src) {
  assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::pmovzxbw(dst, src);
}

void MacroAssembler::pmovmskb(Register dst, XMMRegister src) {
  assert((src->encoding() < 16), "XMM register should be 0-15");
  Assembler::pmovmskb(dst, src);
}

void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) {
  assert((dst->encoding() < 16 && src->encoding() < 16), "XMM register should be 0-15");
  Assembler::ptest(dst, src);
}

void MacroAssembler::sqrtsd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::sqrtsd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::sqrtsd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::sqrtss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::sqrtss(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::subsd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::subsd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register scratch_reg) {
  if (reachable(src)) {
    Assembler::roundsd(dst, as_Address(src), rmode);
  } else {
    lea(scratch_reg, src);
    Assembler::roundsd(dst, Address(scratch_reg, 0), rmode);
  }
}

void MacroAssembler::subss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::subss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::subss(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::ucomisd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::ucomisd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::ucomiss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::ucomiss(dst, Address(rscratch1, 0));
  }
}
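// Usage note: the xorpd/xorps AddressLiteral forms below flip IEEE-754 sign
// bits against an in-memory mask. Illustrative sketch of the masks involved
// (values follow from the IEEE-754 layout; they are not defined in this
// file):
//
//   bits32 ^ 0x80000000u            negates a float  (xorps equivalent)
//   bits64 ^ 0x8000000000000000ull  negates a double (xorpd equivalent)
//
// The SSE forms require the 16-byte-aligned mask the asserts below insist
// on; AVX forms tolerate unaligned memory operands, hence the UseAVX > 0
// escape hatch.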
void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src, Register scratch_reg) {
  // Used in sign-bit flipping with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::xorpd(dst, as_Address(src));
  } else {
    lea(scratch_reg, src);
    Assembler::xorpd(dst, Address(scratch_reg, 0));
  }
}

void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) {
  if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) {
    Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
  } else {
    Assembler::xorpd(dst, src);
  }
}

void MacroAssembler::xorps(XMMRegister dst, XMMRegister src) {
  if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) {
    Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit);
  } else {
    Assembler::xorps(dst, src);
  }
}

void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src, Register scratch_reg) {
  // Used in sign-bit flipping with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::xorps(dst, as_Address(src));
  } else {
    lea(scratch_reg, src);
    Assembler::xorps(dst, Address(scratch_reg, 0));
  }
}

void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src) {
  // Used in sign-bit flipping with aligned address.
  bool aligned_adr = (((intptr_t)src.target() & 15) == 0);
  assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::pshufb(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::pshufb(dst, Address(rscratch1, 0));
  }
}

// AVX 3-operands instructions

void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vaddsd(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vaddsd(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vaddss(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vaddss(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) {
  assert(UseAVX > 0, "requires some form of AVX");
  if (reachable(src)) {
    Assembler::vpaddd(dst, nds, as_Address(src), vector_len);
  } else {
    lea(rscratch, src);
    Assembler::vpaddd(dst, nds, Address(rscratch, 0), vector_len);
  }
}

void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()), "XMM register should be 0-15");
  vandps(dst, nds, negate_field, vector_len);
}

void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()), "XMM register should be 0-15");
  vandpd(dst, nds, negate_field, vector_len);
}
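// vabsss/vabssd above compute an absolute value by AND-ing with the
// complement of the sign-bit mask supplied in negate_field. A scalar sketch
// of the same bit trick (illustrative only, on the raw bit patterns):
//
//   bits32 & 0x7FFFFFFFu            => fabsf  (clears the float sign bit)
//   bits64 & 0x7FFFFFFFFFFFFFFFull  => fabs   (clears the double sign bit)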
void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpaddb(dst, nds, src, vector_len);
}

void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpaddb(dst, nds, src, vector_len);
}

void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpaddw(dst, nds, src, vector_len);
}

void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpaddw(dst, nds, src, vector_len);
}

void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg) {
  if (reachable(src)) {
    Assembler::vpand(dst, nds, as_Address(src), vector_len);
  } else {
    lea(scratch_reg, src);
    Assembler::vpand(dst, nds, Address(scratch_reg, 0), vector_len);
  }
}

void MacroAssembler::vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpbroadcastw(dst, src, vector_len);
}

void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpcmpeqb(dst, nds, src, vector_len);
}

void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpcmpeqw(dst, nds, src, vector_len);
}

void MacroAssembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) {
  assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpmovzxbw(dst, src, vector_len);
}

void MacroAssembler::vpmovmskb(Register dst, XMMRegister src) {
  assert((src->encoding() < 16), "XMM register should be 0-15");
  Assembler::vpmovmskb(dst, src);
}

void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpmullw(dst, nds, src, vector_len);
}

void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpmullw(dst, nds, src, vector_len);
}
void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsubb(dst, nds, src, vector_len);
}

void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsubb(dst, nds, src, vector_len);
}

void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsubw(dst, nds, src, vector_len);
}

void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsubw(dst, nds, src, vector_len);
}

void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
  assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsraw(dst, nds, shift, vector_len);
}

void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsraw(dst, nds, shift, vector_len);
}

void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
  assert(UseAVX > 2, "");
  if (!VM_Version::supports_avx512vl() && vector_len < 2) {
    vector_len = 2;
  }
  Assembler::evpsraq(dst, nds, shift, vector_len);
}

void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
  assert(UseAVX > 2, "");
  if (!VM_Version::supports_avx512vl() && vector_len < 2) {
    vector_len = 2;
  }
  Assembler::evpsraq(dst, nds, shift, vector_len);
}

void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
  assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsrlw(dst, nds, shift, vector_len);
}

void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsrlw(dst, nds, shift, vector_len);
}

void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) {
  assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsllw(dst, nds, shift, vector_len);
}

void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::vpsllw(dst, nds, shift, vector_len);
}
void MacroAssembler::vptest(XMMRegister dst, XMMRegister src) {
  assert((dst->encoding() < 16 && src->encoding() < 16), "XMM register should be 0-15");
  Assembler::vptest(dst, src);
}

void MacroAssembler::punpcklbw(XMMRegister dst, XMMRegister src) {
  assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::punpcklbw(dst, src);
}

void MacroAssembler::pshufd(XMMRegister dst, Address src, int mode) {
  assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()), "XMM register should be 0-15");
  Assembler::pshufd(dst, src, mode);
}

void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
  assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()), "XMM register should be 0-15");
  Assembler::pshuflw(dst, src, mode);
}

void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg) {
  if (reachable(src)) {
    vandpd(dst, nds, as_Address(src), vector_len);
  } else {
    lea(scratch_reg, src);
    vandpd(dst, nds, Address(scratch_reg, 0), vector_len);
  }
}

void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg) {
  if (reachable(src)) {
    vandps(dst, nds, as_Address(src), vector_len);
  } else {
    lea(scratch_reg, src);
    vandps(dst, nds, Address(scratch_reg, 0), vector_len);
  }
}

void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vdivsd(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vdivsd(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vdivss(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vdivss(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vmulsd(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vmulsd(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vmulss(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vmulss(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vsubsd(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vsubsd(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  if (reachable(src)) {
    vsubss(dst, nds, as_Address(src));
  } else {
    lea(rscratch1, src);
    vsubss(dst, nds, Address(rscratch1, 0));
  }
}

void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()), "XMM register should be 0-15");
  vxorps(dst, nds, src, Assembler::AVX_128bit);
}
void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
  assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()), "XMM register should be 0-15");
  vxorpd(dst, nds, src, Assembler::AVX_128bit);
}

void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg) {
  if (reachable(src)) {
    vxorpd(dst, nds, as_Address(src), vector_len);
  } else {
    lea(scratch_reg, src);
    vxorpd(dst, nds, Address(scratch_reg, 0), vector_len);
  }
}

void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg) {
  if (reachable(src)) {
    vxorps(dst, nds, as_Address(src), vector_len);
  } else {
    lea(scratch_reg, src);
    vxorps(dst, nds, Address(scratch_reg, 0), vector_len);
  }
}

void MacroAssembler::vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg) {
  if (UseAVX > 1 || (vector_len < 1)) {
    if (reachable(src)) {
      Assembler::vpxor(dst, nds, as_Address(src), vector_len);
    } else {
      lea(scratch_reg, src);
      Assembler::vpxor(dst, nds, Address(scratch_reg, 0), vector_len);
    }
  } else {
    MacroAssembler::vxorpd(dst, nds, src, vector_len, scratch_reg);
  }
}

//-------------------------------------------------------------------------------------------
#ifdef COMPILER2
// Generic instructions support for use in .ad files C2 code generation

void MacroAssembler::vabsnegd(int opcode, XMMRegister dst, Register scr) {
  if (opcode == Op_AbsVD) {
    andpd(dst, ExternalAddress(StubRoutines::x86::vector_double_sign_mask()), scr);
  } else {
    assert((opcode == Op_NegVD), "opcode should be Op_NegVD");
    xorpd(dst, ExternalAddress(StubRoutines::x86::vector_double_sign_flip()), scr);
  }
}

void MacroAssembler::vabsnegd(int opcode, XMMRegister dst, XMMRegister src, int vector_len, Register scr) {
  if (opcode == Op_AbsVD) {
    vandpd(dst, src, ExternalAddress(StubRoutines::x86::vector_double_sign_mask()), vector_len, scr);
  } else {
    assert((opcode == Op_NegVD), "opcode should be Op_NegVD");
    vxorpd(dst, src, ExternalAddress(StubRoutines::x86::vector_double_sign_flip()), vector_len, scr);
  }
}

void MacroAssembler::vabsnegf(int opcode, XMMRegister dst, Register scr) {
  if (opcode == Op_AbsVF) {
    andps(dst, ExternalAddress(StubRoutines::x86::vector_float_sign_mask()), scr);
  } else {
    assert((opcode == Op_NegVF), "opcode should be Op_NegVF");
    xorps(dst, ExternalAddress(StubRoutines::x86::vector_float_sign_flip()), scr);
  }
}

void MacroAssembler::vabsnegf(int opcode, XMMRegister dst, XMMRegister src, int vector_len, Register scr) {
  if (opcode == Op_AbsVF) {
    vandps(dst, src, ExternalAddress(StubRoutines::x86::vector_float_sign_mask()), vector_len, scr);
  } else {
    assert((opcode == Op_NegVF), "opcode should be Op_NegVF");
    vxorps(dst, src, ExternalAddress(StubRoutines::x86::vector_float_sign_flip()), vector_len, scr);
  }
}

void MacroAssembler::vextendbw(bool sign, XMMRegister dst, XMMRegister src) {
  if (sign) {
    pmovsxbw(dst, src);
  } else {
    pmovzxbw(dst, src);
  }
}

void MacroAssembler::vextendbw(bool sign, XMMRegister dst, XMMRegister src, int vector_len) {
  if (sign) {
    vpmovsxbw(dst, src, vector_len);
  } else {
    vpmovzxbw(dst, src, vector_len);
  }
}
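// Reference note for the vshift* helpers below: they dispatch C2 vector
// shift opcodes onto SSE/AVX shift instructions. For the 32-bit element
// case the mapping is:
//
//   Op_RShiftVI  -> psrad / vpsrad   (arithmetic right)
//   Op_LShiftVI  -> pslld / vpslld   (left)
//   Op_URShiftVI -> psrld / vpsrld   (logical right)
//
// with analogous word (16-bit, also used for bytes) and quadword (64-bit)
// variants following the same pattern.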
void MacroAssembler::vshiftd(int opcode, XMMRegister dst, XMMRegister src) {
  if (opcode == Op_RShiftVI) {
    psrad(dst, src);
  } else if (opcode == Op_LShiftVI) {
    pslld(dst, src);
  } else {
    assert((opcode == Op_URShiftVI), "opcode should be Op_URShiftVI");
    psrld(dst, src);
  }
}

void MacroAssembler::vshiftd(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  if (opcode == Op_RShiftVI) {
    vpsrad(dst, nds, src, vector_len);
  } else if (opcode == Op_LShiftVI) {
    vpslld(dst, nds, src, vector_len);
  } else {
    assert((opcode == Op_URShiftVI), "opcode should be Op_URShiftVI");
    vpsrld(dst, nds, src, vector_len);
  }
}

void MacroAssembler::vshiftw(int opcode, XMMRegister dst, XMMRegister src) {
  if ((opcode == Op_RShiftVS) || (opcode == Op_RShiftVB)) {
    psraw(dst, src);
  } else if ((opcode == Op_LShiftVS) || (opcode == Op_LShiftVB)) {
    psllw(dst, src);
  } else {
    assert(((opcode == Op_URShiftVS) || (opcode == Op_URShiftVB)), "opcode should be one of Op_URShiftVS or Op_URShiftVB");
    psrlw(dst, src);
  }
}

void MacroAssembler::vshiftw(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  if ((opcode == Op_RShiftVS) || (opcode == Op_RShiftVB)) {
    vpsraw(dst, nds, src, vector_len);
  } else if ((opcode == Op_LShiftVS) || (opcode == Op_LShiftVB)) {
    vpsllw(dst, nds, src, vector_len);
  } else {
    assert(((opcode == Op_URShiftVS) || (opcode == Op_URShiftVB)), "opcode should be one of Op_URShiftVS or Op_URShiftVB");
    vpsrlw(dst, nds, src, vector_len);
  }
}

void MacroAssembler::vshiftq(int opcode, XMMRegister dst, XMMRegister src) {
  if (opcode == Op_RShiftVL) {
    psrlq(dst, src); // using srl to implement sra on pre-avx512 systems
  } else if (opcode == Op_LShiftVL) {
    psllq(dst, src);
  } else {
    assert((opcode == Op_URShiftVL), "opcode should be Op_URShiftVL");
    psrlq(dst, src);
  }
}

void MacroAssembler::vshiftq(int opcode, XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  if (opcode == Op_RShiftVL) {
    evpsraq(dst, nds, src, vector_len);
  } else if (opcode == Op_LShiftVL) {
    vpsllq(dst, nds, src, vector_len);
  } else {
    assert((opcode == Op_URShiftVL), "opcode should be Op_URShiftVL");
    vpsrlq(dst, nds, src, vector_len);
  }
}
#endif
//-------------------------------------------------------------------------------------------

void MacroAssembler::clear_jweak_tag(Register possibly_jweak) {
  const int32_t inverted_jweak_mask = ~static_cast<int32_t>(JNIHandles::weak_tag_mask);
  STATIC_ASSERT(inverted_jweak_mask == -2); // otherwise check this code
  // The inverted mask is sign-extended
  andptr(possibly_jweak, inverted_jweak_mask);
}
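// Tagging sketch: JNI handles are word-aligned, so the low bit is free to
// mark weak handles (weak_tag_mask == 1, hence the STATIC_ASSERT above that
// the inverted mask is -2). Given a tagged jweak, the negative displacement
// used in resolve_jobject below both clears the tag and addresses the
// handle's slot in a single memory operand, roughly:
//
//   oop* slot = (oop*)(value - JNIHandles::weak_tag_value); // untag
//   result    = *slot;                                      // then load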
void MacroAssembler::resolve_jobject(Register value,
                                     Register thread,
                                     Register tmp) {
  assert_different_registers(value, thread, tmp);
  Label done, not_weak;
  testptr(value, value);
  jcc(Assembler::zero, done);                // Use NULL as-is.
  testptr(value, JNIHandles::weak_tag_mask); // Test for jweak tag.
  jcc(Assembler::zero, not_weak);
  // Resolve jweak.
  access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
                 value, Address(value, -JNIHandles::weak_tag_value), tmp, thread);
  verify_oop(value);
  jmp(done);
  bind(not_weak);
  // Resolve (untagged) jobject.
  access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, 0), tmp, thread);
  verify_oop(value);
  bind(done);
}

void MacroAssembler::subptr(Register dst, int32_t imm32) {
  LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
  LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32));
}

void MacroAssembler::subptr(Register dst, Register src) {
  LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
}

// C++ bool manipulation
void MacroAssembler::testbool(Register dst) {
  if (sizeof(bool) == 1)
    testb(dst, 0xff);
  else if (sizeof(bool) == 2) {
    // testw implementation needed for two byte bools
    ShouldNotReachHere();
  } else if (sizeof(bool) == 4)
    testl(dst, dst);
  else
    // unsupported
    ShouldNotReachHere();
}

void MacroAssembler::testptr(Register dst, Register src) {
  LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
}

// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void MacroAssembler::tlab_allocate(Register thread, Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Register t2,
                                   Label& slow_case) {
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->tlab_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
}

// Defines obj, preserves var_size_in_bytes
void MacroAssembler::eden_allocate(Register thread, Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Label& slow_case) {
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->eden_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
}
// Preserves the contents of address, destroys the contents length_in_bytes and temp.
void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) {
  assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different");
  assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord");
  Label done;

  testptr(length_in_bytes, length_in_bytes);
  jcc(Assembler::zero, done);

  // initialize topmost word, divide index by 2, check if odd and test if zero
  // note: for the remaining code to work, index must be a multiple of BytesPerWord
#ifdef ASSERT
  {
    Label L;
    testptr(length_in_bytes, BytesPerWord - 1);
    jcc(Assembler::zero, L);
    stop("length must be a multiple of BytesPerWord");
    bind(L);
  }
#endif
  Register index = length_in_bytes;
  xorptr(temp, temp);    // use _zero reg to clear memory (shorter code)
  if (UseIncDec) {
    shrptr(index, 3);    // divide by 8/16 and set carry flag if bit 2 was set
  } else {
    shrptr(index, 2);    // use 2 instructions to avoid partial flag stall
    shrptr(index, 1);
  }
#ifndef _LP64
  // index could have not been a multiple of 8 (i.e., bit 2 was set)
  {
    Label even;
    // note: if index was a multiple of 8, then it cannot
    //       be 0 now otherwise it must have been 0 before
    //       => if it is even, we don't need to check for 0 again
    jcc(Assembler::carryClear, even);
    // clear topmost word (no jump would be needed if conditional assignment worked here)
    movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp);
    // index could be 0 now, must check again
    jcc(Assembler::zero, done);
    bind(even);
  }
#endif // !_LP64
  // initialize remaining object fields: index is a multiple of 2 now
  {
    Label loop;
    bind(loop);
    movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp);
    NOT_LP64(movptr(Address(address, index, Address::times_8, offset_in_bytes - 2*BytesPerWord), temp);)
    decrement(index);
    jcc(Assembler::notZero, loop);
  }

  bind(done);
}
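// Index arithmetic sketch for zero_memory above: length_in_bytes is a
// multiple of BytesPerWord, and the shift right by 3 converts it into a
// count of 8-byte strides for the times_8 addressing in the loop:
//
//   64-bit: index = bytes >> 3  (one 8-byte word per iteration)
//   32-bit: index = bytes >> 3  (two 4-byte words per iteration; the carry
//           out of the shift records whether an odd trailing word remains,
//           which the #ifndef _LP64 block above handles separately)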
// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, scan_temp);
  assert_different_registers(method_result, intf_klass, scan_temp);
  assert(recv_klass != method_result || !return_method,
         "recv_klass can be destroyed when method isn't needed");

  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = itableMethodEntry::method_offset_in_bytes();
  int scan_step   = itableOffsetEntry::size() * wordSize;
  int vte_size    = vtableEntry::size_in_bytes();
  Address::ScaleFactor times_vte_scale = Address::times_ptr;
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));

  // %%% Could store the aligned, prescaled offset in the klassoop.
  lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
    lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
  }

  // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  for (int peel = 1; peel >= 0; peel--) {
    movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
    cmpptr(intf_klass, method_result);

    if (peel) {
      jccb(Assembler::equal, found_method);
    } else {
      jccb(Assembler::notEqual, search);
      // (invert the test to fall through to found_method...)
    }

    if (!peel) break;

    bind(search);

    // Check that the previous entry is non-null. A null entry means that
    // the receiver class doesn't implement the interface, and wasn't the
    // same as when the caller was compiled.
    testptr(method_result, method_result);
    jcc(Assembler::zero, L_no_such_interface);
    addptr(scan_temp, scan_step);
  }

  bind(found_method);

  if (return_method) {
    // Got a hit.
    movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
    movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
  }
}
// virtual method calling
void MacroAssembler::lookup_virtual_method(Register recv_klass,
                                           RegisterOrConstant vtable_index,
                                           Register method_result) {
  const int base = in_bytes(Klass::vtable_start_offset());
  assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
  Address vtable_entry_addr(recv_klass,
                            vtable_index, Address::times_ptr,
                            base + vtableEntry::method_offset_in_bytes());
  movptr(method_result, vtable_entry_addr);
}

void MacroAssembler::check_klass_subtype(Register sub_klass,
                                         Register super_klass,
                                         Register temp_reg,
                                         Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg,        &L_success, &L_failure, NULL);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
  bind(L_failure);
}
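// Convention note for the two subtype-check helpers that follow: callers
// may pass NULL for at most one of the outcome labels, and a NULL label
// means "fall through". check_klass_subtype above uses exactly this, e.g.:
//
//   Label L_success, L_failure;
//   check_klass_subtype_fast_path(sub, super, tmp,
//                                 &L_success, &L_failure,
//                                 NULL /* slow path falls through */);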
void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   RegisterOrConstant super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  int sc_offset  = in_bytes(Klass::secondary_super_cache_offset());
  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jcc, which "knows" that L_fallthrough, at least, is in
  // range of a jccb. If this routine grows larger, reconsider at
  // least some of these.
#define local_jcc(assembler_cond, label)                                \
  if (&(label) == &L_fallthrough)  jccb(assembler_cond, label);         \
  else                             jcc( assembler_cond, label) /*omit semi*/

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else                            jmp(label)                /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface. Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmpptr(sub_klass, super_klass);
  local_jcc(Assembler::equal, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    // Positive movl does right thing on LP64.
    movl(temp_reg, super_check_offset_addr);
    super_check_offset = RegisterOrConstant(temp_reg);
  }
  Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
  cmpptr(super_klass, super_check_addr); // load displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  if (super_check_offset.is_register()) {
    local_jcc(Assembler::equal, *L_success);
    cmpl(super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      local_jcc(Assembler::equal, *L_slow_path);
    } else {
      local_jcc(Assembler::notEqual, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      local_jcc(Assembler::equal, *L_success);
    } else {
      local_jcc(Assembler::notEqual, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      local_jcc(Assembler::equal, *L_success);
    } else {
      local_jcc(Assembler::notEqual, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);

#undef local_jcc
#undef final_jmp
}

void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Register temp2_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   bool set_cond_codes) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  if (temp2_reg != noreg)
    assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
#define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  // a couple of useful fields in sub_klass:
  int ss_offset = in_bytes(Klass::secondary_supers_offset());
  int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
  Address secondary_supers_addr(sub_klass, ss_offset);
  Address super_cache_addr(     sub_klass, sc_offset);

  // Do a linear scan of the secondary super-klass chain.
  // This code is rarely used, so simplicity is a virtue here.
  // The repne_scan instruction uses fixed registers, which we must spill.
  // Don't worry too much about pre-existing connections with the input regs.

  assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
  assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)

  // Get super_klass value into rax (even if it was in rdi or rcx).
  bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
  if (super_klass != rax || UseCompressedOops) {
    if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
    mov(rax, super_klass);
  }
  if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
  if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }

#ifndef PRODUCT
  int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
  ExternalAddress pst_counter_addr((address) pst_counter);
  NOT_LP64(  incrementl(pst_counter_addr) );
  LP64_ONLY( lea(rcx, pst_counter_addr) );
  LP64_ONLY( incrementl(Address(rcx, 0)) );
#endif //PRODUCT

  // We will consult the secondary-super array.
  movptr(rdi, secondary_supers_addr);
  // Load the array length. (Positive movl does right thing on LP64.)
  movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes()));
  // Skip to start of data.
  addptr(rdi, Array<Klass*>::base_offset_in_bytes());

  // Scan RCX words at [RDI] for an occurrence of RAX.
  // Set NZ/Z based on last compare.
  // The Z flag will not be set by 'repne' if RCX == 0, since 'repne' itself
  // does not change flags; only the repeated scas instruction sets them.
  // Therefore set Z = 0 (not equal) before 'repne' to indicate that the
  // class was not found.

  testptr(rax, rax); // Set Z = 0
  repne_scan();

  // Unspill the temp. registers:
  if (pushed_rdi) pop(rdi);
  if (pushed_rcx) pop(rcx);
  if (pushed_rax) pop(rax);

  if (set_cond_codes) {
    // Special hack for the AD files: rdi is guaranteed non-zero.
    assert(!pushed_rdi, "rdi must be left non-NULL");
    // Also, the condition codes are properly set Z/NZ on succeed/failure.
  }

  if (L_failure == &L_fallthrough)
        jccb(Assembler::notEqual, *L_failure);
  else  jcc(Assembler::notEqual, *L_failure);
  // Success. Cache the super we found and proceed in triumph.
  movptr(super_cache_addr, super_klass);

  if (L_success != &L_fallthrough) {
    jmp(*L_success);
  }

#undef IS_A_TEMP

  bind(L_fallthrough);
}

void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) {
  assert(L_fast_path != NULL || L_slow_path != NULL, "at least one is required");

  Label L_fallthrough;
  if (L_fast_path == NULL) {
    L_fast_path = &L_fallthrough;
  } else if (L_slow_path == NULL) {
    L_slow_path = &L_fallthrough;
  }

  // Fast path check: class is fully initialized
  cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
  jcc(Assembler::equal, *L_fast_path);

  // Fast path check: current thread is initializer thread
  cmpptr(thread, Address(klass, InstanceKlass::init_thread_offset()));
  if (L_slow_path == &L_fallthrough) {
    jcc(Assembler::equal, *L_fast_path);
    bind(*L_slow_path);
  } else if (L_fast_path == &L_fallthrough) {
    jcc(Assembler::notEqual, *L_slow_path);
    bind(*L_fast_path);
  } else {
    Unimplemented();
  }
}

void MacroAssembler::cmov32(Condition cc, Register dst, Address src) {
  if (VM_Version::supports_cmov()) {
    cmovl(cc, dst, src);
  } else {
    Label L;
    jccb(negate_condition(cc), L);
    movl(dst, src);
    bind(L);
  }
}

void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
  if (VM_Version::supports_cmov()) {
    cmovl(cc, dst, src);
  } else {
    Label L;
    jccb(negate_condition(cc), L);
    movl(dst, src);
    bind(L);
  }
}

void MacroAssembler::verify_oop(Register reg, const char* s) {
  if (!VerifyOops) return;

  // Pass register number to verify_oop_subroutine
  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("verify_oop: %s: %s", reg->name(), s);
    b = code_string(ss.as_string());
  }
  BLOCK_COMMENT("verify_oop {");
#ifdef _LP64
  push(rscratch1);              // save r10, trashed by movptr()
#endif
  push(rax);                    // save rax
  push(reg);                    // pass register argument
  ExternalAddress buffer((address) b);
  // avoid using pushptr, as it modifies scratch registers
  // and our contract is not to modify anything
  movptr(rax, buffer.addr());
  push(rax);
  // call indirectly to solve generation ordering problem
  movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  call(rax);
  // Caller pops the arguments (oop, message) and restores rax, r10
  BLOCK_COMMENT("} verify_oop");
}

RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
                                                      Register tmp,
                                                      int offset) {
  intptr_t value = *delayed_value_addr;
  if (value != 0)
    return RegisterOrConstant(value + offset);

  // load indirectly to solve generation ordering problem
  movptr(tmp, ExternalAddress((address) delayed_value_addr));

#ifdef ASSERT
  { Label L;
    testptr(tmp, tmp);
    if (WizardMode) {
      const char* buf = NULL;
      {
        ResourceMark rm;
        stringStream ss;
        ss.print("DelayedValue=" INTPTR_FORMAT, delayed_value_addr[1]);
        buf = code_string(ss.as_string());
      }
      jcc(Assembler::notZero, L);
      STOP(buf);
    } else {
      jccb(Assembler::notZero, L);
      hlt();
    }
    bind(L);
  }
#endif
  if (offset != 0)
    addptr(tmp, offset);

  return RegisterOrConstant(tmp);
}

Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
                                         int extra_slot_offset) {
  // cf. TemplateTable::prepare_invoke(), if (load_receiver).
  int stackElementSize = Interpreter::stackElementSize;
  int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
#ifdef ASSERT
  int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
  assert(offset1 - offset == stackElementSize, "correct arithmetic");
#endif
  Register scale_reg = noreg;
  Address::ScaleFactor scale_factor = Address::no_scale;
  if (arg_slot.is_constant()) {
    offset += arg_slot.as_constant() * stackElementSize;
  } else {
    scale_reg    = arg_slot.as_register();
    scale_factor = Address::times(stackElementSize);
  }
  offset += wordSize; // return PC is on stack
  return Address(rsp, scale_reg, scale_factor, offset);
}

void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
  if (!VerifyOops) return;

  // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord);
  // Pass register number to verify_oop_subroutine
  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("verify_oop_addr: %s", s);
    b = code_string(ss.as_string());
  }
#ifdef _LP64
  push(rscratch1);              // save r10, trashed by movptr()
#endif
  push(rax);                    // save rax
  // addr may contain rsp so we will have to adjust it based on the push
  // we just did (and on 64 bit we do two pushes)
  // NOTE: the 64-bit build apparently once had a bug here: it did
  // movq(addr, rax), which stores rax into addr, the reverse of what
  // was intended.
  if (addr.uses(rsp)) {
    lea(rax, addr);
    pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
  } else {
    pushptr(addr);
  }

  ExternalAddress buffer((address) b);
  // pass msg argument
  // avoid using pushptr, as it modifies scratch registers
  // and our contract is not to modify anything
  movptr(rax, buffer.addr());
  push(rax);

  // call indirectly to solve generation ordering problem
  movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  call(rax);
  // Caller pops the arguments (addr, message) and restores rax, r10.
}
void MacroAssembler::verify_tlab() {
#ifdef ASSERT
  if (UseTLAB && VerifyOops) {
    Label next, ok;
    Register t1 = rsi;
    Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread);

    push(t1);
    NOT_LP64(push(thread_reg));
    NOT_LP64(get_thread(thread_reg));

    movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
    cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
    jcc(Assembler::aboveEqual, next);
    STOP("assert(top >= start)");
    should_not_reach_here();

    bind(next);
    movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
    cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
    jcc(Assembler::aboveEqual, ok);
    STOP("assert(top <= end)");
    should_not_reach_here();

    bind(ok);
    NOT_LP64(pop(thread_reg));
    pop(t1);
  }
#endif
}

class ControlWord {
 public:
  int32_t _value;

  int  rounding_control() const        { return  (_value >> 10) & 3      ; }
  int  precision_control() const       { return  (_value >>  8) & 3      ; }
  bool precision() const               { return ((_value >>  5) & 1) != 0; }
  bool underflow() const               { return ((_value >>  4) & 1) != 0; }
  bool overflow() const                { return ((_value >>  3) & 1) != 0; }
  bool zero_divide() const             { return ((_value >>  2) & 1) != 0; }
  bool denormalized() const            { return ((_value >>  1) & 1) != 0; }
  bool invalid() const                 { return ((_value >>  0) & 1) != 0; }

  void print() const {
    // rounding control
    const char* rc;
    switch (rounding_control()) {
      case 0: rc = "round near"; break;
      case 1: rc = "round down"; break;
      case 2: rc = "round up  "; break;
      case 3: rc = "chop      "; break;
    };
    // precision control
    const char* pc;
    switch (precision_control()) {
      case 0: pc = "24 bits "; break;
      case 1: pc = "reserved"; break;
      case 2: pc = "53 bits "; break;
      case 3: pc = "64 bits "; break;
    };
    // flags
    char f[9];
    f[0] = ' ';
    f[1] = ' ';
    f[2] = (precision   ()) ? 'P' : 'p';
    f[3] = (underflow   ()) ? 'U' : 'u';
    f[4] = (overflow    ()) ? 'O' : 'o';
    f[5] = (zero_divide ()) ? 'Z' : 'z';
    f[6] = (denormalized()) ? 'D' : 'd';
    f[7] = (invalid     ()) ? 'I' : 'i';
    f[8] = '\x0';
    // output
    printf("%04x  masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
  }

};
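// Example: the x87 power-on/FINIT control word 0x037F decodes under
// ControlWord as rounding_control() == 0 ("round near"),
// precision_control() == 3 ("64 bits"), and all six exception mask bits
// set, so print() would show the mask string "  PUOZDI".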
class StatusWord {
 public:
  int32_t _value;

  bool busy() const                    { return ((_value >> 15) & 1) != 0; }
  bool C3() const                      { return ((_value >> 14) & 1) != 0; }
  bool C2() const                      { return ((_value >> 10) & 1) != 0; }
  bool C1() const                      { return ((_value >>  9) & 1) != 0; }
  bool C0() const                      { return ((_value >>  8) & 1) != 0; }
  int  top() const                     { return  (_value >> 11) & 7      ; }
  bool error_status() const            { return ((_value >>  7) & 1) != 0; }
  bool stack_fault() const             { return ((_value >>  6) & 1) != 0; }
  bool precision() const               { return ((_value >>  5) & 1) != 0; }
  bool underflow() const               { return ((_value >>  4) & 1) != 0; }
  bool overflow() const                { return ((_value >>  3) & 1) != 0; }
  bool zero_divide() const             { return ((_value >>  2) & 1) != 0; }
  bool denormalized() const            { return ((_value >>  1) & 1) != 0; }
  bool invalid() const                 { return ((_value >>  0) & 1) != 0; }

  void print() const {
    // condition codes
    char c[5];
    c[0] = (C3()) ? '3' : '-';
    c[1] = (C2()) ? '2' : '-';
    c[2] = (C1()) ? '1' : '-';
    c[3] = (C0()) ? '0' : '-';
    c[4] = '\x0';
    // flags
    char f[9];
    f[0] = (error_status()) ? 'E' : '-';
    f[1] = (stack_fault ()) ? 'S' : '-';
    f[2] = (precision   ()) ? 'P' : '-';
    f[3] = (underflow   ()) ? 'U' : '-';
    f[4] = (overflow    ()) ? 'O' : '-';
    f[5] = (zero_divide ()) ? 'Z' : '-';
    f[6] = (denormalized()) ? 'D' : '-';
    f[7] = (invalid     ()) ? 'I' : '-';
    f[8] = '\x0';
    // output
    printf("%04x  flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top());
  }

};

class TagWord {
 public:
  int32_t _value;

  int tag_at(int i) const              { return (_value >> (i*2)) & 3; }

  void print() const {
    printf("%04x", _value & 0xFFFF);
  }

};

class FPU_Register {
 public:
  int32_t _m0;
  int32_t _m1;
  int16_t _ex;

  bool is_indefinite() const {
    return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
  }

  void print() const {
    char sign = (_ex < 0) ? '-' : '+';
    const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : "   ";
    printf("%c%04hx.%08x%08x  %s", sign, _ex, _m1, _m0, kind);
  };

};

class FPU_State {
 public:
  enum {
    register_size       = 10,
    number_of_registers =  8,
    register_mask       =  7
  };

  ControlWord  _control_word;
  StatusWord   _status_word;
  TagWord      _tag_word;
  int32_t      _error_offset;
  int32_t      _error_selector;
  int32_t      _data_offset;
  int32_t      _data_selector;
  int8_t       _register[register_size * number_of_registers];

  int tag_for_st(int i) const          { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
  FPU_Register* st(int i) const        { return (FPU_Register*)&_register[register_size * i]; }

  const char* tag_as_string(int tag) const {
    switch (tag) {
      case 0: return "valid";
      case 1: return "zero";
      case 2: return "special";
      case 3: return "empty";
    }
    ShouldNotReachHere();
    return NULL;
  }

  void print() const {
    // print computation registers
    { int t = _status_word.top();
      for (int i = 0; i < number_of_registers; i++) {
        int j = (i - t) & register_mask;
        printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
        st(j)->print();
        printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
      }
    }
    printf("\n");
    // print control registers
    printf("ctrl = "); _control_word.print(); printf("\n");
    printf("stat = "); _status_word .print(); printf("\n");
    printf("tags = "); _tag_word    .print(); printf("\n");
  }

};
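// Worked example for FPU_State::tag_for_st(): if _status_word.top() == 6,
// then ST(0) lives in physical register 6, ST(1) in register 7, and ST(2)
// wraps to register 0, because the physical index is (top + i) & register_mask.
// tag_for_st(i) thus returns the tag of the physical register backing ST(i).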
class Flag_Register {
 public:
  int32_t _value;

  bool overflow() const                { return ((_value >> 11) & 1) != 0; }
  bool direction() const               { return ((_value >> 10) & 1) != 0; }
  bool sign() const                    { return ((_value >>  7) & 1) != 0; }
  bool zero() const                    { return ((_value >>  6) & 1) != 0; }
  bool auxiliary_carry() const         { return ((_value >>  4) & 1) != 0; }
  bool parity() const                  { return ((_value >>  2) & 1) != 0; }
  bool carry() const                   { return ((_value >>  0) & 1) != 0; }

  void print() const {
    // flags
    char f[8];
    f[0] = (overflow       ()) ? 'O' : '-';
    f[1] = (direction      ()) ? 'D' : '-';
    f[2] = (sign           ()) ? 'S' : '-';
    f[3] = (zero           ()) ? 'Z' : '-';
    f[4] = (auxiliary_carry()) ? 'A' : '-';
    f[5] = (parity         ()) ? 'P' : '-';
    f[6] = (carry          ()) ? 'C' : '-';
    f[7] = '\x0';
    // output
    printf("%08x  flags = %s", _value, f);
  }

};

class IU_Register {
 public:
  int32_t _value;

  void print() const {
    printf("%08x  %11d", _value, _value);
  }

};

class IU_State {
 public:
  Flag_Register _eflags;
  IU_Register   _rdi;
  IU_Register   _rsi;
  IU_Register   _rbp;
  IU_Register   _rsp;
  IU_Register   _rbx;
  IU_Register   _rdx;
  IU_Register   _rcx;
  IU_Register   _rax;

  void print() const {
    // computation registers
    printf("rax,  = "); _rax.print(); printf("\n");
    printf("rbx,  = "); _rbx.print(); printf("\n");
    printf("rcx  = "); _rcx.print(); printf("\n");
    printf("rdx  = "); _rdx.print(); printf("\n");
    printf("rdi  = "); _rdi.print(); printf("\n");
    printf("rsi  = "); _rsi.print(); printf("\n");
    printf("rbp,  = "); _rbp.print(); printf("\n");
    printf("rsp  = "); _rsp.print(); printf("\n");
    printf("\n");
    // control registers
    printf("flgs = "); _eflags.print(); printf("\n");
  }
};


class CPU_State {
 public:
  FPU_State _fpu_state;
  IU_State  _iu_state;

  void print() const {
    printf("--------------------------------------------------\n");
    _iu_state .print();
    printf("\n");
    _fpu_state.print();
    printf("--------------------------------------------------\n");
  }

};


static void _print_CPU_state(CPU_State* state) {
  state->print();
};


void MacroAssembler::print_CPU_state() {
  push_CPU_state();
  push(rsp);                // pass CPU state
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
  addptr(rsp, wordSize);    // discard argument
  pop_CPU_state();
}
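// Layout note (assumed, not asserted anywhere here): print_CPU_state()
// passes the post-push_CPU_state() rsp as the CPU_State*, so the struct
// overlays the saved context directly: the x87 save area written last
// sits at the lowest addresses (_fpu_state first), followed by _eflags
// and then the integer registers in pusha order, EDI lowest and EAX
// highest, which is exactly the field order of IU_State above.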
static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) {
  static int counter = 0;
  FPU_State* fs = &state->_fpu_state;
  counter++;
  // For leaf calls, only verify that the top few elements remain empty.
  // We only need 1 empty at the top for C2 code.
  if( stack_depth < 0 ) {
    if( fs->tag_for_st(7) != 3 ) {
      printf("FPR7 not empty\n");
      state->print();
      assert(false, "error");
      return false;
    }
    return true;                // All other stack states do not matter
  }

  assert((fs->_control_word._value & 0xffff) == StubRoutines::_fpu_cntrl_wrd_std,
         "bad FPU control word");

  // compute stack depth
  int i = 0;
  while (i < FPU_State::number_of_registers && fs->tag_for_st(i)  < 3) i++;
  int d = i;
  while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++;
  // verify findings
  if (i != FPU_State::number_of_registers) {
    // stack not contiguous
    printf("%s: stack not contiguous at ST%d\n", s, i);
    state->print();
    assert(false, "error");
    return false;
  }
  // check if computed stack depth corresponds to expected stack depth
  if (stack_depth < 0) {
    // expected stack depth is -stack_depth or less
    if (d > -stack_depth) {
      // too many elements on the stack
      printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d);
      state->print();
      assert(false, "error");
      return false;
    }
  } else {
    // expected stack depth is stack_depth
    if (d != stack_depth) {
      // wrong stack depth
      printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d);
      state->print();
      assert(false, "error");
      return false;
    }
  }
  // everything is cool
  return true;
}


void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
  if (!VerifyFPU) return;
  push_CPU_state();
  push(rsp);                // pass CPU state
  ExternalAddress msg((address) s);
  // pass message string s
  pushptr(msg.addr());
  push(stack_depth);        // pass stack depth
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
  addptr(rsp, 3 * wordSize);   // discard arguments
  // check for error
  { Label L;
    testl(rax, rax);
    jcc(Assembler::notZero, L);
    int3();                  // break if error condition
    bind(L);
  }
  pop_CPU_state();
}
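// Worked example for _verify_FPU(): with exactly two values on the x87
// stack, tag_for_st(0..1) < 3 (non-empty) and tag_for_st(2..7) == 3
// (empty), so the first loop stops at i == 2 giving d == 2 and the second
// loop runs i up to 8 (stack contiguous). The depth check then passes
// only for stack_depth == 2, or for stack_depth <= -2 in the
// "at most -stack_depth elements" form.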
void MacroAssembler::restore_cpu_control_state_after_jni() {
  // Either restore the MXCSR register after returning from the JNI Call
  // or verify that it wasn't changed (with -Xcheck:jni flag).
  if (VM_Version::supports_sse()) {
    if (RestoreMXCSROnJNICalls) {
      ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
    } else if (CheckJNICalls) {
      call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
    }
  }
  // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
  vzeroupper();
  // Reset k1 to 0xffff.

#ifdef COMPILER2
  if (PostLoopMultiversioning && VM_Version::supports_evex()) {
    push(rcx);
    movl(rcx, 0xffff);
    kmovwl(k1, rcx);
    pop(rcx);
  }
#endif // COMPILER2

#ifndef _LP64
  // Either restore the x87 floating point control word after returning
  // from the JNI call or verify that it wasn't changed.
  if (CheckJNICalls) {
    call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
  }
#endif // _LP64
}

// ((OopHandle)result).resolve();
void MacroAssembler::resolve_oop_handle(Register result, Register tmp) {
  assert_different_registers(result, tmp);

  // Only 64 bit platforms support GCs that require a tmp register
  // Only IN_HEAP loads require a thread_tmp register
  // OopHandle::resolve is an indirection like jobject.
  access_load_at(T_OBJECT, IN_NATIVE,
                 result, Address(result, 0), tmp, /*tmp_thread*/noreg);
}

// ((WeakHandle)result).resolve();
void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) {
  assert_different_registers(rresult, rtmp);
  Label resolved;

  // A null weak handle resolves to null.
  cmpptr(rresult, 0);
  jcc(Assembler::equal, resolved);

  // Only 64 bit platforms support GCs that require a tmp register
  // Only IN_HEAP loads require a thread_tmp register
  // WeakHandle::resolve is an indirection like jweak.
  access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
                 rresult, Address(rresult, 0), rtmp, /*tmp_thread*/noreg);
  bind(resolved);
}

void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) {
  // get mirror
  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  load_method_holder(mirror, method);
  movptr(mirror, Address(mirror, mirror_offset));
  resolve_oop_handle(mirror, tmp);
}

void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) {
  load_method_holder(rresult, rmethod);
  movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset()));
}

void MacroAssembler::load_method_holder(Register holder, Register method) {
  movptr(holder, Address(method, Method::const_offset()));                      // ConstMethod*
  movptr(holder, Address(holder, ConstMethod::constants_offset()));             // ConstantPool*
  movptr(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes())); // InstanceKlass*
}

void MacroAssembler::load_klass(Register dst, Register src) {
#ifdef _LP64
  if (UseCompressedClassPointers) {
    movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    decode_klass_not_null(dst);
  } else
#endif
    movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
}

void MacroAssembler::load_prototype_header(Register dst, Register src) {
  load_klass(dst, src);
  movptr(dst, Address(dst, Klass::prototype_header_offset()));
}

void MacroAssembler::store_klass(Register dst, Register src) {
#ifdef _LP64
  if (UseCompressedClassPointers) {
    encode_klass_not_null(src);
    movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
  } else
#endif
    movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
}

void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                                    Register tmp1, Register thread_tmp) {
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  decorators = AccessInternal::decorator_fixup(decorators);
  bool as_raw = (decorators & AS_RAW) != 0;
  if (as_raw) {
    bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
  } else {
    bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
  }
}
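// Note on the dispatch above: the qualified call
// bs->BarrierSetAssembler::load_at(...) binds statically to the raw
// base-class implementation (plain loads with no GC barriers), while the
// virtual bs->load_at(...) lets the active collector's BarrierSetAssembler
// subclass wrap the access with whatever pre/post barriers it needs.
// access_store_at() below follows the same pattern.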
void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
                                     Register tmp1, Register tmp2) {
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  decorators = AccessInternal::decorator_fixup(decorators);
  bool as_raw = (decorators & AS_RAW) != 0;
  if (as_raw) {
    bs->BarrierSetAssembler::store_at(this, decorators, type, dst, src, tmp1, tmp2);
  } else {
    bs->store_at(this, decorators, type, dst, src, tmp1, tmp2);
  }
}

void MacroAssembler::resolve(DecoratorSet decorators, Register obj) {
  // Use stronger ACCESS_WRITE|ACCESS_READ by default.
  if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
    decorators |= ACCESS_READ | ACCESS_WRITE;
  }
  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  return bs->resolve(this, decorators, obj);
}

void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
                                   Register thread_tmp, DecoratorSet decorators) {
  access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
}

// Doesn't do verification, generates fixed size code
void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1,
                                            Register thread_tmp, DecoratorSet decorators) {
  access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp);
}

void MacroAssembler::store_heap_oop(Address dst, Register src, Register tmp1,
                                    Register tmp2, DecoratorSet decorators) {
  access_store_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2);
}

// Used for storing NULLs.
void MacroAssembler::store_heap_oop_null(Address dst) {
  access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg);
}

#ifdef _LP64
void MacroAssembler::store_klass_gap(Register dst, Register src) {
  if (UseCompressedClassPointers) {
    // Store to klass gap in destination
    movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
  }
}

#ifdef ASSERT
void MacroAssembler::verify_heapbase(const char* msg) {
  assert (UseCompressedOops, "should be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  if (CheckCompressedOops) {
    Label ok;
    push(rscratch1); // cmpptr trashes rscratch1
    cmpptr(r12_heapbase, ExternalAddress((address)CompressedOops::ptrs_base_addr()));
    jcc(Assembler::equal, ok);
    STOP(msg);
    bind(ok);
    pop(rscratch1);
  }
}
#endif
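// Illustrative use of the heap-oop accessors above (hypothetical
// registers and offset, shown for orientation only):
//
//   __ load_heap_oop(rax, Address(rdx, obj_field_off), rscratch1);
//   __ store_heap_oop_null(Address(rdx, obj_field_off));
//
// Both calls funnel through access_load_at()/access_store_at() with
// IN_HEAP decorators, so compressed-oops encoding and GC barriers are
// applied (or skipped) according to the active barrier set.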
// Algorithm must match oop.inline.hpp encode_heap_oop.
void MacroAssembler::encode_heap_oop(Register r) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
#endif
  verify_oop(r, "broken oop in encode_heap_oop");
  if (CompressedOops::base() == NULL) {
    if (CompressedOops::shift() != 0) {
      assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
      shrq(r, LogMinObjAlignmentInBytes);
    }
    return;
  }
  testq(r, r);
  cmovq(Assembler::equal, r, r12_heapbase);
  subq(r, r12_heapbase);
  shrq(r, LogMinObjAlignmentInBytes);
}

void MacroAssembler::encode_heap_oop_not_null(Register r) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
  if (CheckCompressedOops) {
    Label ok;
    testq(r, r);
    jcc(Assembler::notEqual, ok);
    STOP("null oop passed to encode_heap_oop_not_null");
    bind(ok);
  }
#endif
  verify_oop(r, "broken oop in encode_heap_oop_not_null");
  if (CompressedOops::base() != NULL) {
    subq(r, r12_heapbase);
  }
  if (CompressedOops::shift() != 0) {
    assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
    shrq(r, LogMinObjAlignmentInBytes);
  }
}

void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
  if (CheckCompressedOops) {
    Label ok;
    testq(src, src);
    jcc(Assembler::notEqual, ok);
    STOP("null oop passed to encode_heap_oop_not_null2");
    bind(ok);
  }
#endif
  verify_oop(src, "broken oop in encode_heap_oop_not_null2");
  if (dst != src) {
    movq(dst, src);
  }
  if (CompressedOops::base() != NULL) {
    subq(dst, r12_heapbase);
  }
  if (CompressedOops::shift() != 0) {
    assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
    shrq(dst, LogMinObjAlignmentInBytes);
  }
}

void MacroAssembler::decode_heap_oop(Register r) {
#ifdef ASSERT
  verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
#endif
  if (CompressedOops::base() == NULL) {
    if (CompressedOops::shift() != 0) {
      assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
      shlq(r, LogMinObjAlignmentInBytes);
    }
  } else {
    Label done;
    shlq(r, LogMinObjAlignmentInBytes);
    jccb(Assembler::equal, done);
    addq(r, r12_heapbase);
    bind(done);
  }
  verify_oop(r, "broken oop in decode_heap_oop");
}
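// Worked example (illustrative numbers): with CompressedOops::base() ==
// 0x0000000800000000 and shift == 3, encode_heap_oop() maps the oop
// 0x0000000800001238 to (oop - base) >> 3 == 0x247; a NULL oop is first
// cmov'ed to the heap base so the subtraction yields 0, keeping NULL == 0
// in narrow form. decode_heap_oop() inverts this as (narrow << 3) + base,
// where the jccb(equal) skips the base add when the shifted value is zero.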
void MacroAssembler::decode_heap_oop_not_null(Register r) {
  // Note: it will change flags
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  if (CompressedOops::shift() != 0) {
    assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
    shlq(r, LogMinObjAlignmentInBytes);
    if (CompressedOops::base() != NULL) {
      addq(r, r12_heapbase);
    }
  } else {
    assert (CompressedOops::base() == NULL, "sanity");
  }
}

void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
  // Note: it will change flags
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  if (CompressedOops::shift() != 0) {
    assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong");
    if (LogMinObjAlignmentInBytes == Address::times_8) {
      leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
    } else {
      if (dst != src) {
        movq(dst, src);
      }
      shlq(dst, LogMinObjAlignmentInBytes);
      if (CompressedOops::base() != NULL) {
        addq(dst, r12_heapbase);
      }
    }
  } else {
    assert (CompressedOops::base() == NULL, "sanity");
    if (dst != src) {
      movq(dst, src);
    }
  }
}

void MacroAssembler::encode_klass_not_null(Register r) {
  if (CompressedKlassPointers::base() != NULL) {
    // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
    assert(r != r12_heapbase, "Encoding a klass in r12");
    mov64(r12_heapbase, (int64_t)CompressedKlassPointers::base());
    subq(r, r12_heapbase);
  }
  if (CompressedKlassPointers::shift() != 0) {
    assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
    shrq(r, LogKlassAlignmentInBytes);
  }
  if (CompressedKlassPointers::base() != NULL) {
    reinit_heapbase();
  }
}

void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
  if (dst == src) {
    encode_klass_not_null(src);
  } else {
    if (CompressedKlassPointers::base() != NULL) {
      mov64(dst, (int64_t)CompressedKlassPointers::base());
      negq(dst);
      addq(dst, src);
    } else {
      movptr(dst, src);
    }
    if (CompressedKlassPointers::shift() != 0) {
      assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
      shrq(dst, LogKlassAlignmentInBytes);
    }
  }
}

// Function instr_size_for_decode_klass_not_null() counts the instructions
// generated by decode_klass_not_null(register r) and reinit_heapbase(),
// when (Universe::heap() != NULL). Hence, if the instructions they
// generate change, then this method needs to be updated.
int MacroAssembler::instr_size_for_decode_klass_not_null() {
  assert (UseCompressedClassPointers, "only for compressed klass ptrs");
  if (CompressedKlassPointers::base() != NULL) {
    // mov64 + addq + shlq? + mov64 (for reinit_heapbase()).
    return (CompressedKlassPointers::shift() == 0 ? 20 : 24);
  } else {
    // longest load decode klass function, mov64, leaq
    return 16;
  }
}
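// Note: the 20/24 byte answers above budget for mov64 + addq + mov64
// (the reinit_heapbase() reload of r12), plus four more bytes when a shlq
// is needed for a non-zero shift. Platform .ad files size unverified
// entry points with this value, which is why the warning below must be
// honored whenever decode_klass_not_null() changes.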
// !!! If the instructions that get generated here change then function
// instr_size_for_decode_klass_not_null() needs to get updated.
void MacroAssembler::decode_klass_not_null(Register r) {
  // Note: it will change flags
  assert (UseCompressedClassPointers, "should only be used for compressed headers");
  assert(r != r12_heapbase, "Decoding a klass in r12");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  if (CompressedKlassPointers::shift() != 0) {
    assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
    shlq(r, LogKlassAlignmentInBytes);
  }
  // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
  if (CompressedKlassPointers::base() != NULL) {
    mov64(r12_heapbase, (int64_t)CompressedKlassPointers::base());
    addq(r, r12_heapbase);
    reinit_heapbase();
  }
}

void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
  // Note: it will change flags
  assert (UseCompressedClassPointers, "should only be used for compressed headers");
  if (dst == src) {
    decode_klass_not_null(dst);
  } else {
    // Cannot assert, unverified entry point counts instructions (see .ad file)
    // vtableStubs also counts instructions in pd_code_size_limit.
    // Also do not verify_oop as this is called by verify_oop.
    mov64(dst, (int64_t)CompressedKlassPointers::base());
    if (CompressedKlassPointers::shift() != 0) {
      assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
      assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
      leaq(dst, Address(dst, src, Address::times_8, 0));
    } else {
      addq(dst, src);
    }
  }
}

void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  mov_narrow_oop(dst, oop_index, rspec);
}

void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  mov_narrow_oop(dst, oop_index, rspec);
}

void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
  assert (UseCompressedClassPointers, "should only be used for compressed headers");
  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
}
void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
  assert (UseCompressedClassPointers, "should only be used for compressed headers");
  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
}

void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  Assembler::cmp_narrow_oop(dst, oop_index, rspec);
}

void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  Assembler::cmp_narrow_oop(dst, oop_index, rspec);
}

void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
  assert (UseCompressedClassPointers, "should only be used for compressed headers");
  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
}

void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
  assert (UseCompressedClassPointers, "should only be used for compressed headers");
  assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec);
}

void MacroAssembler::reinit_heapbase() {
  if (UseCompressedOops || UseCompressedClassPointers) {
    if (Universe::heap() != NULL) {
      if (CompressedOops::base() == NULL) {
        MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
      } else {
        mov64(r12_heapbase, (int64_t)CompressedOops::ptrs_base());
      }
    } else {
      movptr(r12_heapbase, ExternalAddress((address)CompressedOops::ptrs_base_addr()));
    }
  }
}

#endif // _LP64

// C2 compiled method's prolog code.
void MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b, bool is_stub) {

  // WARNING: Initial instruction MUST be 5 bytes or longer so that
  // NativeJump::patch_verified_entry will be able to patch out the entry
  // code safely. The push to verify stack depth is ok at 5 bytes,
  // the frame allocation can be either 3 or 6 bytes. So if we don't do
  // stack bang then we must use the 6 byte frame allocation even if
  // we have no frame. :-(
  assert(stack_bang_size >= framesize || stack_bang_size <= 0, "stack bang size incorrect");

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
  // Remove word for return addr
  framesize -= wordSize;
  stack_bang_size -= wordSize;

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (stack_bang_size > 0) {
    generate_stack_overflow_check(stack_bang_size);

    // We always push rbp, so that on return to interpreter rbp, will be
    // restored correctly and we can correct the stack.
    push(rbp);
    // Save caller's stack pointer into RBP if the frame pointer is preserved.
    if (PreserveFramePointer) {
      mov(rbp, rsp);
    }
    // Remove word for ebp
    framesize -= wordSize;

    // Create frame
    if (framesize) {
      subptr(rsp, framesize);
    }
  } else {
    // Create frame (force generation of a 4 byte immediate value)
    subptr_imm32(rsp, framesize);

    // Save RBP register now.
    framesize -= wordSize;
    movptr(Address(rsp, framesize), rbp);
    // Save caller's stack pointer into RBP if the frame pointer is preserved.
    if (PreserveFramePointer) {
      movptr(rbp, rsp);
      if (framesize > 0) {
        addptr(rbp, framesize);
      }
    }
  }

  if (VerifyStackAtCalls) { // Majik cookie to verify stack depth
    framesize -= wordSize;
    movptr(Address(rsp, framesize), (int32_t)0xbadb100d);
  }

#ifndef _LP64
  // If method sets FPU control word do it now
  if (fp_mode_24b) {
    fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
  }
  if (UseSSE >= 2 && VerifyFPU) {
    verify_FPU(0, "FPU stack must be clean on entry");
  }
#endif

#ifdef ASSERT
  if (VerifyStackAtCalls) {
    Label L;
    push(rax);
    mov(rax, rsp);
    andptr(rax, StackAlignmentInBytes-1);
    cmpptr(rax, StackAlignmentInBytes-wordSize);
    pop(rax);
    jcc(Assembler::equal, L);
    STOP("Stack is not properly aligned!");
    bind(L);
  }
#endif

  if (!is_stub) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->nmethod_entry_barrier(this);
  }
}
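// Resulting C2 frame sketch (stack grows downward, one word per slot):
//
//   [ return PC      ]  <- caller's rsp before the call
//   [ saved rbp      ]
//   [ 0xbadb100d     ]  (only with VerifyStackAtCalls)
//   [ spills/locals  ]  ... framesize bytes below the return PC in total
//                       <- rsp after the prolog
//
// With PreserveFramePointer, rbp is left pointing at the saved-rbp slot
// so it can serve as a conventional frame base.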
// clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
void MacroAssembler::xmm_clear_mem(Register base, Register cnt, XMMRegister xtmp) {
  // cnt - number of qwords (8-byte words).
  // base - start address, qword aligned.
  Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
  if (UseAVX >= 2) {
    vpxor(xtmp, xtmp, xtmp, AVX_256bit);
  } else {
    pxor(xtmp, xtmp);
  }
  jmp(L_zero_64_bytes);

  BIND(L_loop);
  if (UseAVX >= 2) {
    vmovdqu(Address(base,  0), xtmp);
    vmovdqu(Address(base, 32), xtmp);
  } else {
    movdqu(Address(base,  0), xtmp);
    movdqu(Address(base, 16), xtmp);
    movdqu(Address(base, 32), xtmp);
    movdqu(Address(base, 48), xtmp);
  }
  addptr(base, 64);

  BIND(L_zero_64_bytes);
  subptr(cnt, 8);
  jccb(Assembler::greaterEqual, L_loop);
  addptr(cnt, 4);
  jccb(Assembler::less, L_tail);
  // Copy trailing 32 bytes
  if (UseAVX >= 2) {
    vmovdqu(Address(base, 0), xtmp);
  } else {
    movdqu(Address(base,  0), xtmp);
    movdqu(Address(base, 16), xtmp);
  }
  addptr(base, 32);
  subptr(cnt, 4);

  BIND(L_tail);
  addptr(cnt, 4);
  jccb(Assembler::lessEqual, L_end);
  decrement(cnt);

  BIND(L_sloop);
  movq(Address(base, 0), xtmp);
  addptr(base, 8);
  decrement(cnt);
  jccb(Assembler::greaterEqual, L_sloop);
  BIND(L_end);
}

void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp, XMMRegister xtmp, bool is_large) {
  // cnt      - number of qwords (8-byte words).
  // base     - start address, qword aligned.
  // is_large - if optimizers know cnt is larger than InitArrayShortSize
  assert(base==rdi, "base register must be edi for rep stos");
  assert(tmp==rax,  "tmp register must be eax for rep stos");
  assert(cnt==rcx,  "cnt register must be ecx for rep stos");
  assert(InitArrayShortSize % BytesPerLong == 0,
         "InitArrayShortSize should be the multiple of BytesPerLong");

  Label DONE;

  if (!is_large || !UseXMMForObjInit) {
    xorptr(tmp, tmp);
  }

  if (!is_large) {
    Label LOOP, LONG;
    cmpptr(cnt, InitArrayShortSize/BytesPerLong);
    jccb(Assembler::greater, LONG);

    NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM

    decrement(cnt);
    jccb(Assembler::negative, DONE); // Zero length

    // Use individual pointer-sized stores for small counts:
    BIND(LOOP);
    movptr(Address(base, cnt, Address::times_ptr), tmp);
    decrement(cnt);
    jccb(Assembler::greaterEqual, LOOP);
    jmpb(DONE);

    BIND(LONG);
  }

  // Use longer rep-prefixed ops for non-small counts:
  if (UseFastStosb) {
    shlptr(cnt, 3); // convert to number of bytes
    rep_stosb();
  } else if (UseXMMForObjInit) {
    movptr(tmp, base);
    xmm_clear_mem(tmp, cnt, xtmp);
  } else {
    NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
    rep_stos();
  }

  BIND(DONE);
}
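// Illustrative call (values hypothetical): zeroing a freshly allocated
// object body, honoring the rep-stos register contract asserted above:
//
//   // rdi = base address, rcx = qword count, rax = tmp
//   __ clear_mem(rdi, rcx, rax, xmm0, /*is_large*/ false);
//
// Small counts take the pointer-store loop; larger ones use rep stos or
// the XMM/YMM loop depending on UseFastStosb/UseXMMForObjInit.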
#ifdef COMPILER2

// IndexOf for constant substrings with size >= 8 chars
// which don't need to be loaded through stack.
void MacroAssembler::string_indexofC8(Register str1, Register str2,
                                      Register cnt1, Register cnt2,
                                      int int_cnt2,  Register result,
                                      XMMRegister vec, Register tmp,
                                      int ae) {
  ShortBranchVerifier sbv(this);
  assert(UseSSE42Intrinsics, "SSE4.2 intrinsics are required");
  assert(ae != StrIntrinsicNode::LU, "Invalid encoding");

  // This method uses the pcmpestri instruction with bound registers
  //   inputs:
  //     xmm - substring
  //     rax - substring length (elements count)
  //     mem - scanned string
  //     rdx - string length (elements count)
  //     0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
  //     0xc - mode: 1100 (substring search) + 00 (unsigned bytes)
  //   outputs:
  //     rcx - matched index in string
  assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
  int mode   = (ae == StrIntrinsicNode::LL) ? 0x0c : 0x0d; // bytes or shorts
  int stride = (ae == StrIntrinsicNode::LL) ? 16 : 8; //UU, UL -> 8
  Address::ScaleFactor scale1 = (ae == StrIntrinsicNode::LL) ? Address::times_1 : Address::times_2;
  Address::ScaleFactor scale2 = (ae == StrIntrinsicNode::UL) ? Address::times_1 : scale1;

  Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR,
        RET_FOUND, RET_NOT_FOUND, EXIT, FOUND_SUBSTR,
        MATCH_SUBSTR_HEAD, RELOAD_STR, FOUND_CANDIDATE;

  // Note, inline_string_indexOf() generates checks:
  // if (substr.count > string.count) return -1;
  // if (substr.count == 0) return 0;
  assert(int_cnt2 >= stride, "this code is used only for cnt2 >= 8 chars");

  // Load substring.
  if (ae == StrIntrinsicNode::UL) {
    pmovzxbw(vec, Address(str2, 0));
  } else {
    movdqu(vec, Address(str2, 0));
  }
  movl(cnt2, int_cnt2);
  movptr(result, str1); // string addr

  if (int_cnt2 > stride) {
    jmpb(SCAN_TO_SUBSTR);

    // Reload substr for rescan, this code
    // is executed only for large substrings (> 8 chars)
    bind(RELOAD_SUBSTR);
    if (ae == StrIntrinsicNode::UL) {
      pmovzxbw(vec, Address(str2, 0));
    } else {
      movdqu(vec, Address(str2, 0));
    }
    negptr(cnt2); // Jumped here with negative cnt2, convert to positive

    bind(RELOAD_STR);
    // We came here after the beginning of the substring was
    // matched but the rest of it was not so we need to search
    // again. Start from the next element after the previous match.

    // cnt2 is the number of remaining substring elements and
    // cnt1 is the number of remaining string elements when cmp failed.
    // Restored cnt1 = cnt1 - cnt2 + int_cnt2
    subl(cnt1, cnt2);
    addl(cnt1, int_cnt2);
    movl(cnt2, int_cnt2); // Now restore cnt2

    decrementl(cnt1);     // Shift to next element
    cmpl(cnt1, cnt2);
    jcc(Assembler::negative, RET_NOT_FOUND);  // Fewer elements left than substring

    addptr(result, (1<<scale1));

  } // (int_cnt2 > 8)

  // Scan string for start of substr in 16-byte vectors
  bind(SCAN_TO_SUBSTR);
  pcmpestri(vec, Address(result, 0), mode);
  jccb(Assembler::below, FOUND_CANDIDATE);   // CF == 1
  subl(cnt1, stride);
  jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
  cmpl(cnt1, cnt2);
  jccb(Assembler::negative, RET_NOT_FOUND);  // Fewer elements left than substring
  addptr(result, 16);
  jmpb(SCAN_TO_SUBSTR);

  // Found a potential substr
  bind(FOUND_CANDIDATE);
  // Matched whole vector if first element matched (tmp(rcx) == 0).
  if (int_cnt2 == stride) {
    jccb(Assembler::overflow, RET_FOUND);    // OF == 1
  } else { // int_cnt2 > 8
    jccb(Assembler::overflow, FOUND_SUBSTR);
  }
  // After pcmpestri tmp(rcx) contains matched element index
  // Compute start addr of substr
  lea(result, Address(result, tmp, scale1));

  // Make sure string is still long enough
  subl(cnt1, tmp);
  cmpl(cnt1, cnt2);
  if (int_cnt2 == stride) {
    jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
  } else { // int_cnt2 > 8
    jccb(Assembler::greaterEqual, MATCH_SUBSTR_HEAD);
  }
  // Fewer elements left than substring.

  bind(RET_NOT_FOUND);
  movl(result, -1);
  jmp(EXIT);

  if (int_cnt2 > stride) {
    // This code is optimized for the case when whole substring
    // is matched if its head is matched.
    bind(MATCH_SUBSTR_HEAD);
    pcmpestri(vec, Address(result, 0), mode);
    // Reload only string if does not match
    jcc(Assembler::noOverflow, RELOAD_STR); // OF == 0

    Label CONT_SCAN_SUBSTR;
    // Compare the rest of substring (> 8 chars).
    bind(FOUND_SUBSTR);
    // First 8 chars are already matched.
    negptr(cnt2);
    addptr(cnt2, stride);

    bind(SCAN_SUBSTR);
    subl(cnt1, stride);
    cmpl(cnt2, -stride); // Do not read beyond substring
    jccb(Assembler::lessEqual, CONT_SCAN_SUBSTR);
    // Back-up strings to avoid reading beyond substring:
    // cnt1 = cnt1 - cnt2 + 8
    addl(cnt1, cnt2); // cnt2 is negative
    addl(cnt1, stride);
    movl(cnt2, stride); negptr(cnt2);
    bind(CONT_SCAN_SUBSTR);
    if (int_cnt2 < (int)G) {
      int tail_off1 = int_cnt2<<scale1;
      int tail_off2 = int_cnt2<<scale2;
      if (ae == StrIntrinsicNode::UL) {
        pmovzxbw(vec, Address(str2, cnt2, scale2, tail_off2));
      } else {
        movdqu(vec, Address(str2, cnt2, scale2, tail_off2));
      }
      pcmpestri(vec, Address(result, cnt2, scale1, tail_off1), mode);
    } else {
      // calculate index in register to avoid integer overflow (int_cnt2*2)
      movl(tmp, int_cnt2);
      addptr(tmp, cnt2);
      if (ae == StrIntrinsicNode::UL) {
        pmovzxbw(vec, Address(str2, tmp, scale2, 0));
      } else {
        movdqu(vec, Address(str2, tmp, scale2, 0));
      }
      pcmpestri(vec, Address(result, tmp, scale1, 0), mode);
    }
    // Need to reload strings pointers if not matched whole vector
    jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
    addptr(cnt2, stride);
    jcc(Assembler::negative, SCAN_SUBSTR);
    // Fall through if found full substring

  } // (int_cnt2 > 8)

  bind(RET_FOUND);
  // Found result if we matched full small substring.
  // Compute substr offset
  subptr(result, str1);
  if (ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::UL) {
    shrl(result, 1); // index
  }
  bind(EXIT);

} // string_indexofC8
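// pcmpestri recap for the code above: imm8 0x0d selects "equal ordered"
// aggregation (substring search) over unsigned 16-bit elements, 0x0c the
// same over unsigned bytes. After the instruction, CF == 1 means some
// candidate match begins in the vector and rcx holds its element index;
// OF == 1 means the match begins at element 0, i.e. the needle matched at
// the current scan position.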
// Small strings are loaded through stack if they cross page boundary.
void MacroAssembler::string_indexof(Register str1, Register str2,
                                    Register cnt1, Register cnt2,
                                    int int_cnt2,  Register result,
                                    XMMRegister vec, Register tmp,
                                    int ae) {
  ShortBranchVerifier sbv(this);
  assert(UseSSE42Intrinsics, "SSE4.2 intrinsics are required");
  assert(ae != StrIntrinsicNode::LU, "Invalid encoding");

  //
  // int_cnt2 is length of small (< 8 chars) constant substring
  // or (-1) for non constant substring in which case its length
  // is in cnt2 register.
  //
  // Note, inline_string_indexOf() generates checks:
  // if (substr.count > string.count) return -1;
  // if (substr.count == 0) return 0;
  //
  int stride = (ae == StrIntrinsicNode::LL) ? 16 : 8; //UU, UL -> 8
  assert(int_cnt2 == -1 || (0 < int_cnt2 && int_cnt2 < stride), "should be != 0");
  // This method uses the pcmpestri instruction with bound registers
  //   inputs:
  //     xmm - substring
  //     rax - substring length (elements count)
  //     mem - scanned string
  //     rdx - string length (elements count)
  //     0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
  //     0xc - mode: 1100 (substring search) + 00 (unsigned bytes)
  //   outputs:
  //     rcx - matched index in string
  assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
  int mode = (ae == StrIntrinsicNode::LL) ? 0x0c : 0x0d; // bytes or shorts
  Address::ScaleFactor scale1 = (ae == StrIntrinsicNode::LL) ? Address::times_1 : Address::times_2;
  Address::ScaleFactor scale2 = (ae == StrIntrinsicNode::UL) ? Address::times_1 : scale1;

  Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR, ADJUST_STR,
        RET_FOUND, RET_NOT_FOUND, CLEANUP, FOUND_SUBSTR,
        FOUND_CANDIDATE;

  { //========================================================
    // We don't know where these strings are located
    // and we can't read beyond them. Load them through stack.
    Label BIG_STRINGS, CHECK_STR, COPY_SUBSTR, COPY_STR;

    movptr(tmp, rsp); // save old SP

    if (int_cnt2 > 0) {     // small (< 8 chars) constant substring
      if (int_cnt2 == (1>>scale2)) { // One byte
        assert((ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UL), "Only possible for latin1 encoding");
        load_unsigned_byte(result, Address(str2, 0));
        movdl(vec, result); // move 32 bits
      } else if (ae == StrIntrinsicNode::LL && int_cnt2 == 3) {  // Three bytes
        // Not enough header space in 32-bit VM: 12+3 = 15.
        movl(result, Address(str2, -1));
        shrl(result, 8);
        movdl(vec, result); // move 32 bits
      } else if (ae != StrIntrinsicNode::UL && int_cnt2 == (2>>scale2)) {  // One char
        load_unsigned_short(result, Address(str2, 0));
        movdl(vec, result); // move 32 bits
      } else if (ae != StrIntrinsicNode::UL && int_cnt2 == (4>>scale2)) { // Two chars
        movdl(vec, Address(str2, 0)); // move 32 bits
      } else if (ae != StrIntrinsicNode::UL && int_cnt2 == (8>>scale2)) { // Four chars
        movq(vec, Address(str2, 0));  // move 64 bits
      } else { // cnt2 = { 3, 5, 6, 7 } || (ae == StrIntrinsicNode::UL && cnt2 = {2, ..., 7})
        // Array header size is 12 bytes in 32-bit VM
        // + 6 bytes for 3 chars == 18 bytes,
        // enough space to load vec and shift.
        assert(HeapWordSize*TypeArrayKlass::header_size() >= 12, "sanity");
        if (ae == StrIntrinsicNode::UL) {
          int tail_off = int_cnt2-8;
          pmovzxbw(vec, Address(str2, tail_off));
          psrldq(vec, -2*tail_off);
        }
        else {
          int tail_off = int_cnt2*(1<<scale2);
          movdqu(vec, Address(str2, tail_off-16));
          psrldq(vec, 16-tail_off);
        }
      }
    } else { // not constant substring
      cmpl(cnt2, stride);
      jccb(Assembler::aboveEqual, BIG_STRINGS); // Both strings are big enough

      // We can read beyond string if str+16 does not cross page boundary
      // since heaps are aligned and mapped by pages.
      assert(os::vm_page_size() < (int)G, "default page should be small");
      movl(result, str2); // We need only low 32 bits
      andl(result, (os::vm_page_size()-1));
      cmpl(result, (os::vm_page_size()-16));
      jccb(Assembler::belowEqual, CHECK_STR);

      // Move small strings to stack to allow loading 16 bytes into vec.
      subptr(rsp, 16);
      int stk_offset = wordSize-(1<<scale2);
      push(cnt2);

      bind(COPY_SUBSTR);
      if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UL) {
        load_unsigned_byte(result, Address(str2, cnt2, scale2, -1));
        movb(Address(rsp, cnt2, scale2, stk_offset), result);
      } else if (ae == StrIntrinsicNode::UU) {
        load_unsigned_short(result, Address(str2, cnt2, scale2, -2));
        movw(Address(rsp, cnt2, scale2, stk_offset), result);
      }
      decrement(cnt2);
      jccb(Assembler::notZero, COPY_SUBSTR);

      pop(cnt2);
      movptr(str2, rsp);  // New substring address
    } // non constant

    bind(CHECK_STR);
    cmpl(cnt1, stride);
    jccb(Assembler::aboveEqual, BIG_STRINGS);

    // Check cross page boundary.
    movl(result, str1); // We need only low 32 bits
    andl(result, (os::vm_page_size()-1));
    cmpl(result, (os::vm_page_size()-16));
    jccb(Assembler::belowEqual, BIG_STRINGS);

    subptr(rsp, 16);
    int stk_offset = -(1<<scale1);
    if (int_cnt2 < 0) { // not constant
      push(cnt2);
      stk_offset += wordSize;
    }
    movl(cnt2, cnt1);

    bind(COPY_STR);
    if (ae == StrIntrinsicNode::LL) {
      load_unsigned_byte(result, Address(str1, cnt2, scale1, -1));
      movb(Address(rsp, cnt2, scale1, stk_offset), result);
    } else {
      load_unsigned_short(result, Address(str1, cnt2, scale1, -2));
      movw(Address(rsp, cnt2, scale1, stk_offset), result);
    }
    decrement(cnt2);
    jccb(Assembler::notZero, COPY_STR);

    if (int_cnt2 < 0) { // not constant
      pop(cnt2);
    }
    movptr(str1, rsp);  // New string address

    bind(BIG_STRINGS);
    // Load substring.
    if (int_cnt2 < 0) { // -1
      if (ae == StrIntrinsicNode::UL) {
        pmovzxbw(vec, Address(str2, 0));
      } else {
        movdqu(vec, Address(str2, 0));
      }
      push(cnt2);       // substr count
      push(str2);       // substr addr
      push(str1);       // string addr
    } else {
      // Small (< 8 chars) constant substrings are loaded already.
      movl(cnt2, int_cnt2);
    }
    push(tmp);  // original SP

  } // Finished loading

  //========================================================
  // Start search
  //

  movptr(result, str1); // string addr

  if (int_cnt2 < 0) {  // Only for non constant substring
    jmpb(SCAN_TO_SUBSTR);

    // SP saved at sp+0
    // String saved at sp+1*wordSize
    // Substr saved at sp+2*wordSize
    // Substr count saved at sp+3*wordSize

    // Reload substr for rescan, this code
    // is executed only for large substrings (> 8 chars)
    bind(RELOAD_SUBSTR);
    movptr(str2, Address(rsp, 2*wordSize));
    movl(cnt2, Address(rsp, 3*wordSize));
    if (ae == StrIntrinsicNode::UL) {
      pmovzxbw(vec, Address(str2, 0));
    } else {
      movdqu(vec, Address(str2, 0));
    }
    // We came here after the beginning of the substring was
    // matched but the rest of it was not so we need to search
    // again. Start from the next element after the previous match.
    subptr(str1, result); // Restore counter
    if (ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::UL) {
      shrl(str1, 1);
    }
    addl(cnt1, str1);
    decrementl(cnt1);   // Shift to next element
    cmpl(cnt1, cnt2);
    jcc(Assembler::negative, RET_NOT_FOUND);  // Fewer elements left than substring

    addptr(result, (1<<scale1));
  } // non constant

  // Scan string for start of substr in 16-byte vectors
  bind(SCAN_TO_SUBSTR);
  assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
  pcmpestri(vec, Address(result, 0), mode);
  jccb(Assembler::below, FOUND_CANDIDATE);   // CF == 1
  subl(cnt1, stride);
  jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
  cmpl(cnt1, cnt2);
  jccb(Assembler::negative, RET_NOT_FOUND);  // Fewer elements left than substring
  addptr(result, 16);

  bind(ADJUST_STR);
  cmpl(cnt1, stride); // Do not read beyond string
  jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
  // Back-up string to avoid reading beyond string.
  lea(result, Address(result, cnt1, scale1, -16));
  movl(cnt1, stride);
  jmpb(SCAN_TO_SUBSTR);

  // Found a potential substr
  bind(FOUND_CANDIDATE);
  // After pcmpestri tmp(rcx) contains matched element index

  // Make sure string is still long enough
  subl(cnt1, tmp);
  cmpl(cnt1, cnt2);
  jccb(Assembler::greaterEqual, FOUND_SUBSTR);
  // Fewer elements left than substring.

  bind(RET_NOT_FOUND);
  movl(result, -1);
  jmp(CLEANUP);

  bind(FOUND_SUBSTR);
  // Compute start addr of substr
  lea(result, Address(result, tmp, scale1));
  if (int_cnt2 > 0) { // Constant substring
    // Repeat search for small substring (< 8 chars)
    // from new point without reloading substring.
    // Have to check that we don't read beyond string.
    cmpl(tmp, stride-int_cnt2);
    jccb(Assembler::greater, ADJUST_STR);
    // Fall through if matched whole substring.
  } else { // non constant
    assert(int_cnt2 == -1, "should be != 0");

    addl(tmp, cnt2);
    // Found result if we matched whole substring.
    cmpl(tmp, stride);
    jcc(Assembler::lessEqual, RET_FOUND);

    // Repeat search for small substring (<= 8 chars)
    // from new point 'str1' without reloading substring.
    cmpl(cnt2, stride);
    // Have to check that we don't read beyond string.
    jccb(Assembler::lessEqual, ADJUST_STR);

    Label CHECK_NEXT, CONT_SCAN_SUBSTR, RET_FOUND_LONG;
    // Compare the rest of substring (> 8 chars).
    movptr(str1, result);

    cmpl(tmp, cnt2);
    // First 8 chars are already matched.
    jccb(Assembler::equal, CHECK_NEXT);

    bind(SCAN_SUBSTR);
    pcmpestri(vec, Address(str1, 0), mode);
    // Need to reload strings pointers if not matched whole vector
    jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0

    bind(CHECK_NEXT);
    subl(cnt2, stride);
    jccb(Assembler::lessEqual, RET_FOUND_LONG); // Found full substring
    addptr(str1, 16);
    if (ae == StrIntrinsicNode::UL) {
      addptr(str2, 8);
    } else {
      addptr(str2, 16);
    }
    subl(cnt1, stride);
    cmpl(cnt2, stride); // Do not read beyond substring
    jccb(Assembler::greaterEqual, CONT_SCAN_SUBSTR);
    // Back-up strings to avoid reading beyond substring.

    if (ae == StrIntrinsicNode::UL) {
      lea(str2, Address(str2, cnt2, scale2, -8));
      lea(str1, Address(str1, cnt2, scale1, -16));
    } else {
      lea(str2, Address(str2, cnt2, scale2, -16));
      lea(str1, Address(str1, cnt2, scale1, -16));
    }
    subl(cnt1, cnt2);
    movl(cnt2, stride);
    addl(cnt1, stride);
    bind(CONT_SCAN_SUBSTR);
    if (ae == StrIntrinsicNode::UL) {
      pmovzxbw(vec, Address(str2, 0));
    } else {
      movdqu(vec, Address(str2, 0));
    }
    jmp(SCAN_SUBSTR);

    bind(RET_FOUND_LONG);
    movptr(str1, Address(rsp, wordSize));
  } // non constant

  bind(RET_FOUND);
  // Compute substr offset
  subptr(result, str1);
  if (ae == StrIntrinsicNode::UU || ae == StrIntrinsicNode::UL) {
    shrl(result, 1); // index
  }
  bind(CLEANUP);
  pop(rsp); // restore SP

} // string_indexof
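// Stack layout used by the non-constant path of string_indexof() above,
// relative to rsp after its four pushes (one word each):
//
//   rsp + 0*wordSize : saved original SP (tmp)
//   rsp + 1*wordSize : string address    (str1)
//   rsp + 2*wordSize : substring address (str2)
//   rsp + 3*wordSize : substring count   (cnt2)
//
// matching the RELOAD_SUBSTR / RET_FOUND_LONG reloads and the final
// pop(rsp) in CLEANUP.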
void MacroAssembler::string_indexof_char(Register str1, Register cnt1, Register ch, Register result,
                                         XMMRegister vec1, XMMRegister vec2, XMMRegister vec3, Register tmp) {
  ShortBranchVerifier sbv(this);
  assert(UseSSE42Intrinsics, "SSE4.2 intrinsics are required");

  int stride = 8;

  Label FOUND_CHAR, SCAN_TO_CHAR, SCAN_TO_CHAR_LOOP,
        SCAN_TO_8_CHAR, SCAN_TO_8_CHAR_LOOP, SCAN_TO_16_CHAR_LOOP,
        RET_NOT_FOUND, SCAN_TO_8_CHAR_INIT,
        FOUND_SEQ_CHAR, DONE_LABEL;

  movptr(result, str1);
  if (UseAVX >= 2) {
    cmpl(cnt1, stride);
    jcc(Assembler::less, SCAN_TO_CHAR);
    cmpl(cnt1, 2*stride);
    jcc(Assembler::less, SCAN_TO_8_CHAR_INIT);
    movdl(vec1, ch);
    vpbroadcastw(vec1, vec1, Assembler::AVX_256bit);
    vpxor(vec2, vec2);
    movl(tmp, cnt1);
    andl(tmp, 0xFFFFFFF0);  // vector count (in chars)
    andl(cnt1, 0x0000000F); // tail count (in chars)

    bind(SCAN_TO_16_CHAR_LOOP);
    vmovdqu(vec3, Address(result, 0));
    vpcmpeqw(vec3, vec3, vec1, 1);
    vptest(vec2, vec3);
    jcc(Assembler::carryClear, FOUND_CHAR);
    addptr(result, 32);
    subl(tmp, 2*stride);
    jcc(Assembler::notZero, SCAN_TO_16_CHAR_LOOP);
    jmp(SCAN_TO_8_CHAR);
    bind(SCAN_TO_8_CHAR_INIT);
    movdl(vec1, ch);
    pshuflw(vec1, vec1, 0x00);
    pshufd(vec1, vec1, 0);
    pxor(vec2, vec2);
  }
  bind(SCAN_TO_8_CHAR);
  cmpl(cnt1, stride);
  jcc(Assembler::less, SCAN_TO_CHAR);
  if (UseAVX < 2) {
    movdl(vec1, ch);
    pshuflw(vec1, vec1, 0x00);
    pshufd(vec1, vec1, 0);
    pxor(vec2, vec2);
  }
  movl(tmp, cnt1);
  andl(tmp, 0xFFFFFFF8);  // vector count (in chars)
  andl(cnt1, 0x00000007); // tail count (in chars)

  bind(SCAN_TO_8_CHAR_LOOP);
  movdqu(vec3, Address(result, 0));
  pcmpeqw(vec3, vec1);
  ptest(vec2, vec3);
  jcc(Assembler::carryClear, FOUND_CHAR);
  addptr(result, 16);
  subl(tmp, stride);
  jcc(Assembler::notZero, SCAN_TO_8_CHAR_LOOP);
  bind(SCAN_TO_CHAR);
  testl(cnt1, cnt1);
  jcc(Assembler::zero, RET_NOT_FOUND);
  bind(SCAN_TO_CHAR_LOOP);
  load_unsigned_short(tmp, Address(result, 0));
  cmpl(ch, tmp);
  jccb(Assembler::equal, FOUND_SEQ_CHAR);
  addptr(result, 2);
  subl(cnt1, 1);
  jccb(Assembler::zero, RET_NOT_FOUND);
  jmp(SCAN_TO_CHAR_LOOP);

  bind(RET_NOT_FOUND);
  movl(result, -1);
  jmpb(DONE_LABEL);

  bind(FOUND_CHAR);
  if (UseAVX >= 2) {
    vpmovmskb(tmp, vec3);
  } else {
    pmovmskb(tmp, vec3);
  }
  bsfl(ch, tmp);
  addl(result, ch);

  bind(FOUND_SEQ_CHAR);
  subptr(result, str1);
  shrl(result, 1);

  bind(DONE_LABEL);
} // string_indexof_char

// helper function for string_compare
void MacroAssembler::load_next_elements(Register elem1, Register elem2, Register str1, Register str2,
                                        Address::ScaleFactor scale, Address::ScaleFactor scale1,
                                        Address::ScaleFactor scale2, Register index, int ae) {
  if (ae == StrIntrinsicNode::LL) {
    load_unsigned_byte(elem1, Address(str1, index, scale, 0));
    load_unsigned_byte(elem2, Address(str2, index, scale, 0));
  } else if (ae == StrIntrinsicNode::UU) {
    load_unsigned_short(elem1, Address(str1, index, scale, 0));
    load_unsigned_short(elem2, Address(str2, index, scale, 0));
  } else {
    load_unsigned_byte(elem1, Address(str1, index, scale1, 0));
    load_unsigned_short(elem2, Address(str2, index, scale2, 0));
  }
}
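// A sketch of the scalar comparison string_compare() accelerates
// (illustrative only; 'ae' selects byte/char element widths for the
// two operands):
//
//   static int compare(/* byte[] or char[] */ s1, s2) {
//     int min = Math.min(s1.length, s2.length);
//     for (int i = 0; i < min; i++) {
//       if (s1[i] != s2[i]) {
//         return s1[i] - s2[i];       // first mismatching element decides
//       }
//     }
//     return s1.length - s2.length;   // equal prefix: length difference
//   }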
// Compare strings, used for char[] and byte[].
void MacroAssembler::string_compare(Register str1, Register str2,
                                    Register cnt1, Register cnt2, Register result,
                                    XMMRegister vec1, int ae) {
  ShortBranchVerifier sbv(this);
  Label LENGTH_DIFF_LABEL, POP_LABEL, DONE_LABEL, WHILE_HEAD_LABEL;
  Label COMPARE_WIDE_VECTORS_LOOP_FAILED;  // used only _LP64 && AVX3
  int stride, stride2, adr_stride, adr_stride1, adr_stride2;
  int stride2x2 = 0x40;
  Address::ScaleFactor scale  = Address::no_scale;
  Address::ScaleFactor scale1 = Address::no_scale;
  Address::ScaleFactor scale2 = Address::no_scale;

  if (ae != StrIntrinsicNode::LL) {
    stride2x2 = 0x20;
  }

  if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
    shrl(cnt2, 1);
  }
  // Compute the minimum of the string lengths and the
  // difference of the string lengths (stack).
  // Do the conditional move stuff
  movl(result, cnt1);
  subl(cnt1, cnt2);
  push(cnt1);
  cmov32(Assembler::lessEqual, cnt2, result); // cnt2 = min(cnt1, cnt2)

  // Is the minimum length zero?
  testl(cnt2, cnt2);
  jcc(Assembler::zero, LENGTH_DIFF_LABEL);
  if (ae == StrIntrinsicNode::LL) {
    // Load first bytes
    load_unsigned_byte(result, Address(str1, 0));  // result = str1[0]
    load_unsigned_byte(cnt1, Address(str2, 0));    // cnt1   = str2[0]
  } else if (ae == StrIntrinsicNode::UU) {
    // Load first characters
    load_unsigned_short(result, Address(str1, 0));
    load_unsigned_short(cnt1, Address(str2, 0));
  } else {
    load_unsigned_byte(result, Address(str1, 0));
    load_unsigned_short(cnt1, Address(str2, 0));
  }
  subl(result, cnt1);
  jcc(Assembler::notZero, POP_LABEL);

  if (ae == StrIntrinsicNode::UU) {
    // Divide length by 2 to get number of chars
    shrl(cnt2, 1);
  }
  cmpl(cnt2, 1);
  jcc(Assembler::equal, LENGTH_DIFF_LABEL);

  // Check if the strings start at the same location and setup scale and stride
  if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
    cmpptr(str1, str2);
    jcc(Assembler::equal, LENGTH_DIFF_LABEL);
    if (ae == StrIntrinsicNode::LL) {
      scale = Address::times_1;
      stride = 16;
    } else {
      scale = Address::times_2;
      stride = 8;
    }
  } else {
    scale1 = Address::times_1;
    scale2 = Address::times_2;
    // scale not used
    stride = 8;
  }

  if (UseAVX >= 2 && UseSSE42Intrinsics) {
    Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_WIDE_TAIL, COMPARE_SMALL_STR;
    Label COMPARE_WIDE_VECTORS_LOOP, COMPARE_16_CHARS, COMPARE_INDEX_CHAR;
    Label COMPARE_WIDE_VECTORS_LOOP_AVX2;
    Label COMPARE_TAIL_LONG;
    Label COMPARE_WIDE_VECTORS_LOOP_AVX3;  // used only _LP64 && AVX3

    int pcmpmask = 0x19;
    if (ae == StrIntrinsicNode::LL) {
      pcmpmask &= ~0x01;
    }

    // Setup to compare 16-chars (32-bytes) vectors,
    // start from first character again because it has aligned address.
    if (ae == StrIntrinsicNode::LL) {
      stride2 = 32;
    } else {
      stride2 = 16;
    }
    if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
      adr_stride = stride << scale;
    } else {
      adr_stride1 = 8;  // stride << scale1;
      adr_stride2 = 16; // stride << scale2;
    }

    assert(result == rax && cnt2 == rdx && cnt1 == rcx, "pcmpestri");
    // rax and rdx are used by pcmpestri as elements counters
    movl(result, cnt2);
    andl(cnt2, ~(stride2-1)); // cnt2 holds the vector count
    jcc(Assembler::zero, COMPARE_TAIL_LONG);

    // fast path : compare first 2 8-char vectors.
    bind(COMPARE_16_CHARS);
    if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
      movdqu(vec1, Address(str1, 0));
    } else {
      pmovzxbw(vec1, Address(str1, 0));
    }
    pcmpestri(vec1, Address(str2, 0), pcmpmask);
    jccb(Assembler::below, COMPARE_INDEX_CHAR);

    if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
      movdqu(vec1, Address(str1, adr_stride));
      pcmpestri(vec1, Address(str2, adr_stride), pcmpmask);
    } else {
      pmovzxbw(vec1, Address(str1, adr_stride1));
      pcmpestri(vec1, Address(str2, adr_stride2), pcmpmask);
    }
    jccb(Assembler::aboveEqual, COMPARE_WIDE_VECTORS);
    addl(cnt1, stride);

    // Compare the characters at index in cnt1
    bind(COMPARE_INDEX_CHAR); // cnt1 has the offset of the mismatching character
    load_next_elements(result, cnt2, str1, str2, scale, scale1, scale2, cnt1, ae);
    subl(result, cnt2);
    jmp(POP_LABEL);

    // Setup the registers to start vector comparison loop
    bind(COMPARE_WIDE_VECTORS);
    if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
      lea(str1, Address(str1, result, scale));
      lea(str2, Address(str2, result, scale));
    } else {
      lea(str1, Address(str1, result, scale1));
      lea(str2, Address(str2, result, scale2));
    }
    subl(result, stride2);
    subl(cnt2, stride2);
    jcc(Assembler::zero, COMPARE_WIDE_TAIL);
    negptr(result);

    // In a loop, compare 16-chars (32-bytes) at once using (vpxor+vptest)
    bind(COMPARE_WIDE_VECTORS_LOOP);

#ifdef _LP64
    if ((AVX3Threshold == 0) && VM_Version::supports_avx512vlbw()) { // trying 64 bytes fast loop
      cmpl(cnt2, stride2x2);
      jccb(Assembler::below, COMPARE_WIDE_VECTORS_LOOP_AVX2);
      testl(cnt2, stride2x2-1);   // cnt2 holds the vector count
      jccb(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP_AVX2);   // means we cannot subtract by 0x40

      bind(COMPARE_WIDE_VECTORS_LOOP_AVX3); // the hottest loop
      if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
        evmovdquq(vec1, Address(str1, result, scale), Assembler::AVX_512bit);
        evpcmpeqb(k7, vec1, Address(str2, result, scale), Assembler::AVX_512bit); // k7 == 11..11, if operands equal, otherwise k7 has some 0
      } else {
        vpmovzxbw(vec1, Address(str1, result, scale1), Assembler::AVX_512bit);
        evpcmpeqb(k7, vec1, Address(str2, result, scale2), Assembler::AVX_512bit); // k7 == 11..11, if operands equal, otherwise k7 has some 0
      }
      kortestql(k7, k7);
      jcc(Assembler::aboveEqual, COMPARE_WIDE_VECTORS_LOOP_FAILED);     // miscompare
      addptr(result, stride2x2);  // update since we already compared at this addr
      subl(cnt2, stride2x2);      // and sub the size too
      jccb(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP_AVX3);

      vpxor(vec1, vec1);
      jmpb(COMPARE_WIDE_TAIL);
    }//if (VM_Version::supports_avx512vlbw())
#endif // _LP64

    bind(COMPARE_WIDE_VECTORS_LOOP_AVX2);
    if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
      vmovdqu(vec1, Address(str1, result, scale));
      vpxor(vec1, Address(str2, result, scale));
    } else {
      vpmovzxbw(vec1, Address(str1, result, scale1), Assembler::AVX_256bit);
      vpxor(vec1, Address(str2, result, scale2));
    }
    vptest(vec1, vec1);
    jcc(Assembler::notZero, VECTOR_NOT_EQUAL);
    addptr(result, stride2);
    subl(cnt2, stride2);
    jcc(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP);
    // clean upper bits of YMM registers
    vpxor(vec1, vec1);

    // compare wide vectors tail
    bind(COMPARE_WIDE_TAIL);
    testptr(result, result);
    jcc(Assembler::zero, LENGTH_DIFF_LABEL);

    movl(result, stride2);
    movl(cnt2, result);
    negptr(result);
    jmp(COMPARE_WIDE_VECTORS_LOOP_AVX2);

    // Identifies the mismatching (higher or lower) 16-bytes in the 32-byte vectors.
    bind(VECTOR_NOT_EQUAL);
    // clean upper bits of YMM registers
    vpxor(vec1, vec1);
    if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
      lea(str1, Address(str1, result, scale));
      lea(str2, Address(str2, result, scale));
    } else {
      lea(str1, Address(str1, result, scale1));
      lea(str2, Address(str2, result, scale2));
    }
    jmp(COMPARE_16_CHARS);

    // Compare tail chars, length between 1 and 15 chars
    bind(COMPARE_TAIL_LONG);
    movl(cnt2, result);
    cmpl(cnt2, stride);
    jcc(Assembler::less, COMPARE_SMALL_STR);

    if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
      movdqu(vec1, Address(str1, 0));
    } else {
      pmovzxbw(vec1, Address(str1, 0));
    }
    pcmpestri(vec1, Address(str2, 0), pcmpmask);
    jcc(Assembler::below, COMPARE_INDEX_CHAR);
    subptr(cnt2, stride);
    jcc(Assembler::zero, LENGTH_DIFF_LABEL);
    if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
      lea(str1, Address(str1, result, scale));
      lea(str2, Address(str2, result, scale));
    } else {
      lea(str1, Address(str1, result, scale1));
      lea(str2, Address(str2, result, scale2));
    }
    negptr(cnt2);
    jmpb(WHILE_HEAD_LABEL);

    bind(COMPARE_SMALL_STR);
  } else if (UseSSE42Intrinsics) {
    Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_TAIL;
    int pcmpmask = 0x19;
    // Setup to compare 8-char (16-byte) vectors,
    // start from first character again because it has aligned address.
    movl(result, cnt2);
    andl(cnt2, ~(stride - 1)); // cnt2 holds the vector count
    if (ae == StrIntrinsicNode::LL) {
      pcmpmask &= ~0x01;
    }
    jcc(Assembler::zero, COMPARE_TAIL);
    if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
      lea(str1, Address(str1, result, scale));
      lea(str2, Address(str2, result, scale));
    } else {
      lea(str1, Address(str1, result, scale1));
      lea(str2, Address(str2, result, scale2));
    }
    negptr(result);

    // pcmpestri
    //   inputs:
    //     vec1 - substring
    //     rax  - negative string length (elements count)
    //     mem  - scanned string
    //     rdx  - string length (elements count)
    //     pcmpmask - cmp mode: 11000 (string compare with negated result)
    //                + 00 (unsigned bytes) or + 01 (unsigned shorts)
    //   outputs:
    //     rcx - first mismatched element index
    assert(result == rax && cnt2 == rdx && cnt1 == rcx, "pcmpestri");

    bind(COMPARE_WIDE_VECTORS);
    if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
      movdqu(vec1, Address(str1, result, scale));
      pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
    } else {
      pmovzxbw(vec1, Address(str1, result, scale1));
      pcmpestri(vec1, Address(str2, result, scale2), pcmpmask);
    }
    // After pcmpestri cnt1(rcx) contains mismatched element index

    jccb(Assembler::below, VECTOR_NOT_EQUAL);  // CF==1
    addptr(result, stride);
    subptr(cnt2, stride);
    jccb(Assembler::notZero, COMPARE_WIDE_VECTORS);

    // compare wide vectors tail
    testptr(result, result);
    jcc(Assembler::zero, LENGTH_DIFF_LABEL);

    movl(cnt2, stride);
    movl(result, stride);
    negptr(result);
    if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
      movdqu(vec1, Address(str1, result, scale));
      pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
    } else {
      pmovzxbw(vec1, Address(str1, result, scale1));
      pcmpestri(vec1, Address(str2, result, scale2), pcmpmask);
    }
    jccb(Assembler::aboveEqual, LENGTH_DIFF_LABEL);

    // Mismatched characters in the vectors
    bind(VECTOR_NOT_EQUAL);
    addptr(cnt1, result);
    load_next_elements(result, cnt2, str1, str2, scale, scale1, scale2, cnt1, ae);
    subl(result, cnt2);
    jmpb(POP_LABEL);

    bind(COMPARE_TAIL); // limit is zero
    movl(cnt2, result);
    // Fallthru to tail compare
  }
  // Shift str2 and str1 to the end of the arrays, negate min
  if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
    lea(str1, Address(str1, cnt2, scale));
    lea(str2, Address(str2, cnt2, scale));
  } else {
    lea(str1, Address(str1, cnt2, scale1));
    lea(str2, Address(str2, cnt2, scale2));
  }
  decrementl(cnt2);  // first character was compared already
  negptr(cnt2);

  // Compare the rest of the elements
  bind(WHILE_HEAD_LABEL);
  load_next_elements(result, cnt1, str1, str2, scale, scale1, scale2, cnt2, ae);
  subl(result, cnt1);
  jccb(Assembler::notZero, POP_LABEL);
  increment(cnt2);
  jccb(Assembler::notZero, WHILE_HEAD_LABEL);

  // Strings are equal up to min length. Return the length difference.
  bind(LENGTH_DIFF_LABEL);
  pop(result);
  if (ae == StrIntrinsicNode::UU) {
    // Divide diff by 2 to get number of chars
    sarl(result, 1);
  }
  jmpb(DONE_LABEL);

#ifdef _LP64
  if (VM_Version::supports_avx512vlbw()) {

    bind(COMPARE_WIDE_VECTORS_LOOP_FAILED);

    kmovql(cnt1, k7);
    notq(cnt1);
    bsfq(cnt2, cnt1);
    if (ae != StrIntrinsicNode::LL) {
      // Divide diff by 2 to get number of chars
      sarl(cnt2, 1);
    }
    addq(result, cnt2);
    if (ae == StrIntrinsicNode::LL) {
      load_unsigned_byte(cnt1, Address(str2, result));
      load_unsigned_byte(result, Address(str1, result));
    } else if (ae == StrIntrinsicNode::UU) {
      load_unsigned_short(cnt1, Address(str2, result, scale));
      load_unsigned_short(result, Address(str1, result, scale));
    } else {
      load_unsigned_short(cnt1, Address(str2, result, scale2));
      load_unsigned_byte(result, Address(str1, result, scale1));
    }
    subl(result, cnt1);
    jmpb(POP_LABEL);
  }//if (VM_Version::supports_avx512vlbw())
#endif // _LP64

  // Discard the stored length difference
  bind(POP_LABEL);
  pop(cnt1);

  // That's it
  bind(DONE_LABEL);
  if (ae == StrIntrinsicNode::UL) {
    negl(result);
  }
}
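// The vector paths in has_negatives() below all reduce to a sign-bit test:
// a byte is "negative" iff (b & 0x80) != 0. A scalar sketch of the 4-byte
// step used in the tail (illustrative only):
//
//   static boolean hasNegatives4(int fourBytes) {
//     return (fourBytes & 0x80808080) != 0;
//   }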
// Search for Non-ASCII character (Negative byte value) in a byte array,
// return true if it has any and false otherwise.
//   ..\jdk\src\java.base\share\classes\java\lang\StringCoding.java
//   @HotSpotIntrinsicCandidate
//   private static boolean hasNegatives(byte[] ba, int off, int len) {
//     for (int i = off; i < off + len; i++) {
//       if (ba[i] < 0) {
//         return true;
//       }
//     }
//     return false;
//   }
void MacroAssembler::has_negatives(Register ary1, Register len,
                                   Register result, Register tmp1,
                                   XMMRegister vec1, XMMRegister vec2) {
  // rsi: byte array
  // rcx: len
  // rax: result
  ShortBranchVerifier sbv(this);
  assert_different_registers(ary1, len, result, tmp1);
  assert_different_registers(vec1, vec2);
  Label TRUE_LABEL, FALSE_LABEL, DONE, COMPARE_CHAR, COMPARE_VECTORS, COMPARE_BYTE;

  // len == 0
  testl(len, len);
  jcc(Assembler::zero, FALSE_LABEL);

  if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512
      VM_Version::supports_avx512vlbw() &&
      VM_Version::supports_bmi2()) {

    Label test_64_loop, test_tail;
    Register tmp3_aliased = len;

    movl(tmp1, len);
    vpxor(vec2, vec2, vec2, Assembler::AVX_512bit);

    andl(tmp1, 64 - 1);   // tail count (in chars) 0x3F
    andl(len, ~(64 - 1)); // vector count (in chars)
    jccb(Assembler::zero, test_tail);

    lea(ary1, Address(ary1, len, Address::times_1));
    negptr(len);

    bind(test_64_loop);
    // Check whether our 64 elements of size byte contain negatives
    evpcmpgtb(k2, vec2, Address(ary1, len, Address::times_1), Assembler::AVX_512bit);
    kortestql(k2, k2);
    jcc(Assembler::notZero, TRUE_LABEL);

    addptr(len, 64);
    jccb(Assembler::notZero, test_64_loop);

    bind(test_tail);
    // bail out when there is nothing to be done
    testl(tmp1, -1);
    jcc(Assembler::zero, FALSE_LABEL);

    // ~(~0 << len) applied up to two times (for 32-bit scenario)
#ifdef _LP64
    mov64(tmp3_aliased, 0xFFFFFFFFFFFFFFFF);
    shlxq(tmp3_aliased, tmp3_aliased, tmp1);
    notq(tmp3_aliased);
    kmovql(k3, tmp3_aliased);
#else
    Label k_init;
    jmp(k_init);

    // We cannot read 64 bits from a general purpose register, thus we move
    // the data required to compose 64 1's to the instruction stream.
    // We emit a 64-byte wide series of elements from 0..63 which later on will
    // be used as compare targets with the tail count contained in the tmp1 register.
    // The result will be a k register having tmp1 consecutive 1's,
    // counting from the least significant bit.
    address tmp = pc();
    emit_int64(0x0706050403020100);
    emit_int64(0x0F0E0D0C0B0A0908);
    emit_int64(0x1716151413121110);
    emit_int64(0x1F1E1D1C1B1A1918);
    emit_int64(0x2726252423222120);
    emit_int64(0x2F2E2D2C2B2A2928);
    emit_int64(0x3736353433323130);
    emit_int64(0x3F3E3D3C3B3A3938);

    bind(k_init);
    lea(len, InternalAddress(tmp));
    // create mask to test for negative byte inside a vector
    evpbroadcastb(vec1, tmp1, Assembler::AVX_512bit);
    evpcmpgtb(k3, vec1, Address(len, 0), Assembler::AVX_512bit);

#endif
    evpcmpgtb(k2, k3, vec2, Address(ary1, 0), Assembler::AVX_512bit);
    ktestq(k2, k3);
    jcc(Assembler::notZero, TRUE_LABEL);

    jmp(FALSE_LABEL);
  } else {
    movl(result, len); // copy

    if (UseAVX >= 2 && UseSSE >= 2) {
      // With AVX2, use 32-byte vector compare
      Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;

      // Compare 32-byte vectors
      andl(result, 0x0000001f); // tail count (in bytes)
      andl(len, 0xffffffe0);    // vector count (in bytes)
      jccb(Assembler::zero, COMPARE_TAIL);

      lea(ary1, Address(ary1, len, Address::times_1));
      negptr(len);

      movl(tmp1, 0x80808080);   // create mask to test for Unicode chars in vector
      movdl(vec2, tmp1);
      vpbroadcastd(vec2, vec2, Assembler::AVX_256bit);

      bind(COMPARE_WIDE_VECTORS);
      vmovdqu(vec1, Address(ary1, len, Address::times_1));
      vptest(vec1, vec2);
      jccb(Assembler::notZero, TRUE_LABEL);
      addptr(len, 32);
      jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);

      testl(result, result);
      jccb(Assembler::zero, FALSE_LABEL);

      vmovdqu(vec1, Address(ary1, result, Address::times_1, -32));
      vptest(vec1, vec2);
      jccb(Assembler::notZero, TRUE_LABEL);
      jmpb(FALSE_LABEL);

      bind(COMPARE_TAIL); // len is zero
      movl(len, result);
      // Fallthru to tail compare
    } else if (UseSSE42Intrinsics) {
      // With SSE4.2, use double quad vector compare
      Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;

      // Compare 16-byte vectors
      andl(result, 0x0000000f); // tail count (in bytes)
      andl(len, 0xfffffff0);    // vector count (in bytes)
      jcc(Assembler::zero, COMPARE_TAIL);

      lea(ary1, Address(ary1, len, Address::times_1));
      negptr(len);

      movl(tmp1, 0x80808080);
      movdl(vec2, tmp1);
      pshufd(vec2, vec2, 0);

      bind(COMPARE_WIDE_VECTORS);
      movdqu(vec1, Address(ary1, len, Address::times_1));
      ptest(vec1, vec2);
      jcc(Assembler::notZero, TRUE_LABEL);
      addptr(len, 16);
      jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);

      testl(result, result);
      jcc(Assembler::zero, FALSE_LABEL);

      movdqu(vec1, Address(ary1, result, Address::times_1, -16));
      ptest(vec1, vec2);
      jccb(Assembler::notZero, TRUE_LABEL);
      jmpb(FALSE_LABEL);

      bind(COMPARE_TAIL); // len is zero
      movl(len, result);
      // Fallthru to tail compare
    }
  }
  // Compare 4-byte vectors
  andl(len, 0xfffffffc); // vector count (in bytes)
  jccb(Assembler::zero, COMPARE_CHAR);

  lea(ary1, Address(ary1, len, Address::times_1));
  negptr(len);

  bind(COMPARE_VECTORS);
  movl(tmp1, Address(ary1, len, Address::times_1));
  andl(tmp1, 0x80808080);
  jccb(Assembler::notZero, TRUE_LABEL);
  addptr(len, 4);
  jcc(Assembler::notZero, COMPARE_VECTORS);

  // Compare trailing char (final 2 bytes), if any
  bind(COMPARE_CHAR);
  testl(result, 0x2);   // tail char
  jccb(Assembler::zero, COMPARE_BYTE);
  load_unsigned_short(tmp1, Address(ary1, 0));
  andl(tmp1, 0x00008080);
  jccb(Assembler::notZero, TRUE_LABEL);
  subptr(result, 2);
  lea(ary1, Address(ary1, 2));

  bind(COMPARE_BYTE);
  testl(result, 0x1);   // tail byte
  jccb(Assembler::zero, FALSE_LABEL);
  load_unsigned_byte(tmp1, Address(ary1, 0));
  andl(tmp1, 0x00000080);
  jccb(Assembler::notEqual, TRUE_LABEL);
  jmpb(FALSE_LABEL);

  bind(TRUE_LABEL);
  movl(result, 1);   // return true
  jmpb(DONE);

  bind(FALSE_LABEL);
  xorl(result, result); // return false

  // That's it
  bind(DONE);
  if (UseAVX >= 2 && UseSSE >= 2) {
    // clean upper bits of YMM registers
    vpxor(vec1, vec1);
    vpxor(vec2, vec2);
  }
}
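// A sketch of the Java-level check arrays_equals() replaces when used for
// whole arrays (is_array_equ; illustrative only, in the spirit of
// Arrays.equals):
//
//   static boolean equals(byte[] a, byte[] b) {
//     if (a == b) return true;
//     if (a == null || b == null || a.length != b.length) return false;
//     for (int i = 0; i < a.length; i++) {
//       if (a[i] != b[i]) return false;
//     }
//     return true;
//   }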
// Compare char[] or byte[] arrays aligned to 4 bytes or substrings.
void MacroAssembler::arrays_equals(bool is_array_equ, Register ary1, Register ary2,
                                   Register limit, Register result, Register chr,
                                   XMMRegister vec1, XMMRegister vec2, bool is_char) {
  ShortBranchVerifier sbv(this);
  Label TRUE_LABEL, FALSE_LABEL, DONE, COMPARE_VECTORS, COMPARE_CHAR, COMPARE_BYTE;

  int length_offset = arrayOopDesc::length_offset_in_bytes();
  int base_offset   = arrayOopDesc::base_offset_in_bytes(is_char ? T_CHAR : T_BYTE);

  if (is_array_equ) {
    // Check the input args
    cmpoop(ary1, ary2);
    jcc(Assembler::equal, TRUE_LABEL);

    // Need additional checks for arrays_equals.
    testptr(ary1, ary1);
    jcc(Assembler::zero, FALSE_LABEL);
    testptr(ary2, ary2);
    jcc(Assembler::zero, FALSE_LABEL);

    // Check the lengths
    movl(limit, Address(ary1, length_offset));
    cmpl(limit, Address(ary2, length_offset));
    jcc(Assembler::notEqual, FALSE_LABEL);
  }

  // count == 0
  testl(limit, limit);
  jcc(Assembler::zero, TRUE_LABEL);

  if (is_array_equ) {
    // Load array address
    lea(ary1, Address(ary1, base_offset));
    lea(ary2, Address(ary2, base_offset));
  }

  if (is_array_equ && is_char) {
    // arrays_equals when used for char[].
    shll(limit, 1);      // byte count != 0
  }
  movl(result, limit); // copy

  if (UseAVX >= 2) {
    // With AVX2, use 32-byte vector compare
    Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;

    // Compare 32-byte vectors
    andl(result, 0x0000001f); // tail count (in bytes)
    andl(limit, 0xffffffe0);  // vector count (in bytes)
    jcc(Assembler::zero, COMPARE_TAIL);

    lea(ary1, Address(ary1, limit, Address::times_1));
    lea(ary2, Address(ary2, limit, Address::times_1));
    negptr(limit);

#ifdef _LP64
    if ((AVX3Threshold == 0) && VM_Version::supports_avx512vlbw()) { // trying 64 bytes fast loop
      Label COMPARE_WIDE_VECTORS_LOOP_AVX2, COMPARE_WIDE_VECTORS_LOOP_AVX3;

      cmpl(limit, -64);
      jcc(Assembler::greater, COMPARE_WIDE_VECTORS_LOOP_AVX2);

      bind(COMPARE_WIDE_VECTORS_LOOP_AVX3); // the hottest loop

      evmovdquq(vec1, Address(ary1, limit, Address::times_1), Assembler::AVX_512bit);
      evpcmpeqb(k7, vec1, Address(ary2, limit, Address::times_1), Assembler::AVX_512bit);
      kortestql(k7, k7);
      jcc(Assembler::aboveEqual, FALSE_LABEL);     // miscompare
      addptr(limit, 64);  // update since we already compared at this addr
      cmpl(limit, -64);
      jccb(Assembler::lessEqual, COMPARE_WIDE_VECTORS_LOOP_AVX3);

      // At this point we may still need to compare -limit+result bytes.
      // We could execute the next two instructions and just continue via non-wide path:
      //  cmpl(limit, 0);
      //  jcc(Assembler::equal, COMPARE_TAIL); // true
      // But since we stopped at the points ary{1,2}+limit which are
      // not farther than 64 bytes from the ends of arrays ary{1,2}+result
      // (|limit| <= 32 and result < 32),
      // we may just compare the last 64 bytes.
      //
      addptr(result, -64);   // it is safe, because we just came from this area
      evmovdquq(vec1, Address(ary1, result, Address::times_1), Assembler::AVX_512bit);
      evpcmpeqb(k7, vec1, Address(ary2, result, Address::times_1), Assembler::AVX_512bit);
      kortestql(k7, k7);
      jcc(Assembler::aboveEqual, FALSE_LABEL);     // miscompare

      jmp(TRUE_LABEL);

      bind(COMPARE_WIDE_VECTORS_LOOP_AVX2);

    }//if (VM_Version::supports_avx512vlbw())
#endif //_LP64
    bind(COMPARE_WIDE_VECTORS);
    vmovdqu(vec1, Address(ary1, limit, Address::times_1));
    vmovdqu(vec2, Address(ary2, limit, Address::times_1));
    vpxor(vec1, vec2);

    vptest(vec1, vec1);
    jcc(Assembler::notZero, FALSE_LABEL);
    addptr(limit, 32);
    jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);

    testl(result, result);
    jcc(Assembler::zero, TRUE_LABEL);

    vmovdqu(vec1, Address(ary1, result, Address::times_1, -32));
    vmovdqu(vec2, Address(ary2, result, Address::times_1, -32));
    vpxor(vec1, vec2);

    vptest(vec1, vec1);
    jccb(Assembler::notZero, FALSE_LABEL);
    jmpb(TRUE_LABEL);

    bind(COMPARE_TAIL); // limit is zero
    movl(limit, result);
    // Fallthru to tail compare
  } else if (UseSSE42Intrinsics) {
    // With SSE4.2, use double quad vector compare
    Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;

    // Compare 16-byte vectors
    andl(result, 0x0000000f); // tail count (in bytes)
    andl(limit, 0xfffffff0);  // vector count (in bytes)
    jcc(Assembler::zero, COMPARE_TAIL);

    lea(ary1, Address(ary1, limit, Address::times_1));
    lea(ary2, Address(ary2, limit, Address::times_1));
    negptr(limit);

    bind(COMPARE_WIDE_VECTORS);
    movdqu(vec1, Address(ary1, limit, Address::times_1));
    movdqu(vec2, Address(ary2, limit, Address::times_1));
    pxor(vec1, vec2);

    ptest(vec1, vec1);
    jcc(Assembler::notZero, FALSE_LABEL);
    addptr(limit, 16);
    jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);

    testl(result, result);
    jcc(Assembler::zero, TRUE_LABEL);

    movdqu(vec1, Address(ary1, result, Address::times_1, -16));
    movdqu(vec2, Address(ary2, result, Address::times_1, -16));
    pxor(vec1, vec2);

    ptest(vec1, vec1);
    jccb(Assembler::notZero, FALSE_LABEL);
    jmpb(TRUE_LABEL);

    bind(COMPARE_TAIL); // limit is zero
    movl(limit, result);
    // Fallthru to tail compare
  }

  // Compare 4-byte vectors
  andl(limit, 0xfffffffc); // vector count (in bytes)
  jccb(Assembler::zero, COMPARE_CHAR);

  lea(ary1, Address(ary1, limit, Address::times_1));
  lea(ary2, Address(ary2, limit, Address::times_1));
  negptr(limit);

  bind(COMPARE_VECTORS);
  movl(chr, Address(ary1, limit, Address::times_1));
  cmpl(chr, Address(ary2, limit, Address::times_1));
  jccb(Assembler::notEqual, FALSE_LABEL);
  addptr(limit, 4);
  jcc(Assembler::notZero, COMPARE_VECTORS);

  // Compare trailing char (final 2 bytes), if any
  bind(COMPARE_CHAR);
  testl(result, 0x2);   // tail char
  jccb(Assembler::zero, COMPARE_BYTE);
  load_unsigned_short(chr, Address(ary1, 0));
  load_unsigned_short(limit, Address(ary2, 0));
  cmpl(chr, limit);
  jccb(Assembler::notEqual, FALSE_LABEL);

  if (is_array_equ && is_char) {
    bind(COMPARE_BYTE);
  } else {
    lea(ary1, Address(ary1, 2));
    lea(ary2, Address(ary2, 2));

    bind(COMPARE_BYTE);
    testl(result, 0x1);   // tail byte
    jccb(Assembler::zero, TRUE_LABEL);
    load_unsigned_byte(chr, Address(ary1, 0));
    load_unsigned_byte(limit, Address(ary2, 0));
    cmpl(chr, limit);
    jccb(Assembler::notEqual, FALSE_LABEL);
  }
  bind(TRUE_LABEL);
  movl(result, 1);   // return true
  jmpb(DONE);

  bind(FALSE_LABEL);
  xorl(result, result); // return false

  // That's it
  bind(DONE);
  if (UseAVX >= 2) {
    // clean upper bits of YMM registers
    vpxor(vec1, vec1);
    vpxor(vec2, vec2);
  }
}

#endif
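// A sketch of the fill loop generate_fill() emits code for (illustrative
// only, in the spirit of Arrays.fill; 't' selects the element width and
// 'count' is in elements):
//
//   static void fill(int[] a, int value, int count) {
//     for (int i = 0; i < count; i++) {
//       a[i] = value;
//     }
//   }
//
// For T_BYTE/T_SHORT the value is first replicated into a full 32-bit
// pattern (e.g. 0x41 -> 0x4141 -> 0x41414141) so wider stores can be used.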
void MacroAssembler::generate_fill(BasicType t, bool aligned,
                                   Register to, Register value, Register count,
                                   Register rtmp, XMMRegister xtmp) {
  ShortBranchVerifier sbv(this);
  assert_different_registers(to, value, count, rtmp);
  Label L_exit;
  Label L_fill_2_bytes, L_fill_4_bytes;

  int shift = -1;
  switch (t) {
    case T_BYTE:
      shift = 2;
      break;
    case T_SHORT:
      shift = 1;
      break;
    case T_INT:
      shift = 0;
      break;
    default: ShouldNotReachHere();
  }

  if (t == T_BYTE) {
    andl(value, 0xff);
    movl(rtmp, value);
    shll(rtmp, 8);
    orl(value, rtmp);
  }
  if (t == T_SHORT) {
    andl(value, 0xffff);
  }
  if (t == T_BYTE || t == T_SHORT) {
    movl(rtmp, value);
    shll(rtmp, 16);
    orl(value, rtmp);
  }

  cmpl(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
  jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
  if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
    Label L_skip_align2;
    // align source address at 4 bytes address boundary
    if (t == T_BYTE) {
      Label L_skip_align1;
      // One byte misalignment happens only for byte arrays
      testptr(to, 1);
      jccb(Assembler::zero, L_skip_align1);
      movb(Address(to, 0), value);
      increment(to);
      decrement(count);
      BIND(L_skip_align1);
    }
    // Two bytes misalignment happens only for byte and short (char) arrays
    testptr(to, 2);
    jccb(Assembler::zero, L_skip_align2);
    movw(Address(to, 0), value);
    addptr(to, 2);
    subl(count, 1<<(shift-1));
    BIND(L_skip_align2);
  }
  if (UseSSE < 2) {
    Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
    // Fill 32-byte chunks
    subl(count, 8 << shift);
    jcc(Assembler::less, L_check_fill_8_bytes);
    align(16);

    BIND(L_fill_32_bytes_loop);

    for (int i = 0; i < 32; i += 4) {
      movl(Address(to, i), value);
    }

    addptr(to, 32);
    subl(count, 8 << shift);
    jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
    BIND(L_check_fill_8_bytes);
    addl(count, 8 << shift);
    jccb(Assembler::zero, L_exit);
    jmpb(L_fill_8_bytes);

    //
    // length is too short, just fill qwords
    //
    BIND(L_fill_8_bytes_loop);
    movl(Address(to, 0), value);
    movl(Address(to, 4), value);
    addptr(to, 8);
    BIND(L_fill_8_bytes);
    subl(count, 1 << (shift + 1));
    jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
    // fall through to fill 4 bytes
  } else {
    Label L_fill_32_bytes;
    if (!UseUnalignedLoadStores) {
      // align to 8 bytes, we know we are 4 byte aligned to start
      testptr(to, 4);
      jccb(Assembler::zero, L_fill_32_bytes);
      movl(Address(to, 0), value);
      addptr(to, 4);
      subl(count, 1<<shift);
    }
    BIND(L_fill_32_bytes);
    {
      assert( UseSSE >= 2, "supported cpu only" );
      Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
      movdl(xtmp, value);
      if (UseAVX >= 2 && UseUnalignedLoadStores) {
        Label L_check_fill_32_bytes;
        if (UseAVX > 2) {
          // Fill 64-byte chunks
          Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2;

          // If number of bytes to fill < AVX3Threshold, perform fill using AVX2
          cmpl(count, AVX3Threshold);
          jccb(Assembler::below, L_check_fill_64_bytes_avx2);

          vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);

          subl(count, 16 << shift);
          jccb(Assembler::less, L_check_fill_32_bytes);
          align(16);

          BIND(L_fill_64_bytes_loop_avx3);
          evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit);
          addptr(to, 64);
          subl(count, 16 << shift);
          jcc(Assembler::greaterEqual, L_fill_64_bytes_loop_avx3);
          jmpb(L_check_fill_32_bytes);

          BIND(L_check_fill_64_bytes_avx2);
        }
        // Fill 64-byte chunks
        Label L_fill_64_bytes_loop;
        vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit);

        subl(count, 16 << shift);
        jcc(Assembler::less, L_check_fill_32_bytes);
        align(16);

        BIND(L_fill_64_bytes_loop);
        vmovdqu(Address(to, 0), xtmp);
        vmovdqu(Address(to, 32), xtmp);
        addptr(to, 64);
        subl(count, 16 << shift);
        jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);

        BIND(L_check_fill_32_bytes);
        addl(count, 8 << shift);
        jccb(Assembler::less, L_check_fill_8_bytes);
        vmovdqu(Address(to, 0), xtmp);
        addptr(to, 32);
        subl(count, 8 << shift);

        BIND(L_check_fill_8_bytes);
        // clean upper bits of YMM registers
        movdl(xtmp, value);
        pshufd(xtmp, xtmp, 0);
      } else {
        // Fill 32-byte chunks
        pshufd(xtmp, xtmp, 0);

        subl(count, 8 << shift);
        jcc(Assembler::less, L_check_fill_8_bytes);
        align(16);

        BIND(L_fill_32_bytes_loop);

        if (UseUnalignedLoadStores) {
          movdqu(Address(to, 0), xtmp);
          movdqu(Address(to, 16), xtmp);
        } else {
          movq(Address(to, 0), xtmp);
          movq(Address(to, 8), xtmp);
          movq(Address(to, 16), xtmp);
          movq(Address(to, 24), xtmp);
        }

        addptr(to, 32);
        subl(count, 8 << shift);
        jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);

        BIND(L_check_fill_8_bytes);
      }
      addl(count, 8 << shift);
      jccb(Assembler::zero, L_exit);
      jmpb(L_fill_8_bytes);

      //
      // length is too short, just fill qwords
      //
      BIND(L_fill_8_bytes_loop);
      movq(Address(to, 0), xtmp);
      addptr(to, 8);
      BIND(L_fill_8_bytes);
      subl(count, 1 << (shift + 1));
      jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
    }
  }
  // fill trailing 4 bytes
  BIND(L_fill_4_bytes);
  testl(count, 1<<shift);
  jccb(Assembler::zero, L_fill_2_bytes);
  movl(Address(to, 0), value);
  if (t == T_BYTE || t == T_SHORT) {
    Label L_fill_byte;
    addptr(to, 4);
    BIND(L_fill_2_bytes);
    // fill trailing 2 bytes
    testl(count, 1<<(shift-1));
    jccb(Assembler::zero, L_fill_byte);
    movw(Address(to, 0), value);
    if (t == T_BYTE) {
      addptr(to, 2);
      BIND(L_fill_byte);
      // fill trailing byte
      testl(count, 1);
      jccb(Assembler::zero, L_exit);
      movb(Address(to, 0), value);
    } else {
      BIND(L_fill_byte);
    }
  } else {
    BIND(L_fill_2_bytes);
  }
  BIND(L_exit);
}
// encode char[] to byte[] in ISO_8859_1
//   @HotSpotIntrinsicCandidate
//   private static int implEncodeISOArray(byte[] sa, int sp,
//                                         byte[] da, int dp, int len) {
//     int i = 0;
//     for (; i < len; i++) {
//       char c = StringUTF16.getChar(sa, sp++);
//       if (c > '\u00FF')
//         break;
//       da[dp++] = (byte)c;
//     }
//     return i;
//   }
void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
                                      XMMRegister tmp1Reg, XMMRegister tmp2Reg,
                                      XMMRegister tmp3Reg, XMMRegister tmp4Reg,
                                      Register tmp5, Register result) {

  // rsi: src
  // rdi: dst
  // rdx: len
  // rcx: tmp5
  // rax: result
  ShortBranchVerifier sbv(this);
  assert_different_registers(src, dst, len, tmp5, result);
  Label L_done, L_copy_1_char, L_copy_1_char_exit;

  // set result
  xorl(result, result);
  // check for zero length
  testl(len, len);
  jcc(Assembler::zero, L_done);

  movl(result, len);

  // Setup pointers
  lea(src, Address(src, len, Address::times_2)); // char[]
  lea(dst, Address(dst, len, Address::times_1)); // byte[]
  negptr(len);

  if (UseSSE42Intrinsics || UseAVX >= 2) {
    Label L_copy_8_chars, L_copy_8_chars_exit;
    Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit;

    if (UseAVX >= 2) {
      Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit;
      movl(tmp5, 0xff00ff00);   // create mask to test for Unicode chars in vector
      movdl(tmp1Reg, tmp5);
      vpbroadcastd(tmp1Reg, tmp1Reg, Assembler::AVX_256bit);
      jmp(L_chars_32_check);

      bind(L_copy_32_chars);
      vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64));
      vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32));
      vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
      vptest(tmp2Reg, tmp1Reg);       // check for Unicode chars in vector
      jccb(Assembler::notZero, L_copy_32_chars_exit);
      vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1);
      vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector_len */ 1);
      vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg);

      bind(L_chars_32_check);
      addptr(len, 32);
      jcc(Assembler::lessEqual, L_copy_32_chars);

      bind(L_copy_32_chars_exit);
      subptr(len, 16);
      jccb(Assembler::greater, L_copy_16_chars_exit);

    } else if (UseSSE42Intrinsics) {
      movl(tmp5, 0xff00ff00);   // create mask to test for Unicode chars in vector
      movdl(tmp1Reg, tmp5);
      pshufd(tmp1Reg, tmp1Reg, 0);
      jmpb(L_chars_16_check);
    }

    bind(L_copy_16_chars);
    if (UseAVX >= 2) {
      vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32));
      vptest(tmp2Reg, tmp1Reg);
      jcc(Assembler::notZero, L_copy_16_chars_exit);
      vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1);
      vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1);
    } else {
      if (UseAVX > 0) {
        movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
        movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
        vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 0);
      } else {
        movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
        por(tmp2Reg, tmp3Reg);
        movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
        por(tmp2Reg, tmp4Reg);
      }
      ptest(tmp2Reg, tmp1Reg);       // check for Unicode chars in vector
      jccb(Assembler::notZero, L_copy_16_chars_exit);
      packuswb(tmp3Reg, tmp4Reg);
    }
    movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg);
    bind(L_chars_16_check);
    addptr(len, 16);
    jcc(Assembler::lessEqual, L_copy_16_chars);

    bind(L_copy_16_chars_exit);
    if (UseAVX >= 2) {
      // clean upper bits of YMM registers
      vpxor(tmp2Reg, tmp2Reg);
      vpxor(tmp3Reg, tmp3Reg);
      vpxor(tmp4Reg, tmp4Reg);
      movdl(tmp1Reg, tmp5);
      pshufd(tmp1Reg, tmp1Reg, 0);
    }
    subptr(len, 8);
    jccb(Assembler::greater, L_copy_8_chars_exit);

    bind(L_copy_8_chars);
    movdqu(tmp3Reg, Address(src, len, Address::times_2, -16));
    ptest(tmp3Reg, tmp1Reg);
    jccb(Assembler::notZero, L_copy_8_chars_exit);
    packuswb(tmp3Reg, tmp1Reg);
    movq(Address(dst, len, Address::times_1, -8), tmp3Reg);
    addptr(len, 8);
    jccb(Assembler::lessEqual, L_copy_8_chars);

    bind(L_copy_8_chars_exit);
    subptr(len, 8);
    jccb(Assembler::zero, L_done);
  }

  bind(L_copy_1_char);
  load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0));
  testl(tmp5, 0xff00);      // check if Unicode char
  jccb(Assembler::notZero, L_copy_1_char_exit);
  movb(Address(dst, len, Address::times_1, 0), tmp5);
  addptr(len, 1);
  jccb(Assembler::less, L_copy_1_char);

  bind(L_copy_1_char_exit);
  addptr(result, len); // len is negative count of not processed elements

  bind(L_done);
}

#ifdef _LP64
/**
 * Helper for multiply_to_len().
 */
void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
  addq(dest_lo, src1);
  adcq(dest_hi, 0);
  addq(dest_lo, src2);
  adcq(dest_hi, 0);
}

/**
 * Multiply 64 bit by 64 bit first loop.
 */
void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                                           Register y, Register y_idx, Register z,
                                           Register carry, Register product,
                                           Register idx, Register kdx) {
  //
  //  jlong carry, x[], y[], z[];
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    huge_128 product = y[idx] * x[xstart] + carry;
  //    z[kdx] = (jlong)product;
  //    carry  = (jlong)(product >>> 64);
  //  }
  //  z[xstart] = carry;
  //

  Label L_first_loop, L_first_loop_exit;
  Label L_one_x, L_one_y, L_multiply;

  decrementl(xstart);
  jcc(Assembler::negative, L_one_x);

  movq(x_xstart, Address(x, xstart, Address::times_4, 0));
  rorq(x_xstart, 32); // convert big-endian to little-endian

  bind(L_first_loop);
  decrementl(idx);
  jcc(Assembler::negative, L_first_loop_exit);
  decrementl(idx);
  jcc(Assembler::negative, L_one_y);
  movq(y_idx, Address(y, idx, Address::times_4, 0));
  rorq(y_idx, 32); // convert big-endian to little-endian
  bind(L_multiply);
  movq(product, x_xstart);
  mulq(y_idx); // product(rax) * y_idx -> rdx:rax
  addq(product, carry);
  adcq(rdx, 0);
  subl(kdx, 2);
  movl(Address(z, kdx, Address::times_4, 4), product);
  shrq(product, 32);
  movl(Address(z, kdx, Address::times_4, 0), product);
  movq(carry, rdx);
  jmp(L_first_loop);

  bind(L_one_y);
  movl(y_idx, Address(y, 0));
  jmp(L_multiply);

  bind(L_one_x);
  movl(x_xstart, Address(x, 0));
  jmp(L_first_loop);

  bind(L_first_loop_exit);
}
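// For reference: add2_with_carry(hi, lo, s1, s2) above computes the 128-bit
// sum  hi:lo = hi:lo + s1 + s2  (each addq may carry into hi via adcq).
// In the pseudocode of the helpers below this is written as, e.g.:
//
//   huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
//
// where the low 64 bits of 'product' are stored back into z as two 32-bit
// words, most significant word at the lower index (illustrative only).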
/**
 * Multiply 64 bit by 64 bit and add 128 bit.
 */
void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z,
                                            Register yz_idx, Register idx,
                                            Register carry, Register product, int offset) {
  //     huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
  //     z[kdx] = (jlong)product;

  movq(yz_idx, Address(y, idx, Address::times_4, offset));
  rorq(yz_idx, 32); // convert big-endian to little-endian
  movq(product, x_xstart);
  mulq(yz_idx);     // product(rax) * yz_idx -> rdx:product(rax)
  movq(yz_idx, Address(z, idx, Address::times_4, offset));
  rorq(yz_idx, 32); // convert big-endian to little-endian

  add2_with_carry(rdx, product, carry, yz_idx);

  movl(Address(z, idx, Address::times_4, offset+4), product);
  shrq(product, 32);
  movl(Address(z, idx, Address::times_4, offset), product);
}

/**
 * Multiply 128 bit by 128 bit. Unrolled inner loop.
 */
void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
                                             Register yz_idx, Register idx, Register jdx,
                                             Register carry, Register product,
                                             Register carry2) {
  //   jlong carry, x[], y[], z[];
  //   int kdx = ystart+1;
  //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
  //     huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
  //     z[kdx+idx+1] = (jlong)product;
  //     jlong carry2  = (jlong)(product >>> 64);
  //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
  //     z[kdx+idx] = (jlong)product;
  //     carry  = (jlong)(product >>> 64);
  //   }
  //   idx += 2;
  //   if (idx > 0) {
  //     product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
  //     z[kdx+idx] = (jlong)product;
  //     carry  = (jlong)(product >>> 64);
  //   }
  //

  Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;

  movl(jdx, idx);
  andl(jdx, 0xFFFFFFFC);
  shrl(jdx, 2);

  bind(L_third_loop);
  subl(jdx, 1);
  jcc(Assembler::negative, L_third_loop_exit);
  subl(idx, 4);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
  movq(carry2, rdx);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
  movq(carry, rdx);
  jmp(L_third_loop);

  bind (L_third_loop_exit);

  andl (idx, 0x3);
  jcc(Assembler::zero, L_post_third_loop_done);

  Label L_check_1;
  subl(idx, 2);
  jcc(Assembler::negative, L_check_1);

  multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
  movq(carry, rdx);

  bind (L_check_1);
  addl (idx, 0x2);
  andl (idx, 0x1);
  subl(idx, 1);
  jcc(Assembler::negative, L_post_third_loop_done);

  movl(yz_idx, Address(y, idx, Address::times_4, 0));
  movq(product, x_xstart);
  mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax)
  movl(yz_idx, Address(z, idx, Address::times_4, 0));

  add2_with_carry(rdx, product, yz_idx, carry);

  movl(Address(z, idx, Address::times_4, 0), product);
  shrq(product, 32);

  shlq(rdx, 32);
  orq(product, rdx);
  movq(carry, product);

  bind(L_post_third_loop_done);
}
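// The BMI2 variant below relies on three instructions that keep two carry
// chains independent (a brief reminder; see the Intel SDM for details):
//   mulxq(hi, lo, src)  :  hi:lo = rdx * src, flags untouched
//   adcxq(dst, src)     :  dst += src + CF, updates only CF
//   adoxq(dst, src)     :  dst += src + OF, updates only OF
// This lets the multiplies and the two addition chains interleave without
// serializing on a single carry flag.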
/**
 * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop.
 */
void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z,
                                                  Register carry, Register carry2,
                                                  Register idx, Register jdx,
                                                  Register yz_idx1, Register yz_idx2,
                                                  Register tmp, Register tmp3, Register tmp4) {
  assert(UseBMI2Instructions, "should be used only when BMI2 is available");

  //   jlong carry, x[], y[], z[];
  //   int kdx = ystart+1;
  //   for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
  //     huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry;
  //     jlong carry2  = (jlong)(tmp3 >>> 64);
  //     huge_128 tmp4 = (y[idx] * rdx) + z[kdx+idx] + carry2;
  //     carry  = (jlong)(tmp4 >>> 64);
  //     z[kdx+idx+1] = (jlong)tmp3;
  //     z[kdx+idx] = (jlong)tmp4;
  //   }
  //   idx += 2;
  //   if (idx > 0) {
  //     yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry;
  //     z[kdx+idx] = (jlong)yz_idx1;
  //     carry  = (jlong)(yz_idx1 >>> 64);
  //   }
  //

  Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;

  movl(jdx, idx);
  andl(jdx, 0xFFFFFFFC);
  shrl(jdx, 2);

  bind(L_third_loop);
  subl(jdx, 1);
  jcc(Assembler::negative, L_third_loop_exit);
  subl(idx, 4);

  movq(yz_idx1, Address(y, idx, Address::times_4,  8));
  rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
  movq(yz_idx2, Address(y, idx, Address::times_4,  0));
  rorxq(yz_idx2, yz_idx2, 32);

  mulxq(tmp4, tmp3, yz_idx1);  //  yz_idx1 * rdx -> tmp4:tmp3
  mulxq(carry2, tmp, yz_idx2); //  yz_idx2 * rdx -> carry2:tmp

  movq(yz_idx1, Address(z, idx, Address::times_4,  8));
  rorxq(yz_idx1, yz_idx1, 32);
  movq(yz_idx2, Address(z, idx, Address::times_4,  0));
  rorxq(yz_idx2, yz_idx2, 32);

  if (VM_Version::supports_adx()) {
    adcxq(tmp3, carry);
    adoxq(tmp3, yz_idx1);

    adcxq(tmp4, tmp);
    adoxq(tmp4, yz_idx2);

    movl(carry, 0); // does not affect flags
    adcxq(carry2, carry);
    adoxq(carry2, carry);
  } else {
    add2_with_carry(tmp4, tmp3, carry, yz_idx1);
    add2_with_carry(carry2, tmp4, tmp, yz_idx2);
  }
  movq(carry, carry2);

  movl(Address(z, idx, Address::times_4, 12), tmp3);
  shrq(tmp3, 32);
  movl(Address(z, idx, Address::times_4,  8), tmp3);

  movl(Address(z, idx, Address::times_4,  4), tmp4);
  shrq(tmp4, 32);
  movl(Address(z, idx, Address::times_4,  0), tmp4);

  jmp(L_third_loop);

  bind (L_third_loop_exit);

  andl (idx, 0x3);
  jcc(Assembler::zero, L_post_third_loop_done);

  Label L_check_1;
  subl(idx, 2);
  jcc(Assembler::negative, L_check_1);

  movq(yz_idx1, Address(y, idx, Address::times_4,  0));
  rorxq(yz_idx1, yz_idx1, 32);
  mulxq(tmp4, tmp3, yz_idx1); //  yz_idx1 * rdx -> tmp4:tmp3
  movq(yz_idx2, Address(z, idx, Address::times_4,  0));
  rorxq(yz_idx2, yz_idx2, 32);

  add2_with_carry(tmp4, tmp3, carry, yz_idx2);

  movl(Address(z, idx, Address::times_4,  4), tmp3);
  shrq(tmp3, 32);
  movl(Address(z, idx, Address::times_4,  0), tmp3);
  movq(carry, tmp4);

  bind (L_check_1);
  addl (idx, 0x2);
  andl (idx, 0x1);
  subl(idx, 1);
  jcc(Assembler::negative, L_post_third_loop_done);
  movl(tmp4, Address(y, idx, Address::times_4,  0));
  mulxq(carry2, tmp3, tmp4);  //  tmp4 * rdx -> carry2:tmp3
  movl(tmp4, Address(z, idx, Address::times_4,  0));

  add2_with_carry(carry2, tmp3, tmp4, carry);
  movl(Address(z, idx, Address::times_4,  0), tmp3);
  shrq(tmp3, 32);

  shlq(carry2, 32);
  orq(tmp3, carry2);
  movq(carry, tmp3);

  bind(L_post_third_loop_done);
}

/**
 * Code for BigInteger::multiplyToLen() intrinsic.
 *
 * rdi: x
 * rax: xlen
 * rsi: y
 * rcx: ylen
 * r8:  z
 * r11: zlen
 * r12: tmp1
 * r13: tmp2
 * r14: tmp3
 * r15: tmp4
 * rbx: tmp5
 *
 */
void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
                                     Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) {
  ShortBranchVerifier sbv(this);
  assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx);

  push(tmp1);
  push(tmp2);
  push(tmp3);
  push(tmp4);
  push(tmp5);

  push(xlen);
  push(zlen);

  const Register idx = tmp1;
  const Register kdx = tmp2;
  const Register xstart = tmp3;

  const Register y_idx = tmp4;
  const Register carry = tmp5;
  const Register product  = xlen;
  const Register x_xstart = zlen;  // reuse register

  // First Loop.
  //
  //  final static long LONG_MASK = 0xffffffffL;
  //  int xstart = xlen - 1;
  //  int ystart = ylen - 1;
  //  long carry = 0;
  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
  //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
  //    z[kdx] = (int)product;
  //    carry = product >>> 32;
  //  }
  //  z[xstart] = (int)carry;
  //

  movl(idx, ylen);      // idx = ylen;
  movl(kdx, zlen);      // kdx = xlen+ylen;
  xorq(carry, carry);   // carry = 0;

  Label L_done;

  movl(xstart, xlen);
  decrementl(xstart);
  jcc(Assembler::negative, L_done);

  multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);

  Label L_second_loop;
  testl(kdx, kdx);
  jcc(Assembler::zero, L_second_loop);

  Label L_carry;
  subl(kdx, 1);
  jcc(Assembler::zero, L_carry);

  movl(Address(z, kdx, Address::times_4,  0), carry);
  shrq(carry, 32);
  subl(kdx, 1);

  bind(L_carry);
  movl(Address(z, kdx, Address::times_4,  0), carry);

  // Second and third (nested) loops.
  //
  // for (int i = xstart-1; i >= 0; i--) { // Second loop
  //   carry = 0;
  //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
  //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
  //                    (z[k] & LONG_MASK) + carry;
  //     z[k] = (int)product;
  //     carry = product >>> 32;
  //   }
  //   z[i] = (int)carry;
  // }
  //
  // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx

  const Register jdx = tmp1;

  bind(L_second_loop);
  xorl(carry, carry);    // carry = 0;
  movl(jdx, ylen);       // j = ystart+1

  subl(xstart, 1);       // i = xstart-1;
  jcc(Assembler::negative, L_done);

  push (z);

  Label L_last_x;
  lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j
  subl(xstart, 1);       // i = xstart-1;
  jcc(Assembler::negative, L_last_x);

  if (UseBMI2Instructions) {
    movq(rdx,  Address(x, xstart, Address::times_4,  0));
    rorxq(rdx, rdx, 32); // convert big-endian to little-endian
  } else {
    movq(x_xstart, Address(x, xstart, Address::times_4,  0));
    rorq(x_xstart, 32);  // convert big-endian to little-endian
  }

  Label L_third_loop_prologue;
  bind(L_third_loop_prologue);

  push (x);
  push (xstart);
  push (ylen);

  if (UseBMI2Instructions) {
    multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4);
  } else { // !UseBMI2Instructions
    multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
  }

  pop(ylen);
  pop(xlen);
  pop(x);
  pop(z);

  movl(tmp3, xlen);
  addl(tmp3, 1);
  movl(Address(z, tmp3, Address::times_4,  0), carry);
  subl(tmp3, 1);
  jccb(Assembler::negative, L_done);

  shrq(carry, 32);
  movl(Address(z, tmp3, Address::times_4,  0), carry);
  jmp(L_second_loop);

  // Next infrequent code is moved outside loops.
  bind(L_last_x);
  if (UseBMI2Instructions) {
    movl(rdx, Address(x,  0));
  } else {
    movl(x_xstart, Address(x,  0));
  }
  jmp(L_third_loop_prologue);

  bind(L_done);

  pop(zlen);
  pop(xlen);

  pop(tmp5);
  pop(tmp4);
  pop(tmp3);
  pop(tmp2);
  pop(tmp1);
}
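// A sketch of the Java loop vectorized_mismatch() accelerates (in the spirit
// of jdk.internal.util.ArraysSupport.vectorizedMismatch; illustrative only).
// The sketch returns the index of the first mismatching element, or -1 if
// the compared ranges are equal:
//
//   static long mismatch(byte[] a, byte[] b, int length) {
//     for (int i = 0; i < length; i++) {
//       if (a[i] != b[i]) {
//         return i;
//       }
//     }
//     return -1;
//   }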
void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
                                         Register result, Register tmp1, Register tmp2, XMMRegister rymm0, XMMRegister rymm1, XMMRegister rymm2){
  assert(UseSSE42Intrinsics, "SSE4.2 must be enabled.");
  Label VECTOR16_LOOP, VECTOR8_LOOP, VECTOR4_LOOP;
  Label VECTOR8_TAIL, VECTOR4_TAIL;
  Label VECTOR32_NOT_EQUAL, VECTOR16_NOT_EQUAL, VECTOR8_NOT_EQUAL, VECTOR4_NOT_EQUAL;
  Label SAME_TILL_END, DONE;
  Label BYTES_LOOP, BYTES_TAIL, BYTES_NOT_EQUAL;

  // scale is in rcx on both Win64 and Unix
  ShortBranchVerifier sbv(this);

  shlq(length);
  xorq(result, result);

  if ((AVX3Threshold == 0) && (UseAVX > 2) &&
      VM_Version::supports_avx512vlbw()) {
    Label VECTOR64_LOOP, VECTOR64_NOT_EQUAL, VECTOR32_TAIL;

    cmpq(length, 64);
    jcc(Assembler::less, VECTOR32_TAIL);

    movq(tmp1, length);
    andq(tmp1, 0x3F);      // tail count
    andq(length, ~(0x3F)); // vector count

    bind(VECTOR64_LOOP);
    // AVX512 code to compare 64 byte vectors.
    evmovdqub(rymm0, Address(obja, result), Assembler::AVX_512bit);
    evpcmpeqb(k7, rymm0, Address(objb, result), Assembler::AVX_512bit);
    kortestql(k7, k7);
    jcc(Assembler::aboveEqual, VECTOR64_NOT_EQUAL);     // mismatch
    addq(result, 64);
    subq(length, 64);
    jccb(Assembler::notZero, VECTOR64_LOOP);

    //bind(VECTOR64_TAIL);
    testq(tmp1, tmp1);
    jcc(Assembler::zero, SAME_TILL_END);

    //bind(VECTOR64_TAIL);
    // AVX512 code to compare up to 63 byte vectors.
    mov64(tmp2, 0xFFFFFFFFFFFFFFFF);
    shlxq(tmp2, tmp2, tmp1);
    notq(tmp2);
    kmovql(k3, tmp2);

    evmovdqub(rymm0, k3, Address(obja, result), Assembler::AVX_512bit);
    evpcmpeqb(k7, k3, rymm0, Address(objb, result), Assembler::AVX_512bit);

    ktestql(k7, k3);
    jcc(Assembler::below, SAME_TILL_END);     // not mismatch

    bind(VECTOR64_NOT_EQUAL);
    kmovql(tmp1, k7);
    notq(tmp1);
    tzcntq(tmp1, tmp1);
    addq(result, tmp1);
    shrq(result);
    jmp(DONE);
    bind(VECTOR32_TAIL);
  }

  cmpq(length, 8);
  jcc(Assembler::equal, VECTOR8_LOOP);
  jcc(Assembler::less, VECTOR4_TAIL);

  if (UseAVX >= 2) {
    Label VECTOR16_TAIL, VECTOR32_LOOP;

    cmpq(length, 16);
    jcc(Assembler::equal, VECTOR16_LOOP);
    jcc(Assembler::less, VECTOR8_LOOP);

    cmpq(length, 32);
    jccb(Assembler::less, VECTOR16_TAIL);

    subq(length, 32);
    bind(VECTOR32_LOOP);
    vmovdqu(rymm0, Address(obja, result));
    vmovdqu(rymm1, Address(objb, result));
    vpxor(rymm2, rymm0, rymm1, Assembler::AVX_256bit);
    vptest(rymm2, rymm2);
    jcc(Assembler::notZero, VECTOR32_NOT_EQUAL); // mismatch found
    addq(result, 32);
    subq(length, 32);
    jcc(Assembler::greaterEqual, VECTOR32_LOOP);
    addq(length, 32);
    jcc(Assembler::equal, SAME_TILL_END);
    // falling through if less than 32 bytes left // close the branch here.
8304 8305 bind(VECTOR16_TAIL); 8306 cmpq(length, 16); 8307 jccb(Assembler::less, VECTOR8_TAIL); 8308 bind(VECTOR16_LOOP); 8309 movdqu(rymm0, Address(obja, result)); 8310 movdqu(rymm1, Address(objb, result)); 8311 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_128bit); 8312 ptest(rymm2, rymm2); 8313 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found 8314 addq(result, 16); 8315 subq(length, 16); 8316 jcc(Assembler::equal, SAME_TILL_END); 8317 //falling through if less than 16 bytes left 8318 } else {//regular intrinsics 8319 8320 cmpq(length, 16); 8321 jccb(Assembler::less, VECTOR8_TAIL); 8322 8323 subq(length, 16); 8324 bind(VECTOR16_LOOP); 8325 movdqu(rymm0, Address(obja, result)); 8326 movdqu(rymm1, Address(objb, result)); 8327 pxor(rymm0, rymm1); 8328 ptest(rymm0, rymm0); 8329 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found 8330 addq(result, 16); 8331 subq(length, 16); 8332 jccb(Assembler::greaterEqual, VECTOR16_LOOP); 8333 addq(length, 16); 8334 jcc(Assembler::equal, SAME_TILL_END); 8335 //falling through if less than 16 bytes left 8336 } 8337 8338 bind(VECTOR8_TAIL); 8339 cmpq(length, 8); 8340 jccb(Assembler::less, VECTOR4_TAIL); 8341 bind(VECTOR8_LOOP); 8342 movq(tmp1, Address(obja, result)); 8343 movq(tmp2, Address(objb, result)); 8344 xorq(tmp1, tmp2); 8345 testq(tmp1, tmp1); 8346 jcc(Assembler::notZero, VECTOR8_NOT_EQUAL);//mismatch found 8347 addq(result, 8); 8348 subq(length, 8); 8349 jcc(Assembler::equal, SAME_TILL_END); 8350 //falling through if less than 8 bytes left 8351 8352 bind(VECTOR4_TAIL); 8353 cmpq(length, 4); 8354 jccb(Assembler::less, BYTES_TAIL); 8355 bind(VECTOR4_LOOP); 8356 movl(tmp1, Address(obja, result)); 8357 xorl(tmp1, Address(objb, result)); 8358 testl(tmp1, tmp1); 8359 jcc(Assembler::notZero, VECTOR4_NOT_EQUAL);//mismatch found 8360 addq(result, 4); 8361 subq(length, 4); 8362 jcc(Assembler::equal, SAME_TILL_END); 8363 //falling through if less than 4 bytes left 8364 8365 bind(BYTES_TAIL); 8366 bind(BYTES_LOOP); 8367 load_unsigned_byte(tmp1, Address(obja, result)); 8368 load_unsigned_byte(tmp2, Address(objb, result)); 8369 xorl(tmp1, tmp2); 8370 testl(tmp1, tmp1); 8371 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 8372 decq(length); 8373 jcc(Assembler::zero, SAME_TILL_END); 8374 incq(result); 8375 load_unsigned_byte(tmp1, Address(obja, result)); 8376 load_unsigned_byte(tmp2, Address(objb, result)); 8377 xorl(tmp1, tmp2); 8378 testl(tmp1, tmp1); 8379 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 8380 decq(length); 8381 jcc(Assembler::zero, SAME_TILL_END); 8382 incq(result); 8383 load_unsigned_byte(tmp1, Address(obja, result)); 8384 load_unsigned_byte(tmp2, Address(objb, result)); 8385 xorl(tmp1, tmp2); 8386 testl(tmp1, tmp1); 8387 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 8388 jmp(SAME_TILL_END); 8389 8390 if (UseAVX >= 2) { 8391 bind(VECTOR32_NOT_EQUAL); 8392 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_256bit); 8393 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_256bit); 8394 vpxor(rymm0, rymm0, rymm2, Assembler::AVX_256bit); 8395 vpmovmskb(tmp1, rymm0); 8396 bsfq(tmp1, tmp1); 8397 addq(result, tmp1); 8398 shrq(result); 8399 jmp(DONE); 8400 } 8401 8402 bind(VECTOR16_NOT_EQUAL); 8403 if (UseAVX >= 2) { 8404 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_128bit); 8405 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_128bit); 8406 pxor(rymm0, rymm2); 8407 } else { 8408 pcmpeqb(rymm2, rymm2); 8409 pxor(rymm0, rymm1); 8410 pcmpeqb(rymm0, rymm1); 8411 pxor(rymm0, rymm2); 8412 } 8413 pmovmskb(tmp1, rymm0); 8414 
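  // After the xor with all-ones above, rymm0 holds 0xFF exactly at the byte
  // positions that differ, and pmovmskb has gathered those bytes' MSBs into
  // tmp1. bsfq thus finds the byte offset of the first mismatch; the final
  // shrq(result) shifts right by cl (= log2_array_indxscale, see the note at
  // the top of this function) to turn the byte offset back into an element
  // index.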
bsfq(tmp1, tmp1); 8415 addq(result, tmp1); 8416 shrq(result); 8417 jmpb(DONE); 8418 8419 bind(VECTOR8_NOT_EQUAL); 8420 bind(VECTOR4_NOT_EQUAL); 8421 bsfq(tmp1, tmp1); 8422 shrq(tmp1, 3); 8423 addq(result, tmp1); 8424 bind(BYTES_NOT_EQUAL); 8425 shrq(result); 8426 jmpb(DONE); 8427 8428 bind(SAME_TILL_END); 8429 mov64(result, -1); 8430 8431 bind(DONE); 8432 } 8433 8434 //Helper functions for square_to_len() 8435 8436 /** 8437 * Store the squares of x[], right shifted one bit (divided by 2) into z[] 8438 * Preserves x and z and modifies rest of the registers. 8439 */ 8440 void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 8441 // Perform square and right shift by 1 8442 // Handle odd xlen case first, then for even xlen do the following 8443 // jlong carry = 0; 8444 // for (int j=0, i=0; j < xlen; j+=2, i+=4) { 8445 // huge_128 product = x[j:j+1] * x[j:j+1]; 8446 // z[i:i+1] = (carry << 63) | (jlong)(product >>> 65); 8447 // z[i+2:i+3] = (jlong)(product >>> 1); 8448 // carry = (jlong)product; 8449 // } 8450 8451 xorq(tmp5, tmp5); // carry 8452 xorq(rdxReg, rdxReg); 8453 xorl(tmp1, tmp1); // index for x 8454 xorl(tmp4, tmp4); // index for z 8455 8456 Label L_first_loop, L_first_loop_exit; 8457 8458 testl(xlen, 1); 8459 jccb(Assembler::zero, L_first_loop); //jump if xlen is even 8460 8461 // Square and right shift by 1 the odd element using 32 bit multiply 8462 movl(raxReg, Address(x, tmp1, Address::times_4, 0)); 8463 imulq(raxReg, raxReg); 8464 shrq(raxReg, 1); 8465 adcq(tmp5, 0); 8466 movq(Address(z, tmp4, Address::times_4, 0), raxReg); 8467 incrementl(tmp1); 8468 addl(tmp4, 2); 8469 8470 // Square and right shift by 1 the rest using 64 bit multiply 8471 bind(L_first_loop); 8472 cmpptr(tmp1, xlen); 8473 jccb(Assembler::equal, L_first_loop_exit); 8474 8475 // Square 8476 movq(raxReg, Address(x, tmp1, Address::times_4, 0)); 8477 rorq(raxReg, 32); // convert big-endian to little-endian 8478 mulq(raxReg); // 64-bit multiply rax * rax -> rdx:rax 8479 8480 // Right shift by 1 and save carry 8481 shrq(tmp5, 1); // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1 8482 rcrq(rdxReg, 1); 8483 rcrq(raxReg, 1); 8484 adcq(tmp5, 0); 8485 8486 // Store result in z 8487 movq(Address(z, tmp4, Address::times_4, 0), rdxReg); 8488 movq(Address(z, tmp4, Address::times_4, 8), raxReg); 8489 8490 // Update indices for x and z 8491 addl(tmp1, 2); 8492 addl(tmp4, 4); 8493 jmp(L_first_loop); 8494 8495 bind(L_first_loop_exit); 8496 } 8497 8498 8499 /** 8500 * Perform the following multiply add operation using BMI2 instructions 8501 * carry:sum = sum + op1*op2 + carry 8502 * op2 should be in rdx 8503 * op2 is preserved, all other registers are modified 8504 */ 8505 void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) { 8506 // assert op2 is rdx 8507 mulxq(tmp2, op1, op1); // op1 * op2 -> tmp2:op1 8508 addq(sum, carry); 8509 adcq(tmp2, 0); 8510 addq(sum, op1); 8511 adcq(tmp2, 0); 8512 movq(carry, tmp2); 8513 } 8514 8515 /** 8516 * Perform the following multiply add operation: 8517 * carry:sum = sum + op1*op2 + carry 8518 * Preserves op1, op2 and modifies rest of registers 8519 */ 8520 void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) { 8521 // rdx:rax = op1 * op2 8522 movq(raxReg, op2); 8523 mulq(op1); 8524 8525 // rdx:rax = sum + carry + rdx:rax 8526 addq(sum, carry); 8527 
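  // The adcq below captures the carry-out of (sum + carry) in rdx; the
  // second addq/adcq pair then adds in the low product half. No 128-bit
  // overflow is possible: at worst (2^64-1)^2 + 2*(2^64-1) = 2^128 - 1.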
  adcq(rdxReg, 0);
  addq(sum, raxReg);
  adcq(rdxReg, 0);

  // carry:sum = rdx:sum
  movq(carry, rdxReg);
}

/**
 * Add 64 bit long carry into z[] with carry propagation.
 * Preserves z and carry register values and modifies rest of registers.
 *
 */
void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) {
  Label L_fourth_loop, L_fourth_loop_exit;

  movl(tmp1, 1);
  subl(zlen, 2);
  addq(Address(z, zlen, Address::times_4, 0), carry);

  bind(L_fourth_loop);
  jccb(Assembler::carryClear, L_fourth_loop_exit);
  subl(zlen, 2);
  jccb(Assembler::negative, L_fourth_loop_exit);
  addq(Address(z, zlen, Address::times_4, 0), tmp1);
  jmp(L_fourth_loop);
  bind(L_fourth_loop_exit);
}

/**
 * Shift z[] left by 1 bit.
 * Preserves x, len, z and zlen registers and modifies rest of the registers.
 *
 */
void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) {

  Label L_fifth_loop, L_fifth_loop_exit;

  // Fifth loop
  // Perform primitiveLeftShift(z, zlen, 1)

  const Register prev_carry = tmp1;
  const Register new_carry = tmp4;
  const Register value = tmp2;
  const Register zidx = tmp3;

  // int zidx, carry;
  // long value;
  // carry = 0;
  // for (zidx = zlen-2; zidx >= 0; zidx -= 2) {
  //   (carry:value) = (z[zidx] << 1) | carry;
  //   z[zidx] = value;
  // }

  movl(zidx, zlen);
  xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register

  bind(L_fifth_loop);
  decl(zidx);  // Use decl to preserve carry flag
  decl(zidx);
  jccb(Assembler::negative, L_fifth_loop_exit);

  if (UseBMI2Instructions) {
    movq(value, Address(z, zidx, Address::times_4, 0));
    rclq(value, 1);
    rorxq(value, value, 32);
    movq(Address(z, zidx, Address::times_4, 0), value);  // Store back in big endian form
  }
  else {
    // clear new_carry
    xorl(new_carry, new_carry);

    // Shift z[zidx] by 1, or in previous carry and save new carry
    movq(value, Address(z, zidx, Address::times_4, 0));
    shlq(value, 1);
    adcl(new_carry, 0);

    orq(value, prev_carry);
    rorq(value, 0x20);
    movq(Address(z, zidx, Address::times_4, 0), value);  // Store back in big endian form

    // Set previous carry = new carry
    movl(prev_carry, new_carry);
  }
  jmp(L_fifth_loop);

  bind(L_fifth_loop_exit);
}


/**
 * Code for BigInteger::squareToLen() intrinsic
 *
 * rdi: x
 * rsi: len
 * r8:  z
 * rcx: zlen
 * r12: tmp1
 * r13: tmp2
 * r14: tmp3
 * r15: tmp4
 * rbx: tmp5
 *
 */
void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {

  Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, L_last_x, L_multiply;
  push(tmp1);
  push(tmp2);
  push(tmp3);
  push(tmp4);
  push(tmp5);

  // First loop
  // Store the squares, right shifted one bit (i.e., divided by 2).
  square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg);

  // Add in off-diagonal sums.
  //
  // Second, third (nested) and fourth loops.
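  //
  // These loops accumulate the off-diagonal products x[i]*x[j] (i != j)
  // into z. Together with the first loop this computes
  //   x^2 = sum(x[i]^2) + 2 * sum(x[i]*x[j], i < j):
  // the squares were stored pre-shifted right by one bit, the off-diagonal
  // products are added in un-doubled, and the final left shift by one bit
  // (fifth loop) restores the squares while doubling the cross products.
  // The diagonal's low bit, lost in the pre-shift, is OR'ed back in at the
  // very end (z[zlen-1] |= x[len-1] & 1).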
  // zlen += 2;
  // for (int xidx = len-2, zidx = zlen-4; xidx > 0; xidx -= 2, zidx -= 4) {
  //   carry = 0;
  //   long op2 = x[xidx:xidx+1];
  //   for (int j = xidx-2, k = zidx; j >= 0; j -= 2) {
  //     k -= 2;
  //     long op1 = x[j:j+1];
  //     long sum = z[k:k+1];
  //     carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs);
  //     z[k:k+1] = sum;
  //   }
  //   add_one_64(z, k, carry, tmp_regs);
  // }

  const Register carry = tmp5;
  const Register sum = tmp3;
  const Register op1 = tmp4;
  Register op2 = tmp2;

  push(zlen);
  push(len);
  addl(zlen, 2);
  bind(L_second_loop);
  xorq(carry, carry);
  subl(zlen, 4);
  subl(len, 2);
  push(zlen);
  push(len);
  cmpl(len, 0);
  jccb(Assembler::lessEqual, L_second_loop_exit);

  // Multiply an array by one 64 bit long.
  if (UseBMI2Instructions) {
    op2 = rdxReg;
    movq(op2, Address(x, len, Address::times_4, 0));
    rorxq(op2, op2, 32);
  }
  else {
    movq(op2, Address(x, len, Address::times_4, 0));
    rorq(op2, 32);
  }

  bind(L_third_loop);
  decrementl(len);
  jccb(Assembler::negative, L_third_loop_exit);
  decrementl(len);
  jccb(Assembler::negative, L_last_x);

  movq(op1, Address(x, len, Address::times_4, 0));
  rorq(op1, 32);

  bind(L_multiply);
  subl(zlen, 2);
  movq(sum, Address(z, zlen, Address::times_4, 0));

  // Multiply 64 bit by 64 bit, add the low 64 bits of the product into sum
  // and keep the upper 64 bits as the new carry.
  if (UseBMI2Instructions) {
    multiply_add_64_bmi2(sum, op1, op2, carry, tmp2);
  }
  else {
    multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
  }

  movq(Address(z, zlen, Address::times_4, 0), sum);

  jmp(L_third_loop);
  bind(L_third_loop_exit);

  // Fourth loop
  // Add 64 bit long carry into z with carry propagation.
  // Uses the zlen value adjusted above.
  add_one_64(z, zlen, carry, tmp1);

  pop(len);
  pop(zlen);
  jmp(L_second_loop);

  // Next infrequent code is moved outside loops.
  bind(L_last_x);
  movl(op1, Address(x, 0));
  jmp(L_multiply);

  bind(L_second_loop_exit);
  pop(len);
  pop(zlen);
  pop(len);
  pop(zlen);

  // Fifth loop
  // Shift z left 1 bit.
  lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4);

  // z[zlen-1] |= x[len-1] & 1;
  movl(tmp3, Address(x, len, Address::times_4, -4));
  andl(tmp3, 1);
  orl(Address(z, zlen, Address::times_4, -4), tmp3);

  pop(tmp5);
  pop(tmp4);
  pop(tmp3);
  pop(tmp2);
  pop(tmp1);
}

/**
 * Helper function for mul_add()
 * Multiply in[] by the int k and add the result to out[] starting at offset
 * offs, using a 128 bit by 32 bit multiply; the carry is returned in tmp5.
 * Only the quad-int-aligned portion of in[] is processed here; the caller
 * handles the remaining (up to three) ints.
 * k is in rdxReg when BMI2 instructions are used, otherwise in tmp2.
 * This function preserves the out, in and k registers.
 * len and offset point to the appropriate index in "in" and "out" respectively.
 * tmp5 holds the carry.
 * other registers are temporary and are modified.
8761 * 8762 */ 8763 void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in, 8764 Register offset, Register len, Register tmp1, Register tmp2, Register tmp3, 8765 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 8766 8767 Label L_first_loop, L_first_loop_exit; 8768 8769 movl(tmp1, len); 8770 shrl(tmp1, 2); 8771 8772 bind(L_first_loop); 8773 subl(tmp1, 1); 8774 jccb(Assembler::negative, L_first_loop_exit); 8775 8776 subl(len, 4); 8777 subl(offset, 4); 8778 8779 Register op2 = tmp2; 8780 const Register sum = tmp3; 8781 const Register op1 = tmp4; 8782 const Register carry = tmp5; 8783 8784 if (UseBMI2Instructions) { 8785 op2 = rdxReg; 8786 } 8787 8788 movq(op1, Address(in, len, Address::times_4, 8)); 8789 rorq(op1, 32); 8790 movq(sum, Address(out, offset, Address::times_4, 8)); 8791 rorq(sum, 32); 8792 if (UseBMI2Instructions) { 8793 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 8794 } 8795 else { 8796 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 8797 } 8798 // Store back in big endian from little endian 8799 rorq(sum, 0x20); 8800 movq(Address(out, offset, Address::times_4, 8), sum); 8801 8802 movq(op1, Address(in, len, Address::times_4, 0)); 8803 rorq(op1, 32); 8804 movq(sum, Address(out, offset, Address::times_4, 0)); 8805 rorq(sum, 32); 8806 if (UseBMI2Instructions) { 8807 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 8808 } 8809 else { 8810 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 8811 } 8812 // Store back in big endian from little endian 8813 rorq(sum, 0x20); 8814 movq(Address(out, offset, Address::times_4, 0), sum); 8815 8816 jmp(L_first_loop); 8817 bind(L_first_loop_exit); 8818 } 8819 8820 /** 8821 * Code for BigInteger::mulAdd() intrinsic 8822 * 8823 * rdi: out 8824 * rsi: in 8825 * r11: offs (out.length - offset) 8826 * rcx: len 8827 * r8: k 8828 * r12: tmp1 8829 * r13: tmp2 8830 * r14: tmp3 8831 * r15: tmp4 8832 * rbx: tmp5 8833 * Multiply the in[] by word k and add to out[], return the carry in rax 8834 */ 8835 void MacroAssembler::mul_add(Register out, Register in, Register offs, 8836 Register len, Register k, Register tmp1, Register tmp2, Register tmp3, 8837 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 8838 8839 Label L_carry, L_last_in, L_done; 8840 8841 // carry = 0; 8842 // for (int j=len-1; j >= 0; j--) { 8843 // long product = (in[j] & LONG_MASK) * kLong + 8844 // (out[offs] & LONG_MASK) + carry; 8845 // out[offs--] = (int)product; 8846 // carry = product >>> 32; 8847 // } 8848 // 8849 push(tmp1); 8850 push(tmp2); 8851 push(tmp3); 8852 push(tmp4); 8853 push(tmp5); 8854 8855 Register op2 = tmp2; 8856 const Register sum = tmp3; 8857 const Register op1 = tmp4; 8858 const Register carry = tmp5; 8859 8860 if (UseBMI2Instructions) { 8861 op2 = rdxReg; 8862 movl(op2, k); 8863 } 8864 else { 8865 movl(op2, k); 8866 } 8867 8868 xorq(carry, carry); 8869 8870 //First loop 8871 8872 //Multiply in[] by k in a 4 way unrolled loop using 128 bit by 32 bit multiply 8873 //The carry is in tmp5 8874 mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg); 8875 8876 //Multiply the trailing in[] entry using 64 bit by 32 bit, if any 8877 decrementl(len); 8878 jccb(Assembler::negative, L_carry); 8879 decrementl(len); 8880 jccb(Assembler::negative, L_last_in); 8881 8882 movq(op1, Address(in, len, Address::times_4, 0)); 8883 rorq(op1, 32); 8884 8885 subl(offs, 2); 8886 movq(sum, Address(out, offs, Address::times_4, 0)); 8887 rorq(sum, 32); 8888 8889 if (UseBMI2Instructions) { 8890 
multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 8891 } 8892 else { 8893 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 8894 } 8895 8896 // Store back in big endian from little endian 8897 rorq(sum, 0x20); 8898 movq(Address(out, offs, Address::times_4, 0), sum); 8899 8900 testl(len, len); 8901 jccb(Assembler::zero, L_carry); 8902 8903 //Multiply the last in[] entry, if any 8904 bind(L_last_in); 8905 movl(op1, Address(in, 0)); 8906 movl(sum, Address(out, offs, Address::times_4, -4)); 8907 8908 movl(raxReg, k); 8909 mull(op1); //tmp4 * eax -> edx:eax 8910 addl(sum, carry); 8911 adcl(rdxReg, 0); 8912 addl(sum, raxReg); 8913 adcl(rdxReg, 0); 8914 movl(carry, rdxReg); 8915 8916 movl(Address(out, offs, Address::times_4, -4), sum); 8917 8918 bind(L_carry); 8919 //return tmp5/carry as carry in rax 8920 movl(rax, carry); 8921 8922 bind(L_done); 8923 pop(tmp5); 8924 pop(tmp4); 8925 pop(tmp3); 8926 pop(tmp2); 8927 pop(tmp1); 8928 } 8929 #endif 8930 8931 /** 8932 * Emits code to update CRC-32 with a byte value according to constants in table 8933 * 8934 * @param [in,out]crc Register containing the crc. 8935 * @param [in]val Register containing the byte to fold into the CRC. 8936 * @param [in]table Register containing the table of crc constants. 8937 * 8938 * uint32_t crc; 8939 * val = crc_table[(val ^ crc) & 0xFF]; 8940 * crc = val ^ (crc >> 8); 8941 * 8942 */ 8943 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 8944 xorl(val, crc); 8945 andl(val, 0xFF); 8946 shrl(crc, 8); // unsigned shift 8947 xorl(crc, Address(table, val, Address::times_4, 0)); 8948 } 8949 8950 /** 8951 * Fold four 128-bit data chunks 8952 */ 8953 void MacroAssembler::fold_128bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) { 8954 evpclmulhdq(xtmp, xK, xcrc, Assembler::AVX_512bit); // [123:64] 8955 evpclmulldq(xcrc, xK, xcrc, Assembler::AVX_512bit); // [63:0] 8956 evpxorq(xcrc, xcrc, Address(buf, offset), Assembler::AVX_512bit /* vector_len */); 8957 evpxorq(xcrc, xcrc, xtmp, Assembler::AVX_512bit /* vector_len */); 8958 } 8959 8960 /** 8961 * Fold 128-bit data chunk 8962 */ 8963 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) { 8964 if (UseAVX > 0) { 8965 vpclmulhdq(xtmp, xK, xcrc); // [123:64] 8966 vpclmulldq(xcrc, xK, xcrc); // [63:0] 8967 vpxor(xcrc, xcrc, Address(buf, offset), 0 /* vector_len */); 8968 pxor(xcrc, xtmp); 8969 } else { 8970 movdqa(xtmp, xcrc); 8971 pclmulhdq(xtmp, xK); // [123:64] 8972 pclmulldq(xcrc, xK); // [63:0] 8973 pxor(xcrc, xtmp); 8974 movdqu(xtmp, Address(buf, offset)); 8975 pxor(xcrc, xtmp); 8976 } 8977 } 8978 8979 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) { 8980 if (UseAVX > 0) { 8981 vpclmulhdq(xtmp, xK, xcrc); 8982 vpclmulldq(xcrc, xK, xcrc); 8983 pxor(xcrc, xbuf); 8984 pxor(xcrc, xtmp); 8985 } else { 8986 movdqa(xtmp, xcrc); 8987 pclmulhdq(xtmp, xK); 8988 pclmulldq(xcrc, xK); 8989 pxor(xcrc, xbuf); 8990 pxor(xcrc, xtmp); 8991 } 8992 } 8993 8994 /** 8995 * 8-bit folds to compute 32-bit CRC 8996 * 8997 * uint64_t xcrc; 8998 * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8); 8999 */ 9000 void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) { 9001 movdl(tmp, xcrc); 9002 andl(tmp, 0xFF); 9003 movdl(xtmp, Address(table, tmp, Address::times_4, 0)); 9004 psrldq(xcrc, 1); // unsigned shift one byte 9005 pxor(xcrc, xtmp); 9006 } 9007 9008 
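// Each 8-bit fold consumes the low byte of the remainder and folds it back
// in via the table lookup. kernel_crc32 below chains eight such folds (four
// on the XMM value, then four on the general register) to produce the final
// 32-bit CRC.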
/** 9009 * uint32_t crc; 9010 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8); 9011 */ 9012 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 9013 movl(tmp, crc); 9014 andl(tmp, 0xFF); 9015 shrl(crc, 8); 9016 xorl(crc, Address(table, tmp, Address::times_4, 0)); 9017 } 9018 9019 /** 9020 * @param crc register containing existing CRC (32-bit) 9021 * @param buf register pointing to input byte buffer (byte*) 9022 * @param len register containing number of bytes 9023 * @param table register that will contain address of CRC table 9024 * @param tmp scratch register 9025 */ 9026 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) { 9027 assert_different_registers(crc, buf, len, table, tmp, rax); 9028 9029 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned; 9030 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop; 9031 9032 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 9033 // context for the registers used, where all instructions below are using 128-bit mode 9034 // On EVEX without VL and BW, these instructions will all be AVX. 9035 lea(table, ExternalAddress(StubRoutines::crc_table_addr())); 9036 notl(crc); // ~crc 9037 cmpl(len, 16); 9038 jcc(Assembler::less, L_tail); 9039 9040 // Align buffer to 16 bytes 9041 movl(tmp, buf); 9042 andl(tmp, 0xF); 9043 jccb(Assembler::zero, L_aligned); 9044 subl(tmp, 16); 9045 addl(len, tmp); 9046 9047 align(4); 9048 BIND(L_align_loop); 9049 movsbl(rax, Address(buf, 0)); // load byte with sign extension 9050 update_byte_crc32(crc, rax, table); 9051 increment(buf); 9052 incrementl(tmp); 9053 jccb(Assembler::less, L_align_loop); 9054 9055 BIND(L_aligned); 9056 movl(tmp, len); // save 9057 shrl(len, 4); 9058 jcc(Assembler::zero, L_tail_restore); 9059 9060 // Fold total 512 bits of polynomial on each iteration 9061 if (VM_Version::supports_vpclmulqdq()) { 9062 Label Parallel_loop, L_No_Parallel; 9063 9064 cmpl(len, 8); 9065 jccb(Assembler::less, L_No_Parallel); 9066 9067 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32)); 9068 evmovdquq(xmm1, Address(buf, 0), Assembler::AVX_512bit); 9069 movdl(xmm5, crc); 9070 evpxorq(xmm1, xmm1, xmm5, Assembler::AVX_512bit); 9071 addptr(buf, 64); 9072 subl(len, 7); 9073 evshufi64x2(xmm0, xmm0, xmm0, 0x00, Assembler::AVX_512bit); //propagate the mask from 128 bits to 512 bits 9074 9075 BIND(Parallel_loop); 9076 fold_128bit_crc32_avx512(xmm1, xmm0, xmm5, buf, 0); 9077 addptr(buf, 64); 9078 subl(len, 4); 9079 jcc(Assembler::greater, Parallel_loop); 9080 9081 vextracti64x2(xmm2, xmm1, 0x01); 9082 vextracti64x2(xmm3, xmm1, 0x02); 9083 vextracti64x2(xmm4, xmm1, 0x03); 9084 jmp(L_fold_512b); 9085 9086 BIND(L_No_Parallel); 9087 } 9088 // Fold crc into first bytes of vector 9089 movdqa(xmm1, Address(buf, 0)); 9090 movdl(rax, xmm1); 9091 xorl(crc, rax); 9092 if (VM_Version::supports_sse4_1()) { 9093 pinsrd(xmm1, crc, 0); 9094 } else { 9095 pinsrw(xmm1, crc, 0); 9096 shrl(crc, 16); 9097 pinsrw(xmm1, crc, 1); 9098 } 9099 addptr(buf, 16); 9100 subl(len, 4); // len > 0 9101 jcc(Assembler::less, L_fold_tail); 9102 9103 movdqa(xmm2, Address(buf, 0)); 9104 movdqa(xmm3, Address(buf, 16)); 9105 movdqa(xmm4, Address(buf, 32)); 9106 addptr(buf, 48); 9107 subl(len, 3); 9108 jcc(Assembler::lessEqual, L_fold_512b); 9109 9110 // Fold total 512 bits of polynomial on each iteration, 9111 // 128 bits per each of 4 parallel streams. 
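  // The pclmulqdq constants loaded from crc_by128_masks + 32 fold a 128-bit
  // chunk forward across the following 512 bits of input, which is what lets
  // each of the four independent streams advance by 64 bytes per iteration
  // and keeps the carry-less multiplier pipeline busy.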
  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32));

  align(32);
  BIND(L_fold_512b_loop);
  fold_128bit_crc32(xmm1, xmm0, xmm5, buf,  0);
  fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16);
  fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32);
  fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48);
  addptr(buf, 64);
  subl(len, 4);
  jcc(Assembler::greater, L_fold_512b_loop);

  // Fold 512 bits to 128 bits.
  BIND(L_fold_512b);
  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16));
  fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2);
  fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3);
  fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4);

  // Fold any remaining 128-bit data chunks.
  BIND(L_fold_tail);
  addl(len, 3);
  jccb(Assembler::lessEqual, L_fold_128b);
  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16));

  BIND(L_fold_tail_loop);
  fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0);
  addptr(buf, 16);
  decrementl(len);
  jccb(Assembler::greater, L_fold_tail_loop);

  // Fold 128 bits in xmm1 down into 32 bits in crc register.
  BIND(L_fold_128b);
  movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()));
  if (UseAVX > 0) {
    vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
    vpand(xmm3, xmm0, xmm2, 0 /* vector_len */);
    vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
  } else {
    movdqa(xmm2, xmm0);
    pclmulqdq(xmm2, xmm1, 0x1);
    movdqa(xmm3, xmm0);
    pand(xmm3, xmm2);
    pclmulqdq(xmm0, xmm3, 0x1);
  }
  psrldq(xmm1, 8);
  psrldq(xmm2, 4);
  pxor(xmm0, xmm1);
  pxor(xmm0, xmm2);

  // 8 8-bit folds to compute 32-bit CRC.
  for (int j = 0; j < 4; j++) {
    fold_8bit_crc32(xmm0, table, xmm1, rax);
  }
  movdl(crc, xmm0); // mov 32 bits to general register
  for (int j = 0; j < 4; j++) {
    fold_8bit_crc32(crc, table, rax);
  }

  BIND(L_tail_restore);
  movl(len, tmp); // restore
  BIND(L_tail);
  andl(len, 0xf);
  jccb(Assembler::zero, L_exit);

  // Fold the remaining bytes.
  align(4);
  BIND(L_tail_loop);
  movsbl(rax, Address(buf, 0)); // load byte with sign extension
  update_byte_crc32(crc, rax, table);
  increment(buf);
  decrementl(len);
  jccb(Assembler::greater, L_tail_loop);

  BIND(L_exit);
  notl(crc); // ~crc
}

#ifdef _LP64
// S. Gueron / Information Processing Letters 112 (2012) 184
// Algorithm 4: Computing carry-less multiplication using a precomputed lookup table.
// Input: A 32 bit value B = [byte3, byte2, byte1, byte0].
// Output: the 64-bit carry-less product of B * CONST
void MacroAssembler::crc32c_ipl_alg4(Register in, uint32_t n,
                                     Register tmp1, Register tmp2, Register tmp3) {
  lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr()));
  if (n > 0) {
    addq(tmp3, n * 256 * 8);
  }
  // Q1 = TABLEExt[n][B & 0xFF];
  movl(tmp1, in);
  andl(tmp1, 0x000000FF);
  shll(tmp1, 3);
  addq(tmp1, tmp3);
  movq(tmp1, Address(tmp1, 0));

  // Q2 = TABLEExt[n][B >> 8 & 0xFF];
  movl(tmp2, in);
  shrl(tmp2, 8);
  andl(tmp2, 0x000000FF);
  shll(tmp2, 3);
  addq(tmp2, tmp3);
  movq(tmp2, Address(tmp2, 0));

  shlq(tmp2, 8);
  xorq(tmp1, tmp2);

  // Q3 = TABLEExt[n][B >> 16 & 0xFF];
  movl(tmp2, in);
  shrl(tmp2, 16);
  andl(tmp2, 0x000000FF);
  shll(tmp2, 3);
  addq(tmp2, tmp3);
  movq(tmp2, Address(tmp2, 0));

  shlq(tmp2, 16);
  xorq(tmp1, tmp2);

  // Q4 = TABLEExt[n][B >> 24 & 0xFF];
  shrl(in, 24);
  andl(in, 0x000000FF);
  shll(in, 3);
  addq(in, tmp3);
  movq(in, Address(in, 0));

  shlq(in, 24);
  xorq(in, tmp1);
  // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24;
}

void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1,
                                      Register in_out,
                                      uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported,
                                      XMMRegister w_xtmp2,
                                      Register tmp1,
                                      Register n_tmp2, Register n_tmp3) {
  if (is_pclmulqdq_supported) {
    movdl(w_xtmp1, in_out); // modified blindly

    movl(tmp1, const_or_pre_comp_const_index);
    movdl(w_xtmp2, tmp1);
    pclmulqdq(w_xtmp1, w_xtmp2, 0);

    movdq(in_out, w_xtmp1);
  } else {
    crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3);
  }
}

// Recombination Alternative 2: No bit-reflections
// T1 = (CRC_A * U1) << 1
// T2 = (CRC_B * U2) << 1
// C1 = T1 >> 32
// C2 = T2 >> 32
// T1 = T1 & 0xFFFFFFFF
// T2 = T2 & 0xFFFFFFFF
// T1 = CRC32(0, T1)
// T2 = CRC32(0, T2)
// C1 = C1 ^ T1
// C2 = C2 ^ T2
// CRC = C1 ^ C2 ^ CRC_C
void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2,
                                     XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3,
                                     Register tmp1, Register tmp2,
                                     Register n_tmp3) {
  crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
  crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3);
  shlq(in_out, 1);
  movl(tmp1, in_out);
  shrq(in_out, 32);
  xorl(tmp2, tmp2);
  crc32(tmp2, tmp1, 4);
  xorl(in_out, tmp2); // we don't care about upper 32 bit contents here
  shlq(in1, 1);
  movl(tmp1, in1);
  shrq(in1, 32);
  xorl(tmp2, tmp2);
  crc32(tmp2, tmp1, 4);
  xorl(in1, tmp2);
  xorl(in_out, in1);
  xorl(in_out, in2);
}

// Set N to a predefined value.
// Subtract it from the length of the buffer, then execute in a loop:
//   CRC_A = 0xFFFFFFFF, CRC_B = 0, CRC_C = 0
//   for i = 1 to N do
//     CRC_A = CRC32(CRC_A, A[i])
//     CRC_B = CRC32(CRC_B, B[i])
//     CRC_C = CRC32(CRC_C, C[i])
//   end for
//   Recombine
void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1,
uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 9306 Register in_out1, Register in_out2, Register in_out3, 9307 Register tmp1, Register tmp2, Register tmp3, 9308 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 9309 Register tmp4, Register tmp5, 9310 Register n_tmp6) { 9311 Label L_processPartitions; 9312 Label L_processPartition; 9313 Label L_exit; 9314 9315 bind(L_processPartitions); 9316 cmpl(in_out1, 3 * size); 9317 jcc(Assembler::less, L_exit); 9318 xorl(tmp1, tmp1); 9319 xorl(tmp2, tmp2); 9320 movq(tmp3, in_out2); 9321 addq(tmp3, size); 9322 9323 bind(L_processPartition); 9324 crc32(in_out3, Address(in_out2, 0), 8); 9325 crc32(tmp1, Address(in_out2, size), 8); 9326 crc32(tmp2, Address(in_out2, size * 2), 8); 9327 addq(in_out2, 8); 9328 cmpq(in_out2, tmp3); 9329 jcc(Assembler::less, L_processPartition); 9330 crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2, 9331 w_xtmp1, w_xtmp2, w_xtmp3, 9332 tmp4, tmp5, 9333 n_tmp6); 9334 addq(in_out2, 2 * size); 9335 subl(in_out1, 3 * size); 9336 jmp(L_processPartitions); 9337 9338 bind(L_exit); 9339 } 9340 #else 9341 void MacroAssembler::crc32c_ipl_alg4(Register in_out, uint32_t n, 9342 Register tmp1, Register tmp2, Register tmp3, 9343 XMMRegister xtmp1, XMMRegister xtmp2) { 9344 lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr())); 9345 if (n > 0) { 9346 addl(tmp3, n * 256 * 8); 9347 } 9348 // Q1 = TABLEExt[n][B & 0xFF]; 9349 movl(tmp1, in_out); 9350 andl(tmp1, 0x000000FF); 9351 shll(tmp1, 3); 9352 addl(tmp1, tmp3); 9353 movq(xtmp1, Address(tmp1, 0)); 9354 9355 // Q2 = TABLEExt[n][B >> 8 & 0xFF]; 9356 movl(tmp2, in_out); 9357 shrl(tmp2, 8); 9358 andl(tmp2, 0x000000FF); 9359 shll(tmp2, 3); 9360 addl(tmp2, tmp3); 9361 movq(xtmp2, Address(tmp2, 0)); 9362 9363 psllq(xtmp2, 8); 9364 pxor(xtmp1, xtmp2); 9365 9366 // Q3 = TABLEExt[n][B >> 16 & 0xFF]; 9367 movl(tmp2, in_out); 9368 shrl(tmp2, 16); 9369 andl(tmp2, 0x000000FF); 9370 shll(tmp2, 3); 9371 addl(tmp2, tmp3); 9372 movq(xtmp2, Address(tmp2, 0)); 9373 9374 psllq(xtmp2, 16); 9375 pxor(xtmp1, xtmp2); 9376 9377 // Q4 = TABLEExt[n][B >> 24 & 0xFF]; 9378 shrl(in_out, 24); 9379 andl(in_out, 0x000000FF); 9380 shll(in_out, 3); 9381 addl(in_out, tmp3); 9382 movq(xtmp2, Address(in_out, 0)); 9383 9384 psllq(xtmp2, 24); 9385 pxor(xtmp1, xtmp2); // Result in CXMM 9386 // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24; 9387 } 9388 9389 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1, 9390 Register in_out, 9391 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 9392 XMMRegister w_xtmp2, 9393 Register tmp1, 9394 Register n_tmp2, Register n_tmp3) { 9395 if (is_pclmulqdq_supported) { 9396 movdl(w_xtmp1, in_out); 9397 9398 movl(tmp1, const_or_pre_comp_const_index); 9399 movdl(w_xtmp2, tmp1); 9400 pclmulqdq(w_xtmp1, w_xtmp2, 0); 9401 // Keep result in XMM since GPR is 32 bit in length 9402 } else { 9403 crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3, w_xtmp1, w_xtmp2); 9404 } 9405 } 9406 9407 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 9408 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 9409 Register tmp1, Register tmp2, 9410 Register n_tmp3) { 9411 crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 9412 
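  // This 32-bit variant performs the same recombination as the 64-bit one
  // above, but the 64-bit carry-less products must be shifted and split
  // inside the XMM registers (psllq/psrlq below), since the general-purpose
  // registers are only 32 bits wide here.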
crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 9413 9414 psllq(w_xtmp1, 1); 9415 movdl(tmp1, w_xtmp1); 9416 psrlq(w_xtmp1, 32); 9417 movdl(in_out, w_xtmp1); 9418 9419 xorl(tmp2, tmp2); 9420 crc32(tmp2, tmp1, 4); 9421 xorl(in_out, tmp2); 9422 9423 psllq(w_xtmp2, 1); 9424 movdl(tmp1, w_xtmp2); 9425 psrlq(w_xtmp2, 32); 9426 movdl(in1, w_xtmp2); 9427 9428 xorl(tmp2, tmp2); 9429 crc32(tmp2, tmp1, 4); 9430 xorl(in1, tmp2); 9431 xorl(in_out, in1); 9432 xorl(in_out, in2); 9433 } 9434 9435 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 9436 Register in_out1, Register in_out2, Register in_out3, 9437 Register tmp1, Register tmp2, Register tmp3, 9438 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 9439 Register tmp4, Register tmp5, 9440 Register n_tmp6) { 9441 Label L_processPartitions; 9442 Label L_processPartition; 9443 Label L_exit; 9444 9445 bind(L_processPartitions); 9446 cmpl(in_out1, 3 * size); 9447 jcc(Assembler::less, L_exit); 9448 xorl(tmp1, tmp1); 9449 xorl(tmp2, tmp2); 9450 movl(tmp3, in_out2); 9451 addl(tmp3, size); 9452 9453 bind(L_processPartition); 9454 crc32(in_out3, Address(in_out2, 0), 4); 9455 crc32(tmp1, Address(in_out2, size), 4); 9456 crc32(tmp2, Address(in_out2, size*2), 4); 9457 crc32(in_out3, Address(in_out2, 0+4), 4); 9458 crc32(tmp1, Address(in_out2, size+4), 4); 9459 crc32(tmp2, Address(in_out2, size*2+4), 4); 9460 addl(in_out2, 8); 9461 cmpl(in_out2, tmp3); 9462 jcc(Assembler::less, L_processPartition); 9463 9464 push(tmp3); 9465 push(in_out1); 9466 push(in_out2); 9467 tmp4 = tmp3; 9468 tmp5 = in_out1; 9469 n_tmp6 = in_out2; 9470 9471 crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2, 9472 w_xtmp1, w_xtmp2, w_xtmp3, 9473 tmp4, tmp5, 9474 n_tmp6); 9475 9476 pop(in_out2); 9477 pop(in_out1); 9478 pop(tmp3); 9479 9480 addl(in_out2, 2 * size); 9481 subl(in_out1, 3 * size); 9482 jmp(L_processPartitions); 9483 9484 bind(L_exit); 9485 } 9486 #endif //LP64 9487 9488 #ifdef _LP64 9489 // Algorithm 2: Pipelined usage of the CRC32 instruction. 9490 // Input: A buffer I of L bytes. 9491 // Output: the CRC32C value of the buffer. 9492 // Notations: 9493 // Write L = 24N + r, with N = floor (L/24). 9494 // r = L mod 24 (0 <= r < 24). 9495 // Consider I as the concatenation of A|B|C|R, where A, B, C, each, 9496 // N quadwords, and R consists of r bytes. 
9497 // A[j] = I [8j+7:8j], j= 0, 1, ..., N-1 9498 // B[j] = I [N + 8j+7:N + 8j], j= 0, 1, ..., N-1 9499 // C[j] = I [2N + 8j+7:2N + 8j], j= 0, 1, ..., N-1 9500 // if r > 0 R[j] = I [3N +j], j= 0, 1, ...,r-1 9501 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 9502 Register tmp1, Register tmp2, Register tmp3, 9503 Register tmp4, Register tmp5, Register tmp6, 9504 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 9505 bool is_pclmulqdq_supported) { 9506 uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS]; 9507 Label L_wordByWord; 9508 Label L_byteByByteProlog; 9509 Label L_byteByByte; 9510 Label L_exit; 9511 9512 if (is_pclmulqdq_supported ) { 9513 const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr; 9514 const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr+1); 9515 9516 const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2); 9517 const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3); 9518 9519 const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4); 9520 const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5); 9521 assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1 ) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\""); 9522 } else { 9523 const_or_pre_comp_const_index[0] = 1; 9524 const_or_pre_comp_const_index[1] = 0; 9525 9526 const_or_pre_comp_const_index[2] = 3; 9527 const_or_pre_comp_const_index[3] = 2; 9528 9529 const_or_pre_comp_const_index[4] = 5; 9530 const_or_pre_comp_const_index[5] = 4; 9531 } 9532 crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported, 9533 in2, in1, in_out, 9534 tmp1, tmp2, tmp3, 9535 w_xtmp1, w_xtmp2, w_xtmp3, 9536 tmp4, tmp5, 9537 tmp6); 9538 crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported, 9539 in2, in1, in_out, 9540 tmp1, tmp2, tmp3, 9541 w_xtmp1, w_xtmp2, w_xtmp3, 9542 tmp4, tmp5, 9543 tmp6); 9544 crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported, 9545 in2, in1, in_out, 9546 tmp1, tmp2, tmp3, 9547 w_xtmp1, w_xtmp2, w_xtmp3, 9548 tmp4, tmp5, 9549 tmp6); 9550 movl(tmp1, in2); 9551 andl(tmp1, 0x00000007); 9552 negl(tmp1); 9553 addl(tmp1, in2); 9554 addq(tmp1, in1); 9555 9556 BIND(L_wordByWord); 9557 cmpq(in1, tmp1); 9558 jcc(Assembler::greaterEqual, L_byteByByteProlog); 9559 crc32(in_out, Address(in1, 0), 4); 9560 addq(in1, 4); 9561 jmp(L_wordByWord); 9562 9563 BIND(L_byteByByteProlog); 9564 andl(in2, 0x00000007); 9565 movl(tmp2, 1); 9566 9567 BIND(L_byteByByte); 9568 cmpl(tmp2, in2); 9569 jccb(Assembler::greater, L_exit); 9570 crc32(in_out, Address(in1, 0), 1); 9571 incq(in1); 9572 incl(tmp2); 9573 jmp(L_byteByByte); 9574 9575 BIND(L_exit); 9576 } 9577 #else 9578 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 9579 Register tmp1, Register tmp2, Register tmp3, 9580 Register tmp4, Register tmp5, Register tmp6, 9581 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 9582 bool is_pclmulqdq_supported) { 9583 uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS]; 9584 Label L_wordByWord; 9585 Label L_byteByByteProlog; 9586 Label L_byteByByte; 9587 Label L_exit; 9588 9589 if (is_pclmulqdq_supported) 
{ 9590 const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr; 9591 const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 1); 9592 9593 const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2); 9594 const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3); 9595 9596 const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4); 9597 const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5); 9598 } else { 9599 const_or_pre_comp_const_index[0] = 1; 9600 const_or_pre_comp_const_index[1] = 0; 9601 9602 const_or_pre_comp_const_index[2] = 3; 9603 const_or_pre_comp_const_index[3] = 2; 9604 9605 const_or_pre_comp_const_index[4] = 5; 9606 const_or_pre_comp_const_index[5] = 4; 9607 } 9608 crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported, 9609 in2, in1, in_out, 9610 tmp1, tmp2, tmp3, 9611 w_xtmp1, w_xtmp2, w_xtmp3, 9612 tmp4, tmp5, 9613 tmp6); 9614 crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported, 9615 in2, in1, in_out, 9616 tmp1, tmp2, tmp3, 9617 w_xtmp1, w_xtmp2, w_xtmp3, 9618 tmp4, tmp5, 9619 tmp6); 9620 crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported, 9621 in2, in1, in_out, 9622 tmp1, tmp2, tmp3, 9623 w_xtmp1, w_xtmp2, w_xtmp3, 9624 tmp4, tmp5, 9625 tmp6); 9626 movl(tmp1, in2); 9627 andl(tmp1, 0x00000007); 9628 negl(tmp1); 9629 addl(tmp1, in2); 9630 addl(tmp1, in1); 9631 9632 BIND(L_wordByWord); 9633 cmpl(in1, tmp1); 9634 jcc(Assembler::greaterEqual, L_byteByByteProlog); 9635 crc32(in_out, Address(in1,0), 4); 9636 addl(in1, 4); 9637 jmp(L_wordByWord); 9638 9639 BIND(L_byteByByteProlog); 9640 andl(in2, 0x00000007); 9641 movl(tmp2, 1); 9642 9643 BIND(L_byteByByte); 9644 cmpl(tmp2, in2); 9645 jccb(Assembler::greater, L_exit); 9646 movb(tmp1, Address(in1, 0)); 9647 crc32(in_out, tmp1, 1); 9648 incl(in1); 9649 incl(tmp2); 9650 jmp(L_byteByByte); 9651 9652 BIND(L_exit); 9653 } 9654 #endif // LP64 9655 #undef BIND 9656 #undef BLOCK_COMMENT 9657 9658 // Compress char[] array to byte[]. 
// ..\jdk\src\java.base\share\classes\java\lang\StringUTF16.java
// @HotSpotIntrinsicCandidate
// private static int compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) {
//   for (int i = 0; i < len; i++) {
//     int c = src[srcOff++];
//     if (c >>> 8 != 0) {
//       return 0;
//     }
//     dst[dstOff++] = (byte)c;
//   }
//   return len;
// }
void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
  XMMRegister tmp1Reg, XMMRegister tmp2Reg,
  XMMRegister tmp3Reg, XMMRegister tmp4Reg,
  Register tmp5, Register result) {
  Label copy_chars_loop, return_length, return_zero, done;

  // rsi: src
  // rdi: dst
  // rdx: len
  // rcx: tmp5
  // rax: result

  // rsi holds start addr of source char[] to be compressed
  // rdi holds start addr of destination byte[]
  // rdx holds length

  assert(len != result, "");

  // save length for return
  push(len);

  if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512
    VM_Version::supports_avx512vlbw() &&
    VM_Version::supports_bmi2()) {

    Label copy_32_loop, copy_loop_tail, below_threshold;

    // alignment
    Label post_alignment;

    // if length of the string is less than 32, handle it the old-fashioned way
    testl(len, -32);
    jcc(Assembler::zero, below_threshold);

    // First check whether a character is compressible (<= 0xFF).
    // Create mask to test for Unicode chars inside zmm vector
    movl(result, 0x00FF);
    evpbroadcastw(tmp2Reg, result, Assembler::AVX_512bit);

    testl(len, -64);
    jcc(Assembler::zero, post_alignment);

    movl(tmp5, dst);
    andl(tmp5, (32 - 1));
    negl(tmp5);
    andl(tmp5, (32 - 1));

    // bail out when there is nothing to be done
    testl(tmp5, 0xFFFFFFFF);
    jcc(Assembler::zero, post_alignment);

    // ~(~0 << len), where len is the # of remaining elements to process
    movl(result, 0xFFFFFFFF);
    shlxl(result, result, tmp5);
    notl(result);
    kmovdl(k3, result);

    evmovdquw(tmp1Reg, k3, Address(src, 0), Assembler::AVX_512bit);
    evpcmpuw(k2, k3, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit);
    ktestd(k2, k3);
    jcc(Assembler::carryClear, return_zero);

    evpmovwb(Address(dst, 0), k3, tmp1Reg, Assembler::AVX_512bit);

    addptr(src, tmp5);
    addptr(src, tmp5);
    addptr(dst, tmp5);
    subl(len, tmp5);

    bind(post_alignment);
    // end of alignment

    movl(tmp5, len);
    andl(tmp5, (32 - 1));   // tail count (in chars)
    andl(len, ~(32 - 1));   // vector count (in chars)
    jcc(Assembler::zero, copy_loop_tail);

    lea(src, Address(src, len, Address::times_2));
    lea(dst, Address(dst, len, Address::times_1));
    negptr(len);

    bind(copy_32_loop);
    evmovdquw(tmp1Reg, Address(src, len, Address::times_2), Assembler::AVX_512bit);
    evpcmpuw(k2, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit);
    kortestdl(k2, k2);
    jcc(Assembler::carryClear, return_zero);

    // All elements in current processed chunk are valid candidates for
    // compression. Write truncated byte elements to memory.
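    // evpmovwb stores only the low byte of each word element; the
    // evpcmpuw/kortestdl check above already guaranteed every char in this
    // chunk is <= 0xFF, so the truncation is lossless.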
9760 evpmovwb(Address(dst, len, Address::times_1), tmp1Reg, Assembler::AVX_512bit); 9761 addptr(len, 32); 9762 jcc(Assembler::notZero, copy_32_loop); 9763 9764 bind(copy_loop_tail); 9765 // bail out when there is nothing to be done 9766 testl(tmp5, 0xFFFFFFFF); 9767 jcc(Assembler::zero, return_length); 9768 9769 movl(len, tmp5); 9770 9771 // ~(~0 << len), where len is the # of remaining elements to process 9772 movl(result, 0xFFFFFFFF); 9773 shlxl(result, result, len); 9774 notl(result); 9775 9776 kmovdl(k3, result); 9777 9778 evmovdquw(tmp1Reg, k3, Address(src, 0), Assembler::AVX_512bit); 9779 evpcmpuw(k2, k3, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit); 9780 ktestd(k2, k3); 9781 jcc(Assembler::carryClear, return_zero); 9782 9783 evpmovwb(Address(dst, 0), k3, tmp1Reg, Assembler::AVX_512bit); 9784 jmp(return_length); 9785 9786 bind(below_threshold); 9787 } 9788 9789 if (UseSSE42Intrinsics) { 9790 Label copy_32_loop, copy_16, copy_tail; 9791 9792 movl(result, len); 9793 9794 movl(tmp5, 0xff00ff00); // create mask to test for Unicode chars in vectors 9795 9796 // vectored compression 9797 andl(len, 0xfffffff0); // vector count (in chars) 9798 andl(result, 0x0000000f); // tail count (in chars) 9799 testl(len, len); 9800 jcc(Assembler::zero, copy_16); 9801 9802 // compress 16 chars per iter 9803 movdl(tmp1Reg, tmp5); 9804 pshufd(tmp1Reg, tmp1Reg, 0); // store Unicode mask in tmp1Reg 9805 pxor(tmp4Reg, tmp4Reg); 9806 9807 lea(src, Address(src, len, Address::times_2)); 9808 lea(dst, Address(dst, len, Address::times_1)); 9809 negptr(len); 9810 9811 bind(copy_32_loop); 9812 movdqu(tmp2Reg, Address(src, len, Address::times_2)); // load 1st 8 characters 9813 por(tmp4Reg, tmp2Reg); 9814 movdqu(tmp3Reg, Address(src, len, Address::times_2, 16)); // load next 8 characters 9815 por(tmp4Reg, tmp3Reg); 9816 ptest(tmp4Reg, tmp1Reg); // check for Unicode chars in next vector 9817 jcc(Assembler::notZero, return_zero); 9818 packuswb(tmp2Reg, tmp3Reg); // only ASCII chars; compress each to 1 byte 9819 movdqu(Address(dst, len, Address::times_1), tmp2Reg); 9820 addptr(len, 16); 9821 jcc(Assembler::notZero, copy_32_loop); 9822 9823 // compress next vector of 8 chars (if any) 9824 bind(copy_16); 9825 movl(len, result); 9826 andl(len, 0xfffffff8); // vector count (in chars) 9827 andl(result, 0x00000007); // tail count (in chars) 9828 testl(len, len); 9829 jccb(Assembler::zero, copy_tail); 9830 9831 movdl(tmp1Reg, tmp5); 9832 pshufd(tmp1Reg, tmp1Reg, 0); // store Unicode mask in tmp1Reg 9833 pxor(tmp3Reg, tmp3Reg); 9834 9835 movdqu(tmp2Reg, Address(src, 0)); 9836 ptest(tmp2Reg, tmp1Reg); // check for Unicode chars in vector 9837 jccb(Assembler::notZero, return_zero); 9838 packuswb(tmp2Reg, tmp3Reg); // only LATIN1 chars; compress each to 1 byte 9839 movq(Address(dst, 0), tmp2Reg); 9840 addptr(src, 16); 9841 addptr(dst, 8); 9842 9843 bind(copy_tail); 9844 movl(len, result); 9845 } 9846 // compress 1 char per iter 9847 testl(len, len); 9848 jccb(Assembler::zero, return_length); 9849 lea(src, Address(src, len, Address::times_2)); 9850 lea(dst, Address(dst, len, Address::times_1)); 9851 negptr(len); 9852 9853 bind(copy_chars_loop); 9854 load_unsigned_short(result, Address(src, len, Address::times_2)); 9855 testl(result, 0xff00); // check if Unicode char 9856 jccb(Assembler::notZero, return_zero); 9857 movb(Address(dst, len, Address::times_1), result); // ASCII char; compress to 1 byte 9858 increment(len); 9859 jcc(Assembler::notZero, copy_chars_loop); 9860 9861 // if compression succeeded, return length 9862 
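  // (the length pushed at function entry is popped straight into result on
  // this success path; the failure path below discards that stack slot with
  // addptr(rsp, wordSize) and returns 0)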
  bind(return_length);
  pop(result);
  jmpb(done);

  // if compression failed, return 0
  bind(return_zero);
  xorl(result, result);
  addptr(rsp, wordSize); // discard the saved length

  bind(done);
}

// Inflate byte[] array to char[].
// ..\jdk\src\java.base\share\classes\java\lang\StringLatin1.java
// @HotSpotIntrinsicCandidate
// private static void inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len) {
//   for (int i = 0; i < len; i++) {
//     dst[dstOff++] = (char)(src[srcOff++] & 0xff);
//   }
// }
void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len,
  XMMRegister tmp1, Register tmp2) {
  Label copy_chars_loop, done, below_threshold, avx3_threshold;
  // rsi: src
  // rdi: dst
  // rdx: len
  // rcx: tmp2

  // rsi holds start addr of source byte[] to be inflated
  // rdi holds start addr of destination char[]
  // rdx holds length
  assert_different_registers(src, dst, len, tmp2);
  movl(tmp2, len);
  if ((UseAVX > 2) && // AVX512
    VM_Version::supports_avx512vlbw() &&
    VM_Version::supports_bmi2()) {

    Label copy_32_loop, copy_tail;
    Register tmp3_aliased = len;

    // if length of the string is less than 16, handle it the old-fashioned way
    testl(len, -16);
    jcc(Assembler::zero, below_threshold);

    testl(len, -1 * AVX3Threshold);
    jcc(Assembler::zero, avx3_threshold);

    // In order to use only one arithmetic operation for the main loop we use
    // this pre-calculation
    andl(tmp2, (32 - 1)); // tail count (in chars), 32 element wide loop
    andl(len, -32);       // vector count
    jccb(Assembler::zero, copy_tail);

    lea(src, Address(src, len, Address::times_1));
    lea(dst, Address(dst, len, Address::times_2));
    negptr(len);


    // inflate 32 chars per iter
    bind(copy_32_loop);
    vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_512bit);
    evmovdquw(Address(dst, len, Address::times_2), tmp1, Assembler::AVX_512bit);
    addptr(len, 32);
    jcc(Assembler::notZero, copy_32_loop);

    bind(copy_tail);
    // bail out when there is nothing to be done
    testl(tmp2, -1); // we don't destroy the contents of tmp2 here
    jcc(Assembler::zero, done);

    // ~(~0 << length), where length is the # of remaining elements to process
    movl(tmp3_aliased, -1);
    shlxl(tmp3_aliased, tmp3_aliased, tmp2);
    notl(tmp3_aliased);
    kmovdl(k2, tmp3_aliased);
    evpmovzxbw(tmp1, k2, Address(src, 0), Assembler::AVX_512bit);
    evmovdquw(Address(dst, 0), k2, tmp1, Assembler::AVX_512bit);

    jmp(done);
    bind(avx3_threshold);
  }
  if (UseSSE42Intrinsics) {
    Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail;

    if (UseAVX > 1) {
      andl(tmp2, (16 - 1));
      andl(len, -16);
      jccb(Assembler::zero, copy_new_tail);
    } else {
      andl(tmp2, 0x00000007);  // tail count (in chars)
      andl(len, 0xfffffff8);   // vector count (in chars)
      jccb(Assembler::zero, copy_tail);
    }

    // vectored inflation
    lea(src, Address(src, len, Address::times_1));
    lea(dst, Address(dst, len, Address::times_2));
    negptr(len);

    if (UseAVX > 1) {
      // inflate 16 chars per iter
      bind(copy_16_loop);
      vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_256bit);
      vmovdqu(Address(dst, len, Address::times_2), tmp1);
      addptr(len, 16);
      jcc(Assembler::notZero, copy_16_loop);

      bind(below_threshold);
      bind(copy_new_tail);
      movl(len, tmp2);
      andl(tmp2, 0x00000007);
      andl(len, 0xFFFFFFF8);
      jccb(Assembler::zero, copy_tail);

      pmovzxbw(tmp1, Address(src, 0));
      movdqu(Address(dst, 0), tmp1);
      addptr(src, 8);
      addptr(dst, 2 * 8);

      jmp(copy_tail, true);
    }

    // inflate 8 chars per iter
    bind(copy_8_loop);
    pmovzxbw(tmp1, Address(src, len, Address::times_1));  // unpack to 8 words
    movdqu(Address(dst, len, Address::times_2), tmp1);
    addptr(len, 8);
    jcc(Assembler::notZero, copy_8_loop);

    bind(copy_tail);
    movl(len, tmp2);

    cmpl(len, 4);
    jccb(Assembler::less, copy_bytes);

    movdl(tmp1, Address(src, 0));  // load 4 byte chars
    pmovzxbw(tmp1, tmp1);
    movq(Address(dst, 0), tmp1);
    subptr(len, 4);
    addptr(src, 4);
    addptr(dst, 8);

    bind(copy_bytes);
  } else {
    bind(below_threshold);
  }

  testl(len, len);
  jccb(Assembler::zero, done);
  lea(src, Address(src, len, Address::times_1));
  lea(dst, Address(dst, len, Address::times_2));
  negptr(len);

  // inflate 1 char per iter
  bind(copy_chars_loop);
  load_unsigned_byte(tmp2, Address(src, len, Address::times_1));  // load byte char
  movw(Address(dst, len, Address::times_2), tmp2);  // inflate byte char to word
  increment(len);
  jcc(Assembler::notZero, copy_chars_loop);

  bind(done);
}

#ifdef _LP64
void MacroAssembler::cache_wb(Address line)
{
  // 64 bit cpus always support clflush
  assert(VM_Version::supports_clflush(), "clflush should be available");
  bool optimized = VM_Version::supports_clflushopt();
  bool no_evict = VM_Version::supports_clwb();

  // Prefer clwb (writeback without evict); otherwise
  // prefer clflushopt (potentially parallel writeback with evict);
  // otherwise fall back on clflush (serial writeback with evict).

  if (optimized) {
    if (no_evict) {
      clwb(line);
    } else {
      clflushopt(line);
    }
  } else {
    // no need for fence when using CLFLUSH
    clflush(line);
  }
}

void MacroAssembler::cache_wbsync(bool is_pre)
{
  assert(VM_Version::supports_clflush(), "clflush should be available");
  bool optimized = VM_Version::supports_clflushopt();
  bool no_evict = VM_Version::supports_clwb();

  // pick the correct implementation

  if (!is_pre && (optimized || no_evict)) {
    // need an sfence for post flush when using clflushopt or clwb,
    // otherwise no need for any synchronization

    sfence();
  }
}
#endif // _LP64

Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
  switch (cond) {
    // Note some conditions are synonyms for others
    case Assembler::zero: return Assembler::notZero;
    case Assembler::notZero: return Assembler::zero;
    case Assembler::less: return Assembler::greaterEqual;
    case Assembler::lessEqual: return Assembler::greater;
    case Assembler::greater: return Assembler::lessEqual;
    case Assembler::greaterEqual: return Assembler::less;
    case Assembler::below: return Assembler::aboveEqual;
    case Assembler::belowEqual: return Assembler::above;
    case Assembler::above: return Assembler::belowEqual;
    case Assembler::aboveEqual: return Assembler::below;
    case
Assembler::overflow: return Assembler::noOverflow; 10079 case Assembler::noOverflow: return Assembler::overflow; 10080 case Assembler::negative: return Assembler::positive; 10081 case Assembler::positive: return Assembler::negative; 10082 case Assembler::parity: return Assembler::noParity; 10083 case Assembler::noParity: return Assembler::parity; 10084 } 10085 ShouldNotReachHere(); return Assembler::overflow; 10086 } 10087 10088 SkipIfEqual::SkipIfEqual( 10089 MacroAssembler* masm, const bool* flag_addr, bool value) { 10090 _masm = masm; 10091 _masm->cmp8(ExternalAddress((address)flag_addr), value); 10092 _masm->jcc(Assembler::equal, _label); 10093 } 10094 10095 SkipIfEqual::~SkipIfEqual() { 10096 _masm->bind(_label); 10097 } 10098 10099 // 32-bit Windows has its own fast-path implementation 10100 // of get_thread 10101 #if !defined(WIN32) || defined(_LP64) 10102 10103 // This is simply a call to Thread::current() 10104 void MacroAssembler::get_thread(Register thread) { 10105 if (thread != rax) { 10106 push(rax); 10107 } 10108 LP64_ONLY(push(rdi);) 10109 LP64_ONLY(push(rsi);) 10110 push(rdx); 10111 push(rcx); 10112 #ifdef _LP64 10113 push(r8); 10114 push(r9); 10115 push(r10); 10116 push(r11); 10117 #endif 10118 10119 MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0); 10120 10121 #ifdef _LP64 10122 pop(r11); 10123 pop(r10); 10124 pop(r9); 10125 pop(r8); 10126 #endif 10127 pop(rcx); 10128 pop(rdx); 10129 LP64_ONLY(pop(rsi);) 10130 LP64_ONLY(pop(rdi);) 10131 if (thread != rax) { 10132 mov(thread, rax); 10133 pop(rax); 10134 } 10135 } 10136 10137 #endif