/*
 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/zThreadLocalData.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions

// Stub Code definitions

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    // This can destroy rscratch1 if counter is far from the code cache
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter) \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif
  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    c_rarg4:   (interpreter) entry point              address
  //    c_rarg5:   parameters                             intptr_t*
  //    16(rbp):   parameter size (in words)              int
  //    24(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -12 [ argument word 1      ]
  // -11 [ saved r15            ] <--- rsp_after_call
  // -10 [ saved r14            ]
  //  -9 [ saved r13            ]
  //  -8 [ saved r12            ]
  //  -7 [ saved rbx            ]
  //  -6 [ call wrapper         ]
  //  -5 [ result               ]
  //  -4 [ result type          ]
  //  -3 [ method               ]
  //  -2 [ entry point          ]
  //  -1 [ parameters           ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ parameter size       ]
  //   3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    48(rbp):   (interpreter) entry point              address
  //    56(rbp):   parameters                             intptr_t*
  //    64(rbp):   parameter size (in words)              int
  //    72(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -60 [ argument word 1      ]
  // -59 [ saved xmm31          ] <--- rsp_after_call
  //     [ saved xmm16-xmm30    ] (EVEX enabled, else the space is blank)
  // -27 [ saved xmm15          ]
  //     [ saved xmm7-xmm14     ]
  //  -9 [ saved xmm6           ] (each xmm register takes 2 slots)
  //  -7 [ saved r15            ]
  //  -6 [ saved r14            ]
  //  -5 [ saved r13            ]
  //  -4 [ saved r12            ]
  //  -3 [ saved rdi            ]
  //  -2 [ saved rsi            ]
  //  -1 [ saved rbx            ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ call wrapper         ]
  //   3 [ result               ]
  //   4 [ result type          ]
  //   5 [ method               ]
  //   6 [ entry point          ]
  //   7 [ parameters           ]
  //   8 [ parameter size       ]
  //   9 [ thread               ]
  //
  // Windows reserves the caller's stack space for arguments 1-4.
  // We spill c_rarg0-c_rarg3 to this space.

  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    xmm_save_first     = 6,  // save from xmm6
    xmm_save_last      = 31, // to xmm31
    xmm_save_base      = -9,
    rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -59
    r15_off            = -7,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    call_wrapper_off   =  2,
    result_off         =  3,
    result_type_off    =  4,
    method_off         =  5,
    entry_point_off    =  6,
    parameters_off     =  7,
    parameter_size_off =  8,
    thread_off         =  9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    parameter_size_off =  2,
    thread_off         =  3
#endif
  };

#ifdef _WIN64
  Address xmm_save(int reg) {
    assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
    return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
  }
#endif
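  // For reference, the C++ side of this calling convention is the
  // CallStub function pointer type declared in stubRoutines.hpp
  // (a sketch; see that header for the authoritative declaration):
  //
  //   typedef void (*CallStub)(address   link,
  //                            intptr_t* result,
  //                            BasicType result_type,
  //                            Method*   method,
  //                            address   entry_point,
  //                            intptr_t* parameters,
  //                            int       size_of_parameters,
  //                            TRAPS);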
  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,   c_rarg5); // parameters
    __ movptr(entry_point,  c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);

#ifdef _WIN64
    int last_reg = 15;
    if (UseAVX > 2) {
      last_reg = 31;
    }
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vextractf32x4(xmm_save(i), as_XMMRegister(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(xmm_save(i), as_XMMRegister(i));
      }
    }

    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);      // parameter pointer
    __ movl(c_rarg1, c_rarg3);           // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0)); // get parameter
    __ addptr(c_rarg2, wordSize);        // advance to next parameter
    __ decrementl(c_rarg1);              // decrement counter
    __ push(rax);                        // pass parameter
    __ jcc(Assembler::notZero, loop);
    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);          // get Method*
    __ movptr(c_rarg1, entry_point); // get entry_point
    __ mov(r13, rsp);                // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::call_stub: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::call_stub: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L3);
    }
#endif

    // restore regs belonging to calling function
#ifdef _WIN64
    // emit the restores for xmm regs
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vinsertf32x4(as_XMMRegister(i), as_XMMRegister(i), xmm_save(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(as_XMMRegister(i), xmm_save(i));
      }
    }
#endif
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ vzeroupper();
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }
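  // Usage sketch, modeled on JavaCalls::call_helper() in javaCalls.cpp
  // (the exact argument expressions there may differ):
  //
  //   StubRoutines::call_stub()(
  //     (address)&link,              // call wrapper
  //     result_val_address,          // where to store the result
  //     result_type,                 // BasicType of the result
  //     method(),                    // Method* to invoke
  //     entry_point,                 // interpreter entry point
  //     args->parameters(),          // parameter words
  //     args->size_of_parameters(),  // parameter size, in words
  //     CHECK);                      // current thread (TRAPS)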
  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to set up the
  // rsp.
  //
  // rax: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    address start = __ pc();

    // same as in generate_call_stub():
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
    const Address thread        (rbp, thread_off         * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::catch_exception: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::catch_exception: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L3);
    }
#endif

    // set pending exception
    __ verify_oop(rax);

    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
    __ lea(rscratch1, ExternalAddress((address)__FILE__));
    __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
    __ movl(Address(r15_thread, Thread::exception_line_offset()), (int)__LINE__);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL,
           "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }
  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }

  // Implementation of jint atomic_xchg(jint exchange_value, volatile jint* dest)
  // used by Atomic::xchg(volatile jint* dest, jint exchange_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Implementation of intptr_t atomic_xchg(jlong exchange_value, volatile jlong* dest)
  // used by Atomic::xchg(volatile jlong* dest, jlong exchange_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if (compare_value == *dest) {
  //      *dest = exchange_value;
  //      return compare_value;
  //    } else {
  //      return *dest;
  //    }
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }
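  // Note on the cmpxchg stubs: LOCK CMPXCHG compares rax/eax with the
  // destination; on a match it stores the source register there,
  // otherwise it loads the destination into rax/eax.  Either way rax
  // ends up holding the original memory value, which is exactly the
  // contract documented above, so no explicit branch is needed.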
  // Support for int8_t atomic::atomic_cmpxchg(int8_t exchange_value, volatile int8_t* dest,
  //                                           int8_t compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if (compare_value == *dest) {
  //      *dest = exchange_value;
  //      return compare_value;
  //    } else {
  //      return *dest;
  //    }
  address generate_atomic_cmpxchg_byte() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
    address start = __ pc();

    __ movsbq(rax, c_rarg2);
    __ lock();
    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for int64_t atomic::atomic_cmpxchg(int64_t exchange_value,
  //                                            volatile int64_t* dest,
  //                                            int64_t compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if (compare_value == *dest) {
  //      *dest = exchange_value;
  //      return compare_value;
  //    } else {
  //      return *dest;
  //    }
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);
    __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Implementation of jint atomic_add(jint add_value, volatile jint* dest)
  // used by Atomic::add(volatile jint* dest, jint add_value)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);
    __ addl(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Implementation of intptr_t atomic_add(intptr_t add_value, volatile intptr_t* dest)
  // used by Atomic::add(volatile intptr_t* dest, intptr_t add_value)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
    __ addptr(rax, c_rarg0);
    __ ret(0);

    return start;
  }
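  // Note on the add stubs: LOCK XADD atomically adds the register to
  // the destination and leaves the *old* memory value in the register,
  // so the subsequent ADD reconstructs the new value (old + add_value)
  // for the return without touching memory a second time.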
  // Support for intptr_t OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess).  This is used as part of debugging
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp  (rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // caller's fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_sp()
  //
  // This routine is used to find the previous stack pointer for the
  // caller.
  address generate_get_previous_sp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_sp");
    address start = __ pc();

    __ movptr(rax, rsp);
    __ addptr(rax, 8); // return address is at the top of the stack.
    __ ret(0);

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize); // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK); // Only check control and mask bits
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }
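  // The *_fixup stubs below patch up float/double-to-integer
  // conversions.  CVTTSS2SI and CVTTSD2SI return the "integer
  // indefinite" value (min_jint/min_jlong) both for NaN and for inputs
  // out of range; compiled code that sees that result calls the
  // matching fixup stub to produce the Java-mandated answer instead:
  // 0 for NaN, and the min/max of the target type for out-of-range
  // inputs, selected by the sign of the input.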
  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1);      // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1);      // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
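  // The double variants below detect NaN without touching the FPU.
  // With the raw bits split into hi/lo 32-bit halves, ((-lo | lo) >> 31)
  // is 1 exactly when the low word is non-zero; OR-ing that into
  // (hi & 0x7fffffff) produces a value strictly above 0x7ff00000 only
  // for NaNs (exponent all ones and a non-zero mantissa), while
  // infinities compare equal and fall through to min/max saturation.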
  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testptr(c_rarg0, c_rarg0);   // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0);     // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);

    return start;
  }

  address generate_vector_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);

    return start;
  }

  address generate_vector_byte_perm_mask(const char *stub_name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64(0x0000000000000001, relocInfo::none);
    __ emit_data64(0x0000000000000003, relocInfo::none);
    __ emit_data64(0x0000000000000005, relocInfo::none);
    __ emit_data64(0x0000000000000007, relocInfo::none);
    __ emit_data64(0x0000000000000000, relocInfo::none);
    __ emit_data64(0x0000000000000002, relocInfo::none);
    __ emit_data64(0x0000000000000004, relocInfo::none);
    __ emit_data64(0x0000000000000006, relocInfo::none);

    return start;
  }
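  // These mask generators simply emit 16 or 64 bytes of data at an
  // aligned pc and return its address.  A typical use (a sketch; the
  // full set of masks is wired up during stub generation) is the
  // sign-bit clearing mask consumed by Math.abs intrinsics:
  //
  //   StubRoutines::x86::_float_sign_mask =
  //       generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF);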
  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,
      saved_r10     = 8 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr   = 16 * wordSize,
      error_msg     = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit); // if obj is NULL it is OK

#if INCLUDE_ZGC
    if (UseZGC) {
      // Check if metadata bits indicate a bad oop
      __ testptr(rax, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
      __ jcc(Assembler::notZero, error);
    }
#endif

    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // make sure klass is 'reasonable', i.e., not zero.
    __ load_klass(rax, rax, rscratch1); // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error);     // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // restore c_rarg3
    __ pop(c_rarg2);                               // restore c_rarg2
    __ pop(r12);                                   // restore r12
    __ popf();                                     // restore flags
    __ ret(4 * wordSize);                          // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // get saved c_rarg3 back
    __ pop(c_rarg2);                               // get saved c_rarg2 back
    __ pop(r12);                                   // get saved r12 back
    __ popf();                                     // get saved flags off stack --
                                                   // will be ignored

    __ pusha();                                    // push registers
                                                   // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //     [tos +  0] 16 saved registers
    //     [tos + 16] return address
    //   * [tos + 17] error message (char*)
    //   * [tos + 18] object to verify (oop)
    //   * [tos + 19] saved rax - saved by caller and bashed
    //   * [tos + 20] saved r10 (rscratch1) - saved by caller
    //   * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ hlt();
    return start;
  }
  //
  // Verify that a register contains a clean 32-bit positive value
  // (high 32 bits are 0) so it can be used in 64-bit shifts.
  //
  //  Input:
  //    Rint  - 32-bit value
  //    Rtmp  - scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#ifdef ASSERT
    Label L;
    assert_different_registers(Rtmp, Rint);
    __ movslq(Rtmp, Rint);
    __ cmpq(Rtmp, Rint);
    __ jcc(Assembler::equal, L);
    __ stop("high 32-bits of int value are not 0");
    __ bind(L);
#endif
  }

  // Generate overlap test for array copy stubs
  //
  // Input:
  //    c_rarg0 - from
  //    c_rarg1 - to
  //    c_rarg2 - element count
  //
  // Output:
  //    rax   - &from[element count]
  //
  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(NULL, &L_no_overlap, sf);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
    const Register from     = c_rarg0;
    const Register to       = c_rarg1;
    const Register count    = c_rarg2;
    const Register end_from = rax;

    __ cmpptr(to, from);
    __ lea(end_from, Address(from, count, sf, 0));
    if (NOLp == NULL) {
      ExternalAddress no_overlap(no_overlap_target);
      __ jump_cc(Assembler::belowEqual, no_overlap);
      __ cmpptr(to, end_from);
      __ jump_cc(Assembler::aboveEqual, no_overlap);
    } else {
      __ jcc(Assembler::belowEqual, (*NOLp));
      __ cmpptr(to, end_from);
      __ jcc(Assembler::aboveEqual, (*NOLp));
    }
  }
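  // In C terms the test above branches to the no-overlap code when
  //
  //   to <= from || to >= from + count * element_size
  //
  // i.e. a forward (ascending) copy is only unsafe when the destination
  // starts strictly inside the source range; in that case control falls
  // through into the conjoint (descending) copy that follows the test.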
  // Shuffle first three arg regs on Windows into Linux/Solaris locations.
  //
  // Outputs:
  //    rdi - rcx
  //    rsi - rdx
  //    rdx - r8
  //    rcx - r9
  //
  // Registers r9 and r10 are used to save rdi and rsi on Windows, where
  // the latter are non-volatile.  r9 and r10 should not be used by the
  // caller.
  //
  DEBUG_ONLY(bool regs_in_thread;)

  void setup_arg_regs(int nargs = 3) {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
    assert(nargs == 3 || nargs == 4, "else fix");
#ifdef _WIN64
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    if (nargs >= 4)
      __ mov(rax, r9);  // r9 is also saved_rdi
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
    if (nargs >= 4)
      __ mov(rcx, rax); // c_rarg3 (via rax)
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
    DEBUG_ONLY(regs_in_thread = false;)
  }

  void restore_arg_regs() {
    assert(!regs_in_thread, "wrong call to restore_arg_regs");
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
#ifdef _WIN64
    __ movptr(rdi, saved_rdi);
    __ movptr(rsi, saved_rsi);
#endif
  }

  // This is used in places where r10 is a scratch register, and can
  // be adapted if r9 is needed also.
  void setup_arg_regs_using_thread() {
    const Register saved_r15 = r9;
#ifdef _WIN64
    __ mov(saved_r15, r15);  // r15 is callee saved and needs to be restored
    __ get_thread(r15_thread);
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    __ movptr(Address(r15_thread, in_bytes(JavaThread::windows_saved_rdi_offset())), rdi);
    __ movptr(Address(r15_thread, in_bytes(JavaThread::windows_saved_rsi_offset())), rsi);

    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
    DEBUG_ONLY(regs_in_thread = true;)
  }

  void restore_arg_regs_using_thread() {
    assert(regs_in_thread, "wrong call to restore_arg_regs");
    const Register saved_r15 = r9;
#ifdef _WIN64
    __ get_thread(r15_thread);
    __ movptr(rsi, Address(r15_thread, in_bytes(JavaThread::windows_saved_rsi_offset())));
    __ movptr(rdi, Address(r15_thread, in_bytes(JavaThread::windows_saved_rdi_offset())));
    __ mov(r15, saved_r15);  // r15 is callee saved and needs to be restored
#endif
  }
  // Copy big chunks forward
  //
  // Inputs:
  //    end_from       - source array end address
  //    end_to         - destination array end address
  //    qword_count    - 64-bit element count, negative
  //    to             - scratch
  //    L_copy_bytes   - entry label
  //    L_copy_8_bytes - exit label
  //
  void copy_bytes_forward(Register end_from, Register end_to,
                          Register qword_count, Register to,
                          Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      // Copy 64 bytes per iteration
      if (UseAVX > 2) {
        Label L_loop_avx512, L_loop_avx2, L_32_byte_head, L_above_threshold, L_below_threshold;

        __ BIND(L_copy_bytes);
        __ cmpptr(qword_count, (-1 * AVX3Threshold / 8));
        __ jccb(Assembler::less, L_above_threshold);
        __ jmpb(L_below_threshold);

        __ bind(L_loop_avx512);
        __ evmovdqul(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit);
        __ evmovdqul(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit);
        __ bind(L_above_threshold);
        __ addptr(qword_count, 8);
        __ jcc(Assembler::lessEqual, L_loop_avx512);
        __ jmpb(L_32_byte_head);

        __ bind(L_loop_avx2);
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
        __ bind(L_below_threshold);
        __ addptr(qword_count, 8);
        __ jcc(Assembler::lessEqual, L_loop_avx2);

        __ bind(L_32_byte_head);
        __ subptr(qword_count, 4);  // sub(8) and add(4)
        __ jccb(Assembler::greater, L_end);
      } else {
        __ BIND(L_loop);
        if (UseAVX == 2) {
          __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
          __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
          __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
          __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
        } else {
          __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
          __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
          __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
          __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
          __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
          __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
          __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
          __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
        }

        __ BIND(L_copy_bytes);
        __ addptr(qword_count, 8);
        __ jcc(Assembler::lessEqual, L_loop);
        __ subptr(qword_count, 4);  // sub(8) and add(4)
        __ jccb(Assembler::greater, L_end);
      }
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
      }
      __ addptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32 bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
      __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
      __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
      __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
      __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);

      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 4);
      __ jcc(Assembler::lessEqual, L_loop);
    }
    __ subptr(qword_count, 4);
    __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
  }
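  // Roughly, the scalar path above is (a C sketch; the vector paths
  // unroll further but use the same indexing scheme, and end_from/end_to
  // point at the last qword, i.e. from + n - 1):
  //
  //   int64_t i = -n;              // n = qword count, i counts up to 0
  //   i += 4;
  //   while (i <= 0) {             // copy 4 qwords per iteration
  //     memcpy(end_to + i - 3, end_from + i - 3, 4 * sizeof(int64_t));
  //     i += 4;
  //   }
  //   i -= 4;                      // 0..3 remaining qwords are handed
  //                                // to the caller's L_copy_8_bytes tail
  //
  // Indexing off the end pointers with a negative count lets a single
  // ADD both advance the index and produce the flags for the loop test.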
  // Copy big chunks backward
  //
  // Inputs:
  //    from           - source array address
  //    dest           - destination array address
  //    qword_count    - 64-bit element count
  //    to             - scratch
  //    L_copy_bytes   - entry label
  //    L_copy_8_bytes - exit label
  //
  void copy_bytes_backward(Register from, Register dest,
                           Register qword_count, Register to,
                           Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      // Copy 64 bytes per iteration
      if (UseAVX > 2) {
        Label L_loop_avx512, L_loop_avx2, L_32_byte_head, L_above_threshold, L_below_threshold;

        __ BIND(L_copy_bytes);
        __ cmpptr(qword_count, (AVX3Threshold / 8));
        __ jccb(Assembler::greater, L_above_threshold);
        __ jmpb(L_below_threshold);

        __ BIND(L_loop_avx512);
        __ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 0), Assembler::AVX_512bit);
        __ evmovdqul(Address(dest, qword_count, Address::times_8, 0), xmm0, Assembler::AVX_512bit);
        __ bind(L_above_threshold);
        __ subptr(qword_count, 8);
        __ jcc(Assembler::greaterEqual, L_loop_avx512);
        __ jmpb(L_32_byte_head);

        __ bind(L_loop_avx2);
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
        __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
        __ bind(L_below_threshold);
        __ subptr(qword_count, 8);
        __ jcc(Assembler::greaterEqual, L_loop_avx2);

        __ bind(L_32_byte_head);
        __ addptr(qword_count, 4);  // add(8) and sub(4)
        __ jccb(Assembler::less, L_end);
      } else {
        __ BIND(L_loop);
        if (UseAVX == 2) {
          __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
          __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
          __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
          __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
        } else {
          __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
          __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
          __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
          __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
          __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
          __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
          __ movdqu(xmm3, Address(from, qword_count, Address::times_8, 0));
          __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm3);
        }

        __ BIND(L_copy_bytes);
        __ subptr(qword_count, 8);
        __ jcc(Assembler::greaterEqual, L_loop);

        __ addptr(qword_count, 4);  // add(8) and sub(4)
        __ jccb(Assembler::less, L_end);
      }
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
        __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
      }
      __ subptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32 bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(from, qword_count, Address::times_8, 24));
      __ movq(Address(dest, qword_count, Address::times_8, 24), to);
      __ movq(to, Address(from, qword_count, Address::times_8, 16));
      __ movq(Address(dest, qword_count, Address::times_8, 16), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  8));
      __ movq(Address(dest, qword_count, Address::times_8,  8), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  0));
      __ movq(Address(dest, qword_count, Address::times_8,  0), to);

      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 4);
      __ jcc(Assembler::greaterEqual, L_loop);
    }
    __ addptr(qword_count, 4);
    __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
  }
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_byte_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_byte_copy().
  //
  address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    Label L_copy_byte, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
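    // The UnsafeCopyMemoryMark below registers the marked instruction
    // range in the UnsafeCopyMemory table: if a fault is taken inside
    // that range while copying on behalf of Unsafe.copyMemory (e.g. on
    // a truncated memory-mapped file), the VM's signal handler can
    // continue at the range's designated exit pc instead of crashing.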
    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !aligned, true);
      // 'from', 'to' and 'count' are now valid
      __ movptr(byte_count, count);
      __ shrptr(count, 3); // count => qword_count

      // Copy from low to high addresses.  Use 'to' as scratch.
      __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
      __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
      __ negptr(qword_count); // make the count negative
      __ jmp(L_copy_bytes);

      // Copy trailing qwords
      __ BIND(L_copy_8_bytes);
      __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
      __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
      __ increment(qword_count);
      __ jcc(Assembler::notZero, L_copy_8_bytes);

      // Check for and copy trailing dword
      __ BIND(L_copy_4_bytes);
      __ testl(byte_count, 4);
      __ jccb(Assembler::zero, L_copy_2_bytes);
      __ movl(rax, Address(end_from, 8));
      __ movl(Address(end_to, 8), rax);

      __ addptr(end_from, 4);
      __ addptr(end_to, 4);

      // Check for and copy trailing word
      __ BIND(L_copy_2_bytes);
      __ testl(byte_count, 2);
      __ jccb(Assembler::zero, L_copy_byte);
      __ movw(rax, Address(end_from, 8));
      __ movw(Address(end_to, 8), rax);

      __ addptr(end_from, 2);
      __ addptr(end_to, 2);

      // Check for and copy trailing byte
      __ BIND(L_copy_byte);
      __ testl(byte_count, 1);
      __ jccb(Assembler::zero, L_exit);
      __ movb(rax, Address(end_from, 8));
      __ movb(Address(end_to, 8), rax);
    }
    __ BIND(L_exit);
    address ucme_exit_pc = __ pc();
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    {
      UnsafeCopyMemoryMark ucmm(this, !aligned, false, ucme_exit_pc);
      // Copy in multi-byte chunks
      copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
      __ jmp(L_copy_4_bytes);
    }
    return start;
  }
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !aligned, true);
      // 'from', 'to' and 'count' are now valid
      __ movptr(byte_count, count);
      __ shrptr(count, 3); // count => qword_count

      // Copy from high to low addresses.

      // Check for and copy trailing byte
      __ testl(byte_count, 1);
      __ jcc(Assembler::zero, L_copy_2_bytes);
      __ movb(rax, Address(from, byte_count, Address::times_1, -1));
      __ movb(Address(to, byte_count, Address::times_1, -1), rax);
      __ decrement(byte_count); // Adjust for possible trailing word

      // Check for and copy trailing word
      __ BIND(L_copy_2_bytes);
      __ testl(byte_count, 2);
      __ jcc(Assembler::zero, L_copy_4_bytes);
      __ movw(rax, Address(from, byte_count, Address::times_1, -2));
      __ movw(Address(to, byte_count, Address::times_1, -2), rax);

      // Check for and copy trailing dword
      __ BIND(L_copy_4_bytes);
      __ testl(byte_count, 4);
      __ jcc(Assembler::zero, L_copy_bytes);
      __ movl(rax, Address(from, qword_count, Address::times_8));
      __ movl(Address(to, qword_count, Address::times_8), rax);
      __ jmp(L_copy_bytes);

      // Copy trailing qwords
      __ BIND(L_copy_8_bytes);
      __ movq(rax, Address(from, qword_count, Address::times_8, -8));
      __ movq(Address(to, qword_count, Address::times_8, -8), rax);
      __ decrement(qword_count);
      __ jcc(Assembler::notZero, L_copy_8_bytes);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !aligned, true);
      // Copy in multi-byte chunks
      copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
1723 // 1724 address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) { 1725 __ align(CodeEntryAlignment); 1726 StubCodeMark mark(this, "StubRoutines", name); 1727 address start = __ pc(); 1728 1729 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes,L_copy_2_bytes,L_exit; 1730 const Register from = rdi; // source array address 1731 const Register to = rsi; // destination array address 1732 const Register count = rdx; // elements count 1733 const Register word_count = rcx; 1734 const Register qword_count = count; 1735 const Register end_from = from; // source array end address 1736 const Register end_to = to; // destination array end address 1737 // End pointers are inclusive, and if count is not zero they point 1738 // to the last unit copied: end_to[0] := end_from[0] 1739 1740 __ enter(); // required for proper stackwalking of RuntimeStub frame 1741 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1742 1743 if (entry != NULL) { 1744 *entry = __ pc(); 1745 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1746 BLOCK_COMMENT("Entry:"); 1747 } 1748 1749 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1750 // r9 and r10 may be used to save non-volatile registers 1751 1752 { 1753 // UnsafeCopyMemory page error: continue after ucm 1754 UnsafeCopyMemoryMark ucmm(this, !aligned, true); 1755 // 'from', 'to' and 'count' are now valid 1756 __ movptr(word_count, count); 1757 __ shrptr(count, 2); // count => qword_count 1758 1759 // Copy from low to high addresses. Use 'to' as scratch. 1760 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1761 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1762 __ negptr(qword_count); 1763 __ jmp(L_copy_bytes); 1764 1765 // Copy trailing qwords 1766 __ BIND(L_copy_8_bytes); 1767 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1768 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1769 __ increment(qword_count); 1770 __ jcc(Assembler::notZero, L_copy_8_bytes); 1771 1772 // Original 'dest' is trashed, so we can't use it as a 1773 // base register for a possible trailing word copy 1774 1775 // Check for and copy trailing dword 1776 __ BIND(L_copy_4_bytes); 1777 __ testl(word_count, 2); 1778 __ jccb(Assembler::zero, L_copy_2_bytes); 1779 __ movl(rax, Address(end_from, 8)); 1780 __ movl(Address(end_to, 8), rax); 1781 1782 __ addptr(end_from, 4); 1783 __ addptr(end_to, 4); 1784 1785 // Check for and copy trailing word 1786 __ BIND(L_copy_2_bytes); 1787 __ testl(word_count, 1); 1788 __ jccb(Assembler::zero, L_exit); 1789 __ movw(rax, Address(end_from, 8)); 1790 __ movw(Address(end_to, 8), rax); 1791 } 1792 __ BIND(L_exit); 1793 address ucme_exit_pc = __ pc(); 1794 restore_arg_regs(); 1795 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1796 __ xorptr(rax, rax); // return 0 1797 __ vzeroupper(); 1798 __ leave(); // required for proper stackwalking of RuntimeStub frame 1799 __ ret(0); 1800 1801 { 1802 UnsafeCopyMemoryMark ucmm(this, !aligned, false, ucme_exit_pc); 1803 // Copy in multi-bytes chunks 1804 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1805 __ jmp(L_copy_4_bytes); 1806 } 1807 1808 return start; 1809 } 1810 1811 address generate_fill(BasicType t, bool aligned, const char *name) { 1812 __ align(CodeEntryAlignment); 1813 StubCodeMark mark(this, "StubRoutines", name); 1814 address start = __ pc(); 1815 1816 BLOCK_COMMENT("Entry:"); 1817 
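    // The fill stub defers the real work to MacroAssembler::generate_fill.
    // Semantically it behaves like this loop (illustrative sketch only; the
    // macro assembler actually emits vectorized fast paths):
    //
    //   for (size_t i = 0; i < count; i++)
    //     to[i] = value;            // element width given by BasicType t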
1818     const Register to       = c_rarg0;  // destination array address
1819     const Register value    = c_rarg1;  // value
1820     const Register count    = c_rarg2;  // elements count
1821
1822     __ enter(); // required for proper stackwalking of RuntimeStub frame
1823
1824     __ generate_fill(t, aligned, to, value, count, rax, xmm0);
1825
1826     __ vzeroupper();
1827     __ leave(); // required for proper stackwalking of RuntimeStub frame
1828     __ ret(0);
1829     return start;
1830   }
1831
1832   // Arguments:
1833   //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1834   //             ignored
1835   //   name    - stub name string
1836   //
1837   // Inputs:
1838   //   c_rarg0   - source array address
1839   //   c_rarg1   - destination array address
1840   //   c_rarg2   - element count, treated as ssize_t, can be zero
1841   //
1842   // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
1843   // let the hardware handle it.  The two or four words within dwords
1844   // or qwords that span cache line boundaries will still be loaded
1845   // and stored atomically.
1846   //
1847   address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
1848                                        address *entry, const char *name) {
1849     __ align(CodeEntryAlignment);
1850     StubCodeMark mark(this, "StubRoutines", name);
1851     address start = __ pc();
1852
1853     Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes;
1854     const Register from        = rdi;  // source array address
1855     const Register to          = rsi;  // destination array address
1856     const Register count       = rdx;  // elements count
1857     const Register word_count  = rcx;
1858     const Register qword_count = count;
1859
1860     __ enter(); // required for proper stackwalking of RuntimeStub frame
1861     assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
1862
1863     if (entry != NULL) {
1864       *entry = __ pc();
1865       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1866       BLOCK_COMMENT("Entry:");
1867     }
1868
1869     array_overlap_test(nooverlap_target, Address::times_2);
1870     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1871                       // r9 and r10 may be used to save non-volatile registers
1872
1873     {
1874       // UnsafeCopyMemory page error: continue after ucm
1875       UnsafeCopyMemoryMark ucmm(this, !aligned, true);
1876       // 'from', 'to' and 'count' are now valid
1877       __ movptr(word_count, count);
1878       __ shrptr(count, 2); // count => qword_count
1879
1880       // Copy from high to low addresses.  Use 'to' as scratch.
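      // Worked example of the tail handling below (illustrative): for
      // count = 7 short elements, word_count = 7 and qword_count = 7 >> 2 = 1:
      //   - bit 0 of word_count set -> copy element 6 first (one word),
      //   - bit 1 of word_count set -> copy elements 4..5 next (one dword),
      //   - one remaining qword covers elements 0..3, copied last.
      // 2 + 4 + 8 = 14 bytes = 7 * sizeof(jshort), so each byte moves exactly once.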
1881
1882       // Check for and copy trailing word
1883       __ testl(word_count, 1);
1884       __ jccb(Assembler::zero, L_copy_4_bytes);
1885       __ movw(rax, Address(from, word_count, Address::times_2, -2));
1886       __ movw(Address(to, word_count, Address::times_2, -2), rax);
1887
1888       // Check for and copy trailing dword
1889       __ BIND(L_copy_4_bytes);
1890       __ testl(word_count, 2);
1891       __ jcc(Assembler::zero, L_copy_bytes);
1892       __ movl(rax, Address(from, qword_count, Address::times_8));
1893       __ movl(Address(to, qword_count, Address::times_8), rax);
1894       __ jmp(L_copy_bytes);
1895
1896       // Copy trailing qwords
1897       __ BIND(L_copy_8_bytes);
1898       __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1899       __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1900       __ decrement(qword_count);
1901       __ jcc(Assembler::notZero, L_copy_8_bytes);
1902     }
1903     restore_arg_regs();
1904     inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
1905     __ xorptr(rax, rax); // return 0
1906     __ vzeroupper();
1907     __ leave(); // required for proper stackwalking of RuntimeStub frame
1908     __ ret(0);
1909
1910     {
1911       // UnsafeCopyMemory page error: continue after ucm
1912       UnsafeCopyMemoryMark ucmm(this, !aligned, true);
1913       // Copy in multi-byte chunks
1914       copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
1915     }
1916     restore_arg_regs();
1917     inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
1918     __ xorptr(rax, rax); // return 0
1919     __ vzeroupper();
1920     __ leave(); // required for proper stackwalking of RuntimeStub frame
1921     __ ret(0);
1922
1923     return start;
1924   }
1925
1926   // Arguments:
1927   //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1928   //             ignored
1929   //   is_oop  - true => oop array, so generate store check code
1930   //   name    - stub name string
1931   //
1932   // Inputs:
1933   //   c_rarg0   - source array address
1934   //   c_rarg1   - destination array address
1935   //   c_rarg2   - element count, treated as ssize_t, can be zero
1936   //
1937   // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
1938   // the hardware handle it.  The two dwords within qwords that span
1939   // cache line boundaries will still be loaded and stored atomically.
1940   //
1941   // Side Effects:
1942   //   disjoint_int_copy_entry is set to the no-overlap entry point
1943   //   used by generate_conjoint_int_oop_copy().
1944   //
1945   address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry,
1946                                          const char *name, bool dest_uninitialized = false) {
1947     __ align(CodeEntryAlignment);
1948     StubCodeMark mark(this, "StubRoutines", name);
1949     address start = __ pc();
1950
1951     Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
1952     const Register from        = rdi;  // source array address
1953     const Register to          = rsi;  // destination array address
1954     const Register count       = rdx;  // elements count
1955     const Register dword_count = rcx;
1956     const Register qword_count = count;
1957     const Register end_from    = from; // source array end address
1958     const Register end_to      = to;   // destination array end address
1959     // End pointers are inclusive, and if count is not zero they point
1960     // to the last unit copied:  end_to[0] := end_from[0]
1961
1962     __ enter(); // required for proper stackwalking of RuntimeStub frame
1963     assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
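    // The forward loops below use a negative-index idiom: the end pointers
    // are parked on the last qword, qword_count is negated, and the index
    // climbs toward zero.  The arithmetic (illustrative, for n qwords):
    //
    //   end_from = from + 8*(n-1)
    //   i        = -n, -n+1, ..., -1
    //   Address(end_from, i, times_8, 8) == from + 8*(n-1) + 8*i + 8
    //                                    == from + 8*(n+i)   // qwords 0 .. n-1
    //
    // so a single increment(); jcc(notZero) both advances the index and
    // tests for loop exit.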
1964
1965     if (entry != NULL) {
1966       *entry = __ pc();
1967       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1968       BLOCK_COMMENT("Entry:");
1969     }
1970
1971     setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx
1972                                    // r9 is used to save r15_thread
1973
1974     DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT;
1975     if (dest_uninitialized) {
1976       decorators |= IS_DEST_UNINITIALIZED;
1977     }
1978     if (aligned) {
1979       decorators |= ARRAYCOPY_ALIGNED;
1980     }
1981
1982     BasicType type = is_oop ? T_OBJECT : T_INT;
1983     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
1984     bs->arraycopy_prologue(_masm, decorators, type, from, to, count);
1985
1986     {
1987       // UnsafeCopyMemory page error: continue after ucm
1988       UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true);
1989       // 'from', 'to' and 'count' are now valid
1990       __ movptr(dword_count, count);
1991       __ shrptr(count, 1); // count => qword_count
1992
1993       // Copy from low to high addresses.  Use 'to' as scratch.
1994       __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1995       __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
1996       __ negptr(qword_count);
1997       __ jmp(L_copy_bytes);
1998
1999       // Copy trailing qwords
2000       __ BIND(L_copy_8_bytes);
2001       __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
2002       __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
2003       __ increment(qword_count);
2004       __ jcc(Assembler::notZero, L_copy_8_bytes);
2005
2006       // Check for and copy trailing dword
2007       __ BIND(L_copy_4_bytes);
2008       __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
2009       __ jccb(Assembler::zero, L_exit);
2010       __ movl(rax, Address(end_from, 8));
2011       __ movl(Address(end_to, 8), rax);
2012     }
2013     __ BIND(L_exit);
2014     address ucme_exit_pc = __ pc();
2015     bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count);
2016     restore_arg_regs_using_thread();
2017     inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
2018     __ vzeroupper();
2019     __ xorptr(rax, rax); // return 0
2020     __ leave(); // required for proper stackwalking of RuntimeStub frame
2021     __ ret(0);
2022
2023     {
2024       UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, false, ucme_exit_pc);
2025       // Copy in multi-byte chunks
2026       copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
2027       __ jmp(L_copy_4_bytes);
2028     }
2029
2030     return start;
2031   }
2032
2033   // Arguments:
2034   //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
2035   //             ignored
2036   //   is_oop  - true => oop array, so generate store check code
2037   //   name    - stub name string
2038   //
2039   // Inputs:
2040   //   c_rarg0   - source array address
2041   //   c_rarg1   - destination array address
2042   //   c_rarg2   - element count, treated as ssize_t, can be zero
2043   //
2044   // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
2045   // the hardware handle it.  The two dwords within qwords that span
2046   // cache line boundaries will still be loaded and stored atomically.
2047 // 2048 address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target, 2049 address *entry, const char *name, 2050 bool dest_uninitialized = false) { 2051 __ align(CodeEntryAlignment); 2052 StubCodeMark mark(this, "StubRoutines", name); 2053 address start = __ pc(); 2054 2055 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2056 const Register from = rdi; // source array address 2057 const Register to = rsi; // destination array address 2058 const Register count = rdx; // elements count 2059 const Register dword_count = rcx; 2060 const Register qword_count = count; 2061 2062 __ enter(); // required for proper stackwalking of RuntimeStub frame 2063 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2064 2065 if (entry != NULL) { 2066 *entry = __ pc(); 2067 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2068 BLOCK_COMMENT("Entry:"); 2069 } 2070 2071 array_overlap_test(nooverlap_target, Address::times_4); 2072 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx 2073 // r9 is used to save r15_thread 2074 2075 DecoratorSet decorators = IN_HEAP | IS_ARRAY; 2076 if (dest_uninitialized) { 2077 decorators |= IS_DEST_UNINITIALIZED; 2078 } 2079 if (aligned) { 2080 decorators |= ARRAYCOPY_ALIGNED; 2081 } 2082 2083 BasicType type = is_oop ? T_OBJECT : T_INT; 2084 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2085 // no registers are destroyed by this call 2086 bs->arraycopy_prologue(_masm, decorators, type, from, to, count); 2087 2088 assert_clean_int(count, rax); // Make sure 'count' is clean int. 2089 { 2090 // UnsafeCopyMemory page error: continue after ucm 2091 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2092 // 'from', 'to' and 'count' are now valid 2093 __ movptr(dword_count, count); 2094 __ shrptr(count, 1); // count => qword_count 2095 2096 // Copy from high to low addresses. Use 'to' as scratch. 
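      // Overlap example (illustrative): with from = 0x1000, to = 0x1004 and
      // count = 8 dwords, the regions share 0x1004..0x101f.  Copying high to
      // low, source dword 7 (at 0x101c) is read before dword 6 is stored to
      // to + 24 = 0x101c, and so on down: every source dword is read before a
      // store can clobber it.  array_overlap_test() above has already sent
      // the non-overlapping case to nooverlap_target.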
2097 2098 // Check for and copy trailing dword 2099 __ testl(dword_count, 1); 2100 __ jcc(Assembler::zero, L_copy_bytes); 2101 __ movl(rax, Address(from, dword_count, Address::times_4, -4)); 2102 __ movl(Address(to, dword_count, Address::times_4, -4), rax); 2103 __ jmp(L_copy_bytes); 2104 2105 // Copy trailing qwords 2106 __ BIND(L_copy_8_bytes); 2107 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2108 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2109 __ decrement(qword_count); 2110 __ jcc(Assembler::notZero, L_copy_8_bytes); 2111 } 2112 if (is_oop) { 2113 __ jmp(L_exit); 2114 } 2115 restore_arg_regs_using_thread(); 2116 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2117 __ xorptr(rax, rax); // return 0 2118 __ vzeroupper(); 2119 __ leave(); // required for proper stackwalking of RuntimeStub frame 2120 __ ret(0); 2121 2122 { 2123 // UnsafeCopyMemory page error: continue after ucm 2124 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2125 // Copy in multi-bytes chunks 2126 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2127 } 2128 2129 __ BIND(L_exit); 2130 bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count); 2131 restore_arg_regs_using_thread(); 2132 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2133 __ xorptr(rax, rax); // return 0 2134 __ vzeroupper(); 2135 __ leave(); // required for proper stackwalking of RuntimeStub frame 2136 __ ret(0); 2137 2138 return start; 2139 } 2140 2141 // Arguments: 2142 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2143 // ignored 2144 // is_oop - true => oop array, so generate store check code 2145 // name - stub name string 2146 // 2147 // Inputs: 2148 // c_rarg0 - source array address 2149 // c_rarg1 - destination array address 2150 // c_rarg2 - element count, treated as ssize_t, can be zero 2151 // 2152 // Side Effects: 2153 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the 2154 // no-overlap entry point used by generate_conjoint_long_oop_copy(). 2155 // 2156 address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry, 2157 const char *name, bool dest_uninitialized = false) { 2158 __ align(CodeEntryAlignment); 2159 StubCodeMark mark(this, "StubRoutines", name); 2160 address start = __ pc(); 2161 2162 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2163 const Register from = rdi; // source array address 2164 const Register to = rsi; // destination array address 2165 const Register qword_count = rdx; // elements count 2166 const Register end_from = from; // source array end address 2167 const Register end_to = rcx; // destination array end address 2168 const Register saved_count = r11; 2169 // End pointers are inclusive, and if count is not zero they point 2170 // to the last unit copied: end_to[0] := end_from[0] 2171 2172 __ enter(); // required for proper stackwalking of RuntimeStub frame 2173 // Save no-overlap entry point for generate_conjoint_long_oop_copy() 2174 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 
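    // When is_oop, the raw qword loop below is bracketed by the collector's
    // barrier hooks; structurally (sketch only):
    //
    //   bs->arraycopy_prologue(...);    // e.g. pre-write barrier work
    //   <forward qword copy>
    //   bs->arraycopy_epilogue(...);    // e.g. card marking / post barrier
    //
    // This is why the oop path jumps to L_exit rather than returning through
    // the plain epilogue that skips the barrier code.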
2175 2176 if (entry != NULL) { 2177 *entry = __ pc(); 2178 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2179 BLOCK_COMMENT("Entry:"); 2180 } 2181 2182 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx 2183 // r9 is used to save r15_thread 2184 // 'from', 'to' and 'qword_count' are now valid 2185 2186 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT; 2187 if (dest_uninitialized) { 2188 decorators |= IS_DEST_UNINITIALIZED; 2189 } 2190 if (aligned) { 2191 decorators |= ARRAYCOPY_ALIGNED; 2192 } 2193 2194 BasicType type = is_oop ? T_OBJECT : T_LONG; 2195 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2196 bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count); 2197 { 2198 // UnsafeCopyMemory page error: continue after ucm 2199 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2200 2201 // Copy from low to high addresses. Use 'to' as scratch. 2202 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 2203 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 2204 __ negptr(qword_count); 2205 __ jmp(L_copy_bytes); 2206 2207 // Copy trailing qwords 2208 __ BIND(L_copy_8_bytes); 2209 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 2210 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 2211 __ increment(qword_count); 2212 __ jcc(Assembler::notZero, L_copy_8_bytes); 2213 } 2214 if (is_oop) { 2215 __ jmp(L_exit); 2216 } else { 2217 restore_arg_regs_using_thread(); 2218 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2219 __ xorptr(rax, rax); // return 0 2220 __ vzeroupper(); 2221 __ leave(); // required for proper stackwalking of RuntimeStub frame 2222 __ ret(0); 2223 } 2224 2225 { 2226 // UnsafeCopyMemory page error: continue after ucm 2227 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2228 // Copy in multi-bytes chunks 2229 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2230 } 2231 2232 __ BIND(L_exit); 2233 bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count); 2234 restore_arg_regs_using_thread(); 2235 if (is_oop) { 2236 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2237 } else { 2238 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2239 } 2240 __ vzeroupper(); 2241 __ xorptr(rax, rax); // return 0 2242 __ leave(); // required for proper stackwalking of RuntimeStub frame 2243 __ ret(0); 2244 2245 return start; 2246 } 2247 2248 // Arguments: 2249 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2250 // ignored 2251 // is_oop - true => oop array, so generate store check code 2252 // name - stub name string 2253 // 2254 // Inputs: 2255 // c_rarg0 - source array address 2256 // c_rarg1 - destination array address 2257 // c_rarg2 - element count, treated as ssize_t, can be zero 2258 // 2259 address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, 2260 address nooverlap_target, address *entry, 2261 const char *name, bool dest_uninitialized = false) { 2262 __ align(CodeEntryAlignment); 2263 StubCodeMark mark(this, "StubRoutines", name); 2264 address start = __ pc(); 2265 2266 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2267 const Register from = rdi; // source array address 2268 const Register to = rsi; // destination array address 2269 const Register qword_count = rdx; // 
elements count 2270 const Register saved_count = rcx; 2271 2272 __ enter(); // required for proper stackwalking of RuntimeStub frame 2273 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2274 2275 if (entry != NULL) { 2276 *entry = __ pc(); 2277 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2278 BLOCK_COMMENT("Entry:"); 2279 } 2280 2281 array_overlap_test(nooverlap_target, Address::times_8); 2282 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx 2283 // r9 is used to save r15_thread 2284 // 'from', 'to' and 'qword_count' are now valid 2285 2286 DecoratorSet decorators = IN_HEAP | IS_ARRAY; 2287 if (dest_uninitialized) { 2288 decorators |= IS_DEST_UNINITIALIZED; 2289 } 2290 if (aligned) { 2291 decorators |= ARRAYCOPY_ALIGNED; 2292 } 2293 2294 BasicType type = is_oop ? T_OBJECT : T_LONG; 2295 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2296 bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count); 2297 { 2298 // UnsafeCopyMemory page error: continue after ucm 2299 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2300 2301 __ jmp(L_copy_bytes); 2302 2303 // Copy trailing qwords 2304 __ BIND(L_copy_8_bytes); 2305 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2306 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2307 __ decrement(qword_count); 2308 __ jcc(Assembler::notZero, L_copy_8_bytes); 2309 } 2310 if (is_oop) { 2311 __ jmp(L_exit); 2312 } else { 2313 restore_arg_regs_using_thread(); 2314 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2315 __ xorptr(rax, rax); // return 0 2316 __ vzeroupper(); 2317 __ leave(); // required for proper stackwalking of RuntimeStub frame 2318 __ ret(0); 2319 } 2320 { 2321 // UnsafeCopyMemory page error: continue after ucm 2322 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2323 2324 // Copy in multi-bytes chunks 2325 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2326 } 2327 __ BIND(L_exit); 2328 bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count); 2329 restore_arg_regs_using_thread(); 2330 if (is_oop) { 2331 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2332 } else { 2333 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2334 } 2335 __ vzeroupper(); 2336 __ xorptr(rax, rax); // return 0 2337 __ leave(); // required for proper stackwalking of RuntimeStub frame 2338 __ ret(0); 2339 2340 return start; 2341 } 2342 2343 2344 // Helper for generating a dynamic type check. 2345 // Smashes no registers. 2346 void generate_type_check(Register sub_klass, 2347 Register super_check_offset, 2348 Register super_klass, 2349 Label& L_success) { 2350 assert_different_registers(sub_klass, super_check_offset, super_klass); 2351 2352 BLOCK_COMMENT("type_check:"); 2353 2354 Label L_miss; 2355 2356 __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL, 2357 super_check_offset); 2358 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL); 2359 2360 // Fall through on failure! 
2361 __ BIND(L_miss); 2362 } 2363 2364 // 2365 // Generate checkcasting array copy stub 2366 // 2367 // Input: 2368 // c_rarg0 - source array address 2369 // c_rarg1 - destination array address 2370 // c_rarg2 - element count, treated as ssize_t, can be zero 2371 // c_rarg3 - size_t ckoff (super_check_offset) 2372 // not Win64 2373 // c_rarg4 - oop ckval (super_klass) 2374 // Win64 2375 // rsp+40 - oop ckval (super_klass) 2376 // 2377 // Output: 2378 // rax == 0 - success 2379 // rax == -1^K - failure, where K is partial transfer count 2380 // 2381 address generate_checkcast_copy(const char *name, address *entry, 2382 bool dest_uninitialized = false) { 2383 2384 Label L_load_element, L_store_element, L_do_card_marks, L_done; 2385 2386 // Input registers (after setup_arg_regs) 2387 const Register from = rdi; // source array address 2388 const Register to = rsi; // destination array address 2389 const Register length = rdx; // elements count 2390 const Register ckoff = rcx; // super_check_offset 2391 const Register ckval = r8; // super_klass 2392 2393 // Registers used as temps (r13, r14 are save-on-entry) 2394 const Register end_from = from; // source array end address 2395 const Register end_to = r13; // destination array end address 2396 const Register count = rdx; // -(count_remaining) 2397 const Register r14_length = r14; // saved copy of length 2398 // End pointers are inclusive, and if length is not zero they point 2399 // to the last unit copied: end_to[0] := end_from[0] 2400 2401 const Register rax_oop = rax; // actual oop copied 2402 const Register r11_klass = r11; // oop._klass 2403 2404 //--------------------------------------------------------------- 2405 // Assembler stub will be used for this call to arraycopy 2406 // if the two arrays are subtypes of Object[] but the 2407 // destination array type is not equal to or a supertype 2408 // of the source type. Each element must be separately 2409 // checked. 2410 2411 __ align(CodeEntryAlignment); 2412 StubCodeMark mark(this, "StubRoutines", name); 2413 address start = __ pc(); 2414 2415 __ enter(); // required for proper stackwalking of RuntimeStub frame 2416 2417 #ifdef ASSERT 2418 // caller guarantees that the arrays really are different 2419 // otherwise, we would have to make conjoint checks 2420 { Label L; 2421 array_overlap_test(L, TIMES_OOP); 2422 __ stop("checkcast_copy within a single array"); 2423 __ bind(L); 2424 } 2425 #endif //ASSERT 2426 2427 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx 2428 // ckoff => rcx, ckval => r8 2429 // r9 and r10 may be used to save non-volatile registers 2430 #ifdef _WIN64 2431 // last argument (#4) is on stack on Win64 2432 __ movptr(ckval, Address(rsp, 6 * wordSize)); 2433 #endif 2434 2435 // Caller of this entry point must set up the argument registers. 
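    // Worked example of the return convention (values illustrative): if the
    // stub copies 3 of 10 elements and then hits a failing subtype check, it
    // returns rax = -1 ^ 3 = ~3 = -4.  The caller recovers the partial
    // transfer count as K = ~rax and handles the remaining elements itself.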
2436 if (entry != NULL) { 2437 *entry = __ pc(); 2438 BLOCK_COMMENT("Entry:"); 2439 } 2440 2441 // allocate spill slots for r13, r14 2442 enum { 2443 saved_r13_offset, 2444 saved_r14_offset, 2445 saved_r10_offset, 2446 saved_rbp_offset 2447 }; 2448 __ subptr(rsp, saved_rbp_offset * wordSize); 2449 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13); 2450 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14); 2451 __ movptr(Address(rsp, saved_r10_offset * wordSize), r10); 2452 2453 #ifdef ASSERT 2454 Label L2; 2455 __ get_thread(r14); 2456 __ cmpptr(r15_thread, r14); 2457 __ jcc(Assembler::equal, L2); 2458 __ stop("StubRoutines::call_stub: r15_thread is modified by call"); 2459 __ bind(L2); 2460 #endif // ASSERT 2461 2462 // check that int operands are properly extended to size_t 2463 assert_clean_int(length, rax); 2464 assert_clean_int(ckoff, rax); 2465 2466 #ifdef ASSERT 2467 BLOCK_COMMENT("assert consistent ckoff/ckval"); 2468 // The ckoff and ckval must be mutually consistent, 2469 // even though caller generates both. 2470 { Label L; 2471 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2472 __ cmpl(ckoff, Address(ckval, sco_offset)); 2473 __ jcc(Assembler::equal, L); 2474 __ stop("super_check_offset inconsistent"); 2475 __ bind(L); 2476 } 2477 #endif //ASSERT 2478 2479 // Loop-invariant addresses. They are exclusive end pointers. 2480 Address end_from_addr(from, length, TIMES_OOP, 0); 2481 Address end_to_addr(to, length, TIMES_OOP, 0); 2482 // Loop-variant addresses. They assume post-incremented count < 0. 2483 Address from_element_addr(end_from, count, TIMES_OOP, 0); 2484 Address to_element_addr(end_to, count, TIMES_OOP, 0); 2485 2486 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST | ARRAYCOPY_DISJOINT; 2487 if (dest_uninitialized) { 2488 decorators |= IS_DEST_UNINITIALIZED; 2489 } 2490 2491 BasicType type = T_OBJECT; 2492 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2493 bs->arraycopy_prologue(_masm, decorators, type, from, to, count); 2494 2495 // Copy from low to high addresses, indexed from the end of each array. 2496 __ lea(end_from, end_from_addr); 2497 __ lea(end_to, end_to_addr); 2498 __ movptr(r14_length, length); // save a copy of the length 2499 assert(length == count, ""); // else fix next line: 2500 __ negptr(count); // negate and test the length 2501 __ jcc(Assembler::notZero, L_load_element); 2502 2503 // Empty array: Nothing to do. 2504 __ xorptr(rax, rax); // return 0 on (trivial) success 2505 __ jmp(L_done); 2506 2507 // ======== begin loop ======== 2508 // (Loop is rotated; its entry is L_load_element.) 2509 // Loop control: 2510 // for (count = -count; count != 0; count++) 2511 // Base pointers src, dst are biased by 8*(count-1),to last element. 
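    // In C terms the rotated loop below is roughly (illustrative sketch,
    // eliding the barrier and klass-cache details):
    //
    //   for (count = -count; count != 0; count++) {
    //     oop el = end_from[count];                     // L_load_element
    //     if (el != NULL && !subtype_of(klass_of(el), ckval))
    //       break;                                      // report partial copy
    //     end_to[count] = el;                           // L_store_element
    //   }
    //
    // The rotation puts the load at the loop entry, so the common (success)
    // path falls through from the type check straight into the store.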
2512 __ align(OptoLoopAlignment); 2513 2514 __ BIND(L_store_element); 2515 __ store_heap_oop(to_element_addr, rax_oop, noreg, noreg, AS_RAW); // store the oop 2516 __ increment(count); // increment the count toward zero 2517 __ jcc(Assembler::zero, L_do_card_marks); 2518 2519 // ======== loop entry is here ======== 2520 __ BIND(L_load_element); 2521 __ load_heap_oop(rax_oop, from_element_addr, noreg, noreg, AS_RAW); // load the oop 2522 __ testptr(rax_oop, rax_oop); 2523 __ jcc(Assembler::zero, L_store_element); 2524 2525 __ load_klass(r11_klass, rax_oop, rscratch1);// query the object klass 2526 generate_type_check(r11_klass, ckoff, ckval, L_store_element); 2527 // ======== end loop ======== 2528 2529 // It was a real error; we must depend on the caller to finish the job. 2530 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops. 2531 // Emit GC store barriers for the oops we have copied (r14 + rdx), 2532 // and report their number to the caller. 2533 assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1); 2534 Label L_post_barrier; 2535 __ addptr(r14_length, count); // K = (original - remaining) oops 2536 __ movptr(rax, r14_length); // save the value 2537 __ notptr(rax); // report (-1^K) to caller (does not affect flags) 2538 __ jccb(Assembler::notZero, L_post_barrier); 2539 __ jmp(L_done); // K == 0, nothing was copied, skip post barrier 2540 2541 // Come here on success only. 2542 __ BIND(L_do_card_marks); 2543 __ xorptr(rax, rax); // return 0 on success 2544 2545 __ BIND(L_post_barrier); 2546 bs->arraycopy_epilogue(_masm, decorators, type, from, to, r14_length); 2547 2548 // Common exit point (success or failure). 2549 __ BIND(L_done); 2550 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 2551 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 2552 __ movptr(r10, Address(rsp, saved_r10_offset * wordSize)); 2553 restore_arg_regs(); 2554 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free 2555 __ leave(); // required for proper stackwalking of RuntimeStub frame 2556 __ ret(0); 2557 2558 return start; 2559 } 2560 2561 // 2562 // Generate 'unsafe' array copy stub 2563 // Though just as safe as the other stubs, it takes an unscaled 2564 // size_t argument instead of an element count. 2565 // 2566 // Input: 2567 // c_rarg0 - source array address 2568 // c_rarg1 - destination array address 2569 // c_rarg2 - byte count, treated as ssize_t, can be zero 2570 // 2571 // Examines the alignment of the operands and dispatches 2572 // to a long, int, short, or byte copy loop. 
2573   //
2574   address generate_unsafe_copy(const char *name,
2575                                address byte_copy_entry, address short_copy_entry,
2576                                address int_copy_entry, address long_copy_entry) {
2577
2578     Label L_long_aligned, L_int_aligned, L_short_aligned;
2579
2580     // Input registers (before setup_arg_regs)
2581     const Register from        = c_rarg0;  // source array address
2582     const Register to          = c_rarg1;  // destination array address
2583     const Register size        = c_rarg2;  // byte count (size_t)
2584
2585     // Register used as a temp
2586     const Register bits        = rax;      // test copy of low bits
2587
2588     __ align(CodeEntryAlignment);
2589     StubCodeMark mark(this, "StubRoutines", name);
2590     address start = __ pc();
2591
2592     __ enter(); // required for proper stackwalking of RuntimeStub frame
2593
2594     // bump this on entry, not on exit:
2595     inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);
2596
2597     __ mov(bits, from);
2598     __ orptr(bits, to);
2599     __ orptr(bits, size);
2600
2601     __ testb(bits, BytesPerLong-1);
2602     __ jccb(Assembler::zero, L_long_aligned);
2603
2604     __ testb(bits, BytesPerInt-1);
2605     __ jccb(Assembler::zero, L_int_aligned);
2606
2607     __ testb(bits, BytesPerShort-1);
2608     __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));
2609
2610     __ BIND(L_short_aligned);
2611     __ shrptr(size, LogBytesPerShort); // size => short_count
2612     __ jump(RuntimeAddress(short_copy_entry));
2613
2614     __ BIND(L_int_aligned);
2615     __ shrptr(size, LogBytesPerInt); // size => int_count
2616     __ jump(RuntimeAddress(int_copy_entry));
2617
2618     __ BIND(L_long_aligned);
2619     __ shrptr(size, LogBytesPerLong); // size => qword_count
2620     __ jump(RuntimeAddress(long_copy_entry));
2621
2622     return start;
2623   }
2624
2625   // Perform range checks on the proposed arraycopy.
2626   // Kills temp, but nothing else.
2627   // Also, clean the sign bits of src_pos and dst_pos.
2628   void arraycopy_range_checks(Register src,     // source array oop (c_rarg0)
2629                               Register src_pos, // source position (c_rarg1)
2630                               Register dst,     // destination array oop (c_rarg2)
2631                               Register dst_pos, // destination position (c_rarg3)
2632                               Register length,
2633                               Register temp,
2634                               Label& L_failed) {
2635     BLOCK_COMMENT("arraycopy_range_checks:");
2636
2637     //  if (src_pos + length > arrayOop(src)->length())  FAIL;
2638     __ movl(temp, length);
2639     __ addl(temp, src_pos);  // src_pos + length
2640     __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
2641     __ jcc(Assembler::above, L_failed);
2642
2643     //  if (dst_pos + length > arrayOop(dst)->length())  FAIL;
2644     __ movl(temp, length);
2645     __ addl(temp, dst_pos);  // dst_pos + length
2646     __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
2647     __ jcc(Assembler::above, L_failed);
2648
2649     // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
2650     // Move with sign extension can be used since they are positive.
2651 __ movslq(src_pos, src_pos); 2652 __ movslq(dst_pos, dst_pos); 2653 2654 BLOCK_COMMENT("arraycopy_range_checks done"); 2655 } 2656 2657 // 2658 // Generate generic array copy stubs 2659 // 2660 // Input: 2661 // c_rarg0 - src oop 2662 // c_rarg1 - src_pos (32-bits) 2663 // c_rarg2 - dst oop 2664 // c_rarg3 - dst_pos (32-bits) 2665 // not Win64 2666 // c_rarg4 - element count (32-bits) 2667 // Win64 2668 // rsp+40 - element count (32-bits) 2669 // 2670 // Output: 2671 // rax == 0 - success 2672 // rax == -1^K - failure, where K is partial transfer count 2673 // 2674 address generate_generic_copy(const char *name, 2675 address byte_copy_entry, address short_copy_entry, 2676 address int_copy_entry, address oop_copy_entry, 2677 address long_copy_entry, address checkcast_copy_entry) { 2678 2679 Label L_failed, L_failed_0, L_objArray; 2680 Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs; 2681 2682 // Input registers 2683 const Register src = c_rarg0; // source array oop 2684 const Register src_pos = c_rarg1; // source position 2685 const Register dst = c_rarg2; // destination array oop 2686 const Register dst_pos = c_rarg3; // destination position 2687 #ifndef _WIN64 2688 const Register length = c_rarg4; 2689 const Register rklass_tmp = r9; // load_klass 2690 #else 2691 const Address length(rsp, 6 * wordSize); // elements count is on stack on Win64 2692 const Register rklass_tmp = rdi; // load_klass 2693 #endif 2694 2695 { int modulus = CodeEntryAlignment; 2696 int target = modulus - 5; // 5 = sizeof jmp(L_failed) 2697 int advance = target - (__ offset() % modulus); 2698 if (advance < 0) advance += modulus; 2699 if (advance > 0) __ nop(advance); 2700 } 2701 StubCodeMark mark(this, "StubRoutines", name); 2702 2703 // Short-hop target to L_failed. Makes for denser prologue code. 2704 __ BIND(L_failed_0); 2705 __ jmp(L_failed); 2706 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed"); 2707 2708 __ align(CodeEntryAlignment); 2709 address start = __ pc(); 2710 2711 __ enter(); // required for proper stackwalking of RuntimeStub frame 2712 2713 // bump this on entry, not on exit: 2714 inc_counter_np(SharedRuntime::_generic_array_copy_ctr); 2715 2716 //----------------------------------------------------------------------- 2717 // Assembler stub will be used for this call to arraycopy 2718 // if the following conditions are met: 2719 // 2720 // (1) src and dst must not be null. 2721 // (2) src_pos must not be negative. 2722 // (3) dst_pos must not be negative. 2723 // (4) length must not be negative. 2724 // (5) src klass and dst klass should be the same and not NULL. 2725 // (6) src and dst should be arrays. 2726 // (7) src_pos + length must not exceed length of src. 2727 // (8) dst_pos + length must not exceed length of dst. 
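  //  As straight-line C the preconditions read roughly as follows
  //  (illustrative only; the code below performs them on registers):
  //
  //    if (src == NULL || src_pos < 0 ||                      // (1) (2)
  //        dst == NULL || dst_pos < 0 || length < 0)          // (3) (4)
  //      return -1;
  //    if (src->klass() != dst->klass() || !src->is_array())  // (5) (6)
  //      return -1;
  //    if (src_pos + length > src->length() ||                // (7)
  //        dst_pos + length > dst->length())                  // (8)
  //      return -1;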
2728   //
2729
2730     //  if (src == NULL) return -1;
2731     __ testptr(src, src);         // src oop
2732     size_t j1off = __ offset();
2733     __ jccb(Assembler::zero, L_failed_0);
2734
2735     //  if (src_pos < 0) return -1;
2736     __ testl(src_pos, src_pos); // src_pos (32-bits)
2737     __ jccb(Assembler::negative, L_failed_0);
2738
2739     //  if (dst == NULL) return -1;
2740     __ testptr(dst, dst);         // dst oop
2741     __ jccb(Assembler::zero, L_failed_0);
2742
2743     //  if (dst_pos < 0) return -1;
2744     __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
2745     size_t j4off = __ offset();
2746     __ jccb(Assembler::negative, L_failed_0);
2747
2748     // The first four tests are very dense code,
2749     // but not quite dense enough to put four
2750     // jumps in a 16-byte instruction fetch buffer.
2751     // That's good, because some branch predictors
2752     // do not like jumps so close together.
2753     // Make sure of this.
2754     guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");
2755
2756     // registers used as temp
2757     const Register r11_length    = r11; // elements count to copy
2758     const Register r10_src_klass = r10; // array klass
2759
2760     //  if (length < 0) return -1;
2761     __ movl(r11_length, length);        // length (elements count, 32-bits value)
2762     __ testl(r11_length, r11_length);
2763     __ jccb(Assembler::negative, L_failed_0);
2764
2765     __ load_klass(r10_src_klass, src, rklass_tmp);
2766 #ifdef ASSERT
2767     //  assert(src->klass() != NULL);
2768     {
2769       BLOCK_COMMENT("assert klasses not null {");
2770       Label L1, L2;
2771       __ testptr(r10_src_klass, r10_src_klass);
2772       __ jcc(Assembler::notZero, L2);   // it is broken if klass is NULL
2773       __ bind(L1);
2774       __ stop("broken null klass");
2775       __ bind(L2);
2776       __ load_klass(rax, dst, rklass_tmp);
2777       __ cmpq(rax, 0);
2778       __ jcc(Assembler::equal, L1);     // this would be broken also
2779       BLOCK_COMMENT("} assert klasses not null done");
2780     }
2781 #endif
2782
2783     // Load layout helper (32-bits)
2784     //
2785     //  |array_tag|     | header_size | element_type |     |log2_element_size|
2786     // 32        30    24            16              8     2                 0
2787     //
2788     //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2789     //
2790
2791     const int lh_offset = in_bytes(Klass::layout_helper_offset());
2792
2793     // Handle objArrays completely differently...
2794     const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2795     __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh);
2796     __ jcc(Assembler::equal, L_objArray);
2797
2798     //  if (src->klass() != dst->klass()) return -1;
2799     __ load_klass(rax, dst, rklass_tmp);
2800     __ cmpq(r10_src_klass, rax);
2801     __ jcc(Assembler::notEqual, L_failed);
2802
2803     const Register rax_lh = rax;  // layout helper
2804     __ movl(rax_lh, Address(r10_src_klass, lh_offset));
2805
2806     //  if (!src->is_Array()) return -1;
2807     __ cmpl(rax_lh, Klass::_lh_neutral_value);
2808     __ jcc(Assembler::greaterEqual, L_failed);
2809
2810     // At this point, it is known to be a typeArray (array_tag 0x3).
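    // Example decode (illustrative): for an int[], lh has array_tag = 0x3
    // (typeArray), header_size = the byte offset of the first element, and
    // log2_element_size = 2.  The sequence further below recovers both fields:
    //
    //   header_size       = (lh >> _lh_header_size_shift) & _lh_header_size_mask;
    //   log2_element_size =  lh & _lh_log2_element_size_mask;
    //
    // i.e. exactly the movl/shrl/andptr and andl that follow.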
2811 #ifdef ASSERT 2812 { 2813 BLOCK_COMMENT("assert primitive array {"); 2814 Label L; 2815 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift)); 2816 __ jcc(Assembler::greaterEqual, L); 2817 __ stop("must be a primitive array"); 2818 __ bind(L); 2819 BLOCK_COMMENT("} assert primitive array done"); 2820 } 2821 #endif 2822 2823 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2824 r10, L_failed); 2825 2826 // TypeArrayKlass 2827 // 2828 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 2829 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 2830 // 2831 2832 const Register r10_offset = r10; // array offset 2833 const Register rax_elsize = rax_lh; // element size 2834 2835 __ movl(r10_offset, rax_lh); 2836 __ shrl(r10_offset, Klass::_lh_header_size_shift); 2837 __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset 2838 __ addptr(src, r10_offset); // src array offset 2839 __ addptr(dst, r10_offset); // dst array offset 2840 BLOCK_COMMENT("choose copy loop based on element size"); 2841 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize 2842 2843 // next registers should be set before the jump to corresponding stub 2844 const Register from = c_rarg0; // source array address 2845 const Register to = c_rarg1; // destination array address 2846 const Register count = c_rarg2; // elements count 2847 2848 // 'from', 'to', 'count' registers should be set in such order 2849 // since they are the same as 'src', 'src_pos', 'dst'. 2850 2851 __ BIND(L_copy_bytes); 2852 __ cmpl(rax_elsize, 0); 2853 __ jccb(Assembler::notEqual, L_copy_shorts); 2854 __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr 2855 __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr 2856 __ movl2ptr(count, r11_length); // length 2857 __ jump(RuntimeAddress(byte_copy_entry)); 2858 2859 __ BIND(L_copy_shorts); 2860 __ cmpl(rax_elsize, LogBytesPerShort); 2861 __ jccb(Assembler::notEqual, L_copy_ints); 2862 __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr 2863 __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr 2864 __ movl2ptr(count, r11_length); // length 2865 __ jump(RuntimeAddress(short_copy_entry)); 2866 2867 __ BIND(L_copy_ints); 2868 __ cmpl(rax_elsize, LogBytesPerInt); 2869 __ jccb(Assembler::notEqual, L_copy_longs); 2870 __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr 2871 __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr 2872 __ movl2ptr(count, r11_length); // length 2873 __ jump(RuntimeAddress(int_copy_entry)); 2874 2875 __ BIND(L_copy_longs); 2876 #ifdef ASSERT 2877 { 2878 BLOCK_COMMENT("assert long copy {"); 2879 Label L; 2880 __ cmpl(rax_elsize, LogBytesPerLong); 2881 __ jcc(Assembler::equal, L); 2882 __ stop("must be long copy, but elsize is wrong"); 2883 __ bind(L); 2884 BLOCK_COMMENT("} assert long copy done"); 2885 } 2886 #endif 2887 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr 2888 __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr 2889 __ movl2ptr(count, r11_length); // length 2890 __ jump(RuntimeAddress(long_copy_entry)); 2891 2892 // ObjArrayKlass 2893 __ BIND(L_objArray); 2894 // live at this point: r10_src_klass, r11_length, src[_pos], dst[_pos] 2895 2896 Label L_plain_copy, L_checkcast_copy; 2897 // test array classes for subtyping 2898 __ load_klass(rax, dst, rklass_tmp); 2899 __ cmpq(r10_src_klass, rax); // usual case is exact equality 2900 
__ jcc(Assembler::notEqual, L_checkcast_copy); 2901 2902 // Identically typed arrays can be copied without element-wise checks. 2903 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2904 r10, L_failed); 2905 2906 __ lea(from, Address(src, src_pos, TIMES_OOP, 2907 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr 2908 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2909 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr 2910 __ movl2ptr(count, r11_length); // length 2911 __ BIND(L_plain_copy); 2912 __ jump(RuntimeAddress(oop_copy_entry)); 2913 2914 __ BIND(L_checkcast_copy); 2915 // live at this point: r10_src_klass, r11_length, rax (dst_klass) 2916 { 2917 // Before looking at dst.length, make sure dst is also an objArray. 2918 __ cmpl(Address(rax, lh_offset), objArray_lh); 2919 __ jcc(Assembler::notEqual, L_failed); 2920 2921 // It is safe to examine both src.length and dst.length. 2922 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2923 rax, L_failed); 2924 2925 const Register r11_dst_klass = r11; 2926 __ load_klass(r11_dst_klass, dst, rklass_tmp); // reload 2927 2928 // Marshal the base address arguments now, freeing registers. 2929 __ lea(from, Address(src, src_pos, TIMES_OOP, 2930 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2931 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2932 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2933 __ movl(count, length); // length (reloaded) 2934 Register sco_temp = c_rarg3; // this register is free now 2935 assert_different_registers(from, to, count, sco_temp, 2936 r11_dst_klass, r10_src_klass); 2937 assert_clean_int(count, sco_temp); 2938 2939 // Generate the type check. 2940 const int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2941 __ movl(sco_temp, Address(r11_dst_klass, sco_offset)); 2942 assert_clean_int(sco_temp, rax); 2943 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy); 2944 2945 // Fetch destination element klass from the ObjArrayKlass header. 2946 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 2947 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset)); 2948 __ movl( sco_temp, Address(r11_dst_klass, sco_offset)); 2949 assert_clean_int(sco_temp, rax); 2950 2951 // the checkcast_copy loop needs two extra arguments: 2952 assert(c_rarg3 == sco_temp, "#3 already in place"); 2953 // Set up arguments for checkcast_copy_entry. 
2954 setup_arg_regs(4); 2955 __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris 2956 __ jump(RuntimeAddress(checkcast_copy_entry)); 2957 } 2958 2959 __ BIND(L_failed); 2960 __ xorptr(rax, rax); 2961 __ notptr(rax); // return -1 2962 __ leave(); // required for proper stackwalking of RuntimeStub frame 2963 __ ret(0); 2964 2965 return start; 2966 } 2967 2968 address generate_data_cache_writeback() { 2969 const Register src = c_rarg0; // source address 2970 2971 __ align(CodeEntryAlignment); 2972 2973 StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback"); 2974 2975 address start = __ pc(); 2976 __ enter(); 2977 __ cache_wb(Address(src, 0)); 2978 __ leave(); 2979 __ ret(0); 2980 2981 return start; 2982 } 2983 2984 address generate_data_cache_writeback_sync() { 2985 const Register is_pre = c_rarg0; // pre or post sync 2986 2987 __ align(CodeEntryAlignment); 2988 2989 StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback_sync"); 2990 2991 // pre wbsync is a no-op 2992 // post wbsync translates to an sfence 2993 2994 Label skip; 2995 address start = __ pc(); 2996 __ enter(); 2997 __ cmpl(is_pre, 0); 2998 __ jcc(Assembler::notEqual, skip); 2999 __ cache_wbsync(false); 3000 __ bind(skip); 3001 __ leave(); 3002 __ ret(0); 3003 3004 return start; 3005 } 3006 3007 void generate_arraycopy_stubs() { 3008 address entry; 3009 address entry_jbyte_arraycopy; 3010 address entry_jshort_arraycopy; 3011 address entry_jint_arraycopy; 3012 address entry_oop_arraycopy; 3013 address entry_jlong_arraycopy; 3014 address entry_checkcast_arraycopy; 3015 3016 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, 3017 "jbyte_disjoint_arraycopy"); 3018 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy, 3019 "jbyte_arraycopy"); 3020 3021 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, 3022 "jshort_disjoint_arraycopy"); 3023 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy, 3024 "jshort_arraycopy"); 3025 3026 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry, 3027 "jint_disjoint_arraycopy"); 3028 StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry, 3029 &entry_jint_arraycopy, "jint_arraycopy"); 3030 3031 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry, 3032 "jlong_disjoint_arraycopy"); 3033 StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry, 3034 &entry_jlong_arraycopy, "jlong_arraycopy"); 3035 3036 3037 if (UseCompressedOops) { 3038 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry, 3039 "oop_disjoint_arraycopy"); 3040 StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry, 3041 &entry_oop_arraycopy, "oop_arraycopy"); 3042 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry, 3043 "oop_disjoint_arraycopy_uninit", 3044 /*dest_uninitialized*/true); 3045 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry, 3046 NULL, "oop_arraycopy_uninit", 3047 /*dest_uninitialized*/true); 3048 } else { 3049 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry, 3050 "oop_disjoint_arraycopy"); 3051 StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry, 
3052 &entry_oop_arraycopy, "oop_arraycopy"); 3053 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry, 3054 "oop_disjoint_arraycopy_uninit", 3055 /*dest_uninitialized*/true); 3056 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry, 3057 NULL, "oop_arraycopy_uninit", 3058 /*dest_uninitialized*/true); 3059 } 3060 3061 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 3062 StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, 3063 /*dest_uninitialized*/true); 3064 3065 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", 3066 entry_jbyte_arraycopy, 3067 entry_jshort_arraycopy, 3068 entry_jint_arraycopy, 3069 entry_jlong_arraycopy); 3070 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy", 3071 entry_jbyte_arraycopy, 3072 entry_jshort_arraycopy, 3073 entry_jint_arraycopy, 3074 entry_oop_arraycopy, 3075 entry_jlong_arraycopy, 3076 entry_checkcast_arraycopy); 3077 3078 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 3079 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 3080 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 3081 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); 3082 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 3083 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 3084 3085 // We don't generate specialized code for HeapWord-aligned source 3086 // arrays, so just use the code we've already generated 3087 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy; 3088 StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy; 3089 3090 StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy; 3091 StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy; 3092 3093 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy; 3094 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; 3095 3096 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy; 3097 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; 3098 3099 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; 3100 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; 3101 3102 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit; 3103 StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit; 3104 } 3105 3106 // AES intrinsic stubs 3107 enum {AESBlockSize = 16}; 3108 3109 address generate_key_shuffle_mask() { 3110 __ align(16); 3111 StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask"); 3112 address start = __ pc(); 3113 __ emit_data64( 0x0405060700010203, relocInfo::none ); 3114 __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none ); 3115 return start; 3116 } 3117 3118 address generate_counter_shuffle_mask() { 3119 __ align(16); 3120 StubCodeMark mark(this, "StubRoutines", "counter_shuffle_mask"); 3121 address start = __ pc(); 3122 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 3123 __ emit_data64(0x0001020304050607, relocInfo::none); 3124 return start; 3125 } 3126 3127 // Utility 
routine for loading a 128-bit key word in little endian format
3128   // can optionally specify that the shuffle mask is already in an xmm register
3129   void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
3130     __ movdqu(xmmdst, Address(key, offset));
3131     if (xmm_shuf_mask != NULL) {
3132       __ pshufb(xmmdst, xmm_shuf_mask);
3133     } else {
3134       __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
3135     }
3136   }
3137
3138   // Utility routine for increasing the 128-bit counter (the IV in CTR mode)
3139   void inc_counter(Register reg, XMMRegister xmmdst, int inc_delta, Label& next_block) {
3140     __ pextrq(reg, xmmdst, 0x0);
3141     __ addq(reg, inc_delta);
3142     __ pinsrq(xmmdst, reg, 0x0);
3143     __ jcc(Assembler::carryClear, next_block); // jump if no carry out of the low qword
3144     __ pextrq(reg, xmmdst, 0x01); // Carry
3145     __ addq(reg, 0x01);
3146     __ pinsrq(xmmdst, reg, 0x01); // Carry end
3147     __ BIND(next_block);          // next instruction
3148   }
3149
3150   // Arguments:
3151   //
3152   // Inputs:
3153   //   c_rarg0   - source byte array address
3154   //   c_rarg1   - destination byte array address
3155   //   c_rarg2   - K (key) in little endian int array
3156   //
3157   address generate_aescrypt_encryptBlock() {
3158     assert(UseAES, "need AES instructions and misaligned SSE support");
3159     __ align(CodeEntryAlignment);
3160     StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
3161     Label L_doLast;
3162     address start = __ pc();
3163
3164     const Register from        = c_rarg0;  // source array address
3165     const Register to          = c_rarg1;  // destination array address
3166     const Register key         = c_rarg2;  // key array address
3167     const Register keylen      = rax;
3168
3169     const XMMRegister xmm_result        = xmm0;
3170     const XMMRegister xmm_key_shuf_mask = xmm1;
3171     // On win64 xmm6-xmm15 must be preserved so don't use them.
3172 const XMMRegister xmm_temp1 = xmm2; 3173 const XMMRegister xmm_temp2 = xmm3; 3174 const XMMRegister xmm_temp3 = xmm4; 3175 const XMMRegister xmm_temp4 = xmm5; 3176 3177 __ enter(); // required for proper stackwalking of RuntimeStub frame 3178 3179 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3180 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3181 3182 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3183 __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input 3184 3185 // For encryption, the java expanded key ordering is just what we need 3186 // we don't know if the key is aligned, hence not using load-execute form 3187 3188 load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask); 3189 __ pxor(xmm_result, xmm_temp1); 3190 3191 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3192 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3193 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3194 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3195 3196 __ aesenc(xmm_result, xmm_temp1); 3197 __ aesenc(xmm_result, xmm_temp2); 3198 __ aesenc(xmm_result, xmm_temp3); 3199 __ aesenc(xmm_result, xmm_temp4); 3200 3201 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3202 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3203 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3204 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3205 3206 __ aesenc(xmm_result, xmm_temp1); 3207 __ aesenc(xmm_result, xmm_temp2); 3208 __ aesenc(xmm_result, xmm_temp3); 3209 __ aesenc(xmm_result, xmm_temp4); 3210 3211 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3212 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3213 3214 __ cmpl(keylen, 44); 3215 __ jccb(Assembler::equal, L_doLast); 3216 3217 __ aesenc(xmm_result, xmm_temp1); 3218 __ aesenc(xmm_result, xmm_temp2); 3219 3220 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3221 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3222 3223 __ cmpl(keylen, 52); 3224 __ jccb(Assembler::equal, L_doLast); 3225 3226 __ aesenc(xmm_result, xmm_temp1); 3227 __ aesenc(xmm_result, xmm_temp2); 3228 3229 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3230 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3231 3232 __ BIND(L_doLast); 3233 __ aesenc(xmm_result, xmm_temp1); 3234 __ aesenclast(xmm_result, xmm_temp2); 3235 __ movdqu(Address(to, 0), xmm_result); // store the result 3236 __ xorptr(rax, rax); // return 0 3237 __ leave(); // required for proper stackwalking of RuntimeStub frame 3238 __ ret(0); 3239 3240 return start; 3241 } 3242 3243 3244 // Arguments: 3245 // 3246 // Inputs: 3247 // c_rarg0 - source byte array address 3248 // c_rarg1 - destination byte array address 3249 // c_rarg2 - K (key) in little endian int array 3250 // 3251 address generate_aescrypt_decryptBlock() { 3252 assert(UseAES, "need AES instructions and misaligned SSE support"); 3253 __ align(CodeEntryAlignment); 3254 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); 3255 Label L_doLast; 3256 address start = __ pc(); 3257 3258 const Register from = c_rarg0; // source array address 3259 const Register to = c_rarg1; // destination array address 3260 const Register key = c_rarg2; // key array address 3261 const Register keylen = rax; 3262 3263 const XMMRegister xmm_result = xmm0; 3264 const XMMRegister xmm_key_shuf_mask = xmm1; 3265 // On win64 xmm6-xmm15 must be preserved so don't use them. 
3266 const XMMRegister xmm_temp1 = xmm2; 3267 const XMMRegister xmm_temp2 = xmm3; 3268 const XMMRegister xmm_temp3 = xmm4; 3269 const XMMRegister xmm_temp4 = xmm5; 3270 3271 __ enter(); // required for proper stackwalking of RuntimeStub frame 3272 3273 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3274 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3275 3276 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3277 __ movdqu(xmm_result, Address(from, 0)); 3278 3279 // for decryption java expanded key ordering is rotated one position from what we want 3280 // so we start from 0x10 here and hit 0x00 last 3281 // we don't know if the key is aligned, hence not using load-execute form 3282 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3283 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3284 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3285 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3286 3287 __ pxor (xmm_result, xmm_temp1); 3288 __ aesdec(xmm_result, xmm_temp2); 3289 __ aesdec(xmm_result, xmm_temp3); 3290 __ aesdec(xmm_result, xmm_temp4); 3291 3292 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3293 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3294 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3295 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3296 3297 __ aesdec(xmm_result, xmm_temp1); 3298 __ aesdec(xmm_result, xmm_temp2); 3299 __ aesdec(xmm_result, xmm_temp3); 3300 __ aesdec(xmm_result, xmm_temp4); 3301 3302 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3303 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3304 load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask); 3305 3306 __ cmpl(keylen, 44); 3307 __ jccb(Assembler::equal, L_doLast); 3308 3309 __ aesdec(xmm_result, xmm_temp1); 3310 __ aesdec(xmm_result, xmm_temp2); 3311 3312 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3313 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3314 3315 __ cmpl(keylen, 52); 3316 __ jccb(Assembler::equal, L_doLast); 3317 3318 __ aesdec(xmm_result, xmm_temp1); 3319 __ aesdec(xmm_result, xmm_temp2); 3320 3321 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3322 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3323 3324 __ BIND(L_doLast); 3325 __ aesdec(xmm_result, xmm_temp1); 3326 __ aesdec(xmm_result, xmm_temp2); 3327 3328 // for decryption the aesdeclast operation is always on key+0x00 3329 __ aesdeclast(xmm_result, xmm_temp3); 3330 __ movdqu(Address(to, 0), xmm_result); // store the result 3331 __ xorptr(rax, rax); // return 0 3332 __ leave(); // required for proper stackwalking of RuntimeStub frame 3333 __ ret(0); 3334 3335 return start; 3336 } 3337 3338 3339 // Arguments: 3340 // 3341 // Inputs: 3342 // c_rarg0 - source byte array address 3343 // c_rarg1 - destination byte array address 3344 // c_rarg2 - K (key) in little endian int array 3345 // c_rarg3 - r vector byte array address 3346 // c_rarg4 - input length 3347 // 3348 // Output: 3349 // rax - input length 3350 // 3351 address generate_cipherBlockChaining_encryptAESCrypt() { 3352 assert(UseAES, "need AES instructions and misaligned SSE support"); 3353 __ align(CodeEntryAlignment); 3354 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); 3355 address start = __ pc(); 3356 3357 Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256; 3358 const Register from = c_rarg0; // source array address 3359 const Register to = c_rarg1; 
// destination array address 3360 const Register key = c_rarg2; // key array address 3361 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3362 // and left with the results of the last encryption block 3363 #ifndef _WIN64 3364 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3365 #else 3366 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3367 const Register len_reg = r11; // pick the volatile windows register 3368 #endif 3369 const Register pos = rax; 3370 3371 // xmm register assignments for the loops below 3372 const XMMRegister xmm_result = xmm0; 3373 const XMMRegister xmm_temp = xmm1; 3374 // keys 0-10 preloaded into xmm2-xmm12 3375 const int XMM_REG_NUM_KEY_FIRST = 2; 3376 const int XMM_REG_NUM_KEY_LAST = 15; 3377 const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3378 const XMMRegister xmm_key10 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10); 3379 const XMMRegister xmm_key11 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11); 3380 const XMMRegister xmm_key12 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12); 3381 const XMMRegister xmm_key13 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13); 3382 3383 __ enter(); // required for proper stackwalking of RuntimeStub frame 3384 3385 #ifdef _WIN64 3386 // on win64, fill len_reg from stack position 3387 __ movl(len_reg, len_mem); 3388 #else 3389 __ push(len_reg); // Save 3390 #endif 3391 3392 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 3393 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3394 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0 3395 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) { 3396 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3397 offset += 0x10; 3398 } 3399 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 3400 3401 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3402 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3403 __ cmpl(rax, 44); 3404 __ jcc(Assembler::notEqual, L_key_192_256); 3405 3406 // 128 bit code follows here 3407 __ movptr(pos, 0); 3408 __ align(OptoLoopAlignment); 3409 3410 __ BIND(L_loopTop_128); 3411 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3412 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3413 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3414 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) { 3415 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3416 } 3417 __ aesenclast(xmm_result, xmm_key10); 3418 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3419 // no need to store r to memory until we exit 3420 __ addptr(pos, AESBlockSize); 3421 __ subptr(len_reg, AESBlockSize); 3422 __ jcc(Assembler::notEqual, L_loopTop_128); 3423 3424 __ BIND(L_exit); 3425 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object 3426 3427 #ifdef _WIN64 3428 __ movl(rax, len_mem); 3429 #else 3430 __ pop(rax); // return length 3431 #endif 3432 __ leave(); // required for proper stackwalking of RuntimeStub frame 3433 __ ret(0); 3434 3435 __ BIND(L_key_192_256); 3436 // here rax = len in ints of AESCrypt.KLE 
array (52=192, or 60=256)
3437 load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask);
3438 load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask);
3439 __ cmpl(rax, 52);
3440 __ jcc(Assembler::notEqual, L_key_256);
3441
3442 // 192-bit code follows here (could be changed to use more xmm registers)
3443 __ movptr(pos, 0);
3444 __ align(OptoLoopAlignment);
3445
3446 __ BIND(L_loopTop_192);
3447 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
3448 __ pxor (xmm_result, xmm_temp); // xor with the current r vector
3449 __ pxor (xmm_result, xmm_key0); // do the aes rounds
3450 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) {
3451 __ aesenc(xmm_result, as_XMMRegister(rnum));
3452 }
3453 __ aesenclast(xmm_result, xmm_key12);
3454 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3455 // no need to store r to memory until we exit
3456 __ addptr(pos, AESBlockSize);
3457 __ subptr(len_reg, AESBlockSize);
3458 __ jcc(Assembler::notEqual, L_loopTop_192);
3459 __ jmp(L_exit);
3460
3461 __ BIND(L_key_256);
3462 // 256-bit code follows here (could be changed to use more xmm registers)
3463 load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask);
3464 __ movptr(pos, 0);
3465 __ align(OptoLoopAlignment);
3466
3467 __ BIND(L_loopTop_256);
3468 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
3469 __ pxor (xmm_result, xmm_temp); // xor with the current r vector
3470 __ pxor (xmm_result, xmm_key0); // do the aes rounds
3471 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) {
3472 __ aesenc(xmm_result, as_XMMRegister(rnum));
3473 }
3474 load_key(xmm_temp, key, 0xe0);
3475 __ aesenclast(xmm_result, xmm_temp);
3476 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3477 // no need to store r to memory until we exit
3478 __ addptr(pos, AESBlockSize);
3479 __ subptr(len_reg, AESBlockSize);
3480 __ jcc(Assembler::notEqual, L_loopTop_256);
3481 __ jmp(L_exit);
3482
3483 return start;
3484 }
3485
3486 // Safefetch stubs.
3487 void generate_safefetch(const char* name, int size, address* entry,
3488 address* fault_pc, address* continuation_pc) {
3489 // safefetch signatures:
3490 // int SafeFetch32(int* adr, int errValue);
3491 // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
3492 //
3493 // arguments:
3494 // c_rarg0 = adr
3495 // c_rarg1 = errValue
3496 //
3497 // result:
3498 // rax = *adr or errValue
3499
3500 StubCodeMark mark(this, "StubRoutines", name);
3501
3502 // Entry point, pc or function descriptor.
3503 *entry = __ pc();
3504
3505 // Load *adr into c_rarg1, may fault.
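// If the load at fault_pc faults, the signal handler redirects execution to
// continuation_pc; c_rarg1 then still holds errValue, so a call such as
//   SafeFetch32(unmapped_addr, -1)
// returns -1 instead of crashing.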
3506 *fault_pc = __ pc(); 3507 switch (size) { 3508 case 4: 3509 // int32_t 3510 __ movl(c_rarg1, Address(c_rarg0, 0)); 3511 break; 3512 case 8: 3513 // int64_t 3514 __ movq(c_rarg1, Address(c_rarg0, 0)); 3515 break; 3516 default: 3517 ShouldNotReachHere(); 3518 } 3519 3520 // return errValue or *adr 3521 *continuation_pc = __ pc(); 3522 __ movq(rax, c_rarg1); 3523 __ ret(0); 3524 } 3525 3526 // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time 3527 // to hide instruction latency 3528 // 3529 // Arguments: 3530 // 3531 // Inputs: 3532 // c_rarg0 - source byte array address 3533 // c_rarg1 - destination byte array address 3534 // c_rarg2 - K (key) in little endian int array 3535 // c_rarg3 - r vector byte array address 3536 // c_rarg4 - input length 3537 // 3538 // Output: 3539 // rax - input length 3540 // 3541 address generate_cipherBlockChaining_decryptAESCrypt_Parallel() { 3542 assert(UseAES, "need AES instructions and misaligned SSE support"); 3543 __ align(CodeEntryAlignment); 3544 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 3545 address start = __ pc(); 3546 3547 const Register from = c_rarg0; // source array address 3548 const Register to = c_rarg1; // destination array address 3549 const Register key = c_rarg2; // key array address 3550 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3551 // and left with the results of the last encryption block 3552 #ifndef _WIN64 3553 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3554 #else 3555 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3556 const Register len_reg = r11; // pick the volatile windows register 3557 #endif 3558 const Register pos = rax; 3559 3560 const int PARALLEL_FACTOR = 4; 3561 const int ROUNDS[3] = { 10, 12, 14 }; // aes rounds for key128, key192, key256 3562 3563 Label L_exit; 3564 Label L_singleBlock_loopTopHead[3]; // 128, 192, 256 3565 Label L_singleBlock_loopTopHead2[3]; // 128, 192, 256 3566 Label L_singleBlock_loopTop[3]; // 128, 192, 256 3567 Label L_multiBlock_loopTopHead[3]; // 128, 192, 256 3568 Label L_multiBlock_loopTop[3]; // 128, 192, 256 3569 3570 // keys 0-10 preloaded into xmm5-xmm15 3571 const int XMM_REG_NUM_KEY_FIRST = 5; 3572 const int XMM_REG_NUM_KEY_LAST = 15; 3573 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3574 const XMMRegister xmm_key_last = as_XMMRegister(XMM_REG_NUM_KEY_LAST); 3575 3576 __ enter(); // required for proper stackwalking of RuntimeStub frame 3577 3578 #ifdef _WIN64 3579 // on win64, fill len_reg from stack position 3580 __ movl(len_reg, len_mem); 3581 #else 3582 __ push(len_reg); // Save 3583 #endif 3584 __ push(rbx); 3585 // the java expanded key ordering is rotated one position from what we want 3586 // so we start from 0x10 here and hit 0x00 last 3587 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 3588 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3589 // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00 3590 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) { 3591 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3592 offset += 0x10; 3593 } 3594 load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask); 3595 3596 const XMMRegister xmm_prev_block_cipher = xmm1; // holds cipher of previous block 3597 3598 // registers holding the four results in 
the parallelized loop
3599 const XMMRegister xmm_result0 = xmm0;
3600 const XMMRegister xmm_result1 = xmm2;
3601 const XMMRegister xmm_result2 = xmm3;
3602 const XMMRegister xmm_result3 = xmm4;
3603
3604 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec
3605
3606 __ xorptr(pos, pos);
3607
3608 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256))
3609 __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
3610 __ cmpl(rbx, 52);
3611 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[1]);
3612 __ cmpl(rbx, 60);
3613 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[2]);
3614
// Apply the given AES op to all four in-flight blocks.
3615 #define DoFour(opc, src_reg) \
3616 __ opc(xmm_result0, src_reg); \
3617 __ opc(xmm_result1, src_reg); \
3618 __ opc(xmm_result2, src_reg); \
3619 __ opc(xmm_result3, src_reg); \
3620
3621 for (int k = 0; k < 3; ++k) {
3622 __ BIND(L_multiBlock_loopTopHead[k]);
3623 if (k != 0) {
3624 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left
3625 __ jcc(Assembler::less, L_singleBlock_loopTopHead2[k]);
3626 }
3627 if (k == 1) {
3628 __ subptr(rsp, 6 * wordSize);
3629 __ movdqu(Address(rsp, 0), xmm15); // save last_key from xmm15
3630 load_key(xmm15, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0
3631 __ movdqu(Address(rsp, 2 * wordSize), xmm15);
3632 load_key(xmm1, key, 0xc0); // 0xc0;
3633 __ movdqu(Address(rsp, 4 * wordSize), xmm1);
3634 } else if (k == 2) {
3635 __ subptr(rsp, 10 * wordSize);
3636 __ movdqu(Address(rsp, 0), xmm15); // save last_key from xmm15
3637 load_key(xmm15, key, 0xd0); // 0xd0; 256-bit key goes up to 0xe0
3638 __ movdqu(Address(rsp, 6 * wordSize), xmm15);
3639 load_key(xmm1, key, 0xe0); // 0xe0;
3640 __ movdqu(Address(rsp, 8 * wordSize), xmm1);
3641 load_key(xmm15, key, 0xb0); // 0xb0;
3642 __ movdqu(Address(rsp, 2 * wordSize), xmm15);
3643 load_key(xmm1, key, 0xc0); // 0xc0;
3644 __ movdqu(Address(rsp, 4 * wordSize), xmm1);
3645 }
3646 __ align(OptoLoopAlignment);
3647 __ BIND(L_multiBlock_loopTop[k]);
3648 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left
3649 __ jcc(Assembler::less, L_singleBlock_loopTopHead[k]);
3650
3651 if (k != 0) {
3652 __ movdqu(xmm15, Address(rsp, 2 * wordSize));
3653 __ movdqu(xmm1, Address(rsp, 4 * wordSize));
3654 }
3655
3656 __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); // get next 4 blocks into xmm_result registers
3657 __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
3658 __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
3659 __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize));
3660
3661 DoFour(pxor, xmm_key_first);
3662 if (k == 0) {
3663 for (int rnum = 1; rnum < ROUNDS[k]; rnum++) {
3664 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
3665 }
3666 DoFour(aesdeclast, xmm_key_last);
3667 } else if (k == 1) {
3668 for (int rnum = 1; rnum <= ROUNDS[k]-2; rnum++) {
3669 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
3670 }
3671 __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again.
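// The 192/256-bit schedules need more round keys than fit in xmm5-xmm15,
// so the extra keys were spilled to the stack above; xmm15 and xmm1 are
// reloaded from those slots around the remaining DoFour rounds.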
3672 DoFour(aesdec, xmm1); // key : 0xc0 3673 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again 3674 DoFour(aesdeclast, xmm_key_last); 3675 } else if (k == 2) { 3676 for (int rnum = 1; rnum <= ROUNDS[k] - 4; rnum++) { 3677 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3678 } 3679 DoFour(aesdec, xmm1); // key : 0xc0 3680 __ movdqu(xmm15, Address(rsp, 6 * wordSize)); 3681 __ movdqu(xmm1, Address(rsp, 8 * wordSize)); 3682 DoFour(aesdec, xmm15); // key : 0xd0 3683 __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again. 3684 DoFour(aesdec, xmm1); // key : 0xe0 3685 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again 3686 DoFour(aesdeclast, xmm_key_last); 3687 } 3688 3689 // for each result, xor with the r vector of previous cipher block 3690 __ pxor(xmm_result0, xmm_prev_block_cipher); 3691 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 3692 __ pxor(xmm_result1, xmm_prev_block_cipher); 3693 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 3694 __ pxor(xmm_result2, xmm_prev_block_cipher); 3695 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 3696 __ pxor(xmm_result3, xmm_prev_block_cipher); 3697 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3 * AESBlockSize)); // this will carry over to next set of blocks 3698 if (k != 0) { 3699 __ movdqu(Address(rvec, 0x00), xmm_prev_block_cipher); 3700 } 3701 3702 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output 3703 __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1); 3704 __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2); 3705 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3); 3706 3707 __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); 3708 __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); 3709 __ jmp(L_multiBlock_loopTop[k]); 3710 3711 // registers used in the non-parallelized loops 3712 // xmm register assignments for the loops below 3713 const XMMRegister xmm_result = xmm0; 3714 const XMMRegister xmm_prev_block_cipher_save = xmm2; 3715 const XMMRegister xmm_key11 = xmm3; 3716 const XMMRegister xmm_key12 = xmm4; 3717 const XMMRegister key_tmp = xmm4; 3718 3719 __ BIND(L_singleBlock_loopTopHead[k]); 3720 if (k == 1) { 3721 __ addptr(rsp, 6 * wordSize); 3722 } else if (k == 2) { 3723 __ addptr(rsp, 10 * wordSize); 3724 } 3725 __ cmpptr(len_reg, 0); // any blocks left?? 
3726 __ jcc(Assembler::equal, L_exit);
3727 __ BIND(L_singleBlock_loopTopHead2[k]);
3728 if (k == 1) {
3729 load_key(xmm_key11, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0
3730 load_key(xmm_key12, key, 0xc0); // 0xc0; 192-bit key goes up to 0xc0
3731 }
3732 if (k == 2) {
3733 load_key(xmm_key11, key, 0xb0); // 0xb0; 256-bit key goes up to 0xe0
3734 }
3735 __ align(OptoLoopAlignment);
3736 __ BIND(L_singleBlock_loopTop[k]);
3737 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
3738 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector
3739 __ pxor(xmm_result, xmm_key_first); // do the aes dec rounds
3740 for (int rnum = 1; rnum <= 9 ; rnum++) {
3741 __ aesdec(xmm_result, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
3742 }
3743 if (k == 1) {
3744 __ aesdec(xmm_result, xmm_key11);
3745 __ aesdec(xmm_result, xmm_key12);
3746 }
3747 if (k == 2) {
3748 __ aesdec(xmm_result, xmm_key11);
3749 load_key(key_tmp, key, 0xc0);
3750 __ aesdec(xmm_result, key_tmp);
3751 load_key(key_tmp, key, 0xd0);
3752 __ aesdec(xmm_result, key_tmp);
3753 load_key(key_tmp, key, 0xe0);
3754 __ aesdec(xmm_result, key_tmp);
3755 }
3756
3757 __ aesdeclast(xmm_result, xmm_key_last); // xmm15 always came from key+0
3758 __ pxor(xmm_result, xmm_prev_block_cipher); // xor with the current r vector
3759 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3760 // no need to store r to memory until we exit
3761 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block
3762 __ addptr(pos, AESBlockSize);
3763 __ subptr(len_reg, AESBlockSize);
3764 __ jcc(Assembler::notEqual, L_singleBlock_loopTop[k]);
3765 if (k != 2) {
3766 __ jmp(L_exit);
3767 }
3768 } // for 128/192/256
3769
3770 __ BIND(L_exit);
3771 __ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object
3772 __ pop(rbx);
3773 #ifdef _WIN64
3774 __ movl(rax, len_mem);
3775 #else
3776 __ pop(rax); // return length
3777 #endif
3778 __ leave(); // required for proper stackwalking of RuntimeStub frame
3779 __ ret(0);
3780 return start;
3781 }
3782
3783 address generate_electronicCodeBook_encryptAESCrypt() {
3784 __ align(CodeEntryAlignment);
3785 StubCodeMark mark(this, "StubRoutines", "electronicCodeBook_encryptAESCrypt");
3786 address start = __ pc();
3787 const Register from = c_rarg0; // source array address
3788 const Register to = c_rarg1; // destination array address
3789 const Register key = c_rarg2; // key array address
3790 const Register len = c_rarg3; // src len (must be multiple of blocksize 16)
3791 __ enter(); // required for proper stackwalking of RuntimeStub frame
3792 __ aesecb_encrypt(from, to, key, len);
3793 __ leave(); // required for proper stackwalking of RuntimeStub frame
3794 __ ret(0);
3795 return start;
3796 }
3797
3798 address generate_electronicCodeBook_decryptAESCrypt() {
3799 __ align(CodeEntryAlignment);
3800 StubCodeMark mark(this, "StubRoutines", "electronicCodeBook_decryptAESCrypt");
3801 address start = __ pc();
3802 const Register from = c_rarg0; // source array address
3803 const Register to = c_rarg1; // destination array address
3804 const Register key = c_rarg2; // key array address
3805 const Register len = c_rarg3; // src len (must be multiple of blocksize 16)
3806 __ enter(); // required for proper stackwalking of RuntimeStub frame
3807 __ aesecb_decrypt(from, to, key, len);
3808 __ leave(); // required for proper stackwalking of RuntimeStub frame
3809 __ ret(0);
3810 return start;
3811 }
3812
3813 address generate_upper_word_mask() {
3814 __ align(64);
3815 StubCodeMark mark(this, "StubRoutines", "upper_word_mask");
3816 address start = __ pc();
3817 __ emit_data64(0x0000000000000000, relocInfo::none);
3818 __ emit_data64(0xFFFFFFFF00000000, relocInfo::none);
3819 return start;
3820 }
3821
3822 address generate_shuffle_byte_flip_mask() {
3823 __ align(64);
3824 StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask");
3825 address start = __ pc();
3826 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
3827 __ emit_data64(0x0001020304050607, relocInfo::none);
3828 return start;
3829 }
3830
3831 // ofs and limit are used for the multi-block byte array.
3832 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
3833 address generate_sha1_implCompress(bool multi_block, const char *name) {
3834 __ align(CodeEntryAlignment);
3835 StubCodeMark mark(this, "StubRoutines", name);
3836 address start = __ pc();
3837
3838 Register buf = c_rarg0;
3839 Register state = c_rarg1;
3840 Register ofs = c_rarg2;
3841 Register limit = c_rarg3;
3842
3843 const XMMRegister abcd = xmm0;
3844 const XMMRegister e0 = xmm1;
3845 const XMMRegister e1 = xmm2;
3846 const XMMRegister msg0 = xmm3;
3847
3848 const XMMRegister msg1 = xmm4;
3849 const XMMRegister msg2 = xmm5;
3850 const XMMRegister msg3 = xmm6;
3851 const XMMRegister shuf_mask = xmm7;
3852
3853 __ enter();
3854
3855 __ subptr(rsp, 4 * wordSize);
3856
3857 __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask,
3858 buf, state, ofs, limit, rsp, multi_block);
3859
3860 __ addptr(rsp, 4 * wordSize);
3861
3862 __ leave();
3863 __ ret(0);
3864 return start;
3865 }
3866
3867 address generate_pshuffle_byte_flip_mask() {
3868 __ align(64);
3869 StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask");
3870 address start = __ pc();
3871 __ emit_data64(0x0405060700010203, relocInfo::none);
3872 __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
3873
3874 if (VM_Version::supports_avx2()) {
3875 __ emit_data64(0x0405060700010203, relocInfo::none); // second copy
3876 __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
3877 // _SHUF_00BA
3878 __ emit_data64(0x0b0a090803020100, relocInfo::none);
3879 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3880 __ emit_data64(0x0b0a090803020100, relocInfo::none);
3881 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3882 // _SHUF_DC00
3883 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3884 __ emit_data64(0x0b0a090803020100, relocInfo::none);
3885 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3886 __ emit_data64(0x0b0a090803020100, relocInfo::none);
3887 }
3888
3889 return start;
3890 }
3891
3892 // Mask for byte-swapping a pair of qwords in an XMM register using (v)pshufb.
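// (The first 32 bytes emitted below byte-swap each qword of a ymm value;
// the MASK_YMM_LO constant that follows is zero in its low 128 bits and
// all-ones in its high 128 bits, as used by the AVX2 SHA-512 code.)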
3893 address generate_pshuffle_byte_flip_mask_sha512() {
3894 __ align(32);
3895 StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask_sha512");
3896 address start = __ pc();
3897 if (VM_Version::supports_avx2()) {
3898 __ emit_data64(0x0001020304050607, relocInfo::none); // PSHUFFLE_BYTE_FLIP_MASK
3899 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
3900 __ emit_data64(0x1011121314151617, relocInfo::none);
3901 __ emit_data64(0x18191a1b1c1d1e1f, relocInfo::none);
3902 __ emit_data64(0x0000000000000000, relocInfo::none); // MASK_YMM_LO
3903 __ emit_data64(0x0000000000000000, relocInfo::none);
3904 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3905 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3906 }
3907
3908 return start;
3909 }
3910
3911 // ofs and limit are used for the multi-block byte array.
3912 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
3913 address generate_sha256_implCompress(bool multi_block, const char *name) {
3914 assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), "");
3915 __ align(CodeEntryAlignment);
3916 StubCodeMark mark(this, "StubRoutines", name);
3917 address start = __ pc();
3918
3919 Register buf = c_rarg0;
3920 Register state = c_rarg1;
3921 Register ofs = c_rarg2;
3922 Register limit = c_rarg3;
3923
3924 const XMMRegister msg = xmm0;
3925 const XMMRegister state0 = xmm1;
3926 const XMMRegister state1 = xmm2;
3927 const XMMRegister msgtmp0 = xmm3;
3928
3929 const XMMRegister msgtmp1 = xmm4;
3930 const XMMRegister msgtmp2 = xmm5;
3931 const XMMRegister msgtmp3 = xmm6;
3932 const XMMRegister msgtmp4 = xmm7;
3933
3934 const XMMRegister shuf_mask = xmm8;
3935
3936 __ enter();
3937
3938 __ subptr(rsp, 4 * wordSize);
3939
3940 if (VM_Version::supports_sha()) {
3941 __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
3942 buf, state, ofs, limit, rsp, multi_block, shuf_mask);
3943 } else if (VM_Version::supports_avx2()) {
3944 __ sha256_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
3945 buf, state, ofs, limit, rsp, multi_block, shuf_mask);
3946 }
3947 __ addptr(rsp, 4 * wordSize);
3948 __ vzeroupper();
3949 __ leave();
3950 __ ret(0);
3951 return start;
3952 }
3953
3954 address generate_sha512_implCompress(bool multi_block, const char *name) {
3955 assert(VM_Version::supports_avx2(), "");
3956 assert(VM_Version::supports_bmi2(), "");
3957 __ align(CodeEntryAlignment);
3958 StubCodeMark mark(this, "StubRoutines", name);
3959 address start = __ pc();
3960
3961 Register buf = c_rarg0;
3962 Register state = c_rarg1;
3963 Register ofs = c_rarg2;
3964 Register limit = c_rarg3;
3965
3966 const XMMRegister msg = xmm0;
3967 const XMMRegister state0 = xmm1;
3968 const XMMRegister state1 = xmm2;
3969 const XMMRegister msgtmp0 = xmm3;
3970 const XMMRegister msgtmp1 = xmm4;
3971 const XMMRegister msgtmp2 = xmm5;
3972 const XMMRegister msgtmp3 = xmm6;
3973 const XMMRegister msgtmp4 = xmm7;
3974
3975 const XMMRegister shuf_mask = xmm8;
3976
3977 __ enter();
3978
3979 __ sha512_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
3980 buf, state, ofs, limit, rsp, multi_block, shuf_mask);
3981
3982 __ vzeroupper();
3983 __ leave();
3984 __ ret(0);
3985 return start;
3986 }
3987
3988 // This mask is used for incrementing the counter values (linc0, linc4, etc.)
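// Layout of the table emitted below, in 16-byte lanes:
//   +0   lbswap mask (replicated across four lanes)
//   +64  linc0 = {0,1,2,3}    +128 linc4 (4 in every lane)
//   +192 linc8                +256 linc32               +320 linc16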
3989 address counter_mask_addr() { 3990 __ align(64); 3991 StubCodeMark mark(this, "StubRoutines", "counter_mask_addr"); 3992 address start = __ pc(); 3993 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);//lbswapmask 3994 __ emit_data64(0x0001020304050607, relocInfo::none); 3995 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 3996 __ emit_data64(0x0001020304050607, relocInfo::none); 3997 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 3998 __ emit_data64(0x0001020304050607, relocInfo::none); 3999 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 4000 __ emit_data64(0x0001020304050607, relocInfo::none); 4001 __ emit_data64(0x0000000000000000, relocInfo::none);//linc0 = counter_mask_addr+64 4002 __ emit_data64(0x0000000000000000, relocInfo::none); 4003 __ emit_data64(0x0000000000000001, relocInfo::none);//counter_mask_addr() + 80 4004 __ emit_data64(0x0000000000000000, relocInfo::none); 4005 __ emit_data64(0x0000000000000002, relocInfo::none); 4006 __ emit_data64(0x0000000000000000, relocInfo::none); 4007 __ emit_data64(0x0000000000000003, relocInfo::none); 4008 __ emit_data64(0x0000000000000000, relocInfo::none); 4009 __ emit_data64(0x0000000000000004, relocInfo::none);//linc4 = counter_mask_addr() + 128 4010 __ emit_data64(0x0000000000000000, relocInfo::none); 4011 __ emit_data64(0x0000000000000004, relocInfo::none); 4012 __ emit_data64(0x0000000000000000, relocInfo::none); 4013 __ emit_data64(0x0000000000000004, relocInfo::none); 4014 __ emit_data64(0x0000000000000000, relocInfo::none); 4015 __ emit_data64(0x0000000000000004, relocInfo::none); 4016 __ emit_data64(0x0000000000000000, relocInfo::none); 4017 __ emit_data64(0x0000000000000008, relocInfo::none);//linc8 = counter_mask_addr() + 192 4018 __ emit_data64(0x0000000000000000, relocInfo::none); 4019 __ emit_data64(0x0000000000000008, relocInfo::none); 4020 __ emit_data64(0x0000000000000000, relocInfo::none); 4021 __ emit_data64(0x0000000000000008, relocInfo::none); 4022 __ emit_data64(0x0000000000000000, relocInfo::none); 4023 __ emit_data64(0x0000000000000008, relocInfo::none); 4024 __ emit_data64(0x0000000000000000, relocInfo::none); 4025 __ emit_data64(0x0000000000000020, relocInfo::none);//linc32 = counter_mask_addr() + 256 4026 __ emit_data64(0x0000000000000000, relocInfo::none); 4027 __ emit_data64(0x0000000000000020, relocInfo::none); 4028 __ emit_data64(0x0000000000000000, relocInfo::none); 4029 __ emit_data64(0x0000000000000020, relocInfo::none); 4030 __ emit_data64(0x0000000000000000, relocInfo::none); 4031 __ emit_data64(0x0000000000000020, relocInfo::none); 4032 __ emit_data64(0x0000000000000000, relocInfo::none); 4033 __ emit_data64(0x0000000000000010, relocInfo::none);//linc16 = counter_mask_addr() + 320 4034 __ emit_data64(0x0000000000000000, relocInfo::none); 4035 __ emit_data64(0x0000000000000010, relocInfo::none); 4036 __ emit_data64(0x0000000000000000, relocInfo::none); 4037 __ emit_data64(0x0000000000000010, relocInfo::none); 4038 __ emit_data64(0x0000000000000000, relocInfo::none); 4039 __ emit_data64(0x0000000000000010, relocInfo::none); 4040 __ emit_data64(0x0000000000000000, relocInfo::none); 4041 return start; 4042 } 4043 4044 // Vector AES Counter implementation 4045 address generate_counterMode_VectorAESCrypt() { 4046 __ align(CodeEntryAlignment); 4047 StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt"); 4048 address start = __ pc(); 4049 const Register from = c_rarg0; // source array address 4050 const Register to = c_rarg1; // destination array address 4051 const Register key = 
c_rarg2; // key array address r8 4052 const Register counter = c_rarg3; // counter byte array initialized from counter array address 4053 // and updated with the incremented counter in the end 4054 #ifndef _WIN64 4055 const Register len_reg = c_rarg4; 4056 const Register saved_encCounter_start = c_rarg5; 4057 const Register used_addr = r10; 4058 const Address used_mem(rbp, 2 * wordSize); 4059 const Register used = r11; 4060 #else 4061 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 4062 const Address saved_encCounter_mem(rbp, 7 * wordSize); // saved encrypted counter is on stack on Win64 4063 const Address used_mem(rbp, 8 * wordSize); // used length is on stack on Win64 4064 const Register len_reg = r10; // pick the first volatile windows register 4065 const Register saved_encCounter_start = r11; 4066 const Register used_addr = r13; 4067 const Register used = r14; 4068 #endif 4069 __ enter(); 4070 // Save state before entering routine 4071 __ push(r12); 4072 __ push(r13); 4073 __ push(r14); 4074 __ push(r15); 4075 #ifdef _WIN64 4076 // on win64, fill len_reg from stack position 4077 __ movl(len_reg, len_mem); 4078 __ movptr(saved_encCounter_start, saved_encCounter_mem); 4079 __ movptr(used_addr, used_mem); 4080 __ movl(used, Address(used_addr, 0)); 4081 #else 4082 __ push(len_reg); // Save 4083 __ movptr(used_addr, used_mem); 4084 __ movl(used, Address(used_addr, 0)); 4085 #endif 4086 __ push(rbx); 4087 __ aesctr_encrypt(from, to, key, counter, len_reg, used, used_addr, saved_encCounter_start); 4088 // Restore state before leaving routine 4089 __ pop(rbx); 4090 #ifdef _WIN64 4091 __ movl(rax, len_mem); // return length 4092 #else 4093 __ pop(rax); // return length 4094 #endif 4095 __ pop(r15); 4096 __ pop(r14); 4097 __ pop(r13); 4098 __ pop(r12); 4099 4100 __ leave(); // required for proper stackwalking of RuntimeStub frame 4101 __ ret(0); 4102 return start; 4103 } 4104 4105 // This is a version of CTR/AES crypt which does 6 blocks in a loop at a time 4106 // to hide instruction latency 4107 // 4108 // Arguments: 4109 // 4110 // Inputs: 4111 // c_rarg0 - source byte array address 4112 // c_rarg1 - destination byte array address 4113 // c_rarg2 - K (key) in little endian int array 4114 // c_rarg3 - counter vector byte array address 4115 // Linux 4116 // c_rarg4 - input length 4117 // c_rarg5 - saved encryptedCounter start 4118 // rbp + 6 * wordSize - saved used length 4119 // Windows 4120 // rbp + 6 * wordSize - input length 4121 // rbp + 7 * wordSize - saved encryptedCounter start 4122 // rbp + 8 * wordSize - saved used length 4123 // 4124 // Output: 4125 // rax - input length 4126 // 4127 address generate_counterMode_AESCrypt_Parallel() { 4128 assert(UseAES, "need AES instructions and misaligned SSE support"); 4129 __ align(CodeEntryAlignment); 4130 StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt"); 4131 address start = __ pc(); 4132 const Register from = c_rarg0; // source array address 4133 const Register to = c_rarg1; // destination array address 4134 const Register key = c_rarg2; // key array address 4135 const Register counter = c_rarg3; // counter byte array initialized from counter array address 4136 // and updated with the incremented counter in the end 4137 #ifndef _WIN64 4138 const Register len_reg = c_rarg4; 4139 const Register saved_encCounter_start = c_rarg5; 4140 const Register used_addr = r10; 4141 const Address used_mem(rbp, 2 * wordSize); 4142 const Register used = r11; 4143 #else 4144 const Address len_mem(rbp, 6 * wordSize); // 
length is on stack on Win64
4145 const Address saved_encCounter_mem(rbp, 7 * wordSize); // saved encrypted counter is on stack on Win64
4146 const Address used_mem(rbp, 8 * wordSize); // used length is on stack on Win64
4147 const Register len_reg = r10; // pick the first volatile windows register
4148 const Register saved_encCounter_start = r11;
4149 const Register used_addr = r13;
4150 const Register used = r14;
4151 #endif
4152 const Register pos = rax;
4153
4154 const int PARALLEL_FACTOR = 6;
4155 const XMMRegister xmm_counter_shuf_mask = xmm0;
4156 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front
4157 const XMMRegister xmm_curr_counter = xmm2;
4158
4159 const XMMRegister xmm_key_tmp0 = xmm3;
4160 const XMMRegister xmm_key_tmp1 = xmm4;
4161
4162 // registers holding the four results in the parallelized loop
4163 const XMMRegister xmm_result0 = xmm5;
4164 const XMMRegister xmm_result1 = xmm6;
4165 const XMMRegister xmm_result2 = xmm7;
4166 const XMMRegister xmm_result3 = xmm8;
4167 const XMMRegister xmm_result4 = xmm9;
4168 const XMMRegister xmm_result5 = xmm10;
4169
4170 const XMMRegister xmm_from0 = xmm11;
4171 const XMMRegister xmm_from1 = xmm12;
4172 const XMMRegister xmm_from2 = xmm13;
4173 const XMMRegister xmm_from3 = xmm14; // the last one is xmm14; we have to preserve it on Win64.
4174 const XMMRegister xmm_from4 = xmm3; // reuse xmm3-xmm4; xmm_key_tmp0/1 are free while loading input text
4175 const XMMRegister xmm_from5 = xmm4;
4176
4177 // for key_128, key_192, key_256
4178 const int rounds[3] = {10, 12, 14};
4179 Label L_exit_preLoop, L_preLoop_start;
4180 Label L_multiBlock_loopTop[3];
4181 Label L_singleBlockLoopTop[3];
4182 Label L__incCounter[3][6]; // for 6 blocks
4183 Label L__incCounter_single[3]; // for single block, key128, key192, key256
4184 Label L_processTail_insr[3], L_processTail_4_insr[3], L_processTail_2_insr[3], L_processTail_1_insr[3], L_processTail_exit_insr[3];
4185 Label L_processTail_4_extr[3], L_processTail_2_extr[3], L_processTail_1_extr[3], L_processTail_exit_extr[3];
4186
4187 Label L_exit;
4188
4189 __ enter(); // required for proper stackwalking of RuntimeStub frame
4190
4191 #ifdef _WIN64
4192 // allocate spill slots for r13, r14
4193 enum {
4194 saved_r13_offset,
4195 saved_r14_offset
4196 };
4197 __ subptr(rsp, 2 * wordSize);
4198 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
4199 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);
4200
4201 // on win64, fill len_reg from stack position
4202 __ movl(len_reg, len_mem);
4203 __ movptr(saved_encCounter_start, saved_encCounter_mem);
4204 __ movptr(used_addr, used_mem);
4205 __ movl(used, Address(used_addr, 0));
4206 #else
4207 __ push(len_reg); // Save
4208 __ movptr(used_addr, used_mem);
4209 __ movl(used, Address(used_addr, 0));
4210 #endif
4211
4212 __ push(rbx); // Save RBX
4213 __ movdqu(xmm_curr_counter, Address(counter, 0x00)); // initialize counter with initial counter
4214 __ movdqu(xmm_counter_shuf_mask, ExternalAddress(StubRoutines::x86::counter_shuffle_mask_addr()), pos); // pos as scratch
4215 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); // counter is shuffled
4216 __ movptr(pos, 0);
4217
4218 // Use the partially consumed encrypted counter from the last invocation:
// leftover keystream bytes in saved_encCounter are XORed with the input one
// byte at a time until a 16-byte boundary is reached or the input ends.
4219 __ BIND(L_preLoop_start);
4220 __ cmpptr(used, 16);
4221 __ jcc(Assembler::aboveEqual, L_exit_preLoop);
4222 __ cmpptr(len_reg, 0);
4223 __ jcc(Assembler::lessEqual, L_exit_preLoop);
4224 __ movb(rbx, Address(saved_encCounter_start, used));
4225 __ xorb(rbx, Address(from, pos));
4226 __ movb(Address(to, pos), rbx);
4227 __ addptr(pos, 1);
4228 __ addptr(used, 1);
4229 __ subptr(len_reg, 1);
4230
4231 __ jmp(L_preLoop_start);
4232
4233 __ BIND(L_exit_preLoop);
4234 __ movl(Address(used_addr, 0), used);
4235
4236 // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
4237 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()), rbx); // rbx as scratch
4238 __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
4239 __ cmpl(rbx, 52);
4240 __ jcc(Assembler::equal, L_multiBlock_loopTop[1]);
4241 __ cmpl(rbx, 60);
4242 __ jcc(Assembler::equal, L_multiBlock_loopTop[2]);
4243
4244 #define CTR_DoSix(opc, src_reg) \
4245 __ opc(xmm_result0, src_reg); \
4246 __ opc(xmm_result1, src_reg); \
4247 __ opc(xmm_result2, src_reg); \
4248 __ opc(xmm_result3, src_reg); \
4249 __ opc(xmm_result4, src_reg); \
4250 __ opc(xmm_result5, src_reg);
4251
4252 // k == 0 : generate code for key_128
4253 // k == 1 : generate code for key_192
4254 // k == 2 : generate code for key_256
4255 for (int k = 0; k < 3; ++k) {
4256 // multi-block code starts here
4257 __ align(OptoLoopAlignment);
4258 __ BIND(L_multiBlock_loopTop[k]);
4259 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least PARALLEL_FACTOR blocks left
4260 __ jcc(Assembler::less, L_singleBlockLoopTop[k]);
4261 load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);
4262
4263 // load, then increment the counters
4264 CTR_DoSix(movdqa, xmm_curr_counter);
4265 inc_counter(rbx, xmm_result1, 0x01, L__incCounter[k][0]);
4266 inc_counter(rbx, xmm_result2, 0x02, L__incCounter[k][1]);
4267 inc_counter(rbx, xmm_result3, 0x03, L__incCounter[k][2]);
4268 inc_counter(rbx, xmm_result4, 0x04, L__incCounter[k][3]);
4269 inc_counter(rbx, xmm_result5, 0x05, L__incCounter[k][4]);
4270 inc_counter(rbx, xmm_curr_counter, 0x06, L__incCounter[k][5]);
4271 CTR_DoSix(pshufb, xmm_counter_shuf_mask); // after incrementing, shuffle the counters back into byte order for PXOR
4272 CTR_DoSix(pxor, xmm_key_tmp0); // PXOR with the round 0 key
4273
4274 // load two round keys at a time
4275 for (int i = 1; i < rounds[k]; ) {
4276 load_key(xmm_key_tmp1, key, (0x10 * i), xmm_key_shuf_mask);
4277 load_key(xmm_key_tmp0, key, (0x10 * (i+1)), xmm_key_shuf_mask);
4278 CTR_DoSix(aesenc, xmm_key_tmp1);
4279 i++;
4280 if (i != rounds[k]) {
4281 CTR_DoSix(aesenc, xmm_key_tmp0);
4282 } else {
4283 CTR_DoSix(aesenclast, xmm_key_tmp0);
4284 }
4285 i++;
4286 }
4287
4288 // get next PARALLEL_FACTOR blocks into xmm_result registers
4289 __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
4290 __ movdqu(xmm_from1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
4291 __ movdqu(xmm_from2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
4292 __ movdqu(xmm_from3, Address(from, pos, Address::times_1, 3 * AESBlockSize));
4293 __ movdqu(xmm_from4, Address(from, pos, Address::times_1, 4 * AESBlockSize));
4294 __ movdqu(xmm_from5, Address(from, pos, Address::times_1, 5 * AESBlockSize));
4295
4296 __ pxor(xmm_result0, xmm_from0);
4297 __ pxor(xmm_result1, xmm_from1);
4298 __ pxor(xmm_result2, xmm_from2);
4299 __ pxor(xmm_result3, xmm_from3);
4300 __ pxor(xmm_result4, xmm_from4);
4301 __ pxor(xmm_result5, xmm_from5);
4302
4303 // store 6 results into the next 96 bytes of output
4304 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
4305 __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1);
4306 __ movdqu(Address(to, pos,
Address::times_1, 2 * AESBlockSize), xmm_result2); 4307 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3); 4308 __ movdqu(Address(to, pos, Address::times_1, 4 * AESBlockSize), xmm_result4); 4309 __ movdqu(Address(to, pos, Address::times_1, 5 * AESBlockSize), xmm_result5); 4310 4311 __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); // increase the length of crypt text 4312 __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // decrease the remaining length 4313 __ jmp(L_multiBlock_loopTop[k]); 4314 4315 // singleBlock starts here 4316 __ align(OptoLoopAlignment); 4317 __ BIND(L_singleBlockLoopTop[k]); 4318 __ cmpptr(len_reg, 0); 4319 __ jcc(Assembler::lessEqual, L_exit); 4320 load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask); 4321 __ movdqa(xmm_result0, xmm_curr_counter); 4322 inc_counter(rbx, xmm_curr_counter, 0x01, L__incCounter_single[k]); 4323 __ pshufb(xmm_result0, xmm_counter_shuf_mask); 4324 __ pxor(xmm_result0, xmm_key_tmp0); 4325 for (int i = 1; i < rounds[k]; i++) { 4326 load_key(xmm_key_tmp0, key, (0x10 * i), xmm_key_shuf_mask); 4327 __ aesenc(xmm_result0, xmm_key_tmp0); 4328 } 4329 load_key(xmm_key_tmp0, key, (rounds[k] * 0x10), xmm_key_shuf_mask); 4330 __ aesenclast(xmm_result0, xmm_key_tmp0); 4331 __ cmpptr(len_reg, AESBlockSize); 4332 __ jcc(Assembler::less, L_processTail_insr[k]); 4333 __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 4334 __ pxor(xmm_result0, xmm_from0); 4335 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); 4336 __ addptr(pos, AESBlockSize); 4337 __ subptr(len_reg, AESBlockSize); 4338 __ jmp(L_singleBlockLoopTop[k]); 4339 __ BIND(L_processTail_insr[k]); // Process the tail part of the input array 4340 __ addptr(pos, len_reg); // 1. Insert bytes from src array into xmm_from0 register 4341 __ testptr(len_reg, 8); 4342 __ jcc(Assembler::zero, L_processTail_4_insr[k]); 4343 __ subptr(pos,8); 4344 __ pinsrq(xmm_from0, Address(from, pos), 0); 4345 __ BIND(L_processTail_4_insr[k]); 4346 __ testptr(len_reg, 4); 4347 __ jcc(Assembler::zero, L_processTail_2_insr[k]); 4348 __ subptr(pos,4); 4349 __ pslldq(xmm_from0, 4); 4350 __ pinsrd(xmm_from0, Address(from, pos), 0); 4351 __ BIND(L_processTail_2_insr[k]); 4352 __ testptr(len_reg, 2); 4353 __ jcc(Assembler::zero, L_processTail_1_insr[k]); 4354 __ subptr(pos, 2); 4355 __ pslldq(xmm_from0, 2); 4356 __ pinsrw(xmm_from0, Address(from, pos), 0); 4357 __ BIND(L_processTail_1_insr[k]); 4358 __ testptr(len_reg, 1); 4359 __ jcc(Assembler::zero, L_processTail_exit_insr[k]); 4360 __ subptr(pos, 1); 4361 __ pslldq(xmm_from0, 1); 4362 __ pinsrb(xmm_from0, Address(from, pos), 0); 4363 __ BIND(L_processTail_exit_insr[k]); 4364 4365 __ movdqu(Address(saved_encCounter_start, 0), xmm_result0); // 2. Perform pxor of the encrypted counter and plaintext Bytes. 4366 __ pxor(xmm_result0, xmm_from0); // Also the encrypted counter is saved for next invocation. 4367 4368 __ testptr(len_reg, 8); 4369 __ jcc(Assembler::zero, L_processTail_4_extr[k]); // 3. Extract bytes from xmm_result0 into the dest. 
array 4370 __ pextrq(Address(to, pos), xmm_result0, 0); 4371 __ psrldq(xmm_result0, 8); 4372 __ addptr(pos, 8); 4373 __ BIND(L_processTail_4_extr[k]); 4374 __ testptr(len_reg, 4); 4375 __ jcc(Assembler::zero, L_processTail_2_extr[k]); 4376 __ pextrd(Address(to, pos), xmm_result0, 0); 4377 __ psrldq(xmm_result0, 4); 4378 __ addptr(pos, 4); 4379 __ BIND(L_processTail_2_extr[k]); 4380 __ testptr(len_reg, 2); 4381 __ jcc(Assembler::zero, L_processTail_1_extr[k]); 4382 __ pextrw(Address(to, pos), xmm_result0, 0); 4383 __ psrldq(xmm_result0, 2); 4384 __ addptr(pos, 2); 4385 __ BIND(L_processTail_1_extr[k]); 4386 __ testptr(len_reg, 1); 4387 __ jcc(Assembler::zero, L_processTail_exit_extr[k]); 4388 __ pextrb(Address(to, pos), xmm_result0, 0); 4389 4390 __ BIND(L_processTail_exit_extr[k]); 4391 __ movl(Address(used_addr, 0), len_reg); 4392 __ jmp(L_exit); 4393 4394 } 4395 4396 __ BIND(L_exit); 4397 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); //counter is shuffled back. 4398 __ movdqu(Address(counter, 0), xmm_curr_counter); //save counter back 4399 __ pop(rbx); // pop the saved RBX. 4400 #ifdef _WIN64 4401 __ movl(rax, len_mem); 4402 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 4403 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 4404 __ addptr(rsp, 2 * wordSize); 4405 #else 4406 __ pop(rax); // return 'len' 4407 #endif 4408 __ leave(); // required for proper stackwalking of RuntimeStub frame 4409 __ ret(0); 4410 return start; 4411 } 4412 4413 void roundDec(XMMRegister xmm_reg) { 4414 __ vaesdec(xmm1, xmm1, xmm_reg, Assembler::AVX_512bit); 4415 __ vaesdec(xmm2, xmm2, xmm_reg, Assembler::AVX_512bit); 4416 __ vaesdec(xmm3, xmm3, xmm_reg, Assembler::AVX_512bit); 4417 __ vaesdec(xmm4, xmm4, xmm_reg, Assembler::AVX_512bit); 4418 __ vaesdec(xmm5, xmm5, xmm_reg, Assembler::AVX_512bit); 4419 __ vaesdec(xmm6, xmm6, xmm_reg, Assembler::AVX_512bit); 4420 __ vaesdec(xmm7, xmm7, xmm_reg, Assembler::AVX_512bit); 4421 __ vaesdec(xmm8, xmm8, xmm_reg, Assembler::AVX_512bit); 4422 } 4423 4424 void roundDeclast(XMMRegister xmm_reg) { 4425 __ vaesdeclast(xmm1, xmm1, xmm_reg, Assembler::AVX_512bit); 4426 __ vaesdeclast(xmm2, xmm2, xmm_reg, Assembler::AVX_512bit); 4427 __ vaesdeclast(xmm3, xmm3, xmm_reg, Assembler::AVX_512bit); 4428 __ vaesdeclast(xmm4, xmm4, xmm_reg, Assembler::AVX_512bit); 4429 __ vaesdeclast(xmm5, xmm5, xmm_reg, Assembler::AVX_512bit); 4430 __ vaesdeclast(xmm6, xmm6, xmm_reg, Assembler::AVX_512bit); 4431 __ vaesdeclast(xmm7, xmm7, xmm_reg, Assembler::AVX_512bit); 4432 __ vaesdeclast(xmm8, xmm8, xmm_reg, Assembler::AVX_512bit); 4433 } 4434 4435 void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask = NULL) { 4436 __ movdqu(xmmdst, Address(key, offset)); 4437 if (xmm_shuf_mask != NULL) { 4438 __ pshufb(xmmdst, xmm_shuf_mask); 4439 } else { 4440 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 4441 } 4442 __ evshufi64x2(xmmdst, xmmdst, xmmdst, 0x0, Assembler::AVX_512bit); 4443 4444 } 4445 4446 address generate_cipherBlockChaining_decryptVectorAESCrypt() { 4447 assert(VM_Version::supports_avx512_vaes(), "need AES instructions and misaligned SSE support"); 4448 __ align(CodeEntryAlignment); 4449 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 4450 address start = __ pc(); 4451 4452 const Register from = c_rarg0; // source array address 4453 const Register to = c_rarg1; // destination array address 4454 const Register key = c_rarg2; // key array address 4455 const Register rvec 
= c_rarg3; // r byte array initialized from initvector array address 4456 // and left with the results of the last encryption block 4457 #ifndef _WIN64 4458 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 4459 #else 4460 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 4461 const Register len_reg = r11; // pick the volatile windows register 4462 #endif 4463 4464 Label Loop, Loop1, L_128, L_256, L_192, KEY_192, KEY_256, Loop2, Lcbc_dec_rem_loop, 4465 Lcbc_dec_rem_last, Lcbc_dec_ret, Lcbc_dec_rem, Lcbc_exit; 4466 4467 __ enter(); 4468 4469 #ifdef _WIN64 4470 // on win64, fill len_reg from stack position 4471 __ movl(len_reg, len_mem); 4472 #else 4473 __ push(len_reg); // Save 4474 #endif 4475 __ push(rbx); 4476 __ vzeroupper(); 4477 4478 // Temporary variable declaration for swapping key bytes 4479 const XMMRegister xmm_key_shuf_mask = xmm1; 4480 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 4481 4482 // Calculate number of rounds from key size: 44 for 10-rounds, 52 for 12-rounds, 60 for 14-rounds 4483 const Register rounds = rbx; 4484 __ movl(rounds, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 4485 4486 const XMMRegister IV = xmm0; 4487 // Load IV and broadcast value to 512-bits 4488 __ evbroadcasti64x2(IV, Address(rvec, 0), Assembler::AVX_512bit); 4489 4490 // Temporary variables for storing round keys 4491 const XMMRegister RK0 = xmm30; 4492 const XMMRegister RK1 = xmm9; 4493 const XMMRegister RK2 = xmm18; 4494 const XMMRegister RK3 = xmm19; 4495 const XMMRegister RK4 = xmm20; 4496 const XMMRegister RK5 = xmm21; 4497 const XMMRegister RK6 = xmm22; 4498 const XMMRegister RK7 = xmm23; 4499 const XMMRegister RK8 = xmm24; 4500 const XMMRegister RK9 = xmm25; 4501 const XMMRegister RK10 = xmm26; 4502 4503 // Load and shuffle key 4504 // the java expanded key ordering is rotated one position from what we want 4505 // so we start from 1*16 here and hit 0*16 last 4506 ev_load_key(RK1, key, 1 * 16, xmm_key_shuf_mask); 4507 ev_load_key(RK2, key, 2 * 16, xmm_key_shuf_mask); 4508 ev_load_key(RK3, key, 3 * 16, xmm_key_shuf_mask); 4509 ev_load_key(RK4, key, 4 * 16, xmm_key_shuf_mask); 4510 ev_load_key(RK5, key, 5 * 16, xmm_key_shuf_mask); 4511 ev_load_key(RK6, key, 6 * 16, xmm_key_shuf_mask); 4512 ev_load_key(RK7, key, 7 * 16, xmm_key_shuf_mask); 4513 ev_load_key(RK8, key, 8 * 16, xmm_key_shuf_mask); 4514 ev_load_key(RK9, key, 9 * 16, xmm_key_shuf_mask); 4515 ev_load_key(RK10, key, 10 * 16, xmm_key_shuf_mask); 4516 ev_load_key(RK0, key, 0*16, xmm_key_shuf_mask); 4517 4518 // Variables for storing source cipher text 4519 const XMMRegister S0 = xmm10; 4520 const XMMRegister S1 = xmm11; 4521 const XMMRegister S2 = xmm12; 4522 const XMMRegister S3 = xmm13; 4523 const XMMRegister S4 = xmm14; 4524 const XMMRegister S5 = xmm15; 4525 const XMMRegister S6 = xmm16; 4526 const XMMRegister S7 = xmm17; 4527 4528 // Variables for storing decrypted text 4529 const XMMRegister B0 = xmm1; 4530 const XMMRegister B1 = xmm2; 4531 const XMMRegister B2 = xmm3; 4532 const XMMRegister B3 = xmm4; 4533 const XMMRegister B4 = xmm5; 4534 const XMMRegister B5 = xmm6; 4535 const XMMRegister B6 = xmm7; 4536 const XMMRegister B7 = xmm8; 4537 4538 __ cmpl(rounds, 44); 4539 __ jcc(Assembler::greater, KEY_192); 4540 __ jmp(Loop); 4541 4542 __ BIND(KEY_192); 4543 const XMMRegister RK11 = xmm27; 4544 const XMMRegister RK12 = xmm28; 4545 ev_load_key(RK11, key, 11*16, xmm_key_shuf_mask); 
4546 ev_load_key(RK12, key, 12*16, xmm_key_shuf_mask); 4547 4548 __ cmpl(rounds, 52); 4549 __ jcc(Assembler::greater, KEY_256); 4550 __ jmp(Loop); 4551 4552 __ BIND(KEY_256); 4553 const XMMRegister RK13 = xmm29; 4554 const XMMRegister RK14 = xmm31; 4555 ev_load_key(RK13, key, 13*16, xmm_key_shuf_mask); 4556 ev_load_key(RK14, key, 14*16, xmm_key_shuf_mask); 4557 4558 __ BIND(Loop); 4559 __ cmpl(len_reg, 512); 4560 __ jcc(Assembler::below, Lcbc_dec_rem); 4561 __ BIND(Loop1); 4562 __ subl(len_reg, 512); 4563 __ evmovdquq(S0, Address(from, 0 * 64), Assembler::AVX_512bit); 4564 __ evmovdquq(S1, Address(from, 1 * 64), Assembler::AVX_512bit); 4565 __ evmovdquq(S2, Address(from, 2 * 64), Assembler::AVX_512bit); 4566 __ evmovdquq(S3, Address(from, 3 * 64), Assembler::AVX_512bit); 4567 __ evmovdquq(S4, Address(from, 4 * 64), Assembler::AVX_512bit); 4568 __ evmovdquq(S5, Address(from, 5 * 64), Assembler::AVX_512bit); 4569 __ evmovdquq(S6, Address(from, 6 * 64), Assembler::AVX_512bit); 4570 __ evmovdquq(S7, Address(from, 7 * 64), Assembler::AVX_512bit); 4571 __ leaq(from, Address(from, 8 * 64)); 4572 4573 __ evpxorq(B0, S0, RK1, Assembler::AVX_512bit); 4574 __ evpxorq(B1, S1, RK1, Assembler::AVX_512bit); 4575 __ evpxorq(B2, S2, RK1, Assembler::AVX_512bit); 4576 __ evpxorq(B3, S3, RK1, Assembler::AVX_512bit); 4577 __ evpxorq(B4, S4, RK1, Assembler::AVX_512bit); 4578 __ evpxorq(B5, S5, RK1, Assembler::AVX_512bit); 4579 __ evpxorq(B6, S6, RK1, Assembler::AVX_512bit); 4580 __ evpxorq(B7, S7, RK1, Assembler::AVX_512bit); 4581 4582 __ evalignq(IV, S0, IV, 0x06); 4583 __ evalignq(S0, S1, S0, 0x06); 4584 __ evalignq(S1, S2, S1, 0x06); 4585 __ evalignq(S2, S3, S2, 0x06); 4586 __ evalignq(S3, S4, S3, 0x06); 4587 __ evalignq(S4, S5, S4, 0x06); 4588 __ evalignq(S5, S6, S5, 0x06); 4589 __ evalignq(S6, S7, S6, 0x06); 4590 4591 roundDec(RK2); 4592 roundDec(RK3); 4593 roundDec(RK4); 4594 roundDec(RK5); 4595 roundDec(RK6); 4596 roundDec(RK7); 4597 roundDec(RK8); 4598 roundDec(RK9); 4599 roundDec(RK10); 4600 4601 __ cmpl(rounds, 44); 4602 __ jcc(Assembler::belowEqual, L_128); 4603 roundDec(RK11); 4604 roundDec(RK12); 4605 4606 __ cmpl(rounds, 52); 4607 __ jcc(Assembler::belowEqual, L_192); 4608 roundDec(RK13); 4609 roundDec(RK14); 4610 4611 __ BIND(L_256); 4612 roundDeclast(RK0); 4613 __ jmp(Loop2); 4614 4615 __ BIND(L_128); 4616 roundDeclast(RK0); 4617 __ jmp(Loop2); 4618 4619 __ BIND(L_192); 4620 roundDeclast(RK0); 4621 4622 __ BIND(Loop2); 4623 __ evpxorq(B0, B0, IV, Assembler::AVX_512bit); 4624 __ evpxorq(B1, B1, S0, Assembler::AVX_512bit); 4625 __ evpxorq(B2, B2, S1, Assembler::AVX_512bit); 4626 __ evpxorq(B3, B3, S2, Assembler::AVX_512bit); 4627 __ evpxorq(B4, B4, S3, Assembler::AVX_512bit); 4628 __ evpxorq(B5, B5, S4, Assembler::AVX_512bit); 4629 __ evpxorq(B6, B6, S5, Assembler::AVX_512bit); 4630 __ evpxorq(B7, B7, S6, Assembler::AVX_512bit); 4631 __ evmovdquq(IV, S7, Assembler::AVX_512bit); 4632 4633 __ evmovdquq(Address(to, 0 * 64), B0, Assembler::AVX_512bit); 4634 __ evmovdquq(Address(to, 1 * 64), B1, Assembler::AVX_512bit); 4635 __ evmovdquq(Address(to, 2 * 64), B2, Assembler::AVX_512bit); 4636 __ evmovdquq(Address(to, 3 * 64), B3, Assembler::AVX_512bit); 4637 __ evmovdquq(Address(to, 4 * 64), B4, Assembler::AVX_512bit); 4638 __ evmovdquq(Address(to, 5 * 64), B5, Assembler::AVX_512bit); 4639 __ evmovdquq(Address(to, 6 * 64), B6, Assembler::AVX_512bit); 4640 __ evmovdquq(Address(to, 7 * 64), B7, Assembler::AVX_512bit); 4641 __ leaq(to, Address(to, 8 * 64)); 4642 __ jmp(Loop); 4643 4644 __ BIND(Lcbc_dec_rem); 
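// Fewer than 512 bytes remain, so decrypt one 16-byte block per iteration.
// The evshufi64x2 below brings the most recent chaining block into the low
// 128 bits of IV for the single-block tail loop.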
4645 __ evshufi64x2(IV, IV, IV, 0x03, Assembler::AVX_512bit); 4646 4647 __ BIND(Lcbc_dec_rem_loop); 4648 __ subl(len_reg, 16); 4649 __ jcc(Assembler::carrySet, Lcbc_dec_ret); 4650 4651 __ movdqu(S0, Address(from, 0)); 4652 __ evpxorq(B0, S0, RK1, Assembler::AVX_512bit); 4653 __ vaesdec(B0, B0, RK2, Assembler::AVX_512bit); 4654 __ vaesdec(B0, B0, RK3, Assembler::AVX_512bit); 4655 __ vaesdec(B0, B0, RK4, Assembler::AVX_512bit); 4656 __ vaesdec(B0, B0, RK5, Assembler::AVX_512bit); 4657 __ vaesdec(B0, B0, RK6, Assembler::AVX_512bit); 4658 __ vaesdec(B0, B0, RK7, Assembler::AVX_512bit); 4659 __ vaesdec(B0, B0, RK8, Assembler::AVX_512bit); 4660 __ vaesdec(B0, B0, RK9, Assembler::AVX_512bit); 4661 __ vaesdec(B0, B0, RK10, Assembler::AVX_512bit); 4662 __ cmpl(rounds, 44); 4663 __ jcc(Assembler::belowEqual, Lcbc_dec_rem_last); 4664 4665 __ vaesdec(B0, B0, RK11, Assembler::AVX_512bit); 4666 __ vaesdec(B0, B0, RK12, Assembler::AVX_512bit); 4667 __ cmpl(rounds, 52); 4668 __ jcc(Assembler::belowEqual, Lcbc_dec_rem_last); 4669 4670 __ vaesdec(B0, B0, RK13, Assembler::AVX_512bit); 4671 __ vaesdec(B0, B0, RK14, Assembler::AVX_512bit); 4672 4673 __ BIND(Lcbc_dec_rem_last); 4674 __ vaesdeclast(B0, B0, RK0, Assembler::AVX_512bit); 4675 4676 __ evpxorq(B0, B0, IV, Assembler::AVX_512bit); 4677 __ evmovdquq(IV, S0, Assembler::AVX_512bit); 4678 __ movdqu(Address(to, 0), B0); 4679 __ leaq(from, Address(from, 16)); 4680 __ leaq(to, Address(to, 16)); 4681 __ jmp(Lcbc_dec_rem_loop); 4682 4683 __ BIND(Lcbc_dec_ret); 4684 __ movdqu(Address(rvec, 0), IV); 4685 4686 // Zero out the round keys 4687 __ evpxorq(RK0, RK0, RK0, Assembler::AVX_512bit); 4688 __ evpxorq(RK1, RK1, RK1, Assembler::AVX_512bit); 4689 __ evpxorq(RK2, RK2, RK2, Assembler::AVX_512bit); 4690 __ evpxorq(RK3, RK3, RK3, Assembler::AVX_512bit); 4691 __ evpxorq(RK4, RK4, RK4, Assembler::AVX_512bit); 4692 __ evpxorq(RK5, RK5, RK5, Assembler::AVX_512bit); 4693 __ evpxorq(RK6, RK6, RK6, Assembler::AVX_512bit); 4694 __ evpxorq(RK7, RK7, RK7, Assembler::AVX_512bit); 4695 __ evpxorq(RK8, RK8, RK8, Assembler::AVX_512bit); 4696 __ evpxorq(RK9, RK9, RK9, Assembler::AVX_512bit); 4697 __ evpxorq(RK10, RK10, RK10, Assembler::AVX_512bit); 4698 __ cmpl(rounds, 44); 4699 __ jcc(Assembler::belowEqual, Lcbc_exit); 4700 __ evpxorq(RK11, RK11, RK11, Assembler::AVX_512bit); 4701 __ evpxorq(RK12, RK12, RK12, Assembler::AVX_512bit); 4702 __ cmpl(rounds, 52); 4703 __ jcc(Assembler::belowEqual, Lcbc_exit); 4704 __ evpxorq(RK13, RK13, RK13, Assembler::AVX_512bit); 4705 __ evpxorq(RK14, RK14, RK14, Assembler::AVX_512bit); 4706 4707 __ BIND(Lcbc_exit); 4708 __ pop(rbx); 4709 #ifdef _WIN64 4710 __ movl(rax, len_mem); 4711 #else 4712 __ pop(rax); // return length 4713 #endif 4714 __ leave(); // required for proper stackwalking of RuntimeStub frame 4715 __ ret(0); 4716 return start; 4717 } 4718 4719 // Polynomial x^128+x^127+x^126+x^121+1 4720 address ghash_polynomial_addr() { 4721 __ align(CodeEntryAlignment); 4722 StubCodeMark mark(this, "StubRoutines", "_ghash_poly_addr"); 4723 address start = __ pc(); 4724 __ emit_data64(0x0000000000000001, relocInfo::none); 4725 __ emit_data64(0xc200000000000000, relocInfo::none); 4726 return start; 4727 } 4728 4729 address ghash_shufflemask_addr() { 4730 __ align(CodeEntryAlignment); 4731 StubCodeMark mark(this, "StubRoutines", "_ghash_shuffmask_addr"); 4732 address start = __ pc(); 4733 __ emit_data64(0x0f0f0f0f0f0f0f0f, relocInfo::none); 4734 __ emit_data64(0x0f0f0f0f0f0f0f0f, relocInfo::none); 4735 return start; 4736 } 4737 4738 // Ghash single 
and multi-block operations using AVX instructions 4739 address generate_avx_ghash_processBlocks() { 4740 __ align(CodeEntryAlignment); 4741 4742 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 4743 address start = __ pc(); 4744 4745 // arguments 4746 const Register state = c_rarg0; 4747 const Register htbl = c_rarg1; 4748 const Register data = c_rarg2; 4749 const Register blocks = c_rarg3; 4750 __ enter(); 4751 // Save state before entering routine 4752 __ avx_ghash(state, htbl, data, blocks); 4753 __ leave(); // required for proper stackwalking of RuntimeStub frame 4754 __ ret(0); 4755 return start; 4756 } 4757 4758 // byte swap x86 long 4759 address generate_ghash_long_swap_mask() { 4760 __ align(CodeEntryAlignment); 4761 StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask"); 4762 address start = __ pc(); 4763 __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none ); 4764 __ emit_data64(0x0706050403020100, relocInfo::none ); 4765 return start; 4766 } 4767 4768 // byte swap x86 byte array 4769 address generate_ghash_byte_swap_mask() { 4770 __ align(CodeEntryAlignment); 4771 StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask"); 4772 address start = __ pc(); 4773 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none ); 4774 __ emit_data64(0x0001020304050607, relocInfo::none ); 4775 return start; 4776 } 4777 4778 /* Single and multi-block ghash operations */ 4779 address generate_ghash_processBlocks() { 4780 __ align(CodeEntryAlignment); 4781 Label L_ghash_loop, L_exit; 4782 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 4783 address start = __ pc(); 4784 4785 const Register state = c_rarg0; 4786 const Register subkeyH = c_rarg1; 4787 const Register data = c_rarg2; 4788 const Register blocks = c_rarg3; 4789 4790 const XMMRegister xmm_temp0 = xmm0; 4791 const XMMRegister xmm_temp1 = xmm1; 4792 const XMMRegister xmm_temp2 = xmm2; 4793 const XMMRegister xmm_temp3 = xmm3; 4794 const XMMRegister xmm_temp4 = xmm4; 4795 const XMMRegister xmm_temp5 = xmm5; 4796 const XMMRegister xmm_temp6 = xmm6; 4797 const XMMRegister xmm_temp7 = xmm7; 4798 const XMMRegister xmm_temp8 = xmm8; 4799 const XMMRegister xmm_temp9 = xmm9; 4800 const XMMRegister xmm_temp10 = xmm10; 4801 4802 __ enter(); 4803 4804 __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr())); 4805 4806 __ movdqu(xmm_temp0, Address(state, 0)); 4807 __ pshufb(xmm_temp0, xmm_temp10); 4808 4809 4810 __ BIND(L_ghash_loop); 4811 __ movdqu(xmm_temp2, Address(data, 0)); 4812 __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr())); 4813 4814 __ movdqu(xmm_temp1, Address(subkeyH, 0)); 4815 __ pshufb(xmm_temp1, xmm_temp10); 4816 4817 __ pxor(xmm_temp0, xmm_temp2); 4818 4819 // 4820 // Multiply with the hash key 4821 // 4822 __ movdqu(xmm_temp3, xmm_temp0); 4823 __ pclmulqdq(xmm_temp3, xmm_temp1, 0); // xmm3 holds a0*b0 4824 __ movdqu(xmm_temp4, xmm_temp0); 4825 __ pclmulqdq(xmm_temp4, xmm_temp1, 16); // xmm4 holds a0*b1 4826 4827 __ movdqu(xmm_temp5, xmm_temp0); 4828 __ pclmulqdq(xmm_temp5, xmm_temp1, 1); // xmm5 holds a1*b0 4829 __ movdqu(xmm_temp6, xmm_temp0); 4830 __ pclmulqdq(xmm_temp6, xmm_temp1, 17); // xmm6 holds a1*b1 4831 4832 __ pxor(xmm_temp4, xmm_temp5); // xmm4 holds a0*b1 + a1*b0 4833 4834 __ movdqu(xmm_temp5, xmm_temp4); // move the contents of xmm4 to xmm5 4835 __ psrldq(xmm_temp4, 8); // shift xmm4 right by 64 bits 4836 __ pslldq(xmm_temp5, 8); // shift xmm5 left by 64 bits 4837 __ pxor(xmm_temp3, xmm_temp5);
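// The four pclmulqdq products above are recombined here into one 256-bit
// carry-less product. For reference, a sketch of the identity being used
// (a = a1:a0 and b = b1:b0 as 64-bit halves; additions are XORs in GF(2)):
//
//   a * b = a0*b0 ^ ((a0*b1 ^ a1*b0) << 64) ^ (a1*b1 << 128)
//
// The psrldq/pslldq pair splits the middle term so that its low half is
// folded into xmm3 (the low 128 bits) and its high half into xmm6 (the high
// 128 bits, first line below).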
4838 __ pxor(xmm_temp6, xmm_temp4); // Register pair <xmm6:xmm3> holds the result 4839 // of the carry-less multiplication of 4840 // xmm0 by xmm1. 4841 4842 // We shift the result of the multiplication by one bit position 4843 // to the left to compensate for the fact that the bits are reversed. 4844 __ movdqu(xmm_temp7, xmm_temp3); 4845 __ movdqu(xmm_temp8, xmm_temp6); 4846 __ pslld(xmm_temp3, 1); 4847 __ pslld(xmm_temp6, 1); 4848 __ psrld(xmm_temp7, 31); 4849 __ psrld(xmm_temp8, 31); 4850 __ movdqu(xmm_temp9, xmm_temp7); 4851 __ pslldq(xmm_temp8, 4); 4852 __ pslldq(xmm_temp7, 4); 4853 __ psrldq(xmm_temp9, 12); 4854 __ por(xmm_temp3, xmm_temp7); 4855 __ por(xmm_temp6, xmm_temp8); 4856 __ por(xmm_temp6, xmm_temp9); 4857 4858 // 4859 // First phase of the reduction 4860 // 4861 // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts 4862 // independently. 4863 __ movdqu(xmm_temp7, xmm_temp3); 4864 __ movdqu(xmm_temp8, xmm_temp3); 4865 __ movdqu(xmm_temp9, xmm_temp3); 4866 __ pslld(xmm_temp7, 31); // packed left shift by 31 4867 __ pslld(xmm_temp8, 30); // packed left shift by 30 4868 __ pslld(xmm_temp9, 25); // packed left shift by 25 4869 __ pxor(xmm_temp7, xmm_temp8); // xor the shifted versions 4870 __ pxor(xmm_temp7, xmm_temp9); 4871 __ movdqu(xmm_temp8, xmm_temp7); 4872 __ pslldq(xmm_temp7, 12); 4873 __ psrldq(xmm_temp8, 4); 4874 __ pxor(xmm_temp3, xmm_temp7); // first phase of the reduction complete 4875 4876 // 4877 // Second phase of the reduction 4878 // 4879 // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these 4880 // shift operations. 4881 __ movdqu(xmm_temp2, xmm_temp3); 4882 __ movdqu(xmm_temp4, xmm_temp3); 4883 __ movdqu(xmm_temp5, xmm_temp3); 4884 __ psrld(xmm_temp2, 1); // packed right shift by 1 4885 __ psrld(xmm_temp4, 2); // packed right shift by 2 4886 __ psrld(xmm_temp5, 7); // packed right shift by 7 4887 __ pxor(xmm_temp2, xmm_temp4); // xor the shifted versions 4888 __ pxor(xmm_temp2, xmm_temp5); 4889 __ pxor(xmm_temp2, xmm_temp8); 4890 __ pxor(xmm_temp3, xmm_temp2); 4891 __ pxor(xmm_temp6, xmm_temp3); // the result is in xmm6 4892 4893 __ decrement(blocks); 4894 __ jcc(Assembler::zero, L_exit); 4895 __ movdqu(xmm_temp0, xmm_temp6); 4896 __ addptr(data, 16); 4897 __ jmp(L_ghash_loop); 4898 4899 __ BIND(L_exit); 4900 __ pshufb(xmm_temp6, xmm_temp10); // Byte swap 16-byte result 4901 __ movdqu(Address(state, 0), xmm_temp6); // store the result 4902 __ leave(); 4903 __ ret(0); 4904 return start; 4905 } 4906 4907 //base64 character set 4908 address base64_charset_addr() { 4909 __ align(CodeEntryAlignment); 4910 StubCodeMark mark(this, "StubRoutines", "base64_charset"); 4911 address start = __ pc(); 4912 __ emit_data64(0x0000004200000041, relocInfo::none); 4913 __ emit_data64(0x0000004400000043, relocInfo::none); 4914 __ emit_data64(0x0000004600000045, relocInfo::none); 4915 __ emit_data64(0x0000004800000047, relocInfo::none); 4916 __ emit_data64(0x0000004a00000049, relocInfo::none); 4917 __ emit_data64(0x0000004c0000004b, relocInfo::none); 4918 __ emit_data64(0x0000004e0000004d, relocInfo::none); 4919 __ emit_data64(0x000000500000004f, relocInfo::none); 4920 __ emit_data64(0x0000005200000051, relocInfo::none); 4921 __ emit_data64(0x0000005400000053, relocInfo::none); 4922 __ emit_data64(0x0000005600000055, relocInfo::none); 4923 __ emit_data64(0x0000005800000057, relocInfo::none); 4924 __ emit_data64(0x0000005a00000059, relocInfo::none); 4925 __ emit_data64(0x0000006200000061, relocInfo::none); 4926 __
emit_data64(0x0000006400000063, relocInfo::none); 4927 __ emit_data64(0x0000006600000065, relocInfo::none); 4928 __ emit_data64(0x0000006800000067, relocInfo::none); 4929 __ emit_data64(0x0000006a00000069, relocInfo::none); 4930 __ emit_data64(0x0000006c0000006b, relocInfo::none); 4931 __ emit_data64(0x0000006e0000006d, relocInfo::none); 4932 __ emit_data64(0x000000700000006f, relocInfo::none); 4933 __ emit_data64(0x0000007200000071, relocInfo::none); 4934 __ emit_data64(0x0000007400000073, relocInfo::none); 4935 __ emit_data64(0x0000007600000075, relocInfo::none); 4936 __ emit_data64(0x0000007800000077, relocInfo::none); 4937 __ emit_data64(0x0000007a00000079, relocInfo::none); 4938 __ emit_data64(0x0000003100000030, relocInfo::none); 4939 __ emit_data64(0x0000003300000032, relocInfo::none); 4940 __ emit_data64(0x0000003500000034, relocInfo::none); 4941 __ emit_data64(0x0000003700000036, relocInfo::none); 4942 __ emit_data64(0x0000003900000038, relocInfo::none); 4943 __ emit_data64(0x0000002f0000002b, relocInfo::none); 4944 return start; 4945 } 4946 4947 //base64 url character set 4948 address base64url_charset_addr() { 4949 __ align(CodeEntryAlignment); 4950 StubCodeMark mark(this, "StubRoutines", "base64url_charset"); 4951 address start = __ pc(); 4952 __ emit_data64(0x0000004200000041, relocInfo::none); 4953 __ emit_data64(0x0000004400000043, relocInfo::none); 4954 __ emit_data64(0x0000004600000045, relocInfo::none); 4955 __ emit_data64(0x0000004800000047, relocInfo::none); 4956 __ emit_data64(0x0000004a00000049, relocInfo::none); 4957 __ emit_data64(0x0000004c0000004b, relocInfo::none); 4958 __ emit_data64(0x0000004e0000004d, relocInfo::none); 4959 __ emit_data64(0x000000500000004f, relocInfo::none); 4960 __ emit_data64(0x0000005200000051, relocInfo::none); 4961 __ emit_data64(0x0000005400000053, relocInfo::none); 4962 __ emit_data64(0x0000005600000055, relocInfo::none); 4963 __ emit_data64(0x0000005800000057, relocInfo::none); 4964 __ emit_data64(0x0000005a00000059, relocInfo::none); 4965 __ emit_data64(0x0000006200000061, relocInfo::none); 4966 __ emit_data64(0x0000006400000063, relocInfo::none); 4967 __ emit_data64(0x0000006600000065, relocInfo::none); 4968 __ emit_data64(0x0000006800000067, relocInfo::none); 4969 __ emit_data64(0x0000006a00000069, relocInfo::none); 4970 __ emit_data64(0x0000006c0000006b, relocInfo::none); 4971 __ emit_data64(0x0000006e0000006d, relocInfo::none); 4972 __ emit_data64(0x000000700000006f, relocInfo::none); 4973 __ emit_data64(0x0000007200000071, relocInfo::none); 4974 __ emit_data64(0x0000007400000073, relocInfo::none); 4975 __ emit_data64(0x0000007600000075, relocInfo::none); 4976 __ emit_data64(0x0000007800000077, relocInfo::none); 4977 __ emit_data64(0x0000007a00000079, relocInfo::none); 4978 __ emit_data64(0x0000003100000030, relocInfo::none); 4979 __ emit_data64(0x0000003300000032, relocInfo::none); 4980 __ emit_data64(0x0000003500000034, relocInfo::none); 4981 __ emit_data64(0x0000003700000036, relocInfo::none); 4982 __ emit_data64(0x0000003900000038, relocInfo::none); 4983 __ emit_data64(0x0000005f0000002d, relocInfo::none); 4984 4985 return start; 4986 } 4987 4988 address base64_bswap_mask_addr() { 4989 __ align(CodeEntryAlignment); 4990 StubCodeMark mark(this, "StubRoutines", "bswap_mask_base64"); 4991 address start = __ pc(); 4992 __ emit_data64(0x0504038002010080, relocInfo::none); 4993 __ emit_data64(0x0b0a098008070680, relocInfo::none); 4994 __ emit_data64(0x0908078006050480, relocInfo::none); 4995 __ emit_data64(0x0f0e0d800c0b0a80, 
relocInfo::none); 4996 __ emit_data64(0x0605048003020180, relocInfo::none); 4997 __ emit_data64(0x0c0b0a8009080780, relocInfo::none); 4998 __ emit_data64(0x0504038002010080, relocInfo::none); 4999 __ emit_data64(0x0b0a098008070680, relocInfo::none); 5000 5001 return start; 5002 } 5003 5004 address base64_right_shift_mask_addr() { 5005 __ align(CodeEntryAlignment); 5006 StubCodeMark mark(this, "StubRoutines", "right_shift_mask"); 5007 address start = __ pc(); 5008 __ emit_data64(0x0006000400020000, relocInfo::none); 5009 __ emit_data64(0x0006000400020000, relocInfo::none); 5010 __ emit_data64(0x0006000400020000, relocInfo::none); 5011 __ emit_data64(0x0006000400020000, relocInfo::none); 5012 __ emit_data64(0x0006000400020000, relocInfo::none); 5013 __ emit_data64(0x0006000400020000, relocInfo::none); 5014 __ emit_data64(0x0006000400020000, relocInfo::none); 5015 __ emit_data64(0x0006000400020000, relocInfo::none); 5016 5017 return start; 5018 } 5019 5020 address base64_left_shift_mask_addr() { 5021 __ align(CodeEntryAlignment); 5022 StubCodeMark mark(this, "StubRoutines", "left_shift_mask"); 5023 address start = __ pc(); 5024 __ emit_data64(0x0000000200040000, relocInfo::none); 5025 __ emit_data64(0x0000000200040000, relocInfo::none); 5026 __ emit_data64(0x0000000200040000, relocInfo::none); 5027 __ emit_data64(0x0000000200040000, relocInfo::none); 5028 __ emit_data64(0x0000000200040000, relocInfo::none); 5029 __ emit_data64(0x0000000200040000, relocInfo::none); 5030 __ emit_data64(0x0000000200040000, relocInfo::none); 5031 __ emit_data64(0x0000000200040000, relocInfo::none); 5032 5033 return start; 5034 } 5035 5036 address base64_and_mask_addr() { 5037 __ align(CodeEntryAlignment); 5038 StubCodeMark mark(this, "StubRoutines", "and_mask"); 5039 address start = __ pc(); 5040 __ emit_data64(0x3f003f003f000000, relocInfo::none); 5041 __ emit_data64(0x3f003f003f000000, relocInfo::none); 5042 __ emit_data64(0x3f003f003f000000, relocInfo::none); 5043 __ emit_data64(0x3f003f003f000000, relocInfo::none); 5044 __ emit_data64(0x3f003f003f000000, relocInfo::none); 5045 __ emit_data64(0x3f003f003f000000, relocInfo::none); 5046 __ emit_data64(0x3f003f003f000000, relocInfo::none); 5047 __ emit_data64(0x3f003f003f000000, relocInfo::none); 5048 return start; 5049 } 5050 5051 address base64_gather_mask_addr() { 5052 __ align(CodeEntryAlignment); 5053 StubCodeMark mark(this, "StubRoutines", "gather_mask"); 5054 address start = __ pc(); 5055 __ emit_data64(0xffffffffffffffff, relocInfo::none); 5056 return start; 5057 } 5058 5059 // Code for generating Base64 encoding. 
5060 // Intrinsic function prototype in Base64.java: 5061 // private void encodeBlock(byte[] src, int sp, int sl, byte[] dst, int dp, boolean isURL) 5062 address generate_base64_encodeBlock() { 5063 __ align(CodeEntryAlignment); 5064 StubCodeMark mark(this, "StubRoutines", "implEncode"); 5065 address start = __ pc(); 5066 __ enter(); 5067 5068 // Save callee-saved registers before using them 5069 __ push(r12); 5070 __ push(r13); 5071 __ push(r14); 5072 __ push(r15); 5073 5074 // arguments 5075 const Register source = c_rarg0; // Source Array 5076 const Register start_offset = c_rarg1; // start offset 5077 const Register end_offset = c_rarg2; // end offset 5078 const Register dest = c_rarg3; // destination array 5079 5080 #ifndef _WIN64 5081 const Register dp = c_rarg4; // Position for writing to dest array 5082 const Register isURL = c_rarg5; // Base64 or URL character set 5083 #else 5084 const Address dp_mem(rbp, 6 * wordSize); // dp is on the stack on Win64 5085 const Address isURL_mem(rbp, 7 * wordSize); 5086 const Register isURL = r10; // pick the volatile windows register 5087 const Register dp = r12; 5088 __ movl(dp, dp_mem); 5089 __ movl(isURL, isURL_mem); 5090 #endif 5091 5092 const Register length = r14; 5093 Label L_process80, L_process32, L_process3, L_exit, L_processdata; 5094 5095 // calculate length from offsets 5096 __ movl(length, end_offset); 5097 __ subl(length, start_offset); 5098 __ cmpl(length, 0); 5099 __ jcc(Assembler::lessEqual, L_exit); 5100 5101 __ lea(r11, ExternalAddress(StubRoutines::x86::base64_charset_addr())); 5102 // check whether the base64 charset (isURL=0) or the base64 URL charset (isURL=1) needs to be loaded 5103 __ cmpl(isURL, 0); 5104 __ jcc(Assembler::equal, L_processdata); 5105 __ lea(r11, ExternalAddress(StubRoutines::x86::base64url_charset_addr())); 5106 5107 // load masks required for encoding data 5108 __ BIND(L_processdata); 5109 __ movdqu(xmm16, ExternalAddress(StubRoutines::x86::base64_gather_mask_addr())); 5110 // Set 64 bits of K register. 5111 __ evpcmpeqb(k3, xmm16, xmm16, Assembler::AVX_512bit); 5112 __ evmovdquq(xmm12, ExternalAddress(StubRoutines::x86::base64_bswap_mask_addr()), Assembler::AVX_256bit, r13); 5113 __ evmovdquq(xmm13, ExternalAddress(StubRoutines::x86::base64_right_shift_mask_addr()), Assembler::AVX_512bit, r13); 5114 __ evmovdquq(xmm14, ExternalAddress(StubRoutines::x86::base64_left_shift_mask_addr()), Assembler::AVX_512bit, r13); 5115 __ evmovdquq(xmm15, ExternalAddress(StubRoutines::x86::base64_and_mask_addr()), Assembler::AVX_512bit, r13); 5116 5117 // Vector Base64 implementation, producing 96 bytes of encoded data 5118 __ BIND(L_process80); 5119 __ cmpl(length, 80); 5120 __ jcc(Assembler::below, L_process32); 5121 __ evmovdquq(xmm0, Address(source, start_offset, Address::times_1, 0), Assembler::AVX_256bit); 5122 __ evmovdquq(xmm1, Address(source, start_offset, Address::times_1, 24), Assembler::AVX_256bit); 5123 __ evmovdquq(xmm2, Address(source, start_offset, Address::times_1, 48), Assembler::AVX_256bit); 5124 5125 // permute the input data so that each 128-bit lane holds a contiguous run of source bytes 5126 __ vpermq(xmm3, xmm0, 148, Assembler::AVX_256bit); 5127 __ vpermq(xmm4, xmm1, 148, Assembler::AVX_256bit); 5128 __ vpermq(xmm5, xmm2, 148, Assembler::AVX_256bit); 5129
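// A note on the 148 (0x94) selector above, a sketch of VPERMQ's indexing
// (dst.q[j] = src.q[(imm >> 2*j) & 3]): 148 == 0b10'01'01'00 picks qwords
// (0, 1, 1, 2), so lane 0 of the result holds source bytes 0..15 and lane 1
// holds bytes 8..23. Each 128-bit lane thus carries the 12 contiguous input
// bytes that it will expand into 16 output characters.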
5130 // shuffle the input to group 3 bytes of data and add a zero as the 4th byte; 5131 // we can deal with 12 bytes at a time in a 128-bit register 5132 __ vpshufb(xmm3, xmm3, xmm12, Assembler::AVX_256bit); 5133 __ vpshufb(xmm4, xmm4, xmm12, Assembler::AVX_256bit); 5134 __ vpshufb(xmm5, xmm5, xmm12, Assembler::AVX_256bit); 5135 5136 // convert byte to word. Each 128-bit lane will have 6 bytes for processing 5137 __ vpmovzxbw(xmm3, xmm3, Assembler::AVX_512bit); 5138 __ vpmovzxbw(xmm4, xmm4, Assembler::AVX_512bit); 5139 __ vpmovzxbw(xmm5, xmm5, Assembler::AVX_512bit); 5140 5141 // Extract bits in the pattern 6, 4+2, 2+4, 6 to convert three 8-bit numbers into four 6-bit numbers 5142 __ evpsrlvw(xmm0, xmm3, xmm13, Assembler::AVX_512bit); 5143 __ evpsrlvw(xmm1, xmm4, xmm13, Assembler::AVX_512bit); 5144 __ evpsrlvw(xmm2, xmm5, xmm13, Assembler::AVX_512bit); 5145 5146 __ evpsllvw(xmm3, xmm3, xmm14, Assembler::AVX_512bit); 5147 __ evpsllvw(xmm4, xmm4, xmm14, Assembler::AVX_512bit); 5148 __ evpsllvw(xmm5, xmm5, xmm14, Assembler::AVX_512bit); 5149 5150 __ vpsrlq(xmm0, xmm0, 8, Assembler::AVX_512bit); 5151 __ vpsrlq(xmm1, xmm1, 8, Assembler::AVX_512bit); 5152 __ vpsrlq(xmm2, xmm2, 8, Assembler::AVX_512bit); 5153 5154 __ vpsllq(xmm3, xmm3, 8, Assembler::AVX_512bit); 5155 __ vpsllq(xmm4, xmm4, 8, Assembler::AVX_512bit); 5156 __ vpsllq(xmm5, xmm5, 8, Assembler::AVX_512bit); 5157 5158 __ vpandq(xmm3, xmm3, xmm15, Assembler::AVX_512bit); 5159 __ vpandq(xmm4, xmm4, xmm15, Assembler::AVX_512bit); 5160 __ vpandq(xmm5, xmm5, xmm15, Assembler::AVX_512bit); 5161 5162 // Get the final 4*6 bits base64 encoding 5163 __ vporq(xmm3, xmm3, xmm0, Assembler::AVX_512bit); 5164 __ vporq(xmm4, xmm4, xmm1, Assembler::AVX_512bit); 5165 __ vporq(xmm5, xmm5, xmm2, Assembler::AVX_512bit); 5166 5167 // Shift 5168 __ vpsrlq(xmm3, xmm3, 8, Assembler::AVX_512bit); 5169 __ vpsrlq(xmm4, xmm4, 8, Assembler::AVX_512bit); 5170 __ vpsrlq(xmm5, xmm5, 8, Assembler::AVX_512bit); 5171 5172 // look up each 6-bit value in the base64 character set to fetch its encoding; 5173 // we convert word to dword, since gather instructions need dword indices for the lookup 5174 __ vextracti64x4(xmm6, xmm3, 0); 5175 __ vpmovzxwd(xmm0, xmm6, Assembler::AVX_512bit); 5176 __ vextracti64x4(xmm6, xmm3, 1); 5177 __ vpmovzxwd(xmm1, xmm6, Assembler::AVX_512bit); 5178 5179 __ vextracti64x4(xmm6, xmm4, 0); 5180 __ vpmovzxwd(xmm2, xmm6, Assembler::AVX_512bit); 5181 __ vextracti64x4(xmm6, xmm4, 1); 5182 __ vpmovzxwd(xmm3, xmm6, Assembler::AVX_512bit); 5183 5184 __ vextracti64x4(xmm4, xmm5, 0); 5185 __ vpmovzxwd(xmm6, xmm4, Assembler::AVX_512bit); 5186 5187 __ vextracti64x4(xmm4, xmm5, 1); 5188 __ vpmovzxwd(xmm7, xmm4, Assembler::AVX_512bit); 5189 5190 __ kmovql(k2, k3); 5191 __ evpgatherdd(xmm4, k2, Address(r11, xmm0, Address::times_4, 0), Assembler::AVX_512bit); 5192 __ kmovql(k2, k3); 5193 __ evpgatherdd(xmm5, k2, Address(r11, xmm1, Address::times_4, 0), Assembler::AVX_512bit); 5194 __ kmovql(k2, k3); 5195 __ evpgatherdd(xmm8, k2, Address(r11, xmm2, Address::times_4, 0), Assembler::AVX_512bit); 5196 __ kmovql(k2, k3); 5197 __ evpgatherdd(xmm9, k2, Address(r11, xmm3, Address::times_4, 0), Assembler::AVX_512bit); 5198 __ kmovql(k2, k3); 5199 __ evpgatherdd(xmm10, k2, Address(r11, xmm6, Address::times_4, 0), Assembler::AVX_512bit); 5200 __ kmovql(k2, k3); 5201 __ evpgatherdd(xmm11, k2, Address(r11, xmm7, Address::times_4, 0), Assembler::AVX_512bit); 5202 5203 // Down-convert dword to byte.
Final output is 16*6 = 96 bytes long 5204 __ evpmovdb(Address(dest, dp, Address::times_1, 0), xmm4, Assembler::AVX_512bit); 5205 __ evpmovdb(Address(dest, dp, Address::times_1, 16), xmm5, Assembler::AVX_512bit); 5206 __ evpmovdb(Address(dest, dp, Address::times_1, 32), xmm8, Assembler::AVX_512bit); 5207 __ evpmovdb(Address(dest, dp, Address::times_1, 48), xmm9, Assembler::AVX_512bit); 5208 __ evpmovdb(Address(dest, dp, Address::times_1, 64), xmm10, Assembler::AVX_512bit); 5209 __ evpmovdb(Address(dest, dp, Address::times_1, 80), xmm11, Assembler::AVX_512bit); 5210 5211 __ addq(dest, 96); 5212 __ addq(source, 72); 5213 __ subq(length, 72); 5214 __ jmp(L_process80); 5215 5216 // Vector Base64 implementation generating 32 bytes of encoded data 5217 __ BIND(L_process32); 5218 __ cmpl(length, 32); 5219 __ jcc(Assembler::below, L_process3); 5220 __ evmovdquq(xmm0, Address(source, start_offset), Assembler::AVX_256bit); 5221 __ vpermq(xmm0, xmm0, 148, Assembler::AVX_256bit); 5222 __ vpshufb(xmm6, xmm0, xmm12, Assembler::AVX_256bit); 5223 __ vpmovzxbw(xmm6, xmm6, Assembler::AVX_512bit); 5224 __ evpsrlvw(xmm2, xmm6, xmm13, Assembler::AVX_512bit); 5225 __ evpsllvw(xmm3, xmm6, xmm14, Assembler::AVX_512bit); 5226 5227 __ vpsrlq(xmm2, xmm2, 8, Assembler::AVX_512bit); 5228 __ vpsllq(xmm3, xmm3, 8, Assembler::AVX_512bit); 5229 __ vpandq(xmm3, xmm3, xmm15, Assembler::AVX_512bit); 5230 __ vporq(xmm1, xmm2, xmm3, Assembler::AVX_512bit); 5231 __ vpsrlq(xmm1, xmm1, 8, Assembler::AVX_512bit); 5232 __ vextracti64x4(xmm9, xmm1, 0); 5233 __ vpmovzxwd(xmm6, xmm9, Assembler::AVX_512bit); 5234 __ vextracti64x4(xmm9, xmm1, 1); 5235 __ vpmovzxwd(xmm5, xmm9, Assembler::AVX_512bit); 5236 __ kmovql(k2, k3); 5237 __ evpgatherdd(xmm8, k2, Address(r11, xmm6, Address::times_4, 0), Assembler::AVX_512bit); 5238 __ kmovql(k2, k3); 5239 __ evpgatherdd(xmm10, k2, Address(r11, xmm5, Address::times_4, 0), Assembler::AVX_512bit); 5240 __ evpmovdb(Address(dest, dp, Address::times_1, 0), xmm8, Assembler::AVX_512bit); 5241 __ evpmovdb(Address(dest, dp, Address::times_1, 16), xmm10, Assembler::AVX_512bit); 5242 __ subq(length, 24); 5243 __ addq(dest, 32); 5244 __ addq(source, 24); 5245 __ jmp(L_process32); 5246 5247 // Scalar data processing takes 3 bytes at a time and produces 4 bytes of encoded data 5248 /* This code corresponds to the scalar version of the following snippet in Base64.java 5249 ** int bits = (src[sp0++] & 0xff) << 16 | (src[sp0++] & 0xff) << 8 | (src[sp0++] & 0xff); 5250 ** dst[dp0++] = (byte)base64[(bits >>> 18) & 0x3f]; 5251 ** dst[dp0++] = (byte)base64[(bits >>> 12) & 0x3f]; 5252 ** dst[dp0++] = (byte)base64[(bits >>> 6) & 0x3f]; 5253 ** dst[dp0++] = (byte)base64[bits & 0x3f]; */ 5254 __ BIND(L_process3); 5255 __ cmpl(length, 3); 5256 __ jcc(Assembler::below, L_exit); 5257 // Read 1 byte at a time 5258 __ movzbl(rax, Address(source, start_offset)); 5259 __ shll(rax, 0x10); 5260 __ movl(r15, rax); 5261 __ movzbl(rax, Address(source, start_offset, Address::times_1, 1)); 5262 __ shll(rax, 0x8); 5263 __ movzwl(rax, rax); 5264 __ orl(r15, rax); 5265 __ movzbl(rax, Address(source, start_offset, Address::times_1, 2)); 5266 __ orl(rax, r15); 5267 // Save the 3 bytes read in r15 5268 __ movl(r15, rax); 5269 __ shrl(rax, 0x12); 5270 __ andl(rax, 0x3f); 5271 // rax contains the index, r11 contains the base64 lookup table 5272 __ movb(rax, Address(r11, rax, Address::times_4)); 5273 // Write the encoded byte to the destination 5274 __ movb(Address(dest, dp, Address::times_1, 0), rax); 5275 __ movl(rax, r15); 5276 __ shrl(rax, 0xc); 5277 __ andl(rax, 0x3f);
5278 __ movb(rax, Address(r11, rax, Address::times_4)); 5279 __ movb(Address(dest, dp, Address::times_1, 1), rax); 5280 __ movl(rax, r15); 5281 __ shrl(rax, 0x6); 5282 __ andl(rax, 0x3f); 5283 __ movb(rax, Address(r11, rax, Address::times_4)); 5284 __ movb(Address(dest, dp, Address::times_1, 2), rax); 5285 __ movl(rax, r15); 5286 __ andl(rax, 0x3f); 5287 __ movb(rax, Address(r11, rax, Address::times_4)); 5288 __ movb(Address(dest, dp, Address::times_1, 3), rax); 5289 __ subl(length, 3); 5290 __ addq(dest, 4); 5291 __ addq(source, 3); 5292 __ jmp(L_process3); 5293 __ BIND(L_exit); 5294 __ pop(r15); 5295 __ pop(r14); 5296 __ pop(r13); 5297 __ pop(r12); 5298 __ leave(); 5299 __ ret(0); 5300 return start; 5301 } 5302 5303 /** 5304 * Arguments: 5305 * 5306 * Inputs: 5307 * c_rarg0 - int crc 5308 * c_rarg1 - byte* buf 5309 * c_rarg2 - int length 5310 * 5311 * Output: 5312 * rax - int crc result 5313 */ 5314 address generate_updateBytesCRC32() { 5315 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions"); 5316 5317 __ align(CodeEntryAlignment); 5318 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32"); 5319 5320 address start = __ pc(); 5321 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 5322 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 5323 // rscratch1: r10 5324 const Register crc = c_rarg0; // crc 5325 const Register buf = c_rarg1; // source java byte array address 5326 const Register len = c_rarg2; // length 5327 const Register table = c_rarg3; // crc_table address (reuse register) 5328 const Register tmp1 = r11; 5329 const Register tmp2 = r10; 5330 assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax); 5331 5332 BLOCK_COMMENT("Entry:"); 5333 __ enter(); // required for proper stackwalking of RuntimeStub frame 5334 5335 if (VM_Version::supports_sse4_1() && VM_Version::supports_avx512_vpclmulqdq() && 5336 VM_Version::supports_avx512bw() && 5337 VM_Version::supports_avx512vl()) { 5338 __ kernel_crc32_avx512(crc, buf, len, table, tmp1, tmp2); 5339 } else { 5340 __ kernel_crc32(crc, buf, len, table, tmp1); 5341 } 5342 5343 __ movl(rax, crc); 5344 __ vzeroupper(); 5345 __ leave(); // required for proper stackwalking of RuntimeStub frame 5346 __ ret(0); 5347 5348 return start; 5349 } 5350 5351 /** 5352 * Arguments: 5353 * 5354 * Inputs: 5355 * c_rarg0 - int crc 5356 * c_rarg1 - byte* buf 5357 * c_rarg2 - long length 5358 * c_rarg3 - table_start - optional (present only when doing a library_call, 5359 * not used by x86 algorithm) 5360 * 5361 * Output: 5362 * rax - int crc result 5363 */ 5364 address generate_updateBytesCRC32C(bool is_pclmulqdq_supported) { 5365 assert(UseCRC32CIntrinsics, "need SSE4_2"); 5366 __ align(CodeEntryAlignment); 5367 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C"); 5368 address start = __ pc(); 5369 // reg.arg int#0 int#1 int#2 int#3 int#4 int#5 float regs 5370 // Windows RCX RDX R8 R9 none none XMM0..XMM3 5371 // Lin / Sol RDI RSI RDX RCX R8 R9 XMM0..XMM7 5372 const Register crc = c_rarg0; // crc 5373 const Register buf = c_rarg1; // source java byte array address 5374 const Register len = c_rarg2; // length 5375 const Register a = rax; 5376 const Register j = r9; 5377 const Register k = r10; 5378 const Register l = r11; 5379 #ifdef _WIN64 5380 const Register y = rdi; 5381 const Register z = rsi; 5382 #else 5383 const Register y = rcx; 5384 const Register z = r8; 5385 #endif 5386 assert_different_registers(crc, buf, len, a, j, k, l, y, z); 5387 5388 BLOCK_COMMENT("Entry:"); 5389 __ enter(); // required for proper stackwalking of RuntimeStub frame
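// For reference: CRC-32C differs from the CRC-32 above only in its generator
// polynomial (Castagnoli 0x1EDC6F41, reflected form 0x82F63B78). A sketch of
// the generic table-driven, byte-at-a-time update that this stub vectorizes
// (initial/final XOR conventions are handled by the Java caller; crc is
// treated as unsigned):
//   for (long i = 0; i < len; i++)
//     crc = crc32c_table[(crc ^ buf[i]) & 0xff] ^ (crc >> 8);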
5390 #ifdef _WIN64 5391 __ push(y); 5392 __ push(z); 5393 #endif 5394 __ crc32c_ipl_alg2_alt2(crc, buf, len, 5395 a, j, k, 5396 l, y, z, 5397 c_farg0, c_farg1, c_farg2, 5398 is_pclmulqdq_supported); 5399 __ movl(rax, crc); 5400 #ifdef _WIN64 5401 __ pop(z); 5402 __ pop(y); 5403 #endif 5404 __ vzeroupper(); 5405 __ leave(); // required for proper stackwalking of RuntimeStub frame 5406 __ ret(0); 5407 5408 return start; 5409 } 5410 5411 /** 5412 * Arguments: 5413 * 5414 * Input: 5415 * c_rarg0 - x address 5416 * c_rarg1 - x length 5417 * c_rarg2 - y address 5418 * c_rarg3 - y length 5419 * not Win64 5420 * c_rarg4 - z address 5421 * c_rarg5 - z length 5422 * Win64 5423 * rsp+40 - z address 5424 * rsp+48 - z length 5425 */ 5426 address generate_multiplyToLen() { 5427 __ align(CodeEntryAlignment); 5428 StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); 5429 5430 address start = __ pc(); 5431 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 5432 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 5433 const Register x = rdi; 5434 const Register xlen = rax; 5435 const Register y = rsi; 5436 const Register ylen = rcx; 5437 const Register z = r8; 5438 const Register zlen = r11; 5439 5440 // Next registers will be saved on stack in multiply_to_len(). 5441 const Register tmp1 = r12; 5442 const Register tmp2 = r13; 5443 const Register tmp3 = r14; 5444 const Register tmp4 = r15; 5445 const Register tmp5 = rbx; 5446 5447 BLOCK_COMMENT("Entry:"); 5448 __ enter(); // required for proper stackwalking of RuntimeStub frame 5449 5450 #ifndef _WIN64 5451 __ movptr(zlen, r9); // Save r9 in r11 - zlen 5452 #endif 5453 setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx 5454 // ylen => rcx, z => r8, zlen => r11 5455 // r9 and r10 may be used to save non-volatile registers 5456 #ifdef _WIN64 5457 // last 2 arguments (#4, #5) are on stack on Win64 5458 __ movptr(z, Address(rsp, 6 * wordSize)); 5459 __ movptr(zlen, Address(rsp, 7 * wordSize)); 5460 #endif 5461 5462 __ movptr(xlen, rsi); 5463 __ movptr(y, rdx); 5464 __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5); 5465 5466 restore_arg_regs(); 5467 5468 __ leave(); // required for proper stackwalking of RuntimeStub frame 5469 __ ret(0); 5470 5471 return start; 5472 } 5473 5474 /** 5475 * Arguments: 5476 * 5477 * Input: 5478 * c_rarg0 - obja address 5479 * c_rarg1 - objb address 5480 * c_rarg2 - length length 5481 * c_rarg3 - scale log2_array_indexscale 5482 * 5483 * Output: 5484 * rax - int; >= 0: index of the first mismatch, < 0: bitwise complement of the tail length 5485 */ 5486 address generate_vectorizedMismatch() { 5487 __ align(CodeEntryAlignment); 5488 StubCodeMark mark(this, "StubRoutines", "vectorizedMismatch"); 5489 address start = __ pc(); 5490 5491 BLOCK_COMMENT("Entry:"); 5492 __ enter(); 5493 5494 #ifdef _WIN64 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 5495 const Register scale = c_rarg0; // rcx, will exchange with r9 5496 const Register objb = c_rarg1; // rdx 5497 const Register length = c_rarg2; // r8 5498 const Register obja = c_rarg3; // r9 5499 __ xchgq(obja, scale); // now obja and scale contain the correct contents 5500 5501 const Register tmp1 = r10; 5502 const Register tmp2 = r11; 5503 #endif 5504 #ifndef _WIN64 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
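// For reference, a sketch of the assumed semantics (mirroring
// jdk.internal.util.ArraysSupport.vectorizedMismatch, with length counted
// in elements of 2^scale bytes):
//   for (i = 0; i < length; i++)
//     if (a[i] != b[i]) return i;   // index of the first mismatching element
// except that the stub only compares vector-width chunks; when it finds no
// mismatch it returns the bitwise complement of the number of elements it
// did not examine, and the Java caller finishes the tail.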
5505 const Register obja = c_rarg0; // U:rdi 5506 const Register objb = c_rarg1; // U:rsi 5507 const Register length = c_rarg2; // U:rdx 5508 const Register scale = c_rarg3; // U:rcx 5509 const Register tmp1 = r8; 5510 const Register tmp2 = r9; 5511 #endif 5512 const Register result = rax; // return value 5513 const XMMRegister vec0 = xmm0; 5514 const XMMRegister vec1 = xmm1; 5515 const XMMRegister vec2 = xmm2; 5516 5517 __ vectorized_mismatch(obja, objb, length, scale, result, tmp1, tmp2, vec0, vec1, vec2); 5518 5519 __ vzeroupper(); 5520 __ leave(); 5521 __ ret(0); 5522 5523 return start; 5524 } 5525 5526 /** 5527 * Arguments: 5528 * 5529 * Input: 5530 * c_rarg0 - x address 5531 * c_rarg1 - x length 5532 * c_rarg2 - z address 5533 * c_rarg3 - z length 5534 * 5535 */ 5536 address generate_squareToLen() { 5537 5538 __ align(CodeEntryAlignment); 5539 StubCodeMark mark(this, "StubRoutines", "squareToLen"); 5540 5541 address start = __ pc(); 5542 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 5543 // Unix: rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...) 5544 const Register x = rdi; 5545 const Register len = rsi; 5546 const Register z = r8; 5547 const Register zlen = rcx; 5548 5549 const Register tmp1 = r12; 5550 const Register tmp2 = r13; 5551 const Register tmp3 = r14; 5552 const Register tmp4 = r15; 5553 const Register tmp5 = rbx; 5554 5555 BLOCK_COMMENT("Entry:"); 5556 __ enter(); // required for proper stackwalking of RuntimeStub frame 5557 5558 setup_arg_regs(4); // x => rdi, len => rsi, z => rdx 5559 // zlen => rcx 5560 // r9 and r10 may be used to save non-volatile registers 5561 __ movptr(r8, rdx); 5562 __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 5563 5564 restore_arg_regs(); 5565 5566 __ leave(); // required for proper stackwalking of RuntimeStub frame 5567 __ ret(0); 5568 5569 return start; 5570 } 5571 5572 address generate_method_entry_barrier() { 5573 __ align(CodeEntryAlignment); 5574 StubCodeMark mark(this, "StubRoutines", "nmethod_entry_barrier"); 5575 5576 Label deoptimize_label; 5577 5578 address start = __ pc(); 5579 5580 __ push(-1); // cookie, this is used for writing the new rsp when deoptimizing 5581 5582 BLOCK_COMMENT("Entry:"); 5583 __ enter(); // save rbp 5584 5585 // Save c_rarg0, because we want to use that value. 5586 // We could do without it, but then we would depend on the number of slots used by pusha 5587 __ push(c_rarg0); 5588 5589 __ lea(c_rarg0, Address(rsp, wordSize * 3)); // 1 for cookie, 1 for rbp, 1 for c_rarg0 - this should be the return address 5590 5591 __ pusha(); 5592 5593 // The method may have floats as arguments, and we must spill them before calling 5594 // the VM runtime.
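// The spill area set up below is laid out as follows (a sketch; 16 bytes
// per XMM register, assuming Argument::n_float_register_parameters_j == 8):
//   [rsp + 0]   xmm0
//   [rsp + 16]  xmm1
//   ...
//   [rsp + 112] xmm7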
5595 assert(Argument::n_float_register_parameters_j == 8, "Assumption"); 5596 const int xmm_size = wordSize * 2; 5597 const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j; 5598 __ subptr(rsp, xmm_spill_size); 5599 __ movdqu(Address(rsp, xmm_size * 7), xmm7); 5600 __ movdqu(Address(rsp, xmm_size * 6), xmm6); 5601 __ movdqu(Address(rsp, xmm_size * 5), xmm5); 5602 __ movdqu(Address(rsp, xmm_size * 4), xmm4); 5603 __ movdqu(Address(rsp, xmm_size * 3), xmm3); 5604 __ movdqu(Address(rsp, xmm_size * 2), xmm2); 5605 __ movdqu(Address(rsp, xmm_size * 1), xmm1); 5606 __ movdqu(Address(rsp, xmm_size * 0), xmm0); 5607 5608 __ call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(address*)>(BarrierSetNMethod::nmethod_stub_entry_barrier)), 1); 5609 5610 __ movdqu(xmm0, Address(rsp, xmm_size * 0)); 5611 __ movdqu(xmm1, Address(rsp, xmm_size * 1)); 5612 __ movdqu(xmm2, Address(rsp, xmm_size * 2)); 5613 __ movdqu(xmm3, Address(rsp, xmm_size * 3)); 5614 __ movdqu(xmm4, Address(rsp, xmm_size * 4)); 5615 __ movdqu(xmm5, Address(rsp, xmm_size * 5)); 5616 __ movdqu(xmm6, Address(rsp, xmm_size * 6)); 5617 __ movdqu(xmm7, Address(rsp, xmm_size * 7)); 5618 __ addptr(rsp, xmm_spill_size); 5619 5620 __ cmpl(rax, 1); // 1 means deoptimize 5621 __ jcc(Assembler::equal, deoptimize_label); 5622 5623 __ popa(); 5624 __ pop(c_rarg0); 5625 5626 __ leave(); 5627 5628 __ addptr(rsp, 1 * wordSize); // cookie 5629 __ ret(0); 5630 5631 5632 __ BIND(deoptimize_label); 5633 5634 __ popa(); 5635 __ pop(c_rarg0); 5636 5637 __ leave(); 5638 5639 // This can be taken out, but is good for verification purposes: getting a SIGSEGV 5640 // here while still having a correct stack is valuable 5641 __ testptr(rsp, Address(rsp, 0)); 5642 5643 __ movptr(rsp, Address(rsp, 0)); // new rsp was written in the barrier 5644 __ jmp(Address(rsp, -1 * wordSize)); // jmp target should be the caller's verified_entry_point 5645 5646 return start; 5647 } 5648 5649 /** 5650 * Arguments: 5651 * 5652 * Input: 5653 * c_rarg0 - out address 5654 * c_rarg1 - in address 5655 * c_rarg2 - offset 5656 * c_rarg3 - len 5657 * not Win64 5658 * c_rarg4 - k 5659 * Win64 5660 * rsp+40 - k 5661 */ 5662 address generate_mulAdd() { 5663 __ align(CodeEntryAlignment); 5664 StubCodeMark mark(this, "StubRoutines", "mulAdd"); 5665 5666 address start = __ pc(); 5667 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 5668 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 5669 const Register out = rdi; 5670 const Register in = rsi; 5671 const Register offset = r11; 5672 const Register len = rcx; 5673 const Register k = r8; 5674 5675 // Next registers will be saved on stack in mul_add().
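// For reference, a sketch of the Java fallback this intrinsic replaces
// (assuming the semantics of java.math.BigInteger::implMulAdd: multiply in[]
// by the int k, add the product into out[] at offset, return the carry):
//   long kl = k & 0xffffffffL, carry = 0;
//   for (int j = len - 1; j >= 0; j--) {
//     long sum = (in[j] & 0xffffffffL) * kl + (out[offset] & 0xffffffffL) + carry;
//     out[offset--] = (int) sum;
//     carry = sum >>> 32;
//   }
//   return (int) carry;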
5676 const Register tmp1 = r12; 5677 const Register tmp2 = r13; 5678 const Register tmp3 = r14; 5679 const Register tmp4 = r15; 5680 const Register tmp5 = rbx; 5681 5682 BLOCK_COMMENT("Entry:"); 5683 __ enter(); // required for proper stackwalking of RuntimeStub frame 5684 5685 setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx 5686 // len => rcx, k => r8 5687 // r9 and r10 may be used to save non-volatile registers 5688 #ifdef _WIN64 5689 // last argument is on stack on Win64 5690 __ movl(k, Address(rsp, 6 * wordSize)); 5691 #endif 5692 __ movptr(r11, rdx); // move offset in rdx to offset(r11) 5693 __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 5694 5695 restore_arg_regs(); 5696 5697 __ leave(); // required for proper stackwalking of RuntimeStub frame 5698 __ ret(0); 5699 5700 return start; 5701 } 5702 5703 address generate_bigIntegerRightShift() { 5704 __ align(CodeEntryAlignment); 5705 StubCodeMark mark(this, "StubRoutines", "bigIntegerRightShiftWorker"); 5706 5707 address start = __ pc(); 5708 Label Shift512Loop, ShiftTwo, ShiftTwoLoop, ShiftOne, Exit; 5709 // For Unix, the arguments are as follows: rdi, rsi, rdx, rcx, r8. 5710 const Register newArr = rdi; 5711 const Register oldArr = rsi; 5712 const Register newIdx = rdx; 5713 const Register shiftCount = rcx; // It was intentional to have shiftCount in rcx since it is used implicitly for shift. 5714 const Register totalNumIter = r8; 5715 5716 // For windows, we use r9 and r10 as temps to save rdi and rsi. Thus we cannot allocate them for our temps. 5717 // For everything else, we prefer using r9 and r10 since we do not have to save them before use. 5718 const Register tmp1 = r11; // Caller save. 5719 const Register tmp2 = rax; // Caller save. 5720 const Register tmp3 = WINDOWS_ONLY(r12) NOT_WINDOWS(r9); // Windows: Callee save. Linux: Caller save. 5721 const Register tmp4 = WINDOWS_ONLY(r13) NOT_WINDOWS(r10); // Windows: Callee save. Linux: Caller save. 5722 const Register tmp5 = r14; // Callee save. 5723 const Register tmp6 = r15; 5724 5725 const XMMRegister x0 = xmm0; 5726 const XMMRegister x1 = xmm1; 5727 const XMMRegister x2 = xmm2; 5728 5729 BLOCK_COMMENT("Entry:"); 5730 __ enter(); // required for proper stackwalking of RuntimeStub frame 5731 5732 #ifdef _WINDOWS 5733 setup_arg_regs(4); 5734 // For windows, since last argument is on stack, we need to move it to the appropriate register. 5735 __ movl(totalNumIter, Address(rsp, 6 * wordSize)); 5736 // Save callee save registers. 5737 __ push(tmp3); 5738 __ push(tmp4); 5739 #endif 5740 __ push(tmp5); 5741 5742 // Rename temps used throughout the code. 5743 const Register idx = tmp1; 5744 const Register nIdx = tmp2; 5745 5746 __ xorl(idx, idx); 5747 5748 // Start right shift from end of the array. 
5749 // For example, if #iteration = 4 and newIdx = 1 5750 // then dest[4] = src[4] >> shiftCount | src[3] << (32 - shiftCount) 5751 // if #iteration = 4 and newIdx = 0 5752 // then dest[3] = src[4] >> shiftCount | src[3] << (32 - shiftCount) 5753 __ movl(idx, totalNumIter); 5754 __ movl(nIdx, idx); 5755 __ addl(nIdx, newIdx); 5756 5757 // If vectorization is enabled, check if the number of iterations is at least 64 5758 // If not, then go to ShiftTwo, which processes 2 iterations at a time 5759 if (VM_Version::supports_avx512_vbmi2()) { 5760 __ cmpptr(totalNumIter, (AVX3Threshold/64)); 5761 __ jcc(Assembler::less, ShiftTwo); 5762 5763 if (AVX3Threshold < 16 * 64) { 5764 __ cmpl(totalNumIter, 16); 5765 __ jcc(Assembler::less, ShiftTwo); 5766 } 5767 __ evpbroadcastd(x0, shiftCount, Assembler::AVX_512bit); 5768 __ subl(idx, 16); 5769 __ subl(nIdx, 16); 5770 __ BIND(Shift512Loop); 5771 __ evmovdqul(x2, Address(oldArr, idx, Address::times_4, 4), Assembler::AVX_512bit); 5772 __ evmovdqul(x1, Address(oldArr, idx, Address::times_4), Assembler::AVX_512bit); 5773 __ vpshrdvd(x2, x1, x0, Assembler::AVX_512bit); 5774 __ evmovdqul(Address(newArr, nIdx, Address::times_4), x2, Assembler::AVX_512bit); 5775 __ subl(nIdx, 16); 5776 __ subl(idx, 16); 5777 __ jcc(Assembler::greaterEqual, Shift512Loop); 5778 __ addl(idx, 16); 5779 __ addl(nIdx, 16); 5780 } 5781 __ BIND(ShiftTwo); 5782 __ cmpl(idx, 2); 5783 __ jcc(Assembler::less, ShiftOne); 5784 __ subl(idx, 2); 5785 __ subl(nIdx, 2); 5786 __ BIND(ShiftTwoLoop); 5787 __ movl(tmp5, Address(oldArr, idx, Address::times_4, 8)); 5788 __ movl(tmp4, Address(oldArr, idx, Address::times_4, 4)); 5789 __ movl(tmp3, Address(oldArr, idx, Address::times_4)); 5790 __ shrdl(tmp5, tmp4); 5791 __ shrdl(tmp4, tmp3); 5792 __ movl(Address(newArr, nIdx, Address::times_4, 4), tmp5); 5793 __ movl(Address(newArr, nIdx, Address::times_4), tmp4); 5794 __ subl(nIdx, 2); 5795 __ subl(idx, 2); 5796 __ jcc(Assembler::greaterEqual, ShiftTwoLoop); 5797 __ addl(idx, 2); 5798 __ addl(nIdx, 2); 5799 5800 // Do the last iteration 5801 __ BIND(ShiftOne); 5802 __ cmpl(idx, 1); 5803 __ jcc(Assembler::less, Exit); 5804 __ subl(idx, 1); 5805 __ subl(nIdx, 1); 5806 __ movl(tmp4, Address(oldArr, idx, Address::times_4, 4)); 5807 __ movl(tmp3, Address(oldArr, idx, Address::times_4)); 5808 __ shrdl(tmp4, tmp3); 5809 __ movl(Address(newArr, nIdx, Address::times_4), tmp4); 5810 __ BIND(Exit); 5811 // Restore callee save registers. 5812 __ pop(tmp5); 5813 #ifdef _WINDOWS 5814 __ pop(tmp4); 5815 __ pop(tmp3); 5816 restore_arg_regs(); 5817 #endif 5818 __ leave(); // required for proper stackwalking of RuntimeStub frame 5819 __ ret(0); 5820 return start; 5821 } 5822 5823 /** 5824 * Arguments: 5825 * 5826 * Input: 5827 * c_rarg0 - newArr address 5828 * c_rarg1 - oldArr address 5829 * c_rarg2 - newIdx 5830 * c_rarg3 - shiftCount 5831 * not Win64 5832 * c_rarg4 - numIter 5833 * Win64 5834 * rsp+40 - numIter 5835 */ 5836 address generate_bigIntegerLeftShift() { 5837 __ align(CodeEntryAlignment); 5838 StubCodeMark mark(this, "StubRoutines", "bigIntegerLeftShiftWorker"); 5839 address start = __ pc(); 5840 Label Shift512Loop, ShiftTwo, ShiftTwoLoop, ShiftOne, Exit; 5841 // For Unix, the arguments are as follows: rdi, rsi, rdx, rcx, r8. 5842 const Register newArr = rdi; 5843 const Register oldArr = rsi; 5844 const Register newIdx = rdx; 5845 const Register shiftCount = rcx; // It was intentional to have shiftCount in rcx since it is used implicitly for shift.
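// A sketch of the per-word recurrence computed below (oldArr[0] is the most
// significant word, matching BigInteger's magnitude layout; s = shiftCount):
//   newArr[i] = (oldArr[i] << s) | (oldArr[i + 1] >>> (32 - s));
// shldl (scalar) and vpshldvd (the AVX-512 VBMI2 vector path) implement
// exactly this double-width "funnel" shift.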
5846 const Register totalNumIter = r8; 5847 // For windows, we use r9 and r10 as temps to save rdi and rsi. Thus we cannot allocate them for our temps. 5848 // For everything else, we prefer using r9 and r10 since we do not have to save them before use. 5849 const Register tmp1 = r11; // Caller save. 5850 const Register tmp2 = rax; // Caller save. 5851 const Register tmp3 = WINDOWS_ONLY(r12) NOT_WINDOWS(r9); // Windows: Callee save. Linux: Caller save. 5852 const Register tmp4 = WINDOWS_ONLY(r13) NOT_WINDOWS(r10); // Windows: Callee save. Linux: Caller save. 5853 const Register tmp5 = r14; // Callee save. 5854 5855 const XMMRegister x0 = xmm0; 5856 const XMMRegister x1 = xmm1; 5857 const XMMRegister x2 = xmm2; 5858 BLOCK_COMMENT("Entry:"); 5859 __ enter(); // required for proper stackwalking of RuntimeStub frame 5860 5861 #ifdef _WINDOWS 5862 setup_arg_regs(4); 5863 // For windows, since last argument is on stack, we need to move it to the appropriate register. 5864 __ movl(totalNumIter, Address(rsp, 6 * wordSize)); 5865 // Save callee save registers. 5866 __ push(tmp3); 5867 __ push(tmp4); 5868 #endif 5869 __ push(tmp5); 5870 5871 // Rename temps used throughout the code 5872 const Register idx = tmp1; 5873 const Register numIterTmp = tmp2; 5874 5875 // Start idx from zero. 5876 __ xorl(idx, idx); 5877 // Compute interior pointer for new array. We do this so that we can use same index for both old and new arrays. 5878 __ lea(newArr, Address(newArr, newIdx, Address::times_4)); 5879 __ movl(numIterTmp, totalNumIter); 5880 5881 // If vectorization is enabled, check if the number of iterations is at least 64 5882 // If not, then go to ShiftTwo shifting two numbers at a time 5883 if (VM_Version::supports_avx512_vbmi2()) { 5884 __ cmpl(totalNumIter, (AVX3Threshold/64)); 5885 __ jcc(Assembler::less, ShiftTwo); 5886 5887 if (AVX3Threshold < 16 * 64) { 5888 __ cmpl(totalNumIter, 16); 5889 __ jcc(Assembler::less, ShiftTwo); 5890 } 5891 __ evpbroadcastd(x0, shiftCount, Assembler::AVX_512bit); 5892 __ subl(numIterTmp, 16); 5893 __ BIND(Shift512Loop); 5894 __ evmovdqul(x1, Address(oldArr, idx, Address::times_4), Assembler::AVX_512bit); 5895 __ evmovdqul(x2, Address(oldArr, idx, Address::times_4, 0x4), Assembler::AVX_512bit); 5896 __ vpshldvd(x1, x2, x0, Assembler::AVX_512bit); 5897 __ evmovdqul(Address(newArr, idx, Address::times_4), x1, Assembler::AVX_512bit); 5898 __ addl(idx, 16); 5899 __ subl(numIterTmp, 16); 5900 __ jcc(Assembler::greaterEqual, Shift512Loop); 5901 __ addl(numIterTmp, 16); 5902 } 5903 __ BIND(ShiftTwo); 5904 __ cmpl(totalNumIter, 1); 5905 __ jcc(Assembler::less, Exit); 5906 __ movl(tmp3, Address(oldArr, idx, Address::times_4)); 5907 __ subl(numIterTmp, 2); 5908 __ jcc(Assembler::less, ShiftOne); 5909 5910 __ BIND(ShiftTwoLoop); 5911 __ movl(tmp4, Address(oldArr, idx, Address::times_4, 0x4)); 5912 __ movl(tmp5, Address(oldArr, idx, Address::times_4, 0x8)); 5913 __ shldl(tmp3, tmp4); 5914 __ shldl(tmp4, tmp5); 5915 __ movl(Address(newArr, idx, Address::times_4), tmp3); 5916 __ movl(Address(newArr, idx, Address::times_4, 0x4), tmp4); 5917 __ movl(tmp3, tmp5); 5918 __ addl(idx, 2); 5919 __ subl(numIterTmp, 2); 5920 __ jcc(Assembler::greaterEqual, ShiftTwoLoop); 5921 5922 // Do the last iteration 5923 __ BIND(ShiftOne); 5924 __ addl(numIterTmp, 2); 5925 __ cmpl(numIterTmp, 1); 5926 __ jcc(Assembler::less, Exit); 5927 __ movl(tmp4, Address(oldArr, idx, Address::times_4, 0x4)); 5928 __ shldl(tmp3, tmp4); 5929 __ movl(Address(newArr, idx, Address::times_4), tmp3); 5930 5931 __ BIND(Exit); 
5932 // Restore callee save registers. 5933 __ pop(tmp5); 5934 #ifdef _WINDOWS 5935 __ pop(tmp4); 5936 __ pop(tmp3); 5937 restore_arg_regs(); 5938 #endif 5939 __ leave(); // required for proper stackwalking of RuntimeStub frame 5940 __ ret(0); 5941 return start; 5942 } 5943 5944 address generate_libmExp() { 5945 StubCodeMark mark(this, "StubRoutines", "libmExp"); 5946 5947 address start = __ pc(); 5948 5949 const XMMRegister x0 = xmm0; 5950 const XMMRegister x1 = xmm1; 5951 const XMMRegister x2 = xmm2; 5952 const XMMRegister x3 = xmm3; 5953 5954 const XMMRegister x4 = xmm4; 5955 const XMMRegister x5 = xmm5; 5956 const XMMRegister x6 = xmm6; 5957 const XMMRegister x7 = xmm7; 5958 5959 const Register tmp = r11; 5960 5961 BLOCK_COMMENT("Entry:"); 5962 __ enter(); // required for proper stackwalking of RuntimeStub frame 5963 5964 __ fast_exp(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 5965 5966 __ leave(); // required for proper stackwalking of RuntimeStub frame 5967 __ ret(0); 5968 5969 return start; 5970 5971 } 5972 5973 address generate_libmLog() { 5974 StubCodeMark mark(this, "StubRoutines", "libmLog"); 5975 5976 address start = __ pc(); 5977 5978 const XMMRegister x0 = xmm0; 5979 const XMMRegister x1 = xmm1; 5980 const XMMRegister x2 = xmm2; 5981 const XMMRegister x3 = xmm3; 5982 5983 const XMMRegister x4 = xmm4; 5984 const XMMRegister x5 = xmm5; 5985 const XMMRegister x6 = xmm6; 5986 const XMMRegister x7 = xmm7; 5987 5988 const Register tmp1 = r11; 5989 const Register tmp2 = r8; 5990 5991 BLOCK_COMMENT("Entry:"); 5992 __ enter(); // required for proper stackwalking of RuntimeStub frame 5993 5994 __ fast_log(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2); 5995 5996 __ leave(); // required for proper stackwalking of RuntimeStub frame 5997 __ ret(0); 5998 5999 return start; 6000 6001 } 6002 6003 address generate_libmLog10() { 6004 StubCodeMark mark(this, "StubRoutines", "libmLog10"); 6005 6006 address start = __ pc(); 6007 6008 const XMMRegister x0 = xmm0; 6009 const XMMRegister x1 = xmm1; 6010 const XMMRegister x2 = xmm2; 6011 const XMMRegister x3 = xmm3; 6012 6013 const XMMRegister x4 = xmm4; 6014 const XMMRegister x5 = xmm5; 6015 const XMMRegister x6 = xmm6; 6016 const XMMRegister x7 = xmm7; 6017 6018 const Register tmp = r11; 6019 6020 BLOCK_COMMENT("Entry:"); 6021 __ enter(); // required for proper stackwalking of RuntimeStub frame 6022 6023 __ fast_log10(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 6024 6025 __ leave(); // required for proper stackwalking of RuntimeStub frame 6026 __ ret(0); 6027 6028 return start; 6029 6030 } 6031 6032 address generate_libmPow() { 6033 StubCodeMark mark(this, "StubRoutines", "libmPow"); 6034 6035 address start = __ pc(); 6036 6037 const XMMRegister x0 = xmm0; 6038 const XMMRegister x1 = xmm1; 6039 const XMMRegister x2 = xmm2; 6040 const XMMRegister x3 = xmm3; 6041 6042 const XMMRegister x4 = xmm4; 6043 const XMMRegister x5 = xmm5; 6044 const XMMRegister x6 = xmm6; 6045 const XMMRegister x7 = xmm7; 6046 6047 const Register tmp1 = r8; 6048 const Register tmp2 = r9; 6049 const Register tmp3 = r10; 6050 const Register tmp4 = r11; 6051 6052 BLOCK_COMMENT("Entry:"); 6053 __ enter(); // required for proper stackwalking of RuntimeStub frame 6054 6055 __ fast_pow(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 6056 6057 __ leave(); // required for proper stackwalking of RuntimeStub frame 6058 __ ret(0); 6059 6060 return start; 6061 6062 } 6063 6064 address generate_libmSin() { 6065 StubCodeMark mark(this, 
"StubRoutines", "libmSin"); 6066 6067 address start = __ pc(); 6068 6069 const XMMRegister x0 = xmm0; 6070 const XMMRegister x1 = xmm1; 6071 const XMMRegister x2 = xmm2; 6072 const XMMRegister x3 = xmm3; 6073 6074 const XMMRegister x4 = xmm4; 6075 const XMMRegister x5 = xmm5; 6076 const XMMRegister x6 = xmm6; 6077 const XMMRegister x7 = xmm7; 6078 6079 const Register tmp1 = r8; 6080 const Register tmp2 = r9; 6081 const Register tmp3 = r10; 6082 const Register tmp4 = r11; 6083 6084 BLOCK_COMMENT("Entry:"); 6085 __ enter(); // required for proper stackwalking of RuntimeStub frame 6086 6087 #ifdef _WIN64 6088 __ push(rsi); 6089 __ push(rdi); 6090 #endif 6091 __ fast_sin(x0, x1, x2, x3, x4, x5, x6, x7, rax, rbx, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 6092 6093 #ifdef _WIN64 6094 __ pop(rdi); 6095 __ pop(rsi); 6096 #endif 6097 6098 __ leave(); // required for proper stackwalking of RuntimeStub frame 6099 __ ret(0); 6100 6101 return start; 6102 6103 } 6104 6105 address generate_libmCos() { 6106 StubCodeMark mark(this, "StubRoutines", "libmCos"); 6107 6108 address start = __ pc(); 6109 6110 const XMMRegister x0 = xmm0; 6111 const XMMRegister x1 = xmm1; 6112 const XMMRegister x2 = xmm2; 6113 const XMMRegister x3 = xmm3; 6114 6115 const XMMRegister x4 = xmm4; 6116 const XMMRegister x5 = xmm5; 6117 const XMMRegister x6 = xmm6; 6118 const XMMRegister x7 = xmm7; 6119 6120 const Register tmp1 = r8; 6121 const Register tmp2 = r9; 6122 const Register tmp3 = r10; 6123 const Register tmp4 = r11; 6124 6125 BLOCK_COMMENT("Entry:"); 6126 __ enter(); // required for proper stackwalking of RuntimeStub frame 6127 6128 #ifdef _WIN64 6129 __ push(rsi); 6130 __ push(rdi); 6131 #endif 6132 __ fast_cos(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 6133 6134 #ifdef _WIN64 6135 __ pop(rdi); 6136 __ pop(rsi); 6137 #endif 6138 6139 __ leave(); // required for proper stackwalking of RuntimeStub frame 6140 __ ret(0); 6141 6142 return start; 6143 6144 } 6145 6146 address generate_libmTan() { 6147 StubCodeMark mark(this, "StubRoutines", "libmTan"); 6148 6149 address start = __ pc(); 6150 6151 const XMMRegister x0 = xmm0; 6152 const XMMRegister x1 = xmm1; 6153 const XMMRegister x2 = xmm2; 6154 const XMMRegister x3 = xmm3; 6155 6156 const XMMRegister x4 = xmm4; 6157 const XMMRegister x5 = xmm5; 6158 const XMMRegister x6 = xmm6; 6159 const XMMRegister x7 = xmm7; 6160 6161 const Register tmp1 = r8; 6162 const Register tmp2 = r9; 6163 const Register tmp3 = r10; 6164 const Register tmp4 = r11; 6165 6166 BLOCK_COMMENT("Entry:"); 6167 __ enter(); // required for proper stackwalking of RuntimeStub frame 6168 6169 #ifdef _WIN64 6170 __ push(rsi); 6171 __ push(rdi); 6172 #endif 6173 __ fast_tan(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 6174 6175 #ifdef _WIN64 6176 __ pop(rdi); 6177 __ pop(rsi); 6178 #endif 6179 6180 __ leave(); // required for proper stackwalking of RuntimeStub frame 6181 __ ret(0); 6182 6183 return start; 6184 6185 } 6186 6187 #undef __ 6188 #define __ masm-> 6189 6190 // Continuation point for throwing of implicit exceptions that are 6191 // not handled in the current activation. Fabricates an exception 6192 // oop and initiates normal exception dispatching in this 6193 // frame. Since we need to preserve callee-saved values (currently 6194 // only for C2, but done for C1 as well) we need a callee-saved oop 6195 // map and therefore have to make these stubs into RuntimeStubs 6196 // rather than BufferBlobs. 
If the compiler needs all registers to 6197 // be preserved between the fault point and the exception handler 6198 // then it must assume responsibility for that in 6199 // AbstractCompiler::continuation_for_implicit_null_exception or 6200 // continuation_for_implicit_division_by_zero_exception. All other 6201 // implicit exceptions (e.g., NullPointerException or 6202 // AbstractMethodError on entry) are either at call sites or 6203 // otherwise assume that stack unwinding will be initiated, so 6204 // caller saved registers were assumed volatile in the compiler. 6205 address generate_throw_exception(const char* name, 6206 address runtime_entry, 6207 Register arg1 = noreg, 6208 Register arg2 = noreg) { 6209 // Information about frame layout at time of blocking runtime call. 6210 // Note that we only have to preserve callee-saved registers since 6211 // the compilers are responsible for supplying a continuation point 6212 // if they expect all registers to be preserved. 6213 enum layout { 6214 rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt, 6215 rbp_off2, 6216 return_off, 6217 return_off2, 6218 framesize // inclusive of return address 6219 }; 6220 6221 int insts_size = 512; 6222 int locs_size = 64; 6223 6224 CodeBuffer code(name, insts_size, locs_size); 6225 OopMapSet* oop_maps = new OopMapSet(); 6226 MacroAssembler* masm = new MacroAssembler(&code); 6227 6228 address start = __ pc(); 6229 6230 // This is an inlined and slightly modified version of call_VM 6231 // which has the ability to fetch the return PC out of 6232 // thread-local storage and also sets up last_Java_sp slightly 6233 // differently than the real call_VM 6234 6235 __ enter(); // required for proper stackwalking of RuntimeStub frame 6236 6237 assert(is_even(framesize/2), "sp not 16-byte aligned"); 6238 6239 // return address and rbp are already in place 6240 __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog 6241 6242 int frame_complete = __ pc() - start; 6243 6244 // Set up last_Java_sp and last_Java_fp 6245 address the_pc = __ pc(); 6246 __ set_last_Java_frame(rsp, rbp, the_pc); 6247 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack 6248 6249 // Call runtime 6250 if (arg1 != noreg) { 6251 assert(arg2 != c_rarg1, "clobbered"); 6252 __ movptr(c_rarg1, arg1); 6253 } 6254 if (arg2 != noreg) { 6255 __ movptr(c_rarg2, arg2); 6256 } 6257 __ movptr(c_rarg0, r15_thread); 6258 BLOCK_COMMENT("call runtime_entry"); 6259 __ call(RuntimeAddress(runtime_entry)); 6260 6261 // Generate oop map 6262 OopMap* map = new OopMap(framesize, 0); 6263 6264 oop_maps->add_gc_map(the_pc - start, map); 6265 6266 __ reset_last_Java_frame(true); 6267 6268 __ leave(); // required for proper stackwalking of RuntimeStub frame 6269 6270 // check for pending exceptions 6271 #ifdef ASSERT 6272 Label L; 6273 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), 6274 (int32_t) NULL_WORD); 6275 __ jcc(Assembler::notEqual, L); 6276 __ should_not_reach_here(); 6277 __ bind(L); 6278 #endif // ASSERT 6279 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 6280 6281 6282 // codeBlob framesize is in words (not VMRegImpl::slot_size) 6283 RuntimeStub* stub = 6284 RuntimeStub::new_runtime_stub(name, 6285 &code, 6286 frame_complete, 6287 (framesize >> (LogBytesPerWord - LogBytesPerInt)), 6288 oop_maps, false); 6289 return stub->entry_point(); 6290 } 6291 6292 void create_control_words() { 6293 // Round to nearest, 53-bit mode, exceptions masked 6294 StubRoutines::_fpu_cntrl_wrd_std = 0x027F; 6295 // Round to zero, 53-bit 
  void create_control_words() {
    // Round to nearest, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_std   = 0x027F;
    // Round to zero, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
    // Round to nearest, 24-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_24    = 0x007F;
    // Round to nearest, exceptions masked
    StubRoutines::_mxcsr_std           = 0x1F80;
    // Note: the following two constants are 80-bit values;
    // their layout is critical for correct loading by the FPU.
    // Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias1[0] = 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias1[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias1[2] = 0x03ff;
    // Un-Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias2[0] = 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias2[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias2[2] = 0x7bff;
  }
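
  // Editorial decode of the control words above (added note, not original
  // code). x87 control word: bits 0-5 are the exception masks, bits 8-9 the
  // precision control (PC), bits 10-11 the rounding control (RC):
  //   0x027F : RC = 00 (nearest),     PC = 10 (53-bit), all masks set
  //   0x007F : RC = 00 (nearest),     PC = 00 (24-bit), all masks set
  //   0x0D7F : RC = 11 (toward zero),                   all masks set
  // MXCSR: bits 7-12 are the exception masks, bits 13-14 the RC:
  //   0x1F80 : RC = 00 (nearest), all masks set, FZ/DAZ clear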

  // Initialization
  void generate_initial() {
    // Generates all stubs and initializes the entry points

    // These platform-specific settings are needed by generate_call_stub()
    create_control_words();

    // entry points that exist in all platforms.  Note: This is code
    // that could be shared among different platforms - however the
    // benefit seems to be smaller than the disadvantage of having a
    // much more complicated generator structure. See also comment in
    // stubRoutines.hpp.

    StubRoutines::_forward_exception_entry = generate_forward_exception();

    StubRoutines::_call_stub_entry =
      generate_call_stub(StubRoutines::_call_stub_return_address);

    // is referenced by megamorphic call
    StubRoutines::_catch_exception_entry = generate_catch_exception();

    // atomic calls
    StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
    StubRoutines::_atomic_xchg_long_entry    = generate_atomic_xchg_long();
    StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
    StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
    StubRoutines::_atomic_add_entry          = generate_atomic_add();
    StubRoutines::_atomic_add_long_entry     = generate_atomic_add_long();
    StubRoutines::_fence_entry               = generate_orderaccess_fence();

    // platform dependent
    StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
    StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();

    StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();

    StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
    StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
    StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
    StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();

    // masks used by compiled code for FP abs (clear the sign bit) and
    // FP negate (flip the sign bit)
    StubRoutines::x86::_float_sign_mask  = generate_fp_mask("float_sign_mask",  0x7FFFFFFF7FFFFFFF);
    StubRoutines::x86::_float_sign_flip  = generate_fp_mask("float_sign_flip",  0x8000000080000000);
    StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
    StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);

    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_StackOverflowError));
    StubRoutines::_throw_delayed_StackOverflowError_entry =
      generate_throw_exception("delayed StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_delayed_StackOverflowError));
    if (UseCRC32Intrinsics) {
      // set table address before stub generation, which uses it
      StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
    }

    if (UseCRC32CIntrinsics) {
      bool supports_clmul = VM_Version::supports_clmul();
      StubRoutines::x86::generate_CRC32C_table(supports_clmul);
      StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
      StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
    }
    if (UseLibmIntrinsic && InlineIntrinsics) {
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::x86::_ONEHALF_adr = (address)StubRoutines::x86::_ONEHALF;
        StubRoutines::x86::_P_2_adr = (address)StubRoutines::x86::_P_2;
        StubRoutines::x86::_SC_4_adr = (address)StubRoutines::x86::_SC_4;
        StubRoutines::x86::_Ctable_adr = (address)StubRoutines::x86::_Ctable;
        StubRoutines::x86::_SC_2_adr = (address)StubRoutines::x86::_SC_2;
        StubRoutines::x86::_SC_3_adr = (address)StubRoutines::x86::_SC_3;
        StubRoutines::x86::_SC_1_adr = (address)StubRoutines::x86::_SC_1;
        StubRoutines::x86::_PI_INV_TABLE_adr = (address)StubRoutines::x86::_PI_INV_TABLE;
        StubRoutines::x86::_PI_4_adr = (address)StubRoutines::x86::_PI_4;
        StubRoutines::x86::_PI32INV_adr = (address)StubRoutines::x86::_PI32INV;
        StubRoutines::x86::_SIGN_MASK_adr = (address)StubRoutines::x86::_SIGN_MASK;
        StubRoutines::x86::_P_1_adr = (address)StubRoutines::x86::_P_1;
        StubRoutines::x86::_P_3_adr = (address)StubRoutines::x86::_P_3;
        StubRoutines::x86::_NEG_ZERO_adr = (address)StubRoutines::x86::_NEG_ZERO;
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dexp)) {
        StubRoutines::_dexp = generate_libmExp();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
        StubRoutines::_dlog = generate_libmLog();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) {
        StubRoutines::_dlog10 = generate_libmLog10();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) {
        StubRoutines::_dpow = generate_libmPow();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
        StubRoutines::_dsin = generate_libmSin();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
        StubRoutines::_dcos = generate_libmCos();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::_dtan = generate_libmTan();
      }
    }

    // Safefetch stubs.
    generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
                       &StubRoutines::_safefetch32_fault_pc,
                       &StubRoutines::_safefetch32_continuation_pc);
    generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                       &StubRoutines::_safefetchN_fault_pc,
                       &StubRoutines::_safefetchN_continuation_pc);
  }
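
  // Usage sketch for the SafeFetch stubs generated above (editorial note,
  // not original code): SafeFetch32/SafeFetchN (see stubRoutines.hpp) load
  // from a possibly-invalid address and return a caller-supplied error
  // value instead of crashing when the load faults:
  //
  //   int v = SafeFetch32((int*)maybe_bad_addr, -1);  // -1 if addr faults
  //
  // The fault PC and continuation PC recorded here let the signal handler
  // recognize a faulting safe-fetch and resume at the stub's recovery path.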
6426 generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry, 6427 &StubRoutines::_safefetch32_fault_pc, 6428 &StubRoutines::_safefetch32_continuation_pc); 6429 generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry, 6430 &StubRoutines::_safefetchN_fault_pc, 6431 &StubRoutines::_safefetchN_continuation_pc); 6432 } 6433 6434 void generate_all() { 6435 // Generates all stubs and initializes the entry points 6436 6437 // These entry points require SharedInfo::stack0 to be set up in 6438 // non-core builds and need to be relocatable, so they each 6439 // fabricate a RuntimeStub internally. 6440 StubRoutines::_throw_AbstractMethodError_entry = 6441 generate_throw_exception("AbstractMethodError throw_exception", 6442 CAST_FROM_FN_PTR(address, 6443 SharedRuntime:: 6444 throw_AbstractMethodError)); 6445 6446 StubRoutines::_throw_IncompatibleClassChangeError_entry = 6447 generate_throw_exception("IncompatibleClassChangeError throw_exception", 6448 CAST_FROM_FN_PTR(address, 6449 SharedRuntime:: 6450 throw_IncompatibleClassChangeError)); 6451 6452 StubRoutines::_throw_NullPointerException_at_call_entry = 6453 generate_throw_exception("NullPointerException at call throw_exception", 6454 CAST_FROM_FN_PTR(address, 6455 SharedRuntime:: 6456 throw_NullPointerException_at_call)); 6457 6458 // entry points that are platform specific 6459 StubRoutines::x86::_vector_float_sign_mask = generate_vector_mask("vector_float_sign_mask", 0x7FFFFFFF7FFFFFFF); 6460 StubRoutines::x86::_vector_float_sign_flip = generate_vector_mask("vector_float_sign_flip", 0x8000000080000000); 6461 StubRoutines::x86::_vector_double_sign_mask = generate_vector_mask("vector_double_sign_mask", 0x7FFFFFFFFFFFFFFF); 6462 StubRoutines::x86::_vector_double_sign_flip = generate_vector_mask("vector_double_sign_flip", 0x8000000000000000); 6463 StubRoutines::x86::_vector_short_to_byte_mask = generate_vector_mask("vector_short_to_byte_mask", 0x00ff00ff00ff00ff); 6464 StubRoutines::x86::_vector_byte_perm_mask = generate_vector_byte_perm_mask("vector_byte_perm_mask"); 6465 StubRoutines::x86::_vector_long_sign_mask = generate_vector_mask("vector_long_sign_mask", 0x8000000000000000); 6466 6467 // support for verify_oop (must happen after universe_init) 6468 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); 6469 6470 // data cache line writeback 6471 StubRoutines::_data_cache_writeback = generate_data_cache_writeback(); 6472 StubRoutines::_data_cache_writeback_sync = generate_data_cache_writeback_sync(); 6473 6474 // arraycopy stubs used by compilers 6475 generate_arraycopy_stubs(); 6476 6477 // don't bother generating these AES intrinsic stubs unless global flag is set 6478 if (UseAESIntrinsics) { 6479 StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others 6480 StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock(); 6481 StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock(); 6482 StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt(); 6483 if (VM_Version::supports_avx512_vaes() && VM_Version::supports_avx512vl() && VM_Version::supports_avx512dq() ) { 6484 StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptVectorAESCrypt(); 6485 StubRoutines::_electronicCodeBook_encryptAESCrypt = generate_electronicCodeBook_encryptAESCrypt(); 6486 StubRoutines::_electronicCodeBook_decryptAESCrypt = 

    // don't bother generating these AES intrinsic stubs unless global flag is set
    if (UseAESIntrinsics) {
      StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others
      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
      if (VM_Version::supports_avx512_vaes() && VM_Version::supports_avx512vl() && VM_Version::supports_avx512dq()) {
        StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptVectorAESCrypt();
        StubRoutines::_electronicCodeBook_encryptAESCrypt = generate_electronicCodeBook_encryptAESCrypt();
        StubRoutines::_electronicCodeBook_decryptAESCrypt = generate_electronicCodeBook_decryptAESCrypt();
      } else {
        StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
      }
    }
    if (UseAESCTRIntrinsics) {
      if (VM_Version::supports_avx512_vaes() && VM_Version::supports_avx512bw() && VM_Version::supports_avx512vl()) {
        StubRoutines::x86::_counter_mask_addr = counter_mask_addr();
        StubRoutines::_counterMode_AESCrypt = generate_counterMode_VectorAESCrypt();
      } else {
        StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask();
        StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
      }
    }

    if (UseSHA1Intrinsics) {
      StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
      StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
      StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress");
      StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB");
    }
    if (UseSHA256Intrinsics) {
      StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
      // Duplicate each 16-byte group of the k256 round constants into both
      // 128-bit halves of a 32-byte row of k256_W, ready for 256-bit loads.
      char* dst = (char*)StubRoutines::x86::_k256_W;
      char* src = (char*)StubRoutines::x86::_k256;
      for (int ii = 0; ii < 16; ++ii) {
        memcpy(dst + 32 * ii,      src + 16 * ii, 16);
        memcpy(dst + 32 * ii + 16, src + 16 * ii, 16);
      }
      StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask();
      StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress");
      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB");
    }
    if (UseSHA512Intrinsics) {
      StubRoutines::x86::_k512_W_addr = (address)StubRoutines::x86::_k512_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = generate_pshuffle_byte_flip_mask_sha512();
      StubRoutines::_sha512_implCompress = generate_sha512_implCompress(false, "sha512_implCompress");
      StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB");
    }

    // Generate GHASH intrinsics code
    if (UseGHASHIntrinsics) {
      StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();
      StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask();
      if (VM_Version::supports_avx()) {
        StubRoutines::x86::_ghash_shuffmask_addr = ghash_shufflemask_addr();
        StubRoutines::x86::_ghash_poly_addr = ghash_polynomial_addr();
        StubRoutines::_ghash_processBlocks = generate_avx_ghash_processBlocks();
      } else {
        StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
      }
    }

    if (UseBASE64Intrinsics) {
      StubRoutines::x86::_and_mask = base64_and_mask_addr();
      StubRoutines::x86::_bswap_mask = base64_bswap_mask_addr();
      StubRoutines::x86::_base64_charset = base64_charset_addr();
      StubRoutines::x86::_url_charset = base64url_charset_addr();
      StubRoutines::x86::_gather_mask = base64_gather_mask_addr();
      StubRoutines::x86::_left_shift_mask = base64_left_shift_mask_addr();
      StubRoutines::x86::_right_shift_mask = base64_right_shift_mask_addr();
      StubRoutines::_base64_encodeBlock = generate_base64_encodeBlock();
    }

    BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
    if (bs_nm != NULL) {
      StubRoutines::x86::_method_entry_barrier = generate_method_entry_barrier();
    }
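
    // Editorial note (not original code): the method entry barrier stub is
    // the shared slow path for nmethod entry barriers. A GC that unloads
    // classes concurrently (e.g. ZGC) arms a per-nmethod guard value; an
    // armed compiled-method entry then branches into this stub, which calls
    // the runtime to fix up the nmethod before execution proceeds.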

#ifdef COMPILER2
    if (UseMultiplyToLenIntrinsic) {
      StubRoutines::_multiplyToLen = generate_multiplyToLen();
    }
    if (UseSquareToLenIntrinsic) {
      StubRoutines::_squareToLen = generate_squareToLen();
    }
    if (UseMulAddIntrinsic) {
      StubRoutines::_mulAdd = generate_mulAdd();
    }
    if (VM_Version::supports_avx512_vbmi2()) {
      StubRoutines::_bigIntegerRightShiftWorker = generate_bigIntegerRightShift();
      StubRoutines::_bigIntegerLeftShiftWorker = generate_bigIntegerLeftShift();
    }
    if (UseMontgomeryMultiplyIntrinsic) {
      StubRoutines::_montgomeryMultiply
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
    }
    if (UseMontgomerySquareIntrinsic) {
      StubRoutines::_montgomerySquare
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
    }
#endif // COMPILER2

    if (UseVectorizedMismatchIntrinsic) {
      StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch();
    }
  }

 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
}; // end class declaration

#define UCM_TABLE_MAX_ENTRIES 16
void StubGenerator_generate(CodeBuffer* code, bool all) {
  if (UnsafeCopyMemory::_table == NULL) {
    UnsafeCopyMemory::create_table(UCM_TABLE_MAX_ENTRIES);
  }
  StubGenerator g(code, all);
}
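
// Editorial usage note (not original code): StubGenerator_generate is the
// platform entry point reached from the shared runtime during VM startup,
// once early with all == false (generate_initial: stubs the interpreter
// needs, such as call_stub and the StackOverflowError throwers) and once
// later with all == true (generate_all: compiler-oriented stubs such as
// arraycopy and the crypto intrinsics). Roughly, as in stubRoutines.cpp:
//
//   CodeBuffer buffer1(_code1);
//   StubGenerator_generate(&buffer1, false);  // early: generate_initial()
//   ...
//   CodeBuffer buffer2(_code2);
//   StubGenerator_generate(&buffer2, true);   // later: generate_all()
//
// The UnsafeCopyMemory table is created lazily here so the arraycopy stubs
// can register their unsafe-copy code regions when first generated.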