/*
 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/zThreadLocalData.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions

// Stub Code definitions

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    // This can destroy rscratch1 if counter is far from the code cache
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter) \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif

  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    c_rarg4:   (interpreter) entry point              address
  //    c_rarg5:   parameters                             intptr_t*
  //    16(rbp):   parameter size (in words)              int
  //    24(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -12 [ argument word 1      ]
  // -11 [ saved r15            ] <--- rsp_after_call
  // -10 [ saved r14            ]
  //  -9 [ saved r13            ]
  //  -8 [ saved r12            ]
  //  -7 [ saved rbx            ]
  //  -6 [ call wrapper         ]
  //  -5 [ result               ]
  //  -4 [ result type          ]
  //  -3 [ method               ]
  //  -2 [ entry point          ]
  //  -1 [ parameters           ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ parameter size       ]
  //   3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    48(rbp):   (interpreter) entry point              address
  //    56(rbp):   parameters                             intptr_t*
  //    64(rbp):   parameter size (in words)              int
  //    72(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -60 [ argument word 1      ]
  // -59 [ saved xmm31          ] <--- rsp_after_call
  //     [ saved xmm16-xmm30    ] (EVEX enabled, else the space is blank)
  // -27 [ saved xmm15          ]
  //     [ saved xmm7-xmm14     ]
  //  -9 [ saved xmm6           ] (each xmm register takes 2 slots)
  //  -7 [ saved r15            ]
  //  -6 [ saved r14            ]
  //  -5 [ saved r13            ]
  //  -4 [ saved r12            ]
  //  -3 [ saved rdi            ]
  //  -2 [ saved rsi            ]
  //  -1 [ saved rbx            ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ call wrapper         ]
  //   3 [ result               ]
  //   4 [ result type          ]
  //   5 [ method               ]
  //   6 [ entry point          ]
  //   7 [ parameters           ]
  //   8 [ parameter size       ]
  //   9 [ thread               ]
  //
  //    Windows reserves the caller's stack space for arguments 1-4.
  //    We spill c_rarg0-c_rarg3 to this space.

  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    xmm_save_first     = 6,  // save from xmm6
    xmm_save_last      = 31, // to xmm31
    xmm_save_base      = -9,
    rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -59
    r15_off            = -7,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            = 0,
    retaddr_off        = 1,
    call_wrapper_off   = 2,
    result_off         = 3,
    result_type_off    = 4,
    method_off         = 5,
    entry_point_off    = 6,
    parameters_off     = 7,
    parameter_size_off = 8,
    thread_off         = 9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            = 0,
    retaddr_off        = 1,
    parameter_size_off = 2,
    thread_off         = 3
#endif
  };

#ifdef _WIN64
  Address xmm_save(int reg) {
    assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
    return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
  }
#endif

  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,   c_rarg5); // parameters
    __ movptr(entry_point,  c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);

#ifdef _WIN64
    int last_reg = 15;
    if (UseAVX > 2) {
      last_reg = 31;
    }
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vextractf32x4(xmm_save(i), as_XMMRegister(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(xmm_save(i), as_XMMRegister(i));
      }
    }

    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);     // parameter pointer
    __ movl(c_rarg1, c_rarg3);          // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0));// get parameter
    __ addptr(c_rarg2, wordSize);       // advance to next parameter
    __ decrementl(c_rarg1);             // decrement counter
    __ push(rax);                       // pass parameter
    __ jcc(Assembler::notZero, loop);

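    // Note: the loop above pushes parameters in array order, so the last
    // Java argument ends up at the lowest address (top of stack), which is
    // the layout the interpreter's entry point expects.
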
    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);             // get Method*
    __ movptr(c_rarg1, entry_point);    // get entry_point
    __ mov(r13, rsp);                   // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::call_stub: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::call_stub: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L3);
    }
#endif

    // restore regs belonging to calling function
#ifdef _WIN64
    // emit the restores for xmm regs
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vinsertf32x4(as_XMMRegister(i), as_XMMRegister(i), xmm_save(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(as_XMMRegister(i), xmm_save(i));
      }
    }
#endif
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ vzeroupper();
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }

  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to setup the
  // rsp.
  //
  // rax: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    address start = __ pc();

    // same as in generate_call_stub():
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
    const Address thread        (rbp, thread_off         * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::catch_exception: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::catch_exception: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L3);
    }
#endif

    // set pending exception
    __ verify_oop(rax);

    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
    __ lea(rscratch1, ExternalAddress((address)__FILE__));
    __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
    __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL,
           "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }

  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.
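    //
    // In effect the stub below performs:
    //   rbx = SharedRuntime::exception_handler_for_return_address(thread, *rsp);
    //   rdx = pop();                       // throwing pc
    //   rax = thread->pending_exception;
    //   thread->pending_exception = NULL;
    //   jmp rbx;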

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }

  // Support for intptr_t OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess).  This is used as part of debugging
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp(rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // caller's fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_sp()
  //
  // This routine is used to find the previous stack pointer for the
  // caller.
  address generate_get_previous_sp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_sp");
    address start = __ pc();

    __ movptr(rax, rsp);
    __ addptr(rax, 8); // return address is at the top of the stack.
    __ ret(0);

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.
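  //
  // MXCSR_MASK (0xFFC0) clears the six exception status flags in bits 0-5,
  // so the comparison below checks only the control and mask bits against
  // the saved standard value.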

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize);    // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }

  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64( mask, relocInfo::none );
    __ emit_data64( mask, relocInfo::none );

    return start;
  }

  address generate_vector_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);

    return start;
  }

  address generate_vector_byte_perm_mask(const char *stub_name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64(0x0000000000000001, relocInfo::none);
    __ emit_data64(0x0000000000000003, relocInfo::none);
    __ emit_data64(0x0000000000000005, relocInfo::none);
    __ emit_data64(0x0000000000000007, relocInfo::none);
    __ emit_data64(0x0000000000000000, relocInfo::none);
    __ emit_data64(0x0000000000000002, relocInfo::none);
    __ emit_data64(0x0000000000000004, relocInfo::none);
    __ emit_data64(0x0000000000000006, relocInfo::none);

    return start;
  }

  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,
      saved_r10     = 8 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr   = 16 * wordSize,
      error_msg     = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit); // if obj is NULL it is OK

#if INCLUDE_ZGC
    if (UseZGC) {
      // Check if metadata bits indicate a bad oop
      __ testptr(rax, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
      __ jcc(Assembler::notZero, error);
    }
#endif

    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // make sure klass is 'reasonable', which is not zero.
    __ load_klass(rax, rax, rscratch1); // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // restore c_rarg3
    __ pop(c_rarg2);                               // restore c_rarg2
    __ pop(r12);                                   // restore r12
    __ popf();                                     // restore flags
    __ ret(4 * wordSize);                          // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // get saved c_rarg3 back
    __ pop(c_rarg2);                               // get saved c_rarg2 back
    __ pop(r12);                                   // get saved r12 back
    __ popf();                                     // get saved flags off stack --
                                                   // will be ignored

    __ pusha();                                    // push registers
                                                   // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //     [tos +  0] 16 saved registers
    //     [tos + 16] return address
    //   * [tos + 17] error message (char*)
    //   * [tos + 18] object to verify (oop)
    //   * [tos + 19] saved rax - saved by caller and bashed
    //   * [tos + 20] saved r10 (rscratch1) - saved by caller
    //   * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ hlt();
    return start;
  }

  //
  // Verify that a register contains clean 32-bits positive value
  // (high 32-bits are 0) so it could be used in 64-bits shifts.
  //
  //  Input:
  //    Rint  -  32-bits value
  //    Rtmp  -  scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#ifdef ASSERT
    Label L;
    assert_different_registers(Rtmp, Rint);
    __ movslq(Rtmp, Rint);
    __ cmpq(Rtmp, Rint);
    __ jcc(Assembler::equal, L);
    __ stop("high 32-bits of int value are not 0");
    __ bind(L);
#endif
  }

  // Generate overlap test for array copy stubs
  //
  // Input:
  //     c_rarg0 - from
  //     c_rarg1 - to
  //     c_rarg2 - element count
  //
  // Output:
  //     rax - &from[element count - 1]
  //
  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(NULL, &L_no_overlap, sf);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
    const Register from     = c_rarg0;
    const Register to       = c_rarg1;
    const Register count    = c_rarg2;
    const Register end_from = rax;

    __ cmpptr(to, from);
    __ lea(end_from, Address(from, count, sf, 0));
    if (NOLp == NULL) {
      ExternalAddress no_overlap(no_overlap_target);
      __ jump_cc(Assembler::belowEqual, no_overlap);
      __ cmpptr(to, end_from);
      __ jump_cc(Assembler::aboveEqual, no_overlap);
    } else {
      __ jcc(Assembler::belowEqual, (*NOLp));
      __ cmpptr(to, end_from);
      __ jcc(Assembler::aboveEqual, (*NOLp));
    }
  }

  // Shuffle first three arg regs on Windows into Linux/Solaris locations.
  //
  // Outputs:
  //    rdi - rcx
  //    rsi - rdx
  //    rdx - r8
  //    rcx - r9
  //
  // Registers r9 and r10 are used to save rdi and rsi on Windows, both of
  // which are non-volatile.  r9 and r10 should not be used by the caller.
  //
  DEBUG_ONLY(bool regs_in_thread;)

  void setup_arg_regs(int nargs = 3) {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
    assert(nargs == 3 || nargs == 4, "else fix");
#ifdef _WIN64
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    if (nargs >= 4)
      __ mov(rax, r9);  // r9 is also saved_rdi
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
    if (nargs >= 4)
      __ mov(rcx, rax); // c_rarg3 (via rax)
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
    DEBUG_ONLY(regs_in_thread = false;)
  }

  void restore_arg_regs() {
    assert(!regs_in_thread, "wrong call to restore_arg_regs");
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
#ifdef _WIN64
    __ movptr(rdi, saved_rdi);
    __ movptr(rsi, saved_rsi);
#endif
  }

  // This is used in places where r10 is a scratch register, and can
  // be adapted if r9 is needed also.
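  // Unlike setup_arg_regs(), which parks rdi/rsi in r9/r10, this variant
  // saves them in JavaThread-local slots and uses only r9 (to preserve r15),
  // leaving r10 (rscratch1) free for the caller.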
  void setup_arg_regs_using_thread() {
    const Register saved_r15 = r9;
#ifdef _WIN64
    __ mov(saved_r15, r15);  // r15 is callee saved and needs to be restored
    __ get_thread(r15_thread);
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    __ movptr(Address(r15_thread, in_bytes(JavaThread::windows_saved_rdi_offset())), rdi);
    __ movptr(Address(r15_thread, in_bytes(JavaThread::windows_saved_rsi_offset())), rsi);

    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
    DEBUG_ONLY(regs_in_thread = true;)
  }

  void restore_arg_regs_using_thread() {
    assert(regs_in_thread, "wrong call to restore_arg_regs");
    const Register saved_r15 = r9;
#ifdef _WIN64
    __ get_thread(r15_thread);
    __ movptr(rsi, Address(r15_thread, in_bytes(JavaThread::windows_saved_rsi_offset())));
    __ movptr(rdi, Address(r15_thread, in_bytes(JavaThread::windows_saved_rdi_offset())));
    __ mov(r15, saved_r15);  // r15 is callee saved and needs to be restored
#endif
  }

  // Copy big chunks forward
  //
  // Inputs:
  //   end_from       - source array end address
  //   end_to         - destination array end address
  //   qword_count    - 64-bits element count, negative
  //   to             - scratch
  //   L_copy_bytes   - entry label
  //   L_copy_8_bytes - exit label
  //
  void copy_bytes_forward(Register end_from, Register end_to,
                          Register qword_count, Register to,
                          Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      // Copy 64-bytes per iteration
      if (UseAVX > 2) {
        Label L_loop_avx512, L_loop_avx2, L_32_byte_head, L_above_threshold, L_below_threshold;

        __ BIND(L_copy_bytes);
        __ cmpptr(qword_count, (-1 * AVX3Threshold / 8));
        __ jccb(Assembler::less, L_above_threshold);
        __ jmpb(L_below_threshold);

        __ bind(L_loop_avx512);
        __ evmovdqul(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit);
        __ evmovdqul(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit);
        __ bind(L_above_threshold);
        __ addptr(qword_count, 8);
        __ jcc(Assembler::lessEqual, L_loop_avx512);
        __ jmpb(L_32_byte_head);

        __ bind(L_loop_avx2);
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
        __ bind(L_below_threshold);
        __ addptr(qword_count, 8);
        __ jcc(Assembler::lessEqual, L_loop_avx2);

        __ bind(L_32_byte_head);
        __ subptr(qword_count, 4);  // sub(8) and add(4)
        __ jccb(Assembler::greater, L_end);
      } else {
        __ BIND(L_loop);
        if (UseAVX == 2) {
          __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
          __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
          __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
          __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
        } else {
          __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
          __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
          __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
          __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
          __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
          __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
          __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
          __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
        }

        __ BIND(L_copy_bytes);
        __ addptr(qword_count, 8);
        __ jcc(Assembler::lessEqual, L_loop);
        __ subptr(qword_count, 4);  // sub(8) and add(4)
        __ jccb(Assembler::greater, L_end);
      }
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
      }
      __ addptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32-bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
      __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
      __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
      __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
      __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);

      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 4);
      __ jcc(Assembler::lessEqual, L_loop);
    }
    __ subptr(qword_count, 4);
    __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
  }

  // Copy big chunks backward
  //
  // Inputs:
  //   from           - source array address
  //   dest           - destination array address
  //   qword_count    - 64-bits element count
  //   to             - scratch
  //   L_copy_bytes   - entry label
  //   L_copy_8_bytes - exit label
  //
  void copy_bytes_backward(Register from, Register dest,
                           Register qword_count, Register to,
                           Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      // Copy 64-bytes per iteration
      if (UseAVX > 2) {
        Label L_loop_avx512, L_loop_avx2, L_32_byte_head, L_above_threshold, L_below_threshold;

        __ BIND(L_copy_bytes);
        __ cmpptr(qword_count, (AVX3Threshold / 8));
        __ jccb(Assembler::greater, L_above_threshold);
        __ jmpb(L_below_threshold);

        __ BIND(L_loop_avx512);
        __ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 0), Assembler::AVX_512bit);
        __ evmovdqul(Address(dest, qword_count, Address::times_8, 0), xmm0, Assembler::AVX_512bit);
        __ bind(L_above_threshold);
        __ subptr(qword_count, 8);
        __ jcc(Assembler::greaterEqual, L_loop_avx512);
        __ jmpb(L_32_byte_head);

        __ bind(L_loop_avx2);
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
        __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
        __ bind(L_below_threshold);
        __ subptr(qword_count, 8);
        __ jcc(Assembler::greaterEqual, L_loop_avx2);

        __ bind(L_32_byte_head);
        __ addptr(qword_count, 4);  // add(8) and sub(4)
        __ jccb(Assembler::less, L_end);
      } else {
        __ BIND(L_loop);
        if (UseAVX == 2) {
          __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
          __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
          __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
          __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
        } else {
          __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
          __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
          __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
          __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
          __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
          __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
          __ movdqu(xmm3, Address(from, qword_count, Address::times_8, 0));
          __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm3);
        }

        __ BIND(L_copy_bytes);
        __ subptr(qword_count, 8);
        __ jcc(Assembler::greaterEqual, L_loop);

        __ addptr(qword_count, 4);  // add(8) and sub(4)
        __ jccb(Assembler::less, L_end);
      }
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
        __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
      }
      __ subptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32-bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(from, qword_count, Address::times_8, 24));
      __ movq(Address(dest, qword_count, Address::times_8, 24), to);
      __ movq(to, Address(from, qword_count, Address::times_8, 16));
      __ movq(Address(dest, qword_count, Address::times_8, 16), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  8));
      __ movq(Address(dest, qword_count, Address::times_8,  8), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  0));
      __ movq(Address(dest, qword_count, Address::times_8,  0), to);

      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 4);
      __ jcc(Assembler::greaterEqual, L_loop);
    }
    __ addptr(qword_count, 4);
    __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_byte_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_byte_copy().
  //
  address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    Label L_copy_byte, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied: end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !aligned, true);
      // 'from', 'to' and 'count' are now valid
      __ movptr(byte_count, count);
      __ shrptr(count, 3);   // count => qword_count

      // Copy from low to high addresses.  Use 'to' as scratch.
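      // end_from/end_to are biased to the last qword of each array and
      // qword_count is negated, so the copy loops index upward through both
      // arrays while a simple increment toward zero drives the loop test.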
      __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
      __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
      __ negptr(qword_count); // make the count negative
      __ jmp(L_copy_bytes);

      // Copy trailing qwords
      __ BIND(L_copy_8_bytes);
      __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
      __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
      __ increment(qword_count);
      __ jcc(Assembler::notZero, L_copy_8_bytes);

      // Check for and copy trailing dword
      __ BIND(L_copy_4_bytes);
      __ testl(byte_count, 4);
      __ jccb(Assembler::zero, L_copy_2_bytes);
      __ movl(rax, Address(end_from, 8));
      __ movl(Address(end_to, 8), rax);

      __ addptr(end_from, 4);
      __ addptr(end_to, 4);

      // Check for and copy trailing word
      __ BIND(L_copy_2_bytes);
      __ testl(byte_count, 2);
      __ jccb(Assembler::zero, L_copy_byte);
      __ movw(rax, Address(end_from, 8));
      __ movw(Address(end_to, 8), rax);

      __ addptr(end_from, 2);
      __ addptr(end_to, 2);

      // Check for and copy trailing byte
      __ BIND(L_copy_byte);
      __ testl(byte_count, 1);
      __ jccb(Assembler::zero, L_exit);
      __ movb(rax, Address(end_from, 8));
      __ movb(Address(end_to, 8), rax);
    }
    __ BIND(L_exit);
    address ucme_exit_pc = __ pc();
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    {
      UnsafeCopyMemoryMark ucmm(this, !aligned, false, ucme_exit_pc);
      // Copy in multi-bytes chunks
      copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
      __ jmp(L_copy_4_bytes);
    }
    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
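
    // Conjoint copies must handle overlapping arrays, so the tail pieces
    // are copied first and the qword loop then runs from high to low
    // addresses; array_overlap_test() below branches to the no-overlap
    // (forward) entry when a forward copy is known to be safe.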

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !aligned, true);
      // 'from', 'to' and 'count' are now valid
      __ movptr(byte_count, count);
      __ shrptr(count, 3);   // count => qword_count

      // Copy from high to low addresses.

      // Check for and copy trailing byte
      __ testl(byte_count, 1);
      __ jcc(Assembler::zero, L_copy_2_bytes);
      __ movb(rax, Address(from, byte_count, Address::times_1, -1));
      __ movb(Address(to, byte_count, Address::times_1, -1), rax);
      __ decrement(byte_count); // Adjust for possible trailing word

      // Check for and copy trailing word
      __ BIND(L_copy_2_bytes);
      __ testl(byte_count, 2);
      __ jcc(Assembler::zero, L_copy_4_bytes);
      __ movw(rax, Address(from, byte_count, Address::times_1, -2));
      __ movw(Address(to, byte_count, Address::times_1, -2), rax);

      // Check for and copy trailing dword
      __ BIND(L_copy_4_bytes);
      __ testl(byte_count, 4);
      __ jcc(Assembler::zero, L_copy_bytes);
      __ movl(rax, Address(from, qword_count, Address::times_8));
      __ movl(Address(to, qword_count, Address::times_8), rax);
      __ jmp(L_copy_bytes);

      // Copy trailing qwords
      __ BIND(L_copy_8_bytes);
      __ movq(rax, Address(from, qword_count, Address::times_8, -8));
      __ movq(Address(to, qword_count, Address::times_8, -8), rax);
      __ decrement(qword_count);
      __ jcc(Assembler::notZero, L_copy_8_bytes);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !aligned, true);
      // Copy in multi-bytes chunks
      copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_short_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_short_copy().
  //
  address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied: end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !aligned, true);
      // 'from', 'to' and 'count' are now valid
      __ movptr(word_count, count);
      __ shrptr(count, 2); // count => qword_count

      // Copy from low to high addresses.  Use 'to' as scratch.
      __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
      __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
      __ negptr(qword_count);
      __ jmp(L_copy_bytes);

      // Copy trailing qwords
      __ BIND(L_copy_8_bytes);
      __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
      __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
      __ increment(qword_count);
      __ jcc(Assembler::notZero, L_copy_8_bytes);

      // Original 'dest' is trashed, so we can't use it as a
      // base register for a possible trailing word copy

      // Check for and copy trailing dword
      __ BIND(L_copy_4_bytes);
      __ testl(word_count, 2);
      __ jccb(Assembler::zero, L_copy_2_bytes);
      __ movl(rax, Address(end_from, 8));
      __ movl(Address(end_to, 8), rax);

      __ addptr(end_from, 4);
      __ addptr(end_to, 4);

      // Check for and copy trailing word
      __ BIND(L_copy_2_bytes);
      __ testl(word_count, 1);
      __ jccb(Assembler::zero, L_exit);
      __ movw(rax, Address(end_from, 8));
      __ movw(Address(end_to, 8), rax);
    }
    __ BIND(L_exit);
    address ucme_exit_pc = __ pc();
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    {
      UnsafeCopyMemoryMark ucmm(this, !aligned, false, ucme_exit_pc);
      // Copy in multi-bytes chunks
      copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
      __ jmp(L_copy_4_bytes);
    }

    return start;
  }

  address generate_fill(BasicType t, bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    BLOCK_COMMENT("Entry:");

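    // The fill loop itself is emitted by MacroAssembler::generate_fill();
    // this stub only sets up the frame and supplies rax and xmm0 as temps.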
    const Register to    = c_rarg0;  // destination array address
    const Register value = c_rarg1;  // value
    const Register count = c_rarg2;  // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ generate_fill(t, aligned, to, value, count, rax, xmm0);

    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
                                       address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_2);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !aligned, true);
      // 'from', 'to' and 'count' are now valid
      __ movptr(word_count, count);
      __ shrptr(count, 2); // count => qword_count

      // Copy from high to low addresses.  Use 'to' as scratch.
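      // The sub-qword tail (an odd trailing word, then an odd dword) is
      // copied first so the backward qword loop below only has to deal
      // with whole qwords.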

      // Check for and copy trailing word
      __ testl(word_count, 1);
      __ jccb(Assembler::zero, L_copy_4_bytes);
      __ movw(rax, Address(from, word_count, Address::times_2, -2));
      __ movw(Address(to, word_count, Address::times_2, -2), rax);

      // Check for and copy trailing dword
      __ BIND(L_copy_4_bytes);
      __ testl(word_count, 2);
      __ jcc(Assembler::zero, L_copy_bytes);
      __ movl(rax, Address(from, qword_count, Address::times_8));
      __ movl(Address(to, qword_count, Address::times_8), rax);
      __ jmp(L_copy_bytes);

      // Copy trailing qwords
      __ BIND(L_copy_8_bytes);
      __ movq(rax, Address(from, qword_count, Address::times_8, -8));
      __ movq(Address(to, qword_count, Address::times_8, -8), rax);
      __ decrement(qword_count);
      __ jcc(Assembler::notZero, L_copy_8_bytes);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !aligned, true);
      // Copy in multi-byte chunks
      copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
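
  // The conjoint stubs tolerate overlap by copying from high to low
  // addresses; array_overlap_test above tail-calls the disjoint (forward)
  // stub whenever that is safe.  Illustrative C sketch (not generated code):
  //
  //   if (!(from < to && to < from + count))     // no destructive overlap
  //     return disjoint_copy(from, to, count);   // forward copy
  //   for (size_t i = count; i-- > 0; )          // otherwise walk backward
  //     to[i] = from[i];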

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  // Side Effects:
  //   disjoint_int_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_int_oop_copy().
  //
  address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry,
                                         const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx
                                   // r9 is used to save r15_thread

    DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT;
    if (dest_uninitialized) {
      decorators |= IS_DEST_UNINITIALIZED;
    }
    if (aligned) {
      decorators |= ARRAYCOPY_ALIGNED;
    }

    BasicType type = is_oop ? T_OBJECT : T_INT;
    BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->arraycopy_prologue(_masm, decorators, type, from, to, count);

    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true);
      // 'from', 'to' and 'count' are now valid
      __ movptr(dword_count, count);
      __ shrptr(count, 1); // count => qword_count

      // Copy from low to high addresses.  Use 'to' as scratch.
      __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
      __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
      __ negptr(qword_count);
      __ jmp(L_copy_bytes);

      // Copy trailing qwords
      __ BIND(L_copy_8_bytes);
      __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
      __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
      __ increment(qword_count);
      __ jcc(Assembler::notZero, L_copy_8_bytes);

      // Check for and copy trailing dword
      __ BIND(L_copy_4_bytes);
      __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
      __ jccb(Assembler::zero, L_exit);
      __ movl(rax, Address(end_from, 8));
      __ movl(Address(end_to, 8), rax);
    }
    __ BIND(L_exit);
    address ucme_exit_pc = __ pc();
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count);
    restore_arg_regs_using_thread();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ vzeroupper();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    {
      UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, false, ucme_exit_pc);
      // Copy in multi-byte chunks
      copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
      __ jmp(L_copy_4_bytes);
    }

    return start;
  }
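
  // The forward loops above bias the end pointers to the last element and
  // run a negated count up toward zero, so one increment both advances the
  // index and drives the exit test.  Illustrative C sketch (not generated
  // code):
  //
  //   int64_t   i  = -(int64_t)qwords;
  //   uint64_t* lf = (uint64_t*)from + qwords - 1;  // inclusive end pointers
  //   uint64_t* lt = (uint64_t*)to   + qwords - 1;
  //   for (; i != 0; i++) lt[i + 1] = lf[i + 1];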

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
                                         address *entry, const char *name,
                                         bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_4);
    setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx
                                   // r9 is used to save r15_thread

    DecoratorSet decorators = IN_HEAP | IS_ARRAY;
    if (dest_uninitialized) {
      decorators |= IS_DEST_UNINITIALIZED;
    }
    if (aligned) {
      decorators |= ARRAYCOPY_ALIGNED;
    }

    BasicType type = is_oop ? T_OBJECT : T_INT;
    BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
    // no registers are destroyed by this call
    bs->arraycopy_prologue(_masm, decorators, type, from, to, count);

    assert_clean_int(count, rax); // Make sure 'count' is clean int.
    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true);
      // 'from', 'to' and 'count' are now valid
      __ movptr(dword_count, count);
      __ shrptr(count, 1); // count => qword_count

      // Copy from high to low addresses.  Use 'to' as scratch.

      // Check for and copy trailing dword
      __ testl(dword_count, 1);
      __ jcc(Assembler::zero, L_copy_bytes);
      __ movl(rax, Address(from, dword_count, Address::times_4, -4));
      __ movl(Address(to, dword_count, Address::times_4, -4), rax);
      __ jmp(L_copy_bytes);

      // Copy trailing qwords
      __ BIND(L_copy_8_bytes);
      __ movq(rax, Address(from, qword_count, Address::times_8, -8));
      __ movq(Address(to, qword_count, Address::times_8, -8), rax);
      __ decrement(qword_count);
      __ jcc(Assembler::notZero, L_copy_8_bytes);
    }
    if (is_oop) {
      __ jmp(L_exit);
    }
    restore_arg_regs_using_thread();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true);
      // Copy in multi-byte chunks
      copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    }

    __ BIND(L_exit);
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count);
    restore_arg_regs_using_thread();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // Side Effects:
  //   disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
  //   no-overlap entry point used by generate_conjoint_long_oop_copy().
  //
  address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry,
                                          const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register qword_count = rdx;  // elements count
    const Register end_from    = from; // source array end address
    const Register end_to      = rcx;  // destination array end address
    const Register saved_count = r11;
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    // Save no-overlap entry point for generate_conjoint_long_oop_copy()
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx
                                   // r9 is used to save r15_thread
    // 'from', 'to' and 'qword_count' are now valid

    DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT;
    if (dest_uninitialized) {
      decorators |= IS_DEST_UNINITIALIZED;
    }
    if (aligned) {
      decorators |= ARRAYCOPY_ALIGNED;
    }

    BasicType type = is_oop ? T_OBJECT : T_LONG;
    BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count);
    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true);

      // Copy from low to high addresses.  Use 'to' as scratch.
      __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
      __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
      __ negptr(qword_count);
      __ jmp(L_copy_bytes);

      // Copy trailing qwords
      __ BIND(L_copy_8_bytes);
      __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
      __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
      __ increment(qword_count);
      __ jcc(Assembler::notZero, L_copy_8_bytes);
    }
    if (is_oop) {
      __ jmp(L_exit);
    } else {
      restore_arg_regs_using_thread();
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
      __ xorptr(rax, rax); // return 0
      __ vzeroupper();
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }

    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true);
      // Copy in multi-byte chunks
      copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    }

    __ BIND(L_exit);
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count);
    restore_arg_regs_using_thread();
    if (is_oop) {
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
    }
    __ vzeroupper();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  address generate_conjoint_long_oop_copy(bool aligned, bool is_oop,
                                          address nooverlap_target, address *entry,
                                          const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register qword_count = rdx;  // elements count
    const Register saved_count = rcx;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_8);
    setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx
                                   // r9 is used to save r15_thread
    // 'from', 'to' and 'qword_count' are now valid

    DecoratorSet decorators = IN_HEAP | IS_ARRAY;
    if (dest_uninitialized) {
      decorators |= IS_DEST_UNINITIALIZED;
    }
    if (aligned) {
      decorators |= ARRAYCOPY_ALIGNED;
    }

    BasicType type = is_oop ? T_OBJECT : T_LONG;
    BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count);
    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true);

      __ jmp(L_copy_bytes);

      // Copy trailing qwords
      __ BIND(L_copy_8_bytes);
      __ movq(rax, Address(from, qword_count, Address::times_8, -8));
      __ movq(Address(to, qword_count, Address::times_8, -8), rax);
      __ decrement(qword_count);
      __ jcc(Assembler::notZero, L_copy_8_bytes);
    }
    if (is_oop) {
      __ jmp(L_exit);
    } else {
      restore_arg_regs_using_thread();
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
      __ xorptr(rax, rax); // return 0
      __ vzeroupper();
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }
    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true);

      // Copy in multi-byte chunks
      copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    }
    __ BIND(L_exit);
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count);
    restore_arg_regs_using_thread();
    if (is_oop) {
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
    }
    __ vzeroupper();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }


  // Helper for generating a dynamic type check.
  // Smashes no registers.
  void generate_type_check(Register sub_klass,
                           Register super_check_offset,
                           Register super_klass,
                           Label& L_success) {
    assert_different_registers(sub_klass, super_check_offset, super_klass);

    BLOCK_COMMENT("type_check:");

    Label L_miss;

    __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL,
                                     super_check_offset);
    __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL);

    // Fall through on failure!
    __ BIND(L_miss);
  }
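
  // Conceptually the helper tries (illustrative C-ish sketch of the fast
  // path only):
  //
  //   if (*(Klass**)((address)sub_klass + super_check_offset) == super_klass)
  //     goto L_success;   // cached or primary-supers hit
  //   // else the slow path scans sub_klass's secondary supers and falls
  //   // through to L_miss when super_klass is not found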

  //
  //  Generate checkcasting array copy stub
  //
  //  Input:
  //    c_rarg0   - source array address
  //    c_rarg1   - destination array address
  //    c_rarg2   - element count, treated as ssize_t, can be zero
  //    c_rarg3   - size_t ckoff (super_check_offset)
  // not Win64
  //    c_rarg4   - oop ckval (super_klass)
  // Win64
  //    rsp+40    - oop ckval (super_klass)
  //
  //  Output:
  //    rax ==  0  -  success
  //    rax == -1^K - failure, where K is partial transfer count
  //
  address generate_checkcast_copy(const char *name, address *entry,
                                  bool dest_uninitialized = false) {

    Label L_load_element, L_store_element, L_do_card_marks, L_done;

    // Input registers (after setup_arg_regs)
    const Register from        = rdi;   // source array address
    const Register to          = rsi;   // destination array address
    const Register length      = rdx;   // elements count
    const Register ckoff       = rcx;   // super_check_offset
    const Register ckval       = r8;    // super_klass

    // Registers used as temps (r13, r14 are save-on-entry)
    const Register end_from    = from;  // source array end address
    const Register end_to      = r13;   // destination array end address
    const Register count       = rdx;   // -(count_remaining)
    const Register r14_length  = r14;   // saved copy of length
    // End pointers are inclusive, and if length is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    const Register rax_oop    = rax;    // actual oop copied
    const Register r11_klass  = r11;    // oop._klass

    //---------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the two arrays are subtypes of Object[] but the
    // destination array type is not equal to or a supertype
    // of the source type.  Each element must be separately
    // checked.

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef ASSERT
    // caller guarantees that the arrays really are different
    // otherwise, we would have to make conjoint checks
    { Label L;
      array_overlap_test(L, TIMES_OOP);
      __ stop("checkcast_copy within a single array");
      __ bind(L);
    }
#endif //ASSERT

    setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
                       // ckoff => rcx, ckval => r8
                       // r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
    // last argument (#4) is on stack on Win64
    __ movptr(ckval, Address(rsp, 6 * wordSize));
#endif

    // Caller of this entry point must set up the argument registers.
    if (entry != NULL) {
      *entry = __ pc();
      BLOCK_COMMENT("Entry:");
    }

    // allocate spill slots for r13, r14
    enum {
      saved_r13_offset,
      saved_r14_offset,
      saved_r10_offset,
      saved_rbp_offset
    };
    __ subptr(rsp, saved_rbp_offset * wordSize);
    __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
    __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);
    __ movptr(Address(rsp, saved_r10_offset * wordSize), r10);

#ifdef ASSERT
    Label L2;
    __ get_thread(r14);
    __ cmpptr(r15_thread, r14);
    __ jcc(Assembler::equal, L2);
    __ stop("StubRoutines::call_stub: r15_thread is modified by call");
    __ bind(L2);
#endif // ASSERT

    // check that int operands are properly extended to size_t
    assert_clean_int(length, rax);
    assert_clean_int(ckoff, rax);

#ifdef ASSERT
    BLOCK_COMMENT("assert consistent ckoff/ckval");
    // The ckoff and ckval must be mutually consistent,
    // even though caller generates both.
    { Label L;
      int sco_offset = in_bytes(Klass::super_check_offset_offset());
      __ cmpl(ckoff, Address(ckval, sco_offset));
      __ jcc(Assembler::equal, L);
      __ stop("super_check_offset inconsistent");
      __ bind(L);
    }
#endif //ASSERT

    // Loop-invariant addresses.  They are exclusive end pointers.
    Address end_from_addr(from, length, TIMES_OOP, 0);
    Address   end_to_addr(to,   length, TIMES_OOP, 0);
    // Loop-variant addresses.  They assume post-incremented count < 0.
    Address from_element_addr(end_from, count, TIMES_OOP, 0);
    Address   to_element_addr(end_to,   count, TIMES_OOP, 0);

    DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST | ARRAYCOPY_DISJOINT;
    if (dest_uninitialized) {
      decorators |= IS_DEST_UNINITIALIZED;
    }

    BasicType type = T_OBJECT;
    BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->arraycopy_prologue(_masm, decorators, type, from, to, count);

    // Copy from low to high addresses, indexed from the end of each array.
    __ lea(end_from, end_from_addr);
    __ lea(end_to, end_to_addr);
    __ movptr(r14_length, length);   // save a copy of the length
    assert(length == count, "");     // else fix next line:
    __ negptr(count);                // negate and test the length
    __ jcc(Assembler::notZero, L_load_element);

    // Empty array:  Nothing to do.
    __ xorptr(rax, rax);             // return 0 on (trivial) success
    __ jmp(L_done);

    // ======== begin loop ========
    // (Loop is rotated; its entry is L_load_element.)
    // Loop control:
    //   for (count = -count; count != 0; count++)
    // Base pointers src, dst are biased by 8*(count-1), to last element.
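    // Illustrative C sketch of the rotated loop below (not generated code;
    // 'is_subtype' and 'dest_elem_klass' are stand-in names for the ckval
    // type check):
    //
    //   for (int64_t i = -(int64_t)length; i != 0; i++) {
    //     oop elem = end_from[i];        // end pointers are exclusive
    //     if (elem != NULL && !is_subtype(elem->klass(), dest_elem_klass))
    //       break;                       // partial copy; report -1^K
    //     end_to[i] = elem;
    //   }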
    __ align(OptoLoopAlignment);

    __ BIND(L_store_element);
    __ store_heap_oop(to_element_addr, rax_oop, noreg, noreg, AS_RAW);  // store the oop
    __ increment(count);               // increment the count toward zero
    __ jcc(Assembler::zero, L_do_card_marks);

    // ======== loop entry is here ========
    __ BIND(L_load_element);
    __ load_heap_oop(rax_oop, from_element_addr, noreg, noreg, AS_RAW); // load the oop
    __ testptr(rax_oop, rax_oop);
    __ jcc(Assembler::zero, L_store_element);

    __ load_klass(r11_klass, rax_oop, rscratch1); // query the object klass
    generate_type_check(r11_klass, ckoff, ckval, L_store_element);
    // ======== end loop ========

    // It was a real error; we must depend on the caller to finish the job.
    // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
    // Emit GC store barriers for the oops we have copied (r14 + rdx),
    // and report their number to the caller.
    assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1);
    Label L_post_barrier;
    __ addptr(r14_length, count);     // K = (original - remaining) oops
    __ movptr(rax, r14_length);       // save the value
    __ notptr(rax);                   // report (-1^K) to caller (does not affect flags)
    __ jccb(Assembler::notZero, L_post_barrier);
    __ jmp(L_done); // K == 0, nothing was copied, skip post barrier

    // Come here on success only.
    __ BIND(L_do_card_marks);
    __ xorptr(rax, rax);              // return 0 on success

    __ BIND(L_post_barrier);
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, r14_length);

    // Common exit point (success or failure).
    __ BIND(L_done);
    __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
    __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
    __ movptr(r10, Address(rsp, saved_r10_offset * wordSize));
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
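
  // Return-value encoding used above (illustrative): on failure the stub
  // returns ~K, i.e. -1-K, where K elements were copied before the type
  // check failed; the caller recovers K as ~rax (e.g. K == 3 => rax == -4).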

  //
  //  Generate 'unsafe' array copy stub
  //  Though just as safe as the other stubs, it takes an unscaled
  //  size_t argument instead of an element count.
  //
  //  Input:
  //    c_rarg0   - source array address
  //    c_rarg1   - destination array address
  //    c_rarg2   - byte count, treated as ssize_t, can be zero
  //
  //  Examines the alignment of the operands and dispatches
  //  to a long, int, short, or byte copy loop.
  //
  address generate_unsafe_copy(const char *name,
                               address byte_copy_entry, address short_copy_entry,
                               address int_copy_entry, address long_copy_entry) {

    Label L_long_aligned, L_int_aligned, L_short_aligned;

    // Input registers (before setup_arg_regs)
    const Register from        = c_rarg0;  // source array address
    const Register to          = c_rarg1;  // destination array address
    const Register size        = c_rarg2;  // byte count (size_t)

    // Register used as a temp
    const Register bits        = rax;      // test copy of low bits

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);

    __ mov(bits, from);
    __ orptr(bits, to);
    __ orptr(bits, size);

    __ testb(bits, BytesPerLong-1);
    __ jccb(Assembler::zero, L_long_aligned);

    __ testb(bits, BytesPerInt-1);
    __ jccb(Assembler::zero, L_int_aligned);

    __ testb(bits, BytesPerShort-1);
    __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));

    __ BIND(L_short_aligned);
    __ shrptr(size, LogBytesPerShort); // size => short_count
    __ jump(RuntimeAddress(short_copy_entry));

    __ BIND(L_int_aligned);
    __ shrptr(size, LogBytesPerInt); // size => int_count
    __ jump(RuntimeAddress(int_copy_entry));

    __ BIND(L_long_aligned);
    __ shrptr(size, LogBytesPerLong); // size => qword_count
    __ jump(RuntimeAddress(long_copy_entry));

    return start;
  }
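
  // Equivalent C logic for the dispatch above (illustrative only; OR-ing
  // the two addresses and the byte count lets one test check the alignment
  // of all three at once):
  //
  //   uintptr_t bits = (uintptr_t)from | (uintptr_t)to | size;
  //   if      ((bits & (BytesPerLong  - 1)) == 0) goto long_copy;  // size >>= 3
  //   else if ((bits & (BytesPerInt   - 1)) == 0) goto int_copy;   // size >>= 2
  //   else if ((bits & (BytesPerShort - 1)) == 0) goto short_copy; // size >>= 1
  //   else                                        goto byte_copy;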

  // Perform range checks on the proposed arraycopy.
  // Kills temp, but nothing else.
  // Also, clean the sign bits of src_pos and dst_pos.
  void arraycopy_range_checks(Register src,     // source array oop (c_rarg0)
                              Register src_pos, // source position (c_rarg1)
                              Register dst,     // destination array oop (c_rarg2)
                              Register dst_pos, // destination position (c_rarg3)
                              Register length,
                              Register temp,
                              Label& L_failed) {
    BLOCK_COMMENT("arraycopy_range_checks:");

    //  if (src_pos + length > arrayOop(src)->length()) FAIL;
    __ movl(temp, length);
    __ addl(temp, src_pos); // src_pos + length
    __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    //  if (dst_pos + length > arrayOop(dst)->length()) FAIL;
    __ movl(temp, length);
    __ addl(temp, dst_pos); // dst_pos + length
    __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
    // Move with sign extension can be used since they are positive.
    __ movslq(src_pos, src_pos);
    __ movslq(dst_pos, dst_pos);

    BLOCK_COMMENT("arraycopy_range_checks done");
  }

  //
  //  Generate generic array copy stubs
  //
  //  Input:
  //    c_rarg0    -  src oop
  //    c_rarg1    -  src_pos (32-bits)
  //    c_rarg2    -  dst oop
  //    c_rarg3    -  dst_pos (32-bits)
  // not Win64
  //    c_rarg4    -  element count (32-bits)
  // Win64
  //    rsp+40     -  element count (32-bits)
  //
  //  Output:
  //    rax ==  0  -  success
  //    rax == -1^K - failure, where K is partial transfer count
  //
  address generate_generic_copy(const char *name,
                                address byte_copy_entry, address short_copy_entry,
                                address int_copy_entry, address oop_copy_entry,
                                address long_copy_entry, address checkcast_copy_entry) {

    Label L_failed, L_failed_0, L_objArray;
    Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;

    // Input registers
    const Register src        = c_rarg0;  // source array oop
    const Register src_pos    = c_rarg1;  // source position
    const Register dst        = c_rarg2;  // destination array oop
    const Register dst_pos    = c_rarg3;  // destination position
#ifndef _WIN64
    const Register length     = c_rarg4;
    const Register rklass_tmp = r9;   // load_klass
#else
    const Address  length(rsp, 6 * wordSize); // elements count is on stack on Win64
    const Register rklass_tmp = rdi;  // load_klass
#endif

    { int modulus = CodeEntryAlignment;
      int target  = modulus - 5; // 5 = sizeof jmp(L_failed)
      int advance = target - (__ offset() % modulus);
      if (advance < 0)  advance += modulus;
      if (advance > 0)  __ nop(advance);
    }
    StubCodeMark mark(this, "StubRoutines", name);

    // Short-hop target to L_failed.  Makes for denser prologue code.
    __ BIND(L_failed_0);
    __ jmp(L_failed);
    assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");

    __ align(CodeEntryAlignment);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_generic_array_copy_ctr);

    //-----------------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the following conditions are met:
    //
    // (1) src and dst must not be null.
    // (2) src_pos must not be negative.
    // (3) dst_pos must not be negative.
    // (4) length  must not be negative.
    // (5) src klass and dst klass should be the same and not NULL.
    // (6) src and dst should be arrays.
    // (7) src_pos + length must not exceed length of src.
    // (8) dst_pos + length must not exceed length of dst.
    //

    // if (src == NULL) return -1;
    __ testptr(src, src);         // src oop
    size_t j1off = __ offset();
    __ jccb(Assembler::zero, L_failed_0);

    // if (src_pos < 0) return -1;
    __ testl(src_pos, src_pos); // src_pos (32-bits)
    __ jccb(Assembler::negative, L_failed_0);

    // if (dst == NULL) return -1;
    __ testptr(dst, dst);         // dst oop
    __ jccb(Assembler::zero, L_failed_0);

    // if (dst_pos < 0) return -1;
    __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
    size_t j4off = __ offset();
    __ jccb(Assembler::negative, L_failed_0);

    // The first four tests are very dense code,
    // but not quite dense enough to put four
    // jumps in a 16-byte instruction fetch buffer.
    // That's good, because some branch predictors
    // do not like jumps so close together.
    // Make sure of this.
    guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");

    // registers used as temp
    const Register r11_length    = r11; // elements count to copy
    const Register r10_src_klass = r10; // array klass

    // if (length < 0) return -1;
    __ movl(r11_length, length);        // length (elements count, 32-bits value)
    __ testl(r11_length, r11_length);
    __ jccb(Assembler::negative, L_failed_0);

    __ load_klass(r10_src_klass, src, rklass_tmp);
#ifdef ASSERT
    //  assert(src->klass() != NULL);
    {
      BLOCK_COMMENT("assert klasses not null {");
      Label L1, L2;
      __ testptr(r10_src_klass, r10_src_klass);
      __ jcc(Assembler::notZero, L2);   // it is broken if klass is NULL
      __ bind(L1);
      __ stop("broken null klass");
      __ bind(L2);
      __ load_klass(rax, dst, rklass_tmp);
      __ cmpq(rax, 0);
      __ jcc(Assembler::equal, L1);     // this would be broken also
      BLOCK_COMMENT("} assert klasses not null done");
    }
#endif

    // Load layout helper (32-bits)
    //
    //  |array_tag|     | header_size | element_type |     |log2_element_size|
    // 32        30    24            16              8     2                 0
    //
    //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
    //

    const int lh_offset = in_bytes(Klass::layout_helper_offset());

    // Handle objArrays completely differently...
    const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
    __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh);
    __ jcc(Assembler::equal, L_objArray);

    // if (src->klass() != dst->klass()) return -1;
    __ load_klass(rax, dst, rklass_tmp);
    __ cmpq(r10_src_klass, rax);
    __ jcc(Assembler::notEqual, L_failed);

    const Register rax_lh = rax;  // layout helper
    __ movl(rax_lh, Address(r10_src_klass, lh_offset));

    // if (!src->is_Array()) return -1;
    __ cmpl(rax_lh, Klass::_lh_neutral_value);
    __ jcc(Assembler::greaterEqual, L_failed);

    // At this point, it is known to be a typeArray (array_tag 0x3).
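    // Decoding sketch for the layout helper used below (illustrative C,
    // mirroring the shifts and masks the stub applies):
    //
    //   jint lh          = klass->layout_helper();
    //   int  hsize       = (lh >> Klass::_lh_header_size_shift) & Klass::_lh_header_size_mask;
    //   int  log2_elsize = lh & Klass::_lh_log2_element_size_mask;
    //   src_addr = (address)src + hsize + ((intptr_t)src_pos << log2_elsize);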
#ifdef ASSERT
    {
      BLOCK_COMMENT("assert primitive array {");
      Label L;
      __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
      __ jcc(Assembler::greaterEqual, L);
      __ stop("must be a primitive array");
      __ bind(L);
      BLOCK_COMMENT("} assert primitive array done");
    }
#endif

    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                           r10, L_failed);

    // TypeArrayKlass
    //
    // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
    // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
    //

    const Register r10_offset = r10;    // array offset
    const Register rax_elsize = rax_lh; // element size

    __ movl(r10_offset, rax_lh);
    __ shrl(r10_offset, Klass::_lh_header_size_shift);
    __ andptr(r10_offset, Klass::_lh_header_size_mask);   // array_offset
    __ addptr(src, r10_offset);           // src array offset
    __ addptr(dst, r10_offset);           // dst array offset
    BLOCK_COMMENT("choose copy loop based on element size");
    __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize

    // next registers should be set before the jump to corresponding stub
    const Register from     = c_rarg0;  // source array address
    const Register to       = c_rarg1;  // destination array address
    const Register count    = c_rarg2;  // elements count

    // 'from', 'to', 'count' registers should be set in such order
    // since they are the same as 'src', 'src_pos', 'dst'.

    __ BIND(L_copy_bytes);
    __ cmpl(rax_elsize, 0);
    __ jccb(Assembler::notEqual, L_copy_shorts);
    __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_1, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(byte_copy_entry));

    __ BIND(L_copy_shorts);
    __ cmpl(rax_elsize, LogBytesPerShort);
    __ jccb(Assembler::notEqual, L_copy_ints);
    __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_2, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(short_copy_entry));

    __ BIND(L_copy_ints);
    __ cmpl(rax_elsize, LogBytesPerInt);
    __ jccb(Assembler::notEqual, L_copy_longs);
    __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_4, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(int_copy_entry));

    __ BIND(L_copy_longs);
#ifdef ASSERT
    {
      BLOCK_COMMENT("assert long copy {");
      Label L;
      __ cmpl(rax_elsize, LogBytesPerLong);
      __ jcc(Assembler::equal, L);
      __ stop("must be long copy, but elsize is wrong");
      __ bind(L);
      BLOCK_COMMENT("} assert long copy done");
    }
#endif
    __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_8, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(long_copy_entry));

    // ObjArrayKlass
    __ BIND(L_objArray);
    // live at this point:  r10_src_klass, r11_length, src[_pos], dst[_pos]

    Label L_plain_copy, L_checkcast_copy;
    //  test array classes for subtyping
    __ load_klass(rax, dst, rklass_tmp);
    __ cmpq(r10_src_klass, rax); // usual case is exact equality
    __ jcc(Assembler::notEqual, L_checkcast_copy);

    // Identically typed arrays can be copied without element-wise checks.
    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                           r10, L_failed);

    __ lea(from, Address(src, src_pos, TIMES_OOP,
                 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
    __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
                 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
    __ movl2ptr(count, r11_length); // length
    __ BIND(L_plain_copy);
    __ jump(RuntimeAddress(oop_copy_entry));

    __ BIND(L_checkcast_copy);
    // live at this point:  r10_src_klass, r11_length, rax (dst_klass)
    {
      // Before looking at dst.length, make sure dst is also an objArray.
      __ cmpl(Address(rax, lh_offset), objArray_lh);
      __ jcc(Assembler::notEqual, L_failed);

      // It is safe to examine both src.length and dst.length.
      arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                             rax, L_failed);

      const Register r11_dst_klass = r11;
      __ load_klass(r11_dst_klass, dst, rklass_tmp); // reload

      // Marshal the base address arguments now, freeing registers.
      __ lea(from, Address(src, src_pos, TIMES_OOP,
                   arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
                   arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ movl(count, length);           // length (reloaded)
      Register sco_temp = c_rarg3;      // this register is free now
      assert_different_registers(from, to, count, sco_temp,
                                 r11_dst_klass, r10_src_klass);
      assert_clean_int(count, sco_temp);

      // Generate the type check.
      const int sco_offset = in_bytes(Klass::super_check_offset_offset());
      __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
      assert_clean_int(sco_temp, rax);
      generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);

      // Fetch destination element klass from the ObjArrayKlass header.
      int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
      __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
      __ movl(  sco_temp,      Address(r11_dst_klass, sco_offset));
      assert_clean_int(sco_temp, rax);

      // the checkcast_copy loop needs two extra arguments:
      assert(c_rarg3 == sco_temp, "#3 already in place");
      // Set up arguments for checkcast_copy_entry.
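      // (Illustrative note, not generated code:) beyond from/to/count the
      // checkcast stub expects ckoff in c_rarg3 and ckval in r8, which the
      // two instructions below put in place.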
      setup_arg_regs(4);
      __ movptr(r8, r11_dst_klass);  // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris
      __ jump(RuntimeAddress(checkcast_copy_entry));
    }

    __ BIND(L_failed);
    __ xorptr(rax, rax);
    __ notptr(rax); // return -1
    __ leave();   // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_data_cache_writeback() {
    const Register src        = c_rarg0;  // source address

    __ align(CodeEntryAlignment);

    StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback");

    address start = __ pc();
    __ enter();
    __ cache_wb(Address(src, 0));
    __ leave();
    __ ret(0);

    return start;
  }

  address generate_data_cache_writeback_sync() {
    const Register is_pre     = c_rarg0;  // pre or post sync

    __ align(CodeEntryAlignment);

    StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback_sync");

    // pre wbsync is a no-op
    // post wbsync translates to an sfence

    Label skip;
    address start = __ pc();
    __ enter();
    __ cmpl(is_pre, 0);
    __ jcc(Assembler::notEqual, skip);
    __ cache_wbsync(false);
    __ bind(skip);
    __ leave();
    __ ret(0);

    return start;
  }

  void generate_arraycopy_stubs() {
    address entry;
    address entry_jbyte_arraycopy;
    address entry_jshort_arraycopy;
    address entry_jint_arraycopy;
    address entry_oop_arraycopy;
    address entry_jlong_arraycopy;
    address entry_checkcast_arraycopy;

    StubRoutines::_jbyte_disjoint_arraycopy  = generate_disjoint_byte_copy(false, &entry,
                                                                           "jbyte_disjoint_arraycopy");
    StubRoutines::_jbyte_arraycopy           = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy,
                                                                           "jbyte_arraycopy");

    StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry,
                                                                            "jshort_disjoint_arraycopy");
    StubRoutines::_jshort_arraycopy          = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy,
                                                                            "jshort_arraycopy");

    StubRoutines::_jint_disjoint_arraycopy   = generate_disjoint_int_oop_copy(false, false, &entry,
                                                                              "jint_disjoint_arraycopy");
    StubRoutines::_jint_arraycopy            = generate_conjoint_int_oop_copy(false, false, entry,
                                                                              &entry_jint_arraycopy, "jint_arraycopy");

    StubRoutines::_jlong_disjoint_arraycopy  = generate_disjoint_long_oop_copy(false, false, &entry,
                                                                               "jlong_disjoint_arraycopy");
    StubRoutines::_jlong_arraycopy           = generate_conjoint_long_oop_copy(false, false, entry,
                                                                               &entry_jlong_arraycopy, "jlong_arraycopy");


    if (UseCompressedOops) {
      StubRoutines::_oop_disjoint_arraycopy  = generate_disjoint_int_oop_copy(false, true, &entry,
                                                                              "oop_disjoint_arraycopy");
      StubRoutines::_oop_arraycopy           = generate_conjoint_int_oop_copy(false, true, entry,
                                                                              &entry_oop_arraycopy, "oop_arraycopy");
      StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry,
                                                                                    "oop_disjoint_arraycopy_uninit",
                                                                                    /*dest_uninitialized*/true);
      StubRoutines::_oop_arraycopy_uninit    = generate_conjoint_int_oop_copy(false, true, entry,
                                                                              NULL, "oop_arraycopy_uninit",
                                                                              /*dest_uninitialized*/true);
    } else {
      StubRoutines::_oop_disjoint_arraycopy  = generate_disjoint_long_oop_copy(false, true, &entry,
                                                                               "oop_disjoint_arraycopy");
      StubRoutines::_oop_arraycopy           = generate_conjoint_long_oop_copy(false, true, entry,
                                                                               &entry_oop_arraycopy, "oop_arraycopy");
      StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry,
                                                                                     "oop_disjoint_arraycopy_uninit",
                                                                                     /*dest_uninitialized*/true);
      StubRoutines::_oop_arraycopy_uninit    = generate_conjoint_long_oop_copy(false, true, entry,
                                                                               NULL, "oop_arraycopy_uninit",
                                                                               /*dest_uninitialized*/true);
    }

    StubRoutines::_checkcast_arraycopy        = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
    StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
                                                                        /*dest_uninitialized*/true);

    StubRoutines::_unsafe_arraycopy   = generate_unsafe_copy("unsafe_arraycopy",
                                                             entry_jbyte_arraycopy,
                                                             entry_jshort_arraycopy,
                                                             entry_jint_arraycopy,
                                                             entry_jlong_arraycopy);
    StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
                                                             entry_jbyte_arraycopy,
                                                             entry_jshort_arraycopy,
                                                             entry_jint_arraycopy,
                                                             entry_oop_arraycopy,
                                                             entry_jlong_arraycopy,
                                                             entry_checkcast_arraycopy);

    StubRoutines::_jbyte_fill          = generate_fill(T_BYTE,  false, "jbyte_fill");
    StubRoutines::_jshort_fill         = generate_fill(T_SHORT, false, "jshort_fill");
    StubRoutines::_jint_fill           = generate_fill(T_INT,   false, "jint_fill");
    StubRoutines::_arrayof_jbyte_fill  = generate_fill(T_BYTE,  true,  "arrayof_jbyte_fill");
    StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true,  "arrayof_jshort_fill");
    StubRoutines::_arrayof_jint_fill   = generate_fill(T_INT,   true,  "arrayof_jint_fill");

    // We don't generate specialized code for HeapWord-aligned source
    // arrays, so just use the code we've already generated
    StubRoutines::_arrayof_jbyte_disjoint_arraycopy  = StubRoutines::_jbyte_disjoint_arraycopy;
    StubRoutines::_arrayof_jbyte_arraycopy           = StubRoutines::_jbyte_arraycopy;

    StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy;
    StubRoutines::_arrayof_jshort_arraycopy          = StubRoutines::_jshort_arraycopy;

    StubRoutines::_arrayof_jint_disjoint_arraycopy   = StubRoutines::_jint_disjoint_arraycopy;
    StubRoutines::_arrayof_jint_arraycopy            = StubRoutines::_jint_arraycopy;

    StubRoutines::_arrayof_jlong_disjoint_arraycopy  = StubRoutines::_jlong_disjoint_arraycopy;
    StubRoutines::_arrayof_jlong_arraycopy           = StubRoutines::_jlong_arraycopy;

    StubRoutines::_arrayof_oop_disjoint_arraycopy    = StubRoutines::_oop_disjoint_arraycopy;
    StubRoutines::_arrayof_oop_arraycopy             = StubRoutines::_oop_arraycopy;

    StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit;
    StubRoutines::_arrayof_oop_arraycopy_uninit          = StubRoutines::_oop_arraycopy_uninit;
  }

  // AES intrinsic stubs
  enum {AESBlockSize = 16};

  address generate_key_shuffle_mask() {
    __ align(16);
    StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask");
    address start = __ pc();
    __ emit_data64( 0x0405060700010203, relocInfo::none );
    __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none );
    return start;
  }

  address generate_counter_shuffle_mask() {
    __ align(16);
    StubCodeMark mark(this, "StubRoutines", "counter_shuffle_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }
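
  // (Illustrative note:) both constants above are pshufb control vectors:
  // key_shuffle_mask reverses the bytes within each 4-byte word (AES key
  // words are byte-swapped relative to how Java expands them), while
  // counter_shuffle_mask reverses all 16 bytes so a CTR counter can be
  // bumped with ordinary integer adds.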

  // Utility routine for loading a 128-bit key word in little-endian format;
  // can optionally specify that the shuffle mask is already in an xmm register
  void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
    __ movdqu(xmmdst, Address(key, offset));
    if (xmm_shuf_mask != NULL) {
      __ pshufb(xmmdst, xmm_shuf_mask);
    } else {
      __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    }
  }

  // Utility routine for incrementing a 128-bit counter (the iv in CTR mode)
  void inc_counter(Register reg, XMMRegister xmmdst, int inc_delta, Label& next_block) {
    __ pextrq(reg, xmmdst, 0x0);
    __ addq(reg, inc_delta);
    __ pinsrq(xmmdst, reg, 0x0);
    __ jcc(Assembler::carryClear, next_block); // jump if no carry
    __ pextrq(reg, xmmdst, 0x01); // Carry
    __ addq(reg, 0x01);
    __ pinsrq(xmmdst, reg, 0x01); // Carry end
    __ BIND(next_block);          // next instruction
  }
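
  // Illustrative C equivalent of inc_counter above (not generated code;
  // the xmm register holds the 128-bit counter as two 64-bit lanes):
  //
  //   uint64_t lo = ctr[0], hi = ctr[1];
  //   lo += (uint64_t)inc_delta;
  //   if (lo < (uint64_t)inc_delta) hi += 1;  // propagate the carry
  //   ctr[0] = lo; ctr[1] = hi;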

  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //
  address generate_aescrypt_encryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
    Label L_doLast;
    address start = __ pc();

    const Register from        = c_rarg0;  // source array address
    const Register to          = c_rarg1;  // destination array address
    const Register key         = c_rarg2;  // key array address
    const Register keylen      = rax;

    const XMMRegister xmm_result = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;
    // On win64 xmm6-xmm15 must be preserved so don't use them.
    const XMMRegister xmm_temp1  = xmm2;
    const XMMRegister xmm_temp2  = xmm3;
    const XMMRegister xmm_temp3  = xmm4;
    const XMMRegister xmm_temp4  = xmm5;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    __ movdqu(xmm_result, Address(from, 0));  // get 16 bytes of input

    // For encryption, the java expanded key ordering is just what we need;
    // we don't know if the key is aligned, hence not using load-execute form

    load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask);
    __ pxor(xmm_result, xmm_temp1);

    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);
    __ aesenc(xmm_result, xmm_temp3);
    __ aesenc(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);
    __ aesenc(xmm_result, xmm_temp3);
    __ aesenc(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);

    __ cmpl(keylen, 44);
    __ jccb(Assembler::equal, L_doLast);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);

    __ cmpl(keylen, 52);
    __ jccb(Assembler::equal, L_doLast);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);

    __ BIND(L_doLast);
    __ aesenc(xmm_result, xmm_temp1);
    __ aesenclast(xmm_result, xmm_temp2);
    __ movdqu(Address(to, 0), xmm_result);  // store the result
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
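
  // The keylen checks above dispatch on the AES flavor; the expanded key
  // holds 4*(rounds+1) ints (illustrative summary):
  //
  //   44 ints -> AES-128, 10 rounds, aesenclast on the key at 0xa0
  //   52 ints -> AES-192, 12 rounds, aesenclast on the key at 0xc0
  //   60 ints -> AES-256, 14 rounds, aesenclast on the key at 0xe0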

  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //
  address generate_aescrypt_decryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
    Label L_doLast;
    address start = __ pc();

    const Register from        = c_rarg0;  // source array address
    const Register to          = c_rarg1;  // destination array address
    const Register key         = c_rarg2;  // key array address
    const Register keylen      = rax;

    const XMMRegister xmm_result = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;
    // On win64 xmm6-xmm15 must be preserved so don't use them.
    const XMMRegister xmm_temp1  = xmm2;
    const XMMRegister xmm_temp2  = xmm3;
    const XMMRegister xmm_temp3  = xmm4;
    const XMMRegister xmm_temp4  = xmm5;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    __ movdqu(xmm_result, Address(from, 0));

    // for decryption java expanded key ordering is rotated one position from what we want
    // so we start from 0x10 here and hit 0x00 last
    // we don't know if the key is aligned, hence not using load-execute form
    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);

    __ pxor  (xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);
    __ aesdec(xmm_result, xmm_temp3);
    __ aesdec(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);
    __ aesdec(xmm_result, xmm_temp3);
    __ aesdec(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask);

    __ cmpl(keylen, 44);
    __ jccb(Assembler::equal, L_doLast);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);

    __ cmpl(keylen, 52);
    __ jccb(Assembler::equal, L_doLast);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);

    __ BIND(L_doLast);
    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    // for decryption the aesdeclast operation is always on key+0x00
    __ aesdeclast(xmm_result, xmm_temp3);
    __ movdqu(Address(to, 0), xmm_result);  // store the result
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
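
  // CBC encryption as implemented below, in a C-style sketch (illustrative
  // only; AES_encrypt stands for the preloaded-key round sequence):
  //
  //   uint8_t r[16] = rvec;                      // chaining value
  //   for (pos = 0; pos < len; pos += 16) {
  //     r = AES_encrypt(r ^ from[pos..pos+15], key);
  //     to[pos..pos+15] = r;
  //   }
  //   rvec = r;                                  // saved for the next call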
// destination array address 3196 const Register key = c_rarg2; // key array address 3197 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3198 // and left with the results of the last encryption block 3199 #ifndef _WIN64 3200 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3201 #else 3202 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3203 const Register len_reg = r11; // pick the volatile windows register 3204 #endif 3205 const Register pos = rax; 3206 3207 // xmm register assignments for the loops below 3208 const XMMRegister xmm_result = xmm0; 3209 const XMMRegister xmm_temp = xmm1; 3210 // keys 0-10 preloaded into xmm2-xmm12 3211 const int XMM_REG_NUM_KEY_FIRST = 2; 3212 const int XMM_REG_NUM_KEY_LAST = 15; 3213 const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3214 const XMMRegister xmm_key10 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10); 3215 const XMMRegister xmm_key11 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11); 3216 const XMMRegister xmm_key12 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12); 3217 const XMMRegister xmm_key13 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13); 3218 3219 __ enter(); // required for proper stackwalking of RuntimeStub frame 3220 3221 #ifdef _WIN64 3222 // on win64, fill len_reg from stack position 3223 __ movl(len_reg, len_mem); 3224 #else 3225 __ push(len_reg); // Save 3226 #endif 3227 3228 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 3229 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3230 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0 3231 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) { 3232 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3233 offset += 0x10; 3234 } 3235 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 3236 3237 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3238 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3239 __ cmpl(rax, 44); 3240 __ jcc(Assembler::notEqual, L_key_192_256); 3241 3242 // 128 bit code follows here 3243 __ movptr(pos, 0); 3244 __ align(OptoLoopAlignment); 3245 3246 __ BIND(L_loopTop_128); 3247 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3248 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3249 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3250 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) { 3251 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3252 } 3253 __ aesenclast(xmm_result, xmm_key10); 3254 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3255 // no need to store r to memory until we exit 3256 __ addptr(pos, AESBlockSize); 3257 __ subptr(len_reg, AESBlockSize); 3258 __ jcc(Assembler::notEqual, L_loopTop_128); 3259 3260 __ BIND(L_exit); 3261 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object 3262 3263 #ifdef _WIN64 3264 __ movl(rax, len_mem); 3265 #else 3266 __ pop(rax); // return length 3267 #endif 3268 __ leave(); // required for proper stackwalking of RuntimeStub frame 3269 __ ret(0); 3270 3271 __ BIND(L_key_192_256); 3272 // here rax = len in ints of AESCrypt.KLE 
array (52=192, or 60=256)
3273     load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask);
3274     load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask);
3275     __ cmpl(rax, 52);
3276     __ jcc(Assembler::notEqual, L_key_256);
3277
3278     // 192-bit code follows here (could be changed to use more xmm registers)
3279     __ movptr(pos, 0);
3280     __ align(OptoLoopAlignment);
3281
3282     __ BIND(L_loopTop_192);
3283     __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
3284     __ pxor  (xmm_result, xmm_temp);  // xor with the current r vector
3285     __ pxor  (xmm_result, xmm_key0);  // do the aes rounds
3286     for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) {
3287       __ aesenc(xmm_result, as_XMMRegister(rnum));
3288     }
3289     __ aesenclast(xmm_result, xmm_key12);
3290     __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3291     // no need to store r to memory until we exit
3292     __ addptr(pos, AESBlockSize);
3293     __ subptr(len_reg, AESBlockSize);
3294     __ jcc(Assembler::notEqual, L_loopTop_192);
3295     __ jmp(L_exit);
3296
3297     __ BIND(L_key_256);
3298     // 256-bit code follows here (could be changed to use more xmm registers)
3299     load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask);
3300     __ movptr(pos, 0);
3301     __ align(OptoLoopAlignment);
3302
3303     __ BIND(L_loopTop_256);
3304     __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
3305     __ pxor  (xmm_result, xmm_temp);  // xor with the current r vector
3306     __ pxor  (xmm_result, xmm_key0);  // do the aes rounds
3307     for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) {
3308       __ aesenc(xmm_result, as_XMMRegister(rnum));
3309     }
3310     load_key(xmm_temp, key, 0xe0);
3311     __ aesenclast(xmm_result, xmm_temp);
3312     __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3313     // no need to store r to memory until we exit
3314     __ addptr(pos, AESBlockSize);
3315     __ subptr(len_reg, AESBlockSize);
3316     __ jcc(Assembler::notEqual, L_loopTop_256);
3317     __ jmp(L_exit);
3318
3319     return start;
3320   }
3321
3322   // Safefetch stubs.
3323   void generate_safefetch(const char* name, int size, address* entry,
3324                           address* fault_pc, address* continuation_pc) {
3325     // safefetch signatures:
3326     //   int      SafeFetch32(int*      adr, int      errValue);
3327     //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
3328     //
3329     // arguments:
3330     //   c_rarg0 = adr
3331     //   c_rarg1 = errValue
3332     //
3333     // result:
3334     //   rax = *adr or errValue
3335
3336     StubCodeMark mark(this, "StubRoutines", name);
3337
3338     // Entry point, pc or function descriptor.
3339     *entry = __ pc();
3340
3341     // Load *adr into c_rarg1, may fault.
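    // (If this load faults, the VM's signal handler matches the pc
    //  recorded in *fault_pc and resumes execution at *continuation_pc,
    //  where the move into rax below still sees the untouched errValue
    //  in c_rarg1, so the stub returns the error value instead of
    //  crashing.)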
3342 *fault_pc = __ pc(); 3343 switch (size) { 3344 case 4: 3345 // int32_t 3346 __ movl(c_rarg1, Address(c_rarg0, 0)); 3347 break; 3348 case 8: 3349 // int64_t 3350 __ movq(c_rarg1, Address(c_rarg0, 0)); 3351 break; 3352 default: 3353 ShouldNotReachHere(); 3354 } 3355 3356 // return errValue or *adr 3357 *continuation_pc = __ pc(); 3358 __ movq(rax, c_rarg1); 3359 __ ret(0); 3360 } 3361 3362 // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time 3363 // to hide instruction latency 3364 // 3365 // Arguments: 3366 // 3367 // Inputs: 3368 // c_rarg0 - source byte array address 3369 // c_rarg1 - destination byte array address 3370 // c_rarg2 - K (key) in little endian int array 3371 // c_rarg3 - r vector byte array address 3372 // c_rarg4 - input length 3373 // 3374 // Output: 3375 // rax - input length 3376 // 3377 address generate_cipherBlockChaining_decryptAESCrypt_Parallel() { 3378 assert(UseAES, "need AES instructions and misaligned SSE support"); 3379 __ align(CodeEntryAlignment); 3380 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 3381 address start = __ pc(); 3382 3383 const Register from = c_rarg0; // source array address 3384 const Register to = c_rarg1; // destination array address 3385 const Register key = c_rarg2; // key array address 3386 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3387 // and left with the results of the last encryption block 3388 #ifndef _WIN64 3389 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3390 #else 3391 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3392 const Register len_reg = r11; // pick the volatile windows register 3393 #endif 3394 const Register pos = rax; 3395 3396 const int PARALLEL_FACTOR = 4; 3397 const int ROUNDS[3] = { 10, 12, 14 }; // aes rounds for key128, key192, key256 3398 3399 Label L_exit; 3400 Label L_singleBlock_loopTopHead[3]; // 128, 192, 256 3401 Label L_singleBlock_loopTopHead2[3]; // 128, 192, 256 3402 Label L_singleBlock_loopTop[3]; // 128, 192, 256 3403 Label L_multiBlock_loopTopHead[3]; // 128, 192, 256 3404 Label L_multiBlock_loopTop[3]; // 128, 192, 256 3405 3406 // keys 0-10 preloaded into xmm5-xmm15 3407 const int XMM_REG_NUM_KEY_FIRST = 5; 3408 const int XMM_REG_NUM_KEY_LAST = 15; 3409 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3410 const XMMRegister xmm_key_last = as_XMMRegister(XMM_REG_NUM_KEY_LAST); 3411 3412 __ enter(); // required for proper stackwalking of RuntimeStub frame 3413 3414 #ifdef _WIN64 3415 // on win64, fill len_reg from stack position 3416 __ movl(len_reg, len_mem); 3417 #else 3418 __ push(len_reg); // Save 3419 #endif 3420 __ push(rbx); 3421 // the java expanded key ordering is rotated one position from what we want 3422 // so we start from 0x10 here and hit 0x00 last 3423 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 3424 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3425 // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00 3426 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) { 3427 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3428 offset += 0x10; 3429 } 3430 load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask); 3431 3432 const XMMRegister xmm_prev_block_cipher = xmm1; // holds cipher of previous block 3433 3434 // registers holding the four results in 
the parallelized loop 3435 const XMMRegister xmm_result0 = xmm0; 3436 const XMMRegister xmm_result1 = xmm2; 3437 const XMMRegister xmm_result2 = xmm3; 3438 const XMMRegister xmm_result3 = xmm4; 3439 3440 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec 3441 3442 __ xorptr(pos, pos); 3443 3444 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3445 __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3446 __ cmpl(rbx, 52); 3447 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[1]); 3448 __ cmpl(rbx, 60); 3449 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[2]); 3450 3451 #define DoFour(opc, src_reg) \ 3452 __ opc(xmm_result0, src_reg); \ 3453 __ opc(xmm_result1, src_reg); \ 3454 __ opc(xmm_result2, src_reg); \ 3455 __ opc(xmm_result3, src_reg); \ 3456 3457 for (int k = 0; k < 3; ++k) { 3458 __ BIND(L_multiBlock_loopTopHead[k]); 3459 if (k != 0) { 3460 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left 3461 __ jcc(Assembler::less, L_singleBlock_loopTopHead2[k]); 3462 } 3463 if (k == 1) { 3464 __ subptr(rsp, 6 * wordSize); 3465 __ movdqu(Address(rsp, 0), xmm15); //save last_key from xmm15 3466 load_key(xmm15, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0 3467 __ movdqu(Address(rsp, 2 * wordSize), xmm15); 3468 load_key(xmm1, key, 0xc0); // 0xc0; 3469 __ movdqu(Address(rsp, 4 * wordSize), xmm1); 3470 } else if (k == 2) { 3471 __ subptr(rsp, 10 * wordSize); 3472 __ movdqu(Address(rsp, 0), xmm15); //save last_key from xmm15 3473 load_key(xmm15, key, 0xd0); // 0xd0; 256-bit key goes upto 0xe0 3474 __ movdqu(Address(rsp, 6 * wordSize), xmm15); 3475 load_key(xmm1, key, 0xe0); // 0xe0; 3476 __ movdqu(Address(rsp, 8 * wordSize), xmm1); 3477 load_key(xmm15, key, 0xb0); // 0xb0; 3478 __ movdqu(Address(rsp, 2 * wordSize), xmm15); 3479 load_key(xmm1, key, 0xc0); // 0xc0; 3480 __ movdqu(Address(rsp, 4 * wordSize), xmm1); 3481 } 3482 __ align(OptoLoopAlignment); 3483 __ BIND(L_multiBlock_loopTop[k]); 3484 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left 3485 __ jcc(Assembler::less, L_singleBlock_loopTopHead[k]); 3486 3487 if (k != 0) { 3488 __ movdqu(xmm15, Address(rsp, 2 * wordSize)); 3489 __ movdqu(xmm1, Address(rsp, 4 * wordSize)); 3490 } 3491 3492 __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); // get next 4 blocks into xmmresult registers 3493 __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 3494 __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 3495 __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize)); 3496 3497 DoFour(pxor, xmm_key_first); 3498 if (k == 0) { 3499 for (int rnum = 1; rnum < ROUNDS[k]; rnum++) { 3500 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3501 } 3502 DoFour(aesdeclast, xmm_key_last); 3503 } else if (k == 1) { 3504 for (int rnum = 1; rnum <= ROUNDS[k]-2; rnum++) { 3505 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3506 } 3507 __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again. 
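// (For the 192- and 256-bit cases the extra round keys were spilled to stack slots above, because xmm15 and xmm1 double as scratch registers here; as the surrounding movdqu instructions show, they are reloaded from those slots around each use.)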
3508 DoFour(aesdec, xmm1); // key : 0xc0 3509 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again 3510 DoFour(aesdeclast, xmm_key_last); 3511 } else if (k == 2) { 3512 for (int rnum = 1; rnum <= ROUNDS[k] - 4; rnum++) { 3513 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3514 } 3515 DoFour(aesdec, xmm1); // key : 0xc0 3516 __ movdqu(xmm15, Address(rsp, 6 * wordSize)); 3517 __ movdqu(xmm1, Address(rsp, 8 * wordSize)); 3518 DoFour(aesdec, xmm15); // key : 0xd0 3519 __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again. 3520 DoFour(aesdec, xmm1); // key : 0xe0 3521 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again 3522 DoFour(aesdeclast, xmm_key_last); 3523 } 3524 3525 // for each result, xor with the r vector of previous cipher block 3526 __ pxor(xmm_result0, xmm_prev_block_cipher); 3527 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 3528 __ pxor(xmm_result1, xmm_prev_block_cipher); 3529 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 3530 __ pxor(xmm_result2, xmm_prev_block_cipher); 3531 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 3532 __ pxor(xmm_result3, xmm_prev_block_cipher); 3533 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3 * AESBlockSize)); // this will carry over to next set of blocks 3534 if (k != 0) { 3535 __ movdqu(Address(rvec, 0x00), xmm_prev_block_cipher); 3536 } 3537 3538 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output 3539 __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1); 3540 __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2); 3541 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3); 3542 3543 __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); 3544 __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); 3545 __ jmp(L_multiBlock_loopTop[k]); 3546 3547 // registers used in the non-parallelized loops 3548 // xmm register assignments for the loops below 3549 const XMMRegister xmm_result = xmm0; 3550 const XMMRegister xmm_prev_block_cipher_save = xmm2; 3551 const XMMRegister xmm_key11 = xmm3; 3552 const XMMRegister xmm_key12 = xmm4; 3553 const XMMRegister key_tmp = xmm4; 3554 3555 __ BIND(L_singleBlock_loopTopHead[k]); 3556 if (k == 1) { 3557 __ addptr(rsp, 6 * wordSize); 3558 } else if (k == 2) { 3559 __ addptr(rsp, 10 * wordSize); 3560 } 3561 __ cmpptr(len_reg, 0); // any blocks left?? 
3562       __ jcc(Assembler::equal, L_exit);
3563       __ BIND(L_singleBlock_loopTopHead2[k]);
3564       if (k == 1) {
3565         load_key(xmm_key11, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0
3566         load_key(xmm_key12, key, 0xc0); // 0xc0; 192-bit key goes up to 0xc0
3567       }
3568       if (k == 2) {
3569         load_key(xmm_key11, key, 0xb0); // 0xb0; 256-bit key goes up to 0xe0
3570       }
3571       __ align(OptoLoopAlignment);
3572       __ BIND(L_singleBlock_loopTop[k]);
3573       __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
3574       __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector
3575       __ pxor(xmm_result, xmm_key_first); // do the aes dec rounds
3576       for (int rnum = 1; rnum <= 9; rnum++) {
3577         __ aesdec(xmm_result, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
3578       }
3579       if (k == 1) {
3580         __ aesdec(xmm_result, xmm_key11);
3581         __ aesdec(xmm_result, xmm_key12);
3582       }
3583       if (k == 2) {
3584         __ aesdec(xmm_result, xmm_key11);
3585         load_key(key_tmp, key, 0xc0);
3586         __ aesdec(xmm_result, key_tmp);
3587         load_key(key_tmp, key, 0xd0);
3588         __ aesdec(xmm_result, key_tmp);
3589         load_key(key_tmp, key, 0xe0);
3590         __ aesdec(xmm_result, key_tmp);
3591       }
3592
3593       __ aesdeclast(xmm_result, xmm_key_last); // xmm15 always came from key+0
3594       __ pxor(xmm_result, xmm_prev_block_cipher); // xor with the current r vector
3595       __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3596       // no need to store r to memory until we exit
3597       __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block
3598       __ addptr(pos, AESBlockSize);
3599       __ subptr(len_reg, AESBlockSize);
3600       __ jcc(Assembler::notEqual, L_singleBlock_loopTop[k]);
3601       if (k != 2) {
3602         __ jmp(L_exit);
3603       }
3604     } // for 128/192/256
3605
3606     __ BIND(L_exit);
3607     __ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object
3608     __ pop(rbx);
3609 #ifdef _WIN64
3610     __ movl(rax, len_mem);
3611 #else
3612     __ pop(rax); // return length
3613 #endif
3614     __ leave(); // required for proper stackwalking of RuntimeStub frame
3615     __ ret(0);
3616     return start;
3617   }
3618
3619   address generate_electronicCodeBook_encryptAESCrypt() {
3620     __ align(CodeEntryAlignment);
3621     StubCodeMark mark(this, "StubRoutines", "electronicCodeBook_encryptAESCrypt");
3622     address start = __ pc();
3623     const Register from = c_rarg0;  // source array address
3624     const Register to = c_rarg1;    // destination array address
3625     const Register key = c_rarg2;   // key array address
3626     const Register len = c_rarg3;   // src len (must be multiple of blocksize 16)
3627     __ enter(); // required for proper stackwalking of RuntimeStub frame
3628     __ aesecb_encrypt(from, to, key, len);
3629     __ leave(); // required for proper stackwalking of RuntimeStub frame
3630     __ ret(0);
3631     return start;
3632   }
3633
3634   address generate_electronicCodeBook_decryptAESCrypt() {
3635     __ align(CodeEntryAlignment);
3636     StubCodeMark mark(this, "StubRoutines", "electronicCodeBook_decryptAESCrypt");
3637     address start = __ pc();
3638     const Register from = c_rarg0;  // source array address
3639     const Register to = c_rarg1;    // destination array address
3640     const Register key = c_rarg2;   // key array address
3641     const Register len = c_rarg3;   // src len (must be multiple of blocksize 16)
3642     __ enter(); // required for proper stackwalking of RuntimeStub frame
3643     __ aesecb_decrypt(from, to, key, len);
3644     __ leave(); // required for proper stackwalking of RuntimeStub frame
3645     __ ret(0);
3646     return start;
3647   }
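  // The next stubs emit small constant tables consumed by the SHA-1
  // intrinsic below: pshufb/pand operations against these masks handle
  // the conversion between x86 little-endian byte order and the
  // big-endian word order that SHA-1 is specified in.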
  address generate_upper_word_mask() {
3650     __ align(64);
3651     StubCodeMark mark(this, "StubRoutines", "upper_word_mask");
3652     address start = __ pc();
3653     __ emit_data64(0x0000000000000000, relocInfo::none);
3654     __ emit_data64(0xFFFFFFFF00000000, relocInfo::none);
3655     return start;
3656   }
3657
3658   address generate_shuffle_byte_flip_mask() {
3659     __ align(64);
3660     StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask");
3661     address start = __ pc();
3662     __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
3663     __ emit_data64(0x0001020304050607, relocInfo::none);
3664     return start;
3665   }
3666
3667   // ofs and limit are used for multi-block byte array.
3668   // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
3669   address generate_sha1_implCompress(bool multi_block, const char *name) {
3670     __ align(CodeEntryAlignment);
3671     StubCodeMark mark(this, "StubRoutines", name);
3672     address start = __ pc();
3673
3674     Register buf   = c_rarg0;
3675     Register state = c_rarg1;
3676     Register ofs   = c_rarg2;
3677     Register limit = c_rarg3;
3678
3679     const XMMRegister abcd = xmm0;
3680     const XMMRegister e0 = xmm1;
3681     const XMMRegister e1 = xmm2;
3682     const XMMRegister msg0 = xmm3;
3683
3684     const XMMRegister msg1 = xmm4;
3685     const XMMRegister msg2 = xmm5;
3686     const XMMRegister msg3 = xmm6;
3687     const XMMRegister shuf_mask = xmm7;
3688
3689     __ enter();
3690
3691     __ subptr(rsp, 4 * wordSize);
3692
3693     __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask,
3694                  buf, state, ofs, limit, rsp, multi_block);
3695
3696     __ addptr(rsp, 4 * wordSize);
3697
3698     __ leave();
3699     __ ret(0);
3700     return start;
3701   }
3702
3703   address generate_pshuffle_byte_flip_mask() {
3704     __ align(64);
3705     StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask");
3706     address start = __ pc();
3707     __ emit_data64(0x0405060700010203, relocInfo::none);
3708     __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
3709
3710     if (VM_Version::supports_avx2()) {
3711       __ emit_data64(0x0405060700010203, relocInfo::none); // second copy
3712       __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
3713       // _SHUF_00BA
3714       __ emit_data64(0x0b0a090803020100, relocInfo::none);
3715       __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3716       __ emit_data64(0x0b0a090803020100, relocInfo::none);
3717       __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3718       // _SHUF_DC00
3719       __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3720       __ emit_data64(0x0b0a090803020100, relocInfo::none);
3721       __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3722       __ emit_data64(0x0b0a090803020100, relocInfo::none);
3723     }
3724
3725     return start;
3726   }
3727
3728   // Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
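  // (Each mask byte is the index of the source byte that (v)pshufb
  //  copies into that position; the qword pattern 0x0001020304050607,
  //  stored little-endian as bytes 07 06 .. 00, therefore reverses the
  //  eight bytes of each qword lane: b0 b1 .. b7 becomes b7 b6 .. b0.)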
  address generate_pshuffle_byte_flip_mask_sha512() {
3730     __ align(32);
3731     StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask_sha512");
3732     address start = __ pc();
3733     if (VM_Version::supports_avx2()) {
3734       __ emit_data64(0x0001020304050607, relocInfo::none); // PSHUFFLE_BYTE_FLIP_MASK
3735       __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
3736       __ emit_data64(0x1011121314151617, relocInfo::none);
3737       __ emit_data64(0x18191a1b1c1d1e1f, relocInfo::none);
3738       __ emit_data64(0x0000000000000000, relocInfo::none); // MASK_YMM_LO
3739       __ emit_data64(0x0000000000000000, relocInfo::none);
3740       __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3741       __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3742     }
3743
3744     return start;
3745   }
3746
3747   // ofs and limit are used for multi-block byte array.
3748   // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
3749   address generate_sha256_implCompress(bool multi_block, const char *name) {
3750     assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), "");
3751     __ align(CodeEntryAlignment);
3752     StubCodeMark mark(this, "StubRoutines", name);
3753     address start = __ pc();
3754
3755     Register buf   = c_rarg0;
3756     Register state = c_rarg1;
3757     Register ofs   = c_rarg2;
3758     Register limit = c_rarg3;
3759
3760     const XMMRegister msg = xmm0;
3761     const XMMRegister state0 = xmm1;
3762     const XMMRegister state1 = xmm2;
3763     const XMMRegister msgtmp0 = xmm3;
3764
3765     const XMMRegister msgtmp1 = xmm4;
3766     const XMMRegister msgtmp2 = xmm5;
3767     const XMMRegister msgtmp3 = xmm6;
3768     const XMMRegister msgtmp4 = xmm7;
3769
3770     const XMMRegister shuf_mask = xmm8;
3771
3772     __ enter();
3773
3774     __ subptr(rsp, 4 * wordSize);
3775
3776     if (VM_Version::supports_sha()) {
3777       __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
3778                      buf, state, ofs, limit, rsp, multi_block, shuf_mask);
3779     } else if (VM_Version::supports_avx2()) {
3780       __ sha256_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
3781                      buf, state, ofs, limit, rsp, multi_block, shuf_mask);
3782     }
3783     __ addptr(rsp, 4 * wordSize);
3784     __ vzeroupper();
3785     __ leave();
3786     __ ret(0);
3787     return start;
3788   }
3789
3790   address generate_sha512_implCompress(bool multi_block, const char *name) {
3791     assert(VM_Version::supports_avx2(), "");
3792     assert(VM_Version::supports_bmi2(), "");
3793     __ align(CodeEntryAlignment);
3794     StubCodeMark mark(this, "StubRoutines", name);
3795     address start = __ pc();
3796
3797     Register buf   = c_rarg0;
3798     Register state = c_rarg1;
3799     Register ofs   = c_rarg2;
3800     Register limit = c_rarg3;
3801
3802     const XMMRegister msg = xmm0;
3803     const XMMRegister state0 = xmm1;
3804     const XMMRegister state1 = xmm2;
3805     const XMMRegister msgtmp0 = xmm3;
3806     const XMMRegister msgtmp1 = xmm4;
3807     const XMMRegister msgtmp2 = xmm5;
3808     const XMMRegister msgtmp3 = xmm6;
3809     const XMMRegister msgtmp4 = xmm7;
3810
3811     const XMMRegister shuf_mask = xmm8;
3812
3813     __ enter();
3814
3815     __ sha512_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
3816                    buf, state, ofs, limit, rsp, multi_block, shuf_mask);
3817
3818     __ vzeroupper();
3819     __ leave();
3820     __ ret(0);
3821     return start;
3822   }
3823
3824   // This mask is used for incrementing counter value (linc0, linc4, etc.)
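  // (Layout, per the offsets annotated in the table below: bytes 0..63
  //  hold four copies of the 16-byte byte-swap mask; +64 is linc0, the
  //  512-bit vector of counter offsets {0, 1, 2, 3}; +128 linc4,
  //  +192 linc8, +256 linc32 and +320 linc16 hold four copies of the
  //  per-step increments 4, 8, 32 and 16, respectively.)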
3825 address counter_mask_addr() { 3826 __ align(64); 3827 StubCodeMark mark(this, "StubRoutines", "counter_mask_addr"); 3828 address start = __ pc(); 3829 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);//lbswapmask 3830 __ emit_data64(0x0001020304050607, relocInfo::none); 3831 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 3832 __ emit_data64(0x0001020304050607, relocInfo::none); 3833 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 3834 __ emit_data64(0x0001020304050607, relocInfo::none); 3835 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 3836 __ emit_data64(0x0001020304050607, relocInfo::none); 3837 __ emit_data64(0x0000000000000000, relocInfo::none);//linc0 = counter_mask_addr+64 3838 __ emit_data64(0x0000000000000000, relocInfo::none); 3839 __ emit_data64(0x0000000000000001, relocInfo::none);//counter_mask_addr() + 80 3840 __ emit_data64(0x0000000000000000, relocInfo::none); 3841 __ emit_data64(0x0000000000000002, relocInfo::none); 3842 __ emit_data64(0x0000000000000000, relocInfo::none); 3843 __ emit_data64(0x0000000000000003, relocInfo::none); 3844 __ emit_data64(0x0000000000000000, relocInfo::none); 3845 __ emit_data64(0x0000000000000004, relocInfo::none);//linc4 = counter_mask_addr() + 128 3846 __ emit_data64(0x0000000000000000, relocInfo::none); 3847 __ emit_data64(0x0000000000000004, relocInfo::none); 3848 __ emit_data64(0x0000000000000000, relocInfo::none); 3849 __ emit_data64(0x0000000000000004, relocInfo::none); 3850 __ emit_data64(0x0000000000000000, relocInfo::none); 3851 __ emit_data64(0x0000000000000004, relocInfo::none); 3852 __ emit_data64(0x0000000000000000, relocInfo::none); 3853 __ emit_data64(0x0000000000000008, relocInfo::none);//linc8 = counter_mask_addr() + 192 3854 __ emit_data64(0x0000000000000000, relocInfo::none); 3855 __ emit_data64(0x0000000000000008, relocInfo::none); 3856 __ emit_data64(0x0000000000000000, relocInfo::none); 3857 __ emit_data64(0x0000000000000008, relocInfo::none); 3858 __ emit_data64(0x0000000000000000, relocInfo::none); 3859 __ emit_data64(0x0000000000000008, relocInfo::none); 3860 __ emit_data64(0x0000000000000000, relocInfo::none); 3861 __ emit_data64(0x0000000000000020, relocInfo::none);//linc32 = counter_mask_addr() + 256 3862 __ emit_data64(0x0000000000000000, relocInfo::none); 3863 __ emit_data64(0x0000000000000020, relocInfo::none); 3864 __ emit_data64(0x0000000000000000, relocInfo::none); 3865 __ emit_data64(0x0000000000000020, relocInfo::none); 3866 __ emit_data64(0x0000000000000000, relocInfo::none); 3867 __ emit_data64(0x0000000000000020, relocInfo::none); 3868 __ emit_data64(0x0000000000000000, relocInfo::none); 3869 __ emit_data64(0x0000000000000010, relocInfo::none);//linc16 = counter_mask_addr() + 320 3870 __ emit_data64(0x0000000000000000, relocInfo::none); 3871 __ emit_data64(0x0000000000000010, relocInfo::none); 3872 __ emit_data64(0x0000000000000000, relocInfo::none); 3873 __ emit_data64(0x0000000000000010, relocInfo::none); 3874 __ emit_data64(0x0000000000000000, relocInfo::none); 3875 __ emit_data64(0x0000000000000010, relocInfo::none); 3876 __ emit_data64(0x0000000000000000, relocInfo::none); 3877 return start; 3878 } 3879 3880 // Vector AES Counter implementation 3881 address generate_counterMode_VectorAESCrypt() { 3882 __ align(CodeEntryAlignment); 3883 StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt"); 3884 address start = __ pc(); 3885 const Register from = c_rarg0; // source array address 3886 const Register to = c_rarg1; // destination array address 3887 const Register key = 
c_rarg2; // key array address r8 3888 const Register counter = c_rarg3; // counter byte array initialized from counter array address 3889 // and updated with the incremented counter in the end 3890 #ifndef _WIN64 3891 const Register len_reg = c_rarg4; 3892 const Register saved_encCounter_start = c_rarg5; 3893 const Register used_addr = r10; 3894 const Address used_mem(rbp, 2 * wordSize); 3895 const Register used = r11; 3896 #else 3897 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3898 const Address saved_encCounter_mem(rbp, 7 * wordSize); // saved encrypted counter is on stack on Win64 3899 const Address used_mem(rbp, 8 * wordSize); // used length is on stack on Win64 3900 const Register len_reg = r10; // pick the first volatile windows register 3901 const Register saved_encCounter_start = r11; 3902 const Register used_addr = r13; 3903 const Register used = r14; 3904 #endif 3905 __ enter(); 3906 // Save state before entering routine 3907 __ push(r12); 3908 __ push(r13); 3909 __ push(r14); 3910 __ push(r15); 3911 #ifdef _WIN64 3912 // on win64, fill len_reg from stack position 3913 __ movl(len_reg, len_mem); 3914 __ movptr(saved_encCounter_start, saved_encCounter_mem); 3915 __ movptr(used_addr, used_mem); 3916 __ movl(used, Address(used_addr, 0)); 3917 #else 3918 __ push(len_reg); // Save 3919 __ movptr(used_addr, used_mem); 3920 __ movl(used, Address(used_addr, 0)); 3921 #endif 3922 __ push(rbx); 3923 __ aesctr_encrypt(from, to, key, counter, len_reg, used, used_addr, saved_encCounter_start); 3924 // Restore state before leaving routine 3925 __ pop(rbx); 3926 #ifdef _WIN64 3927 __ movl(rax, len_mem); // return length 3928 #else 3929 __ pop(rax); // return length 3930 #endif 3931 __ pop(r15); 3932 __ pop(r14); 3933 __ pop(r13); 3934 __ pop(r12); 3935 3936 __ leave(); // required for proper stackwalking of RuntimeStub frame 3937 __ ret(0); 3938 return start; 3939 } 3940 3941 // This is a version of CTR/AES crypt which does 6 blocks in a loop at a time 3942 // to hide instruction latency 3943 // 3944 // Arguments: 3945 // 3946 // Inputs: 3947 // c_rarg0 - source byte array address 3948 // c_rarg1 - destination byte array address 3949 // c_rarg2 - K (key) in little endian int array 3950 // c_rarg3 - counter vector byte array address 3951 // Linux 3952 // c_rarg4 - input length 3953 // c_rarg5 - saved encryptedCounter start 3954 // rbp + 6 * wordSize - saved used length 3955 // Windows 3956 // rbp + 6 * wordSize - input length 3957 // rbp + 7 * wordSize - saved encryptedCounter start 3958 // rbp + 8 * wordSize - saved used length 3959 // 3960 // Output: 3961 // rax - input length 3962 // 3963 address generate_counterMode_AESCrypt_Parallel() { 3964 assert(UseAES, "need AES instructions and misaligned SSE support"); 3965 __ align(CodeEntryAlignment); 3966 StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt"); 3967 address start = __ pc(); 3968 const Register from = c_rarg0; // source array address 3969 const Register to = c_rarg1; // destination array address 3970 const Register key = c_rarg2; // key array address 3971 const Register counter = c_rarg3; // counter byte array initialized from counter array address 3972 // and updated with the incremented counter in the end 3973 #ifndef _WIN64 3974 const Register len_reg = c_rarg4; 3975 const Register saved_encCounter_start = c_rarg5; 3976 const Register used_addr = r10; 3977 const Address used_mem(rbp, 2 * wordSize); 3978 const Register used = r11; 3979 #else 3980 const Address len_mem(rbp, 6 * wordSize); // 
length is on stack on Win64
3981     const Address saved_encCounter_mem(rbp, 7 * wordSize); // saved encrypted counter is on stack on Win64
3982     const Address used_mem(rbp, 8 * wordSize); // used length is on stack on Win64
3983     const Register len_reg = r10; // pick the first volatile windows register
3984     const Register saved_encCounter_start = r11;
3985     const Register used_addr = r13;
3986     const Register used = r14;
3987 #endif
3988     const Register pos = rax;
3989
3990     const int PARALLEL_FACTOR = 6;
3991     const XMMRegister xmm_counter_shuf_mask = xmm0;
3992     const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front
3993     const XMMRegister xmm_curr_counter = xmm2;
3994
3995     const XMMRegister xmm_key_tmp0 = xmm3;
3996     const XMMRegister xmm_key_tmp1 = xmm4;
3997
3998     // registers holding the six results in the parallelized loop
3999     const XMMRegister xmm_result0 = xmm5;
4000     const XMMRegister xmm_result1 = xmm6;
4001     const XMMRegister xmm_result2 = xmm7;
4002     const XMMRegister xmm_result3 = xmm8;
4003     const XMMRegister xmm_result4 = xmm9;
4004     const XMMRegister xmm_result5 = xmm10;
4005
4006     const XMMRegister xmm_from0 = xmm11;
4007     const XMMRegister xmm_from1 = xmm12;
4008     const XMMRegister xmm_from2 = xmm13;
4009     const XMMRegister xmm_from3 = xmm14; // the last one is xmm14. we have to preserve it on WIN64.
4010     const XMMRegister xmm_from4 = xmm3;  // reuse xmm3/xmm4: xmm_key_tmp0/1 are no longer needed once the input text is loaded
4011     const XMMRegister xmm_from5 = xmm4;
4012
4013     // for key_128, key_192, key_256
4014     const int rounds[3] = {10, 12, 14};
4015     Label L_exit_preLoop, L_preLoop_start;
4016     Label L_multiBlock_loopTop[3];
4017     Label L_singleBlockLoopTop[3];
4018     Label L__incCounter[3][6]; // for 6 blocks
4019     Label L__incCounter_single[3]; // for single block, key128, key192, key256
4020     Label L_processTail_insr[3], L_processTail_4_insr[3], L_processTail_2_insr[3], L_processTail_1_insr[3], L_processTail_exit_insr[3];
4021     Label L_processTail_4_extr[3], L_processTail_2_extr[3], L_processTail_1_extr[3], L_processTail_exit_extr[3];
4022
4023     Label L_exit;
4024
4025     __ enter(); // required for proper stackwalking of RuntimeStub frame
4026
4027 #ifdef _WIN64
4028     // allocate spill slots for r13, r14
4029     enum {
4030       saved_r13_offset,
4031       saved_r14_offset
4032     };
4033     __ subptr(rsp, 2 * wordSize);
4034     __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
4035     __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);
4036
4037     // on win64, fill len_reg from stack position
4038     __ movl(len_reg, len_mem);
4039     __ movptr(saved_encCounter_start, saved_encCounter_mem);
4040     __ movptr(used_addr, used_mem);
4041     __ movl(used, Address(used_addr, 0));
4042 #else
4043     __ push(len_reg); // Save
4044     __ movptr(used_addr, used_mem);
4045     __ movl(used, Address(used_addr, 0));
4046 #endif
4047
4048     __ push(rbx); // Save RBX
4049     __ movdqu(xmm_curr_counter, Address(counter, 0x00)); // initialize counter with initial counter
4050     __ movdqu(xmm_counter_shuf_mask, ExternalAddress(StubRoutines::x86::counter_shuffle_mask_addr()), pos); // pos as scratch
4051     __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); // counter is shuffled
4052     __ movptr(pos, 0);
4053
4054     // Use the partially used encrypted counter from last invocation
4055     __ BIND(L_preLoop_start);
4056     __ cmpptr(used, 16);
4057     __ jcc(Assembler::aboveEqual, L_exit_preLoop);
4058     __ cmpptr(len_reg, 0);
4059     __ jcc(Assembler::lessEqual, L_exit_preLoop);
4060     __ movb(rbx, Address(saved_encCounter_start, used));
4061     __ xorb(rbx, Address(from, pos));
4062     __ movb(Address(to, pos), rbx);
4063     __ addptr(pos, 1);
4064     __ addptr(used, 1);
4065     __ subptr(len_reg, 1);
4066
4067     __ jmp(L_preLoop_start);
4068
4069     __ BIND(L_exit_preLoop);
4070     __ movl(Address(used_addr, 0), used);
4071
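    // (Equivalent C sketch of the pre-loop above; an illustration only,
    //  not code from this file:
    //    while (used < 16 && len > 0) { *to++ = *from++ ^ savedCounter[used++]; len--; }
    //  CTR mode is a stream cipher, so leftover keystream bytes saved by
    //  the previous call must be consumed before whole blocks are
    //  processed.)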
4072     // key length can only be {11, 13, 15} * 4 = {44, 52, 60}
4073     __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()), rbx); // rbx as scratch
4074     __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
4075     __ cmpl(rbx, 52);
4076     __ jcc(Assembler::equal, L_multiBlock_loopTop[1]);
4077     __ cmpl(rbx, 60);
4078     __ jcc(Assembler::equal, L_multiBlock_loopTop[2]);
4079
4080 #define CTR_DoSix(opc, src_reg)        \
4081     __ opc(xmm_result0, src_reg);      \
4082     __ opc(xmm_result1, src_reg);      \
4083     __ opc(xmm_result2, src_reg);      \
4084     __ opc(xmm_result3, src_reg);      \
4085     __ opc(xmm_result4, src_reg);      \
4086     __ opc(xmm_result5, src_reg);
4087
4088     // k == 0 :  generate code for key_128
4089     // k == 1 :  generate code for key_192
4090     // k == 2 :  generate code for key_256
4091     for (int k = 0; k < 3; ++k) {
4092       // multi-block processing starts here
4093       __ align(OptoLoopAlignment);
4094       __ BIND(L_multiBlock_loopTop[k]);
4095       __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least PARALLEL_FACTOR blocks left
4096       __ jcc(Assembler::less, L_singleBlockLoopTop[k]);
4097       load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);
4098
4099       // load, then increment counters
4100       CTR_DoSix(movdqa, xmm_curr_counter);
4101       inc_counter(rbx, xmm_result1, 0x01, L__incCounter[k][0]);
4102       inc_counter(rbx, xmm_result2, 0x02, L__incCounter[k][1]);
4103       inc_counter(rbx, xmm_result3, 0x03, L__incCounter[k][2]);
4104       inc_counter(rbx, xmm_result4, 0x04, L__incCounter[k][3]);
4105       inc_counter(rbx, xmm_result5, 0x05, L__incCounter[k][4]);
4106       inc_counter(rbx, xmm_curr_counter, 0x06, L__incCounter[k][5]);
4107       CTR_DoSix(pshufb, xmm_counter_shuf_mask); // after incrementing, shuffle counters back for PXOR
4108       CTR_DoSix(pxor, xmm_key_tmp0);   // PXOR with Round 0 key
4109
4110       // load two ROUND_KEYs at a time
4111       for (int i = 1; i < rounds[k]; ) {
4112         load_key(xmm_key_tmp1, key, (0x10 * i), xmm_key_shuf_mask);
4113         load_key(xmm_key_tmp0, key, (0x10 * (i+1)), xmm_key_shuf_mask);
4114         CTR_DoSix(aesenc, xmm_key_tmp1);
4115         i++;
4116         if (i != rounds[k]) {
4117           CTR_DoSix(aesenc, xmm_key_tmp0);
4118         } else {
4119           CTR_DoSix(aesenclast, xmm_key_tmp0);
4120         }
4121         i++;
4122       }
4123
4124       // get next PARALLEL_FACTOR blocks into xmm_result registers
4125       __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
4126       __ movdqu(xmm_from1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
4127       __ movdqu(xmm_from2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
4128       __ movdqu(xmm_from3, Address(from, pos, Address::times_1, 3 * AESBlockSize));
4129       __ movdqu(xmm_from4, Address(from, pos, Address::times_1, 4 * AESBlockSize));
4130       __ movdqu(xmm_from5, Address(from, pos, Address::times_1, 5 * AESBlockSize));
4131
4132       __ pxor(xmm_result0, xmm_from0);
4133       __ pxor(xmm_result1, xmm_from1);
4134       __ pxor(xmm_result2, xmm_from2);
4135       __ pxor(xmm_result3, xmm_from3);
4136       __ pxor(xmm_result4, xmm_from4);
4137       __ pxor(xmm_result5, xmm_from5);
4138
4139       // store 6 results into the next 96 bytes of output
4140       __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
4141       __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1);
4142       __ movdqu(Address(to, pos,
Address::times_1, 2 * AESBlockSize), xmm_result2); 4143 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3); 4144 __ movdqu(Address(to, pos, Address::times_1, 4 * AESBlockSize), xmm_result4); 4145 __ movdqu(Address(to, pos, Address::times_1, 5 * AESBlockSize), xmm_result5); 4146 4147 __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); // increase the length of crypt text 4148 __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // decrease the remaining length 4149 __ jmp(L_multiBlock_loopTop[k]); 4150 4151 // singleBlock starts here 4152 __ align(OptoLoopAlignment); 4153 __ BIND(L_singleBlockLoopTop[k]); 4154 __ cmpptr(len_reg, 0); 4155 __ jcc(Assembler::lessEqual, L_exit); 4156 load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask); 4157 __ movdqa(xmm_result0, xmm_curr_counter); 4158 inc_counter(rbx, xmm_curr_counter, 0x01, L__incCounter_single[k]); 4159 __ pshufb(xmm_result0, xmm_counter_shuf_mask); 4160 __ pxor(xmm_result0, xmm_key_tmp0); 4161 for (int i = 1; i < rounds[k]; i++) { 4162 load_key(xmm_key_tmp0, key, (0x10 * i), xmm_key_shuf_mask); 4163 __ aesenc(xmm_result0, xmm_key_tmp0); 4164 } 4165 load_key(xmm_key_tmp0, key, (rounds[k] * 0x10), xmm_key_shuf_mask); 4166 __ aesenclast(xmm_result0, xmm_key_tmp0); 4167 __ cmpptr(len_reg, AESBlockSize); 4168 __ jcc(Assembler::less, L_processTail_insr[k]); 4169 __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 4170 __ pxor(xmm_result0, xmm_from0); 4171 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); 4172 __ addptr(pos, AESBlockSize); 4173 __ subptr(len_reg, AESBlockSize); 4174 __ jmp(L_singleBlockLoopTop[k]); 4175 __ BIND(L_processTail_insr[k]); // Process the tail part of the input array 4176 __ addptr(pos, len_reg); // 1. Insert bytes from src array into xmm_from0 register 4177 __ testptr(len_reg, 8); 4178 __ jcc(Assembler::zero, L_processTail_4_insr[k]); 4179 __ subptr(pos,8); 4180 __ pinsrq(xmm_from0, Address(from, pos), 0); 4181 __ BIND(L_processTail_4_insr[k]); 4182 __ testptr(len_reg, 4); 4183 __ jcc(Assembler::zero, L_processTail_2_insr[k]); 4184 __ subptr(pos,4); 4185 __ pslldq(xmm_from0, 4); 4186 __ pinsrd(xmm_from0, Address(from, pos), 0); 4187 __ BIND(L_processTail_2_insr[k]); 4188 __ testptr(len_reg, 2); 4189 __ jcc(Assembler::zero, L_processTail_1_insr[k]); 4190 __ subptr(pos, 2); 4191 __ pslldq(xmm_from0, 2); 4192 __ pinsrw(xmm_from0, Address(from, pos), 0); 4193 __ BIND(L_processTail_1_insr[k]); 4194 __ testptr(len_reg, 1); 4195 __ jcc(Assembler::zero, L_processTail_exit_insr[k]); 4196 __ subptr(pos, 1); 4197 __ pslldq(xmm_from0, 1); 4198 __ pinsrb(xmm_from0, Address(from, pos), 0); 4199 __ BIND(L_processTail_exit_insr[k]); 4200 4201 __ movdqu(Address(saved_encCounter_start, 0), xmm_result0); // 2. Perform pxor of the encrypted counter and plaintext Bytes. 4202 __ pxor(xmm_result0, xmm_from0); // Also the encrypted counter is saved for next invocation. 4203 4204 __ testptr(len_reg, 8); 4205 __ jcc(Assembler::zero, L_processTail_4_extr[k]); // 3. Extract bytes from xmm_result0 into the dest. 
array 4206 __ pextrq(Address(to, pos), xmm_result0, 0); 4207 __ psrldq(xmm_result0, 8); 4208 __ addptr(pos, 8); 4209 __ BIND(L_processTail_4_extr[k]); 4210 __ testptr(len_reg, 4); 4211 __ jcc(Assembler::zero, L_processTail_2_extr[k]); 4212 __ pextrd(Address(to, pos), xmm_result0, 0); 4213 __ psrldq(xmm_result0, 4); 4214 __ addptr(pos, 4); 4215 __ BIND(L_processTail_2_extr[k]); 4216 __ testptr(len_reg, 2); 4217 __ jcc(Assembler::zero, L_processTail_1_extr[k]); 4218 __ pextrw(Address(to, pos), xmm_result0, 0); 4219 __ psrldq(xmm_result0, 2); 4220 __ addptr(pos, 2); 4221 __ BIND(L_processTail_1_extr[k]); 4222 __ testptr(len_reg, 1); 4223 __ jcc(Assembler::zero, L_processTail_exit_extr[k]); 4224 __ pextrb(Address(to, pos), xmm_result0, 0); 4225 4226 __ BIND(L_processTail_exit_extr[k]); 4227 __ movl(Address(used_addr, 0), len_reg); 4228 __ jmp(L_exit); 4229 4230 } 4231 4232 __ BIND(L_exit); 4233 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); //counter is shuffled back. 4234 __ movdqu(Address(counter, 0), xmm_curr_counter); //save counter back 4235 __ pop(rbx); // pop the saved RBX. 4236 #ifdef _WIN64 4237 __ movl(rax, len_mem); 4238 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 4239 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 4240 __ addptr(rsp, 2 * wordSize); 4241 #else 4242 __ pop(rax); // return 'len' 4243 #endif 4244 __ leave(); // required for proper stackwalking of RuntimeStub frame 4245 __ ret(0); 4246 return start; 4247 } 4248 4249 void roundDec(XMMRegister xmm_reg) { 4250 __ vaesdec(xmm1, xmm1, xmm_reg, Assembler::AVX_512bit); 4251 __ vaesdec(xmm2, xmm2, xmm_reg, Assembler::AVX_512bit); 4252 __ vaesdec(xmm3, xmm3, xmm_reg, Assembler::AVX_512bit); 4253 __ vaesdec(xmm4, xmm4, xmm_reg, Assembler::AVX_512bit); 4254 __ vaesdec(xmm5, xmm5, xmm_reg, Assembler::AVX_512bit); 4255 __ vaesdec(xmm6, xmm6, xmm_reg, Assembler::AVX_512bit); 4256 __ vaesdec(xmm7, xmm7, xmm_reg, Assembler::AVX_512bit); 4257 __ vaesdec(xmm8, xmm8, xmm_reg, Assembler::AVX_512bit); 4258 } 4259 4260 void roundDeclast(XMMRegister xmm_reg) { 4261 __ vaesdeclast(xmm1, xmm1, xmm_reg, Assembler::AVX_512bit); 4262 __ vaesdeclast(xmm2, xmm2, xmm_reg, Assembler::AVX_512bit); 4263 __ vaesdeclast(xmm3, xmm3, xmm_reg, Assembler::AVX_512bit); 4264 __ vaesdeclast(xmm4, xmm4, xmm_reg, Assembler::AVX_512bit); 4265 __ vaesdeclast(xmm5, xmm5, xmm_reg, Assembler::AVX_512bit); 4266 __ vaesdeclast(xmm6, xmm6, xmm_reg, Assembler::AVX_512bit); 4267 __ vaesdeclast(xmm7, xmm7, xmm_reg, Assembler::AVX_512bit); 4268 __ vaesdeclast(xmm8, xmm8, xmm_reg, Assembler::AVX_512bit); 4269 } 4270 4271 void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask = NULL) { 4272 __ movdqu(xmmdst, Address(key, offset)); 4273 if (xmm_shuf_mask != NULL) { 4274 __ pshufb(xmmdst, xmm_shuf_mask); 4275 } else { 4276 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 4277 } 4278 __ evshufi64x2(xmmdst, xmmdst, xmmdst, 0x0, Assembler::AVX_512bit); 4279 4280 } 4281 4282 address generate_cipherBlockChaining_decryptVectorAESCrypt() { 4283 assert(VM_Version::supports_avx512_vaes(), "need AES instructions and misaligned SSE support"); 4284 __ align(CodeEntryAlignment); 4285 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 4286 address start = __ pc(); 4287 4288 const Register from = c_rarg0; // source array address 4289 const Register to = c_rarg1; // destination array address 4290 const Register key = c_rarg2; // key array address 4291 const Register rvec 
= c_rarg3; // r byte array initialized from initvector array address 4292 // and left with the results of the last encryption block 4293 #ifndef _WIN64 4294 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 4295 #else 4296 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 4297 const Register len_reg = r11; // pick the volatile windows register 4298 #endif 4299 4300 Label Loop, Loop1, L_128, L_256, L_192, KEY_192, KEY_256, Loop2, Lcbc_dec_rem_loop, 4301 Lcbc_dec_rem_last, Lcbc_dec_ret, Lcbc_dec_rem, Lcbc_exit; 4302 4303 __ enter(); 4304 4305 #ifdef _WIN64 4306 // on win64, fill len_reg from stack position 4307 __ movl(len_reg, len_mem); 4308 #else 4309 __ push(len_reg); // Save 4310 #endif 4311 __ push(rbx); 4312 __ vzeroupper(); 4313 4314 // Temporary variable declaration for swapping key bytes 4315 const XMMRegister xmm_key_shuf_mask = xmm1; 4316 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 4317 4318 // Calculate number of rounds from key size: 44 for 10-rounds, 52 for 12-rounds, 60 for 14-rounds 4319 const Register rounds = rbx; 4320 __ movl(rounds, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 4321 4322 const XMMRegister IV = xmm0; 4323 // Load IV and broadcast value to 512-bits 4324 __ evbroadcasti64x2(IV, Address(rvec, 0), Assembler::AVX_512bit); 4325 4326 // Temporary variables for storing round keys 4327 const XMMRegister RK0 = xmm30; 4328 const XMMRegister RK1 = xmm9; 4329 const XMMRegister RK2 = xmm18; 4330 const XMMRegister RK3 = xmm19; 4331 const XMMRegister RK4 = xmm20; 4332 const XMMRegister RK5 = xmm21; 4333 const XMMRegister RK6 = xmm22; 4334 const XMMRegister RK7 = xmm23; 4335 const XMMRegister RK8 = xmm24; 4336 const XMMRegister RK9 = xmm25; 4337 const XMMRegister RK10 = xmm26; 4338 4339 // Load and shuffle key 4340 // the java expanded key ordering is rotated one position from what we want 4341 // so we start from 1*16 here and hit 0*16 last 4342 ev_load_key(RK1, key, 1 * 16, xmm_key_shuf_mask); 4343 ev_load_key(RK2, key, 2 * 16, xmm_key_shuf_mask); 4344 ev_load_key(RK3, key, 3 * 16, xmm_key_shuf_mask); 4345 ev_load_key(RK4, key, 4 * 16, xmm_key_shuf_mask); 4346 ev_load_key(RK5, key, 5 * 16, xmm_key_shuf_mask); 4347 ev_load_key(RK6, key, 6 * 16, xmm_key_shuf_mask); 4348 ev_load_key(RK7, key, 7 * 16, xmm_key_shuf_mask); 4349 ev_load_key(RK8, key, 8 * 16, xmm_key_shuf_mask); 4350 ev_load_key(RK9, key, 9 * 16, xmm_key_shuf_mask); 4351 ev_load_key(RK10, key, 10 * 16, xmm_key_shuf_mask); 4352 ev_load_key(RK0, key, 0*16, xmm_key_shuf_mask); 4353 4354 // Variables for storing source cipher text 4355 const XMMRegister S0 = xmm10; 4356 const XMMRegister S1 = xmm11; 4357 const XMMRegister S2 = xmm12; 4358 const XMMRegister S3 = xmm13; 4359 const XMMRegister S4 = xmm14; 4360 const XMMRegister S5 = xmm15; 4361 const XMMRegister S6 = xmm16; 4362 const XMMRegister S7 = xmm17; 4363 4364 // Variables for storing decrypted text 4365 const XMMRegister B0 = xmm1; 4366 const XMMRegister B1 = xmm2; 4367 const XMMRegister B2 = xmm3; 4368 const XMMRegister B3 = xmm4; 4369 const XMMRegister B4 = xmm5; 4370 const XMMRegister B5 = xmm6; 4371 const XMMRegister B6 = xmm7; 4372 const XMMRegister B7 = xmm8; 4373 4374 __ cmpl(rounds, 44); 4375 __ jcc(Assembler::greater, KEY_192); 4376 __ jmp(Loop); 4377 4378 __ BIND(KEY_192); 4379 const XMMRegister RK11 = xmm27; 4380 const XMMRegister RK12 = xmm28; 4381 ev_load_key(RK11, key, 11*16, xmm_key_shuf_mask); 
4382 ev_load_key(RK12, key, 12*16, xmm_key_shuf_mask); 4383 4384 __ cmpl(rounds, 52); 4385 __ jcc(Assembler::greater, KEY_256); 4386 __ jmp(Loop); 4387 4388 __ BIND(KEY_256); 4389 const XMMRegister RK13 = xmm29; 4390 const XMMRegister RK14 = xmm31; 4391 ev_load_key(RK13, key, 13*16, xmm_key_shuf_mask); 4392 ev_load_key(RK14, key, 14*16, xmm_key_shuf_mask); 4393 4394 __ BIND(Loop); 4395 __ cmpl(len_reg, 512); 4396 __ jcc(Assembler::below, Lcbc_dec_rem); 4397 __ BIND(Loop1); 4398 __ subl(len_reg, 512); 4399 __ evmovdquq(S0, Address(from, 0 * 64), Assembler::AVX_512bit); 4400 __ evmovdquq(S1, Address(from, 1 * 64), Assembler::AVX_512bit); 4401 __ evmovdquq(S2, Address(from, 2 * 64), Assembler::AVX_512bit); 4402 __ evmovdquq(S3, Address(from, 3 * 64), Assembler::AVX_512bit); 4403 __ evmovdquq(S4, Address(from, 4 * 64), Assembler::AVX_512bit); 4404 __ evmovdquq(S5, Address(from, 5 * 64), Assembler::AVX_512bit); 4405 __ evmovdquq(S6, Address(from, 6 * 64), Assembler::AVX_512bit); 4406 __ evmovdquq(S7, Address(from, 7 * 64), Assembler::AVX_512bit); 4407 __ leaq(from, Address(from, 8 * 64)); 4408 4409 __ evpxorq(B0, S0, RK1, Assembler::AVX_512bit); 4410 __ evpxorq(B1, S1, RK1, Assembler::AVX_512bit); 4411 __ evpxorq(B2, S2, RK1, Assembler::AVX_512bit); 4412 __ evpxorq(B3, S3, RK1, Assembler::AVX_512bit); 4413 __ evpxorq(B4, S4, RK1, Assembler::AVX_512bit); 4414 __ evpxorq(B5, S5, RK1, Assembler::AVX_512bit); 4415 __ evpxorq(B6, S6, RK1, Assembler::AVX_512bit); 4416 __ evpxorq(B7, S7, RK1, Assembler::AVX_512bit); 4417 4418 __ evalignq(IV, S0, IV, 0x06); 4419 __ evalignq(S0, S1, S0, 0x06); 4420 __ evalignq(S1, S2, S1, 0x06); 4421 __ evalignq(S2, S3, S2, 0x06); 4422 __ evalignq(S3, S4, S3, 0x06); 4423 __ evalignq(S4, S5, S4, 0x06); 4424 __ evalignq(S5, S6, S5, 0x06); 4425 __ evalignq(S6, S7, S6, 0x06); 4426 4427 roundDec(RK2); 4428 roundDec(RK3); 4429 roundDec(RK4); 4430 roundDec(RK5); 4431 roundDec(RK6); 4432 roundDec(RK7); 4433 roundDec(RK8); 4434 roundDec(RK9); 4435 roundDec(RK10); 4436 4437 __ cmpl(rounds, 44); 4438 __ jcc(Assembler::belowEqual, L_128); 4439 roundDec(RK11); 4440 roundDec(RK12); 4441 4442 __ cmpl(rounds, 52); 4443 __ jcc(Assembler::belowEqual, L_192); 4444 roundDec(RK13); 4445 roundDec(RK14); 4446 4447 __ BIND(L_256); 4448 roundDeclast(RK0); 4449 __ jmp(Loop2); 4450 4451 __ BIND(L_128); 4452 roundDeclast(RK0); 4453 __ jmp(Loop2); 4454 4455 __ BIND(L_192); 4456 roundDeclast(RK0); 4457 4458 __ BIND(Loop2); 4459 __ evpxorq(B0, B0, IV, Assembler::AVX_512bit); 4460 __ evpxorq(B1, B1, S0, Assembler::AVX_512bit); 4461 __ evpxorq(B2, B2, S1, Assembler::AVX_512bit); 4462 __ evpxorq(B3, B3, S2, Assembler::AVX_512bit); 4463 __ evpxorq(B4, B4, S3, Assembler::AVX_512bit); 4464 __ evpxorq(B5, B5, S4, Assembler::AVX_512bit); 4465 __ evpxorq(B6, B6, S5, Assembler::AVX_512bit); 4466 __ evpxorq(B7, B7, S6, Assembler::AVX_512bit); 4467 __ evmovdquq(IV, S7, Assembler::AVX_512bit); 4468 4469 __ evmovdquq(Address(to, 0 * 64), B0, Assembler::AVX_512bit); 4470 __ evmovdquq(Address(to, 1 * 64), B1, Assembler::AVX_512bit); 4471 __ evmovdquq(Address(to, 2 * 64), B2, Assembler::AVX_512bit); 4472 __ evmovdquq(Address(to, 3 * 64), B3, Assembler::AVX_512bit); 4473 __ evmovdquq(Address(to, 4 * 64), B4, Assembler::AVX_512bit); 4474 __ evmovdquq(Address(to, 5 * 64), B5, Assembler::AVX_512bit); 4475 __ evmovdquq(Address(to, 6 * 64), B6, Assembler::AVX_512bit); 4476 __ evmovdquq(Address(to, 7 * 64), B7, Assembler::AVX_512bit); 4477 __ leaq(to, Address(to, 8 * 64)); 4478 __ jmp(Loop); 4479 4480 __ BIND(Lcbc_dec_rem); 
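// Remainder path: fewer than 512 bytes left, handled one 16-byte block per iteration. (The evshufi64x2 below appears to bring the most recent ciphertext block, left in the top 128-bit lane of IV by the bulk loop's evalignq chain, down to lane 0 where the single-block loop uses it.)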
4481 __ evshufi64x2(IV, IV, IV, 0x03, Assembler::AVX_512bit); 4482 4483 __ BIND(Lcbc_dec_rem_loop); 4484 __ subl(len_reg, 16); 4485 __ jcc(Assembler::carrySet, Lcbc_dec_ret); 4486 4487 __ movdqu(S0, Address(from, 0)); 4488 __ evpxorq(B0, S0, RK1, Assembler::AVX_512bit); 4489 __ vaesdec(B0, B0, RK2, Assembler::AVX_512bit); 4490 __ vaesdec(B0, B0, RK3, Assembler::AVX_512bit); 4491 __ vaesdec(B0, B0, RK4, Assembler::AVX_512bit); 4492 __ vaesdec(B0, B0, RK5, Assembler::AVX_512bit); 4493 __ vaesdec(B0, B0, RK6, Assembler::AVX_512bit); 4494 __ vaesdec(B0, B0, RK7, Assembler::AVX_512bit); 4495 __ vaesdec(B0, B0, RK8, Assembler::AVX_512bit); 4496 __ vaesdec(B0, B0, RK9, Assembler::AVX_512bit); 4497 __ vaesdec(B0, B0, RK10, Assembler::AVX_512bit); 4498 __ cmpl(rounds, 44); 4499 __ jcc(Assembler::belowEqual, Lcbc_dec_rem_last); 4500 4501 __ vaesdec(B0, B0, RK11, Assembler::AVX_512bit); 4502 __ vaesdec(B0, B0, RK12, Assembler::AVX_512bit); 4503 __ cmpl(rounds, 52); 4504 __ jcc(Assembler::belowEqual, Lcbc_dec_rem_last); 4505 4506 __ vaesdec(B0, B0, RK13, Assembler::AVX_512bit); 4507 __ vaesdec(B0, B0, RK14, Assembler::AVX_512bit); 4508 4509 __ BIND(Lcbc_dec_rem_last); 4510 __ vaesdeclast(B0, B0, RK0, Assembler::AVX_512bit); 4511 4512 __ evpxorq(B0, B0, IV, Assembler::AVX_512bit); 4513 __ evmovdquq(IV, S0, Assembler::AVX_512bit); 4514 __ movdqu(Address(to, 0), B0); 4515 __ leaq(from, Address(from, 16)); 4516 __ leaq(to, Address(to, 16)); 4517 __ jmp(Lcbc_dec_rem_loop); 4518 4519 __ BIND(Lcbc_dec_ret); 4520 __ movdqu(Address(rvec, 0), IV); 4521 4522 // Zero out the round keys 4523 __ evpxorq(RK0, RK0, RK0, Assembler::AVX_512bit); 4524 __ evpxorq(RK1, RK1, RK1, Assembler::AVX_512bit); 4525 __ evpxorq(RK2, RK2, RK2, Assembler::AVX_512bit); 4526 __ evpxorq(RK3, RK3, RK3, Assembler::AVX_512bit); 4527 __ evpxorq(RK4, RK4, RK4, Assembler::AVX_512bit); 4528 __ evpxorq(RK5, RK5, RK5, Assembler::AVX_512bit); 4529 __ evpxorq(RK6, RK6, RK6, Assembler::AVX_512bit); 4530 __ evpxorq(RK7, RK7, RK7, Assembler::AVX_512bit); 4531 __ evpxorq(RK8, RK8, RK8, Assembler::AVX_512bit); 4532 __ evpxorq(RK9, RK9, RK9, Assembler::AVX_512bit); 4533 __ evpxorq(RK10, RK10, RK10, Assembler::AVX_512bit); 4534 __ cmpl(rounds, 44); 4535 __ jcc(Assembler::belowEqual, Lcbc_exit); 4536 __ evpxorq(RK11, RK11, RK11, Assembler::AVX_512bit); 4537 __ evpxorq(RK12, RK12, RK12, Assembler::AVX_512bit); 4538 __ cmpl(rounds, 52); 4539 __ jcc(Assembler::belowEqual, Lcbc_exit); 4540 __ evpxorq(RK13, RK13, RK13, Assembler::AVX_512bit); 4541 __ evpxorq(RK14, RK14, RK14, Assembler::AVX_512bit); 4542 4543 __ BIND(Lcbc_exit); 4544 __ pop(rbx); 4545 #ifdef _WIN64 4546 __ movl(rax, len_mem); 4547 #else 4548 __ pop(rax); // return length 4549 #endif 4550 __ leave(); // required for proper stackwalking of RuntimeStub frame 4551 __ ret(0); 4552 return start; 4553 } 4554 4555 // Polynomial x^128+x^127+x^126+x^121+1 4556 address ghash_polynomial_addr() { 4557 __ align(CodeEntryAlignment); 4558 StubCodeMark mark(this, "StubRoutines", "_ghash_poly_addr"); 4559 address start = __ pc(); 4560 __ emit_data64(0x0000000000000001, relocInfo::none); 4561 __ emit_data64(0xc200000000000000, relocInfo::none); 4562 return start; 4563 } 4564 4565 address ghash_shufflemask_addr() { 4566 __ align(CodeEntryAlignment); 4567 StubCodeMark mark(this, "StubRoutines", "_ghash_shuffmask_addr"); 4568 address start = __ pc(); 4569 __ emit_data64(0x0f0f0f0f0f0f0f0f, relocInfo::none); 4570 __ emit_data64(0x0f0f0f0f0f0f0f0f, relocInfo::none); 4571 return start; 4572 } 4573 4574 // Ghash single 
and multi block operations using AVX instructions 4575 address generate_avx_ghash_processBlocks() { 4576 __ align(CodeEntryAlignment); 4577 4578 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 4579 address start = __ pc(); 4580 4581 // arguments 4582 const Register state = c_rarg0; 4583 const Register htbl = c_rarg1; 4584 const Register data = c_rarg2; 4585 const Register blocks = c_rarg3; 4586 __ enter(); 4587 // Save state before entering routine 4588 __ avx_ghash(state, htbl, data, blocks); 4589 __ leave(); // required for proper stackwalking of RuntimeStub frame 4590 __ ret(0); 4591 return start; 4592 } 4593 4594 // byte swap x86 long 4595 address generate_ghash_long_swap_mask() { 4596 __ align(CodeEntryAlignment); 4597 StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask"); 4598 address start = __ pc(); 4599 __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none ); 4600 __ emit_data64(0x0706050403020100, relocInfo::none ); 4601 return start; 4602 } 4603 4604 // byte swap x86 byte array 4605 address generate_ghash_byte_swap_mask() { 4606 __ align(CodeEntryAlignment); 4607 StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask"); 4608 address start = __ pc(); 4609 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none ); 4610 __ emit_data64(0x0001020304050607, relocInfo::none ); 4611 return start; 4612 } 4613 4614 /* Single and multi-block ghash operations */ 4615 address generate_ghash_processBlocks() { 4616 __ align(CodeEntryAlignment); 4617 Label L_ghash_loop, L_exit; 4618 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 4619 address start = __ pc(); 4620 4621 const Register state = c_rarg0; 4622 const Register subkeyH = c_rarg1; 4623 const Register data = c_rarg2; 4624 const Register blocks = c_rarg3; 4625 4626 const XMMRegister xmm_temp0 = xmm0; 4627 const XMMRegister xmm_temp1 = xmm1; 4628 const XMMRegister xmm_temp2 = xmm2; 4629 const XMMRegister xmm_temp3 = xmm3; 4630 const XMMRegister xmm_temp4 = xmm4; 4631 const XMMRegister xmm_temp5 = xmm5; 4632 const XMMRegister xmm_temp6 = xmm6; 4633 const XMMRegister xmm_temp7 = xmm7; 4634 const XMMRegister xmm_temp8 = xmm8; 4635 const XMMRegister xmm_temp9 = xmm9; 4636 const XMMRegister xmm_temp10 = xmm10; 4637 4638 __ enter(); 4639 4640 __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr())); 4641 4642 __ movdqu(xmm_temp0, Address(state, 0)); 4643 __ pshufb(xmm_temp0, xmm_temp10); 4644 4645 4646 __ BIND(L_ghash_loop); 4647 __ movdqu(xmm_temp2, Address(data, 0)); 4648 __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr())); 4649 4650 __ movdqu(xmm_temp1, Address(subkeyH, 0)); 4651 __ pshufb(xmm_temp1, xmm_temp10); 4652 4653 __ pxor(xmm_temp0, xmm_temp2); 4654 4655 // 4656 // Multiply with the hash key 4657 // 4658 __ movdqu(xmm_temp3, xmm_temp0); 4659 __ pclmulqdq(xmm_temp3, xmm_temp1, 0); // xmm3 holds a0*b0 4660 __ movdqu(xmm_temp4, xmm_temp0); 4661 __ pclmulqdq(xmm_temp4, xmm_temp1, 16); // xmm4 holds a0*b1 4662 4663 __ movdqu(xmm_temp5, xmm_temp0); 4664 __ pclmulqdq(xmm_temp5, xmm_temp1, 1); // xmm5 holds a1*b0 4665 __ movdqu(xmm_temp6, xmm_temp0); 4666 __ pclmulqdq(xmm_temp6, xmm_temp1, 17); // xmm6 holds a1*b1 4667 4668 __ pxor(xmm_temp4, xmm_temp5); // xmm4 holds a0*b1 + a1*b0 4669 4670 __ movdqu(xmm_temp5, xmm_temp4); // move the contents of xmm4 to xmm5 4671 __ psrldq(xmm_temp4, 8); // shift by xmm4 64 bits to the right 4672 __ pslldq(xmm_temp5, 8); // shift by xmm5 64 bits to the left 4673 __ pxor(xmm_temp3, xmm_temp5); 4674 __ 
pxor(xmm_temp6, xmm_temp4); // Register pair <xmm6:xmm3> holds the result 4675 // of the carry-less multiplication of 4676 // xmm0 by xmm1. 4677 4678 // We shift the result of the multiplication by one bit position 4679 // to the left to compensate for the fact that the bits are reversed. 4680 __ movdqu(xmm_temp7, xmm_temp3); 4681 __ movdqu(xmm_temp8, xmm_temp6); 4682 __ pslld(xmm_temp3, 1); 4683 __ pslld(xmm_temp6, 1); 4684 __ psrld(xmm_temp7, 31); 4685 __ psrld(xmm_temp8, 31); 4686 __ movdqu(xmm_temp9, xmm_temp7); 4687 __ pslldq(xmm_temp8, 4); 4688 __ pslldq(xmm_temp7, 4); 4689 __ psrldq(xmm_temp9, 12); 4690 __ por(xmm_temp3, xmm_temp7); 4691 __ por(xmm_temp6, xmm_temp8); 4692 __ por(xmm_temp6, xmm_temp9); 4693 4694 // 4695 // First phase of the reduction 4696 // 4697 // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts 4698 // independently. 4699 __ movdqu(xmm_temp7, xmm_temp3); 4700 __ movdqu(xmm_temp8, xmm_temp3); 4701 __ movdqu(xmm_temp9, xmm_temp3); 4702 __ pslld(xmm_temp7, 31); // packed left shift, << 31 4703 __ pslld(xmm_temp8, 30); // packed left shift, << 30 4704 __ pslld(xmm_temp9, 25); // packed left shift, << 25 4705 __ pxor(xmm_temp7, xmm_temp8); // xor the shifted versions 4706 __ pxor(xmm_temp7, xmm_temp9); 4707 __ movdqu(xmm_temp8, xmm_temp7); 4708 __ pslldq(xmm_temp7, 12); 4709 __ psrldq(xmm_temp8, 4); 4710 __ pxor(xmm_temp3, xmm_temp7); // first phase of the reduction complete 4711 4712 // 4713 // Second phase of the reduction 4714 // 4715 // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these 4716 // shift operations. 4717 __ movdqu(xmm_temp2, xmm_temp3); 4718 __ movdqu(xmm_temp4, xmm_temp3); 4719 __ movdqu(xmm_temp5, xmm_temp3); 4720 __ psrld(xmm_temp2, 1); // packed right shift, >> 1 4721 __ psrld(xmm_temp4, 2); // packed right shift, >> 2 4722 __ psrld(xmm_temp5, 7); // packed right shift, >> 7 4723 __ pxor(xmm_temp2, xmm_temp4); // xor the shifted versions 4724 __ pxor(xmm_temp2, xmm_temp5); 4725 __ pxor(xmm_temp2, xmm_temp8); 4726 __ pxor(xmm_temp3, xmm_temp2); 4727 __ pxor(xmm_temp6, xmm_temp3); // the result is in xmm6 4728 4729 __ decrement(blocks); 4730 __ jcc(Assembler::zero, L_exit); 4731 __ movdqu(xmm_temp0, xmm_temp6); 4732 __ addptr(data, 16); 4733 __ jmp(L_ghash_loop); 4734 4735 __ BIND(L_exit); 4736 __ pshufb(xmm_temp6, xmm_temp10); // Byte swap 16-byte result 4737 __ movdqu(Address(state, 0), xmm_temp6); // store the result 4738 __ leave(); 4739 __ ret(0); 4740 return start; 4741 } 4742 4743 //base64 character set 4744 address base64_charset_addr() { 4745 __ align(CodeEntryAlignment); 4746 StubCodeMark mark(this, "StubRoutines", "base64_charset"); 4747 address start = __ pc(); 4748 __ emit_data64(0x0000004200000041, relocInfo::none); 4749 __ emit_data64(0x0000004400000043, relocInfo::none); 4750 __ emit_data64(0x0000004600000045, relocInfo::none); 4751 __ emit_data64(0x0000004800000047, relocInfo::none); 4752 __ emit_data64(0x0000004a00000049, relocInfo::none); 4753 __ emit_data64(0x0000004c0000004b, relocInfo::none); 4754 __ emit_data64(0x0000004e0000004d, relocInfo::none); 4755 __ emit_data64(0x000000500000004f, relocInfo::none); 4756 __ emit_data64(0x0000005200000051, relocInfo::none); 4757 __ emit_data64(0x0000005400000053, relocInfo::none); 4758 __ emit_data64(0x0000005600000055, relocInfo::none); 4759 __ emit_data64(0x0000005800000057, relocInfo::none); 4760 __ emit_data64(0x0000005a00000059, relocInfo::none); 4761 __ emit_data64(0x0000006200000061, relocInfo::none); 4762 __
emit_data64(0x0000006400000063, relocInfo::none); 4763 __ emit_data64(0x0000006600000065, relocInfo::none); 4764 __ emit_data64(0x0000006800000067, relocInfo::none); 4765 __ emit_data64(0x0000006a00000069, relocInfo::none); 4766 __ emit_data64(0x0000006c0000006b, relocInfo::none); 4767 __ emit_data64(0x0000006e0000006d, relocInfo::none); 4768 __ emit_data64(0x000000700000006f, relocInfo::none); 4769 __ emit_data64(0x0000007200000071, relocInfo::none); 4770 __ emit_data64(0x0000007400000073, relocInfo::none); 4771 __ emit_data64(0x0000007600000075, relocInfo::none); 4772 __ emit_data64(0x0000007800000077, relocInfo::none); 4773 __ emit_data64(0x0000007a00000079, relocInfo::none); 4774 __ emit_data64(0x0000003100000030, relocInfo::none); 4775 __ emit_data64(0x0000003300000032, relocInfo::none); 4776 __ emit_data64(0x0000003500000034, relocInfo::none); 4777 __ emit_data64(0x0000003700000036, relocInfo::none); 4778 __ emit_data64(0x0000003900000038, relocInfo::none); 4779 __ emit_data64(0x0000002f0000002b, relocInfo::none); 4780 return start; 4781 } 4782 4783 //base64 url character set 4784 address base64url_charset_addr() { 4785 __ align(CodeEntryAlignment); 4786 StubCodeMark mark(this, "StubRoutines", "base64url_charset"); 4787 address start = __ pc(); 4788 __ emit_data64(0x0000004200000041, relocInfo::none); 4789 __ emit_data64(0x0000004400000043, relocInfo::none); 4790 __ emit_data64(0x0000004600000045, relocInfo::none); 4791 __ emit_data64(0x0000004800000047, relocInfo::none); 4792 __ emit_data64(0x0000004a00000049, relocInfo::none); 4793 __ emit_data64(0x0000004c0000004b, relocInfo::none); 4794 __ emit_data64(0x0000004e0000004d, relocInfo::none); 4795 __ emit_data64(0x000000500000004f, relocInfo::none); 4796 __ emit_data64(0x0000005200000051, relocInfo::none); 4797 __ emit_data64(0x0000005400000053, relocInfo::none); 4798 __ emit_data64(0x0000005600000055, relocInfo::none); 4799 __ emit_data64(0x0000005800000057, relocInfo::none); 4800 __ emit_data64(0x0000005a00000059, relocInfo::none); 4801 __ emit_data64(0x0000006200000061, relocInfo::none); 4802 __ emit_data64(0x0000006400000063, relocInfo::none); 4803 __ emit_data64(0x0000006600000065, relocInfo::none); 4804 __ emit_data64(0x0000006800000067, relocInfo::none); 4805 __ emit_data64(0x0000006a00000069, relocInfo::none); 4806 __ emit_data64(0x0000006c0000006b, relocInfo::none); 4807 __ emit_data64(0x0000006e0000006d, relocInfo::none); 4808 __ emit_data64(0x000000700000006f, relocInfo::none); 4809 __ emit_data64(0x0000007200000071, relocInfo::none); 4810 __ emit_data64(0x0000007400000073, relocInfo::none); 4811 __ emit_data64(0x0000007600000075, relocInfo::none); 4812 __ emit_data64(0x0000007800000077, relocInfo::none); 4813 __ emit_data64(0x0000007a00000079, relocInfo::none); 4814 __ emit_data64(0x0000003100000030, relocInfo::none); 4815 __ emit_data64(0x0000003300000032, relocInfo::none); 4816 __ emit_data64(0x0000003500000034, relocInfo::none); 4817 __ emit_data64(0x0000003700000036, relocInfo::none); 4818 __ emit_data64(0x0000003900000038, relocInfo::none); 4819 __ emit_data64(0x0000005f0000002d, relocInfo::none); 4820 4821 return start; 4822 } 4823 4824 address base64_bswap_mask_addr() { 4825 __ align(CodeEntryAlignment); 4826 StubCodeMark mark(this, "StubRoutines", "bswap_mask_base64"); 4827 address start = __ pc(); 4828 __ emit_data64(0x0504038002010080, relocInfo::none); 4829 __ emit_data64(0x0b0a098008070680, relocInfo::none); 4830 __ emit_data64(0x0908078006050480, relocInfo::none); 4831 __ emit_data64(0x0f0e0d800c0b0a80, 
relocInfo::none); 4832 __ emit_data64(0x0605048003020180, relocInfo::none); 4833 __ emit_data64(0x0c0b0a8009080780, relocInfo::none); 4834 __ emit_data64(0x0504038002010080, relocInfo::none); 4835 __ emit_data64(0x0b0a098008070680, relocInfo::none); 4836 4837 return start; 4838 } 4839 4840 address base64_right_shift_mask_addr() { 4841 __ align(CodeEntryAlignment); 4842 StubCodeMark mark(this, "StubRoutines", "right_shift_mask"); 4843 address start = __ pc(); 4844 __ emit_data64(0x0006000400020000, relocInfo::none); 4845 __ emit_data64(0x0006000400020000, relocInfo::none); 4846 __ emit_data64(0x0006000400020000, relocInfo::none); 4847 __ emit_data64(0x0006000400020000, relocInfo::none); 4848 __ emit_data64(0x0006000400020000, relocInfo::none); 4849 __ emit_data64(0x0006000400020000, relocInfo::none); 4850 __ emit_data64(0x0006000400020000, relocInfo::none); 4851 __ emit_data64(0x0006000400020000, relocInfo::none); 4852 4853 return start; 4854 } 4855 4856 address base64_left_shift_mask_addr() { 4857 __ align(CodeEntryAlignment); 4858 StubCodeMark mark(this, "StubRoutines", "left_shift_mask"); 4859 address start = __ pc(); 4860 __ emit_data64(0x0000000200040000, relocInfo::none); 4861 __ emit_data64(0x0000000200040000, relocInfo::none); 4862 __ emit_data64(0x0000000200040000, relocInfo::none); 4863 __ emit_data64(0x0000000200040000, relocInfo::none); 4864 __ emit_data64(0x0000000200040000, relocInfo::none); 4865 __ emit_data64(0x0000000200040000, relocInfo::none); 4866 __ emit_data64(0x0000000200040000, relocInfo::none); 4867 __ emit_data64(0x0000000200040000, relocInfo::none); 4868 4869 return start; 4870 } 4871 4872 address base64_and_mask_addr() { 4873 __ align(CodeEntryAlignment); 4874 StubCodeMark mark(this, "StubRoutines", "and_mask"); 4875 address start = __ pc(); 4876 __ emit_data64(0x3f003f003f000000, relocInfo::none); 4877 __ emit_data64(0x3f003f003f000000, relocInfo::none); 4878 __ emit_data64(0x3f003f003f000000, relocInfo::none); 4879 __ emit_data64(0x3f003f003f000000, relocInfo::none); 4880 __ emit_data64(0x3f003f003f000000, relocInfo::none); 4881 __ emit_data64(0x3f003f003f000000, relocInfo::none); 4882 __ emit_data64(0x3f003f003f000000, relocInfo::none); 4883 __ emit_data64(0x3f003f003f000000, relocInfo::none); 4884 return start; 4885 } 4886 4887 address base64_gather_mask_addr() { 4888 __ align(CodeEntryAlignment); 4889 StubCodeMark mark(this, "StubRoutines", "gather_mask"); 4890 address start = __ pc(); 4891 __ emit_data64(0xffffffffffffffff, relocInfo::none); 4892 return start; 4893 } 4894 4895 // Code for generating Base64 encoding. 
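// The following is an illustrative scalar sketch (not part of the generated
// stub) of the mapping the encoder implements: every 3 input bytes become
// 4 output characters via 6-bit lookups into the charset tables above.
/* ** int bits = (b0 & 0xff) << 16 | (b1 & 0xff) << 8 | (b2 & 0xff);
   ** dst[0] = base64[(bits >>> 18) & 0x3f];
   ** dst[1] = base64[(bits >>> 12) & 0x3f];
   ** dst[2] = base64[(bits >>> 6) & 0x3f];
   ** dst[3] = base64[bits & 0x3f];
*/
// The vector paths below apply the same 6-bit extraction to 72 input bytes
// (L_process80) or 24 input bytes (L_process32) per iteration; b0..b2 and
// dst above are placeholders, not registers used by the stub.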
4896 // Intrinsic function prototype in Base64.java: 4897 // private void encodeBlock(byte[] src, int sp, int sl, byte[] dst, int dp, boolean isURL) { 4898 address generate_base64_encodeBlock() { 4899 __ align(CodeEntryAlignment); 4900 StubCodeMark mark(this, "StubRoutines", "implEncode"); 4901 address start = __ pc(); 4902 __ enter(); 4903 4904 // Save callee-saved registers before using them 4905 __ push(r12); 4906 __ push(r13); 4907 __ push(r14); 4908 __ push(r15); 4909 4910 // arguments 4911 const Register source = c_rarg0; // Source Array 4912 const Register start_offset = c_rarg1; // start offset 4913 const Register end_offset = c_rarg2; // end offset 4914 const Register dest = c_rarg3; // destination array 4915 4916 #ifndef _WIN64 4917 const Register dp = c_rarg4; // Position for writing to dest array 4918 const Register isURL = c_rarg5; // Base64 or URL character set 4919 #else 4920 const Address dp_mem(rbp, 6 * wordSize); // dp is passed on the stack on Win64 4921 const Address isURL_mem(rbp, 7 * wordSize); 4922 const Register isURL = r10; // pick the volatile windows register 4923 const Register dp = r12; 4924 __ movl(dp, dp_mem); 4925 __ movl(isURL, isURL_mem); 4926 #endif 4927 4928 const Register length = r14; 4929 Label L_process80, L_process32, L_process3, L_exit, L_processdata; 4930 4931 // calculate length from offsets 4932 __ movl(length, end_offset); 4933 __ subl(length, start_offset); 4934 __ cmpl(length, 0); 4935 __ jcc(Assembler::lessEqual, L_exit); 4936 4937 __ lea(r11, ExternalAddress(StubRoutines::x86::base64_charset_addr())); 4938 // check if the base64 charset (isURL=0) or the base64 url charset (isURL=1) needs to be loaded 4939 __ cmpl(isURL, 0); 4940 __ jcc(Assembler::equal, L_processdata); 4941 __ lea(r11, ExternalAddress(StubRoutines::x86::base64url_charset_addr())); 4942 4943 // load masks required for encoding data 4944 __ BIND(L_processdata); 4945 __ movdqu(xmm16, ExternalAddress(StubRoutines::x86::base64_gather_mask_addr())); 4946 // Set 64 bits of K register. 4947 __ evpcmpeqb(k3, xmm16, xmm16, Assembler::AVX_512bit); 4948 __ evmovdquq(xmm12, ExternalAddress(StubRoutines::x86::base64_bswap_mask_addr()), Assembler::AVX_256bit, r13); 4949 __ evmovdquq(xmm13, ExternalAddress(StubRoutines::x86::base64_right_shift_mask_addr()), Assembler::AVX_512bit, r13); 4950 __ evmovdquq(xmm14, ExternalAddress(StubRoutines::x86::base64_left_shift_mask_addr()), Assembler::AVX_512bit, r13); 4951 __ evmovdquq(xmm15, ExternalAddress(StubRoutines::x86::base64_and_mask_addr()), Assembler::AVX_512bit, r13); 4952 4953 // Vector Base64 implementation, producing 96 bytes of encoded data 4954 __ BIND(L_process80); 4955 __ cmpl(length, 80); 4956 __ jcc(Assembler::below, L_process32); 4957 __ evmovdquq(xmm0, Address(source, start_offset, Address::times_1, 0), Assembler::AVX_256bit); 4958 __ evmovdquq(xmm1, Address(source, start_offset, Address::times_1, 24), Assembler::AVX_256bit); 4959 __ evmovdquq(xmm2, Address(source, start_offset, Address::times_1, 48), Assembler::AVX_256bit); 4960 4961 //permute the input data in such a manner that we have continuity of the source 4962 __ vpermq(xmm3, xmm0, 148, Assembler::AVX_256bit); 4963 __ vpermq(xmm4, xmm1, 148, Assembler::AVX_256bit); 4964 __ vpermq(xmm5, xmm2, 148, Assembler::AVX_256bit); 4965 4966 //shuffle the input and group 3 bytes of data, adding 0 as the 4th byte.
4967 //we can deal with 12 bytes at a time in a 128 bit register 4968 __ vpshufb(xmm3, xmm3, xmm12, Assembler::AVX_256bit); 4969 __ vpshufb(xmm4, xmm4, xmm12, Assembler::AVX_256bit); 4970 __ vpshufb(xmm5, xmm5, xmm12, Assembler::AVX_256bit); 4971 4972 //convert byte to word. Each 128 bit register will have 6 bytes for processing 4973 __ vpmovzxbw(xmm3, xmm3, Assembler::AVX_512bit); 4974 __ vpmovzxbw(xmm4, xmm4, Assembler::AVX_512bit); 4975 __ vpmovzxbw(xmm5, xmm5, Assembler::AVX_512bit); 4976 4977 // Extract bits in the following pattern 6, 4+2, 2+4, 6 to convert 3, 8 bit numbers to 4, 6 bit numbers 4978 __ evpsrlvw(xmm0, xmm3, xmm13, Assembler::AVX_512bit); 4979 __ evpsrlvw(xmm1, xmm4, xmm13, Assembler::AVX_512bit); 4980 __ evpsrlvw(xmm2, xmm5, xmm13, Assembler::AVX_512bit); 4981 4982 __ evpsllvw(xmm3, xmm3, xmm14, Assembler::AVX_512bit); 4983 __ evpsllvw(xmm4, xmm4, xmm14, Assembler::AVX_512bit); 4984 __ evpsllvw(xmm5, xmm5, xmm14, Assembler::AVX_512bit); 4985 4986 __ vpsrlq(xmm0, xmm0, 8, Assembler::AVX_512bit); 4987 __ vpsrlq(xmm1, xmm1, 8, Assembler::AVX_512bit); 4988 __ vpsrlq(xmm2, xmm2, 8, Assembler::AVX_512bit); 4989 4990 __ vpsllq(xmm3, xmm3, 8, Assembler::AVX_512bit); 4991 __ vpsllq(xmm4, xmm4, 8, Assembler::AVX_512bit); 4992 __ vpsllq(xmm5, xmm5, 8, Assembler::AVX_512bit); 4993 4994 __ vpandq(xmm3, xmm3, xmm15, Assembler::AVX_512bit); 4995 __ vpandq(xmm4, xmm4, xmm15, Assembler::AVX_512bit); 4996 __ vpandq(xmm5, xmm5, xmm15, Assembler::AVX_512bit); 4997 4998 // Get the final 4*6 bits base64 encoding 4999 __ vporq(xmm3, xmm3, xmm0, Assembler::AVX_512bit); 5000 __ vporq(xmm4, xmm4, xmm1, Assembler::AVX_512bit); 5001 __ vporq(xmm5, xmm5, xmm2, Assembler::AVX_512bit); 5002 5003 // Shift 5004 __ vpsrlq(xmm3, xmm3, 8, Assembler::AVX_512bit); 5005 __ vpsrlq(xmm4, xmm4, 8, Assembler::AVX_512bit); 5006 __ vpsrlq(xmm5, xmm5, 8, Assembler::AVX_512bit); 5007 5008 // look up 6 bits in the base64 character set to fetch the encoding 5009 // we are converting word to dword as gather instructions need dword indices for looking up encoding 5010 __ vextracti64x4(xmm6, xmm3, 0); 5011 __ vpmovzxwd(xmm0, xmm6, Assembler::AVX_512bit); 5012 __ vextracti64x4(xmm6, xmm3, 1); 5013 __ vpmovzxwd(xmm1, xmm6, Assembler::AVX_512bit); 5014 5015 __ vextracti64x4(xmm6, xmm4, 0); 5016 __ vpmovzxwd(xmm2, xmm6, Assembler::AVX_512bit); 5017 __ vextracti64x4(xmm6, xmm4, 1); 5018 __ vpmovzxwd(xmm3, xmm6, Assembler::AVX_512bit); 5019 5020 __ vextracti64x4(xmm4, xmm5, 0); 5021 __ vpmovzxwd(xmm6, xmm4, Assembler::AVX_512bit); 5022 5023 __ vextracti64x4(xmm4, xmm5, 1); 5024 __ vpmovzxwd(xmm7, xmm4, Assembler::AVX_512bit); 5025 5026 __ kmovql(k2, k3); 5027 __ evpgatherdd(xmm4, k2, Address(r11, xmm0, Address::times_4, 0), Assembler::AVX_512bit); 5028 __ kmovql(k2, k3); 5029 __ evpgatherdd(xmm5, k2, Address(r11, xmm1, Address::times_4, 0), Assembler::AVX_512bit); 5030 __ kmovql(k2, k3); 5031 __ evpgatherdd(xmm8, k2, Address(r11, xmm2, Address::times_4, 0), Assembler::AVX_512bit); 5032 __ kmovql(k2, k3); 5033 __ evpgatherdd(xmm9, k2, Address(r11, xmm3, Address::times_4, 0), Assembler::AVX_512bit); 5034 __ kmovql(k2, k3); 5035 __ evpgatherdd(xmm10, k2, Address(r11, xmm6, Address::times_4, 0), Assembler::AVX_512bit); 5036 __ kmovql(k2, k3); 5037 __ evpgatherdd(xmm11, k2, Address(r11, xmm7, Address::times_4, 0), Assembler::AVX_512bit); 5038 5039 //Down convert dword to byte. 
Final output is 16*6 = 96 bytes long 5040 __ evpmovdb(Address(dest, dp, Address::times_1, 0), xmm4, Assembler::AVX_512bit); 5041 __ evpmovdb(Address(dest, dp, Address::times_1, 16), xmm5, Assembler::AVX_512bit); 5042 __ evpmovdb(Address(dest, dp, Address::times_1, 32), xmm8, Assembler::AVX_512bit); 5043 __ evpmovdb(Address(dest, dp, Address::times_1, 48), xmm9, Assembler::AVX_512bit); 5044 __ evpmovdb(Address(dest, dp, Address::times_1, 64), xmm10, Assembler::AVX_512bit); 5045 __ evpmovdb(Address(dest, dp, Address::times_1, 80), xmm11, Assembler::AVX_512bit); 5046 5047 __ addq(dest, 96); 5048 __ addq(source, 72); 5049 __ subq(length, 72); 5050 __ jmp(L_process80); 5051 5052 // Vector Base64 implementation generating 32 bytes of encoded data 5053 __ BIND(L_process32); 5054 __ cmpl(length, 32); 5055 __ jcc(Assembler::below, L_process3); 5056 __ evmovdquq(xmm0, Address(source, start_offset), Assembler::AVX_256bit); 5057 __ vpermq(xmm0, xmm0, 148, Assembler::AVX_256bit); 5058 __ vpshufb(xmm6, xmm0, xmm12, Assembler::AVX_256bit); 5059 __ vpmovzxbw(xmm6, xmm6, Assembler::AVX_512bit); 5060 __ evpsrlvw(xmm2, xmm6, xmm13, Assembler::AVX_512bit); 5061 __ evpsllvw(xmm3, xmm6, xmm14, Assembler::AVX_512bit); 5062 5063 __ vpsrlq(xmm2, xmm2, 8, Assembler::AVX_512bit); 5064 __ vpsllq(xmm3, xmm3, 8, Assembler::AVX_512bit); 5065 __ vpandq(xmm3, xmm3, xmm15, Assembler::AVX_512bit); 5066 __ vporq(xmm1, xmm2, xmm3, Assembler::AVX_512bit); 5067 __ vpsrlq(xmm1, xmm1, 8, Assembler::AVX_512bit); 5068 __ vextracti64x4(xmm9, xmm1, 0); 5069 __ vpmovzxwd(xmm6, xmm9, Assembler::AVX_512bit); 5070 __ vextracti64x4(xmm9, xmm1, 1); 5071 __ vpmovzxwd(xmm5, xmm9, Assembler::AVX_512bit); 5072 __ kmovql(k2, k3); 5073 __ evpgatherdd(xmm8, k2, Address(r11, xmm6, Address::times_4, 0), Assembler::AVX_512bit); 5074 __ kmovql(k2, k3); 5075 __ evpgatherdd(xmm10, k2, Address(r11, xmm5, Address::times_4, 0), Assembler::AVX_512bit); 5076 __ evpmovdb(Address(dest, dp, Address::times_1, 0), xmm8, Assembler::AVX_512bit); 5077 __ evpmovdb(Address(dest, dp, Address::times_1, 16), xmm10, Assembler::AVX_512bit); 5078 __ subq(length, 24); 5079 __ addq(dest, 32); 5080 __ addq(source, 24); 5081 __ jmp(L_process32); 5082 5083 // Scalar data processing takes 3 bytes at a time and produces 4 bytes of encoded data 5084 /* This code corresponds to the scalar version of the following snippet in Base64.java 5085 ** int bits = (src[sp0++] & 0xff) << 16 | (src[sp0++] & 0xff) << 8 | (src[sp0++] & 0xff); 5086 ** dst[dp0++] = (byte)base64[(bits >>> 18) & 0x3f]; 5087 ** dst[dp0++] = (byte)base64[(bits >>> 12) & 0x3f]; 5088 ** dst[dp0++] = (byte)base64[(bits >>> 6) & 0x3f]; 5089 ** dst[dp0++] = (byte)base64[bits & 0x3f];*/ 5090 __ BIND(L_process3); 5091 __ cmpl(length, 3); 5092 __ jcc(Assembler::below, L_exit); 5093 // Read 1 byte at a time 5094 __ movzbl(rax, Address(source, start_offset)); 5095 __ shll(rax, 0x10); 5096 __ movl(r15, rax); 5097 __ movzbl(rax, Address(source, start_offset, Address::times_1, 1)); 5098 __ shll(rax, 0x8); 5099 __ movzwl(rax, rax); 5100 __ orl(r15, rax); 5101 __ movzbl(rax, Address(source, start_offset, Address::times_1, 2)); 5102 __ orl(rax, r15); 5103 // Save 3 bytes read in r15 5104 __ movl(r15, rax); 5105 __ shrl(rax, 0x12); 5106 __ andl(rax, 0x3f); 5107 // rax contains the index, r11 contains the base64 lookup table 5108 __ movb(rax, Address(r11, rax, Address::times_4)); 5109 // Write the encoded byte to destination 5110 __ movb(Address(dest, dp, Address::times_1, 0), rax); 5111 __ movl(rax, r15); 5112 __ shrl(rax, 0xc); 5113 __
andl(rax, 0x3f); 5114 __ movb(rax, Address(r11, rax, Address::times_4)); 5115 __ movb(Address(dest, dp, Address::times_1, 1), rax); 5116 __ movl(rax, r15); 5117 __ shrl(rax, 0x6); 5118 __ andl(rax, 0x3f); 5119 __ movb(rax, Address(r11, rax, Address::times_4)); 5120 __ movb(Address(dest, dp, Address::times_1, 2), rax); 5121 __ movl(rax, r15); 5122 __ andl(rax, 0x3f); 5123 __ movb(rax, Address(r11, rax, Address::times_4)); 5124 __ movb(Address(dest, dp, Address::times_1, 3), rax); 5125 __ subl(length, 3); 5126 __ addq(dest, 4); 5127 __ addq(source, 3); 5128 __ jmp(L_process3); 5129 __ BIND(L_exit); 5130 __ pop(r15); 5131 __ pop(r14); 5132 __ pop(r13); 5133 __ pop(r12); 5134 __ leave(); 5135 __ ret(0); 5136 return start; 5137 } 5138 5139 /** 5140 * Arguments: 5141 * 5142 * Inputs: 5143 * c_rarg0 - int crc 5144 * c_rarg1 - byte* buf 5145 * c_rarg2 - int length 5146 * 5147 * Output: 5148 * rax - int crc result 5149 */ 5150 address generate_updateBytesCRC32() { 5151 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions"); 5152 5153 __ align(CodeEntryAlignment); 5154 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32"); 5155 5156 address start = __ pc(); 5157 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 5158 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 5159 // rscratch1: r10 5160 const Register crc = c_rarg0; // crc 5161 const Register buf = c_rarg1; // source java byte array address 5162 const Register len = c_rarg2; // length 5163 const Register table = c_rarg3; // crc_table address (reuse register) 5164 const Register tmp1 = r11; 5165 const Register tmp2 = r10; 5166 assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax); 5167 5168 BLOCK_COMMENT("Entry:"); 5169 __ enter(); // required for proper stackwalking of RuntimeStub frame 5170 5171 if (VM_Version::supports_sse4_1() && VM_Version::supports_avx512_vpclmulqdq() && 5172 VM_Version::supports_avx512bw() && 5173 VM_Version::supports_avx512vl()) { 5174 __ kernel_crc32_avx512(crc, buf, len, table, tmp1, tmp2); 5175 } else { 5176 __ kernel_crc32(crc, buf, len, table, tmp1); 5177 } 5178 5179 __ movl(rax, crc); 5180 __ vzeroupper(); 5181 __ leave(); // required for proper stackwalking of RuntimeStub frame 5182 __ ret(0); 5183 5184 return start; 5185 } 5186 5187 /** 5188 * Arguments: 5189 * 5190 * Inputs: 5191 * c_rarg0 - int crc 5192 * c_rarg1 - byte* buf 5193 * c_rarg2 - long length 5194 * c_rarg3 - table_start - optional (present only when doing a library_call, 5195 * not used by x86 algorithm) 5196 * 5197 * Output: 5198 * rax - int crc result 5199 */ 5200 address generate_updateBytesCRC32C(bool is_pclmulqdq_supported) { 5201 assert(UseCRC32CIntrinsics, "need SSE4_2"); 5202 __ align(CodeEntryAlignment); 5203 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C"); 5204 address start = __ pc(); 5205 //reg.arg int#0 int#1 int#2 int#3 int#4 int#5 float regs 5206 //Windows RCX RDX R8 R9 none none XMM0..XMM3 5207 //Lin / Sol RDI RSI RDX RCX R8 R9 XMM0..XMM7 5208 const Register crc = c_rarg0; // crc 5209 const Register buf = c_rarg1; // source java byte array address 5210 const Register len = c_rarg2; // length 5211 const Register a = rax; 5212 const Register j = r9; 5213 const Register k = r10; 5214 const Register l = r11; 5215 #ifdef _WIN64 5216 const Register y = rdi; 5217 const Register z = rsi; 5218 #else 5219 const Register y = rcx; 5220 const Register z = r8; 5221 #endif 5222 assert_different_registers(crc, buf, len, a, j, k, l, y, z); 5223 5224 BLOCK_COMMENT("Entry:"); 5225 __ enter(); // required
for proper stackwalking of RuntimeStub frame 5226 #ifdef _WIN64 5227 __ push(y); 5228 __ push(z); 5229 #endif 5230 __ crc32c_ipl_alg2_alt2(crc, buf, len, 5231 a, j, k, 5232 l, y, z, 5233 c_farg0, c_farg1, c_farg2, 5234 is_pclmulqdq_supported); 5235 __ movl(rax, crc); 5236 #ifdef _WIN64 5237 __ pop(z); 5238 __ pop(y); 5239 #endif 5240 __ vzeroupper(); 5241 __ leave(); // required for proper stackwalking of RuntimeStub frame 5242 __ ret(0); 5243 5244 return start; 5245 } 5246 5247 /** 5248 * Arguments: 5249 * 5250 * Input: 5251 * c_rarg0 - x address 5252 * c_rarg1 - x length 5253 * c_rarg2 - y address 5254 * c_rarg3 - y length 5255 * not Win64 5256 * c_rarg4 - z address 5257 * c_rarg5 - z length 5258 * Win64 5259 * rsp+40 - z address 5260 * rsp+48 - z length 5261 */ 5262 address generate_multiplyToLen() { 5263 __ align(CodeEntryAlignment); 5264 StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); 5265 5266 address start = __ pc(); 5267 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 5268 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 5269 const Register x = rdi; 5270 const Register xlen = rax; 5271 const Register y = rsi; 5272 const Register ylen = rcx; 5273 const Register z = r8; 5274 const Register zlen = r11; 5275 5276 // Next registers will be saved on stack in multiply_to_len(). 5277 const Register tmp1 = r12; 5278 const Register tmp2 = r13; 5279 const Register tmp3 = r14; 5280 const Register tmp4 = r15; 5281 const Register tmp5 = rbx; 5282 5283 BLOCK_COMMENT("Entry:"); 5284 __ enter(); // required for proper stackwalking of RuntimeStub frame 5285 5286 #ifndef _WIN64 5287 __ movptr(zlen, r9); // Save r9 in r11 - zlen 5288 #endif 5289 setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx 5290 // ylen => rcx, z => r8, zlen => r11 5291 // r9 and r10 may be used to save non-volatile registers 5292 #ifdef _WIN64 5293 // last 2 arguments (#4, #5) are on stack on Win64 5294 __ movptr(z, Address(rsp, 6 * wordSize)); 5295 __ movptr(zlen, Address(rsp, 7 * wordSize)); 5296 #endif 5297 5298 __ movptr(xlen, rsi); 5299 __ movptr(y, rdx); 5300 __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5); 5301 5302 restore_arg_regs(); 5303 5304 __ leave(); // required for proper stackwalking of RuntimeStub frame 5305 __ ret(0); 5306 5307 return start; 5308 } 5309 5310 /** 5311 * Arguments: 5312 * 5313 * Input: 5314 * c_rarg0 - obja address 5315 * c_rarg1 - objb address 5316 * c_rarg2 - length length 5317 * c_rarg3 - scale log2_array_indxscale 5318 * 5319 * Output: 5320 * rax - int: >= 0 mismatched index, < 0 bitwise complement of tail 5321 */ 5322 address generate_vectorizedMismatch() { 5323 __ align(CodeEntryAlignment); 5324 StubCodeMark mark(this, "StubRoutines", "vectorizedMismatch"); 5325 address start = __ pc(); 5326 5327 BLOCK_COMMENT("Entry:"); 5328 __ enter(); 5329 5330 #ifdef _WIN64 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 5331 const Register scale = c_rarg0; //rcx, will exchange with r9 5332 const Register objb = c_rarg1; //rdx 5333 const Register length = c_rarg2; //r8 5334 const Register obja = c_rarg3; //r9 5335 __ xchgq(obja, scale); //now obja and scale contain the correct contents 5336 5337 const Register tmp1 = r10; 5338 const Register tmp2 = r11; 5339 #endif 5340 #ifndef _WIN64 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
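// For reference, a scalar model of the contract this stub implements (a
// sketch mirroring jdk.internal.util.ArraysSupport.vectorizedMismatch; the
// names below are illustrative, not the stub's registers). Elements are
// 1 << scale bytes wide; "tail" is the count the vector loop did not check:
/* ** for (int i = 0; i < length; i++) {
   **   if (obja[i] != objb[i]) return i;   // rax >= 0: first mismatched index
   ** }
   ** return ~tail;                         // rax < 0: ~(unchecked tail count)
*/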
5341 const Register obja = c_rarg0; //U:rdi 5342 const Register objb = c_rarg1; //U:rsi 5343 const Register length = c_rarg2; //U:rdx 5344 const Register scale = c_rarg3; //U:rcx 5345 const Register tmp1 = r8; 5346 const Register tmp2 = r9; 5347 #endif 5348 const Register result = rax; //return value 5349 const XMMRegister vec0 = xmm0; 5350 const XMMRegister vec1 = xmm1; 5351 const XMMRegister vec2 = xmm2; 5352 5353 __ vectorized_mismatch(obja, objb, length, scale, result, tmp1, tmp2, vec0, vec1, vec2); 5354 5355 __ vzeroupper(); 5356 __ leave(); 5357 __ ret(0); 5358 5359 return start; 5360 } 5361 5362 /** 5363 * Arguments: 5364 * 5365 * Input: 5366 * c_rarg0 - x address 5367 * c_rarg1 - x length 5368 * c_rarg2 - z address 5369 * c_rarg3 - z length 5370 * 5371 */ 5372 address generate_squareToLen() { 5373 5374 __ align(CodeEntryAlignment); 5375 StubCodeMark mark(this, "StubRoutines", "squareToLen"); 5376 5377 address start = __ pc(); 5378 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 5379 // Unix: rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...) 5380 const Register x = rdi; 5381 const Register len = rsi; 5382 const Register z = r8; 5383 const Register zlen = rcx; 5384 5385 const Register tmp1 = r12; 5386 const Register tmp2 = r13; 5387 const Register tmp3 = r14; 5388 const Register tmp4 = r15; 5389 const Register tmp5 = rbx; 5390 5391 BLOCK_COMMENT("Entry:"); 5392 __ enter(); // required for proper stackwalking of RuntimeStub frame 5393 5394 setup_arg_regs(4); // x => rdi, len => rsi, z => rdx 5395 // zlen => rcx 5396 // r9 and r10 may be used to save non-volatile registers 5397 __ movptr(r8, rdx); 5398 __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 5399 5400 restore_arg_regs(); 5401 5402 __ leave(); // required for proper stackwalking of RuntimeStub frame 5403 __ ret(0); 5404 5405 return start; 5406 } 5407 5408 address generate_method_entry_barrier() { 5409 __ align(CodeEntryAlignment); 5410 StubCodeMark mark(this, "StubRoutines", "nmethod_entry_barrier"); 5411 5412 Label deoptimize_label; 5413 5414 address start = __ pc(); 5415 5416 __ push(-1); // cookie, this is used for writing the new rsp when deoptimizing 5417 5418 BLOCK_COMMENT("Entry:"); 5419 __ enter(); // save rbp 5420 5421 // Save c_rarg0, because we want to use that value. 5422 // We could do without it but then we depend on the number of slots used by pusha 5423 __ push(c_rarg0); 5424 5425 __ lea(c_rarg0, Address(rsp, wordSize * 3)); // 1 for cookie, 1 for rbp, 1 for c_rarg0 - this should be the return address 5426 5427 __ pusha(); 5428 5429 // The method may have floats as arguments, and we must spill them before calling 5430 // the VM runtime.
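// Java's calling convention passes the first 8 floating-point arguments in
// xmm0-xmm7 (asserted below), and those registers are not preserved across
// the call_VM_leaf that follows, so we reserve
//   xmm_spill_size = (2 * wordSize) * n_float_register_parameters_j
//                  = 16 * 8 = 128 bytes
// and spill all eight registers around the call.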
5431 assert(Argument::n_float_register_parameters_j == 8, "Assumption"); 5432 const int xmm_size = wordSize * 2; 5433 const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j; 5434 __ subptr(rsp, xmm_spill_size); 5435 __ movdqu(Address(rsp, xmm_size * 7), xmm7); 5436 __ movdqu(Address(rsp, xmm_size * 6), xmm6); 5437 __ movdqu(Address(rsp, xmm_size * 5), xmm5); 5438 __ movdqu(Address(rsp, xmm_size * 4), xmm4); 5439 __ movdqu(Address(rsp, xmm_size * 3), xmm3); 5440 __ movdqu(Address(rsp, xmm_size * 2), xmm2); 5441 __ movdqu(Address(rsp, xmm_size * 1), xmm1); 5442 __ movdqu(Address(rsp, xmm_size * 0), xmm0); 5443 5444 __ call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(address*)>(BarrierSetNMethod::nmethod_stub_entry_barrier)), 1); 5445 5446 __ movdqu(xmm0, Address(rsp, xmm_size * 0)); 5447 __ movdqu(xmm1, Address(rsp, xmm_size * 1)); 5448 __ movdqu(xmm2, Address(rsp, xmm_size * 2)); 5449 __ movdqu(xmm3, Address(rsp, xmm_size * 3)); 5450 __ movdqu(xmm4, Address(rsp, xmm_size * 4)); 5451 __ movdqu(xmm5, Address(rsp, xmm_size * 5)); 5452 __ movdqu(xmm6, Address(rsp, xmm_size * 6)); 5453 __ movdqu(xmm7, Address(rsp, xmm_size * 7)); 5454 __ addptr(rsp, xmm_spill_size); 5455 5456 __ cmpl(rax, 1); // 1 means deoptimize 5457 __ jcc(Assembler::equal, deoptimize_label); 5458 5459 __ popa(); 5460 __ pop(c_rarg0); 5461 5462 __ leave(); 5463 5464 __ addptr(rsp, 1 * wordSize); // cookie 5465 __ ret(0); 5466 5467 5468 __ BIND(deoptimize_label); 5469 5470 __ popa(); 5471 __ pop(c_rarg0); 5472 5473 __ leave(); 5474 5475 // This can be taken out, but is good for verification purposes. Getting a SIGSEGV 5476 // here while still having a correct stack is valuable 5477 __ testptr(rsp, Address(rsp, 0)); 5478 5479 __ movptr(rsp, Address(rsp, 0)); // new rsp was written in the barrier 5480 __ jmp(Address(rsp, -1 * wordSize)); // jmp target should be the caller's verified_entry_point 5481 5482 return start; 5483 } 5484 5485 /** 5486 * Arguments: 5487 * 5488 * Input: 5489 * c_rarg0 - out address 5490 * c_rarg1 - in address 5491 * c_rarg2 - offset 5492 * c_rarg3 - len 5493 * not Win64 5494 * c_rarg4 - k 5495 * Win64 5496 * rsp+40 - k 5497 */ 5498 address generate_mulAdd() { 5499 __ align(CodeEntryAlignment); 5500 StubCodeMark mark(this, "StubRoutines", "mulAdd"); 5501 5502 address start = __ pc(); 5503 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 5504 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 5505 const Register out = rdi; 5506 const Register in = rsi; 5507 const Register offset = r11; 5508 const Register len = rcx; 5509 const Register k = r8; 5510 5511 // The following registers will be saved on the stack in mul_add(); the sketch below shows what mul_add computes.
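// Scalar sketch of the operation (mirrors BigInteger.implMulAdd, the Java
// method this stub intrinsifies; variable names here are illustrative, not
// the stub's registers): out[] += in[] * k over 32-bit words, carry returned.
/* ** long kLong = k & 0xffffffffL, carry = 0;
   ** for (int j = len - 1; j >= 0; j--) {
   **   long product = (in[j] & 0xffffffffL) * kLong + (out[offset] & 0xffffffffL) + carry;
   **   out[offset--] = (int)product;
   **   carry = product >>> 32;
   ** }
   ** return (int)carry;                    // returned in rax
*/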
5512 const Register tmp1 = r12; 5513 const Register tmp2 = r13; 5514 const Register tmp3 = r14; 5515 const Register tmp4 = r15; 5516 const Register tmp5 = rbx; 5517 5518 BLOCK_COMMENT("Entry:"); 5519 __ enter(); // required for proper stackwalking of RuntimeStub frame 5520 5521 setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx 5522 // len => rcx, k => r8 5523 // r9 and r10 may be used to save non-volatile registers 5524 #ifdef _WIN64 5525 // last argument is on stack on Win64 5526 __ movl(k, Address(rsp, 6 * wordSize)); 5527 #endif 5528 __ movptr(r11, rdx); // move offset in rdx to offset(r11) 5529 __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 5530 5531 restore_arg_regs(); 5532 5533 __ leave(); // required for proper stackwalking of RuntimeStub frame 5534 __ ret(0); 5535 5536 return start; 5537 } 5538 5539 address generate_bigIntegerRightShift() { 5540 __ align(CodeEntryAlignment); 5541 StubCodeMark mark(this, "StubRoutines", "bigIntegerRightShiftWorker"); 5542 5543 address start = __ pc(); 5544 Label Shift512Loop, ShiftTwo, ShiftTwoLoop, ShiftOne, Exit; 5545 // For Unix, the arguments are as follows: rdi, rsi, rdx, rcx, r8. 5546 const Register newArr = rdi; 5547 const Register oldArr = rsi; 5548 const Register newIdx = rdx; 5549 const Register shiftCount = rcx; // It was intentional to have shiftCount in rcx since it is used implicitly for shift. 5550 const Register totalNumIter = r8; 5551 5552 // For windows, we use r9 and r10 as temps to save rdi and rsi. Thus we cannot allocate them for our temps. 5553 // For everything else, we prefer using r9 and r10 since we do not have to save them before use. 5554 const Register tmp1 = r11; // Caller save. 5555 const Register tmp2 = rax; // Caller save. 5556 const Register tmp3 = WINDOWS_ONLY(r12) NOT_WINDOWS(r9); // Windows: Callee save. Linux: Caller save. 5557 const Register tmp4 = WINDOWS_ONLY(r13) NOT_WINDOWS(r10); // Windows: Callee save. Linux: Caller save. 5558 const Register tmp5 = r14; // Callee save. 5559 const Register tmp6 = r15; 5560 5561 const XMMRegister x0 = xmm0; 5562 const XMMRegister x1 = xmm1; 5563 const XMMRegister x2 = xmm2; 5564 5565 BLOCK_COMMENT("Entry:"); 5566 __ enter(); // required for proper stackwalking of RuntimeStub frame 5567 5568 #ifdef _WINDOWS 5569 setup_arg_regs(4); 5570 // For windows, since last argument is on stack, we need to move it to the appropriate register. 5571 __ movl(totalNumIter, Address(rsp, 6 * wordSize)); 5572 // Save callee save registers. 5573 __ push(tmp3); 5574 __ push(tmp4); 5575 #endif 5576 __ push(tmp5); 5577 5578 // Rename temps used throughout the code. 5579 const Register idx = tmp1; 5580 const Register nIdx = tmp2; 5581 5582 __ xorl(idx, idx); 5583 5584 // Start right shift from end of the array. 
5585 // For example, if #iteration = 4 and newIdx = 1 5586 // then dest[4] = src[4] >>> shiftCount | src[3] << (32 - shiftCount) 5587 // if #iteration = 4 and newIdx = 0 5588 // then dest[3] = src[4] >>> shiftCount | src[3] << (32 - shiftCount) 5589 __ movl(idx, totalNumIter); 5590 __ movl(nIdx, idx); 5591 __ addl(nIdx, newIdx); 5592 5593 // If vectorization is enabled, check if the number of iterations is at least 64. 5594 // If not, go to ShiftTwo, which processes 2 iterations at a time 5595 if (VM_Version::supports_avx512_vbmi2()) { 5596 __ cmpptr(totalNumIter, (AVX3Threshold/64)); 5597 __ jcc(Assembler::less, ShiftTwo); 5598 5599 if (AVX3Threshold < 16 * 64) { 5600 __ cmpl(totalNumIter, 16); 5601 __ jcc(Assembler::less, ShiftTwo); 5602 } 5603 __ evpbroadcastd(x0, shiftCount, Assembler::AVX_512bit); 5604 __ subl(idx, 16); 5605 __ subl(nIdx, 16); 5606 __ BIND(Shift512Loop); 5607 __ evmovdqul(x2, Address(oldArr, idx, Address::times_4, 4), Assembler::AVX_512bit); 5608 __ evmovdqul(x1, Address(oldArr, idx, Address::times_4), Assembler::AVX_512bit); 5609 __ vpshrdvd(x2, x1, x0, Assembler::AVX_512bit); 5610 __ evmovdqul(Address(newArr, nIdx, Address::times_4), x2, Assembler::AVX_512bit); 5611 __ subl(nIdx, 16); 5612 __ subl(idx, 16); 5613 __ jcc(Assembler::greaterEqual, Shift512Loop); 5614 __ addl(idx, 16); 5615 __ addl(nIdx, 16); 5616 } 5617 __ BIND(ShiftTwo); 5618 __ cmpl(idx, 2); 5619 __ jcc(Assembler::less, ShiftOne); 5620 __ subl(idx, 2); 5621 __ subl(nIdx, 2); 5622 __ BIND(ShiftTwoLoop); 5623 __ movl(tmp5, Address(oldArr, idx, Address::times_4, 8)); 5624 __ movl(tmp4, Address(oldArr, idx, Address::times_4, 4)); 5625 __ movl(tmp3, Address(oldArr, idx, Address::times_4)); 5626 __ shrdl(tmp5, tmp4); 5627 __ shrdl(tmp4, tmp3); 5628 __ movl(Address(newArr, nIdx, Address::times_4, 4), tmp5); 5629 __ movl(Address(newArr, nIdx, Address::times_4), tmp4); 5630 __ subl(nIdx, 2); 5631 __ subl(idx, 2); 5632 __ jcc(Assembler::greaterEqual, ShiftTwoLoop); 5633 __ addl(idx, 2); 5634 __ addl(nIdx, 2); 5635 5636 // Do the last iteration 5637 __ BIND(ShiftOne); 5638 __ cmpl(idx, 1); 5639 __ jcc(Assembler::less, Exit); 5640 __ subl(idx, 1); 5641 __ subl(nIdx, 1); 5642 __ movl(tmp4, Address(oldArr, idx, Address::times_4, 4)); 5643 __ movl(tmp3, Address(oldArr, idx, Address::times_4)); 5644 __ shrdl(tmp4, tmp3); 5645 __ movl(Address(newArr, nIdx, Address::times_4), tmp4); 5646 __ BIND(Exit); 5647 // Restore callee save registers. 5648 __ pop(tmp5); 5649 #ifdef _WINDOWS 5650 __ pop(tmp4); 5651 __ pop(tmp3); 5652 restore_arg_regs(); 5653 #endif 5654 __ leave(); // required for proper stackwalking of RuntimeStub frame 5655 __ ret(0); 5656 return start; 5657 } 5658 5659 /** 5660 * Arguments: 5661 * 5662 * Input: 5663 * c_rarg0 - newArr address 5664 * c_rarg1 - oldArr address 5665 * c_rarg2 - newIdx 5666 * c_rarg3 - shiftCount 5667 * not Win64 5668 * c_rarg4 - numIter 5669 * Win64 5670 * rsp+40 - numIter 5671 */ 5672 address generate_bigIntegerLeftShift() { 5673 __ align(CodeEntryAlignment); 5674 StubCodeMark mark(this, "StubRoutines", "bigIntegerLeftShiftWorker"); 5675 address start = __ pc(); 5676 Label Shift512Loop, ShiftTwo, ShiftTwoLoop, ShiftOne, Exit; 5677 // For Unix, the arguments are as follows: rdi, rsi, rdx, rcx, r8. 5678 const Register newArr = rdi; 5679 const Register oldArr = rsi; 5680 const Register newIdx = rdx; 5681 const Register shiftCount = rcx; // It was intentional to have shiftCount in rcx since it is used implicitly for shift.
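// Note: x86 double-precision shifts (the shrdl/shldl instructions used in
// these two stubs) take a variable shift count implicitly in the CL register,
// which is why shiftCount must live in rcx. For example, shrdl(tmp5, tmp4)
// computes
//   tmp5 = (tmp5 >>> cl) | (tmp4 << (32 - cl))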
5682 const Register totalNumIter = r8; 5683 // For windows, we use r9 and r10 as temps to save rdi and rsi. Thus we cannot allocate them for our temps. 5684 // For everything else, we prefer using r9 and r10 since we do not have to save them before use. 5685 const Register tmp1 = r11; // Caller save. 5686 const Register tmp2 = rax; // Caller save. 5687 const Register tmp3 = WINDOWS_ONLY(r12) NOT_WINDOWS(r9); // Windows: Callee save. Linux: Caller save. 5688 const Register tmp4 = WINDOWS_ONLY(r13) NOT_WINDOWS(r10); // Windows: Callee save. Linux: Caller save. 5689 const Register tmp5 = r14; // Callee save. 5690 5691 const XMMRegister x0 = xmm0; 5692 const XMMRegister x1 = xmm1; 5693 const XMMRegister x2 = xmm2; 5694 BLOCK_COMMENT("Entry:"); 5695 __ enter(); // required for proper stackwalking of RuntimeStub frame 5696 5697 #ifdef _WINDOWS 5698 setup_arg_regs(4); 5699 // For windows, since last argument is on stack, we need to move it to the appropriate register. 5700 __ movl(totalNumIter, Address(rsp, 6 * wordSize)); 5701 // Save callee save registers. 5702 __ push(tmp3); 5703 __ push(tmp4); 5704 #endif 5705 __ push(tmp5); 5706 5707 // Rename temps used throughout the code 5708 const Register idx = tmp1; 5709 const Register numIterTmp = tmp2; 5710 5711 // Start idx from zero. 5712 __ xorl(idx, idx); 5713 // Compute interior pointer for new array. We do this so that we can use same index for both old and new arrays. 5714 __ lea(newArr, Address(newArr, newIdx, Address::times_4)); 5715 __ movl(numIterTmp, totalNumIter); 5716 5717 // If vectorization is enabled, check if the number of iterations is at least 64 5718 // If not, then go to ShiftTwo shifting two numbers at a time 5719 if (VM_Version::supports_avx512_vbmi2()) { 5720 __ cmpl(totalNumIter, (AVX3Threshold/64)); 5721 __ jcc(Assembler::less, ShiftTwo); 5722 5723 if (AVX3Threshold < 16 * 64) { 5724 __ cmpl(totalNumIter, 16); 5725 __ jcc(Assembler::less, ShiftTwo); 5726 } 5727 __ evpbroadcastd(x0, shiftCount, Assembler::AVX_512bit); 5728 __ subl(numIterTmp, 16); 5729 __ BIND(Shift512Loop); 5730 __ evmovdqul(x1, Address(oldArr, idx, Address::times_4), Assembler::AVX_512bit); 5731 __ evmovdqul(x2, Address(oldArr, idx, Address::times_4, 0x4), Assembler::AVX_512bit); 5732 __ vpshldvd(x1, x2, x0, Assembler::AVX_512bit); 5733 __ evmovdqul(Address(newArr, idx, Address::times_4), x1, Assembler::AVX_512bit); 5734 __ addl(idx, 16); 5735 __ subl(numIterTmp, 16); 5736 __ jcc(Assembler::greaterEqual, Shift512Loop); 5737 __ addl(numIterTmp, 16); 5738 } 5739 __ BIND(ShiftTwo); 5740 __ cmpl(totalNumIter, 1); 5741 __ jcc(Assembler::less, Exit); 5742 __ movl(tmp3, Address(oldArr, idx, Address::times_4)); 5743 __ subl(numIterTmp, 2); 5744 __ jcc(Assembler::less, ShiftOne); 5745 5746 __ BIND(ShiftTwoLoop); 5747 __ movl(tmp4, Address(oldArr, idx, Address::times_4, 0x4)); 5748 __ movl(tmp5, Address(oldArr, idx, Address::times_4, 0x8)); 5749 __ shldl(tmp3, tmp4); 5750 __ shldl(tmp4, tmp5); 5751 __ movl(Address(newArr, idx, Address::times_4), tmp3); 5752 __ movl(Address(newArr, idx, Address::times_4, 0x4), tmp4); 5753 __ movl(tmp3, tmp5); 5754 __ addl(idx, 2); 5755 __ subl(numIterTmp, 2); 5756 __ jcc(Assembler::greaterEqual, ShiftTwoLoop); 5757 5758 // Do the last iteration 5759 __ BIND(ShiftOne); 5760 __ addl(numIterTmp, 2); 5761 __ cmpl(numIterTmp, 1); 5762 __ jcc(Assembler::less, Exit); 5763 __ movl(tmp4, Address(oldArr, idx, Address::times_4, 0x4)); 5764 __ shldl(tmp3, tmp4); 5765 __ movl(Address(newArr, idx, Address::times_4), tmp3); 5766 5767 __ BIND(Exit); 
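// Post-condition of the loops above (illustrative): for 0 <= i < totalNumIter,
//   newArr[newIdx + i] = (oldArr[i] << shiftCount) | (oldArr[i + 1] >>> (32 - shiftCount))
// where each shldl(tmp3, tmp4) produces one such word (newArr was pre-biased
// by newIdx via the lea above).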
5768 // Restore callee save registers. 5769 __ pop(tmp5); 5770 #ifdef _WINDOWS 5771 __ pop(tmp4); 5772 __ pop(tmp3); 5773 restore_arg_regs(); 5774 #endif 5775 __ leave(); // required for proper stackwalking of RuntimeStub frame 5776 __ ret(0); 5777 return start; 5778 } 5779 5780 address generate_libmExp() { 5781 StubCodeMark mark(this, "StubRoutines", "libmExp"); 5782 5783 address start = __ pc(); 5784 5785 const XMMRegister x0 = xmm0; 5786 const XMMRegister x1 = xmm1; 5787 const XMMRegister x2 = xmm2; 5788 const XMMRegister x3 = xmm3; 5789 5790 const XMMRegister x4 = xmm4; 5791 const XMMRegister x5 = xmm5; 5792 const XMMRegister x6 = xmm6; 5793 const XMMRegister x7 = xmm7; 5794 5795 const Register tmp = r11; 5796 5797 BLOCK_COMMENT("Entry:"); 5798 __ enter(); // required for proper stackwalking of RuntimeStub frame 5799 5800 __ fast_exp(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 5801 5802 __ leave(); // required for proper stackwalking of RuntimeStub frame 5803 __ ret(0); 5804 5805 return start; 5806 5807 } 5808 5809 address generate_libmLog() { 5810 StubCodeMark mark(this, "StubRoutines", "libmLog"); 5811 5812 address start = __ pc(); 5813 5814 const XMMRegister x0 = xmm0; 5815 const XMMRegister x1 = xmm1; 5816 const XMMRegister x2 = xmm2; 5817 const XMMRegister x3 = xmm3; 5818 5819 const XMMRegister x4 = xmm4; 5820 const XMMRegister x5 = xmm5; 5821 const XMMRegister x6 = xmm6; 5822 const XMMRegister x7 = xmm7; 5823 5824 const Register tmp1 = r11; 5825 const Register tmp2 = r8; 5826 5827 BLOCK_COMMENT("Entry:"); 5828 __ enter(); // required for proper stackwalking of RuntimeStub frame 5829 5830 __ fast_log(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2); 5831 5832 __ leave(); // required for proper stackwalking of RuntimeStub frame 5833 __ ret(0); 5834 5835 return start; 5836 5837 } 5838 5839 address generate_libmLog10() { 5840 StubCodeMark mark(this, "StubRoutines", "libmLog10"); 5841 5842 address start = __ pc(); 5843 5844 const XMMRegister x0 = xmm0; 5845 const XMMRegister x1 = xmm1; 5846 const XMMRegister x2 = xmm2; 5847 const XMMRegister x3 = xmm3; 5848 5849 const XMMRegister x4 = xmm4; 5850 const XMMRegister x5 = xmm5; 5851 const XMMRegister x6 = xmm6; 5852 const XMMRegister x7 = xmm7; 5853 5854 const Register tmp = r11; 5855 5856 BLOCK_COMMENT("Entry:"); 5857 __ enter(); // required for proper stackwalking of RuntimeStub frame 5858 5859 __ fast_log10(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 5860 5861 __ leave(); // required for proper stackwalking of RuntimeStub frame 5862 __ ret(0); 5863 5864 return start; 5865 5866 } 5867 5868 address generate_libmPow() { 5869 StubCodeMark mark(this, "StubRoutines", "libmPow"); 5870 5871 address start = __ pc(); 5872 5873 const XMMRegister x0 = xmm0; 5874 const XMMRegister x1 = xmm1; 5875 const XMMRegister x2 = xmm2; 5876 const XMMRegister x3 = xmm3; 5877 5878 const XMMRegister x4 = xmm4; 5879 const XMMRegister x5 = xmm5; 5880 const XMMRegister x6 = xmm6; 5881 const XMMRegister x7 = xmm7; 5882 5883 const Register tmp1 = r8; 5884 const Register tmp2 = r9; 5885 const Register tmp3 = r10; 5886 const Register tmp4 = r11; 5887 5888 BLOCK_COMMENT("Entry:"); 5889 __ enter(); // required for proper stackwalking of RuntimeStub frame 5890 5891 __ fast_pow(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 5892 5893 __ leave(); // required for proper stackwalking of RuntimeStub frame 5894 __ ret(0); 5895 5896 return start; 5897 5898 } 5899 5900 address generate_libmSin() { 5901 StubCodeMark mark(this, 
"StubRoutines", "libmSin"); 5902 5903 address start = __ pc(); 5904 5905 const XMMRegister x0 = xmm0; 5906 const XMMRegister x1 = xmm1; 5907 const XMMRegister x2 = xmm2; 5908 const XMMRegister x3 = xmm3; 5909 5910 const XMMRegister x4 = xmm4; 5911 const XMMRegister x5 = xmm5; 5912 const XMMRegister x6 = xmm6; 5913 const XMMRegister x7 = xmm7; 5914 5915 const Register tmp1 = r8; 5916 const Register tmp2 = r9; 5917 const Register tmp3 = r10; 5918 const Register tmp4 = r11; 5919 5920 BLOCK_COMMENT("Entry:"); 5921 __ enter(); // required for proper stackwalking of RuntimeStub frame 5922 5923 #ifdef _WIN64 5924 __ push(rsi); 5925 __ push(rdi); 5926 #endif 5927 __ fast_sin(x0, x1, x2, x3, x4, x5, x6, x7, rax, rbx, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 5928 5929 #ifdef _WIN64 5930 __ pop(rdi); 5931 __ pop(rsi); 5932 #endif 5933 5934 __ leave(); // required for proper stackwalking of RuntimeStub frame 5935 __ ret(0); 5936 5937 return start; 5938 5939 } 5940 5941 address generate_libmCos() { 5942 StubCodeMark mark(this, "StubRoutines", "libmCos"); 5943 5944 address start = __ pc(); 5945 5946 const XMMRegister x0 = xmm0; 5947 const XMMRegister x1 = xmm1; 5948 const XMMRegister x2 = xmm2; 5949 const XMMRegister x3 = xmm3; 5950 5951 const XMMRegister x4 = xmm4; 5952 const XMMRegister x5 = xmm5; 5953 const XMMRegister x6 = xmm6; 5954 const XMMRegister x7 = xmm7; 5955 5956 const Register tmp1 = r8; 5957 const Register tmp2 = r9; 5958 const Register tmp3 = r10; 5959 const Register tmp4 = r11; 5960 5961 BLOCK_COMMENT("Entry:"); 5962 __ enter(); // required for proper stackwalking of RuntimeStub frame 5963 5964 #ifdef _WIN64 5965 __ push(rsi); 5966 __ push(rdi); 5967 #endif 5968 __ fast_cos(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 5969 5970 #ifdef _WIN64 5971 __ pop(rdi); 5972 __ pop(rsi); 5973 #endif 5974 5975 __ leave(); // required for proper stackwalking of RuntimeStub frame 5976 __ ret(0); 5977 5978 return start; 5979 5980 } 5981 5982 address generate_libmTan() { 5983 StubCodeMark mark(this, "StubRoutines", "libmTan"); 5984 5985 address start = __ pc(); 5986 5987 const XMMRegister x0 = xmm0; 5988 const XMMRegister x1 = xmm1; 5989 const XMMRegister x2 = xmm2; 5990 const XMMRegister x3 = xmm3; 5991 5992 const XMMRegister x4 = xmm4; 5993 const XMMRegister x5 = xmm5; 5994 const XMMRegister x6 = xmm6; 5995 const XMMRegister x7 = xmm7; 5996 5997 const Register tmp1 = r8; 5998 const Register tmp2 = r9; 5999 const Register tmp3 = r10; 6000 const Register tmp4 = r11; 6001 6002 BLOCK_COMMENT("Entry:"); 6003 __ enter(); // required for proper stackwalking of RuntimeStub frame 6004 6005 #ifdef _WIN64 6006 __ push(rsi); 6007 __ push(rdi); 6008 #endif 6009 __ fast_tan(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 6010 6011 #ifdef _WIN64 6012 __ pop(rdi); 6013 __ pop(rsi); 6014 #endif 6015 6016 __ leave(); // required for proper stackwalking of RuntimeStub frame 6017 __ ret(0); 6018 6019 return start; 6020 6021 } 6022 6023 #undef __ 6024 #define __ masm-> 6025 6026 // Continuation point for throwing of implicit exceptions that are 6027 // not handled in the current activation. Fabricates an exception 6028 // oop and initiates normal exception dispatching in this 6029 // frame. Since we need to preserve callee-saved values (currently 6030 // only for C2, but done for C1 as well) we need a callee-saved oop 6031 // map and therefore have to make these stubs into RuntimeStubs 6032 // rather than BufferBlobs. 
If the compiler needs all registers to 6033 // be preserved between the fault point and the exception handler 6034 // then it must assume responsibility for that in 6035 // AbstractCompiler::continuation_for_implicit_null_exception or 6036 // continuation_for_implicit_division_by_zero_exception. All other 6037 // implicit exceptions (e.g., NullPointerException or 6038 // AbstractMethodError on entry) are either at call sites or 6039 // otherwise assume that stack unwinding will be initiated, so 6040 // caller saved registers were assumed volatile in the compiler. 6041 address generate_throw_exception(const char* name, 6042 address runtime_entry, 6043 Register arg1 = noreg, 6044 Register arg2 = noreg) { 6045 // Information about frame layout at time of blocking runtime call. 6046 // Note that we only have to preserve callee-saved registers since 6047 // the compilers are responsible for supplying a continuation point 6048 // if they expect all registers to be preserved. 6049 enum layout { 6050 rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt, 6051 rbp_off2, 6052 return_off, 6053 return_off2, 6054 framesize // inclusive of return address 6055 }; 6056 6057 int insts_size = 512; 6058 int locs_size = 64; 6059 6060 CodeBuffer code(name, insts_size, locs_size); 6061 OopMapSet* oop_maps = new OopMapSet(); 6062 MacroAssembler* masm = new MacroAssembler(&code); 6063 6064 address start = __ pc(); 6065 6066 // This is an inlined and slightly modified version of call_VM 6067 // which has the ability to fetch the return PC out of 6068 // thread-local storage and also sets up last_Java_sp slightly 6069 // differently than the real call_VM 6070 6071 __ enter(); // required for proper stackwalking of RuntimeStub frame 6072 6073 assert(is_even(framesize/2), "sp not 16-byte aligned"); 6074 6075 // return address and rbp are already in place 6076 __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog 6077 6078 int frame_complete = __ pc() - start; 6079 6080 // Set up last_Java_sp and last_Java_fp 6081 address the_pc = __ pc(); 6082 __ set_last_Java_frame(rsp, rbp, the_pc); 6083 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack 6084 6085 // Call runtime 6086 if (arg1 != noreg) { 6087 assert(arg2 != c_rarg1, "clobbered"); 6088 __ movptr(c_rarg1, arg1); 6089 } 6090 if (arg2 != noreg) { 6091 __ movptr(c_rarg2, arg2); 6092 } 6093 __ movptr(c_rarg0, r15_thread); 6094 BLOCK_COMMENT("call runtime_entry"); 6095 __ call(RuntimeAddress(runtime_entry)); 6096 6097 // Generate oop map 6098 OopMap* map = new OopMap(framesize, 0); 6099 6100 oop_maps->add_gc_map(the_pc - start, map); 6101 6102 __ reset_last_Java_frame(true); 6103 6104 __ leave(); // required for proper stackwalking of RuntimeStub frame 6105 6106 // check for pending exceptions 6107 #ifdef ASSERT 6108 Label L; 6109 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), 6110 (int32_t) NULL_WORD); 6111 __ jcc(Assembler::notEqual, L); 6112 __ should_not_reach_here(); 6113 __ bind(L); 6114 #endif // ASSERT 6115 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 6116 6117 6118 // codeBlob framesize is in words (not VMRegImpl::slot_size) 6119 RuntimeStub* stub = 6120 RuntimeStub::new_runtime_stub(name, 6121 &code, 6122 frame_complete, 6123 (framesize >> (LogBytesPerWord - LogBytesPerInt)), 6124 oop_maps, false); 6125 return stub->entry_point(); 6126 } 6127 6128 void create_control_words() { 6129 // Round to nearest, 53-bit mode, exceptions masked 6130 StubRoutines::_fpu_cntrl_wrd_std = 0x027F; 6131 // Round to zero, 53-bit 
mode, exceptions masked 6132 StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F; 6133 // Round to nearest, 24-bit mode, exceptions masked 6134 StubRoutines::_fpu_cntrl_wrd_24 = 0x007F; 6135 // Round to nearest, 64-bit mode, exceptions masked 6136 StubRoutines::_mxcsr_std = 0x1F80; 6137 // Note: the following two constants are 80-bit values; 6138 // their layout is critical for correct loading by the FPU. 6139 // Bias for strict fp multiply/divide 6140 StubRoutines::_fpu_subnormal_bias1[0]= 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000 6141 StubRoutines::_fpu_subnormal_bias1[1]= 0x80000000; 6142 StubRoutines::_fpu_subnormal_bias1[2]= 0x03ff; 6143 // Un-Bias for strict fp multiply/divide 6144 StubRoutines::_fpu_subnormal_bias2[0]= 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000 6145 StubRoutines::_fpu_subnormal_bias2[1]= 0x80000000; 6146 StubRoutines::_fpu_subnormal_bias2[2]= 0x7bff; 6147 } 6148 6149 // Initialization 6150 void generate_initial() { 6151 // Generates all stubs and initializes the entry points 6152 6153 // These platform-specific settings are needed by generate_call_stub() 6154 create_control_words(); 6155 6156 // Entry points that exist on all platforms. Note: this is code 6157 // that could be shared among different platforms - however the 6158 // benefit seems to be smaller than the disadvantage of having a 6159 // much more complicated generator structure. See also comment in 6160 // stubRoutines.hpp. 6161 6162 StubRoutines::_forward_exception_entry = generate_forward_exception(); 6163 6164 StubRoutines::_call_stub_entry = 6165 generate_call_stub(StubRoutines::_call_stub_return_address); 6166 6167 // is referenced by megamorphic call 6168 StubRoutines::_catch_exception_entry = generate_catch_exception(); 6169 6170 // atomic calls 6171 StubRoutines::_fence_entry = generate_orderaccess_fence(); 6172 6173 // platform dependent 6174 StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp(); 6175 StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp(); 6176 6177 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr(); 6178 6179 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup(); 6180 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup(); 6181 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup(); 6182 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup(); 6183 6184 StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF); 6185 StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000); 6186 StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF); 6187 StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000); 6188 6189 // Build this early so it's available for the interpreter.
    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_StackOverflowError));
    StubRoutines::_throw_delayed_StackOverflowError_entry =
      generate_throw_exception("delayed StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_delayed_StackOverflowError));
    if (UseCRC32Intrinsics) {
      // set the table address before generating the stubs that use it
      StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
    }

    if (UseCRC32CIntrinsics) {
      bool supports_clmul = VM_Version::supports_clmul();
      StubRoutines::x86::generate_CRC32C_table(supports_clmul);
      StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
      StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
    }
    if (UseLibmIntrinsic && InlineIntrinsics) {
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::x86::_ONEHALF_adr = (address)StubRoutines::x86::_ONEHALF;
        StubRoutines::x86::_P_2_adr = (address)StubRoutines::x86::_P_2;
        StubRoutines::x86::_SC_4_adr = (address)StubRoutines::x86::_SC_4;
        StubRoutines::x86::_Ctable_adr = (address)StubRoutines::x86::_Ctable;
        StubRoutines::x86::_SC_2_adr = (address)StubRoutines::x86::_SC_2;
        StubRoutines::x86::_SC_3_adr = (address)StubRoutines::x86::_SC_3;
        StubRoutines::x86::_SC_1_adr = (address)StubRoutines::x86::_SC_1;
        StubRoutines::x86::_PI_INV_TABLE_adr = (address)StubRoutines::x86::_PI_INV_TABLE;
        StubRoutines::x86::_PI_4_adr = (address)StubRoutines::x86::_PI_4;
        StubRoutines::x86::_PI32INV_adr = (address)StubRoutines::x86::_PI32INV;
        StubRoutines::x86::_SIGN_MASK_adr = (address)StubRoutines::x86::_SIGN_MASK;
        StubRoutines::x86::_P_1_adr = (address)StubRoutines::x86::_P_1;
        StubRoutines::x86::_P_3_adr = (address)StubRoutines::x86::_P_3;
        StubRoutines::x86::_NEG_ZERO_adr = (address)StubRoutines::x86::_NEG_ZERO;
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dexp)) {
        StubRoutines::_dexp = generate_libmExp();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
        StubRoutines::_dlog = generate_libmLog();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) {
        StubRoutines::_dlog10 = generate_libmLog10();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) {
        StubRoutines::_dpow = generate_libmPow();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
        StubRoutines::_dsin = generate_libmSin();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
        StubRoutines::_dcos = generate_libmCos();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::_dtan = generate_libmTan();
      }
    }
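
    // Editor's note (an assumption, not verified here): the *_adr fields
    // are published before generate_libmSin()/generate_libmCos()/
    // generate_libmTan() run because the generated trigonometric code
    // loads its polynomial coefficients and Pi-reduction constants
    // through those addresses, so the table-before-stub ordering above
    // appears to be deliberate.
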
    // Safefetch stubs.
    generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
                                                   &StubRoutines::_safefetch32_fault_pc,
                                                   &StubRoutines::_safefetch32_continuation_pc);
    generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                                                       &StubRoutines::_safefetchN_fault_pc,
                                                       &StubRoutines::_safefetchN_continuation_pc);
  }

  void generate_all() {
    // Generates all stubs and initializes the entry points

    // These entry points require SharedInfo::stack0 to be set up in
    // non-core builds and need to be relocatable, so they each
    // fabricate a RuntimeStub internally.
    StubRoutines::_throw_AbstractMethodError_entry =
      generate_throw_exception("AbstractMethodError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_AbstractMethodError));

    StubRoutines::_throw_IncompatibleClassChangeError_entry =
      generate_throw_exception("IncompatibleClassChangeError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_IncompatibleClassChangeError));

    StubRoutines::_throw_NullPointerException_at_call_entry =
      generate_throw_exception("NullPointerException at call throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_NullPointerException_at_call));

    // entry points that are platform specific
    StubRoutines::x86::_vector_float_sign_mask = generate_vector_mask("vector_float_sign_mask", 0x7FFFFFFF7FFFFFFF);
    StubRoutines::x86::_vector_float_sign_flip = generate_vector_mask("vector_float_sign_flip", 0x8000000080000000);
    StubRoutines::x86::_vector_double_sign_mask = generate_vector_mask("vector_double_sign_mask", 0x7FFFFFFFFFFFFFFF);
    StubRoutines::x86::_vector_double_sign_flip = generate_vector_mask("vector_double_sign_flip", 0x8000000000000000);
    StubRoutines::x86::_vector_short_to_byte_mask = generate_vector_mask("vector_short_to_byte_mask", 0x00ff00ff00ff00ff);
    StubRoutines::x86::_vector_byte_perm_mask = generate_vector_byte_perm_mask("vector_byte_perm_mask");
    StubRoutines::x86::_vector_long_sign_mask = generate_vector_mask("vector_long_sign_mask", 0x8000000000000000);

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

    // data cache line writeback
    StubRoutines::_data_cache_writeback = generate_data_cache_writeback();
    StubRoutines::_data_cache_writeback_sync = generate_data_cache_writeback_sync();
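
    // Editor's note (hedged): these writeback stubs are understood to back
    // jdk.internal.misc.Unsafe's writeback0/writebackPreSync0/
    // writebackPostSync0 hooks, i.e. the MappedByteBuffer::force path for
    // persistent-memory mappings (JEP 352), with the generated code
    // selecting CLWB or CLFLUSHOPT when the CPU advertises them.
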
    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    // don't bother generating these AES intrinsic stubs unless global flag is set
    if (UseAESIntrinsics) {
      StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others
      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
      if (VM_Version::supports_avx512_vaes() && VM_Version::supports_avx512vl() && VM_Version::supports_avx512dq()) {
        StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptVectorAESCrypt();
        StubRoutines::_electronicCodeBook_encryptAESCrypt = generate_electronicCodeBook_encryptAESCrypt();
        StubRoutines::_electronicCodeBook_decryptAESCrypt = generate_electronicCodeBook_decryptAESCrypt();
      } else {
        StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
      }
    }
    if (UseAESCTRIntrinsics) {
      if (VM_Version::supports_avx512_vaes() && VM_Version::supports_avx512bw() && VM_Version::supports_avx512vl()) {
        StubRoutines::x86::_counter_mask_addr = counter_mask_addr();
        StubRoutines::_counterMode_AESCrypt = generate_counterMode_VectorAESCrypt();
      } else {
        StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask();
        StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
      }
    }

    if (UseSHA1Intrinsics) {
      StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
      StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
      StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress");
      StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB");
    }
    if (UseSHA256Intrinsics) {
      StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
      char* dst = (char*)StubRoutines::x86::_k256_W;
      char* src = (char*)StubRoutines::x86::_k256;
      for (int ii = 0; ii < 16; ++ii) {
        memcpy(dst + 32 * ii,      src + 16 * ii, 16);
        memcpy(dst + 32 * ii + 16, src + 16 * ii, 16);
      }
      StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask();
      StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress");
      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB");
    }
    if (UseSHA512Intrinsics) {
      StubRoutines::x86::_k512_W_addr = (address)StubRoutines::x86::_k512_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = generate_pshuffle_byte_flip_mask_sha512();
      StubRoutines::_sha512_implCompress = generate_sha512_implCompress(false, "sha512_implCompress");
      StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB");
    }
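
    // Editor's note on the UseSHA256Intrinsics copy loop above (the lane
    // rationale is a hedged interpretation): each 16-byte row of round
    // constants in _k256 is written twice into a 32-byte row of _k256_W,
    //
    //   row i of _k256_W = [ _k256[4i..4i+3] | _k256[4i..4i+3] ]
    //
    // so one 256-bit AVX2 load hands the same four constants to both
    // 128-bit lanes when two SHA-256 message blocks are hashed in
    // parallel.
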
    // Generate GHASH intrinsics code
    if (UseGHASHIntrinsics) {
      StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();
      StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask();
      if (VM_Version::supports_avx()) {
        StubRoutines::x86::_ghash_shuffmask_addr = ghash_shufflemask_addr();
        StubRoutines::x86::_ghash_poly_addr = ghash_polynomial_addr();
        StubRoutines::_ghash_processBlocks = generate_avx_ghash_processBlocks();
      } else {
        StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
      }
    }

    if (UseBASE64Intrinsics) {
      StubRoutines::x86::_and_mask = base64_and_mask_addr();
      StubRoutines::x86::_bswap_mask = base64_bswap_mask_addr();
      StubRoutines::x86::_base64_charset = base64_charset_addr();
      StubRoutines::x86::_url_charset = base64url_charset_addr();
      StubRoutines::x86::_gather_mask = base64_gather_mask_addr();
      StubRoutines::x86::_left_shift_mask = base64_left_shift_mask_addr();
      StubRoutines::x86::_right_shift_mask = base64_right_shift_mask_addr();
      StubRoutines::_base64_encodeBlock = generate_base64_encodeBlock();
    }

    BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
    if (bs_nm != NULL) {
      StubRoutines::x86::_method_entry_barrier = generate_method_entry_barrier();
    }
#ifdef COMPILER2
    if (UseMultiplyToLenIntrinsic) {
      StubRoutines::_multiplyToLen = generate_multiplyToLen();
    }
    if (UseSquareToLenIntrinsic) {
      StubRoutines::_squareToLen = generate_squareToLen();
    }
    if (UseMulAddIntrinsic) {
      StubRoutines::_mulAdd = generate_mulAdd();
    }
    if (VM_Version::supports_avx512_vbmi2()) {
      StubRoutines::_bigIntegerRightShiftWorker = generate_bigIntegerRightShift();
      StubRoutines::_bigIntegerLeftShiftWorker = generate_bigIntegerLeftShift();
    }
    if (UseMontgomeryMultiplyIntrinsic) {
      StubRoutines::_montgomeryMultiply
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
    }
    if (UseMontgomerySquareIntrinsic) {
      StubRoutines::_montgomerySquare
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
    }
#endif // COMPILER2

    if (UseVectorizedMismatchIntrinsic) {
      StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch();
    }
  }

 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
}; // end class declaration

#define UCM_TABLE_MAX_ENTRIES 16
void StubGenerator_generate(CodeBuffer* code, bool all) {
  if (UnsafeCopyMemory::_table == NULL) {
    UnsafeCopyMemory::create_table(UCM_TABLE_MAX_ENTRIES);
  }
  StubGenerator g(code, all);
}
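
// Editor's note (assumption about the caller, hedged): StubGenerator_generate()
// is this file's only external entry point; the runtime is expected to invoke
// it twice during startup, roughly
//
//   StubGenerator_generate(&buffer, false);  // early: call stub and the
//                                            // stubs the interpreter needs
//   StubGenerator_generate(&buffer, true);   // later: the remaining stub set
//
// which matches the generate_initial()/generate_all() split above.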