/*
 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/zThreadLocalData.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions

// Stub Code definitions

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    // This can destroy rscratch1 if counter is far from the code cache
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter) \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif

  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    c_rarg4:   (interpreter) entry point              address
  //    c_rarg5:   parameters                             intptr_t*
  //    16(rbp):   parameter size (in words)              int
  //    24(rbp):   thread                                 Thread*
  //
  //      [ return_from_Java     ] <--- rsp
  //      [ argument word n      ]
  //      ...
  // -12 [ argument word 1      ]
  // -11 [ saved r15            ] <--- rsp_after_call
  // -10 [ saved r14            ]
  //  -9 [ saved r13            ]
  //  -8 [ saved r12            ]
  //  -7 [ saved rbx            ]
  //  -6 [ call wrapper         ]
  //  -5 [ result               ]
  //  -4 [ result type          ]
  //  -3 [ method               ]
  //  -2 [ entry point          ]
  //  -1 [ parameters           ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ parameter size       ]
  //   3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    48(rbp):   (interpreter) entry point              address
  //    56(rbp):   parameters                             intptr_t*
  //    64(rbp):   parameter size (in words)              int
  //    72(rbp):   thread                                 Thread*
  //
  //      [ return_from_Java     ] <--- rsp
  //      [ argument word n      ]
  //      ...
  // -60 [ argument word 1      ]
  // -59 [ saved xmm31          ] <--- rsp_after_call
  //     [ saved xmm16-xmm30    ] (EVEX enabled, else the space is blank)
  // -27 [ saved xmm15          ]
  //     [ saved xmm7-xmm14     ]
  //  -9 [ saved xmm6           ] (each xmm register takes 2 slots)
  //  -7 [ saved r15            ]
  //  -6 [ saved r14            ]
  //  -5 [ saved r13            ]
  //  -4 [ saved r12            ]
  //  -3 [ saved rdi            ]
  //  -2 [ saved rsi            ]
  //  -1 [ saved rbx            ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ call wrapper         ]
  //   3 [ result               ]
  //   4 [ result type          ]
  //   5 [ method               ]
  //   6 [ entry point          ]
  //   7 [ parameters           ]
  //   8 [ parameter size       ]
  //   9 [ thread               ]
  //
  //    Windows reserves the caller's stack space for arguments 1-4.
  //    We spill c_rarg0-c_rarg3 to this space.

  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    xmm_save_first     = 6,  // save from xmm6
    xmm_save_last      = 31, // to xmm31
    xmm_save_base      = -9,
    rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -59
    r15_off            = -7,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            = 0,
    retaddr_off        = 1,
    call_wrapper_off   = 2,
    result_off         = 3,
    result_type_off    = 4,
    method_off         = 5,
    entry_point_off    = 6,
    parameters_off     = 7,
    parameter_size_off = 8,
    thread_off         = 9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            = 0,
    retaddr_off        = 1,
    parameter_size_off = 2,
    thread_off         = 3
#endif
  };

#ifdef _WIN64
  Address xmm_save(int reg) {
    assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
    return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
  }
#endif

  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,   c_rarg5); // parameters
    __ movptr(entry_point,  c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);

#ifdef _WIN64
    int last_reg = 15;
    if (UseAVX > 2) {
      last_reg = 31;
    }
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vextractf32x4(xmm_save(i), as_XMMRegister(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(xmm_save(i), as_XMMRegister(i));
      }
    }

    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);      // parameter pointer
    __ movl(c_rarg1, c_rarg3);           // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0)); // get parameter
    __ addptr(c_rarg2, wordSize);        // advance to next parameter
    __ decrementl(c_rarg1);              // decrement counter
    __ push(rax);                        // pass parameter
    __ jcc(Assembler::notZero, loop);

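    // Illustrative C equivalent of the loop above (a summary, not extra
    // generated code):
    //   while (count-- > 0) push(*parameters++);
    // i.e. parameters[0] is pushed first and therefore ends up deepest
    // on the stack.
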
    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);          // get Method*
    __ movptr(c_rarg1, entry_point); // get entry_point
    __ mov(r13, rsp);                // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::call_stub: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::call_stub: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L3);
    }
#endif

    // restore regs belonging to calling function
#ifdef _WIN64
    // emit the restores for xmm regs
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vinsertf32x4(as_XMMRegister(i), as_XMMRegister(i), xmm_save(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(as_XMMRegister(i), xmm_save(i));
      }
    }
#endif
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ vzeroupper();
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }

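  // Note on the result dispatch above: T_OBJECT deliberately shares the
  // is_long path, since an oop is a full 64-bit store into the result
  // slot; every type other than T_OBJECT/T_LONG/T_FLOAT/T_DOUBLE falls
  // through to the 32-bit T_INT store.
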
  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to set up the
  // rsp.
  //
  // rax: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    address start = __ pc();

    // same as in generate_call_stub():
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
    const Address thread        (rbp, thread_off         * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::catch_exception: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::catch_exception: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L3);
    }
#endif

    // set pending exception
    __ verify_oop(rax);

    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
    __ lea(rscratch1, ExternalAddress((address)__FILE__));
    __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
    __ movl(Address(r15_thread, Thread::exception_line_offset()), (int)__LINE__);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL,
           "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }

  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.
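    //
    // In outline, the code below does (a sketch of what follows, not
    // extra generated code):
    //   handler = SharedRuntime::exception_handler_for_return_address(thread, return_pc);
    //   rax     = thread->pending_exception(); thread->clear_pending_exception();
    //   rdx     = return_pc (popped off the stack);
    //   jump to handler;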

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }

  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::xchg_long(jlong exchange_value, volatile jlong* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if (compare_value == *dest) {
  //      *dest = exchange_value;
  //      return compare_value;
  //    } else {
  //      return *dest;
  //    }
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

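  // Note on the cmpxchg stubs above and below: they lean on the x86
  // LOCK CMPXCHG contract -- rax holds compare_value on entry and the
  // memory location's original value on exit, which is exactly the
  // return value required in both the success and the failure case,
  // so no explicit branch is needed.
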
mark(this, "StubRoutines", "atomic_cmpxchg_byte"); 635 address start = __ pc(); 636 637 __ movsbq(rax, c_rarg2); 638 __ lock(); 639 __ cmpxchgb(c_rarg0, Address(c_rarg1, 0)); 640 __ ret(0); 641 642 return start; 643 } 644 645 // Support for int64_t atomic::atomic_cmpxchg(int64_t exchange_value, 646 // volatile int64_t* dest, 647 // int64_t compare_value) 648 // Arguments : 649 // c_rarg0: exchange_value 650 // c_rarg1: dest 651 // c_rarg2: compare_value 652 // 653 // Result: 654 // if ( compare_value == *dest ) { 655 // *dest = exchange_value 656 // return compare_value; 657 // else 658 // return *dest; 659 address generate_atomic_cmpxchg_long() { 660 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long"); 661 address start = __ pc(); 662 663 __ movq(rax, c_rarg2); 664 __ lock(); 665 __ cmpxchgq(c_rarg0, Address(c_rarg1, 0)); 666 __ ret(0); 667 668 return start; 669 } 670 671 // Support for jint atomic::add(jint add_value, volatile jint* dest) 672 // 673 // Arguments : 674 // c_rarg0: add_value 675 // c_rarg1: dest 676 // 677 // Result: 678 // *dest += add_value 679 // return *dest; 680 address generate_atomic_add() { 681 StubCodeMark mark(this, "StubRoutines", "atomic_add"); 682 address start = __ pc(); 683 684 __ movl(rax, c_rarg0); 685 __ lock(); 686 __ xaddl(Address(c_rarg1, 0), c_rarg0); 687 __ addl(rax, c_rarg0); 688 __ ret(0); 689 690 return start; 691 } 692 693 // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) 694 // 695 // Arguments : 696 // c_rarg0: add_value 697 // c_rarg1: dest 698 // 699 // Result: 700 // *dest += add_value 701 // return *dest; 702 address generate_atomic_add_long() { 703 StubCodeMark mark(this, "StubRoutines", "atomic_add_long"); 704 address start = __ pc(); 705 706 __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow 707 __ lock(); 708 __ xaddptr(Address(c_rarg1, 0), c_rarg0); 709 __ addptr(rax, c_rarg0); 710 __ ret(0); 711 712 return start; 713 } 714 715 // Support for intptr_t OrderAccess::fence() 716 // 717 // Arguments : 718 // 719 // Result: 720 address generate_orderaccess_fence() { 721 StubCodeMark mark(this, "StubRoutines", "orderaccess_fence"); 722 address start = __ pc(); 723 __ membar(Assembler::StoreLoad); 724 __ ret(0); 725 726 return start; 727 } 728 729 // Support for intptr_t get_previous_fp() 730 // 731 // This routine is used to find the previous frame pointer for the 732 // caller (current_frame_guess). This is used as part of debugging 733 // ps() is seemingly lost trying to find frames. 734 // This code assumes that caller current_frame_guess) has a frame. 735 address generate_get_previous_fp() { 736 StubCodeMark mark(this, "StubRoutines", "get_previous_fp"); 737 const Address old_fp(rbp, 0); 738 const Address older_fp(rax, 0); 739 address start = __ pc(); 740 741 __ enter(); 742 __ movptr(rax, old_fp); // callers fp 743 __ movptr(rax, older_fp); // the frame for ps() 744 __ pop(rbp); 745 __ ret(0); 746 747 return start; 748 } 749 750 // Support for intptr_t get_previous_sp() 751 // 752 // This routine is used to find the previous stack pointer for the 753 // caller. 754 address generate_get_previous_sp() { 755 StubCodeMark mark(this, "StubRoutines", "get_previous_sp"); 756 address start = __ pc(); 757 758 __ movptr(rax, rsp); 759 __ addptr(rax, 8); // return address is at the top of the stack. 
  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess). This is used as part of debugging,
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp(rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // callers fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_sp()
  //
  // This routine is used to find the previous stack pointer for the
  // caller.
  address generate_get_previous_sp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_sp");
    address start = __ pc();

    __ movptr(rax, rsp);
    __ addptr(rax, 8); // return address is at the top of the stack.
    __ ret(0);

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize); // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK); // Only check control and mask bits
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }

  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

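  // generate_f2i_fixup() above and the f2l/d2i/d2l variants below share
  // one idea: CVTTSS2SI/CVTTSD2SI produce the "integer indefinite"
  // value (min_jint/min_jlong) for NaN and out-of-range inputs, while
  // Java requires 0 for NaN and a saturated min/max for overflow. The
  // fixup stubs therefore re-inspect the original float/double bits
  // (passed on the stack in 'inout') and overwrite the result in place.
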
  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);

    return start;
  }

  address generate_vector_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);

    return start;
  }

  address generate_vector_byte_perm_mask(const char *stub_name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64(0x0000000000000001, relocInfo::none);
    __ emit_data64(0x0000000000000003, relocInfo::none);
    __ emit_data64(0x0000000000000005, relocInfo::none);
    __ emit_data64(0x0000000000000007, relocInfo::none);
    __ emit_data64(0x0000000000000000, relocInfo::none);
    __ emit_data64(0x0000000000000002, relocInfo::none);
    __ emit_data64(0x0000000000000004, relocInfo::none);
    __ emit_data64(0x0000000000000006, relocInfo::none);

    return start;
  }

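  // The three generators above emit no instructions at all: they plant
  // 16 or 64 bytes of constant data in the stub area (a 128-bit FP
  // mask, a 512-bit vector mask, or a byte-permutation table) so that
  // SSE/AVX code can reference the constants as in-memory operands.
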
  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,
      saved_r10     = 8 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr   = 16 * wordSize,
      error_msg     = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit); // if obj is NULL it is OK

#if INCLUDE_ZGC
    if (UseZGC) {
      // Check if metadata bits indicate a bad oop
      __ testptr(rax, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
      __ jcc(Assembler::notZero, error);
    }
#endif

    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // set r12 to heapbase for load_klass()
    __ reinit_heapbase();

    // make sure klass is 'reasonable' (i.e., not zero)
    __ load_klass(rax, rax); // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // restore c_rarg3
    __ pop(c_rarg2);                               // restore c_rarg2
    __ pop(r12);                                   // restore r12
    __ popf();                                     // restore flags
    __ ret(4 * wordSize);                          // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // get saved c_rarg3 back
    __ pop(c_rarg2);                               // get saved c_rarg2 back
    __ pop(r12);                                   // get saved r12 back
    __ popf();                                     // get saved flags off stack --
                                                   // will be ignored

    __ pusha();                                    // push registers
                                                   // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //     [tos +  0] 16 saved registers
    //     [tos + 16] return address
    //   * [tos + 17] error message (char*)
    //   * [tos + 18] object to verify (oop)
    //   * [tos + 19] saved rax - saved by caller and bashed
    //   * [tos + 20] saved r10 (rscratch1) - saved by caller
    //   * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ hlt();
    return start;
  }

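  // The [tos + n] layout above is a contract with the stub's caller
  // (the verify_oop machinery in MacroAssembler): the caller pushes
  // r10, rax, the oop and the error message before calling, and the
  // 'ret(4 * wordSize)' pops those four starred words again on the
  // non-error path.
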
  //
  // Verify that a register contains a clean 32-bit positive value
  // (high 32 bits are 0) so it can be used in 64-bit shifts.
  //
  //  Input:
  //    Rint  -  32-bit value
  //    Rtmp  -  scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#ifdef ASSERT
    Label L;
    assert_different_registers(Rtmp, Rint);
    __ movslq(Rtmp, Rint);
    __ cmpq(Rtmp, Rint);
    __ jcc(Assembler::equal, L);
    __ stop("high 32-bits of int value are not 0");
    __ bind(L);
#endif
  }

  //  Generate overlap test for array copy stubs
  //
  //  Input:
  //     c_rarg0 - from
  //     c_rarg1 - to
  //     c_rarg2 - element count
  //
  //  Output:
  //     rax   - &from[element count - 1]
  //
  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(NULL, &L_no_overlap, sf);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
    const Register from     = c_rarg0;
    const Register to       = c_rarg1;
    const Register count    = c_rarg2;
    const Register end_from = rax;

    __ cmpptr(to, from);
    __ lea(end_from, Address(from, count, sf, 0));
    if (NOLp == NULL) {
      ExternalAddress no_overlap(no_overlap_target);
      __ jump_cc(Assembler::belowEqual, no_overlap);
      __ cmpptr(to, end_from);
      __ jump_cc(Assembler::aboveEqual, no_overlap);
    } else {
      __ jcc(Assembler::belowEqual, (*NOLp));
      __ cmpptr(to, end_from);
      __ jcc(Assembler::aboveEqual, (*NOLp));
    }
  }

  // Shuffle first three arg regs on Windows into Linux/Solaris locations.
  //
  // Outputs:
  //    rdi - rcx
  //    rsi - rdx
  //    rdx - r8
  //    rcx - r9
  //
  // Registers r9 and r10 are used to save rdi and rsi on Windows, where
  // the latter are non-volatile.  r9 and r10 should not be used by the
  // caller.
  //
  DEBUG_ONLY(bool regs_in_thread;)

  void setup_arg_regs(int nargs = 3) {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
    assert(nargs == 3 || nargs == 4, "else fix");
#ifdef _WIN64
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    if (nargs >= 4)
      __ mov(rax, r9);  // r9 is also saved_rdi
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
    if (nargs >= 4)
      __ mov(rcx, rax); // c_rarg3 (via rax)
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
    DEBUG_ONLY(regs_in_thread = false;)
  }

  void restore_arg_regs() {
    assert(!regs_in_thread, "wrong call to restore_arg_regs");
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
#ifdef _WIN64
    __ movptr(rdi, saved_rdi);
    __ movptr(rsi, saved_rsi);
#endif
  }

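  // In short (a summary of setup_arg_regs(), not extra generated code):
  // on Windows the incoming rcx/rdx/r8[/r9] are copied into
  // rdi/rsi/rdx[/rcx] so the copy stubs can use the System V argument
  // registers unconditionally, while the callee-saved rdi/rsi are
  // themselves preserved in r9/r10; on Linux the registers already
  // match and only the assert remains.
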
  // This is used in places where r10 is a scratch register, and can
  // be adapted if r9 is needed also.
  void setup_arg_regs_using_thread() {
    const Register saved_r15 = r9;
#ifdef _WIN64
    __ mov(saved_r15, r15); // r15 is callee saved and needs to be restored
    __ get_thread(r15_thread);
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    __ movptr(Address(r15_thread, in_bytes(JavaThread::windows_saved_rdi_offset())), rdi);
    __ movptr(Address(r15_thread, in_bytes(JavaThread::windows_saved_rsi_offset())), rsi);

    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
    DEBUG_ONLY(regs_in_thread = true;)
  }

  void restore_arg_regs_using_thread() {
    assert(regs_in_thread, "wrong call to restore_arg_regs");
    const Register saved_r15 = r9;
#ifdef _WIN64
    __ get_thread(r15_thread);
    __ movptr(rsi, Address(r15_thread, in_bytes(JavaThread::windows_saved_rsi_offset())));
    __ movptr(rdi, Address(r15_thread, in_bytes(JavaThread::windows_saved_rdi_offset())));
    __ mov(r15, saved_r15); // r15 is callee saved and needs to be restored
#endif
  }

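  // The bulk-copy helpers below select a loop body at runtime: copies
  // larger than AVX3Threshold bytes take the 64-bytes-per-iteration
  // evmovdqul (AVX-512) path, while shorter copies stay on the
  // 32-bytes-per-iteration vmovdqu path, so that small copies avoid
  // 512-bit operations (the motivation for the AVX3Threshold flag).
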
  // Copy big chunks forward
  //
  // Inputs:
  //   end_from       - source array end address
  //   end_to         - destination array end address
  //   qword_count    - 64-bit element count, negative
  //   to             - scratch
  //   L_copy_bytes   - entry label
  //   L_copy_8_bytes - exit label
  //
  void copy_bytes_forward(Register end_from, Register end_to,
                          Register qword_count, Register to,
                          Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      // Copy 64 bytes per iteration
      if (UseAVX > 2) {
        Label L_loop_avx512, L_loop_avx2, L_32_byte_head, L_above_threshold, L_below_threshold;

        __ BIND(L_copy_bytes);
        __ cmpptr(qword_count, (-1 * AVX3Threshold / 8));
        __ jccb(Assembler::less, L_above_threshold);
        __ jmpb(L_below_threshold);

        __ bind(L_loop_avx512);
        __ evmovdqul(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit);
        __ evmovdqul(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit);
        __ bind(L_above_threshold);
        __ addptr(qword_count, 8);
        __ jcc(Assembler::lessEqual, L_loop_avx512);
        __ jmpb(L_32_byte_head);

        __ bind(L_loop_avx2);
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
        __ bind(L_below_threshold);
        __ addptr(qword_count, 8);
        __ jcc(Assembler::lessEqual, L_loop_avx2);

        __ bind(L_32_byte_head);
        __ subptr(qword_count, 4); // sub(8) and add(4)
        __ jccb(Assembler::greater, L_end);
      } else {
        __ BIND(L_loop);
        if (UseAVX == 2) {
          __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
          __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
          __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
          __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
        } else {
          __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
          __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
          __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
          __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
          __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
          __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
          __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
          __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
        }

        __ BIND(L_copy_bytes);
        __ addptr(qword_count, 8);
        __ jcc(Assembler::lessEqual, L_loop);
        __ subptr(qword_count, 4); // sub(8) and add(4)
        __ jccb(Assembler::greater, L_end);
      }
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
      }
      __ addptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32 bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
      __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
      __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
      __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
      __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);

      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 4);
      __ jcc(Assembler::lessEqual, L_loop);
    }
    __ subptr(qword_count, 4);
    __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
  }

  // Copy big chunks backward
  //
  // Inputs:
  //   from           - source array address
  //   dest           - destination array address
  //   qword_count    - 64-bit element count
  //   to             - scratch
  //   L_copy_bytes   - entry label
  //   L_copy_8_bytes - exit label
  //
  void copy_bytes_backward(Register from, Register dest,
                           Register qword_count, Register to,
                           Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      // Copy 64 bytes per iteration
      if (UseAVX > 2) {
        Label L_loop_avx512, L_loop_avx2, L_32_byte_head, L_above_threshold, L_below_threshold;

        __ BIND(L_copy_bytes);
        __ cmpptr(qword_count, (AVX3Threshold / 8));
        __ jccb(Assembler::greater, L_above_threshold);
        __ jmpb(L_below_threshold);

        __ BIND(L_loop_avx512);
        __ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 0), Assembler::AVX_512bit);
        __ evmovdqul(Address(dest, qword_count, Address::times_8, 0), xmm0, Assembler::AVX_512bit);
        __ bind(L_above_threshold);
        __ subptr(qword_count, 8);
        __ jcc(Assembler::greaterEqual, L_loop_avx512);
        __ jmpb(L_32_byte_head);

        __ bind(L_loop_avx2);
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
        __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
        __ bind(L_below_threshold);
        __ subptr(qword_count, 8);
        __ jcc(Assembler::greaterEqual, L_loop_avx2);

        __ bind(L_32_byte_head);
        __ addptr(qword_count, 4); // add(8) and sub(4)
        __ jccb(Assembler::less, L_end);
      } else {
        __ BIND(L_loop);
        if (UseAVX == 2) {
          __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
          __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
          __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
          __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
        } else {
          __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
          __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
          __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
          __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
          __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
          __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
          __ movdqu(xmm3, Address(from, qword_count, Address::times_8, 0));
          __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm3);
        }

        __ BIND(L_copy_bytes);
        __ subptr(qword_count, 8);
        __ jcc(Assembler::greaterEqual, L_loop);

        __ addptr(qword_count, 4); // add(8) and sub(4)
        __ jccb(Assembler::less, L_end);
      }
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
        __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
      }
      __ subptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32 bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(from, qword_count, Address::times_8, 24));
      __ movq(Address(dest, qword_count, Address::times_8, 24), to);
      __ movq(to, Address(from, qword_count, Address::times_8, 16));
      __ movq(Address(dest, qword_count, Address::times_8, 16), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  8));
      __ movq(Address(dest, qword_count, Address::times_8,  8), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  0));
      __ movq(Address(dest, qword_count, Address::times_8,  0), to);

      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 4);
      __ jcc(Assembler::greaterEqual, L_loop);
    }
    __ addptr(qword_count, 4);
    __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
  }

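  // Both helpers above are entered at L_copy_bytes, never at the top
  // (hence the DEBUG_ONLY stop): the caller jumps in, the loop walks
  // qword_count toward zero in 8- or 4-qword strides, and any remaining
  // qwords are finished one at a time by branching back to the caller's
  // L_copy_8_bytes loop.
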
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_byte_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_byte_copy().
  //
  address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    Label L_copy_byte, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !aligned, true);
      // 'from', 'to' and 'count' are now valid
      __ movptr(byte_count, count);
      __ shrptr(count, 3);   // count => qword_count

      // Copy from low to high addresses.  Use 'to' as scratch.
      __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
      __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
      __ negptr(qword_count); // make the count negative
      __ jmp(L_copy_bytes);
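
      // Illustrative addressing (a note, not extra generated code): with
      // qword_count negated, the copy loops read
      //   Address(end_from, qword_count, times_8, 8)
      //     == end_from + 8*qword_count + 8,
      // which starts exactly at 'from' and walks upward as qword_count
      // counts up toward zero.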

      // Copy trailing qwords
      __ BIND(L_copy_8_bytes);
      __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
      __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
      __ increment(qword_count);
      __ jcc(Assembler::notZero, L_copy_8_bytes);

      // Check for and copy trailing dword
      __ BIND(L_copy_4_bytes);
      __ testl(byte_count, 4);
      __ jccb(Assembler::zero, L_copy_2_bytes);
      __ movl(rax, Address(end_from, 8));
      __ movl(Address(end_to, 8), rax);

      __ addptr(end_from, 4);
      __ addptr(end_to, 4);

      // Check for and copy trailing word
      __ BIND(L_copy_2_bytes);
      __ testl(byte_count, 2);
      __ jccb(Assembler::zero, L_copy_byte);
      __ movw(rax, Address(end_from, 8));
      __ movw(Address(end_to, 8), rax);

      __ addptr(end_from, 2);
      __ addptr(end_to, 2);

      // Check for and copy trailing byte
      __ BIND(L_copy_byte);
      __ testl(byte_count, 1);
      __ jccb(Assembler::zero, L_exit);
      __ movb(rax, Address(end_from, 8));
      __ movb(Address(end_to, 8), rax);
    }
    __ BIND(L_exit);
    address ucme_exit_pc = __ pc();
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    {
      UnsafeCopyMemoryMark ucmm(this, !aligned, false, ucme_exit_pc);
      // Copy in multi-byte chunks
      copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
      __ jmp(L_copy_4_bytes);
    }
    return start;
  }

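  // Note the UnsafeCopyMemoryMark regions above and below: they register
  // the copying code so that a page fault taken while copying on behalf
  // of Unsafe.copyMemory() can resume at ucme_exit_pc (the normal exit
  // path) instead of crashing the VM.
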
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !aligned, true);
      // 'from', 'to' and 'count' are now valid
      __ movptr(byte_count, count);
      __ shrptr(count, 3);   // count => qword_count

      // Copy from high to low addresses.

      // Check for and copy trailing byte
      __ testl(byte_count, 1);
      __ jcc(Assembler::zero, L_copy_2_bytes);
      __ movb(rax, Address(from, byte_count, Address::times_1, -1));
      __ movb(Address(to, byte_count, Address::times_1, -1), rax);
      __ decrement(byte_count); // Adjust for possible trailing word

      // Check for and copy trailing word
      __ BIND(L_copy_2_bytes);
      __ testl(byte_count, 2);
      __ jcc(Assembler::zero, L_copy_4_bytes);
      __ movw(rax, Address(from, byte_count, Address::times_1, -2));
      __ movw(Address(to, byte_count, Address::times_1, -2), rax);

      // Check for and copy trailing dword
      __ BIND(L_copy_4_bytes);
      __ testl(byte_count, 4);
      __ jcc(Assembler::zero, L_copy_bytes);
      __ movl(rax, Address(from, qword_count, Address::times_8));
      __ movl(Address(to, qword_count, Address::times_8), rax);
      __ jmp(L_copy_bytes);

      // Copy trailing qwords
      __ BIND(L_copy_8_bytes);
      __ movq(rax, Address(from, qword_count, Address::times_8, -8));
      __ movq(Address(to, qword_count, Address::times_8, -8), rax);
      __ decrement(qword_count);
      __ jcc(Assembler::notZero, L_copy_8_bytes);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !aligned, true);
      // Copy in multi-byte chunks
      copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

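  // The conjoint (overlapping) copies run from high addresses to low so
  // that a destination overlapping the top of the source never clobbers
  // bytes before they are read; array_overlap_test() branches to the
  // disjoint (forward-copying) stub whenever the ranges cannot overlap.
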
1722 // 1723 address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) { 1724 __ align(CodeEntryAlignment); 1725 StubCodeMark mark(this, "StubRoutines", name); 1726 address start = __ pc(); 1727 1728 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes, L_exit; 1729 const Register from = rdi; // source array address 1730 const Register to = rsi; // destination array address 1731 const Register count = rdx; // elements count 1732 const Register word_count = rcx; 1733 const Register qword_count = count; 1734 const Register end_from = from; // source array end address 1735 const Register end_to = to; // destination array end address 1736 // End pointers are inclusive, and if count is not zero they point 1737 // to the last unit copied: end_to[0] := end_from[0] 1738 1739 __ enter(); // required for proper stackwalking of RuntimeStub frame 1740 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1741 1742 if (entry != NULL) { 1743 *entry = __ pc(); 1744 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1745 BLOCK_COMMENT("Entry:"); 1746 } 1747 1748 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1749 // r9 and r10 may be used to save non-volatile registers 1750 1751 { 1752 // UnsafeCopyMemory page error: continue after ucm 1753 UnsafeCopyMemoryMark ucmm(this, !aligned, true); 1754 // 'from', 'to' and 'count' are now valid 1755 __ movptr(word_count, count); 1756 __ shrptr(count, 2); // count => qword_count 1757 1758 // Copy from low to high addresses. Use 'to' as scratch. 1759 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1760 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1761 __ negptr(qword_count); 1762 __ jmp(L_copy_bytes); 1763 1764 // Copy trailing qwords 1765 __ BIND(L_copy_8_bytes); 1766 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1767 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1768 __ increment(qword_count); 1769 __ jcc(Assembler::notZero, L_copy_8_bytes); 1770 1771 // Original 'dest' is trashed, so we can't use it as a 1772 // base register for a possible trailing word copy 1773 1774 // Check for and copy trailing dword 1775 __ BIND(L_copy_4_bytes); 1776 __ testl(word_count, 2); 1777 __ jccb(Assembler::zero, L_copy_2_bytes); 1778 __ movl(rax, Address(end_from, 8)); 1779 __ movl(Address(end_to, 8), rax); 1780 1781 __ addptr(end_from, 4); 1782 __ addptr(end_to, 4); 1783 1784 // Check for and copy trailing word 1785 __ BIND(L_copy_2_bytes); 1786 __ testl(word_count, 1); 1787 __ jccb(Assembler::zero, L_exit); 1788 __ movw(rax, Address(end_from, 8)); 1789 __ movw(Address(end_to, 8), rax); 1790 } 1791 __ BIND(L_exit); 1792 address ucme_exit_pc = __ pc(); 1793 restore_arg_regs(); 1794 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1795 __ xorptr(rax, rax); // return 0 1796 __ vzeroupper(); 1797 __ leave(); // required for proper stackwalking of RuntimeStub frame 1798 __ ret(0); 1799 1800 { 1801 UnsafeCopyMemoryMark ucmm(this, !aligned, false, ucme_exit_pc); 1802 // Copy in multi-byte chunks 1803 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1804 __ jmp(L_copy_4_bytes); 1805 } 1806 1807 return start; 1808 } 1809 1810 address generate_fill(BasicType t, bool aligned, const char *name) { 1811 __ align(CodeEntryAlignment); 1812 StubCodeMark mark(this, "StubRoutines", name); 1813 address start = __ pc(); 1814 1815 BLOCK_COMMENT("Entry:"); 1816
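// Inputs (summarized from the register assignments below):
//   c_rarg0 - destination array address
//   c_rarg1 - fill value
//   c_rarg2 - element count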
1817 const Register to = c_rarg0; // destination array address 1818 const Register value = c_rarg1; // value 1819 const Register count = c_rarg2; // elements count 1820 1821 __ enter(); // required for proper stackwalking of RuntimeStub frame 1822 1823 __ generate_fill(t, aligned, to, value, count, rax, xmm0); 1824 1825 __ vzeroupper(); 1826 __ leave(); // required for proper stackwalking of RuntimeStub frame 1827 __ ret(0); 1828 return start; 1829 } 1830 1831 // Arguments: 1832 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1833 // ignored 1834 // name - stub name string 1835 // 1836 // Inputs: 1837 // c_rarg0 - source array address 1838 // c_rarg1 - destination array address 1839 // c_rarg2 - element count, treated as ssize_t, can be zero 1840 // 1841 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we 1842 // let the hardware handle it. The two or four words within dwords 1843 // or qwords that span cache line boundaries will still be loaded 1844 // and stored atomically. 1845 // 1846 address generate_conjoint_short_copy(bool aligned, address nooverlap_target, 1847 address *entry, const char *name) { 1848 __ align(CodeEntryAlignment); 1849 StubCodeMark mark(this, "StubRoutines", name); 1850 address start = __ pc(); 1851 1852 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes; 1853 const Register from = rdi; // source array address 1854 const Register to = rsi; // destination array address 1855 const Register count = rdx; // elements count 1856 const Register word_count = rcx; 1857 const Register qword_count = count; 1858 1859 __ enter(); // required for proper stackwalking of RuntimeStub frame 1860 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1861 1862 if (entry != NULL) { 1863 *entry = __ pc(); 1864 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1865 BLOCK_COMMENT("Entry:"); 1866 } 1867 1868 array_overlap_test(nooverlap_target, Address::times_2); 1869 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1870 // r9 and r10 may be used to save non-volatile registers 1871 1872 { 1873 // UnsafeCopyMemory page error: continue after ucm 1874 UnsafeCopyMemoryMark ucmm(this, !aligned, true); 1875 // 'from', 'to' and 'count' are now valid 1876 __ movptr(word_count, count); 1877 __ shrptr(count, 2); // count => qword_count 1878 1879 // Copy from high to low addresses. Use 'to' as scratch.
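// Copying from the highest address down is what makes the conjoint
// stub safe for overlapping regions with dst > src; the non-overlapping
// case was already dispatched to the disjoint stub by
// array_overlap_test above.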
1880 1881 // Check for and copy trailing word 1882 __ testl(word_count, 1); 1883 __ jccb(Assembler::zero, L_copy_4_bytes); 1884 __ movw(rax, Address(from, word_count, Address::times_2, -2)); 1885 __ movw(Address(to, word_count, Address::times_2, -2), rax); 1886 1887 // Check for and copy trailing dword 1888 __ BIND(L_copy_4_bytes); 1889 __ testl(word_count, 2); 1890 __ jcc(Assembler::zero, L_copy_bytes); 1891 __ movl(rax, Address(from, qword_count, Address::times_8)); 1892 __ movl(Address(to, qword_count, Address::times_8), rax); 1893 __ jmp(L_copy_bytes); 1894 1895 // Copy trailing qwords 1896 __ BIND(L_copy_8_bytes); 1897 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1898 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1899 __ decrement(qword_count); 1900 __ jcc(Assembler::notZero, L_copy_8_bytes); 1901 } 1902 restore_arg_regs(); 1903 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1904 __ xorptr(rax, rax); // return 0 1905 __ vzeroupper(); 1906 __ leave(); // required for proper stackwalking of RuntimeStub frame 1907 __ ret(0); 1908 1909 { 1910 // UnsafeCopyMemory page error: continue after ucm 1911 UnsafeCopyMemoryMark ucmm(this, !aligned, true); 1912 // Copy in multi-byte chunks 1913 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1914 } 1915 restore_arg_regs(); 1916 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1917 __ xorptr(rax, rax); // return 0 1918 __ vzeroupper(); 1919 __ leave(); // required for proper stackwalking of RuntimeStub frame 1920 __ ret(0); 1921 1922 return start; 1923 } 1924 1925 // Arguments: 1926 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1927 // ignored 1928 // is_oop - true => oop array, so generate store check code 1929 // name - stub name string 1930 // 1931 // Inputs: 1932 // c_rarg0 - source array address 1933 // c_rarg1 - destination array address 1934 // c_rarg2 - element count, treated as ssize_t, can be zero 1935 // 1936 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 1937 // the hardware handle it. The two dwords within qwords that span 1938 // cache line boundaries will still be loaded and stored atomically. 1939 // 1940 // Side Effects: 1941 // disjoint_int_copy_entry is set to the no-overlap entry point 1942 // used by generate_conjoint_int_oop_copy(). 1943 // 1944 address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry, 1945 const char *name, bool dest_uninitialized = false) { 1946 __ align(CodeEntryAlignment); 1947 StubCodeMark mark(this, "StubRoutines", name); 1948 address start = __ pc(); 1949 1950 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit; 1951 const Register from = rdi; // source array address 1952 const Register to = rsi; // destination array address 1953 const Register count = rdx; // elements count 1954 const Register dword_count = rcx; 1955 const Register qword_count = count; 1956 const Register end_from = from; // source array end address 1957 const Register end_to = to; // destination array end address 1958 // End pointers are inclusive, and if count is not zero they point 1959 // to the last unit copied: end_to[0] := end_from[0] 1960 1961 __ enter(); // required for proper stackwalking of RuntimeStub frame 1962 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1963 1964 if (entry != NULL) { 1965 *entry = __ pc(); 1966 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1967 BLOCK_COMMENT("Entry:"); 1968 } 1969 1970 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx 1971 // r9 is used to save r15_thread 1972 1973 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT; 1974 if (dest_uninitialized) { 1975 decorators |= IS_DEST_UNINITIALIZED; 1976 } 1977 if (aligned) { 1978 decorators |= ARRAYCOPY_ALIGNED; 1979 } 1980 1981 BasicType type = is_oop ? T_OBJECT : T_INT; 1982 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 1983 bs->arraycopy_prologue(_masm, decorators, type, from, to, count); 1984 1985 { 1986 // UnsafeCopyMemory page error: continue after ucm 1987 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 1988 // 'from', 'to' and 'count' are now valid 1989 __ movptr(dword_count, count); 1990 __ shrptr(count, 1); // count => qword_count 1991 1992 // Copy from low to high addresses. Use 'to' as scratch. 1993 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1994 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1995 __ negptr(qword_count); 1996 __ jmp(L_copy_bytes); 1997 1998 // Copy trailing qwords 1999 __ BIND(L_copy_8_bytes); 2000 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 2001 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 2002 __ increment(qword_count); 2003 __ jcc(Assembler::notZero, L_copy_8_bytes); 2004 2005 // Check for and copy trailing dword 2006 __ BIND(L_copy_4_bytes); 2007 __ testl(dword_count, 1); // Only byte test since the value is 0 or 1 2008 __ jccb(Assembler::zero, L_exit); 2009 __ movl(rax, Address(end_from, 8)); 2010 __ movl(Address(end_to, 8), rax); 2011 } 2012 __ BIND(L_exit); 2013 address ucme_exit_pc = __ pc(); 2014 bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count); 2015 restore_arg_regs_using_thread(); 2016 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2017 __ vzeroupper(); 2018 __ xorptr(rax, rax); // return 0 2019 __ leave(); // required for proper stackwalking of RuntimeStub frame 2020 __ ret(0); 2021 2022 { 2023 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, false, ucme_exit_pc); 2024 // Copy in multi-byte chunks 2025 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2026 __ jmp(L_copy_4_bytes); 2027 } 2028 2029 return start; 2030 } 2031 2032 // Arguments: 2033 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 2034 // ignored 2035 // is_oop - true => oop array, so generate store check code 2036 // name - stub name string 2037 // 2038 // Inputs: 2039 // c_rarg0 - source array address 2040 // c_rarg1 - destination array address 2041 // c_rarg2 - element count, treated as ssize_t, can be zero 2042 // 2043 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 2044 // the hardware handle it. The two dwords within qwords that span 2045 // cache line boundaries will still be loaded and stored atomically.
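// As with the disjoint variant, the element count is split into qword
// chunks plus a possible odd trailing dword; the conjoint stub below
// copies the trailing dword first, at the high end, and then the
// qwords from high to low addresses (illustrative summary, not
// generated code).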
2046 // 2047 address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target, 2048 address *entry, const char *name, 2049 bool dest_uninitialized = false) { 2050 __ align(CodeEntryAlignment); 2051 StubCodeMark mark(this, "StubRoutines", name); 2052 address start = __ pc(); 2053 2054 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2055 const Register from = rdi; // source array address 2056 const Register to = rsi; // destination array address 2057 const Register count = rdx; // elements count 2058 const Register dword_count = rcx; 2059 const Register qword_count = count; 2060 2061 __ enter(); // required for proper stackwalking of RuntimeStub frame 2062 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2063 2064 if (entry != NULL) { 2065 *entry = __ pc(); 2066 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2067 BLOCK_COMMENT("Entry:"); 2068 } 2069 2070 array_overlap_test(nooverlap_target, Address::times_4); 2071 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx 2072 // r9 is used to save r15_thread 2073 2074 DecoratorSet decorators = IN_HEAP | IS_ARRAY; 2075 if (dest_uninitialized) { 2076 decorators |= IS_DEST_UNINITIALIZED; 2077 } 2078 if (aligned) { 2079 decorators |= ARRAYCOPY_ALIGNED; 2080 } 2081 2082 BasicType type = is_oop ? T_OBJECT : T_INT; 2083 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2084 // no registers are destroyed by this call 2085 bs->arraycopy_prologue(_masm, decorators, type, from, to, count); 2086 2087 assert_clean_int(count, rax); // Make sure 'count' is clean int. 2088 { 2089 // UnsafeCopyMemory page error: continue after ucm 2090 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2091 // 'from', 'to' and 'count' are now valid 2092 __ movptr(dword_count, count); 2093 __ shrptr(count, 1); // count => qword_count 2094 2095 // Copy from high to low addresses. Use 'to' as scratch. 
2096 2097 // Check for and copy trailing dword 2098 __ testl(dword_count, 1); 2099 __ jcc(Assembler::zero, L_copy_bytes); 2100 __ movl(rax, Address(from, dword_count, Address::times_4, -4)); 2101 __ movl(Address(to, dword_count, Address::times_4, -4), rax); 2102 __ jmp(L_copy_bytes); 2103 2104 // Copy trailing qwords 2105 __ BIND(L_copy_8_bytes); 2106 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2107 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2108 __ decrement(qword_count); 2109 __ jcc(Assembler::notZero, L_copy_8_bytes); 2110 } 2111 if (is_oop) { 2112 __ jmp(L_exit); 2113 } 2114 restore_arg_regs_using_thread(); 2115 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2116 __ xorptr(rax, rax); // return 0 2117 __ vzeroupper(); 2118 __ leave(); // required for proper stackwalking of RuntimeStub frame 2119 __ ret(0); 2120 2121 { 2122 // UnsafeCopyMemory page error: continue after ucm 2123 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2124 // Copy in multi-byte chunks 2125 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2126 } 2127 2128 __ BIND(L_exit); 2129 bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count); 2130 restore_arg_regs_using_thread(); 2131 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2132 __ xorptr(rax, rax); // return 0 2133 __ vzeroupper(); 2134 __ leave(); // required for proper stackwalking of RuntimeStub frame 2135 __ ret(0); 2136 2137 return start; 2138 } 2139 2140 // Arguments: 2141 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2142 // ignored 2143 // is_oop - true => oop array, so generate store check code 2144 // name - stub name string 2145 // 2146 // Inputs: 2147 // c_rarg0 - source array address 2148 // c_rarg1 - destination array address 2149 // c_rarg2 - element count, treated as ssize_t, can be zero 2150 // 2151 // Side Effects: 2152 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the 2153 // no-overlap entry point used by generate_conjoint_long_oop_copy(). 2154 // 2155 address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry, 2156 const char *name, bool dest_uninitialized = false) { 2157 __ align(CodeEntryAlignment); 2158 StubCodeMark mark(this, "StubRoutines", name); 2159 address start = __ pc(); 2160 2161 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2162 const Register from = rdi; // source array address 2163 const Register to = rsi; // destination array address 2164 const Register qword_count = rdx; // elements count 2165 const Register end_from = from; // source array end address 2166 const Register end_to = rcx; // destination array end address 2167 const Register saved_count = r11; 2168 // End pointers are inclusive, and if count is not zero they point 2169 // to the last unit copied: end_to[0] := end_from[0] 2170 2171 __ enter(); // required for proper stackwalking of RuntimeStub frame 2172 // Save no-overlap entry point for generate_conjoint_long_oop_copy() 2173 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
2174 2175 if (entry != NULL) { 2176 *entry = __ pc(); 2177 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2178 BLOCK_COMMENT("Entry:"); 2179 } 2180 2181 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx 2182 // r9 is used to save r15_thread 2183 // 'from', 'to' and 'qword_count' are now valid 2184 2185 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT; 2186 if (dest_uninitialized) { 2187 decorators |= IS_DEST_UNINITIALIZED; 2188 } 2189 if (aligned) { 2190 decorators |= ARRAYCOPY_ALIGNED; 2191 } 2192 2193 BasicType type = is_oop ? T_OBJECT : T_LONG; 2194 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2195 bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count); 2196 { 2197 // UnsafeCopyMemory page error: continue after ucm 2198 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2199 2200 // Copy from low to high addresses. Use 'to' as scratch. 2201 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 2202 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 2203 __ negptr(qword_count); 2204 __ jmp(L_copy_bytes); 2205 2206 // Copy trailing qwords 2207 __ BIND(L_copy_8_bytes); 2208 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 2209 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 2210 __ increment(qword_count); 2211 __ jcc(Assembler::notZero, L_copy_8_bytes); 2212 } 2213 if (is_oop) { 2214 __ jmp(L_exit); 2215 } else { 2216 restore_arg_regs_using_thread(); 2217 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2218 __ xorptr(rax, rax); // return 0 2219 __ vzeroupper(); 2220 __ leave(); // required for proper stackwalking of RuntimeStub frame 2221 __ ret(0); 2222 } 2223 2224 { 2225 // UnsafeCopyMemory page error: continue after ucm 2226 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2227 // Copy in multi-byte chunks 2228 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2229 } 2230 2231 __ BIND(L_exit); 2232 bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count); 2233 restore_arg_regs_using_thread(); 2234 if (is_oop) { 2235 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2236 } else { 2237 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2238 } 2239 __ vzeroupper(); 2240 __ xorptr(rax, rax); // return 0 2241 __ leave(); // required for proper stackwalking of RuntimeStub frame 2242 __ ret(0); 2243 2244 return start; 2245 } 2246 2247 // Arguments: 2248 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2249 // ignored 2250 // is_oop - true => oop array, so generate store check code 2251 // name - stub name string 2252 // 2253 // Inputs: 2254 // c_rarg0 - source array address 2255 // c_rarg1 - destination array address 2256 // c_rarg2 - element count, treated as ssize_t, can be zero 2257 // 2258 address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, 2259 address nooverlap_target, address *entry, 2260 const char *name, bool dest_uninitialized = false) { 2261 __ align(CodeEntryAlignment); 2262 StubCodeMark mark(this, "StubRoutines", name); 2263 address start = __ pc(); 2264 2265 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2266 const Register from = rdi; // source array address 2267 const Register to = rsi; // destination array address 2268 const Register qword_count = rdx; // elements count
2269 const Register saved_count = rcx; 2270 2271 __ enter(); // required for proper stackwalking of RuntimeStub frame 2272 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2273 2274 if (entry != NULL) { 2275 *entry = __ pc(); 2276 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2277 BLOCK_COMMENT("Entry:"); 2278 } 2279 2280 array_overlap_test(nooverlap_target, Address::times_8); 2281 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx 2282 // r9 is used to save r15_thread 2283 // 'from', 'to' and 'qword_count' are now valid 2284 2285 DecoratorSet decorators = IN_HEAP | IS_ARRAY; 2286 if (dest_uninitialized) { 2287 decorators |= IS_DEST_UNINITIALIZED; 2288 } 2289 if (aligned) { 2290 decorators |= ARRAYCOPY_ALIGNED; 2291 } 2292 2293 BasicType type = is_oop ? T_OBJECT : T_LONG; 2294 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2295 bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count); 2296 { 2297 // UnsafeCopyMemory page error: continue after ucm 2298 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2299 2300 __ jmp(L_copy_bytes); 2301 2302 // Copy trailing qwords 2303 __ BIND(L_copy_8_bytes); 2304 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2305 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2306 __ decrement(qword_count); 2307 __ jcc(Assembler::notZero, L_copy_8_bytes); 2308 } 2309 if (is_oop) { 2310 __ jmp(L_exit); 2311 } else { 2312 restore_arg_regs_using_thread(); 2313 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2314 __ xorptr(rax, rax); // return 0 2315 __ vzeroupper(); 2316 __ leave(); // required for proper stackwalking of RuntimeStub frame 2317 __ ret(0); 2318 } 2319 { 2320 // UnsafeCopyMemory page error: continue after ucm 2321 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2322 2323 // Copy in multi-byte chunks 2324 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2325 } 2326 __ BIND(L_exit); 2327 bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count); 2328 restore_arg_regs_using_thread(); 2329 if (is_oop) { 2330 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2331 } else { 2332 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2333 } 2334 __ vzeroupper(); 2335 __ xorptr(rax, rax); // return 0 2336 __ leave(); // required for proper stackwalking of RuntimeStub frame 2337 __ ret(0); 2338 2339 return start; 2340 } 2341 2342 2343 // Helper for generating a dynamic type check. 2344 // Smashes no registers. 2345 void generate_type_check(Register sub_klass, 2346 Register super_check_offset, 2347 Register super_klass, 2348 Label& L_success) { 2349 assert_different_registers(sub_klass, super_check_offset, super_klass); 2350 2351 BLOCK_COMMENT("type_check:"); 2352 2353 Label L_miss; 2354 2355 __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL, 2356 super_check_offset); 2357 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL); 2358 2359 // Fall through on failure!
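// (check_klass_subtype_fast_path covers the common cases -- an exact
// klass match or a hit at super_check_offset -- while the slow path
// scans the secondary supers array; both are defined in
// macroAssembler_x86.cpp.)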
2360 __ BIND(L_miss); 2361 } 2362 2363 // 2364 // Generate checkcasting array copy stub 2365 // 2366 // Input: 2367 // c_rarg0 - source array address 2368 // c_rarg1 - destination array address 2369 // c_rarg2 - element count, treated as ssize_t, can be zero 2370 // c_rarg3 - size_t ckoff (super_check_offset) 2371 // not Win64 2372 // c_rarg4 - oop ckval (super_klass) 2373 // Win64 2374 // rsp+40 - oop ckval (super_klass) 2375 // 2376 // Output: 2377 // rax == 0 - success 2378 // rax == -1^K - failure, where K is partial transfer count 2379 // 2380 address generate_checkcast_copy(const char *name, address *entry, 2381 bool dest_uninitialized = false) { 2382 2383 Label L_load_element, L_store_element, L_do_card_marks, L_done; 2384 2385 // Input registers (after setup_arg_regs) 2386 const Register from = rdi; // source array address 2387 const Register to = rsi; // destination array address 2388 const Register length = rdx; // elements count 2389 const Register ckoff = rcx; // super_check_offset 2390 const Register ckval = r8; // super_klass 2391 2392 // Registers used as temps (r13, r14 are save-on-entry) 2393 const Register end_from = from; // source array end address 2394 const Register end_to = r13; // destination array end address 2395 const Register count = rdx; // -(count_remaining) 2396 const Register r14_length = r14; // saved copy of length 2397 // End pointers are inclusive, and if length is not zero they point 2398 // to the last unit copied: end_to[0] := end_from[0] 2399 2400 const Register rax_oop = rax; // actual oop copied 2401 const Register r11_klass = r11; // oop._klass 2402 2403 //--------------------------------------------------------------- 2404 // Assembler stub will be used for this call to arraycopy 2405 // if the two arrays are subtypes of Object[] but the 2406 // destination array type is not equal to or a supertype 2407 // of the source type. Each element must be separately 2408 // checked. 2409 2410 __ align(CodeEntryAlignment); 2411 StubCodeMark mark(this, "StubRoutines", name); 2412 address start = __ pc(); 2413 2414 __ enter(); // required for proper stackwalking of RuntimeStub frame 2415 2416 #ifdef ASSERT 2417 // caller guarantees that the arrays really are different 2418 // otherwise, we would have to make conjoint checks 2419 { Label L; 2420 array_overlap_test(L, TIMES_OOP); 2421 __ stop("checkcast_copy within a single array"); 2422 __ bind(L); 2423 } 2424 #endif //ASSERT 2425 2426 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx 2427 // ckoff => rcx, ckval => r8 2428 // r9 and r10 may be used to save non-volatile registers 2429 #ifdef _WIN64 2430 // last argument (#4) is on stack on Win64 2431 __ movptr(ckval, Address(rsp, 6 * wordSize)); 2432 #endif 2433 2434 // Caller of this entry point must set up the argument registers. 
2435 if (entry != NULL) { 2436 *entry = __ pc(); 2437 BLOCK_COMMENT("Entry:"); 2438 } 2439 2440 // allocate spill slots for r13, r14 2441 enum { 2442 saved_r13_offset, 2443 saved_r14_offset, 2444 saved_r10_offset, 2445 saved_rbp_offset 2446 }; 2447 __ subptr(rsp, saved_rbp_offset * wordSize); 2448 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13); 2449 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14); 2450 __ movptr(Address(rsp, saved_r10_offset * wordSize), r10); 2451 2452 #ifdef ASSERT 2453 Label L2; 2454 __ get_thread(r14); 2455 __ cmpptr(r15_thread, r14); 2456 __ jcc(Assembler::equal, L2); 2457 __ stop("StubRoutines::checkcast_copy: r15_thread is modified by call"); 2458 __ bind(L2); 2459 #endif // ASSERT 2460 2461 // check that int operands are properly extended to size_t 2462 assert_clean_int(length, rax); 2463 assert_clean_int(ckoff, rax); 2464 2465 #ifdef ASSERT 2466 BLOCK_COMMENT("assert consistent ckoff/ckval"); 2467 // The ckoff and ckval must be mutually consistent, 2468 // even though caller generates both. 2469 { Label L; 2470 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2471 __ cmpl(ckoff, Address(ckval, sco_offset)); 2472 __ jcc(Assembler::equal, L); 2473 __ stop("super_check_offset inconsistent"); 2474 __ bind(L); 2475 } 2476 #endif //ASSERT 2477 2478 // Loop-invariant addresses. They are exclusive end pointers. 2479 Address end_from_addr(from, length, TIMES_OOP, 0); 2480 Address end_to_addr(to, length, TIMES_OOP, 0); 2481 // Loop-variant addresses. They assume post-incremented count < 0. 2482 Address from_element_addr(end_from, count, TIMES_OOP, 0); 2483 Address to_element_addr(end_to, count, TIMES_OOP, 0); 2484 2485 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST | ARRAYCOPY_DISJOINT; 2486 if (dest_uninitialized) { 2487 decorators |= IS_DEST_UNINITIALIZED; 2488 } 2489 2490 BasicType type = T_OBJECT; 2491 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2492 bs->arraycopy_prologue(_masm, decorators, type, from, to, count); 2493 2494 // Copy from low to high addresses, indexed from the end of each array. 2495 __ lea(end_from, end_from_addr); 2496 __ lea(end_to, end_to_addr); 2497 __ movptr(r14_length, length); // save a copy of the length 2498 assert(length == count, ""); // else fix next line: 2499 __ negptr(count); // negate and test the length 2500 __ jcc(Assembler::notZero, L_load_element); 2501 2502 // Empty array: Nothing to do. 2503 __ xorptr(rax, rax); // return 0 on (trivial) success 2504 __ jmp(L_done); 2505 2506 // ======== begin loop ======== 2507 // (Loop is rotated; its entry is L_load_element.) 2508 // Loop control: 2509 // for (count = -count; count != 0; count++) 2510 // Base pointers src, dst are biased by 8*(count-1), to last element.
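// Equivalent C sketch of the rotated loop (illustrative only;
// is_subtype is a hypothetical stand-in for the generated type check):
//
//   for (count = -count; count != 0; count++) {   // counts up toward zero
//     oop el = end_from[count];                   // L_load_element
//     if (el != NULL && !is_subtype(el->klass(), ckval)) break;
//     end_to[count] = el;                         // L_store_element
//   }
//   // rax = 0 on full success, else ~(number of oops copied)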
2511 __ align(OptoLoopAlignment); 2512 2513 __ BIND(L_store_element); 2514 __ store_heap_oop(to_element_addr, rax_oop, noreg, noreg, AS_RAW); // store the oop 2515 __ increment(count); // increment the count toward zero 2516 __ jcc(Assembler::zero, L_do_card_marks); 2517 2518 // ======== loop entry is here ======== 2519 __ BIND(L_load_element); 2520 __ load_heap_oop(rax_oop, from_element_addr, noreg, noreg, AS_RAW); // load the oop 2521 __ testptr(rax_oop, rax_oop); 2522 __ jcc(Assembler::zero, L_store_element); 2523 2524 __ load_klass(r11_klass, rax_oop);// query the object klass 2525 generate_type_check(r11_klass, ckoff, ckval, L_store_element); 2526 // ======== end loop ======== 2527 2528 // It was a real error; we must depend on the caller to finish the job. 2529 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops. 2530 // Emit GC store barriers for the oops we have copied (r14 + rdx), 2531 // and report their number to the caller. 2532 assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1); 2533 Label L_post_barrier; 2534 __ addptr(r14_length, count); // K = (original - remaining) oops 2535 __ movptr(rax, r14_length); // save the value 2536 __ notptr(rax); // report (-1^K) to caller (does not affect flags) 2537 __ jccb(Assembler::notZero, L_post_barrier); 2538 __ jmp(L_done); // K == 0, nothing was copied, skip post barrier 2539 2540 // Come here on success only. 2541 __ BIND(L_do_card_marks); 2542 __ xorptr(rax, rax); // return 0 on success 2543 2544 __ BIND(L_post_barrier); 2545 bs->arraycopy_epilogue(_masm, decorators, type, from, to, r14_length); 2546 2547 // Common exit point (success or failure). 2548 __ BIND(L_done); 2549 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 2550 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 2551 __ movptr(r10, Address(rsp, saved_r10_offset * wordSize)); 2552 restore_arg_regs(); 2553 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free 2554 __ leave(); // required for proper stackwalking of RuntimeStub frame 2555 __ ret(0); 2556 2557 return start; 2558 } 2559 2560 // 2561 // Generate 'unsafe' array copy stub 2562 // Though just as safe as the other stubs, it takes an unscaled 2563 // size_t argument instead of an element count. 2564 // 2565 // Input: 2566 // c_rarg0 - source array address 2567 // c_rarg1 - destination array address 2568 // c_rarg2 - byte count, treated as ssize_t, can be zero 2569 // 2570 // Examines the alignment of the operands and dispatches 2571 // to a long, int, short, or byte copy loop. 
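// Illustrative sketch of the dispatch (hypothetical C, not the
// generated code): the alignment of all three operands is tested at
// once by OR-ing them together:
//
//   uintptr_t bits = (uintptr_t)from | (uintptr_t)to | (uintptr_t)size;
//   if ((bits & (BytesPerLong  - 1)) == 0) return long_copy (from, to, size >> LogBytesPerLong);
//   if ((bits & (BytesPerInt   - 1)) == 0) return int_copy  (from, to, size >> LogBytesPerInt);
//   if ((bits & (BytesPerShort - 1)) == 0) return short_copy(from, to, size >> LogBytesPerShort);
//   return byte_copy(from, to, size);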
2572 // 2573 address generate_unsafe_copy(const char *name, 2574 address byte_copy_entry, address short_copy_entry, 2575 address int_copy_entry, address long_copy_entry) { 2576 2577 Label L_long_aligned, L_int_aligned, L_short_aligned; 2578 2579 // Input registers (before setup_arg_regs) 2580 const Register from = c_rarg0; // source array address 2581 const Register to = c_rarg1; // destination array address 2582 const Register size = c_rarg2; // byte count (size_t) 2583 2584 // Register used as a temp 2585 const Register bits = rax; // test copy of low bits 2586 2587 __ align(CodeEntryAlignment); 2588 StubCodeMark mark(this, "StubRoutines", name); 2589 address start = __ pc(); 2590 2591 __ enter(); // required for proper stackwalking of RuntimeStub frame 2592 2593 // bump this on entry, not on exit: 2594 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr); 2595 2596 __ mov(bits, from); 2597 __ orptr(bits, to); 2598 __ orptr(bits, size); 2599 2600 __ testb(bits, BytesPerLong-1); 2601 __ jccb(Assembler::zero, L_long_aligned); 2602 2603 __ testb(bits, BytesPerInt-1); 2604 __ jccb(Assembler::zero, L_int_aligned); 2605 2606 __ testb(bits, BytesPerShort-1); 2607 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry)); 2608 2609 __ BIND(L_short_aligned); 2610 __ shrptr(size, LogBytesPerShort); // size => short_count 2611 __ jump(RuntimeAddress(short_copy_entry)); 2612 2613 __ BIND(L_int_aligned); 2614 __ shrptr(size, LogBytesPerInt); // size => int_count 2615 __ jump(RuntimeAddress(int_copy_entry)); 2616 2617 __ BIND(L_long_aligned); 2618 __ shrptr(size, LogBytesPerLong); // size => qword_count 2619 __ jump(RuntimeAddress(long_copy_entry)); 2620 2621 return start; 2622 } 2623 2624 // Perform range checks on the proposed arraycopy. 2625 // Kills temp, but nothing else. 2626 // Also, clean the sign bits of src_pos and dst_pos. 2627 void arraycopy_range_checks(Register src, // source array oop (c_rarg0) 2628 Register src_pos, // source position (c_rarg1) 2629 Register dst, // destination array oop (c_rarg2) 2630 Register dst_pos, // destination position (c_rarg3) 2631 Register length, 2632 Register temp, 2633 Label& L_failed) { 2634 BLOCK_COMMENT("arraycopy_range_checks:"); 2635 2636 // if (src_pos + length > arrayOop(src)->length()) FAIL; 2637 __ movl(temp, length); 2638 __ addl(temp, src_pos); // src_pos + length 2639 __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes())); 2640 __ jcc(Assembler::above, L_failed); 2641 2642 // if (dst_pos + length > arrayOop(dst)->length()) FAIL; 2643 __ movl(temp, length); 2644 __ addl(temp, dst_pos); // dst_pos + length 2645 __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes())); 2646 __ jcc(Assembler::above, L_failed); 2647 2648 // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'. 2649 // Move with sign extension can be used since they are positive.
2650 __ movslq(src_pos, src_pos); 2651 __ movslq(dst_pos, dst_pos); 2652 2653 BLOCK_COMMENT("arraycopy_range_checks done"); 2654 } 2655 2656 // 2657 // Generate generic array copy stubs 2658 // 2659 // Input: 2660 // c_rarg0 - src oop 2661 // c_rarg1 - src_pos (32-bits) 2662 // c_rarg2 - dst oop 2663 // c_rarg3 - dst_pos (32-bits) 2664 // not Win64 2665 // c_rarg4 - element count (32-bits) 2666 // Win64 2667 // rsp+40 - element count (32-bits) 2668 // 2669 // Output: 2670 // rax == 0 - success 2671 // rax == -1^K - failure, where K is partial transfer count 2672 // 2673 address generate_generic_copy(const char *name, 2674 address byte_copy_entry, address short_copy_entry, 2675 address int_copy_entry, address oop_copy_entry, 2676 address long_copy_entry, address checkcast_copy_entry) { 2677 2678 Label L_failed, L_failed_0, L_objArray; 2679 Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs; 2680 2681 // Input registers 2682 const Register src = c_rarg0; // source array oop 2683 const Register src_pos = c_rarg1; // source position 2684 const Register dst = c_rarg2; // destination array oop 2685 const Register dst_pos = c_rarg3; // destination position 2686 #ifndef _WIN64 2687 const Register length = c_rarg4; 2688 #else 2689 const Address length(rsp, 6 * wordSize); // elements count is on stack on Win64 2690 #endif 2691 2692 { int modulus = CodeEntryAlignment; 2693 int target = modulus - 5; // 5 = sizeof jmp(L_failed) 2694 int advance = target - (__ offset() % modulus); 2695 if (advance < 0) advance += modulus; 2696 if (advance > 0) __ nop(advance); 2697 } 2698 StubCodeMark mark(this, "StubRoutines", name); 2699 2700 // Short-hop target to L_failed. Makes for denser prologue code. 2701 __ BIND(L_failed_0); 2702 __ jmp(L_failed); 2703 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed"); 2704 2705 __ align(CodeEntryAlignment); 2706 address start = __ pc(); 2707 2708 __ enter(); // required for proper stackwalking of RuntimeStub frame 2709 2710 // bump this on entry, not on exit: 2711 inc_counter_np(SharedRuntime::_generic_array_copy_ctr); 2712 2713 //----------------------------------------------------------------------- 2714 // Assembler stub will be used for this call to arraycopy 2715 // if the following conditions are met: 2716 // 2717 // (1) src and dst must not be null. 2718 // (2) src_pos must not be negative. 2719 // (3) dst_pos must not be negative. 2720 // (4) length must not be negative. 2721 // (5) src klass and dst klass should be the same and not NULL. 2722 // (6) src and dst should be arrays. 2723 // (7) src_pos + length must not exceed length of src. 2724 // (8) dst_pos + length must not exceed length of dst. 2725 // 2726 2727 // if (src == NULL) return -1; 2728 __ testptr(src, src); // src oop 2729 size_t j1off = __ offset(); 2730 __ jccb(Assembler::zero, L_failed_0); 2731 2732 // if (src_pos < 0) return -1; 2733 __ testl(src_pos, src_pos); // src_pos (32-bits) 2734 __ jccb(Assembler::negative, L_failed_0); 2735 2736 // if (dst == NULL) return -1; 2737 __ testptr(dst, dst); // dst oop 2738 __ jccb(Assembler::zero, L_failed_0); 2739 2740 // if (dst_pos < 0) return -1; 2741 __ testl(dst_pos, dst_pos); // dst_pos (32-bits) 2742 size_t j4off = __ offset(); 2743 __ jccb(Assembler::negative, L_failed_0); 2744 2745 // The first four tests are very dense code, 2746 // but not quite dense enough to put four 2747 // jumps in a 16-byte instruction fetch buffer. 
2748 // That's good, because some branch predictors 2749 // do not like jumps so close together. 2750 // Make sure of this. 2751 guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps"); 2752 2753 // registers used as temp 2754 const Register r11_length = r11; // elements count to copy 2755 const Register r10_src_klass = r10; // array klass 2756 2757 // if (length < 0) return -1; 2758 __ movl(r11_length, length); // length (elements count, 32-bits value) 2759 __ testl(r11_length, r11_length); 2760 __ jccb(Assembler::negative, L_failed_0); 2761 2762 __ load_klass(r10_src_klass, src); 2763 #ifdef ASSERT 2764 // assert(src->klass() != NULL); 2765 { 2766 BLOCK_COMMENT("assert klasses not null {"); 2767 Label L1, L2; 2768 __ testptr(r10_src_klass, r10_src_klass); 2769 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL 2770 __ bind(L1); 2771 __ stop("broken null klass"); 2772 __ bind(L2); 2773 __ load_klass(rax, dst); 2774 __ cmpq(rax, 0); 2775 __ jcc(Assembler::equal, L1); // this would be broken also 2776 BLOCK_COMMENT("} assert klasses not null done"); 2777 } 2778 #endif 2779 2780 // Load layout helper (32-bits) 2781 // 2782 // |array_tag| | header_size | element_type | |log2_element_size| 2783 // 32 30 24 16 8 2 0 2784 // 2785 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 2786 // 2787 2788 const int lh_offset = in_bytes(Klass::layout_helper_offset()); 2789 2790 // Handle objArrays completely differently... 2791 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2792 __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh); 2793 __ jcc(Assembler::equal, L_objArray); 2794 2795 // if (src->klass() != dst->klass()) return -1; 2796 __ load_klass(rax, dst); 2797 __ cmpq(r10_src_klass, rax); 2798 __ jcc(Assembler::notEqual, L_failed); 2799 2800 const Register rax_lh = rax; // layout helper 2801 __ movl(rax_lh, Address(r10_src_klass, lh_offset)); 2802 2803 // if (!src->is_Array()) return -1; 2804 __ cmpl(rax_lh, Klass::_lh_neutral_value); 2805 __ jcc(Assembler::greaterEqual, L_failed); 2806 2807 // At this point, it is known to be a typeArray (array_tag 0x3).
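// The header size and element size are then decoded from the layout
// helper roughly as follows (illustrative sketch, not the generated
// code):
//
//   int hdr         = (lh >> Klass::_lh_header_size_shift) & Klass::_lh_header_size_mask;
//   int log2_elsize = lh & Klass::_lh_log2_element_size_mask;
//   from = src + hdr + (src_pos << log2_elsize);
//   to   = dst + hdr + (dst_pos << log2_elsize);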
2808 #ifdef ASSERT 2809 { 2810 BLOCK_COMMENT("assert primitive array {"); 2811 Label L; 2812 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift)); 2813 __ jcc(Assembler::greaterEqual, L); 2814 __ stop("must be a primitive array"); 2815 __ bind(L); 2816 BLOCK_COMMENT("} assert primitive array done"); 2817 } 2818 #endif 2819 2820 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2821 r10, L_failed); 2822 2823 // TypeArrayKlass 2824 // 2825 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 2826 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 2827 // 2828 2829 const Register r10_offset = r10; // array offset 2830 const Register rax_elsize = rax_lh; // element size 2831 2832 __ movl(r10_offset, rax_lh); 2833 __ shrl(r10_offset, Klass::_lh_header_size_shift); 2834 __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset 2835 __ addptr(src, r10_offset); // src array offset 2836 __ addptr(dst, r10_offset); // dst array offset 2837 BLOCK_COMMENT("choose copy loop based on element size"); 2838 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize 2839 2840 // The next registers must be set before the jump to the corresponding stub 2841 const Register from = c_rarg0; // source array address 2842 const Register to = c_rarg1; // destination array address 2843 const Register count = c_rarg2; // elements count 2844 2845 // 'from', 'to' and 'count' must be set in this order 2846 // since they are the same registers as 'src', 'src_pos', 'dst'. 2847 2848 __ BIND(L_copy_bytes); 2849 __ cmpl(rax_elsize, 0); 2850 __ jccb(Assembler::notEqual, L_copy_shorts); 2851 __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr 2852 __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr 2853 __ movl2ptr(count, r11_length); // length 2854 __ jump(RuntimeAddress(byte_copy_entry)); 2855 2856 __ BIND(L_copy_shorts); 2857 __ cmpl(rax_elsize, LogBytesPerShort); 2858 __ jccb(Assembler::notEqual, L_copy_ints); 2859 __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr 2860 __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr 2861 __ movl2ptr(count, r11_length); // length 2862 __ jump(RuntimeAddress(short_copy_entry)); 2863 2864 __ BIND(L_copy_ints); 2865 __ cmpl(rax_elsize, LogBytesPerInt); 2866 __ jccb(Assembler::notEqual, L_copy_longs); 2867 __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr 2868 __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr 2869 __ movl2ptr(count, r11_length); // length 2870 __ jump(RuntimeAddress(int_copy_entry)); 2871 2872 __ BIND(L_copy_longs); 2873 #ifdef ASSERT 2874 { 2875 BLOCK_COMMENT("assert long copy {"); 2876 Label L; 2877 __ cmpl(rax_elsize, LogBytesPerLong); 2878 __ jcc(Assembler::equal, L); 2879 __ stop("must be long copy, but elsize is wrong"); 2880 __ bind(L); 2881 BLOCK_COMMENT("} assert long copy done"); 2882 } 2883 #endif 2884 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr 2885 __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr 2886 __ movl2ptr(count, r11_length); // length 2887 __ jump(RuntimeAddress(long_copy_entry)); 2888 2889 // ObjArrayKlass 2890 __ BIND(L_objArray); 2891 // live at this point: r10_src_klass, r11_length, src[_pos], dst[_pos] 2892 2893 Label L_plain_copy, L_checkcast_copy; 2894 // test array classes for subtyping 2895 __ load_klass(rax, dst); 2896 __ cmpq(r10_src_klass, rax); // usual case is exact equality 2897 __
jcc(Assembler::notEqual, L_checkcast_copy); 2898 2899 // Identically typed arrays can be copied without element-wise checks. 2900 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2901 r10, L_failed); 2902 2903 __ lea(from, Address(src, src_pos, TIMES_OOP, 2904 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr 2905 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2906 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr 2907 __ movl2ptr(count, r11_length); // length 2908 __ BIND(L_plain_copy); 2909 __ jump(RuntimeAddress(oop_copy_entry)); 2910 2911 __ BIND(L_checkcast_copy); 2912 // live at this point: r10_src_klass, r11_length, rax (dst_klass) 2913 { 2914 // Before looking at dst.length, make sure dst is also an objArray. 2915 __ cmpl(Address(rax, lh_offset), objArray_lh); 2916 __ jcc(Assembler::notEqual, L_failed); 2917 2918 // It is safe to examine both src.length and dst.length. 2919 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2920 rax, L_failed); 2921 2922 const Register r11_dst_klass = r11; 2923 __ load_klass(r11_dst_klass, dst); // reload 2924 2925 // Marshal the base address arguments now, freeing registers. 2926 __ lea(from, Address(src, src_pos, TIMES_OOP, 2927 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2928 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2929 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2930 __ movl(count, length); // length (reloaded) 2931 Register sco_temp = c_rarg3; // this register is free now 2932 assert_different_registers(from, to, count, sco_temp, 2933 r11_dst_klass, r10_src_klass); 2934 assert_clean_int(count, sco_temp); 2935 2936 // Generate the type check. 2937 const int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2938 __ movl(sco_temp, Address(r11_dst_klass, sco_offset)); 2939 assert_clean_int(sco_temp, rax); 2940 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy); 2941 2942 // Fetch destination element klass from the ObjArrayKlass header. 2943 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 2944 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset)); 2945 __ movl( sco_temp, Address(r11_dst_klass, sco_offset)); 2946 assert_clean_int(sco_temp, rax); 2947 2948 // the checkcast_copy loop needs two extra arguments: 2949 assert(c_rarg3 == sco_temp, "#3 already in place"); 2950 // Set up arguments for checkcast_copy_entry. 
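// At checkcast_copy's entry point the registers are expected to hold:
// rdi/rsi/rdx = from/to/count, rcx = ckoff and r8 = ckval (see
// generate_checkcast_copy above).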
2951 setup_arg_regs(4); 2952 __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris 2953 __ jump(RuntimeAddress(checkcast_copy_entry)); 2954 } 2955 2956 __ BIND(L_failed); 2957 __ xorptr(rax, rax); 2958 __ notptr(rax); // return -1 2959 __ leave(); // required for proper stackwalking of RuntimeStub frame 2960 __ ret(0); 2961 2962 return start; 2963 } 2964 2965 address generate_data_cache_writeback() { 2966 const Register src = c_rarg0; // source address 2967 2968 __ align(CodeEntryAlignment); 2969 2970 StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback"); 2971 2972 address start = __ pc(); 2973 __ enter(); 2974 __ cache_wb(Address(src, 0)); 2975 __ leave(); 2976 __ ret(0); 2977 2978 return start; 2979 } 2980 2981 address generate_data_cache_writeback_sync() { 2982 const Register is_pre = c_rarg0; // pre or post sync 2983 2984 __ align(CodeEntryAlignment); 2985 2986 StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback_sync"); 2987 2988 // pre wbsync is a no-op 2989 // post wbsync translates to an sfence 2990 2991 Label skip; 2992 address start = __ pc(); 2993 __ enter(); 2994 __ cmpl(is_pre, 0); 2995 __ jcc(Assembler::notEqual, skip); 2996 __ cache_wbsync(false); 2997 __ bind(skip); 2998 __ leave(); 2999 __ ret(0); 3000 3001 return start; 3002 } 3003 3004 void generate_arraycopy_stubs() { 3005 address entry; 3006 address entry_jbyte_arraycopy; 3007 address entry_jshort_arraycopy; 3008 address entry_jint_arraycopy; 3009 address entry_oop_arraycopy; 3010 address entry_jlong_arraycopy; 3011 address entry_checkcast_arraycopy; 3012 3013 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, 3014 "jbyte_disjoint_arraycopy"); 3015 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy, 3016 "jbyte_arraycopy"); 3017 3018 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, 3019 "jshort_disjoint_arraycopy"); 3020 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy, 3021 "jshort_arraycopy"); 3022 3023 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry, 3024 "jint_disjoint_arraycopy"); 3025 StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry, 3026 &entry_jint_arraycopy, "jint_arraycopy"); 3027 3028 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry, 3029 "jlong_disjoint_arraycopy"); 3030 StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry, 3031 &entry_jlong_arraycopy, "jlong_arraycopy"); 3032 3033 3034 if (UseCompressedOops) { 3035 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry, 3036 "oop_disjoint_arraycopy"); 3037 StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry, 3038 &entry_oop_arraycopy, "oop_arraycopy"); 3039 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry, 3040 "oop_disjoint_arraycopy_uninit", 3041 /*dest_uninitialized*/true); 3042 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry, 3043 NULL, "oop_arraycopy_uninit", 3044 /*dest_uninitialized*/true); 3045 } else { 3046 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry, 3047 "oop_disjoint_arraycopy"); 3048 StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry, 
3049 &entry_oop_arraycopy, "oop_arraycopy"); 3050 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry, 3051 "oop_disjoint_arraycopy_uninit", 3052 /*dest_uninitialized*/true); 3053 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry, 3054 NULL, "oop_arraycopy_uninit", 3055 /*dest_uninitialized*/true); 3056 } 3057 3058 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 3059 StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, 3060 /*dest_uninitialized*/true); 3061 3062 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", 3063 entry_jbyte_arraycopy, 3064 entry_jshort_arraycopy, 3065 entry_jint_arraycopy, 3066 entry_jlong_arraycopy); 3067 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy", 3068 entry_jbyte_arraycopy, 3069 entry_jshort_arraycopy, 3070 entry_jint_arraycopy, 3071 entry_oop_arraycopy, 3072 entry_jlong_arraycopy, 3073 entry_checkcast_arraycopy); 3074 3075 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 3076 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 3077 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 3078 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); 3079 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 3080 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 3081 3082 // We don't generate specialized code for HeapWord-aligned source 3083 // arrays, so just use the code we've already generated 3084 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy; 3085 StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy; 3086 3087 StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy; 3088 StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy; 3089 3090 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy; 3091 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; 3092 3093 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy; 3094 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; 3095 3096 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; 3097 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; 3098 3099 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit; 3100 StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit; 3101 } 3102 3103 // AES intrinsic stubs 3104 enum {AESBlockSize = 16}; 3105 3106 address generate_key_shuffle_mask() { 3107 __ align(16); 3108 StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask"); 3109 address start = __ pc(); 3110 __ emit_data64( 0x0405060700010203, relocInfo::none ); 3111 __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none ); 3112 return start; 3113 } 3114 3115 address generate_counter_shuffle_mask() { 3116 __ align(16); 3117 StubCodeMark mark(this, "StubRoutines", "counter_shuffle_mask"); 3118 address start = __ pc(); 3119 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 3120 __ emit_data64(0x0001020304050607, relocInfo::none); 3121 return start; 3122 } 3123 3124 // Utility 
routine for loading a 128-bit key word in little endian format 3125 // can optionally specify that the shuffle mask is already in an XMM register 3126 void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 3127 __ movdqu(xmmdst, Address(key, offset)); 3128 if (xmm_shuf_mask != NULL) { 3129 __ pshufb(xmmdst, xmm_shuf_mask); 3130 } else { 3131 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3132 } 3133 } 3134 3135 // Utility routine for incrementing the 128-bit counter (the IV in CTR mode) 3136 void inc_counter(Register reg, XMMRegister xmmdst, int inc_delta, Label& next_block) { 3137 __ pextrq(reg, xmmdst, 0x0); 3138 __ addq(reg, inc_delta); 3139 __ pinsrq(xmmdst, reg, 0x0); 3140 __ jcc(Assembler::carryClear, next_block); // jump if no carry 3141 __ pextrq(reg, xmmdst, 0x01); // Carry 3142 __ addq(reg, 0x01); 3143 __ pinsrq(xmmdst, reg, 0x01); // Carry end 3144 __ BIND(next_block); // next instruction 3145 } 3146 3147 // Arguments: 3148 // 3149 // Inputs: 3150 // c_rarg0 - source byte array address 3151 // c_rarg1 - destination byte array address 3152 // c_rarg2 - K (key) in little endian int array 3153 // 3154 address generate_aescrypt_encryptBlock() { 3155 assert(UseAES, "need AES instructions and misaligned SSE support"); 3156 __ align(CodeEntryAlignment); 3157 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); 3158 Label L_doLast; 3159 address start = __ pc(); 3160 3161 const Register from = c_rarg0; // source array address 3162 const Register to = c_rarg1; // destination array address 3163 const Register key = c_rarg2; // key array address 3164 const Register keylen = rax; 3165 3166 const XMMRegister xmm_result = xmm0; 3167 const XMMRegister xmm_key_shuf_mask = xmm1; 3168 // On win64 xmm6-xmm15 must be preserved so don't use them.
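// Note: a keylen of 44/52/60 ints corresponds to a 128/192/256-bit key
// and therefore to 10/12/14 AES rounds (rounds = keylen/4 - 1); the
// comparisons against 44 and 52 below select how many extra round keys
// are applied before the final aesenclast.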
3169 const XMMRegister xmm_temp1 = xmm2; 3170 const XMMRegister xmm_temp2 = xmm3; 3171 const XMMRegister xmm_temp3 = xmm4; 3172 const XMMRegister xmm_temp4 = xmm5; 3173 3174 __ enter(); // required for proper stackwalking of RuntimeStub frame 3175 3176 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3177 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3178 3179 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3180 __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input 3181 3182 // For encryption, the Java expanded key ordering is just what we need; 3183 // we don't know if the key is aligned, hence we do not use the load-execute form 3184 3185 load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask); 3186 __ pxor(xmm_result, xmm_temp1); 3187 3188 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3189 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3190 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3191 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3192 3193 __ aesenc(xmm_result, xmm_temp1); 3194 __ aesenc(xmm_result, xmm_temp2); 3195 __ aesenc(xmm_result, xmm_temp3); 3196 __ aesenc(xmm_result, xmm_temp4); 3197 3198 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3199 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3200 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3201 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3202 3203 __ aesenc(xmm_result, xmm_temp1); 3204 __ aesenc(xmm_result, xmm_temp2); 3205 __ aesenc(xmm_result, xmm_temp3); 3206 __ aesenc(xmm_result, xmm_temp4); 3207 3208 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3209 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3210 3211 __ cmpl(keylen, 44); 3212 __ jccb(Assembler::equal, L_doLast); 3213 3214 __ aesenc(xmm_result, xmm_temp1); 3215 __ aesenc(xmm_result, xmm_temp2); 3216 3217 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3218 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3219 3220 __ cmpl(keylen, 52); 3221 __ jccb(Assembler::equal, L_doLast); 3222 3223 __ aesenc(xmm_result, xmm_temp1); 3224 __ aesenc(xmm_result, xmm_temp2); 3225 3226 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3227 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3228 3229 __ BIND(L_doLast); 3230 __ aesenc(xmm_result, xmm_temp1); 3231 __ aesenclast(xmm_result, xmm_temp2); 3232 __ movdqu(Address(to, 0), xmm_result); // store the result 3233 __ xorptr(rax, rax); // return 0 3234 __ leave(); // required for proper stackwalking of RuntimeStub frame 3235 __ ret(0); 3236 3237 return start; 3238 } 3239 3240 3241 // Arguments: 3242 // 3243 // Inputs: 3244 // c_rarg0 - source byte array address 3245 // c_rarg1 - destination byte array address 3246 // c_rarg2 - K (key) in little endian int array 3247 // 3248 address generate_aescrypt_decryptBlock() { 3249 assert(UseAES, "need AES instructions and misaligned SSE support"); 3250 __ align(CodeEntryAlignment); 3251 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); 3252 Label L_doLast; 3253 address start = __ pc(); 3254 3255 const Register from = c_rarg0; // source array address 3256 const Register to = c_rarg1; // destination array address 3257 const Register key = c_rarg2; // key array address 3258 const Register keylen = rax; 3259 3260 const XMMRegister xmm_result = xmm0; 3261 const XMMRegister xmm_key_shuf_mask = xmm1; 3262 // On win64 xmm6-xmm15 must be preserved so don't use them.
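// The same keylen-based selection of 10/12/14 rounds applies to the
// decrypt stub below.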
3263 const XMMRegister xmm_temp1 = xmm2; 3264 const XMMRegister xmm_temp2 = xmm3; 3265 const XMMRegister xmm_temp3 = xmm4; 3266 const XMMRegister xmm_temp4 = xmm5; 3267 3268 __ enter(); // required for proper stackwalking of RuntimeStub frame 3269 3270 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3271 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3272 3273 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3274 __ movdqu(xmm_result, Address(from, 0)); 3275 3276 // for decryption java expanded key ordering is rotated one position from what we want 3277 // so we start from 0x10 here and hit 0x00 last 3278 // we don't know if the key is aligned, hence not using load-execute form 3279 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3280 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3281 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3282 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3283 3284 __ pxor (xmm_result, xmm_temp1); 3285 __ aesdec(xmm_result, xmm_temp2); 3286 __ aesdec(xmm_result, xmm_temp3); 3287 __ aesdec(xmm_result, xmm_temp4); 3288 3289 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3290 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3291 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3292 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3293 3294 __ aesdec(xmm_result, xmm_temp1); 3295 __ aesdec(xmm_result, xmm_temp2); 3296 __ aesdec(xmm_result, xmm_temp3); 3297 __ aesdec(xmm_result, xmm_temp4); 3298 3299 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3300 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3301 load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask); 3302 3303 __ cmpl(keylen, 44); 3304 __ jccb(Assembler::equal, L_doLast); 3305 3306 __ aesdec(xmm_result, xmm_temp1); 3307 __ aesdec(xmm_result, xmm_temp2); 3308 3309 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3310 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3311 3312 __ cmpl(keylen, 52); 3313 __ jccb(Assembler::equal, L_doLast); 3314 3315 __ aesdec(xmm_result, xmm_temp1); 3316 __ aesdec(xmm_result, xmm_temp2); 3317 3318 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3319 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3320 3321 __ BIND(L_doLast); 3322 __ aesdec(xmm_result, xmm_temp1); 3323 __ aesdec(xmm_result, xmm_temp2); 3324 3325 // for decryption the aesdeclast operation is always on key+0x00 3326 __ aesdeclast(xmm_result, xmm_temp3); 3327 __ movdqu(Address(to, 0), xmm_result); // store the result 3328 __ xorptr(rax, rax); // return 0 3329 __ leave(); // required for proper stackwalking of RuntimeStub frame 3330 __ ret(0); 3331 3332 return start; 3333 } 3334 3335 3336 // Arguments: 3337 // 3338 // Inputs: 3339 // c_rarg0 - source byte array address 3340 // c_rarg1 - destination byte array address 3341 // c_rarg2 - K (key) in little endian int array 3342 // c_rarg3 - r vector byte array address 3343 // c_rarg4 - input length 3344 // 3345 // Output: 3346 // rax - input length 3347 // 3348 address generate_cipherBlockChaining_encryptAESCrypt() { 3349 assert(UseAES, "need AES instructions and misaligned SSE support"); 3350 __ align(CodeEntryAlignment); 3351 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); 3352 address start = __ pc(); 3353 3354 Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256; 3355 const Register from = c_rarg0; // source array address 3356 const Register to = c_rarg1; 
// destination array address 3357 const Register key = c_rarg2; // key array address 3358 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3359 // and left with the results of the last encryption block 3360 #ifndef _WIN64 3361 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3362 #else 3363 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3364 const Register len_reg = r11; // pick the volatile windows register 3365 #endif 3366 const Register pos = rax; 3367 3368 // xmm register assignments for the loops below 3369 const XMMRegister xmm_result = xmm0; 3370 const XMMRegister xmm_temp = xmm1; 3371 // keys 0-10 preloaded into xmm2-xmm12 3372 const int XMM_REG_NUM_KEY_FIRST = 2; 3373 const int XMM_REG_NUM_KEY_LAST = 15; 3374 const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3375 const XMMRegister xmm_key10 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10); 3376 const XMMRegister xmm_key11 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11); 3377 const XMMRegister xmm_key12 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12); 3378 const XMMRegister xmm_key13 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13); 3379 3380 __ enter(); // required for proper stackwalking of RuntimeStub frame 3381 3382 #ifdef _WIN64 3383 // on win64, fill len_reg from stack position 3384 __ movl(len_reg, len_mem); 3385 #else 3386 __ push(len_reg); // Save 3387 #endif 3388 3389 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 3390 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3391 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0 3392 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) { 3393 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3394 offset += 0x10; 3395 } 3396 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 3397 3398 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3399 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3400 __ cmpl(rax, 44); 3401 __ jcc(Assembler::notEqual, L_key_192_256); 3402 3403 // 128 bit code follows here 3404 __ movptr(pos, 0); 3405 __ align(OptoLoopAlignment); 3406 3407 __ BIND(L_loopTop_128); 3408 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3409 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3410 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3411 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) { 3412 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3413 } 3414 __ aesenclast(xmm_result, xmm_key10); 3415 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3416 // no need to store r to memory until we exit 3417 __ addptr(pos, AESBlockSize); 3418 __ subptr(len_reg, AESBlockSize); 3419 __ jcc(Assembler::notEqual, L_loopTop_128); 3420 3421 __ BIND(L_exit); 3422 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object 3423 3424 #ifdef _WIN64 3425 __ movl(rax, len_mem); 3426 #else 3427 __ pop(rax); // return length 3428 #endif 3429 __ leave(); // required for proper stackwalking of RuntimeStub frame 3430 __ ret(0); 3431 3432 __ BIND(L_key_192_256); 3433 // here rax = len in ints of AESCrypt.KLE 
array (52=192, or 60=256) 3434 load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask); 3435 load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask); 3436 __ cmpl(rax, 52); 3437 __ jcc(Assembler::notEqual, L_key_256); 3438 3439 // 192-bit code follows here (could be changed to use more xmm registers) 3440 __ movptr(pos, 0); 3441 __ align(OptoLoopAlignment); 3442 3443 __ BIND(L_loopTop_192); 3444 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3445 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3446 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3447 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) { 3448 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3449 } 3450 __ aesenclast(xmm_result, xmm_key12); 3451 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3452 // no need to store r to memory until we exit 3453 __ addptr(pos, AESBlockSize); 3454 __ subptr(len_reg, AESBlockSize); 3455 __ jcc(Assembler::notEqual, L_loopTop_192); 3456 __ jmp(L_exit); 3457 3458 __ BIND(L_key_256); 3459 // 256-bit code follows here (could be changed to use more xmm registers) 3460 load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask); 3461 __ movptr(pos, 0); 3462 __ align(OptoLoopAlignment); 3463 3464 __ BIND(L_loopTop_256); 3465 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3466 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3467 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3468 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) { 3469 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3470 } 3471 load_key(xmm_temp, key, 0xe0); 3472 __ aesenclast(xmm_result, xmm_temp); 3473 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3474 // no need to store r to memory until we exit 3475 __ addptr(pos, AESBlockSize); 3476 __ subptr(len_reg, AESBlockSize); 3477 __ jcc(Assembler::notEqual, L_loopTop_256); 3478 __ jmp(L_exit); 3479 3480 return start; 3481 } 3482 3483 // Safefetch stubs. 3484 void generate_safefetch(const char* name, int size, address* entry, 3485 address* fault_pc, address* continuation_pc) { 3486 // safefetch signatures: 3487 // int SafeFetch32(int* adr, int errValue); 3488 // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue); 3489 // 3490 // arguments: 3491 // c_rarg0 = adr 3492 // c_rarg1 = errValue 3493 // 3494 // result: 3495 // rax = *adr or errValue 3496 3497 StubCodeMark mark(this, "StubRoutines", name); 3498 3499 // Entry point, pc or function descriptor. 3500 *entry = __ pc(); 3501 3502 // Load *adr into c_rarg1, may fault.
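// (How the SafeFetch contract is met, in outline: the VM registers *fault_pc
// with its signal handling machinery; if the load below faults, execution is
// resumed at *continuation_pc with c_rarg1 still holding errValue, so the
// final mov into rax returns the right value in either case. Typical use:
//
//   int v = SafeFetch32(adr, -1);   // v == *adr, or -1 if adr is unmapped
// )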
3503 *fault_pc = __ pc(); 3504 switch (size) { 3505 case 4: 3506 // int32_t 3507 __ movl(c_rarg1, Address(c_rarg0, 0)); 3508 break; 3509 case 8: 3510 // int64_t 3511 __ movq(c_rarg1, Address(c_rarg0, 0)); 3512 break; 3513 default: 3514 ShouldNotReachHere(); 3515 } 3516 3517 // return errValue or *adr 3518 *continuation_pc = __ pc(); 3519 __ movq(rax, c_rarg1); 3520 __ ret(0); 3521 } 3522 3523 // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time 3524 // to hide instruction latency 3525 // 3526 // Arguments: 3527 // 3528 // Inputs: 3529 // c_rarg0 - source byte array address 3530 // c_rarg1 - destination byte array address 3531 // c_rarg2 - K (key) in little endian int array 3532 // c_rarg3 - r vector byte array address 3533 // c_rarg4 - input length 3534 // 3535 // Output: 3536 // rax - input length 3537 // 3538 address generate_cipherBlockChaining_decryptAESCrypt_Parallel() { 3539 assert(UseAES, "need AES instructions and misaligned SSE support"); 3540 __ align(CodeEntryAlignment); 3541 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 3542 address start = __ pc(); 3543 3544 const Register from = c_rarg0; // source array address 3545 const Register to = c_rarg1; // destination array address 3546 const Register key = c_rarg2; // key array address 3547 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3548 // and left with the results of the last encryption block 3549 #ifndef _WIN64 3550 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3551 #else 3552 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3553 const Register len_reg = r11; // pick the volatile windows register 3554 #endif 3555 const Register pos = rax; 3556 3557 const int PARALLEL_FACTOR = 4; 3558 const int ROUNDS[3] = { 10, 12, 14 }; // aes rounds for key128, key192, key256 3559 3560 Label L_exit; 3561 Label L_singleBlock_loopTopHead[3]; // 128, 192, 256 3562 Label L_singleBlock_loopTopHead2[3]; // 128, 192, 256 3563 Label L_singleBlock_loopTop[3]; // 128, 192, 256 3564 Label L_multiBlock_loopTopHead[3]; // 128, 192, 256 3565 Label L_multiBlock_loopTop[3]; // 128, 192, 256 3566 3567 // keys 0-10 preloaded into xmm5-xmm15 3568 const int XMM_REG_NUM_KEY_FIRST = 5; 3569 const int XMM_REG_NUM_KEY_LAST = 15; 3570 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3571 const XMMRegister xmm_key_last = as_XMMRegister(XMM_REG_NUM_KEY_LAST); 3572 3573 __ enter(); // required for proper stackwalking of RuntimeStub frame 3574 3575 #ifdef _WIN64 3576 // on win64, fill len_reg from stack position 3577 __ movl(len_reg, len_mem); 3578 #else 3579 __ push(len_reg); // Save 3580 #endif 3581 __ push(rbx); 3582 // the java expanded key ordering is rotated one position from what we want 3583 // so we start from 0x10 here and hit 0x00 last 3584 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 3585 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3586 // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00 3587 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) { 3588 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3589 offset += 0x10; 3590 } 3591 load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask); 3592 3593 const XMMRegister xmm_prev_block_cipher = xmm1; // holds cipher of previous block 3594 3595 // registers holding the four results in 
the parallelized loop 3596 const XMMRegister xmm_result0 = xmm0; 3597 const XMMRegister xmm_result1 = xmm2; 3598 const XMMRegister xmm_result2 = xmm3; 3599 const XMMRegister xmm_result3 = xmm4; 3600 3601 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec 3602 3603 __ xorptr(pos, pos); 3604 3605 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3606 __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3607 __ cmpl(rbx, 52); 3608 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[1]); 3609 __ cmpl(rbx, 60); 3610 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[2]); 3611 3612 #define DoFour(opc, src_reg) \ 3613 __ opc(xmm_result0, src_reg); \ 3614 __ opc(xmm_result1, src_reg); \ 3615 __ opc(xmm_result2, src_reg); \ 3616 __ opc(xmm_result3, src_reg); \ 3617 3618 for (int k = 0; k < 3; ++k) { 3619 __ BIND(L_multiBlock_loopTopHead[k]); 3620 if (k != 0) { 3621 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left 3622 __ jcc(Assembler::less, L_singleBlock_loopTopHead2[k]); 3623 } 3624 if (k == 1) { 3625 __ subptr(rsp, 6 * wordSize); 3626 __ movdqu(Address(rsp, 0), xmm15); //save last_key from xmm15 3627 load_key(xmm15, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0 3628 __ movdqu(Address(rsp, 2 * wordSize), xmm15); 3629 load_key(xmm1, key, 0xc0); // 0xc0; 3630 __ movdqu(Address(rsp, 4 * wordSize), xmm1); 3631 } else if (k == 2) { 3632 __ subptr(rsp, 10 * wordSize); 3633 __ movdqu(Address(rsp, 0), xmm15); //save last_key from xmm15 3634 load_key(xmm15, key, 0xd0); // 0xd0; 256-bit key goes upto 0xe0 3635 __ movdqu(Address(rsp, 6 * wordSize), xmm15); 3636 load_key(xmm1, key, 0xe0); // 0xe0; 3637 __ movdqu(Address(rsp, 8 * wordSize), xmm1); 3638 load_key(xmm15, key, 0xb0); // 0xb0; 3639 __ movdqu(Address(rsp, 2 * wordSize), xmm15); 3640 load_key(xmm1, key, 0xc0); // 0xc0; 3641 __ movdqu(Address(rsp, 4 * wordSize), xmm1); 3642 } 3643 __ align(OptoLoopAlignment); 3644 __ BIND(L_multiBlock_loopTop[k]); 3645 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left 3646 __ jcc(Assembler::less, L_singleBlock_loopTopHead[k]); 3647 3648 if (k != 0) { 3649 __ movdqu(xmm15, Address(rsp, 2 * wordSize)); 3650 __ movdqu(xmm1, Address(rsp, 4 * wordSize)); 3651 } 3652 3653 __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); // get next 4 blocks into xmmresult registers 3654 __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 3655 __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 3656 __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize)); 3657 3658 DoFour(pxor, xmm_key_first); 3659 if (k == 0) { 3660 for (int rnum = 1; rnum < ROUNDS[k]; rnum++) { 3661 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3662 } 3663 DoFour(aesdeclast, xmm_key_last); 3664 } else if (k == 1) { 3665 for (int rnum = 1; rnum <= ROUNDS[k]-2; rnum++) { 3666 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3667 } 3668 __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again. 
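// (Register-pressure note: for 192/256-bit keys the full schedule no longer
//  fits in xmm5-xmm15, so the extra round keys were spilled to the stack in
//  the loop-head setup above; xmm15 has just been reloaded with the
//  aesdeclast key (key+0x00), while xmm1 still holds the key+0xc0 round key
//  read back at the top of the loop.)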
3669 DoFour(aesdec, xmm1); // key : 0xc0 3670 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again 3671 DoFour(aesdeclast, xmm_key_last); 3672 } else if (k == 2) { 3673 for (int rnum = 1; rnum <= ROUNDS[k] - 4; rnum++) { 3674 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3675 } 3676 DoFour(aesdec, xmm1); // key : 0xc0 3677 __ movdqu(xmm15, Address(rsp, 6 * wordSize)); 3678 __ movdqu(xmm1, Address(rsp, 8 * wordSize)); 3679 DoFour(aesdec, xmm15); // key : 0xd0 3680 __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again. 3681 DoFour(aesdec, xmm1); // key : 0xe0 3682 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again 3683 DoFour(aesdeclast, xmm_key_last); 3684 } 3685 3686 // for each result, xor with the r vector of previous cipher block 3687 __ pxor(xmm_result0, xmm_prev_block_cipher); 3688 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 3689 __ pxor(xmm_result1, xmm_prev_block_cipher); 3690 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 3691 __ pxor(xmm_result2, xmm_prev_block_cipher); 3692 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 3693 __ pxor(xmm_result3, xmm_prev_block_cipher); 3694 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3 * AESBlockSize)); // this will carry over to next set of blocks 3695 if (k != 0) { 3696 __ movdqu(Address(rvec, 0x00), xmm_prev_block_cipher); 3697 } 3698 3699 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output 3700 __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1); 3701 __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2); 3702 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3); 3703 3704 __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); 3705 __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); 3706 __ jmp(L_multiBlock_loopTop[k]); 3707 3708 // registers used in the non-parallelized loops 3709 // xmm register assignments for the loops below 3710 const XMMRegister xmm_result = xmm0; 3711 const XMMRegister xmm_prev_block_cipher_save = xmm2; 3712 const XMMRegister xmm_key11 = xmm3; 3713 const XMMRegister xmm_key12 = xmm4; 3714 const XMMRegister key_tmp = xmm4; 3715 3716 __ BIND(L_singleBlock_loopTopHead[k]); 3717 if (k == 1) { 3718 __ addptr(rsp, 6 * wordSize); 3719 } else if (k == 2) { 3720 __ addptr(rsp, 10 * wordSize); 3721 } 3722 __ cmpptr(len_reg, 0); // any blocks left?? 
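// (At most 3 whole blocks can remain once the 4-way loop exits; they are
//  decrypted one at a time in the single-block loop that follows.)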
3723 __ jcc(Assembler::equal, L_exit); 3724 __ BIND(L_singleBlock_loopTopHead2[k]); 3725 if (k == 1) { 3726 load_key(xmm_key11, key, 0xb0); // 0xb0; 192-bit key goes upto 0xc0 3727 load_key(xmm_key12, key, 0xc0); // 0xc0; 192-bit key goes upto 0xc0 3728 } 3729 if (k == 2) { 3730 load_key(xmm_key11, key, 0xb0); // 0xb0; 256-bit key goes upto 0xe0 3731 } 3732 __ align(OptoLoopAlignment); 3733 __ BIND(L_singleBlock_loopTop[k]); 3734 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 3735 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector 3736 __ pxor(xmm_result, xmm_key_first); // do the aes dec rounds 3737 for (int rnum = 1; rnum <= 9 ; rnum++) { 3738 __ aesdec(xmm_result, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3739 } 3740 if (k == 1) { 3741 __ aesdec(xmm_result, xmm_key11); 3742 __ aesdec(xmm_result, xmm_key12); 3743 } 3744 if (k == 2) { 3745 __ aesdec(xmm_result, xmm_key11); 3746 load_key(key_tmp, key, 0xc0); 3747 __ aesdec(xmm_result, key_tmp); 3748 load_key(key_tmp, key, 0xd0); 3749 __ aesdec(xmm_result, key_tmp); 3750 load_key(key_tmp, key, 0xe0); 3751 __ aesdec(xmm_result, key_tmp); 3752 } 3753 3754 __ aesdeclast(xmm_result, xmm_key_last); // xmm15 always came from key+0 3755 __ pxor(xmm_result, xmm_prev_block_cipher); // xor with the current r vector 3756 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3757 // no need to store r to memory until we exit 3758 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block 3759 __ addptr(pos, AESBlockSize); 3760 __ subptr(len_reg, AESBlockSize); 3761 __ jcc(Assembler::notEqual, L_singleBlock_loopTop[k]); 3762 if (k != 2) { 3763 __ jmp(L_exit); 3764 } 3765 } //for 128/192/256 3766 3767 __ BIND(L_exit); 3768 __ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object 3769 __ pop(rbx); 3770 #ifdef _WIN64 3771 __ movl(rax, len_mem); 3772 #else 3773 __ pop(rax); // return length 3774 #endif 3775 __ leave(); // required for proper stackwalking of RuntimeStub frame 3776 __ ret(0); 3777 return start; 3778 } 3779 3780 address generate_electronicCodeBook_encryptAESCrypt() { 3781 __ align(CodeEntryAlignment); 3782 StubCodeMark mark(this, "StubRoutines", "electronicCodeBook_encryptAESCrypt"); 3783 address start = __ pc(); 3784 const Register from = c_rarg0; // source array address 3785 const Register to = c_rarg1; // destination array address 3786 const Register key = c_rarg2; // key array address 3787 const Register len = c_rarg3; // src len (must be multiple of blocksize 16) 3788 __ enter(); // required for proper stackwalking of RuntimeStub frame 3789 __ aesecb_encrypt(from, to, key, len); 3790 __ leave(); // required for proper stackwalking of RuntimeStub frame 3791 __ ret(0); 3792 return start; 3793 } 3794 3795 address generate_electronicCodeBook_decryptAESCrypt() { 3796 __ align(CodeEntryAlignment); 3797 StubCodeMark mark(this, "StubRoutines", "electronicCodeBook_decryptAESCrypt"); 3798 address start = __ pc(); 3799 const Register from = c_rarg0; // source array address 3800 const Register to = c_rarg1; // destination array address 3801 const Register key = c_rarg2; // key array address 3802 const Register len = c_rarg3; // src len (must be multiple of blocksize 16) 3803 __ enter(); // required for proper stackwalking of RuntimeStub frame 3804 __ aesecb_decrypt(from, to, key, len); 3805 __ 
leave(); // required for proper stackwalking of RuntimeStub frame 3806 __ ret(0); 3807 return start; 3808 } 3809 3810 address generate_upper_word_mask() { 3811 __ align(64); 3812 StubCodeMark mark(this, "StubRoutines", "upper_word_mask"); 3813 address start = __ pc(); 3814 __ emit_data64(0x0000000000000000, relocInfo::none); 3815 __ emit_data64(0xFFFFFFFF00000000, relocInfo::none); 3816 return start; 3817 } 3818 3819 address generate_shuffle_byte_flip_mask() { 3820 __ align(64); 3821 StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask"); 3822 address start = __ pc(); 3823 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 3824 __ emit_data64(0x0001020304050607, relocInfo::none); 3825 return start; 3826 } 3827 3828 // ofs and limit are used for the multi-block byte array. 3829 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit) 3830 address generate_sha1_implCompress(bool multi_block, const char *name) { 3831 __ align(CodeEntryAlignment); 3832 StubCodeMark mark(this, "StubRoutines", name); 3833 address start = __ pc(); 3834 3835 Register buf = c_rarg0; 3836 Register state = c_rarg1; 3837 Register ofs = c_rarg2; 3838 Register limit = c_rarg3; 3839 3840 const XMMRegister abcd = xmm0; 3841 const XMMRegister e0 = xmm1; 3842 const XMMRegister e1 = xmm2; 3843 const XMMRegister msg0 = xmm3; 3844 3845 const XMMRegister msg1 = xmm4; 3846 const XMMRegister msg2 = xmm5; 3847 const XMMRegister msg3 = xmm6; 3848 const XMMRegister shuf_mask = xmm7; 3849 3850 __ enter(); 3851 3852 __ subptr(rsp, 4 * wordSize); 3853 3854 __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask, 3855 buf, state, ofs, limit, rsp, multi_block); 3856 3857 __ addptr(rsp, 4 * wordSize); 3858 3859 __ leave(); 3860 __ ret(0); 3861 return start; 3862 } 3863 3864 address generate_pshuffle_byte_flip_mask() { 3865 __ align(64); 3866 StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask"); 3867 address start = __ pc(); 3868 __ emit_data64(0x0405060700010203, relocInfo::none); 3869 __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none); 3870 3871 if (VM_Version::supports_avx2()) { 3872 __ emit_data64(0x0405060700010203, relocInfo::none); // second copy 3873 __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none); 3874 // _SHUF_00BA 3875 __ emit_data64(0x0b0a090803020100, relocInfo::none); 3876 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); 3877 __ emit_data64(0x0b0a090803020100, relocInfo::none); 3878 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); 3879 // _SHUF_DC00 3880 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); 3881 __ emit_data64(0x0b0a090803020100, relocInfo::none); 3882 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); 3883 __ emit_data64(0x0b0a090803020100, relocInfo::none); 3884 } 3885 3886 return start; 3887 } 3888 3889 // Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
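// A sketch of the byte swap this mask drives, assuming dst holds qwords that
// need endian-flipping ((v)pshufb selects dst byte i from src byte mask[i],
// and the mask bytes emitted below read 07..00, 0f..08 in memory order, i.e.
// a bswap of each qword):
//
//   movdqu(mask, PSHUFFLE_BYTE_FLIP_MASK);
//   pshufb(dst, mask);   // dst.qword[j] = bswap64(dst.qword[j])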
3890 address generate_pshuffle_byte_flip_mask_sha512() { 3891 __ align(32); 3892 StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask_sha512"); 3893 address start = __ pc(); 3894 if (VM_Version::supports_avx2()) { 3895 __ emit_data64(0x0001020304050607, relocInfo::none); // PSHUFFLE_BYTE_FLIP_MASK 3896 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 3897 __ emit_data64(0x1011121314151617, relocInfo::none); 3898 __ emit_data64(0x18191a1b1c1d1e1f, relocInfo::none); 3899 __ emit_data64(0x0000000000000000, relocInfo::none); // MASK_YMM_LO 3900 __ emit_data64(0x0000000000000000, relocInfo::none); 3901 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); 3902 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); 3903 } 3904 3905 return start; 3906 } 3907 3908 // ofs and limit are used for the multi-block byte array. 3909 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit) 3910 address generate_sha256_implCompress(bool multi_block, const char *name) { 3911 assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), ""); 3912 __ align(CodeEntryAlignment); 3913 StubCodeMark mark(this, "StubRoutines", name); 3914 address start = __ pc(); 3915 3916 Register buf = c_rarg0; 3917 Register state = c_rarg1; 3918 Register ofs = c_rarg2; 3919 Register limit = c_rarg3; 3920 3921 const XMMRegister msg = xmm0; 3922 const XMMRegister state0 = xmm1; 3923 const XMMRegister state1 = xmm2; 3924 const XMMRegister msgtmp0 = xmm3; 3925 3926 const XMMRegister msgtmp1 = xmm4; 3927 const XMMRegister msgtmp2 = xmm5; 3928 const XMMRegister msgtmp3 = xmm6; 3929 const XMMRegister msgtmp4 = xmm7; 3930 3931 const XMMRegister shuf_mask = xmm8; 3932 3933 __ enter(); 3934 3935 __ subptr(rsp, 4 * wordSize); 3936 3937 if (VM_Version::supports_sha()) { 3938 __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3939 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3940 } else if (VM_Version::supports_avx2()) { 3941 __ sha256_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3942 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3943 } 3944 __ addptr(rsp, 4 * wordSize); 3945 __ vzeroupper(); 3946 __ leave(); 3947 __ ret(0); 3948 return start; 3949 } 3950 3951 address generate_sha512_implCompress(bool multi_block, const char *name) { 3952 assert(VM_Version::supports_avx2(), ""); 3953 assert(VM_Version::supports_bmi2(), ""); 3954 __ align(CodeEntryAlignment); 3955 StubCodeMark mark(this, "StubRoutines", name); 3956 address start = __ pc(); 3957 3958 Register buf = c_rarg0; 3959 Register state = c_rarg1; 3960 Register ofs = c_rarg2; 3961 Register limit = c_rarg3; 3962 3963 const XMMRegister msg = xmm0; 3964 const XMMRegister state0 = xmm1; 3965 const XMMRegister state1 = xmm2; 3966 const XMMRegister msgtmp0 = xmm3; 3967 const XMMRegister msgtmp1 = xmm4; 3968 const XMMRegister msgtmp2 = xmm5; 3969 const XMMRegister msgtmp3 = xmm6; 3970 const XMMRegister msgtmp4 = xmm7; 3971 3972 const XMMRegister shuf_mask = xmm8; 3973 3974 __ enter(); 3975 3976 __ sha512_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3977 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3978 3979 __ vzeroupper(); 3980 __ leave(); 3981 __ ret(0); 3982 return start; 3983 } 3984 3985 // This mask is used for incrementing counter values (linc0, linc4, etc.)
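// Layout of the table emitted below (byte offsets from counter_mask_addr(),
// one 512-bit row each), as named in the comments that follow:
//   +0   lbswapmask - byte-swap mask replicated across four 128-bit lanes
//   +64  linc0      - per-lane counter increments {0, 1, 2, 3}
//   +128 linc4      - adds 4 to every lane
//   +192 linc8      - adds 8 to every lane
//   +256 linc32     - adds 32 to every lane
//   +320 linc16     - adds 16 to every lane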
3986 address counter_mask_addr() { 3987 __ align(64); 3988 StubCodeMark mark(this, "StubRoutines", "counter_mask_addr"); 3989 address start = __ pc(); 3990 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);//lbswapmask 3991 __ emit_data64(0x0001020304050607, relocInfo::none); 3992 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 3993 __ emit_data64(0x0001020304050607, relocInfo::none); 3994 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 3995 __ emit_data64(0x0001020304050607, relocInfo::none); 3996 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 3997 __ emit_data64(0x0001020304050607, relocInfo::none); 3998 __ emit_data64(0x0000000000000000, relocInfo::none);//linc0 = counter_mask_addr+64 3999 __ emit_data64(0x0000000000000000, relocInfo::none); 4000 __ emit_data64(0x0000000000000001, relocInfo::none);//counter_mask_addr() + 80 4001 __ emit_data64(0x0000000000000000, relocInfo::none); 4002 __ emit_data64(0x0000000000000002, relocInfo::none); 4003 __ emit_data64(0x0000000000000000, relocInfo::none); 4004 __ emit_data64(0x0000000000000003, relocInfo::none); 4005 __ emit_data64(0x0000000000000000, relocInfo::none); 4006 __ emit_data64(0x0000000000000004, relocInfo::none);//linc4 = counter_mask_addr() + 128 4007 __ emit_data64(0x0000000000000000, relocInfo::none); 4008 __ emit_data64(0x0000000000000004, relocInfo::none); 4009 __ emit_data64(0x0000000000000000, relocInfo::none); 4010 __ emit_data64(0x0000000000000004, relocInfo::none); 4011 __ emit_data64(0x0000000000000000, relocInfo::none); 4012 __ emit_data64(0x0000000000000004, relocInfo::none); 4013 __ emit_data64(0x0000000000000000, relocInfo::none); 4014 __ emit_data64(0x0000000000000008, relocInfo::none);//linc8 = counter_mask_addr() + 192 4015 __ emit_data64(0x0000000000000000, relocInfo::none); 4016 __ emit_data64(0x0000000000000008, relocInfo::none); 4017 __ emit_data64(0x0000000000000000, relocInfo::none); 4018 __ emit_data64(0x0000000000000008, relocInfo::none); 4019 __ emit_data64(0x0000000000000000, relocInfo::none); 4020 __ emit_data64(0x0000000000000008, relocInfo::none); 4021 __ emit_data64(0x0000000000000000, relocInfo::none); 4022 __ emit_data64(0x0000000000000020, relocInfo::none);//linc32 = counter_mask_addr() + 256 4023 __ emit_data64(0x0000000000000000, relocInfo::none); 4024 __ emit_data64(0x0000000000000020, relocInfo::none); 4025 __ emit_data64(0x0000000000000000, relocInfo::none); 4026 __ emit_data64(0x0000000000000020, relocInfo::none); 4027 __ emit_data64(0x0000000000000000, relocInfo::none); 4028 __ emit_data64(0x0000000000000020, relocInfo::none); 4029 __ emit_data64(0x0000000000000000, relocInfo::none); 4030 __ emit_data64(0x0000000000000010, relocInfo::none);//linc16 = counter_mask_addr() + 320 4031 __ emit_data64(0x0000000000000000, relocInfo::none); 4032 __ emit_data64(0x0000000000000010, relocInfo::none); 4033 __ emit_data64(0x0000000000000000, relocInfo::none); 4034 __ emit_data64(0x0000000000000010, relocInfo::none); 4035 __ emit_data64(0x0000000000000000, relocInfo::none); 4036 __ emit_data64(0x0000000000000010, relocInfo::none); 4037 __ emit_data64(0x0000000000000000, relocInfo::none); 4038 return start; 4039 } 4040 4041 // Vector AES Counter implementation 4042 address generate_counterMode_VectorAESCrypt() { 4043 __ align(CodeEntryAlignment); 4044 StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt"); 4045 address start = __ pc(); 4046 const Register from = c_rarg0; // source array address 4047 const Register to = c_rarg1; // destination array address 4048 const Register key = 
c_rarg2; // key array address r8 4049 const Register counter = c_rarg3; // counter byte array initialized from counter array address 4050 // and updated with the incremented counter in the end 4051 #ifndef _WIN64 4052 const Register len_reg = c_rarg4; 4053 const Register saved_encCounter_start = c_rarg5; 4054 const Register used_addr = r10; 4055 const Address used_mem(rbp, 2 * wordSize); 4056 const Register used = r11; 4057 #else 4058 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 4059 const Address saved_encCounter_mem(rbp, 7 * wordSize); // saved encrypted counter is on stack on Win64 4060 const Address used_mem(rbp, 8 * wordSize); // used length is on stack on Win64 4061 const Register len_reg = r10; // pick the first volatile windows register 4062 const Register saved_encCounter_start = r11; 4063 const Register used_addr = r13; 4064 const Register used = r14; 4065 #endif 4066 __ enter(); 4067 // Save state before entering routine 4068 __ push(r12); 4069 __ push(r13); 4070 __ push(r14); 4071 __ push(r15); 4072 #ifdef _WIN64 4073 // on win64, fill len_reg from stack position 4074 __ movl(len_reg, len_mem); 4075 __ movptr(saved_encCounter_start, saved_encCounter_mem); 4076 __ movptr(used_addr, used_mem); 4077 __ movl(used, Address(used_addr, 0)); 4078 #else 4079 __ push(len_reg); // Save 4080 __ movptr(used_addr, used_mem); 4081 __ movl(used, Address(used_addr, 0)); 4082 #endif 4083 __ push(rbx); 4084 __ aesctr_encrypt(from, to, key, counter, len_reg, used, used_addr, saved_encCounter_start); 4085 // Restore state before leaving routine 4086 __ pop(rbx); 4087 #ifdef _WIN64 4088 __ movl(rax, len_mem); // return length 4089 #else 4090 __ pop(rax); // return length 4091 #endif 4092 __ pop(r15); 4093 __ pop(r14); 4094 __ pop(r13); 4095 __ pop(r12); 4096 4097 __ leave(); // required for proper stackwalking of RuntimeStub frame 4098 __ ret(0); 4099 return start; 4100 } 4101 4102 // This is a version of CTR/AES crypt which does 6 blocks in a loop at a time 4103 // to hide instruction latency 4104 // 4105 // Arguments: 4106 // 4107 // Inputs: 4108 // c_rarg0 - source byte array address 4109 // c_rarg1 - destination byte array address 4110 // c_rarg2 - K (key) in little endian int array 4111 // c_rarg3 - counter vector byte array address 4112 // Linux 4113 // c_rarg4 - input length 4114 // c_rarg5 - saved encryptedCounter start 4115 // rbp + 6 * wordSize - saved used length 4116 // Windows 4117 // rbp + 6 * wordSize - input length 4118 // rbp + 7 * wordSize - saved encryptedCounter start 4119 // rbp + 8 * wordSize - saved used length 4120 // 4121 // Output: 4122 // rax - input length 4123 // 4124 address generate_counterMode_AESCrypt_Parallel() { 4125 assert(UseAES, "need AES instructions and misaligned SSE support"); 4126 __ align(CodeEntryAlignment); 4127 StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt"); 4128 address start = __ pc(); 4129 const Register from = c_rarg0; // source array address 4130 const Register to = c_rarg1; // destination array address 4131 const Register key = c_rarg2; // key array address 4132 const Register counter = c_rarg3; // counter byte array initialized from counter array address 4133 // and updated with the incremented counter in the end 4134 #ifndef _WIN64 4135 const Register len_reg = c_rarg4; 4136 const Register saved_encCounter_start = c_rarg5; 4137 const Register used_addr = r10; 4138 const Address used_mem(rbp, 2 * wordSize); 4139 const Register used = r11; 4140 #else 4141 const Address len_mem(rbp, 6 * wordSize); // 
length is on stack on Win64 4142 const Address saved_encCounter_mem(rbp, 7 * wordSize); // saved encrypted counter is on stack on Win64 4143 const Address used_mem(rbp, 8 * wordSize); // used length is on stack on Win64 4144 const Register len_reg = r10; // pick the first volatile windows register 4145 const Register saved_encCounter_start = r11; 4146 const Register used_addr = r13; 4147 const Register used = r14; 4148 #endif 4149 const Register pos = rax; 4150 4151 const int PARALLEL_FACTOR = 6; 4152 const XMMRegister xmm_counter_shuf_mask = xmm0; 4153 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 4154 const XMMRegister xmm_curr_counter = xmm2; 4155 4156 const XMMRegister xmm_key_tmp0 = xmm3; 4157 const XMMRegister xmm_key_tmp1 = xmm4; 4158 4159 // registers holding the six results in the parallelized loop 4160 const XMMRegister xmm_result0 = xmm5; 4161 const XMMRegister xmm_result1 = xmm6; 4162 const XMMRegister xmm_result2 = xmm7; 4163 const XMMRegister xmm_result3 = xmm8; 4164 const XMMRegister xmm_result4 = xmm9; 4165 const XMMRegister xmm_result5 = xmm10; 4166 4167 const XMMRegister xmm_from0 = xmm11; 4168 const XMMRegister xmm_from1 = xmm12; 4169 const XMMRegister xmm_from2 = xmm13; 4170 const XMMRegister xmm_from3 = xmm14; // the last one is xmm14; we have to preserve it on WIN64. 4171 const XMMRegister xmm_from4 = xmm3; // reuse xmm3~4, because xmm_key_tmp0~1 are not needed while loading input text 4172 const XMMRegister xmm_from5 = xmm4; 4173 4174 // for key_128, key_192, key_256 4175 const int rounds[3] = {10, 12, 14}; 4176 Label L_exit_preLoop, L_preLoop_start; 4177 Label L_multiBlock_loopTop[3]; 4178 Label L_singleBlockLoopTop[3]; 4179 Label L__incCounter[3][6]; // for 6 blocks 4180 Label L__incCounter_single[3]; // for single block, key128, key192, key256 4181 Label L_processTail_insr[3], L_processTail_4_insr[3], L_processTail_2_insr[3], L_processTail_1_insr[3], L_processTail_exit_insr[3]; 4182 Label L_processTail_4_extr[3], L_processTail_2_extr[3], L_processTail_1_extr[3], L_processTail_exit_extr[3]; 4183 4184 Label L_exit; 4185 4186 __ enter(); // required for proper stackwalking of RuntimeStub frame 4187 4188 #ifdef _WIN64 4189 // allocate spill slots for r13, r14 4190 enum { 4191 saved_r13_offset, 4192 saved_r14_offset 4193 }; 4194 __ subptr(rsp, 2 * wordSize); 4195 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13); 4196 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14); 4197 4198 // on win64, fill len_reg from stack position 4199 __ movl(len_reg, len_mem); 4200 __ movptr(saved_encCounter_start, saved_encCounter_mem); 4201 __ movptr(used_addr, used_mem); 4202 __ movl(used, Address(used_addr, 0)); 4203 #else 4204 __ push(len_reg); // Save 4205 __ movptr(used_addr, used_mem); 4206 __ movl(used, Address(used_addr, 0)); 4207 #endif 4208 4209 __ push(rbx); // Save RBX 4210 __ movdqu(xmm_curr_counter, Address(counter, 0x00)); // initialize counter with initial counter 4211 __ movdqu(xmm_counter_shuf_mask, ExternalAddress(StubRoutines::x86::counter_shuffle_mask_addr()), pos); // pos as scratch 4212 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); // counter is shuffled 4213 __ movptr(pos, 0); 4214 4215 // Use the partially used encrypted counter from the last invocation 4216 __ BIND(L_preLoop_start); 4217 __ cmpptr(used, 16); 4218 __ jcc(Assembler::aboveEqual, L_exit_preLoop); 4219 __ cmpptr(len_reg, 0); 4220 __ jcc(Assembler::lessEqual, L_exit_preLoop); 4221 __ movb(rbx, Address(saved_encCounter_start, used)); 4222 __ xorb(rbx, Address(from, pos));
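// (One byte of leftover keystream is consumed per iteration: rbx now holds
//  in[pos] ^ E_K(counter)[used], which the store below writes to the output.)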
4223 __ movb(Address(to, pos), rbx); 4224 __ addptr(pos, 1); 4225 __ addptr(used, 1); 4226 __ subptr(len_reg, 1); 4227 4228 __ jmp(L_preLoop_start); 4229 4230 __ BIND(L_exit_preLoop); 4231 __ movl(Address(used_addr, 0), used); 4232 4233 // key length could be only {11, 13, 15} * 4 = {44, 52, 60} 4234 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()), rbx); // rbx as scratch 4235 __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 4236 __ cmpl(rbx, 52); 4237 __ jcc(Assembler::equal, L_multiBlock_loopTop[1]); 4238 __ cmpl(rbx, 60); 4239 __ jcc(Assembler::equal, L_multiBlock_loopTop[2]); 4240 4241 #define CTR_DoSix(opc, src_reg) \ 4242 __ opc(xmm_result0, src_reg); \ 4243 __ opc(xmm_result1, src_reg); \ 4244 __ opc(xmm_result2, src_reg); \ 4245 __ opc(xmm_result3, src_reg); \ 4246 __ opc(xmm_result4, src_reg); \ 4247 __ opc(xmm_result5, src_reg); 4248 4249 // k == 0 : generate code for key_128 4250 // k == 1 : generate code for key_192 4251 // k == 2 : generate code for key_256 4252 for (int k = 0; k < 3; ++k) { 4253 //multi blocks starts here 4254 __ align(OptoLoopAlignment); 4255 __ BIND(L_multiBlock_loopTop[k]); 4256 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least PARALLEL_FACTOR blocks left 4257 __ jcc(Assembler::less, L_singleBlockLoopTop[k]); 4258 load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask); 4259 4260 //load, then increase counters 4261 CTR_DoSix(movdqa, xmm_curr_counter); 4262 inc_counter(rbx, xmm_result1, 0x01, L__incCounter[k][0]); 4263 inc_counter(rbx, xmm_result2, 0x02, L__incCounter[k][1]); 4264 inc_counter(rbx, xmm_result3, 0x03, L__incCounter[k][2]); 4265 inc_counter(rbx, xmm_result4, 0x04, L__incCounter[k][3]); 4266 inc_counter(rbx, xmm_result5, 0x05, L__incCounter[k][4]); 4267 inc_counter(rbx, xmm_curr_counter, 0x06, L__incCounter[k][5]); 4268 CTR_DoSix(pshufb, xmm_counter_shuf_mask); // after increased, shuffled counters back for PXOR 4269 CTR_DoSix(pxor, xmm_key_tmp0); //PXOR with Round 0 key 4270 4271 //load two ROUND_KEYs at a time 4272 for (int i = 1; i < rounds[k]; ) { 4273 load_key(xmm_key_tmp1, key, (0x10 * i), xmm_key_shuf_mask); 4274 load_key(xmm_key_tmp0, key, (0x10 * (i+1)), xmm_key_shuf_mask); 4275 CTR_DoSix(aesenc, xmm_key_tmp1); 4276 i++; 4277 if (i != rounds[k]) { 4278 CTR_DoSix(aesenc, xmm_key_tmp0); 4279 } else { 4280 CTR_DoSix(aesenclast, xmm_key_tmp0); 4281 } 4282 i++; 4283 } 4284 4285 // get next PARALLEL_FACTOR blocks into xmm_result registers 4286 __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 4287 __ movdqu(xmm_from1, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 4288 __ movdqu(xmm_from2, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 4289 __ movdqu(xmm_from3, Address(from, pos, Address::times_1, 3 * AESBlockSize)); 4290 __ movdqu(xmm_from4, Address(from, pos, Address::times_1, 4 * AESBlockSize)); 4291 __ movdqu(xmm_from5, Address(from, pos, Address::times_1, 5 * AESBlockSize)); 4292 4293 __ pxor(xmm_result0, xmm_from0); 4294 __ pxor(xmm_result1, xmm_from1); 4295 __ pxor(xmm_result2, xmm_from2); 4296 __ pxor(xmm_result3, xmm_from3); 4297 __ pxor(xmm_result4, xmm_from4); 4298 __ pxor(xmm_result5, xmm_from5); 4299 4300 // store 6 results into the next 64 bytes of output 4301 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); 4302 __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1); 4303 __ movdqu(Address(to, pos, 
Address::times_1, 2 * AESBlockSize), xmm_result2); 4304 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3); 4305 __ movdqu(Address(to, pos, Address::times_1, 4 * AESBlockSize), xmm_result4); 4306 __ movdqu(Address(to, pos, Address::times_1, 5 * AESBlockSize), xmm_result5); 4307 4308 __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); // increase the length of crypt text 4309 __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // decrease the remaining length 4310 __ jmp(L_multiBlock_loopTop[k]); 4311 4312 // singleBlock starts here 4313 __ align(OptoLoopAlignment); 4314 __ BIND(L_singleBlockLoopTop[k]); 4315 __ cmpptr(len_reg, 0); 4316 __ jcc(Assembler::lessEqual, L_exit); 4317 load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask); 4318 __ movdqa(xmm_result0, xmm_curr_counter); 4319 inc_counter(rbx, xmm_curr_counter, 0x01, L__incCounter_single[k]); 4320 __ pshufb(xmm_result0, xmm_counter_shuf_mask); 4321 __ pxor(xmm_result0, xmm_key_tmp0); 4322 for (int i = 1; i < rounds[k]; i++) { 4323 load_key(xmm_key_tmp0, key, (0x10 * i), xmm_key_shuf_mask); 4324 __ aesenc(xmm_result0, xmm_key_tmp0); 4325 } 4326 load_key(xmm_key_tmp0, key, (rounds[k] * 0x10), xmm_key_shuf_mask); 4327 __ aesenclast(xmm_result0, xmm_key_tmp0); 4328 __ cmpptr(len_reg, AESBlockSize); 4329 __ jcc(Assembler::less, L_processTail_insr[k]); 4330 __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 4331 __ pxor(xmm_result0, xmm_from0); 4332 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); 4333 __ addptr(pos, AESBlockSize); 4334 __ subptr(len_reg, AESBlockSize); 4335 __ jmp(L_singleBlockLoopTop[k]); 4336 __ BIND(L_processTail_insr[k]); // Process the tail part of the input array 4337 __ addptr(pos, len_reg); // 1. Insert bytes from src array into xmm_from0 register 4338 __ testptr(len_reg, 8); 4339 __ jcc(Assembler::zero, L_processTail_4_insr[k]); 4340 __ subptr(pos,8); 4341 __ pinsrq(xmm_from0, Address(from, pos), 0); 4342 __ BIND(L_processTail_4_insr[k]); 4343 __ testptr(len_reg, 4); 4344 __ jcc(Assembler::zero, L_processTail_2_insr[k]); 4345 __ subptr(pos,4); 4346 __ pslldq(xmm_from0, 4); 4347 __ pinsrd(xmm_from0, Address(from, pos), 0); 4348 __ BIND(L_processTail_2_insr[k]); 4349 __ testptr(len_reg, 2); 4350 __ jcc(Assembler::zero, L_processTail_1_insr[k]); 4351 __ subptr(pos, 2); 4352 __ pslldq(xmm_from0, 2); 4353 __ pinsrw(xmm_from0, Address(from, pos), 0); 4354 __ BIND(L_processTail_1_insr[k]); 4355 __ testptr(len_reg, 1); 4356 __ jcc(Assembler::zero, L_processTail_exit_insr[k]); 4357 __ subptr(pos, 1); 4358 __ pslldq(xmm_from0, 1); 4359 __ pinsrb(xmm_from0, Address(from, pos), 0); 4360 __ BIND(L_processTail_exit_insr[k]); 4361 4362 __ movdqu(Address(saved_encCounter_start, 0), xmm_result0); // 2. Perform pxor of the encrypted counter and plaintext Bytes. 4363 __ pxor(xmm_result0, xmm_from0); // Also the encrypted counter is saved for next invocation. 4364 4365 __ testptr(len_reg, 8); 4366 __ jcc(Assembler::zero, L_processTail_4_extr[k]); // 3. Extract bytes from xmm_result0 into the dest. 
array 4367 __ pextrq(Address(to, pos), xmm_result0, 0); 4368 __ psrldq(xmm_result0, 8); 4369 __ addptr(pos, 8); 4370 __ BIND(L_processTail_4_extr[k]); 4371 __ testptr(len_reg, 4); 4372 __ jcc(Assembler::zero, L_processTail_2_extr[k]); 4373 __ pextrd(Address(to, pos), xmm_result0, 0); 4374 __ psrldq(xmm_result0, 4); 4375 __ addptr(pos, 4); 4376 __ BIND(L_processTail_2_extr[k]); 4377 __ testptr(len_reg, 2); 4378 __ jcc(Assembler::zero, L_processTail_1_extr[k]); 4379 __ pextrw(Address(to, pos), xmm_result0, 0); 4380 __ psrldq(xmm_result0, 2); 4381 __ addptr(pos, 2); 4382 __ BIND(L_processTail_1_extr[k]); 4383 __ testptr(len_reg, 1); 4384 __ jcc(Assembler::zero, L_processTail_exit_extr[k]); 4385 __ pextrb(Address(to, pos), xmm_result0, 0); 4386 4387 __ BIND(L_processTail_exit_extr[k]); 4388 __ movl(Address(used_addr, 0), len_reg); 4389 __ jmp(L_exit); 4390 4391 } 4392 4393 __ BIND(L_exit); 4394 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); //counter is shuffled back. 4395 __ movdqu(Address(counter, 0), xmm_curr_counter); //save counter back 4396 __ pop(rbx); // pop the saved RBX. 4397 #ifdef _WIN64 4398 __ movl(rax, len_mem); 4399 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 4400 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 4401 __ addptr(rsp, 2 * wordSize); 4402 #else 4403 __ pop(rax); // return 'len' 4404 #endif 4405 __ leave(); // required for proper stackwalking of RuntimeStub frame 4406 __ ret(0); 4407 return start; 4408 } 4409 4410 void roundDec(XMMRegister xmm_reg) { 4411 __ vaesdec(xmm1, xmm1, xmm_reg, Assembler::AVX_512bit); 4412 __ vaesdec(xmm2, xmm2, xmm_reg, Assembler::AVX_512bit); 4413 __ vaesdec(xmm3, xmm3, xmm_reg, Assembler::AVX_512bit); 4414 __ vaesdec(xmm4, xmm4, xmm_reg, Assembler::AVX_512bit); 4415 __ vaesdec(xmm5, xmm5, xmm_reg, Assembler::AVX_512bit); 4416 __ vaesdec(xmm6, xmm6, xmm_reg, Assembler::AVX_512bit); 4417 __ vaesdec(xmm7, xmm7, xmm_reg, Assembler::AVX_512bit); 4418 __ vaesdec(xmm8, xmm8, xmm_reg, Assembler::AVX_512bit); 4419 } 4420 4421 void roundDeclast(XMMRegister xmm_reg) { 4422 __ vaesdeclast(xmm1, xmm1, xmm_reg, Assembler::AVX_512bit); 4423 __ vaesdeclast(xmm2, xmm2, xmm_reg, Assembler::AVX_512bit); 4424 __ vaesdeclast(xmm3, xmm3, xmm_reg, Assembler::AVX_512bit); 4425 __ vaesdeclast(xmm4, xmm4, xmm_reg, Assembler::AVX_512bit); 4426 __ vaesdeclast(xmm5, xmm5, xmm_reg, Assembler::AVX_512bit); 4427 __ vaesdeclast(xmm6, xmm6, xmm_reg, Assembler::AVX_512bit); 4428 __ vaesdeclast(xmm7, xmm7, xmm_reg, Assembler::AVX_512bit); 4429 __ vaesdeclast(xmm8, xmm8, xmm_reg, Assembler::AVX_512bit); 4430 } 4431 4432 void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask = NULL) { 4433 __ movdqu(xmmdst, Address(key, offset)); 4434 if (xmm_shuf_mask != NULL) { 4435 __ pshufb(xmmdst, xmm_shuf_mask); 4436 } else { 4437 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 4438 } 4439 __ evshufi64x2(xmmdst, xmmdst, xmmdst, 0x0, Assembler::AVX_512bit); 4440 4441 } 4442 4443 address generate_cipherBlockChaining_decryptVectorAESCrypt() { 4444 assert(VM_Version::supports_vaes(), "need AES instructions and misaligned SSE support"); 4445 __ align(CodeEntryAlignment); 4446 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 4447 address start = __ pc(); 4448 4449 const Register from = c_rarg0; // source array address 4450 const Register to = c_rarg1; // destination array address 4451 const Register key = c_rarg2; // key array address 4452 const Register rvec = 
c_rarg3; // r byte array initialized from initvector array address 4453 // and left with the results of the last encryption block 4454 #ifndef _WIN64 4455 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 4456 #else 4457 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 4458 const Register len_reg = r11; // pick the volatile windows register 4459 #endif 4460 4461 Label Loop, Loop1, L_128, L_256, L_192, KEY_192, KEY_256, Loop2, Lcbc_dec_rem_loop, 4462 Lcbc_dec_rem_last, Lcbc_dec_ret, Lcbc_dec_rem, Lcbc_exit; 4463 4464 __ enter(); 4465 4466 #ifdef _WIN64 4467 // on win64, fill len_reg from stack position 4468 __ movl(len_reg, len_mem); 4469 #else 4470 __ push(len_reg); // Save 4471 #endif 4472 __ push(rbx); 4473 __ vzeroupper(); 4474 4475 // Temporary variable declaration for swapping key bytes 4476 const XMMRegister xmm_key_shuf_mask = xmm1; 4477 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 4478 4479 // Calculate number of rounds from key size: 44 for 10-rounds, 52 for 12-rounds, 60 for 14-rounds 4480 const Register rounds = rbx; 4481 __ movl(rounds, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 4482 4483 const XMMRegister IV = xmm0; 4484 // Load IV and broadcast value to 512-bits 4485 __ evbroadcasti64x2(IV, Address(rvec, 0), Assembler::AVX_512bit); 4486 4487 // Temporary variables for storing round keys 4488 const XMMRegister RK0 = xmm30; 4489 const XMMRegister RK1 = xmm9; 4490 const XMMRegister RK2 = xmm18; 4491 const XMMRegister RK3 = xmm19; 4492 const XMMRegister RK4 = xmm20; 4493 const XMMRegister RK5 = xmm21; 4494 const XMMRegister RK6 = xmm22; 4495 const XMMRegister RK7 = xmm23; 4496 const XMMRegister RK8 = xmm24; 4497 const XMMRegister RK9 = xmm25; 4498 const XMMRegister RK10 = xmm26; 4499 4500 // Load and shuffle key 4501 // the java expanded key ordering is rotated one position from what we want 4502 // so we start from 1*16 here and hit 0*16 last 4503 ev_load_key(RK1, key, 1 * 16, xmm_key_shuf_mask); 4504 ev_load_key(RK2, key, 2 * 16, xmm_key_shuf_mask); 4505 ev_load_key(RK3, key, 3 * 16, xmm_key_shuf_mask); 4506 ev_load_key(RK4, key, 4 * 16, xmm_key_shuf_mask); 4507 ev_load_key(RK5, key, 5 * 16, xmm_key_shuf_mask); 4508 ev_load_key(RK6, key, 6 * 16, xmm_key_shuf_mask); 4509 ev_load_key(RK7, key, 7 * 16, xmm_key_shuf_mask); 4510 ev_load_key(RK8, key, 8 * 16, xmm_key_shuf_mask); 4511 ev_load_key(RK9, key, 9 * 16, xmm_key_shuf_mask); 4512 ev_load_key(RK10, key, 10 * 16, xmm_key_shuf_mask); 4513 ev_load_key(RK0, key, 0*16, xmm_key_shuf_mask); 4514 4515 // Variables for storing source cipher text 4516 const XMMRegister S0 = xmm10; 4517 const XMMRegister S1 = xmm11; 4518 const XMMRegister S2 = xmm12; 4519 const XMMRegister S3 = xmm13; 4520 const XMMRegister S4 = xmm14; 4521 const XMMRegister S5 = xmm15; 4522 const XMMRegister S6 = xmm16; 4523 const XMMRegister S7 = xmm17; 4524 4525 // Variables for storing decrypted text 4526 const XMMRegister B0 = xmm1; 4527 const XMMRegister B1 = xmm2; 4528 const XMMRegister B2 = xmm3; 4529 const XMMRegister B3 = xmm4; 4530 const XMMRegister B4 = xmm5; 4531 const XMMRegister B5 = xmm6; 4532 const XMMRegister B6 = xmm7; 4533 const XMMRegister B7 = xmm8; 4534 4535 __ cmpl(rounds, 44); 4536 __ jcc(Assembler::greater, KEY_192); 4537 __ jmp(Loop); 4538 4539 __ BIND(KEY_192); 4540 const XMMRegister RK11 = xmm27; 4541 const XMMRegister RK12 = xmm28; 4542 ev_load_key(RK11, key, 11*16, xmm_key_shuf_mask); 
4543 ev_load_key(RK12, key, 12*16, xmm_key_shuf_mask); 4544 4545 __ cmpl(rounds, 52); 4546 __ jcc(Assembler::greater, KEY_256); 4547 __ jmp(Loop); 4548 4549 __ BIND(KEY_256); 4550 const XMMRegister RK13 = xmm29; 4551 const XMMRegister RK14 = xmm31; 4552 ev_load_key(RK13, key, 13*16, xmm_key_shuf_mask); 4553 ev_load_key(RK14, key, 14*16, xmm_key_shuf_mask); 4554 4555 __ BIND(Loop); 4556 __ cmpl(len_reg, 512); 4557 __ jcc(Assembler::below, Lcbc_dec_rem); 4558 __ BIND(Loop1); 4559 __ subl(len_reg, 512); 4560 __ evmovdquq(S0, Address(from, 0 * 64), Assembler::AVX_512bit); 4561 __ evmovdquq(S1, Address(from, 1 * 64), Assembler::AVX_512bit); 4562 __ evmovdquq(S2, Address(from, 2 * 64), Assembler::AVX_512bit); 4563 __ evmovdquq(S3, Address(from, 3 * 64), Assembler::AVX_512bit); 4564 __ evmovdquq(S4, Address(from, 4 * 64), Assembler::AVX_512bit); 4565 __ evmovdquq(S5, Address(from, 5 * 64), Assembler::AVX_512bit); 4566 __ evmovdquq(S6, Address(from, 6 * 64), Assembler::AVX_512bit); 4567 __ evmovdquq(S7, Address(from, 7 * 64), Assembler::AVX_512bit); 4568 __ leaq(from, Address(from, 8 * 64)); 4569 4570 __ evpxorq(B0, S0, RK1, Assembler::AVX_512bit); 4571 __ evpxorq(B1, S1, RK1, Assembler::AVX_512bit); 4572 __ evpxorq(B2, S2, RK1, Assembler::AVX_512bit); 4573 __ evpxorq(B3, S3, RK1, Assembler::AVX_512bit); 4574 __ evpxorq(B4, S4, RK1, Assembler::AVX_512bit); 4575 __ evpxorq(B5, S5, RK1, Assembler::AVX_512bit); 4576 __ evpxorq(B6, S6, RK1, Assembler::AVX_512bit); 4577 __ evpxorq(B7, S7, RK1, Assembler::AVX_512bit); 4578 4579 __ evalignq(IV, S0, IV, 0x06); 4580 __ evalignq(S0, S1, S0, 0x06); 4581 __ evalignq(S1, S2, S1, 0x06); 4582 __ evalignq(S2, S3, S2, 0x06); 4583 __ evalignq(S3, S4, S3, 0x06); 4584 __ evalignq(S4, S5, S4, 0x06); 4585 __ evalignq(S5, S6, S5, 0x06); 4586 __ evalignq(S6, S7, S6, 0x06); 4587 4588 roundDec(RK2); 4589 roundDec(RK3); 4590 roundDec(RK4); 4591 roundDec(RK5); 4592 roundDec(RK6); 4593 roundDec(RK7); 4594 roundDec(RK8); 4595 roundDec(RK9); 4596 roundDec(RK10); 4597 4598 __ cmpl(rounds, 44); 4599 __ jcc(Assembler::belowEqual, L_128); 4600 roundDec(RK11); 4601 roundDec(RK12); 4602 4603 __ cmpl(rounds, 52); 4604 __ jcc(Assembler::belowEqual, L_192); 4605 roundDec(RK13); 4606 roundDec(RK14); 4607 4608 __ BIND(L_256); 4609 roundDeclast(RK0); 4610 __ jmp(Loop2); 4611 4612 __ BIND(L_128); 4613 roundDeclast(RK0); 4614 __ jmp(Loop2); 4615 4616 __ BIND(L_192); 4617 roundDeclast(RK0); 4618 4619 __ BIND(Loop2); 4620 __ evpxorq(B0, B0, IV, Assembler::AVX_512bit); 4621 __ evpxorq(B1, B1, S0, Assembler::AVX_512bit); 4622 __ evpxorq(B2, B2, S1, Assembler::AVX_512bit); 4623 __ evpxorq(B3, B3, S2, Assembler::AVX_512bit); 4624 __ evpxorq(B4, B4, S3, Assembler::AVX_512bit); 4625 __ evpxorq(B5, B5, S4, Assembler::AVX_512bit); 4626 __ evpxorq(B6, B6, S5, Assembler::AVX_512bit); 4627 __ evpxorq(B7, B7, S6, Assembler::AVX_512bit); 4628 __ evmovdquq(IV, S7, Assembler::AVX_512bit); 4629 4630 __ evmovdquq(Address(to, 0 * 64), B0, Assembler::AVX_512bit); 4631 __ evmovdquq(Address(to, 1 * 64), B1, Assembler::AVX_512bit); 4632 __ evmovdquq(Address(to, 2 * 64), B2, Assembler::AVX_512bit); 4633 __ evmovdquq(Address(to, 3 * 64), B3, Assembler::AVX_512bit); 4634 __ evmovdquq(Address(to, 4 * 64), B4, Assembler::AVX_512bit); 4635 __ evmovdquq(Address(to, 5 * 64), B5, Assembler::AVX_512bit); 4636 __ evmovdquq(Address(to, 6 * 64), B6, Assembler::AVX_512bit); 4637 __ evmovdquq(Address(to, 7 * 64), B7, Assembler::AVX_512bit); 4638 __ leaq(to, Address(to, 8 * 64)); 4639 __ jmp(Loop); 4640 4641 __ BIND(Lcbc_dec_rem); 
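// (Remainder path: fewer than 512 bytes are left, so blocks are decrypted one
//  at a time with the textbook CBC recurrence P[i] = D_K(C[i]) ^ C[i-1]; the
//  evshufi64x2 below moves the carried-over IV block into lane 0, and only
//  the low 128 bits of IV matter from here on.)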
4642 __ evshufi64x2(IV, IV, IV, 0x03, Assembler::AVX_512bit); 4643 4644 __ BIND(Lcbc_dec_rem_loop); 4645 __ subl(len_reg, 16); 4646 __ jcc(Assembler::carrySet, Lcbc_dec_ret); 4647 4648 __ movdqu(S0, Address(from, 0)); 4649 __ evpxorq(B0, S0, RK1, Assembler::AVX_512bit); 4650 __ vaesdec(B0, B0, RK2, Assembler::AVX_512bit); 4651 __ vaesdec(B0, B0, RK3, Assembler::AVX_512bit); 4652 __ vaesdec(B0, B0, RK4, Assembler::AVX_512bit); 4653 __ vaesdec(B0, B0, RK5, Assembler::AVX_512bit); 4654 __ vaesdec(B0, B0, RK6, Assembler::AVX_512bit); 4655 __ vaesdec(B0, B0, RK7, Assembler::AVX_512bit); 4656 __ vaesdec(B0, B0, RK8, Assembler::AVX_512bit); 4657 __ vaesdec(B0, B0, RK9, Assembler::AVX_512bit); 4658 __ vaesdec(B0, B0, RK10, Assembler::AVX_512bit); 4659 __ cmpl(rounds, 44); 4660 __ jcc(Assembler::belowEqual, Lcbc_dec_rem_last); 4661 4662 __ vaesdec(B0, B0, RK11, Assembler::AVX_512bit); 4663 __ vaesdec(B0, B0, RK12, Assembler::AVX_512bit); 4664 __ cmpl(rounds, 52); 4665 __ jcc(Assembler::belowEqual, Lcbc_dec_rem_last); 4666 4667 __ vaesdec(B0, B0, RK13, Assembler::AVX_512bit); 4668 __ vaesdec(B0, B0, RK14, Assembler::AVX_512bit); 4669 4670 __ BIND(Lcbc_dec_rem_last); 4671 __ vaesdeclast(B0, B0, RK0, Assembler::AVX_512bit); 4672 4673 __ evpxorq(B0, B0, IV, Assembler::AVX_512bit); 4674 __ evmovdquq(IV, S0, Assembler::AVX_512bit); 4675 __ movdqu(Address(to, 0), B0); 4676 __ leaq(from, Address(from, 16)); 4677 __ leaq(to, Address(to, 16)); 4678 __ jmp(Lcbc_dec_rem_loop); 4679 4680 __ BIND(Lcbc_dec_ret); 4681 __ movdqu(Address(rvec, 0), IV); 4682 4683 // Zero out the round keys 4684 __ evpxorq(RK0, RK0, RK0, Assembler::AVX_512bit); 4685 __ evpxorq(RK1, RK1, RK1, Assembler::AVX_512bit); 4686 __ evpxorq(RK2, RK2, RK2, Assembler::AVX_512bit); 4687 __ evpxorq(RK3, RK3, RK3, Assembler::AVX_512bit); 4688 __ evpxorq(RK4, RK4, RK4, Assembler::AVX_512bit); 4689 __ evpxorq(RK5, RK5, RK5, Assembler::AVX_512bit); 4690 __ evpxorq(RK6, RK6, RK6, Assembler::AVX_512bit); 4691 __ evpxorq(RK7, RK7, RK7, Assembler::AVX_512bit); 4692 __ evpxorq(RK8, RK8, RK8, Assembler::AVX_512bit); 4693 __ evpxorq(RK9, RK9, RK9, Assembler::AVX_512bit); 4694 __ evpxorq(RK10, RK10, RK10, Assembler::AVX_512bit); 4695 __ cmpl(rounds, 44); 4696 __ jcc(Assembler::belowEqual, Lcbc_exit); 4697 __ evpxorq(RK11, RK11, RK11, Assembler::AVX_512bit); 4698 __ evpxorq(RK12, RK12, RK12, Assembler::AVX_512bit); 4699 __ cmpl(rounds, 52); 4700 __ jcc(Assembler::belowEqual, Lcbc_exit); 4701 __ evpxorq(RK13, RK13, RK13, Assembler::AVX_512bit); 4702 __ evpxorq(RK14, RK14, RK14, Assembler::AVX_512bit); 4703 4704 __ BIND(Lcbc_exit); 4705 __ pop(rbx); 4706 #ifdef _WIN64 4707 __ movl(rax, len_mem); 4708 #else 4709 __ pop(rax); // return length 4710 #endif 4711 __ leave(); // required for proper stackwalking of RuntimeStub frame 4712 __ ret(0); 4713 return start; 4714 } 4715 4716 // Polynomial x^128+x^127+x^126+x^121+1 4717 address ghash_polynomial_addr() { 4718 __ align(CodeEntryAlignment); 4719 StubCodeMark mark(this, "StubRoutines", "_ghash_poly_addr"); 4720 address start = __ pc(); 4721 __ emit_data64(0x0000000000000001, relocInfo::none); 4722 __ emit_data64(0xc200000000000000, relocInfo::none); 4723 return start; 4724 } 4725 4726 address ghash_shufflemask_addr() { 4727 __ align(CodeEntryAlignment); 4728 StubCodeMark mark(this, "StubRoutines", "_ghash_shuffmask_addr"); 4729 address start = __ pc(); 4730 __ emit_data64(0x0f0f0f0f0f0f0f0f, relocInfo::none); 4731 __ emit_data64(0x0f0f0f0f0f0f0f0f, relocInfo::none); 4732 return start; 4733 } 4734 4735 // Ghash single 
and multi block operations using AVX instructions 4736 address generate_avx_ghash_processBlocks() { 4737 __ align(CodeEntryAlignment); 4738 4739 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 4740 address start = __ pc(); 4741 4742 // arguments 4743 const Register state = c_rarg0; 4744 const Register htbl = c_rarg1; 4745 const Register data = c_rarg2; 4746 const Register blocks = c_rarg3; 4747 __ enter(); 4748 // Save state before entering routine 4749 __ avx_ghash(state, htbl, data, blocks); 4750 __ leave(); // required for proper stackwalking of RuntimeStub frame 4751 __ ret(0); 4752 return start; 4753 } 4754 4755 // byte swap x86 long 4756 address generate_ghash_long_swap_mask() { 4757 __ align(CodeEntryAlignment); 4758 StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask"); 4759 address start = __ pc(); 4760 __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none ); 4761 __ emit_data64(0x0706050403020100, relocInfo::none ); 4762 return start; 4763 } 4764 4765 // byte swap x86 byte array 4766 address generate_ghash_byte_swap_mask() { 4767 __ align(CodeEntryAlignment); 4768 StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask"); 4769 address start = __ pc(); 4770 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none ); 4771 __ emit_data64(0x0001020304050607, relocInfo::none ); 4772 return start; 4773 } 4774 4775 /* Single and multi-block ghash operations */ 4776 address generate_ghash_processBlocks() { 4777 __ align(CodeEntryAlignment); 4778 Label L_ghash_loop, L_exit; 4779 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 4780 address start = __ pc(); 4781 4782 const Register state = c_rarg0; 4783 const Register subkeyH = c_rarg1; 4784 const Register data = c_rarg2; 4785 const Register blocks = c_rarg3; 4786 4787 const XMMRegister xmm_temp0 = xmm0; 4788 const XMMRegister xmm_temp1 = xmm1; 4789 const XMMRegister xmm_temp2 = xmm2; 4790 const XMMRegister xmm_temp3 = xmm3; 4791 const XMMRegister xmm_temp4 = xmm4; 4792 const XMMRegister xmm_temp5 = xmm5; 4793 const XMMRegister xmm_temp6 = xmm6; 4794 const XMMRegister xmm_temp7 = xmm7; 4795 const XMMRegister xmm_temp8 = xmm8; 4796 const XMMRegister xmm_temp9 = xmm9; 4797 const XMMRegister xmm_temp10 = xmm10; 4798 4799 __ enter(); 4800 4801 __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr())); 4802 4803 __ movdqu(xmm_temp0, Address(state, 0)); 4804 __ pshufb(xmm_temp0, xmm_temp10); 4805 4806 4807 __ BIND(L_ghash_loop); 4808 __ movdqu(xmm_temp2, Address(data, 0)); 4809 __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr())); 4810 4811 __ movdqu(xmm_temp1, Address(subkeyH, 0)); 4812 __ pshufb(xmm_temp1, xmm_temp10); 4813 4814 __ pxor(xmm_temp0, xmm_temp2); 4815 4816 // 4817 // Multiply with the hash key 4818 // 4819 __ movdqu(xmm_temp3, xmm_temp0); 4820 __ pclmulqdq(xmm_temp3, xmm_temp1, 0); // xmm3 holds a0*b0 4821 __ movdqu(xmm_temp4, xmm_temp0); 4822 __ pclmulqdq(xmm_temp4, xmm_temp1, 16); // xmm4 holds a0*b1 4823 4824 __ movdqu(xmm_temp5, xmm_temp0); 4825 __ pclmulqdq(xmm_temp5, xmm_temp1, 1); // xmm5 holds a1*b0 4826 __ movdqu(xmm_temp6, xmm_temp0); 4827 __ pclmulqdq(xmm_temp6, xmm_temp1, 17); // xmm6 holds a1*b1 4828 4829 __ pxor(xmm_temp4, xmm_temp5); // xmm4 holds a0*b1 + a1*b0 4830 4831 __ movdqu(xmm_temp5, xmm_temp4); // move the contents of xmm4 to xmm5 4832 __ psrldq(xmm_temp4, 8); // shift by xmm4 64 bits to the right 4833 __ pslldq(xmm_temp5, 8); // shift by xmm5 64 bits to the left 4834 __ pxor(xmm_temp3, xmm_temp5); 4835 __ 
pxor(xmm_temp6, xmm_temp4); // Register pair <xmm6:xmm3> holds the result
4836 // of the carry-less multiplication of
4837 // xmm0 by xmm1.
4838
4839 // We shift the result of the multiplication by one bit position
4840 // to the left to compensate for the fact that the bits are reversed.
4841 __ movdqu(xmm_temp7, xmm_temp3);
4842 __ movdqu(xmm_temp8, xmm_temp6);
4843 __ pslld(xmm_temp3, 1);
4844 __ pslld(xmm_temp6, 1);
4845 __ psrld(xmm_temp7, 31);
4846 __ psrld(xmm_temp8, 31);
4847 __ movdqu(xmm_temp9, xmm_temp7);
4848 __ pslldq(xmm_temp8, 4);
4849 __ pslldq(xmm_temp7, 4);
4850 __ psrldq(xmm_temp9, 12);
4851 __ por(xmm_temp3, xmm_temp7);
4852 __ por(xmm_temp6, xmm_temp8);
4853 __ por(xmm_temp6, xmm_temp9);
4854
4855 //
4856 // First phase of the reduction
4857 //
4858 // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts
4859 // independently.
4860 __ movdqu(xmm_temp7, xmm_temp3);
4861 __ movdqu(xmm_temp8, xmm_temp3);
4862 __ movdqu(xmm_temp9, xmm_temp3);
4863 __ pslld(xmm_temp7, 31);    // packed left shift << 31
4864 __ pslld(xmm_temp8, 30);    // packed left shift << 30
4865 __ pslld(xmm_temp9, 25);    // packed left shift << 25
4866 __ pxor(xmm_temp7, xmm_temp8); // xor the shifted versions
4867 __ pxor(xmm_temp7, xmm_temp9);
4868 __ movdqu(xmm_temp8, xmm_temp7);
4869 __ pslldq(xmm_temp7, 12);
4870 __ psrldq(xmm_temp8, 4);
4871 __ pxor(xmm_temp3, xmm_temp7); // first phase of the reduction complete
4872
4873 //
4874 // Second phase of the reduction
4875 //
4876 // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these
4877 // shift operations.
4878 __ movdqu(xmm_temp2, xmm_temp3);
4879 __ movdqu(xmm_temp4, xmm_temp3);
4880 __ movdqu(xmm_temp5, xmm_temp3);
4881 __ psrld(xmm_temp2, 1);     // packed right shift >> 1
4882 __ psrld(xmm_temp4, 2);     // packed right shift >> 2
4883 __ psrld(xmm_temp5, 7);     // packed right shift >> 7
4884 __ pxor(xmm_temp2, xmm_temp4); // xor the shifted versions
4885 __ pxor(xmm_temp2, xmm_temp5);
4886 __ pxor(xmm_temp2, xmm_temp8);
4887 __ pxor(xmm_temp3, xmm_temp2);
4888 __ pxor(xmm_temp6, xmm_temp3); // the result is in xmm6
4889
4890 __ decrement(blocks);
4891 __ jcc(Assembler::zero, L_exit);
4892 __ movdqu(xmm_temp0, xmm_temp6);
4893 __ addptr(data, 16);
4894 __ jmp(L_ghash_loop);
4895
4896 __ BIND(L_exit);
4897 __ pshufb(xmm_temp6, xmm_temp10); // Byte swap 16-byte result
4898 __ movdqu(Address(state, 0), xmm_temp6); // store the result
4899 __ leave();
4900 __ ret(0);
4901 return start;
4902 }
4903
4904 // base64 character set
4905 address base64_charset_addr() {
4906 __ align(CodeEntryAlignment);
4907 StubCodeMark mark(this, "StubRoutines", "base64_charset");
4908 address start = __ pc();
4909 __ emit_data64(0x0000004200000041, relocInfo::none);
4910 __ emit_data64(0x0000004400000043, relocInfo::none);
4911 __ emit_data64(0x0000004600000045, relocInfo::none);
4912 __ emit_data64(0x0000004800000047, relocInfo::none);
4913 __ emit_data64(0x0000004a00000049, relocInfo::none);
4914 __ emit_data64(0x0000004c0000004b, relocInfo::none);
4915 __ emit_data64(0x0000004e0000004d, relocInfo::none);
4916 __ emit_data64(0x000000500000004f, relocInfo::none);
4917 __ emit_data64(0x0000005200000051, relocInfo::none);
4918 __ emit_data64(0x0000005400000053, relocInfo::none);
4919 __ emit_data64(0x0000005600000055, relocInfo::none);
4920 __ emit_data64(0x0000005800000057, relocInfo::none);
4921 __ emit_data64(0x0000005a00000059, relocInfo::none);
4922 __ emit_data64(0x0000006200000061, relocInfo::none);
4923 __
emit_data64(0x0000006400000063, relocInfo::none); 4924 __ emit_data64(0x0000006600000065, relocInfo::none); 4925 __ emit_data64(0x0000006800000067, relocInfo::none); 4926 __ emit_data64(0x0000006a00000069, relocInfo::none); 4927 __ emit_data64(0x0000006c0000006b, relocInfo::none); 4928 __ emit_data64(0x0000006e0000006d, relocInfo::none); 4929 __ emit_data64(0x000000700000006f, relocInfo::none); 4930 __ emit_data64(0x0000007200000071, relocInfo::none); 4931 __ emit_data64(0x0000007400000073, relocInfo::none); 4932 __ emit_data64(0x0000007600000075, relocInfo::none); 4933 __ emit_data64(0x0000007800000077, relocInfo::none); 4934 __ emit_data64(0x0000007a00000079, relocInfo::none); 4935 __ emit_data64(0x0000003100000030, relocInfo::none); 4936 __ emit_data64(0x0000003300000032, relocInfo::none); 4937 __ emit_data64(0x0000003500000034, relocInfo::none); 4938 __ emit_data64(0x0000003700000036, relocInfo::none); 4939 __ emit_data64(0x0000003900000038, relocInfo::none); 4940 __ emit_data64(0x0000002f0000002b, relocInfo::none); 4941 return start; 4942 } 4943 4944 //base64 url character set 4945 address base64url_charset_addr() { 4946 __ align(CodeEntryAlignment); 4947 StubCodeMark mark(this, "StubRoutines", "base64url_charset"); 4948 address start = __ pc(); 4949 __ emit_data64(0x0000004200000041, relocInfo::none); 4950 __ emit_data64(0x0000004400000043, relocInfo::none); 4951 __ emit_data64(0x0000004600000045, relocInfo::none); 4952 __ emit_data64(0x0000004800000047, relocInfo::none); 4953 __ emit_data64(0x0000004a00000049, relocInfo::none); 4954 __ emit_data64(0x0000004c0000004b, relocInfo::none); 4955 __ emit_data64(0x0000004e0000004d, relocInfo::none); 4956 __ emit_data64(0x000000500000004f, relocInfo::none); 4957 __ emit_data64(0x0000005200000051, relocInfo::none); 4958 __ emit_data64(0x0000005400000053, relocInfo::none); 4959 __ emit_data64(0x0000005600000055, relocInfo::none); 4960 __ emit_data64(0x0000005800000057, relocInfo::none); 4961 __ emit_data64(0x0000005a00000059, relocInfo::none); 4962 __ emit_data64(0x0000006200000061, relocInfo::none); 4963 __ emit_data64(0x0000006400000063, relocInfo::none); 4964 __ emit_data64(0x0000006600000065, relocInfo::none); 4965 __ emit_data64(0x0000006800000067, relocInfo::none); 4966 __ emit_data64(0x0000006a00000069, relocInfo::none); 4967 __ emit_data64(0x0000006c0000006b, relocInfo::none); 4968 __ emit_data64(0x0000006e0000006d, relocInfo::none); 4969 __ emit_data64(0x000000700000006f, relocInfo::none); 4970 __ emit_data64(0x0000007200000071, relocInfo::none); 4971 __ emit_data64(0x0000007400000073, relocInfo::none); 4972 __ emit_data64(0x0000007600000075, relocInfo::none); 4973 __ emit_data64(0x0000007800000077, relocInfo::none); 4974 __ emit_data64(0x0000007a00000079, relocInfo::none); 4975 __ emit_data64(0x0000003100000030, relocInfo::none); 4976 __ emit_data64(0x0000003300000032, relocInfo::none); 4977 __ emit_data64(0x0000003500000034, relocInfo::none); 4978 __ emit_data64(0x0000003700000036, relocInfo::none); 4979 __ emit_data64(0x0000003900000038, relocInfo::none); 4980 __ emit_data64(0x0000005f0000002d, relocInfo::none); 4981 4982 return start; 4983 } 4984 4985 address base64_bswap_mask_addr() { 4986 __ align(CodeEntryAlignment); 4987 StubCodeMark mark(this, "StubRoutines", "bswap_mask_base64"); 4988 address start = __ pc(); 4989 __ emit_data64(0x0504038002010080, relocInfo::none); 4990 __ emit_data64(0x0b0a098008070680, relocInfo::none); 4991 __ emit_data64(0x0908078006050480, relocInfo::none); 4992 __ emit_data64(0x0f0e0d800c0b0a80, 
relocInfo::none); 4993 __ emit_data64(0x0605048003020180, relocInfo::none); 4994 __ emit_data64(0x0c0b0a8009080780, relocInfo::none); 4995 __ emit_data64(0x0504038002010080, relocInfo::none); 4996 __ emit_data64(0x0b0a098008070680, relocInfo::none); 4997 4998 return start; 4999 } 5000 5001 address base64_right_shift_mask_addr() { 5002 __ align(CodeEntryAlignment); 5003 StubCodeMark mark(this, "StubRoutines", "right_shift_mask"); 5004 address start = __ pc(); 5005 __ emit_data64(0x0006000400020000, relocInfo::none); 5006 __ emit_data64(0x0006000400020000, relocInfo::none); 5007 __ emit_data64(0x0006000400020000, relocInfo::none); 5008 __ emit_data64(0x0006000400020000, relocInfo::none); 5009 __ emit_data64(0x0006000400020000, relocInfo::none); 5010 __ emit_data64(0x0006000400020000, relocInfo::none); 5011 __ emit_data64(0x0006000400020000, relocInfo::none); 5012 __ emit_data64(0x0006000400020000, relocInfo::none); 5013 5014 return start; 5015 } 5016 5017 address base64_left_shift_mask_addr() { 5018 __ align(CodeEntryAlignment); 5019 StubCodeMark mark(this, "StubRoutines", "left_shift_mask"); 5020 address start = __ pc(); 5021 __ emit_data64(0x0000000200040000, relocInfo::none); 5022 __ emit_data64(0x0000000200040000, relocInfo::none); 5023 __ emit_data64(0x0000000200040000, relocInfo::none); 5024 __ emit_data64(0x0000000200040000, relocInfo::none); 5025 __ emit_data64(0x0000000200040000, relocInfo::none); 5026 __ emit_data64(0x0000000200040000, relocInfo::none); 5027 __ emit_data64(0x0000000200040000, relocInfo::none); 5028 __ emit_data64(0x0000000200040000, relocInfo::none); 5029 5030 return start; 5031 } 5032 5033 address base64_and_mask_addr() { 5034 __ align(CodeEntryAlignment); 5035 StubCodeMark mark(this, "StubRoutines", "and_mask"); 5036 address start = __ pc(); 5037 __ emit_data64(0x3f003f003f000000, relocInfo::none); 5038 __ emit_data64(0x3f003f003f000000, relocInfo::none); 5039 __ emit_data64(0x3f003f003f000000, relocInfo::none); 5040 __ emit_data64(0x3f003f003f000000, relocInfo::none); 5041 __ emit_data64(0x3f003f003f000000, relocInfo::none); 5042 __ emit_data64(0x3f003f003f000000, relocInfo::none); 5043 __ emit_data64(0x3f003f003f000000, relocInfo::none); 5044 __ emit_data64(0x3f003f003f000000, relocInfo::none); 5045 return start; 5046 } 5047 5048 address base64_gather_mask_addr() { 5049 __ align(CodeEntryAlignment); 5050 StubCodeMark mark(this, "StubRoutines", "gather_mask"); 5051 address start = __ pc(); 5052 __ emit_data64(0xffffffffffffffff, relocInfo::none); 5053 return start; 5054 } 5055 5056 // Code for generating Base64 encoding. 
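// The vector paths below split each group of 3 input bytes into four 6-bit
// indices (using the right/left shift and AND masks defined above) and then
// gather the corresponding characters from the 64-entry character-set table;
// a scalar loop encodes any remaining 3-byte groups.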
5057 // Intrinsic function prototype in Base64.java:
5058 // private void encodeBlock(byte[] src, int sp, int sl, byte[] dst, int dp, boolean isURL) {
5059 address generate_base64_encodeBlock() {
5060 __ align(CodeEntryAlignment);
5061 StubCodeMark mark(this, "StubRoutines", "implEncode");
5062 address start = __ pc();
5063 __ enter();
5064
5065 // Save callee-saved registers before using them
5066 __ push(r12);
5067 __ push(r13);
5068 __ push(r14);
5069 __ push(r15);
5070
5071 // arguments
5072 const Register source = c_rarg0;       // Source Array
5073 const Register start_offset = c_rarg1; // start offset
5074 const Register end_offset = c_rarg2;   // end offset
5075 const Register dest = c_rarg3;         // destination array
5076
5077 #ifndef _WIN64
5078 const Register dp = c_rarg4;    // Position for writing to dest array
5079 const Register isURL = c_rarg5; // Base64 or URL character set
5080 #else
5081 const Address dp_mem(rbp, 6 * wordSize); // dp is passed on the stack on Win64
5082 const Address isURL_mem(rbp, 7 * wordSize);
5083 const Register isURL = r10; // pick a volatile Windows register
5084 const Register dp = r12;
5085 __ movl(dp, dp_mem);
5086 __ movl(isURL, isURL_mem);
5087 #endif
5088
5089 const Register length = r14;
5090 Label L_process80, L_process32, L_process3, L_exit, L_processdata;
5091
5092 // calculate length from offsets
5093 __ movl(length, end_offset);
5094 __ subl(length, start_offset);
5095 __ cmpl(length, 0);
5096 __ jcc(Assembler::lessEqual, L_exit);
5097
5098 __ lea(r11, ExternalAddress(StubRoutines::x86::base64_charset_addr()));
5099 // check whether the base64 charset (isURL=0) or the base64 URL charset (isURL=1) needs to be loaded
5100 __ cmpl(isURL, 0);
5101 __ jcc(Assembler::equal, L_processdata);
5102 __ lea(r11, ExternalAddress(StubRoutines::x86::base64url_charset_addr()));
5103
5104 // load masks required for encoding data
5105 __ BIND(L_processdata);
5106 __ movdqu(xmm16, ExternalAddress(StubRoutines::x86::base64_gather_mask_addr()));
5107 // Set 64 bits of K register.
5108 __ evpcmpeqb(k3, xmm16, xmm16, Assembler::AVX_512bit);
5109 __ evmovdquq(xmm12, ExternalAddress(StubRoutines::x86::base64_bswap_mask_addr()), Assembler::AVX_256bit, r13);
5110 __ evmovdquq(xmm13, ExternalAddress(StubRoutines::x86::base64_right_shift_mask_addr()), Assembler::AVX_512bit, r13);
5111 __ evmovdquq(xmm14, ExternalAddress(StubRoutines::x86::base64_left_shift_mask_addr()), Assembler::AVX_512bit, r13);
5112 __ evmovdquq(xmm15, ExternalAddress(StubRoutines::x86::base64_and_mask_addr()), Assembler::AVX_512bit, r13);
5113
5114 // Vector Base64 implementation, producing 96 bytes of encoded data
5115 __ BIND(L_process80);
5116 __ cmpl(length, 80);
5117 __ jcc(Assembler::below, L_process32);
5118 __ evmovdquq(xmm0, Address(source, start_offset, Address::times_1, 0), Assembler::AVX_256bit);
5119 __ evmovdquq(xmm1, Address(source, start_offset, Address::times_1, 24), Assembler::AVX_256bit);
5120 __ evmovdquq(xmm2, Address(source, start_offset, Address::times_1, 48), Assembler::AVX_256bit);
5121
5122 // permute the input data so that we have continuity of the source
5123 __ vpermq(xmm3, xmm0, 148, Assembler::AVX_256bit);
5124 __ vpermq(xmm4, xmm1, 148, Assembler::AVX_256bit);
5125 __ vpermq(xmm5, xmm2, 148, Assembler::AVX_256bit);
5126
5127 // shuffle input to group 3 bytes of data and add 0 as the 4th byte.
5128 //we can deal with 12 bytes at a time in a 128 bit register 5129 __ vpshufb(xmm3, xmm3, xmm12, Assembler::AVX_256bit); 5130 __ vpshufb(xmm4, xmm4, xmm12, Assembler::AVX_256bit); 5131 __ vpshufb(xmm5, xmm5, xmm12, Assembler::AVX_256bit); 5132 5133 //convert byte to word. Each 128 bit register will have 6 bytes for processing 5134 __ vpmovzxbw(xmm3, xmm3, Assembler::AVX_512bit); 5135 __ vpmovzxbw(xmm4, xmm4, Assembler::AVX_512bit); 5136 __ vpmovzxbw(xmm5, xmm5, Assembler::AVX_512bit); 5137 5138 // Extract bits in the following pattern 6, 4+2, 2+4, 6 to convert 3, 8 bit numbers to 4, 6 bit numbers 5139 __ evpsrlvw(xmm0, xmm3, xmm13, Assembler::AVX_512bit); 5140 __ evpsrlvw(xmm1, xmm4, xmm13, Assembler::AVX_512bit); 5141 __ evpsrlvw(xmm2, xmm5, xmm13, Assembler::AVX_512bit); 5142 5143 __ evpsllvw(xmm3, xmm3, xmm14, Assembler::AVX_512bit); 5144 __ evpsllvw(xmm4, xmm4, xmm14, Assembler::AVX_512bit); 5145 __ evpsllvw(xmm5, xmm5, xmm14, Assembler::AVX_512bit); 5146 5147 __ vpsrlq(xmm0, xmm0, 8, Assembler::AVX_512bit); 5148 __ vpsrlq(xmm1, xmm1, 8, Assembler::AVX_512bit); 5149 __ vpsrlq(xmm2, xmm2, 8, Assembler::AVX_512bit); 5150 5151 __ vpsllq(xmm3, xmm3, 8, Assembler::AVX_512bit); 5152 __ vpsllq(xmm4, xmm4, 8, Assembler::AVX_512bit); 5153 __ vpsllq(xmm5, xmm5, 8, Assembler::AVX_512bit); 5154 5155 __ vpandq(xmm3, xmm3, xmm15, Assembler::AVX_512bit); 5156 __ vpandq(xmm4, xmm4, xmm15, Assembler::AVX_512bit); 5157 __ vpandq(xmm5, xmm5, xmm15, Assembler::AVX_512bit); 5158 5159 // Get the final 4*6 bits base64 encoding 5160 __ vporq(xmm3, xmm3, xmm0, Assembler::AVX_512bit); 5161 __ vporq(xmm4, xmm4, xmm1, Assembler::AVX_512bit); 5162 __ vporq(xmm5, xmm5, xmm2, Assembler::AVX_512bit); 5163 5164 // Shift 5165 __ vpsrlq(xmm3, xmm3, 8, Assembler::AVX_512bit); 5166 __ vpsrlq(xmm4, xmm4, 8, Assembler::AVX_512bit); 5167 __ vpsrlq(xmm5, xmm5, 8, Assembler::AVX_512bit); 5168 5169 // look up 6 bits in the base64 character set to fetch the encoding 5170 // we are converting word to dword as gather instructions need dword indices for looking up encoding 5171 __ vextracti64x4(xmm6, xmm3, 0); 5172 __ vpmovzxwd(xmm0, xmm6, Assembler::AVX_512bit); 5173 __ vextracti64x4(xmm6, xmm3, 1); 5174 __ vpmovzxwd(xmm1, xmm6, Assembler::AVX_512bit); 5175 5176 __ vextracti64x4(xmm6, xmm4, 0); 5177 __ vpmovzxwd(xmm2, xmm6, Assembler::AVX_512bit); 5178 __ vextracti64x4(xmm6, xmm4, 1); 5179 __ vpmovzxwd(xmm3, xmm6, Assembler::AVX_512bit); 5180 5181 __ vextracti64x4(xmm4, xmm5, 0); 5182 __ vpmovzxwd(xmm6, xmm4, Assembler::AVX_512bit); 5183 5184 __ vextracti64x4(xmm4, xmm5, 1); 5185 __ vpmovzxwd(xmm7, xmm4, Assembler::AVX_512bit); 5186 5187 __ kmovql(k2, k3); 5188 __ evpgatherdd(xmm4, k2, Address(r11, xmm0, Address::times_4, 0), Assembler::AVX_512bit); 5189 __ kmovql(k2, k3); 5190 __ evpgatherdd(xmm5, k2, Address(r11, xmm1, Address::times_4, 0), Assembler::AVX_512bit); 5191 __ kmovql(k2, k3); 5192 __ evpgatherdd(xmm8, k2, Address(r11, xmm2, Address::times_4, 0), Assembler::AVX_512bit); 5193 __ kmovql(k2, k3); 5194 __ evpgatherdd(xmm9, k2, Address(r11, xmm3, Address::times_4, 0), Assembler::AVX_512bit); 5195 __ kmovql(k2, k3); 5196 __ evpgatherdd(xmm10, k2, Address(r11, xmm6, Address::times_4, 0), Assembler::AVX_512bit); 5197 __ kmovql(k2, k3); 5198 __ evpgatherdd(xmm11, k2, Address(r11, xmm7, Address::times_4, 0), Assembler::AVX_512bit); 5199 5200 //Down convert dword to byte. 
Final output is 16*6 = 96 bytes long
5201 __ evpmovdb(Address(dest, dp, Address::times_1, 0), xmm4, Assembler::AVX_512bit);
5202 __ evpmovdb(Address(dest, dp, Address::times_1, 16), xmm5, Assembler::AVX_512bit);
5203 __ evpmovdb(Address(dest, dp, Address::times_1, 32), xmm8, Assembler::AVX_512bit);
5204 __ evpmovdb(Address(dest, dp, Address::times_1, 48), xmm9, Assembler::AVX_512bit);
5205 __ evpmovdb(Address(dest, dp, Address::times_1, 64), xmm10, Assembler::AVX_512bit);
5206 __ evpmovdb(Address(dest, dp, Address::times_1, 80), xmm11, Assembler::AVX_512bit);
5207
5208 __ addq(dest, 96);
5209 __ addq(source, 72);
5210 __ subq(length, 72);
5211 __ jmp(L_process80);
5212
5213 // Vector Base64 implementation generating 32 bytes of encoded data
5214 __ BIND(L_process32);
5215 __ cmpl(length, 32);
5216 __ jcc(Assembler::below, L_process3);
5217 __ evmovdquq(xmm0, Address(source, start_offset), Assembler::AVX_256bit);
5218 __ vpermq(xmm0, xmm0, 148, Assembler::AVX_256bit);
5219 __ vpshufb(xmm6, xmm0, xmm12, Assembler::AVX_256bit);
5220 __ vpmovzxbw(xmm6, xmm6, Assembler::AVX_512bit);
5221 __ evpsrlvw(xmm2, xmm6, xmm13, Assembler::AVX_512bit);
5222 __ evpsllvw(xmm3, xmm6, xmm14, Assembler::AVX_512bit);
5223
5224 __ vpsrlq(xmm2, xmm2, 8, Assembler::AVX_512bit);
5225 __ vpsllq(xmm3, xmm3, 8, Assembler::AVX_512bit);
5226 __ vpandq(xmm3, xmm3, xmm15, Assembler::AVX_512bit);
5227 __ vporq(xmm1, xmm2, xmm3, Assembler::AVX_512bit);
5228 __ vpsrlq(xmm1, xmm1, 8, Assembler::AVX_512bit);
5229 __ vextracti64x4(xmm9, xmm1, 0);
5230 __ vpmovzxwd(xmm6, xmm9, Assembler::AVX_512bit);
5231 __ vextracti64x4(xmm9, xmm1, 1);
5232 __ vpmovzxwd(xmm5, xmm9, Assembler::AVX_512bit);
5233 __ kmovql(k2, k3);
5234 __ evpgatherdd(xmm8, k2, Address(r11, xmm6, Address::times_4, 0), Assembler::AVX_512bit);
5235 __ kmovql(k2, k3);
5236 __ evpgatherdd(xmm10, k2, Address(r11, xmm5, Address::times_4, 0), Assembler::AVX_512bit);
5237 __ evpmovdb(Address(dest, dp, Address::times_1, 0), xmm8, Assembler::AVX_512bit);
5238 __ evpmovdb(Address(dest, dp, Address::times_1, 16), xmm10, Assembler::AVX_512bit);
5239 __ subq(length, 24);
5240 __ addq(dest, 32);
5241 __ addq(source, 24);
5242 __ jmp(L_process32);
5243
5244 // Scalar data processing takes 3 bytes at a time and produces 4 bytes of encoded data
5245 /* This code corresponds to the scalar version of the following snippet in Base64.java
5246 ** int bits = (src[sp0++] & 0xff) << 16 | (src[sp0++] & 0xff) << 8 | (src[sp0++] & 0xff);
5247 ** dst[dp0++] = (byte)base64[(bits >>> 18) & 0x3f];
5248 ** dst[dp0++] = (byte)base64[(bits >>> 12) & 0x3f];
5249 ** dst[dp0++] = (byte)base64[(bits >>> 6) & 0x3f];
5250 ** dst[dp0++] = (byte)base64[bits & 0x3f];*/
5251 __ BIND(L_process3);
5252 __ cmpl(length, 3);
5253 __ jcc(Assembler::below, L_exit);
5254 // Read 1 byte at a time
5255 __ movzbl(rax, Address(source, start_offset));
5256 __ shll(rax, 0x10);
5257 __ movl(r15, rax);
5258 __ movzbl(rax, Address(source, start_offset, Address::times_1, 1));
5259 __ shll(rax, 0x8);
5260 __ movzwl(rax, rax);
5261 __ orl(r15, rax);
5262 __ movzbl(rax, Address(source, start_offset, Address::times_1, 2));
5263 __ orl(rax, r15);
5264 // Save the 3 bytes read in r15
5265 __ movl(r15, rax);
5266 __ shrl(rax, 0x12);
5267 __ andl(rax, 0x3f);
5268 // rax contains the index, r11 contains the base64 lookup table
5269 __ movb(rax, Address(r11, rax, Address::times_4));
5270 // Write the encoded byte to the destination
5271 __ movb(Address(dest, dp, Address::times_1, 0), rax);
5272 __ movl(rax, r15);
5273 __ shrl(rax, 0xc);
5274 __
andl(rax, 0x3f);
5275 __ movb(rax, Address(r11, rax, Address::times_4));
5276 __ movb(Address(dest, dp, Address::times_1, 1), rax);
5277 __ movl(rax, r15);
5278 __ shrl(rax, 0x6);
5279 __ andl(rax, 0x3f);
5280 __ movb(rax, Address(r11, rax, Address::times_4));
5281 __ movb(Address(dest, dp, Address::times_1, 2), rax);
5282 __ movl(rax, r15);
5283 __ andl(rax, 0x3f);
5284 __ movb(rax, Address(r11, rax, Address::times_4));
5285 __ movb(Address(dest, dp, Address::times_1, 3), rax);
5286 __ subl(length, 3);
5287 __ addq(dest, 4);
5288 __ addq(source, 3);
5289 __ jmp(L_process3);
5290 __ BIND(L_exit);
5291 __ pop(r15);
5292 __ pop(r14);
5293 __ pop(r13);
5294 __ pop(r12);
5295 __ leave();
5296 __ ret(0);
5297 return start;
5298 }
5299
5300 /**
5301 * Arguments:
5302 *
5303 * Inputs:
5304 *   c_rarg0 - int crc
5305 *   c_rarg1 - byte* buf
5306 *   c_rarg2 - int length
5307 *
5308 * Output:
5309 *   rax - int crc result
5310 */
5311 address generate_updateBytesCRC32() {
5312 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");
5313
5314 __ align(CodeEntryAlignment);
5315 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");
5316
5317 address start = __ pc();
5318 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
5319 // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
5320 // rscratch1: r10
5321 const Register crc = c_rarg0;   // crc
5322 const Register buf = c_rarg1;   // source java byte array address
5323 const Register len = c_rarg2;   // length
5324 const Register table = c_rarg3; // crc_table address (reuse register)
5325 const Register tmp = r11;
5326 assert_different_registers(crc, buf, len, table, tmp, rax);
5327
5328 BLOCK_COMMENT("Entry:");
5329 __ enter(); // required for proper stackwalking of RuntimeStub frame
5330
5331 __ kernel_crc32(crc, buf, len, table, tmp);
5332
5333 __ movl(rax, crc);
5334 __ vzeroupper();
5335 __ leave(); // required for proper stackwalking of RuntimeStub frame
5336 __ ret(0);
5337
5338 return start;
5339 }
5340
5341 /**
5342 * Arguments:
5343 *
5344 * Inputs:
5345 *   c_rarg0 - int crc
5346 *   c_rarg1 - byte* buf
5347 *   c_rarg2 - long length
5348 *   c_rarg3 - table_start - optional (present only when doing a library_call,
5349 *             not used by x86 algorithm)
5350 *
5351 * Output:
5352 *   rax - int crc result
5353 */
5354 address generate_updateBytesCRC32C(bool is_pclmulqdq_supported) {
5355 assert(UseCRC32CIntrinsics, "need SSE4_2");
5356 __ align(CodeEntryAlignment);
5357 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C");
5358 address start = __ pc();
5359 // reg.arg   int#0  int#1  int#2  int#3  int#4  int#5  float regs
5360 // Windows   RCX    RDX    R8     R9     none   none   XMM0..XMM3
5361 // Lin / Sol RDI    RSI    RDX    RCX    R8     R9     XMM0..XMM7
5362 const Register crc = c_rarg0; // crc
5363 const Register buf = c_rarg1; // source java byte array address
5364 const Register len = c_rarg2; // length
5365 const Register a = rax;
5366 const Register j = r9;
5367 const Register k = r10;
5368 const Register l = r11;
5369 #ifdef _WIN64
5370 const Register y = rdi;
5371 const Register z = rsi;
5372 #else
5373 const Register y = rcx;
5374 const Register z = r8;
5375 #endif
5376 assert_different_registers(crc, buf, len, a, j, k, l, y, z);
5377
5378 BLOCK_COMMENT("Entry:");
5379 __ enter(); // required for proper stackwalking of RuntimeStub frame
5380 #ifdef _WIN64
5381 __ push(y);
5382 __ push(z);
5383 #endif
5384 __ crc32c_ipl_alg2_alt2(crc, buf, len,
5385                         a, j, k,
5386                         l, y, z,
5387                         c_farg0, c_farg1, c_farg2,
5388                         is_pclmulqdq_supported);
5389 __ movl(rax, crc);
5390 #ifdef _WIN64
5391 __ pop(z);
5392 __ pop(y);
5393 #endif
5394 __ vzeroupper();
5395 __ leave(); // required for proper stackwalking of RuntimeStub frame
5396 __ ret(0);
5397
5398 return start;
5399 }
5400
5401 /**
5402 * Arguments:
5403 *
5404 * Input:
5405 *   c_rarg0 - x address
5406 *   c_rarg1 - x length
5407 *   c_rarg2 - y address
5408 *   c_rarg3 - y length
5409 * not Win64
5410 *   c_rarg4 - z address
5411 *   c_rarg5 - z length
5412 * Win64
5413 *   rsp+40  - z address
5414 *   rsp+48  - z length
5415 */
5416 address generate_multiplyToLen() {
5417 __ align(CodeEntryAlignment);
5418 StubCodeMark mark(this, "StubRoutines", "multiplyToLen");
5419
5420 address start = __ pc();
5421 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
5422 // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
5423 const Register x = rdi;
5424 const Register xlen = rax;
5425 const Register y = rsi;
5426 const Register ylen = rcx;
5427 const Register z = r8;
5428 const Register zlen = r11;
5429
5430 // Next registers will be saved on stack in multiply_to_len().
5431 const Register tmp1 = r12;
5432 const Register tmp2 = r13;
5433 const Register tmp3 = r14;
5434 const Register tmp4 = r15;
5435 const Register tmp5 = rbx;
5436
5437 BLOCK_COMMENT("Entry:");
5438 __ enter(); // required for proper stackwalking of RuntimeStub frame
5439
5440 #ifndef _WIN64
5441 __ movptr(zlen, r9); // Save r9 in r11 - zlen
5442 #endif
5443 setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx
5444 // ylen => rcx, z => r8, zlen => r11
5445 // r9 and r10 may be used to save non-volatile registers
5446 #ifdef _WIN64
5447 // last 2 arguments (#4, #5) are on stack on Win64
5448 __ movptr(z, Address(rsp, 6 * wordSize));
5449 __ movptr(zlen, Address(rsp, 7 * wordSize));
5450 #endif
5451
5452 __ movptr(xlen, rsi);
5453 __ movptr(y, rdx);
5454 __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5);
5455
5456 restore_arg_regs();
5457
5458 __ leave(); // required for proper stackwalking of RuntimeStub frame
5459 __ ret(0);
5460
5461 return start;
5462 }
5463
5464 /**
5465 * Arguments:
5466 *
5467 * Input:
5468 *   c_rarg0 - obja    address
5469 *   c_rarg1 - objb    address
5470 *   c_rarg2 - length  length
5471 *   c_rarg3 - scale   log2_array_indxscale
5472 *
5473 * Output:
5474 *   rax - int >= mismatched index, < 0 bitwise complement of tail
5475 */
5476 address generate_vectorizedMismatch() {
5477 __ align(CodeEntryAlignment);
5478 StubCodeMark mark(this, "StubRoutines", "vectorizedMismatch");
5479 address start = __ pc();
5480
5481 BLOCK_COMMENT("Entry:");
5482 __ enter();
5483
5484 #ifdef _WIN64 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
5485 const Register scale = c_rarg0;  // rcx, will exchange with r9
5486 const Register objb = c_rarg1;   // rdx
5487 const Register length = c_rarg2; // r8
5488 const Register obja = c_rarg3;   // r9
5489 __ xchgq(obja, scale); // now obja and scale contain the correct contents
5490
5491 const Register tmp1 = r10;
5492 const Register tmp2 = r11;
5493 #endif
5494 #ifndef _WIN64 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
5495 const Register obja = c_rarg0;   // U:rdi
5496 const Register objb = c_rarg1;   // U:rsi
5497 const Register length = c_rarg2; // U:rdx
5498 const Register scale = c_rarg3;  // U:rcx
5499 const Register tmp1 = r8;
5500 const Register tmp2 = r9;
5501 #endif
5502 const Register result = rax; // return value
5503 const XMMRegister vec0 = xmm0;
5504 const XMMRegister vec1 = xmm1;
5505 const XMMRegister vec2 = xmm2;
5506
5507 __ vectorized_mismatch(obja, objb, length, scale, result, tmp1, tmp2, vec0, vec1, vec2);
5508
5509 __ vzeroupper();
5510 __ leave();
5511 __ ret(0);
5512
5513 return start;
5514 }
5515
5516 /**
5517 * Arguments:
5518 *
5519 * Input:
5520 *   c_rarg0 - x address
5521 *   c_rarg1 - x length
5522 *   c_rarg2 - z address
5523 *   c_rarg3 - z length
5524 *
5525 */
5526 address generate_squareToLen() {
5527
5528 __ align(CodeEntryAlignment);
5529 StubCodeMark mark(this, "StubRoutines", "squareToLen");
5530
5531 address start = __ pc();
5532 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
5533 // Unix:  rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...)
5534 const Register x = rdi;
5535 const Register len = rsi;
5536 const Register z = r8;
5537 const Register zlen = rcx;
5538
5539 const Register tmp1 = r12;
5540 const Register tmp2 = r13;
5541 const Register tmp3 = r14;
5542 const Register tmp4 = r15;
5543 const Register tmp5 = rbx;
5544
5545 BLOCK_COMMENT("Entry:");
5546 __ enter(); // required for proper stackwalking of RuntimeStub frame
5547
5548 setup_arg_regs(4); // x => rdi, len => rsi, z => rdx
5549 // zlen => rcx
5550 // r9 and r10 may be used to save non-volatile registers
5551 __ movptr(r8, rdx);
5552 __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax);
5553
5554 restore_arg_regs();
5555
5556 __ leave(); // required for proper stackwalking of RuntimeStub frame
5557 __ ret(0);
5558
5559 return start;
5560 }
5561
5562 address generate_method_entry_barrier() {
5563 __ align(CodeEntryAlignment);
5564 StubCodeMark mark(this, "StubRoutines", "nmethod_entry_barrier");
5565
5566 Label deoptimize_label;
5567
5568 address start = __ pc();
5569
5570 __ push(-1); // cookie, this is used for writing the new rsp when deoptimizing
5571
5572 BLOCK_COMMENT("Entry:");
5573 __ enter(); // save rbp
5574
5575 // save c_rarg0, because we want to use that value.
5576 // We could do without it but then we depend on the number of slots used by pusha
5577 __ push(c_rarg0);
5578
5579 __ lea(c_rarg0, Address(rsp, wordSize * 3)); // 1 for cookie, 1 for rbp, 1 for c_rarg0 - this should be the return address
5580
5581 __ pusha();
5582
5583 // The method may have floats as arguments, and we must spill them before calling
5584 // the VM runtime.
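// Java passes up to eight floating-point arguments in xmm0..xmm7 (asserted
// below); each register gets its own 16-byte slot (xmm_size) in the spill
// area and is restored from the same slot once the call returns.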
5585 assert(Argument::n_float_register_parameters_j == 8, "Assumption");
5586 const int xmm_size = wordSize * 2;
5587 const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j;
5588 __ subptr(rsp, xmm_spill_size);
5589 __ movdqu(Address(rsp, xmm_size * 7), xmm7);
5590 __ movdqu(Address(rsp, xmm_size * 6), xmm6);
5591 __ movdqu(Address(rsp, xmm_size * 5), xmm5);
5592 __ movdqu(Address(rsp, xmm_size * 4), xmm4);
5593 __ movdqu(Address(rsp, xmm_size * 3), xmm3);
5594 __ movdqu(Address(rsp, xmm_size * 2), xmm2);
5595 __ movdqu(Address(rsp, xmm_size * 1), xmm1);
5596 __ movdqu(Address(rsp, xmm_size * 0), xmm0);
5597
5598 __ call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(address*)>(BarrierSetNMethod::nmethod_stub_entry_barrier)), 1);
5599
5600 __ movdqu(xmm0, Address(rsp, xmm_size * 0));
5601 __ movdqu(xmm1, Address(rsp, xmm_size * 1));
5602 __ movdqu(xmm2, Address(rsp, xmm_size * 2));
5603 __ movdqu(xmm3, Address(rsp, xmm_size * 3));
5604 __ movdqu(xmm4, Address(rsp, xmm_size * 4));
5605 __ movdqu(xmm5, Address(rsp, xmm_size * 5));
5606 __ movdqu(xmm6, Address(rsp, xmm_size * 6));
5607 __ movdqu(xmm7, Address(rsp, xmm_size * 7));
5608 __ addptr(rsp, xmm_spill_size);
5609
5610 __ cmpl(rax, 1); // 1 means deoptimize
5611 __ jcc(Assembler::equal, deoptimize_label);
5612
5613 __ popa();
5614 __ pop(c_rarg0);
5615
5616 __ leave();
5617
5618 __ addptr(rsp, 1 * wordSize); // cookie
5619 __ ret(0);
5620
5621
5622 __ BIND(deoptimize_label);
5623
5624 __ popa();
5625 __ pop(c_rarg0);
5626
5627 __ leave();
5628
5629 // This can be taken out, but is good for verification purposes. Getting a SIGSEGV
5630 // here while still having a correct stack is valuable.
5631 __ testptr(rsp, Address(rsp, 0));
5632
5633 __ movptr(rsp, Address(rsp, 0)); // new rsp was written in the barrier
5634 __ jmp(Address(rsp, -1 * wordSize)); // jmp target should be the caller's verified_entry_point
5635
5636 return start;
5637 }
5638
5639 /**
5640 * Arguments:
5641 *
5642 * Input:
5643 *   c_rarg0 - out address
5644 *   c_rarg1 - in address
5645 *   c_rarg2 - offset
5646 *   c_rarg3 - len
5647 * not Win64
5648 *   c_rarg4 - k
5649 * Win64
5650 *   rsp+40  - k
5651 */
5652 address generate_mulAdd() {
5653 __ align(CodeEntryAlignment);
5654 StubCodeMark mark(this, "StubRoutines", "mulAdd");
5655
5656 address start = __ pc();
5657 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
5658 // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
5659 const Register out = rdi;
5660 const Register in = rsi;
5661 const Register offset = r11;
5662 const Register len = rcx;
5663 const Register k = r8;
5664
5665 // Next registers will be saved on stack in mul_add().
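// For reference, a Java-like sketch of what mul_add() computes (roughly
// BigInteger.implMulAdd; illustrative only, not the generated code):
//   long kLong = k & 0xffffffffL, carry = 0;
//   for (int j = len - 1; j >= 0; j--) {
//     long product = (in[j] & 0xffffffffL) * kLong
//                  + (out[offset] & 0xffffffffL) + carry;
//     out[offset--] = (int)product;
//     carry = product >>> 32;
//   }
//   return (int)carry;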
5666 const Register tmp1 = r12; 5667 const Register tmp2 = r13; 5668 const Register tmp3 = r14; 5669 const Register tmp4 = r15; 5670 const Register tmp5 = rbx; 5671 5672 BLOCK_COMMENT("Entry:"); 5673 __ enter(); // required for proper stackwalking of RuntimeStub frame 5674 5675 setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx 5676 // len => rcx, k => r8 5677 // r9 and r10 may be used to save non-volatile registers 5678 #ifdef _WIN64 5679 // last argument is on stack on Win64 5680 __ movl(k, Address(rsp, 6 * wordSize)); 5681 #endif 5682 __ movptr(r11, rdx); // move offset in rdx to offset(r11) 5683 __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 5684 5685 restore_arg_regs(); 5686 5687 __ leave(); // required for proper stackwalking of RuntimeStub frame 5688 __ ret(0); 5689 5690 return start; 5691 } 5692 5693 address generate_libmExp() { 5694 StubCodeMark mark(this, "StubRoutines", "libmExp"); 5695 5696 address start = __ pc(); 5697 5698 const XMMRegister x0 = xmm0; 5699 const XMMRegister x1 = xmm1; 5700 const XMMRegister x2 = xmm2; 5701 const XMMRegister x3 = xmm3; 5702 5703 const XMMRegister x4 = xmm4; 5704 const XMMRegister x5 = xmm5; 5705 const XMMRegister x6 = xmm6; 5706 const XMMRegister x7 = xmm7; 5707 5708 const Register tmp = r11; 5709 5710 BLOCK_COMMENT("Entry:"); 5711 __ enter(); // required for proper stackwalking of RuntimeStub frame 5712 5713 __ fast_exp(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 5714 5715 __ leave(); // required for proper stackwalking of RuntimeStub frame 5716 __ ret(0); 5717 5718 return start; 5719 5720 } 5721 5722 address generate_libmLog() { 5723 StubCodeMark mark(this, "StubRoutines", "libmLog"); 5724 5725 address start = __ pc(); 5726 5727 const XMMRegister x0 = xmm0; 5728 const XMMRegister x1 = xmm1; 5729 const XMMRegister x2 = xmm2; 5730 const XMMRegister x3 = xmm3; 5731 5732 const XMMRegister x4 = xmm4; 5733 const XMMRegister x5 = xmm5; 5734 const XMMRegister x6 = xmm6; 5735 const XMMRegister x7 = xmm7; 5736 5737 const Register tmp1 = r11; 5738 const Register tmp2 = r8; 5739 5740 BLOCK_COMMENT("Entry:"); 5741 __ enter(); // required for proper stackwalking of RuntimeStub frame 5742 5743 __ fast_log(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2); 5744 5745 __ leave(); // required for proper stackwalking of RuntimeStub frame 5746 __ ret(0); 5747 5748 return start; 5749 5750 } 5751 5752 address generate_libmLog10() { 5753 StubCodeMark mark(this, "StubRoutines", "libmLog10"); 5754 5755 address start = __ pc(); 5756 5757 const XMMRegister x0 = xmm0; 5758 const XMMRegister x1 = xmm1; 5759 const XMMRegister x2 = xmm2; 5760 const XMMRegister x3 = xmm3; 5761 5762 const XMMRegister x4 = xmm4; 5763 const XMMRegister x5 = xmm5; 5764 const XMMRegister x6 = xmm6; 5765 const XMMRegister x7 = xmm7; 5766 5767 const Register tmp = r11; 5768 5769 BLOCK_COMMENT("Entry:"); 5770 __ enter(); // required for proper stackwalking of RuntimeStub frame 5771 5772 __ fast_log10(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 5773 5774 __ leave(); // required for proper stackwalking of RuntimeStub frame 5775 __ ret(0); 5776 5777 return start; 5778 5779 } 5780 5781 address generate_libmPow() { 5782 StubCodeMark mark(this, "StubRoutines", "libmPow"); 5783 5784 address start = __ pc(); 5785 5786 const XMMRegister x0 = xmm0; 5787 const XMMRegister x1 = xmm1; 5788 const XMMRegister x2 = xmm2; 5789 const XMMRegister x3 = xmm3; 5790 5791 const XMMRegister x4 = xmm4; 5792 const XMMRegister x5 = xmm5; 5793 const XMMRegister 
x6 = xmm6; 5794 const XMMRegister x7 = xmm7; 5795 5796 const Register tmp1 = r8; 5797 const Register tmp2 = r9; 5798 const Register tmp3 = r10; 5799 const Register tmp4 = r11; 5800 5801 BLOCK_COMMENT("Entry:"); 5802 __ enter(); // required for proper stackwalking of RuntimeStub frame 5803 5804 __ fast_pow(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 5805 5806 __ leave(); // required for proper stackwalking of RuntimeStub frame 5807 __ ret(0); 5808 5809 return start; 5810 5811 } 5812 5813 address generate_libmSin() { 5814 StubCodeMark mark(this, "StubRoutines", "libmSin"); 5815 5816 address start = __ pc(); 5817 5818 const XMMRegister x0 = xmm0; 5819 const XMMRegister x1 = xmm1; 5820 const XMMRegister x2 = xmm2; 5821 const XMMRegister x3 = xmm3; 5822 5823 const XMMRegister x4 = xmm4; 5824 const XMMRegister x5 = xmm5; 5825 const XMMRegister x6 = xmm6; 5826 const XMMRegister x7 = xmm7; 5827 5828 const Register tmp1 = r8; 5829 const Register tmp2 = r9; 5830 const Register tmp3 = r10; 5831 const Register tmp4 = r11; 5832 5833 BLOCK_COMMENT("Entry:"); 5834 __ enter(); // required for proper stackwalking of RuntimeStub frame 5835 5836 #ifdef _WIN64 5837 __ push(rsi); 5838 __ push(rdi); 5839 #endif 5840 __ fast_sin(x0, x1, x2, x3, x4, x5, x6, x7, rax, rbx, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 5841 5842 #ifdef _WIN64 5843 __ pop(rdi); 5844 __ pop(rsi); 5845 #endif 5846 5847 __ leave(); // required for proper stackwalking of RuntimeStub frame 5848 __ ret(0); 5849 5850 return start; 5851 5852 } 5853 5854 address generate_libmCos() { 5855 StubCodeMark mark(this, "StubRoutines", "libmCos"); 5856 5857 address start = __ pc(); 5858 5859 const XMMRegister x0 = xmm0; 5860 const XMMRegister x1 = xmm1; 5861 const XMMRegister x2 = xmm2; 5862 const XMMRegister x3 = xmm3; 5863 5864 const XMMRegister x4 = xmm4; 5865 const XMMRegister x5 = xmm5; 5866 const XMMRegister x6 = xmm6; 5867 const XMMRegister x7 = xmm7; 5868 5869 const Register tmp1 = r8; 5870 const Register tmp2 = r9; 5871 const Register tmp3 = r10; 5872 const Register tmp4 = r11; 5873 5874 BLOCK_COMMENT("Entry:"); 5875 __ enter(); // required for proper stackwalking of RuntimeStub frame 5876 5877 #ifdef _WIN64 5878 __ push(rsi); 5879 __ push(rdi); 5880 #endif 5881 __ fast_cos(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 5882 5883 #ifdef _WIN64 5884 __ pop(rdi); 5885 __ pop(rsi); 5886 #endif 5887 5888 __ leave(); // required for proper stackwalking of RuntimeStub frame 5889 __ ret(0); 5890 5891 return start; 5892 5893 } 5894 5895 address generate_libmTan() { 5896 StubCodeMark mark(this, "StubRoutines", "libmTan"); 5897 5898 address start = __ pc(); 5899 5900 const XMMRegister x0 = xmm0; 5901 const XMMRegister x1 = xmm1; 5902 const XMMRegister x2 = xmm2; 5903 const XMMRegister x3 = xmm3; 5904 5905 const XMMRegister x4 = xmm4; 5906 const XMMRegister x5 = xmm5; 5907 const XMMRegister x6 = xmm6; 5908 const XMMRegister x7 = xmm7; 5909 5910 const Register tmp1 = r8; 5911 const Register tmp2 = r9; 5912 const Register tmp3 = r10; 5913 const Register tmp4 = r11; 5914 5915 BLOCK_COMMENT("Entry:"); 5916 __ enter(); // required for proper stackwalking of RuntimeStub frame 5917 5918 #ifdef _WIN64 5919 __ push(rsi); 5920 __ push(rdi); 5921 #endif 5922 __ fast_tan(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 5923 5924 #ifdef _WIN64 5925 __ pop(rdi); 5926 __ pop(rsi); 5927 #endif 5928 5929 __ leave(); // required for proper stackwalking of RuntimeStub frame 5930 __ ret(0); 5931 5932 return 
start; 5933 5934 } 5935 5936 #undef __ 5937 #define __ masm-> 5938 5939 // Continuation point for throwing of implicit exceptions that are 5940 // not handled in the current activation. Fabricates an exception 5941 // oop and initiates normal exception dispatching in this 5942 // frame. Since we need to preserve callee-saved values (currently 5943 // only for C2, but done for C1 as well) we need a callee-saved oop 5944 // map and therefore have to make these stubs into RuntimeStubs 5945 // rather than BufferBlobs. If the compiler needs all registers to 5946 // be preserved between the fault point and the exception handler 5947 // then it must assume responsibility for that in 5948 // AbstractCompiler::continuation_for_implicit_null_exception or 5949 // continuation_for_implicit_division_by_zero_exception. All other 5950 // implicit exceptions (e.g., NullPointerException or 5951 // AbstractMethodError on entry) are either at call sites or 5952 // otherwise assume that stack unwinding will be initiated, so 5953 // caller saved registers were assumed volatile in the compiler. 5954 address generate_throw_exception(const char* name, 5955 address runtime_entry, 5956 Register arg1 = noreg, 5957 Register arg2 = noreg) { 5958 // Information about frame layout at time of blocking runtime call. 5959 // Note that we only have to preserve callee-saved registers since 5960 // the compilers are responsible for supplying a continuation point 5961 // if they expect all registers to be preserved. 5962 enum layout { 5963 rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt, 5964 rbp_off2, 5965 return_off, 5966 return_off2, 5967 framesize // inclusive of return address 5968 }; 5969 5970 int insts_size = 512; 5971 int locs_size = 64; 5972 5973 CodeBuffer code(name, insts_size, locs_size); 5974 OopMapSet* oop_maps = new OopMapSet(); 5975 MacroAssembler* masm = new MacroAssembler(&code); 5976 5977 address start = __ pc(); 5978 5979 // This is an inlined and slightly modified version of call_VM 5980 // which has the ability to fetch the return PC out of 5981 // thread-local storage and also sets up last_Java_sp slightly 5982 // differently than the real call_VM 5983 5984 __ enter(); // required for proper stackwalking of RuntimeStub frame 5985 5986 assert(is_even(framesize/2), "sp not 16-byte aligned"); 5987 5988 // return address and rbp are already in place 5989 __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog 5990 5991 int frame_complete = __ pc() - start; 5992 5993 // Set up last_Java_sp and last_Java_fp 5994 address the_pc = __ pc(); 5995 __ set_last_Java_frame(rsp, rbp, the_pc); 5996 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack 5997 5998 // Call runtime 5999 if (arg1 != noreg) { 6000 assert(arg2 != c_rarg1, "clobbered"); 6001 __ movptr(c_rarg1, arg1); 6002 } 6003 if (arg2 != noreg) { 6004 __ movptr(c_rarg2, arg2); 6005 } 6006 __ movptr(c_rarg0, r15_thread); 6007 BLOCK_COMMENT("call runtime_entry"); 6008 __ call(RuntimeAddress(runtime_entry)); 6009 6010 // Generate oop map 6011 OopMap* map = new OopMap(framesize, 0); 6012 6013 oop_maps->add_gc_map(the_pc - start, map); 6014 6015 __ reset_last_Java_frame(true); 6016 6017 __ leave(); // required for proper stackwalking of RuntimeStub frame 6018 6019 // check for pending exceptions 6020 #ifdef ASSERT 6021 Label L; 6022 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), 6023 (int32_t) NULL_WORD); 6024 __ jcc(Assembler::notEqual, L); 6025 __ should_not_reach_here(); 6026 __ bind(L); 6027 #endif // ASSERT 6028 __ 
jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
6029
6030
6031 // codeBlob framesize is in words (not VMRegImpl::slot_size)
6032 RuntimeStub* stub =
6033 RuntimeStub::new_runtime_stub(name,
6034                               &code,
6035                               frame_complete,
6036                               (framesize >> (LogBytesPerWord - LogBytesPerInt)),
6037                               oop_maps, false);
6038 return stub->entry_point();
6039 }
6040
6041 void create_control_words() {
6042 // Round to nearest, 53-bit mode, exceptions masked
6043 StubRoutines::_fpu_cntrl_wrd_std = 0x027F;
6044 // Round to zero, 53-bit mode, exceptions masked
6045 StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
6046 // Round to nearest, 24-bit mode, exceptions masked
6047 StubRoutines::_fpu_cntrl_wrd_24 = 0x007F;
6048 // Round to nearest, 64-bit mode, exceptions masked
6049 StubRoutines::_mxcsr_std = 0x1F80;
6050 // Note: the following two constants are 80-bit values;
6051 // their layout is critical for correct loading by the FPU.
6052 // Bias for strict fp multiply/divide
6053 StubRoutines::_fpu_subnormal_bias1[0]= 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
6054 StubRoutines::_fpu_subnormal_bias1[1]= 0x80000000;
6055 StubRoutines::_fpu_subnormal_bias1[2]= 0x03ff;
6056 // Un-Bias for strict fp multiply/divide
6057 StubRoutines::_fpu_subnormal_bias2[0]= 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
6058 StubRoutines::_fpu_subnormal_bias2[1]= 0x80000000;
6059 StubRoutines::_fpu_subnormal_bias2[2]= 0x7bff;
6060 }
6061
6062 // Initialization
6063 void generate_initial() {
6064 // Generates all stubs and initializes the entry points
6065
6066 // These platform-specific settings are needed by generate_call_stub()
6067 create_control_words();
6068
6069 // Entry points that exist on all platforms. Note: this is code
6070 // that could be shared among different platforms - however the
6071 // benefit seems to be smaller than the disadvantage of having a
6072 // much more complicated generator structure. See also the comment in
6073 // stubRoutines.hpp.
6074
6075 StubRoutines::_forward_exception_entry = generate_forward_exception();
6076
6077 StubRoutines::_call_stub_entry =
6078 generate_call_stub(StubRoutines::_call_stub_return_address);
6079
6080 // is referenced by megamorphic call
6081 StubRoutines::_catch_exception_entry = generate_catch_exception();
6082
6083 // atomic calls
6084 StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
6085 StubRoutines::_atomic_xchg_long_entry    = generate_atomic_xchg_long();
6086 StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
6087 StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
6088 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
6089 StubRoutines::_atomic_add_entry          = generate_atomic_add();
6090 StubRoutines::_atomic_add_long_entry     = generate_atomic_add_long();
6091 StubRoutines::_fence_entry               = generate_orderaccess_fence();
6092
6093 // platform dependent
6094 StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
6095 StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();
6096
6097 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
6098
6099 // Build this early so it's available for the interpreter.
6100 StubRoutines::_throw_StackOverflowError_entry =
6101 generate_throw_exception("StackOverflowError throw_exception",
6102                          CAST_FROM_FN_PTR(address,
6103                                           SharedRuntime::
6104                                           throw_StackOverflowError));
6105 StubRoutines::_throw_delayed_StackOverflowError_entry =
6106 generate_throw_exception("delayed StackOverflowError throw_exception",
6107                          CAST_FROM_FN_PTR(address,
6108                                           SharedRuntime::
6109                                           throw_delayed_StackOverflowError));
6110 if (UseCRC32Intrinsics) {
6111 // set the table address before generating the stubs that use it
6112 StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
6113 StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
6114 }
6115
6116 if (UseCRC32CIntrinsics) {
6117 bool supports_clmul = VM_Version::supports_clmul();
6118 StubRoutines::x86::generate_CRC32C_table(supports_clmul);
6119 StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
6120 StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
6121 }
6122 if (VM_Version::supports_sse2() && UseLibmIntrinsic && InlineIntrinsics) {
6123 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) ||
6124     vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos) ||
6125     vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
6126 StubRoutines::x86::_ONEHALF_adr = (address)StubRoutines::x86::_ONEHALF;
6127 StubRoutines::x86::_P_2_adr = (address)StubRoutines::x86::_P_2;
6128 StubRoutines::x86::_SC_4_adr = (address)StubRoutines::x86::_SC_4;
6129 StubRoutines::x86::_Ctable_adr = (address)StubRoutines::x86::_Ctable;
6130 StubRoutines::x86::_SC_2_adr = (address)StubRoutines::x86::_SC_2;
6131 StubRoutines::x86::_SC_3_adr = (address)StubRoutines::x86::_SC_3;
6132 StubRoutines::x86::_SC_1_adr = (address)StubRoutines::x86::_SC_1;
6133 StubRoutines::x86::_PI_INV_TABLE_adr = (address)StubRoutines::x86::_PI_INV_TABLE;
6134 StubRoutines::x86::_PI_4_adr = (address)StubRoutines::x86::_PI_4;
6135 StubRoutines::x86::_PI32INV_adr = (address)StubRoutines::x86::_PI32INV;
6136 StubRoutines::x86::_SIGN_MASK_adr = (address)StubRoutines::x86::_SIGN_MASK;
6137 StubRoutines::x86::_P_1_adr = (address)StubRoutines::x86::_P_1;
6138 StubRoutines::x86::_P_3_adr = (address)StubRoutines::x86::_P_3;
6139 StubRoutines::x86::_NEG_ZERO_adr = (address)StubRoutines::x86::_NEG_ZERO;
6140 }
6141 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dexp)) {
6142 StubRoutines::_dexp = generate_libmExp();
6143 }
6144 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
6145 StubRoutines::_dlog = generate_libmLog();
6146 }
6147 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) {
6148 StubRoutines::_dlog10 = generate_libmLog10();
6149 }
6150 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) {
6151 StubRoutines::_dpow = generate_libmPow();
6152 }
6153 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
6154 StubRoutines::_dsin = generate_libmSin();
6155 }
6156 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
6157 StubRoutines::_dcos = generate_libmCos();
6158 }
6159 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
6160 StubRoutines::_dtan = generate_libmTan();
6161 }
6162 }
6163 }
6164
6165 void generate_all() {
6166 // Generates all stubs and initializes the entry points
6167
6168 // These entry points require SharedInfo::stack0 to be set up in
6169 // non-core builds and need to be relocatable, so they each
6170 // fabricate a RuntimeStub internally.
6171 StubRoutines::_throw_AbstractMethodError_entry = 6172 generate_throw_exception("AbstractMethodError throw_exception", 6173 CAST_FROM_FN_PTR(address, 6174 SharedRuntime:: 6175 throw_AbstractMethodError)); 6176 6177 StubRoutines::_throw_IncompatibleClassChangeError_entry = 6178 generate_throw_exception("IncompatibleClassChangeError throw_exception", 6179 CAST_FROM_FN_PTR(address, 6180 SharedRuntime:: 6181 throw_IncompatibleClassChangeError)); 6182 6183 StubRoutines::_throw_NullPointerException_at_call_entry = 6184 generate_throw_exception("NullPointerException at call throw_exception", 6185 CAST_FROM_FN_PTR(address, 6186 SharedRuntime:: 6187 throw_NullPointerException_at_call)); 6188 6189 // entry points that are platform specific 6190 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup(); 6191 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup(); 6192 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup(); 6193 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup(); 6194 6195 StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF); 6196 StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000); 6197 StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF); 6198 StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000); 6199 StubRoutines::x86::_vector_float_sign_mask = generate_vector_mask("vector_float_sign_mask", 0x7FFFFFFF7FFFFFFF); 6200 StubRoutines::x86::_vector_float_sign_flip = generate_vector_mask("vector_float_sign_flip", 0x8000000080000000); 6201 StubRoutines::x86::_vector_double_sign_mask = generate_vector_mask("vector_double_sign_mask", 0x7FFFFFFFFFFFFFFF); 6202 StubRoutines::x86::_vector_double_sign_flip = generate_vector_mask("vector_double_sign_flip", 0x8000000000000000); 6203 StubRoutines::x86::_vector_short_to_byte_mask = generate_vector_mask("vector_short_to_byte_mask", 0x00ff00ff00ff00ff); 6204 StubRoutines::x86::_vector_byte_perm_mask = generate_vector_byte_perm_mask("vector_byte_perm_mask"); 6205 StubRoutines::x86::_vector_long_sign_mask = generate_vector_mask("vector_long_sign_mask", 0x8000000000000000); 6206 6207 // support for verify_oop (must happen after universe_init) 6208 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); 6209 6210 // data cache line writeback 6211 StubRoutines::_data_cache_writeback = generate_data_cache_writeback(); 6212 StubRoutines::_data_cache_writeback_sync = generate_data_cache_writeback_sync(); 6213 6214 // arraycopy stubs used by compilers 6215 generate_arraycopy_stubs(); 6216 6217 // don't bother generating these AES intrinsic stubs unless global flag is set 6218 if (UseAESIntrinsics) { 6219 StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others 6220 StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock(); 6221 StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock(); 6222 StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt(); 6223 if (VM_Version::supports_vaes() && VM_Version::supports_avx512vl() && VM_Version::supports_avx512dq() ) { 6224 StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptVectorAESCrypt(); 6225 StubRoutines::_electronicCodeBook_encryptAESCrypt = generate_electronicCodeBook_encryptAESCrypt(); 6226 StubRoutines::_electronicCodeBook_decryptAESCrypt = generate_electronicCodeBook_decryptAESCrypt(); 6227 } 
    if (UseAESCTRIntrinsics) {
      if (VM_Version::supports_vaes() && VM_Version::supports_avx512bw() && VM_Version::supports_avx512vl()) {
        StubRoutines::x86::_counter_mask_addr = counter_mask_addr();
        StubRoutines::_counterMode_AESCrypt = generate_counterMode_VectorAESCrypt();
      } else {
        StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask();
        StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
      }
    }

    if (UseSHA1Intrinsics) {
      StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
      StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
      StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress");
      StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB");
    }
    if (UseSHA256Intrinsics) {
      StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
      // Build the doubled round-constant table _k256_W: each 16-byte group of
      // four K constants from _k256 is copied into both halves of a 32-byte
      // entry, so a 256-bit load sees the same constants in both 128-bit lanes.
      char* dst = (char*)StubRoutines::x86::_k256_W;
      char* src = (char*)StubRoutines::x86::_k256;
      for (int ii = 0; ii < 16; ++ii) {
        memcpy(dst + 32 * ii,      src + 16 * ii, 16);
        memcpy(dst + 32 * ii + 16, src + 16 * ii, 16);
      }
      StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask();
      StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress");
      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB");
    }
    if (UseSHA512Intrinsics) {
      StubRoutines::x86::_k512_W_addr = (address)StubRoutines::x86::_k512_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = generate_pshuffle_byte_flip_mask_sha512();
      StubRoutines::_sha512_implCompress = generate_sha512_implCompress(false, "sha512_implCompress");
      StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB");
    }

    // Generate GHASH intrinsics code
    if (UseGHASHIntrinsics) {
      StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();
      StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask();
      if (VM_Version::supports_avx()) {
        StubRoutines::x86::_ghash_shuffmask_addr = ghash_shufflemask_addr();
        StubRoutines::x86::_ghash_poly_addr = ghash_polynomial_addr();
        StubRoutines::_ghash_processBlocks = generate_avx_ghash_processBlocks();
      } else {
        StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
      }
    }

    if (UseBASE64Intrinsics) {
      StubRoutines::x86::_and_mask = base64_and_mask_addr();
      StubRoutines::x86::_bswap_mask = base64_bswap_mask_addr();
      StubRoutines::x86::_base64_charset = base64_charset_addr();
      StubRoutines::x86::_url_charset = base64url_charset_addr();
      StubRoutines::x86::_gather_mask = base64_gather_mask_addr();
      StubRoutines::x86::_left_shift_mask = base64_left_shift_mask_addr();
      StubRoutines::x86::_right_shift_mask = base64_right_shift_mask_addr();
      StubRoutines::_base64_encodeBlock = generate_base64_encodeBlock();
    }

    // Safefetch stubs.
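    // Each SafeFetch stub performs a single load that is allowed to fault;
    // the recorded fault and continuation PCs let the signal handler resume
    // the stub past the faulting access instead of crashing the VM.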
    generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
                                                   &StubRoutines::_safefetch32_fault_pc,
                                                   &StubRoutines::_safefetch32_continuation_pc);
    generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                                                       &StubRoutines::_safefetchN_fault_pc,
                                                       &StubRoutines::_safefetchN_continuation_pc);

    BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
    if (bs_nm != NULL) {
      StubRoutines::x86::_method_entry_barrier = generate_method_entry_barrier();
    }
#ifdef COMPILER2
    if (UseMultiplyToLenIntrinsic) {
      StubRoutines::_multiplyToLen = generate_multiplyToLen();
    }
    if (UseSquareToLenIntrinsic) {
      StubRoutines::_squareToLen = generate_squareToLen();
    }
    if (UseMulAddIntrinsic) {
      StubRoutines::_mulAdd = generate_mulAdd();
    }
#ifndef _WINDOWS
    if (UseMontgomeryMultiplyIntrinsic) {
      StubRoutines::_montgomeryMultiply
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
    }
    if (UseMontgomerySquareIntrinsic) {
      StubRoutines::_montgomerySquare
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
    }
#endif // !_WINDOWS
#endif // COMPILER2

    if (UseVectorizedMismatchIntrinsic) {
      StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch();
    }
  }

 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
}; // end class declaration

#define UCM_TABLE_MAX_ENTRIES 16
void StubGenerator_generate(CodeBuffer* code, bool all) {
  if (UnsafeCopyMemory::_table == NULL) {
    UnsafeCopyMemory::create_table(UCM_TABLE_MAX_ENTRIES);
  }
  StubGenerator g(code, all);
}
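
// Note: StubGenerator_generate is invoked twice during VM startup (see
// stubRoutines.cpp): once with all == false for the initial stubs the
// interpreter needs, and once with all == true for the remaining stubs.
// The UnsafeCopyMemory table is created on the first call so that the
// arraycopy stubs can register their unsafe-copy code regions in it.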