/*
 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/zThreadLocalData.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions

// Stub Code definitions

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    // This can destroy rscratch1 if counter is far from the code cache
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter) \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif
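  // For readers new to HotSpot stubs (an orientation note, not generated
  // code): the "__" macro above expands to "_masm->", so the assembler
  // lines throughout this file are ordinary C++ method calls on the
  // MacroAssembler. For example:
  //
  //   __ enter();                  // expands to  _masm->enter();
  //   __ movptr(rbx_save, rbx);    // expands to  _masm->movptr(rbx_save, rbx);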
  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    c_rarg4:   (interpreter) entry point              address
  //    c_rarg5:   parameters                             intptr_t*
  //    16(rbp):   parameter size (in words)              int
  //    24(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -12 [ argument word 1      ]
  // -11 [ saved r15            ] <--- rsp_after_call
  // -10 [ saved r14            ]
  //  -9 [ saved r13            ]
  //  -8 [ saved r12            ]
  //  -7 [ saved rbx            ]
  //  -6 [ call wrapper         ]
  //  -5 [ result               ]
  //  -4 [ result type          ]
  //  -3 [ method               ]
  //  -2 [ entry point          ]
  //  -1 [ parameters           ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ parameter size       ]
  //   3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    48(rbp):   (interpreter) entry point              address
  //    56(rbp):   parameters                             intptr_t*
  //    64(rbp):   parameter size (in words)              int
  //    72(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -60 [ argument word 1      ]
  // -59 [ saved xmm31          ] <--- rsp_after_call
  //     [ saved xmm16-xmm30    ] (EVEX enabled, else the space is blank)
  // -27 [ saved xmm15          ]
  //     [ saved xmm7-xmm14     ]
  //  -9 [ saved xmm6           ] (each xmm register takes 2 slots)
  //  -7 [ saved r15            ]
  //  -6 [ saved r14            ]
  //  -5 [ saved r13            ]
  //  -4 [ saved r12            ]
  //  -3 [ saved rdi            ]
  //  -2 [ saved rsi            ]
  //  -1 [ saved rbx            ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ call wrapper         ]
  //   3 [ result               ]
  //   4 [ result type          ]
  //   5 [ method               ]
  //   6 [ entry point          ]
  //   7 [ parameters           ]
  //   8 [ parameter size       ]
  //   9 [ thread               ]
  //
  //    Windows reserves the caller's stack space for arguments 1-4.
  //    We spill c_rarg0-c_rarg3 to this space.

  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    xmm_save_first     = 6,  // save from xmm6
    xmm_save_last      = 31, // to xmm31
    xmm_save_base      = -9,
    rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -59
    r15_off            = -7,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    call_wrapper_off   =  2,
    result_off         =  3,
    result_type_off    =  4,
    method_off         =  5,
    entry_point_off    =  6,
    parameters_off     =  7,
    parameter_size_off =  8,
    thread_off         =  9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    parameter_size_off =  2,
    thread_off         =  3
#endif
  };

#ifdef _WIN64
  Address xmm_save(int reg) {
    assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
    return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
  }
#endif
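  // Worked example for the Win64 xmm_save() helper above (illustrative
  // arithmetic only): with xmm_save_base == -9 and two 8-byte slots per
  // register,
  //
  //   xmm_save(6)  == Address(rbp,  -9 * wordSize)
  //   xmm_save(7)  == Address(rbp, -11 * wordSize)
  //   xmm_save(31) == Address(rbp, -59 * wordSize)  // == rsp_after_call_off
  //
  // which matches the Windows stack layout diagram above.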
  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,   c_rarg5); // parameters
    __ movptr(entry_point,  c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);

#ifdef _WIN64
    int last_reg = 15;
    if (UseAVX > 2) {
      last_reg = 31;
    }
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vextractf32x4(xmm_save(i), as_XMMRegister(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(xmm_save(i), as_XMMRegister(i));
      }
    }

    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);     // parameter pointer
    __ movl(c_rarg1, c_rarg3);          // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0));// get parameter
    __ addptr(c_rarg2, wordSize);       // advance to next parameter
    __ decrementl(c_rarg1);             // decrement counter
    __ push(rax);                       // pass parameter
    __ jcc(Assembler::notZero, loop);
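    // Illustrative note (not generated code): for parameter_size == 2 the
    // loop above pushes parameters[0] first and parameters[1] second, so on
    // entry to the callee rsp points at parameters[1] and rsp + wordSize at
    // parameters[0].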
    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);             // get Method*
    __ movptr(c_rarg1, entry_point);    // get entry_point
    __ mov(r13, rsp);                   // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::call_stub: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::call_stub: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L3);
    }
#endif

    // restore regs belonging to calling function
#ifdef _WIN64
    // emit the restores for xmm regs
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vinsertf32x4(as_XMMRegister(i), as_XMMRegister(i), xmm_save(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(as_XMMRegister(i), xmm_save(i));
      }
    }
#endif
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ vzeroupper();
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }
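  // For orientation (a sketch, not part of this file): the VM invokes the
  // stub generated above through the CallStub function pointer published by
  // StubRoutines::call_stub(), roughly as JavaCalls::call_helper does:
  //
  //   StubRoutines::call_stub()(
  //       (address)&link,              // call wrapper
  //       result_val_address,          // result
  //       result_type,                 // BasicType
  //       method(),                    // Method*
  //       entry_point,                 // (interpreter) entry point
  //       args->parameters(),          // intptr_t* parameters
  //       args->size_of_parameters(),  // parameter size, in words
  //       CHECK);                      // current thread
  //
  // Names above follow javaCalls.cpp from memory; treat them as illustrative.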

  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to setup the
  // rsp.
  //
  // rax: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    address start = __ pc();

    // same as in generate_call_stub():
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
    const Address thread        (rbp, thread_off         * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::catch_exception: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::catch_exception: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L3);
    }
#endif

    // set pending exception
    __ verify_oop(rax);

    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
    __ lea(rscratch1, ExternalAddress((address)__FILE__));
    __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
    __ movl(Address(r15_thread, Thread::exception_line_offset()), (int)__LINE__);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL,
           "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }

  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }

  // Implementation of jint atomic_xchg(jint exchange_value, volatile jint* dest)
  // used by Atomic::xchg(volatile jint* dest, jint exchange_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0);              // Copy to eax we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Implementation of intptr_t atomic_xchg(jlong exchange_value, volatile jlong* dest)
  // used by Atomic::xchg(volatile jlong* dest, jlong exchange_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0);              // Copy to eax we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }
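  // Note on the cmpxchg stubs above and below (illustrative): x86
  // LOCK CMPXCHG compares rax with *dest; on a match it stores the new
  // value, otherwise it loads *dest into rax. Either way rax ends up
  // holding exactly the value the Result comments describe as the return
  // value, so no extra moves are needed after the instruction.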

  // Support for int8_t atomic::atomic_cmpxchg(int8_t exchange_value, volatile int8_t* dest,
  //                                           int8_t compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_byte() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
    address start = __ pc();

    __ movsbq(rax, c_rarg2);
    __ lock();
    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for int64_t atomic::atomic_cmpxchg(int64_t exchange_value,
  //                                            volatile int64_t* dest,
  //                                            int64_t compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);
    __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Implementation of jint atomic_add(jint add_value, volatile jint* dest)
  // used by Atomic::add(volatile jint* dest, jint add_value)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);
    __ addl(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Implementation of intptr_t atomic_add(intptr_t add_value, volatile intptr_t* dest)
  // used by Atomic::add(volatile intptr_t* dest, intptr_t add_value)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
    __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
    __ addptr(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess). This is used as part of debugging
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp  (rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // caller's fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }
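  // Illustrative walk of the stub above: after "enter", rbp holds this
  // stub's fp and *rbp holds the caller's fp, so the two loads amount to
  //
  //   rax = *rbp;   // caller's fp (current_frame_guess)
  //   rax = *rax;   // the fp one frame older, which ps() wants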

  // Support for intptr_t get_previous_sp()
  //
  // This routine is used to find the previous stack pointer for the
  // caller.
  address generate_get_previous_sp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_sp");
    address start = __ pc();

    __ movptr(rax, rsp);
    __ addptr(rax, 8); // return address is at the top of the stack.
    __ ret(0);

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize); // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK); // Only check control and mask bits
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICalls");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }

  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
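  // How the fixup above decides (an illustrative summary): 0x7f800000 is
  // the bit pattern of float +infinity, so after masking off the sign bit,
  //
  //   |bits| >  0x7f800000  -> NaN, result stays 0 (c_rarg3 was zeroed);
  //   otherwise             -> the sign bit picks min_jint (0x80000000)
  //                            or max_jint (0x7fffffff) via cmov.
  //
  // The same scheme, widened to 64-bit results or double inputs, drives
  // the f2l/d2i/d2l fixups that follow.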

  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_iota_indices(const char *stub_name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();
    __ emit_data64(0x0706050403020100, relocInfo::none);
    __ emit_data64(0x0F0E0D0C0B0A0908, relocInfo::none);
    __ emit_data64(0x1716151413121110, relocInfo::none);
    __ emit_data64(0x1F1E1D1C1B1A1918, relocInfo::none);
    __ emit_data64(0x2726252423222120, relocInfo::none);
    __ emit_data64(0x2F2E2D2C2B2A2928, relocInfo::none);
    __ emit_data64(0x3736353433323130, relocInfo::none);
    __ emit_data64(0x3F3E3D3C3B3A3938, relocInfo::none);
    return start;
  }

  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64( mask, relocInfo::none );
    __ emit_data64( mask, relocInfo::none );

    return start;
  }

  address generate_vector_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);

    return start;
  }

  address generate_vector_byte_perm_mask(const char *stub_name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64(0x0000000000000001, relocInfo::none);
    __ emit_data64(0x0000000000000003, relocInfo::none);
    __ emit_data64(0x0000000000000005, relocInfo::none);
    __ emit_data64(0x0000000000000007, relocInfo::none);
    __ emit_data64(0x0000000000000000, relocInfo::none);
    __ emit_data64(0x0000000000000002, relocInfo::none);
    __ emit_data64(0x0000000000000004, relocInfo::none);
    __ emit_data64(0x0000000000000006, relocInfo::none);

    return start;
  }

  address generate_vector_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);

    return start;
  }
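  // Typical uses of the mask generators above (a sketch; the exact names
  // and constants live in the initialization code elsewhere in this file
  // and are quoted here from memory):
  //
  //   // 16-byte mask that clears the sign bit of packed floats, e.g. for
  //   // an abs() implemented with ANDPS:
  //   generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF);
  //
  //   // the same 64-bit pattern replicated across a full 64-byte vector:
  //   generate_vector_mask("vector_all_bits_set", 0xFFFFFFFFFFFFFFFF);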

  address generate_vector_custom_i32(const char *stub_name, Assembler::AvxVectorLen len,
                                     int32_t val0, int32_t val1, int32_t val2, int32_t val3,
                                     int32_t val4 = 0, int32_t val5 = 0, int32_t val6 = 0, int32_t val7 = 0,
                                     int32_t val8 = 0, int32_t val9 = 0, int32_t val10 = 0, int32_t val11 = 0,
                                     int32_t val12 = 0, int32_t val13 = 0, int32_t val14 = 0, int32_t val15 = 0) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    assert(len != Assembler::AVX_NoVec, "vector len must be specified");
    __ emit_data(val0, relocInfo::none, 0);
    __ emit_data(val1, relocInfo::none, 0);
    __ emit_data(val2, relocInfo::none, 0);
    __ emit_data(val3, relocInfo::none, 0);
    if (len >= Assembler::AVX_256bit) {
      __ emit_data(val4, relocInfo::none, 0);
      __ emit_data(val5, relocInfo::none, 0);
      __ emit_data(val6, relocInfo::none, 0);
      __ emit_data(val7, relocInfo::none, 0);
      if (len >= Assembler::AVX_512bit) {
        __ emit_data(val8, relocInfo::none, 0);
        __ emit_data(val9, relocInfo::none, 0);
        __ emit_data(val10, relocInfo::none, 0);
        __ emit_data(val11, relocInfo::none, 0);
        __ emit_data(val12, relocInfo::none, 0);
        __ emit_data(val13, relocInfo::none, 0);
        __ emit_data(val14, relocInfo::none, 0);
        __ emit_data(val15, relocInfo::none, 0);
      }
    }

    return start;
  }
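  // Hypothetical call (illustrative only; "vector_int_shuffle" is a made-up
  // name): emit a 32-byte table holding lane indices 0..7, suitable as a
  // VPERMD shuffle constant:
  //
  //   generate_vector_custom_i32("vector_int_shuffle", Assembler::AVX_256bit,
  //                              0, 1, 2, 3, 4, 5, 6, 7);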

  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,
      saved_r10     = 8 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr   = 16 * wordSize,
      error_msg     = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit); // if obj is NULL it is OK

#if INCLUDE_ZGC
    if (UseZGC) {
      // Check if metadata bits indicate a bad oop
      __ testptr(rax, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
      __ jcc(Assembler::notZero, error);
    }
#endif

    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // make sure klass is 'reasonable', which is not zero.
    __ load_klass(rax, rax, rscratch1); // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // restore c_rarg3
    __ pop(c_rarg2);                               // restore c_rarg2
    __ pop(r12);                                   // restore r12
    __ popf();                                     // restore flags
    __ ret(4 * wordSize);                          // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // get saved c_rarg3 back
    __ pop(c_rarg2);                               // get saved c_rarg2 back
    __ pop(r12);                                   // get saved r12 back
    __ popf();                                     // get saved flags off stack --
                                                   // will be ignored

    __ pusha();                                    // push registers
                                                   // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //     [tos +  0] 16 saved registers
    //     [tos + 16] return address
    //   * [tos + 17] error message (char*)
    //   * [tos + 18] object to verify (oop)
    //   * [tos + 19] saved rax - saved by caller and bashed
    //   * [tos + 20] saved r10 (rscratch1) - saved by caller
    //   * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ hlt();
    return start;
  }
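  // The memory-range check above boils down to (illustrative form):
  //
  //   (candidate_oop & Universe::verify_oop_mask()) == Universe::verify_oop_bits()
  //
  // i.e. every plausible oop must carry a fixed bit pattern under the mask;
  // anything else falls through to MacroAssembler::debug64 with the
  // caller-supplied error message.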
  //
  // Verify that a register contains a clean 32-bit positive value
  // (high 32 bits are 0) so it can be used in 64-bit shifts.
  //
  //  Input:
  //    Rint  -  32-bit value
  //    Rtmp  -  scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#ifdef ASSERT
    Label L;
    assert_different_registers(Rtmp, Rint);
    __ movslq(Rtmp, Rint);
    __ cmpq(Rtmp, Rint);
    __ jcc(Assembler::equal, L);
    __ stop("high 32-bits of int value are not 0");
    __ bind(L);
#endif
  }

  //  Generate overlap test for array copy stubs
  //
  //  Input:
  //     c_rarg0 - from
  //     c_rarg1 - to
  //     c_rarg2 - element count
  //
  //  Output:
  //     rax   - &from[element count - 1]
  //
  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(NULL, &L_no_overlap, sf);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
    const Register from     = c_rarg0;
    const Register to       = c_rarg1;
    const Register count    = c_rarg2;
    const Register end_from = rax;

    __ cmpptr(to, from);
    __ lea(end_from, Address(from, count, sf, 0));
    if (NOLp == NULL) {
      ExternalAddress no_overlap(no_overlap_target);
      __ jump_cc(Assembler::belowEqual, no_overlap);
      __ cmpptr(to, end_from);
      __ jump_cc(Assembler::aboveEqual, no_overlap);
    } else {
      __ jcc(Assembler::belowEqual, (*NOLp));
      __ cmpptr(to, end_from);
      __ jcc(Assembler::aboveEqual, (*NOLp));
    }
  }

  // Shuffle first three arg regs on Windows into Linux/Solaris locations.
  //
  // Outputs:
  //    rdi - rcx
  //    rsi - rdx
  //    rdx - r8
  //    rcx - r9
  //
  // Registers r9 and r10 are used to save rdi and rsi on Windows, where
  // those registers are non-volatile.  r9 and r10 should not be used by
  // the caller.
  //
  DEBUG_ONLY(bool regs_in_thread;)

  void setup_arg_regs(int nargs = 3) {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
    assert(nargs == 3 || nargs == 4, "else fix");
#ifdef _WIN64
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    if (nargs >= 4)
      __ mov(rax, r9);  // r9 is also saved_rdi
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
    if (nargs >= 4)
      __ mov(rcx, rax); // c_rarg3 (via rax)
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
    DEBUG_ONLY(regs_in_thread = false;)
  }

  void restore_arg_regs() {
    assert(!regs_in_thread, "wrong call to restore_arg_regs");
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
#ifdef _WIN64
    __ movptr(rdi, saved_rdi);
    __ movptr(rsi, saved_rsi);
#endif
  }

  // This is used in places where r10 is a scratch register, and can
  // be adapted if r9 is needed also.
  void setup_arg_regs_using_thread() {
    const Register saved_r15 = r9;
#ifdef _WIN64
    __ mov(saved_r15, r15);  // r15 is callee saved and needs to be restored
    __ get_thread(r15_thread);
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    __ movptr(Address(r15_thread, in_bytes(JavaThread::windows_saved_rdi_offset())), rdi);
    __ movptr(Address(r15_thread, in_bytes(JavaThread::windows_saved_rsi_offset())), rsi);

    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
    DEBUG_ONLY(regs_in_thread = true;)
  }

  void restore_arg_regs_using_thread() {
    assert(regs_in_thread, "wrong call to restore_arg_regs");
    const Register saved_r15 = r9;
#ifdef _WIN64
    __ get_thread(r15_thread);
    __ movptr(rsi, Address(r15_thread, in_bytes(JavaThread::windows_saved_rsi_offset())));
    __ movptr(rdi, Address(r15_thread, in_bytes(JavaThread::windows_saved_rdi_offset())));
    __ mov(r15, saved_r15);  // r15 is callee saved and needs to be restored
#endif
  }

  // Copy big chunks forward
  //
  // Inputs:
  //   end_from       - source array end address
  //   end_to         - destination array end address
  //   qword_count    - 64-bit element count, negative
  //   to             - scratch
  //   L_copy_bytes   - entry label
  //   L_copy_8_bytes - exit label
  //
  void copy_bytes_forward(Register end_from, Register end_to,
                          Register qword_count, Register to,
                          Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      // Copy 64 bytes per iteration
      if (UseAVX > 2) {
        Label L_loop_avx512, L_loop_avx2, L_32_byte_head, L_above_threshold, L_below_threshold;

        __ BIND(L_copy_bytes);
        __ cmpptr(qword_count, (-1 * AVX3Threshold / 8));
        __ jccb(Assembler::less, L_above_threshold);
        __ jmpb(L_below_threshold);

        __ bind(L_loop_avx512);
        __ evmovdqul(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit);
        __ evmovdqul(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit);
        __ bind(L_above_threshold);
        __ addptr(qword_count, 8);
        __ jcc(Assembler::lessEqual, L_loop_avx512);
        __ jmpb(L_32_byte_head);

        __ bind(L_loop_avx2);
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
        __ bind(L_below_threshold);
        __ addptr(qword_count, 8);
        __ jcc(Assembler::lessEqual, L_loop_avx2);

        __ bind(L_32_byte_head);
        __ subptr(qword_count, 4);  // sub(8) and add(4)
        __ jccb(Assembler::greater, L_end);
      } else {
        __ BIND(L_loop);
        if (UseAVX == 2) {
          __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
          __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
          __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
          __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
        } else {
          __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
          __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
          __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
          __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
          __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
          __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
          __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
          __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
        }

        __ BIND(L_copy_bytes);
        __ addptr(qword_count, 8);
        __ jcc(Assembler::lessEqual, L_loop);
        __ subptr(qword_count, 4);  // sub(8) and add(4)
        __ jccb(Assembler::greater, L_end);
      }
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
      }
      __ addptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32 bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
      __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
      __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
      __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
      __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);

      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 4);
      __ jcc(Assembler::lessEqual, L_loop);
    }
    __ subptr(qword_count, 4);
    __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
  }
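  // Shape of the forward loop above (a sketch, not generated code): with
  // qword_count negative and counting up toward zero, end_from/end_to stay
  // fixed and the effective address walks forward through the arrays,
  // roughly:
  //
  //   for (ptrdiff_t q = -n + 8; q <= 0; q += 8)   // 64 bytes per step
  //     copy64(end_to + 8*q - 56, end_from + 8*q - 56);
  //
  // with the 32-, 8-, 4-, 2- and 1-byte tails handled by the callers.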

  // Copy big chunks backward
  //
  // Inputs:
  //   from           - source array address
  //   dest           - destination array address
  //   qword_count    - 64-bit element count
  //   to             - scratch
  //   L_copy_bytes   - entry label
  //   L_copy_8_bytes - exit label
  //
  void copy_bytes_backward(Register from, Register dest,
                           Register qword_count, Register to,
                           Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      // Copy 64 bytes per iteration
      if (UseAVX > 2) {
        Label L_loop_avx512, L_loop_avx2, L_32_byte_head, L_above_threshold, L_below_threshold;

        __ BIND(L_copy_bytes);
        __ cmpptr(qword_count, (AVX3Threshold / 8));
        __ jccb(Assembler::greater, L_above_threshold);
        __ jmpb(L_below_threshold);

        __ BIND(L_loop_avx512);
        __ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 0), Assembler::AVX_512bit);
        __ evmovdqul(Address(dest, qword_count, Address::times_8, 0), xmm0, Assembler::AVX_512bit);
        __ bind(L_above_threshold);
        __ subptr(qword_count, 8);
        __ jcc(Assembler::greaterEqual, L_loop_avx512);
        __ jmpb(L_32_byte_head);

        __ bind(L_loop_avx2);
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
        __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
        __ bind(L_below_threshold);
        __ subptr(qword_count, 8);
        __ jcc(Assembler::greaterEqual, L_loop_avx2);

        __ bind(L_32_byte_head);
        __ addptr(qword_count, 4);  // add(8) and sub(4)
        __ jccb(Assembler::less, L_end);
      } else {
        __ BIND(L_loop);
        if (UseAVX == 2) {
          __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
          __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
          __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
          __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
        } else {
          __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
          __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
          __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
          __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
          __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
          __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
          __ movdqu(xmm3, Address(from, qword_count, Address::times_8, 0));
          __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm3);
        }

        __ BIND(L_copy_bytes);
        __ subptr(qword_count, 8);
        __ jcc(Assembler::greaterEqual, L_loop);

        __ addptr(qword_count, 4);  // add(8) and sub(4)
        __ jccb(Assembler::less, L_end);
      }
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
        __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
      }
      __ subptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32 bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(from, qword_count, Address::times_8, 24));
      __ movq(Address(dest, qword_count, Address::times_8, 24), to);
      __ movq(to, Address(from, qword_count, Address::times_8, 16));
      __ movq(Address(dest, qword_count, Address::times_8, 16), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  8));
      __ movq(Address(dest, qword_count, Address::times_8,  8), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  0));
      __ movq(Address(dest, qword_count, Address::times_8,  0), to);

      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 4);
      __ jcc(Assembler::greaterEqual, L_loop);
    }
    __ addptr(qword_count, 4);
    __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
  }

  // Arguments:
  //    aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //              ignored
  //    name    - stub name string
  //
  // Inputs:
  //    c_rarg0 - source array address
  //    c_rarg1 - destination array address
  //    c_rarg2 - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_byte_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_byte_copy().
  //
  address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    Label L_copy_byte, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !aligned, true);
      // 'from', 'to' and 'count' are now valid
      __ movptr(byte_count, count);
      __ shrptr(count, 3); // count => qword_count

      // Copy from low to high addresses.  Use 'to' as scratch.
      __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
      __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
      __ negptr(qword_count); // make the count negative
      __ jmp(L_copy_bytes);

      // Copy trailing qwords
      __ BIND(L_copy_8_bytes);
      __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
      __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
      __ increment(qword_count);
      __ jcc(Assembler::notZero, L_copy_8_bytes);

      // Check for and copy trailing dword
      __ BIND(L_copy_4_bytes);
      __ testl(byte_count, 4);
      __ jccb(Assembler::zero, L_copy_2_bytes);
      __ movl(rax, Address(end_from, 8));
      __ movl(Address(end_to, 8), rax);

      __ addptr(end_from, 4);
      __ addptr(end_to, 4);

      // Check for and copy trailing word
      __ BIND(L_copy_2_bytes);
      __ testl(byte_count, 2);
      __ jccb(Assembler::zero, L_copy_byte);
      __ movw(rax, Address(end_from, 8));
      __ movw(Address(end_to, 8), rax);

      __ addptr(end_from, 2);
      __ addptr(end_to, 2);

      // Check for and copy trailing byte
      __ BIND(L_copy_byte);
      __ testl(byte_count, 1);
      __ jccb(Assembler::zero, L_exit);
      __ movb(rax, Address(end_from, 8));
      __ movb(Address(end_to, 8), rax);
    }
    __ BIND(L_exit);
    address ucme_exit_pc = __ pc();
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    {
      UnsafeCopyMemoryMark ucmm(this, !aligned, false, ucme_exit_pc);
      // Copy in multi-byte chunks
      copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
      __ jmp(L_copy_4_bytes);
    }
    return start;
  }
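  // Worked example of the tail handling above (illustrative): for a byte
  // count of 15, qword_count == 15 >> 3 == 1, so the qword loop copies 8
  // bytes; byte_count then has bits 4, 2 and 1 set, so the dword, word and
  // byte tails copy the remaining 4 + 2 + 1 bytes.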

  // Arguments:
  //    aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //              ignored
  //    name    - stub name string
  //
  // Inputs:
  //    c_rarg0 - source array address
  //    c_rarg1 - destination array address
  //    c_rarg2 - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !aligned, true);
      // 'from', 'to' and 'count' are now valid
      __ movptr(byte_count, count);
      __ shrptr(count, 3); // count => qword_count

      // Copy from high to low addresses.

      // Check for and copy trailing byte
      __ testl(byte_count, 1);
      __ jcc(Assembler::zero, L_copy_2_bytes);
      __ movb(rax, Address(from, byte_count, Address::times_1, -1));
      __ movb(Address(to, byte_count, Address::times_1, -1), rax);
      __ decrement(byte_count); // Adjust for possible trailing word

      // Check for and copy trailing word
      __ BIND(L_copy_2_bytes);
      __ testl(byte_count, 2);
      __ jcc(Assembler::zero, L_copy_4_bytes);
      __ movw(rax, Address(from, byte_count, Address::times_1, -2));
      __ movw(Address(to, byte_count, Address::times_1, -2), rax);

      // Check for and copy trailing dword
      __ BIND(L_copy_4_bytes);
      __ testl(byte_count, 4);
      __ jcc(Assembler::zero, L_copy_bytes);
      __ movl(rax, Address(from, qword_count, Address::times_8));
      __ movl(Address(to, qword_count, Address::times_8), rax);
      __ jmp(L_copy_bytes);

      // Copy trailing qwords
      __ BIND(L_copy_8_bytes);
      __ movq(rax, Address(from, qword_count, Address::times_8, -8));
      __ movq(Address(to, qword_count, Address::times_8, -8), rax);
      __ decrement(qword_count);
      __ jcc(Assembler::notZero, L_copy_8_bytes);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !aligned, true);
      // Copy in multi-byte chunks
      copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
1789 // 1790 address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) { 1791 __ align(CodeEntryAlignment); 1792 StubCodeMark mark(this, "StubRoutines", name); 1793 address start = __ pc(); 1794 1795 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes,L_copy_2_bytes,L_exit; 1796 const Register from = rdi; // source array address 1797 const Register to = rsi; // destination array address 1798 const Register count = rdx; // elements count 1799 const Register word_count = rcx; 1800 const Register qword_count = count; 1801 const Register end_from = from; // source array end address 1802 const Register end_to = to; // destination array end address 1803 // End pointers are inclusive, and if count is not zero they point 1804 // to the last unit copied: end_to[0] := end_from[0] 1805 1806 __ enter(); // required for proper stackwalking of RuntimeStub frame 1807 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1808 1809 if (entry != NULL) { 1810 *entry = __ pc(); 1811 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1812 BLOCK_COMMENT("Entry:"); 1813 } 1814 1815 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1816 // r9 and r10 may be used to save non-volatile registers 1817 1818 { 1819 // UnsafeCopyMemory page error: continue after ucm 1820 UnsafeCopyMemoryMark ucmm(this, !aligned, true); 1821 // 'from', 'to' and 'count' are now valid 1822 __ movptr(word_count, count); 1823 __ shrptr(count, 2); // count => qword_count 1824 1825 // Copy from low to high addresses. Use 'to' as scratch. 1826 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1827 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1828 __ negptr(qword_count); 1829 __ jmp(L_copy_bytes); 1830 1831 // Copy trailing qwords 1832 __ BIND(L_copy_8_bytes); 1833 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1834 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1835 __ increment(qword_count); 1836 __ jcc(Assembler::notZero, L_copy_8_bytes); 1837 1838 // Original 'dest' is trashed, so we can't use it as a 1839 // base register for a possible trailing word copy 1840 1841 // Check for and copy trailing dword 1842 __ BIND(L_copy_4_bytes); 1843 __ testl(word_count, 2); 1844 __ jccb(Assembler::zero, L_copy_2_bytes); 1845 __ movl(rax, Address(end_from, 8)); 1846 __ movl(Address(end_to, 8), rax); 1847 1848 __ addptr(end_from, 4); 1849 __ addptr(end_to, 4); 1850 1851 // Check for and copy trailing word 1852 __ BIND(L_copy_2_bytes); 1853 __ testl(word_count, 1); 1854 __ jccb(Assembler::zero, L_exit); 1855 __ movw(rax, Address(end_from, 8)); 1856 __ movw(Address(end_to, 8), rax); 1857 } 1858 __ BIND(L_exit); 1859 address ucme_exit_pc = __ pc(); 1860 restore_arg_regs(); 1861 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1862 __ xorptr(rax, rax); // return 0 1863 __ vzeroupper(); 1864 __ leave(); // required for proper stackwalking of RuntimeStub frame 1865 __ ret(0); 1866 1867 { 1868 UnsafeCopyMemoryMark ucmm(this, !aligned, false, ucme_exit_pc); 1869 // Copy in multi-bytes chunks 1870 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1871 __ jmp(L_copy_4_bytes); 1872 } 1873 1874 return start; 1875 } 1876 1877 address generate_fill(BasicType t, bool aligned, const char *name) { 1878 __ align(CodeEntryAlignment); 1879 StubCodeMark mark(this, "StubRoutines", name); 1880 address start = __ pc(); 1881 1882 BLOCK_COMMENT("Entry:"); 1883 
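// The fill stub emitted below behaves like this C loop, specialized by
// element type t (a sketch only; MacroAssembler::generate_fill is handed
// rax and xmm0 as scratch and may widen 'value' into larger stores):
//
//   for (size_t i = 0; i < count; i++) {
//     to[i] = value;   // jbyte/jshort/jint element depending on t
//   }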
1884 const Register to = c_rarg0; // destination array address
1885 const Register value = c_rarg1; // value
1886 const Register count = c_rarg2; // elements count
1887
1888 __ enter(); // required for proper stackwalking of RuntimeStub frame
1889
1890 __ generate_fill(t, aligned, to, value, count, rax, xmm0);
1891
1892 __ vzeroupper();
1893 __ leave(); // required for proper stackwalking of RuntimeStub frame
1894 __ ret(0);
1895 return start;
1896 }
1897
1898 // Arguments:
1899 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1900 // ignored
1901 // name - stub name string
1902 //
1903 // Inputs:
1904 // c_rarg0 - source array address
1905 // c_rarg1 - destination array address
1906 // c_rarg2 - element count, treated as ssize_t, can be zero
1907 //
1908 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
1909 // let the hardware handle it. The two or four words within dwords
1910 // or qwords that span cache line boundaries will still be loaded
1911 // and stored atomically.
1912 //
1913 address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
1914 address *entry, const char *name) {
1915 __ align(CodeEntryAlignment);
1916 StubCodeMark mark(this, "StubRoutines", name);
1917 address start = __ pc();
1918
1919 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes;
1920 const Register from = rdi; // source array address
1921 const Register to = rsi; // destination array address
1922 const Register count = rdx; // elements count
1923 const Register word_count = rcx;
1924 const Register qword_count = count;
1925
1926 __ enter(); // required for proper stackwalking of RuntimeStub frame
1927 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1928
1929 if (entry != NULL) {
1930 *entry = __ pc();
1931 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1932 BLOCK_COMMENT("Entry:");
1933 }
1934
1935 array_overlap_test(nooverlap_target, Address::times_2);
1936 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1937 // r9 and r10 may be used to save non-volatile registers
1938
1939 {
1940 // UnsafeCopyMemory page error: continue after ucm
1941 UnsafeCopyMemoryMark ucmm(this, !aligned, true);
1942 // 'from', 'to' and 'count' are now valid
1943 __ movptr(word_count, count);
1944 __ shrptr(count, 2); // count => qword_count
1945
1946 // Copy from high to low addresses.
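// The overlap-safe (conjoint) loop peels the tail off the high end first,
// then copies whole qwords downward. Roughly, in C (illustrative only,
// with n = word_count):
//
//   if (n & 1) copy words[n-1];                // trailing word at the top
//   if (n & 2) copy the dword just below it;   // trailing dword
//   copy the remaining qwords high to low;     // bulk loop
//
// so for dst > src no destination location is written before it is read.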
1947 1948 // Check for and copy trailing word 1949 __ testl(word_count, 1); 1950 __ jccb(Assembler::zero, L_copy_4_bytes); 1951 __ movw(rax, Address(from, word_count, Address::times_2, -2)); 1952 __ movw(Address(to, word_count, Address::times_2, -2), rax); 1953 1954 // Check for and copy trailing dword 1955 __ BIND(L_copy_4_bytes); 1956 __ testl(word_count, 2); 1957 __ jcc(Assembler::zero, L_copy_bytes); 1958 __ movl(rax, Address(from, qword_count, Address::times_8)); 1959 __ movl(Address(to, qword_count, Address::times_8), rax); 1960 __ jmp(L_copy_bytes); 1961 1962 // Copy trailing qwords 1963 __ BIND(L_copy_8_bytes); 1964 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1965 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1966 __ decrement(qword_count); 1967 __ jcc(Assembler::notZero, L_copy_8_bytes); 1968 } 1969 restore_arg_regs(); 1970 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1971 __ xorptr(rax, rax); // return 0 1972 __ vzeroupper(); 1973 __ leave(); // required for proper stackwalking of RuntimeStub frame 1974 __ ret(0); 1975 1976 { 1977 // UnsafeCopyMemory page error: continue after ucm 1978 UnsafeCopyMemoryMark ucmm(this, !aligned, true); 1979 // Copy in multi-bytes chunks 1980 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1981 } 1982 restore_arg_regs(); 1983 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1984 __ xorptr(rax, rax); // return 0 1985 __ vzeroupper(); 1986 __ leave(); // required for proper stackwalking of RuntimeStub frame 1987 __ ret(0); 1988 1989 return start; 1990 } 1991 1992 // Arguments: 1993 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1994 // ignored 1995 // is_oop - true => oop array, so generate store check code 1996 // name - stub name string 1997 // 1998 // Inputs: 1999 // c_rarg0 - source array address 2000 // c_rarg1 - destination array address 2001 // c_rarg2 - element count, treated as ssize_t, can be zero 2002 // 2003 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 2004 // the hardware handle it. The two dwords within qwords that span 2005 // cache line boundaries will still be loaded and stored atomicly. 2006 // 2007 // Side Effects: 2008 // disjoint_int_copy_entry is set to the no-overlap entry point 2009 // used by generate_conjoint_int_oop_copy(). 2010 // 2011 address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry, 2012 const char *name, bool dest_uninitialized = false) { 2013 __ align(CodeEntryAlignment); 2014 StubCodeMark mark(this, "StubRoutines", name); 2015 address start = __ pc(); 2016 2017 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit; 2018 const Register from = rdi; // source array address 2019 const Register to = rsi; // destination array address 2020 const Register count = rdx; // elements count 2021 const Register dword_count = rcx; 2022 const Register qword_count = count; 2023 const Register end_from = from; // source array end address 2024 const Register end_to = to; // destination array end address 2025 // End pointers are inclusive, and if count is not zero they point 2026 // to the last unit copied: end_to[0] := end_from[0] 2027 2028 __ enter(); // required for proper stackwalking of RuntimeStub frame 2029 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 
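// 'Clean int' means the 32-bit element count arrives properly sign-extended
// in its 64-bit register; the invariant being asserted is roughly:
//
//   assert((jlong)(jint)count == (jlong)count, "not a clean int");
//
// which lets the shifts and scaled addressing below use the full register.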
2030 2031 if (entry != NULL) { 2032 *entry = __ pc(); 2033 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2034 BLOCK_COMMENT("Entry:"); 2035 } 2036 2037 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx 2038 // r9 is used to save r15_thread 2039 2040 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT; 2041 if (dest_uninitialized) { 2042 decorators |= IS_DEST_UNINITIALIZED; 2043 } 2044 if (aligned) { 2045 decorators |= ARRAYCOPY_ALIGNED; 2046 } 2047 2048 BasicType type = is_oop ? T_OBJECT : T_INT; 2049 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2050 bs->arraycopy_prologue(_masm, decorators, type, from, to, count); 2051 2052 { 2053 // UnsafeCopyMemory page error: continue after ucm 2054 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2055 // 'from', 'to' and 'count' are now valid 2056 __ movptr(dword_count, count); 2057 __ shrptr(count, 1); // count => qword_count 2058 2059 // Copy from low to high addresses. Use 'to' as scratch. 2060 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 2061 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 2062 __ negptr(qword_count); 2063 __ jmp(L_copy_bytes); 2064 2065 // Copy trailing qwords 2066 __ BIND(L_copy_8_bytes); 2067 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 2068 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 2069 __ increment(qword_count); 2070 __ jcc(Assembler::notZero, L_copy_8_bytes); 2071 2072 // Check for and copy trailing dword 2073 __ BIND(L_copy_4_bytes); 2074 __ testl(dword_count, 1); // Only byte test since the value is 0 or 1 2075 __ jccb(Assembler::zero, L_exit); 2076 __ movl(rax, Address(end_from, 8)); 2077 __ movl(Address(end_to, 8), rax); 2078 } 2079 __ BIND(L_exit); 2080 address ucme_exit_pc = __ pc(); 2081 bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count); 2082 restore_arg_regs_using_thread(); 2083 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2084 __ vzeroupper(); 2085 __ xorptr(rax, rax); // return 0 2086 __ leave(); // required for proper stackwalking of RuntimeStub frame 2087 __ ret(0); 2088 2089 { 2090 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, false, ucme_exit_pc); 2091 // Copy in multi-bytes chunks 2092 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2093 __ jmp(L_copy_4_bytes); 2094 } 2095 2096 return start; 2097 } 2098 2099 // Arguments: 2100 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 2101 // ignored 2102 // is_oop - true => oop array, so generate store check code 2103 // name - stub name string 2104 // 2105 // Inputs: 2106 // c_rarg0 - source array address 2107 // c_rarg1 - destination array address 2108 // c_rarg2 - element count, treated as ssize_t, can be zero 2109 // 2110 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 2111 // the hardware handle it. The two dwords within qwords that span 2112 // cache line boundaries will still be loaded and stored atomicly. 
2113 // 2114 address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target, 2115 address *entry, const char *name, 2116 bool dest_uninitialized = false) { 2117 __ align(CodeEntryAlignment); 2118 StubCodeMark mark(this, "StubRoutines", name); 2119 address start = __ pc(); 2120 2121 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2122 const Register from = rdi; // source array address 2123 const Register to = rsi; // destination array address 2124 const Register count = rdx; // elements count 2125 const Register dword_count = rcx; 2126 const Register qword_count = count; 2127 2128 __ enter(); // required for proper stackwalking of RuntimeStub frame 2129 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2130 2131 if (entry != NULL) { 2132 *entry = __ pc(); 2133 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2134 BLOCK_COMMENT("Entry:"); 2135 } 2136 2137 array_overlap_test(nooverlap_target, Address::times_4); 2138 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx 2139 // r9 is used to save r15_thread 2140 2141 DecoratorSet decorators = IN_HEAP | IS_ARRAY; 2142 if (dest_uninitialized) { 2143 decorators |= IS_DEST_UNINITIALIZED; 2144 } 2145 if (aligned) { 2146 decorators |= ARRAYCOPY_ALIGNED; 2147 } 2148 2149 BasicType type = is_oop ? T_OBJECT : T_INT; 2150 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2151 // no registers are destroyed by this call 2152 bs->arraycopy_prologue(_masm, decorators, type, from, to, count); 2153 2154 assert_clean_int(count, rax); // Make sure 'count' is clean int. 2155 { 2156 // UnsafeCopyMemory page error: continue after ucm 2157 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2158 // 'from', 'to' and 'count' are now valid 2159 __ movptr(dword_count, count); 2160 __ shrptr(count, 1); // count => qword_count 2161 2162 // Copy from high to low addresses. Use 'to' as scratch. 
2163 2164 // Check for and copy trailing dword 2165 __ testl(dword_count, 1); 2166 __ jcc(Assembler::zero, L_copy_bytes); 2167 __ movl(rax, Address(from, dword_count, Address::times_4, -4)); 2168 __ movl(Address(to, dword_count, Address::times_4, -4), rax); 2169 __ jmp(L_copy_bytes); 2170 2171 // Copy trailing qwords 2172 __ BIND(L_copy_8_bytes); 2173 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2174 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2175 __ decrement(qword_count); 2176 __ jcc(Assembler::notZero, L_copy_8_bytes); 2177 } 2178 if (is_oop) { 2179 __ jmp(L_exit); 2180 } 2181 restore_arg_regs_using_thread(); 2182 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2183 __ xorptr(rax, rax); // return 0 2184 __ vzeroupper(); 2185 __ leave(); // required for proper stackwalking of RuntimeStub frame 2186 __ ret(0); 2187 2188 { 2189 // UnsafeCopyMemory page error: continue after ucm 2190 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2191 // Copy in multi-bytes chunks 2192 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2193 } 2194 2195 __ BIND(L_exit); 2196 bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count); 2197 restore_arg_regs_using_thread(); 2198 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2199 __ xorptr(rax, rax); // return 0 2200 __ vzeroupper(); 2201 __ leave(); // required for proper stackwalking of RuntimeStub frame 2202 __ ret(0); 2203 2204 return start; 2205 } 2206 2207 // Arguments: 2208 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2209 // ignored 2210 // is_oop - true => oop array, so generate store check code 2211 // name - stub name string 2212 // 2213 // Inputs: 2214 // c_rarg0 - source array address 2215 // c_rarg1 - destination array address 2216 // c_rarg2 - element count, treated as ssize_t, can be zero 2217 // 2218 // Side Effects: 2219 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the 2220 // no-overlap entry point used by generate_conjoint_long_oop_copy(). 2221 // 2222 address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry, 2223 const char *name, bool dest_uninitialized = false) { 2224 __ align(CodeEntryAlignment); 2225 StubCodeMark mark(this, "StubRoutines", name); 2226 address start = __ pc(); 2227 2228 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2229 const Register from = rdi; // source array address 2230 const Register to = rsi; // destination array address 2231 const Register qword_count = rdx; // elements count 2232 const Register end_from = from; // source array end address 2233 const Register end_to = rcx; // destination array end address 2234 const Register saved_count = r11; 2235 // End pointers are inclusive, and if count is not zero they point 2236 // to the last unit copied: end_to[0] := end_from[0] 2237 2238 __ enter(); // required for proper stackwalking of RuntimeStub frame 2239 // Save no-overlap entry point for generate_conjoint_long_oop_copy() 2240 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 
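// Note: this stub moves 8-byte elements, so it serves jlong[] as well as
// uncompressed oop arrays; with -XX:+UseCompressedOops, 4-byte narrow oops
// are routed to the int/oop variant instead (see generate_arraycopy_stubs()
// and the TIMES_OOP scale), i.e. roughly:
//
//   element size = UseCompressedOops ? sizeof(narrowOop) : sizeof(oop);  // 4 : 8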
2241 2242 if (entry != NULL) { 2243 *entry = __ pc(); 2244 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2245 BLOCK_COMMENT("Entry:"); 2246 } 2247 2248 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx 2249 // r9 is used to save r15_thread 2250 // 'from', 'to' and 'qword_count' are now valid 2251 2252 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT; 2253 if (dest_uninitialized) { 2254 decorators |= IS_DEST_UNINITIALIZED; 2255 } 2256 if (aligned) { 2257 decorators |= ARRAYCOPY_ALIGNED; 2258 } 2259 2260 BasicType type = is_oop ? T_OBJECT : T_LONG; 2261 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2262 bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count); 2263 { 2264 // UnsafeCopyMemory page error: continue after ucm 2265 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2266 2267 // Copy from low to high addresses. Use 'to' as scratch. 2268 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 2269 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 2270 __ negptr(qword_count); 2271 __ jmp(L_copy_bytes); 2272 2273 // Copy trailing qwords 2274 __ BIND(L_copy_8_bytes); 2275 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 2276 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 2277 __ increment(qword_count); 2278 __ jcc(Assembler::notZero, L_copy_8_bytes); 2279 } 2280 if (is_oop) { 2281 __ jmp(L_exit); 2282 } else { 2283 restore_arg_regs_using_thread(); 2284 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2285 __ xorptr(rax, rax); // return 0 2286 __ vzeroupper(); 2287 __ leave(); // required for proper stackwalking of RuntimeStub frame 2288 __ ret(0); 2289 } 2290 2291 { 2292 // UnsafeCopyMemory page error: continue after ucm 2293 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2294 // Copy in multi-bytes chunks 2295 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2296 } 2297 2298 __ BIND(L_exit); 2299 bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count); 2300 restore_arg_regs_using_thread(); 2301 if (is_oop) { 2302 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2303 } else { 2304 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2305 } 2306 __ vzeroupper(); 2307 __ xorptr(rax, rax); // return 0 2308 __ leave(); // required for proper stackwalking of RuntimeStub frame 2309 __ ret(0); 2310 2311 return start; 2312 } 2313 2314 // Arguments: 2315 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2316 // ignored 2317 // is_oop - true => oop array, so generate store check code 2318 // name - stub name string 2319 // 2320 // Inputs: 2321 // c_rarg0 - source array address 2322 // c_rarg1 - destination array address 2323 // c_rarg2 - element count, treated as ssize_t, can be zero 2324 // 2325 address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, 2326 address nooverlap_target, address *entry, 2327 const char *name, bool dest_uninitialized = false) { 2328 __ align(CodeEntryAlignment); 2329 StubCodeMark mark(this, "StubRoutines", name); 2330 address start = __ pc(); 2331 2332 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2333 const Register from = rdi; // source array address 2334 const Register to = rsi; // destination array address 2335 const Register qword_count = rdx; // 
elements count 2336 const Register saved_count = rcx; 2337 2338 __ enter(); // required for proper stackwalking of RuntimeStub frame 2339 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2340 2341 if (entry != NULL) { 2342 *entry = __ pc(); 2343 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2344 BLOCK_COMMENT("Entry:"); 2345 } 2346 2347 array_overlap_test(nooverlap_target, Address::times_8); 2348 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx 2349 // r9 is used to save r15_thread 2350 // 'from', 'to' and 'qword_count' are now valid 2351 2352 DecoratorSet decorators = IN_HEAP | IS_ARRAY; 2353 if (dest_uninitialized) { 2354 decorators |= IS_DEST_UNINITIALIZED; 2355 } 2356 if (aligned) { 2357 decorators |= ARRAYCOPY_ALIGNED; 2358 } 2359 2360 BasicType type = is_oop ? T_OBJECT : T_LONG; 2361 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2362 bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count); 2363 { 2364 // UnsafeCopyMemory page error: continue after ucm 2365 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2366 2367 __ jmp(L_copy_bytes); 2368 2369 // Copy trailing qwords 2370 __ BIND(L_copy_8_bytes); 2371 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2372 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2373 __ decrement(qword_count); 2374 __ jcc(Assembler::notZero, L_copy_8_bytes); 2375 } 2376 if (is_oop) { 2377 __ jmp(L_exit); 2378 } else { 2379 restore_arg_regs_using_thread(); 2380 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2381 __ xorptr(rax, rax); // return 0 2382 __ vzeroupper(); 2383 __ leave(); // required for proper stackwalking of RuntimeStub frame 2384 __ ret(0); 2385 } 2386 { 2387 // UnsafeCopyMemory page error: continue after ucm 2388 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2389 2390 // Copy in multi-bytes chunks 2391 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2392 } 2393 __ BIND(L_exit); 2394 bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count); 2395 restore_arg_regs_using_thread(); 2396 if (is_oop) { 2397 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2398 } else { 2399 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2400 } 2401 __ vzeroupper(); 2402 __ xorptr(rax, rax); // return 0 2403 __ leave(); // required for proper stackwalking of RuntimeStub frame 2404 __ ret(0); 2405 2406 return start; 2407 } 2408 2409 2410 // Helper for generating a dynamic type check. 2411 // Smashes no registers. 2412 void generate_type_check(Register sub_klass, 2413 Register super_check_offset, 2414 Register super_klass, 2415 Label& L_success) { 2416 assert_different_registers(sub_klass, super_check_offset, super_klass); 2417 2418 BLOCK_COMMENT("type_check:"); 2419 2420 Label L_miss; 2421 2422 __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL, 2423 super_check_offset); 2424 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL); 2425 2426 // Fall through on failure! 
2427 __ BIND(L_miss); 2428 } 2429 2430 // 2431 // Generate checkcasting array copy stub 2432 // 2433 // Input: 2434 // c_rarg0 - source array address 2435 // c_rarg1 - destination array address 2436 // c_rarg2 - element count, treated as ssize_t, can be zero 2437 // c_rarg3 - size_t ckoff (super_check_offset) 2438 // not Win64 2439 // c_rarg4 - oop ckval (super_klass) 2440 // Win64 2441 // rsp+40 - oop ckval (super_klass) 2442 // 2443 // Output: 2444 // rax == 0 - success 2445 // rax == -1^K - failure, where K is partial transfer count 2446 // 2447 address generate_checkcast_copy(const char *name, address *entry, 2448 bool dest_uninitialized = false) { 2449 2450 Label L_load_element, L_store_element, L_do_card_marks, L_done; 2451 2452 // Input registers (after setup_arg_regs) 2453 const Register from = rdi; // source array address 2454 const Register to = rsi; // destination array address 2455 const Register length = rdx; // elements count 2456 const Register ckoff = rcx; // super_check_offset 2457 const Register ckval = r8; // super_klass 2458 2459 // Registers used as temps (r13, r14 are save-on-entry) 2460 const Register end_from = from; // source array end address 2461 const Register end_to = r13; // destination array end address 2462 const Register count = rdx; // -(count_remaining) 2463 const Register r14_length = r14; // saved copy of length 2464 // End pointers are inclusive, and if length is not zero they point 2465 // to the last unit copied: end_to[0] := end_from[0] 2466 2467 const Register rax_oop = rax; // actual oop copied 2468 const Register r11_klass = r11; // oop._klass 2469 2470 //--------------------------------------------------------------- 2471 // Assembler stub will be used for this call to arraycopy 2472 // if the two arrays are subtypes of Object[] but the 2473 // destination array type is not equal to or a supertype 2474 // of the source type. Each element must be separately 2475 // checked. 2476 2477 __ align(CodeEntryAlignment); 2478 StubCodeMark mark(this, "StubRoutines", name); 2479 address start = __ pc(); 2480 2481 __ enter(); // required for proper stackwalking of RuntimeStub frame 2482 2483 #ifdef ASSERT 2484 // caller guarantees that the arrays really are different 2485 // otherwise, we would have to make conjoint checks 2486 { Label L; 2487 array_overlap_test(L, TIMES_OOP); 2488 __ stop("checkcast_copy within a single array"); 2489 __ bind(L); 2490 } 2491 #endif //ASSERT 2492 2493 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx 2494 // ckoff => rcx, ckval => r8 2495 // r9 and r10 may be used to save non-volatile registers 2496 #ifdef _WIN64 2497 // last argument (#4) is on stack on Win64 2498 __ movptr(ckval, Address(rsp, 6 * wordSize)); 2499 #endif 2500 2501 // Caller of this entry point must set up the argument registers. 
2502 if (entry != NULL) {
2503 *entry = __ pc();
2504 BLOCK_COMMENT("Entry:");
2505 }
2506
2507 // allocate spill slots for r13, r14 and r10
2508 enum {
2509 saved_r13_offset,
2510 saved_r14_offset,
2511 saved_r10_offset,
2512 saved_rbp_offset
2513 };
2514 __ subptr(rsp, saved_rbp_offset * wordSize);
2515 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
2516 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);
2517 __ movptr(Address(rsp, saved_r10_offset * wordSize), r10);
2518
2519 #ifdef ASSERT
2520 Label L2;
2521 __ get_thread(r14);
2522 __ cmpptr(r15_thread, r14);
2523 __ jcc(Assembler::equal, L2);
2524 __ stop("StubRoutines::checkcast_copy: r15_thread is modified by call");
2525 __ bind(L2);
2526 #endif // ASSERT
2527
2528 // check that int operands are properly extended to size_t
2529 assert_clean_int(length, rax);
2530 assert_clean_int(ckoff, rax);
2531
2532 #ifdef ASSERT
2533 BLOCK_COMMENT("assert consistent ckoff/ckval");
2534 // The ckoff and ckval must be mutually consistent,
2535 // even though caller generates both.
2536 { Label L;
2537 int sco_offset = in_bytes(Klass::super_check_offset_offset());
2538 __ cmpl(ckoff, Address(ckval, sco_offset));
2539 __ jcc(Assembler::equal, L);
2540 __ stop("super_check_offset inconsistent");
2541 __ bind(L);
2542 }
2543 #endif //ASSERT
2544
2545 // Loop-invariant addresses. They are exclusive end pointers.
2546 Address end_from_addr(from, length, TIMES_OOP, 0);
2547 Address end_to_addr(to, length, TIMES_OOP, 0);
2548 // Loop-variant addresses. They assume post-incremented count < 0.
2549 Address from_element_addr(end_from, count, TIMES_OOP, 0);
2550 Address to_element_addr(end_to, count, TIMES_OOP, 0);
2551
2552 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST | ARRAYCOPY_DISJOINT;
2553 if (dest_uninitialized) {
2554 decorators |= IS_DEST_UNINITIALIZED;
2555 }
2556
2557 BasicType type = T_OBJECT;
2558 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
2559 bs->arraycopy_prologue(_masm, decorators, type, from, to, count);
2560
2561 // Copy from low to high addresses, indexed from the end of each array.
2562 __ lea(end_from, end_from_addr);
2563 __ lea(end_to, end_to_addr);
2564 __ movptr(r14_length, length); // save a copy of the length
2565 assert(length == count, ""); // else fix next line:
2566 __ negptr(count); // negate and test the length
2567 __ jcc(Assembler::notZero, L_load_element);
2568
2569 // Empty array: Nothing to do.
2570 __ xorptr(rax, rax); // return 0 on (trivial) success
2571 __ jmp(L_done);
2572
2573 // ======== begin loop ========
2574 // (Loop is rotated; its entry is L_load_element.)
2575 // Loop control:
2576 // for (count = -count; count != 0; count++)
2577 // Base pointers src, dst are biased by 8*(count-1), to last element.
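// Worked example of the rotated loop (illustrative): for length == 3 the
// loop runs with count = -3, -2, -1; each element is addressed as
// end_ptr + count*scale, so count == -3 touches element 0, and the loop
// exits once the post-increment reaches zero. In C terms:
//
//   for (count = -length; count != 0; count++) {
//     oop o = end_from[count];                     // from_element_addr
//     if (o != NULL && !subtype_check(o)) break;   // see L_load_element
//     end_to[count] = o;                           // to_element_addr
//   }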
2578 __ align(OptoLoopAlignment); 2579 2580 __ BIND(L_store_element); 2581 __ store_heap_oop(to_element_addr, rax_oop, noreg, noreg, AS_RAW); // store the oop 2582 __ increment(count); // increment the count toward zero 2583 __ jcc(Assembler::zero, L_do_card_marks); 2584 2585 // ======== loop entry is here ======== 2586 __ BIND(L_load_element); 2587 __ load_heap_oop(rax_oop, from_element_addr, noreg, noreg, AS_RAW); // load the oop 2588 __ testptr(rax_oop, rax_oop); 2589 __ jcc(Assembler::zero, L_store_element); 2590 2591 __ load_klass(r11_klass, rax_oop, rscratch1);// query the object klass 2592 generate_type_check(r11_klass, ckoff, ckval, L_store_element); 2593 // ======== end loop ======== 2594 2595 // It was a real error; we must depend on the caller to finish the job. 2596 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops. 2597 // Emit GC store barriers for the oops we have copied (r14 + rdx), 2598 // and report their number to the caller. 2599 assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1); 2600 Label L_post_barrier; 2601 __ addptr(r14_length, count); // K = (original - remaining) oops 2602 __ movptr(rax, r14_length); // save the value 2603 __ notptr(rax); // report (-1^K) to caller (does not affect flags) 2604 __ jccb(Assembler::notZero, L_post_barrier); 2605 __ jmp(L_done); // K == 0, nothing was copied, skip post barrier 2606 2607 // Come here on success only. 2608 __ BIND(L_do_card_marks); 2609 __ xorptr(rax, rax); // return 0 on success 2610 2611 __ BIND(L_post_barrier); 2612 bs->arraycopy_epilogue(_masm, decorators, type, from, to, r14_length); 2613 2614 // Common exit point (success or failure). 2615 __ BIND(L_done); 2616 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 2617 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 2618 __ movptr(r10, Address(rsp, saved_r10_offset * wordSize)); 2619 restore_arg_regs(); 2620 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free 2621 __ leave(); // required for proper stackwalking of RuntimeStub frame 2622 __ ret(0); 2623 2624 return start; 2625 } 2626 2627 // 2628 // Generate 'unsafe' array copy stub 2629 // Though just as safe as the other stubs, it takes an unscaled 2630 // size_t argument instead of an element count. 2631 // 2632 // Input: 2633 // c_rarg0 - source array address 2634 // c_rarg1 - destination array address 2635 // c_rarg2 - byte count, treated as ssize_t, can be zero 2636 // 2637 // Examines the alignment of the operands and dispatches 2638 // to a long, int, short, or byte copy loop. 
2639 // 2640 address generate_unsafe_copy(const char *name, 2641 address byte_copy_entry, address short_copy_entry, 2642 address int_copy_entry, address long_copy_entry) { 2643 2644 Label L_long_aligned, L_int_aligned, L_short_aligned; 2645 2646 // Input registers (before setup_arg_regs) 2647 const Register from = c_rarg0; // source array address 2648 const Register to = c_rarg1; // destination array address 2649 const Register size = c_rarg2; // byte count (size_t) 2650 2651 // Register used as a temp 2652 const Register bits = rax; // test copy of low bits 2653 2654 __ align(CodeEntryAlignment); 2655 StubCodeMark mark(this, "StubRoutines", name); 2656 address start = __ pc(); 2657 2658 __ enter(); // required for proper stackwalking of RuntimeStub frame 2659 2660 // bump this on entry, not on exit: 2661 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr); 2662 2663 __ mov(bits, from); 2664 __ orptr(bits, to); 2665 __ orptr(bits, size); 2666 2667 __ testb(bits, BytesPerLong-1); 2668 __ jccb(Assembler::zero, L_long_aligned); 2669 2670 __ testb(bits, BytesPerInt-1); 2671 __ jccb(Assembler::zero, L_int_aligned); 2672 2673 __ testb(bits, BytesPerShort-1); 2674 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry)); 2675 2676 __ BIND(L_short_aligned); 2677 __ shrptr(size, LogBytesPerShort); // size => short_count 2678 __ jump(RuntimeAddress(short_copy_entry)); 2679 2680 __ BIND(L_int_aligned); 2681 __ shrptr(size, LogBytesPerInt); // size => int_count 2682 __ jump(RuntimeAddress(int_copy_entry)); 2683 2684 __ BIND(L_long_aligned); 2685 __ shrptr(size, LogBytesPerLong); // size => qword_count 2686 __ jump(RuntimeAddress(long_copy_entry)); 2687 2688 return start; 2689 } 2690 2691 // Perform range checks on the proposed arraycopy. 2692 // Kills temp, but nothing else. 2693 // Also, clean the sign bits of src_pos and dst_pos. 2694 void arraycopy_range_checks(Register src, // source array oop (c_rarg0) 2695 Register src_pos, // source position (c_rarg1) 2696 Register dst, // destination array oo (c_rarg2) 2697 Register dst_pos, // destination position (c_rarg3) 2698 Register length, 2699 Register temp, 2700 Label& L_failed) { 2701 BLOCK_COMMENT("arraycopy_range_checks:"); 2702 2703 // if (src_pos + length > arrayOop(src)->length()) FAIL; 2704 __ movl(temp, length); 2705 __ addl(temp, src_pos); // src_pos + length 2706 __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes())); 2707 __ jcc(Assembler::above, L_failed); 2708 2709 // if (dst_pos + length > arrayOop(dst)->length()) FAIL; 2710 __ movl(temp, length); 2711 __ addl(temp, dst_pos); // dst_pos + length 2712 __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes())); 2713 __ jcc(Assembler::above, L_failed); 2714 2715 // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'. 2716 // Move with sign extension can be used since they are positive. 
2717 __ movslq(src_pos, src_pos); 2718 __ movslq(dst_pos, dst_pos); 2719 2720 BLOCK_COMMENT("arraycopy_range_checks done"); 2721 } 2722 2723 // 2724 // Generate generic array copy stubs 2725 // 2726 // Input: 2727 // c_rarg0 - src oop 2728 // c_rarg1 - src_pos (32-bits) 2729 // c_rarg2 - dst oop 2730 // c_rarg3 - dst_pos (32-bits) 2731 // not Win64 2732 // c_rarg4 - element count (32-bits) 2733 // Win64 2734 // rsp+40 - element count (32-bits) 2735 // 2736 // Output: 2737 // rax == 0 - success 2738 // rax == -1^K - failure, where K is partial transfer count 2739 // 2740 address generate_generic_copy(const char *name, 2741 address byte_copy_entry, address short_copy_entry, 2742 address int_copy_entry, address oop_copy_entry, 2743 address long_copy_entry, address checkcast_copy_entry) { 2744 2745 Label L_failed, L_failed_0, L_objArray; 2746 Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs; 2747 2748 // Input registers 2749 const Register src = c_rarg0; // source array oop 2750 const Register src_pos = c_rarg1; // source position 2751 const Register dst = c_rarg2; // destination array oop 2752 const Register dst_pos = c_rarg3; // destination position 2753 #ifndef _WIN64 2754 const Register length = c_rarg4; 2755 const Register rklass_tmp = r9; // load_klass 2756 #else 2757 const Address length(rsp, 6 * wordSize); // elements count is on stack on Win64 2758 const Register rklass_tmp = rdi; // load_klass 2759 #endif 2760 2761 { int modulus = CodeEntryAlignment; 2762 int target = modulus - 5; // 5 = sizeof jmp(L_failed) 2763 int advance = target - (__ offset() % modulus); 2764 if (advance < 0) advance += modulus; 2765 if (advance > 0) __ nop(advance); 2766 } 2767 StubCodeMark mark(this, "StubRoutines", name); 2768 2769 // Short-hop target to L_failed. Makes for denser prologue code. 2770 __ BIND(L_failed_0); 2771 __ jmp(L_failed); 2772 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed"); 2773 2774 __ align(CodeEntryAlignment); 2775 address start = __ pc(); 2776 2777 __ enter(); // required for proper stackwalking of RuntimeStub frame 2778 2779 // bump this on entry, not on exit: 2780 inc_counter_np(SharedRuntime::_generic_array_copy_ctr); 2781 2782 //----------------------------------------------------------------------- 2783 // Assembler stub will be used for this call to arraycopy 2784 // if the following conditions are met: 2785 // 2786 // (1) src and dst must not be null. 2787 // (2) src_pos must not be negative. 2788 // (3) dst_pos must not be negative. 2789 // (4) length must not be negative. 2790 // (5) src klass and dst klass should be the same and not NULL. 2791 // (6) src and dst should be arrays. 2792 // (7) src_pos + length must not exceed length of src. 2793 // (8) dst_pos + length must not exceed length of dst. 
2794 // 2795 2796 // if (src == NULL) return -1; 2797 __ testptr(src, src); // src oop 2798 size_t j1off = __ offset(); 2799 __ jccb(Assembler::zero, L_failed_0); 2800 2801 // if (src_pos < 0) return -1; 2802 __ testl(src_pos, src_pos); // src_pos (32-bits) 2803 __ jccb(Assembler::negative, L_failed_0); 2804 2805 // if (dst == NULL) return -1; 2806 __ testptr(dst, dst); // dst oop 2807 __ jccb(Assembler::zero, L_failed_0); 2808 2809 // if (dst_pos < 0) return -1; 2810 __ testl(dst_pos, dst_pos); // dst_pos (32-bits) 2811 size_t j4off = __ offset(); 2812 __ jccb(Assembler::negative, L_failed_0); 2813 2814 // The first four tests are very dense code, 2815 // but not quite dense enough to put four 2816 // jumps in a 16-byte instruction fetch buffer. 2817 // That's good, because some branch predicters 2818 // do not like jumps so close together. 2819 // Make sure of this. 2820 guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps"); 2821 2822 // registers used as temp 2823 const Register r11_length = r11; // elements count to copy 2824 const Register r10_src_klass = r10; // array klass 2825 2826 // if (length < 0) return -1; 2827 __ movl(r11_length, length); // length (elements count, 32-bits value) 2828 __ testl(r11_length, r11_length); 2829 __ jccb(Assembler::negative, L_failed_0); 2830 2831 __ load_klass(r10_src_klass, src, rklass_tmp); 2832 #ifdef ASSERT 2833 // assert(src->klass() != NULL); 2834 { 2835 BLOCK_COMMENT("assert klasses not null {"); 2836 Label L1, L2; 2837 __ testptr(r10_src_klass, r10_src_klass); 2838 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL 2839 __ bind(L1); 2840 __ stop("broken null klass"); 2841 __ bind(L2); 2842 __ load_klass(rax, dst, rklass_tmp); 2843 __ cmpq(rax, 0); 2844 __ jcc(Assembler::equal, L1); // this would be broken also 2845 BLOCK_COMMENT("} assert klasses not null done"); 2846 } 2847 #endif 2848 2849 // Load layout helper (32-bits) 2850 // 2851 // |array_tag| | header_size | element_type | |log2_element_size| 2852 // 32 30 24 16 8 2 0 2853 // 2854 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 2855 // 2856 2857 const int lh_offset = in_bytes(Klass::layout_helper_offset()); 2858 2859 // Handle objArrays completely differently... 2860 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2861 __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh); 2862 __ jcc(Assembler::equal, L_objArray); 2863 2864 // if (src->klass() != dst->klass()) return -1; 2865 __ load_klass(rax, dst, rklass_tmp); 2866 __ cmpq(r10_src_klass, rax); 2867 __ jcc(Assembler::notEqual, L_failed); 2868 2869 const Register rax_lh = rax; // layout helper 2870 __ movl(rax_lh, Address(r10_src_klass, lh_offset)); 2871 2872 // if (!src->is_Array()) return -1; 2873 __ cmpl(rax_lh, Klass::_lh_neutral_value); 2874 __ jcc(Assembler::greaterEqual, L_failed); 2875 2876 // At this point, it is known to be a typeArray (array_tag 0x3). 
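// The layout helper decode performed below corresponds to this C sketch
// (using the Klass:: constants named in the code):
//
//   int tag        = lh >> Klass::_lh_array_tag_shift;   // 0x3 => typeArray
//   int hsize      = (lh >> Klass::_lh_header_size_shift) & Klass::_lh_header_size_mask;
//   int log2_esize = lh & Klass::_lh_log2_element_size_mask;
//
//   src_addr = src + hsize + (src_pos << log2_esize);
//   dst_addr = dst + hsize + (dst_pos << log2_esize);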
2877 #ifdef ASSERT 2878 { 2879 BLOCK_COMMENT("assert primitive array {"); 2880 Label L; 2881 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift)); 2882 __ jcc(Assembler::greaterEqual, L); 2883 __ stop("must be a primitive array"); 2884 __ bind(L); 2885 BLOCK_COMMENT("} assert primitive array done"); 2886 } 2887 #endif 2888 2889 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2890 r10, L_failed); 2891 2892 // TypeArrayKlass 2893 // 2894 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 2895 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 2896 // 2897 2898 const Register r10_offset = r10; // array offset 2899 const Register rax_elsize = rax_lh; // element size 2900 2901 __ movl(r10_offset, rax_lh); 2902 __ shrl(r10_offset, Klass::_lh_header_size_shift); 2903 __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset 2904 __ addptr(src, r10_offset); // src array offset 2905 __ addptr(dst, r10_offset); // dst array offset 2906 BLOCK_COMMENT("choose copy loop based on element size"); 2907 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize 2908 2909 // next registers should be set before the jump to corresponding stub 2910 const Register from = c_rarg0; // source array address 2911 const Register to = c_rarg1; // destination array address 2912 const Register count = c_rarg2; // elements count 2913 2914 // 'from', 'to', 'count' registers should be set in such order 2915 // since they are the same as 'src', 'src_pos', 'dst'. 2916 2917 __ BIND(L_copy_bytes); 2918 __ cmpl(rax_elsize, 0); 2919 __ jccb(Assembler::notEqual, L_copy_shorts); 2920 __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr 2921 __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr 2922 __ movl2ptr(count, r11_length); // length 2923 __ jump(RuntimeAddress(byte_copy_entry)); 2924 2925 __ BIND(L_copy_shorts); 2926 __ cmpl(rax_elsize, LogBytesPerShort); 2927 __ jccb(Assembler::notEqual, L_copy_ints); 2928 __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr 2929 __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr 2930 __ movl2ptr(count, r11_length); // length 2931 __ jump(RuntimeAddress(short_copy_entry)); 2932 2933 __ BIND(L_copy_ints); 2934 __ cmpl(rax_elsize, LogBytesPerInt); 2935 __ jccb(Assembler::notEqual, L_copy_longs); 2936 __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr 2937 __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr 2938 __ movl2ptr(count, r11_length); // length 2939 __ jump(RuntimeAddress(int_copy_entry)); 2940 2941 __ BIND(L_copy_longs); 2942 #ifdef ASSERT 2943 { 2944 BLOCK_COMMENT("assert long copy {"); 2945 Label L; 2946 __ cmpl(rax_elsize, LogBytesPerLong); 2947 __ jcc(Assembler::equal, L); 2948 __ stop("must be long copy, but elsize is wrong"); 2949 __ bind(L); 2950 BLOCK_COMMENT("} assert long copy done"); 2951 } 2952 #endif 2953 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr 2954 __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr 2955 __ movl2ptr(count, r11_length); // length 2956 __ jump(RuntimeAddress(long_copy_entry)); 2957 2958 // ObjArrayKlass 2959 __ BIND(L_objArray); 2960 // live at this point: r10_src_klass, r11_length, src[_pos], dst[_pos] 2961 2962 Label L_plain_copy, L_checkcast_copy; 2963 // test array classes for subtyping 2964 __ load_klass(rax, dst, rklass_tmp); 2965 __ cmpq(r10_src_klass, rax); // usual case is exact equality 2966 
__ jcc(Assembler::notEqual, L_checkcast_copy); 2967 2968 // Identically typed arrays can be copied without element-wise checks. 2969 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2970 r10, L_failed); 2971 2972 __ lea(from, Address(src, src_pos, TIMES_OOP, 2973 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr 2974 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2975 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr 2976 __ movl2ptr(count, r11_length); // length 2977 __ BIND(L_plain_copy); 2978 __ jump(RuntimeAddress(oop_copy_entry)); 2979 2980 __ BIND(L_checkcast_copy); 2981 // live at this point: r10_src_klass, r11_length, rax (dst_klass) 2982 { 2983 // Before looking at dst.length, make sure dst is also an objArray. 2984 __ cmpl(Address(rax, lh_offset), objArray_lh); 2985 __ jcc(Assembler::notEqual, L_failed); 2986 2987 // It is safe to examine both src.length and dst.length. 2988 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2989 rax, L_failed); 2990 2991 const Register r11_dst_klass = r11; 2992 __ load_klass(r11_dst_klass, dst, rklass_tmp); // reload 2993 2994 // Marshal the base address arguments now, freeing registers. 2995 __ lea(from, Address(src, src_pos, TIMES_OOP, 2996 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2997 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2998 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2999 __ movl(count, length); // length (reloaded) 3000 Register sco_temp = c_rarg3; // this register is free now 3001 assert_different_registers(from, to, count, sco_temp, 3002 r11_dst_klass, r10_src_klass); 3003 assert_clean_int(count, sco_temp); 3004 3005 // Generate the type check. 3006 const int sco_offset = in_bytes(Klass::super_check_offset_offset()); 3007 __ movl(sco_temp, Address(r11_dst_klass, sco_offset)); 3008 assert_clean_int(sco_temp, rax); 3009 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy); 3010 3011 // Fetch destination element klass from the ObjArrayKlass header. 3012 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 3013 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset)); 3014 __ movl( sco_temp, Address(r11_dst_klass, sco_offset)); 3015 assert_clean_int(sco_temp, rax); 3016 3017 // the checkcast_copy loop needs two extra arguments: 3018 assert(c_rarg3 == sco_temp, "#3 already in place"); 3019 // Set up arguments for checkcast_copy_entry. 
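// Reminder of the checkcast stub's return convention (see its header
// comment): rax == 0 on complete success, otherwise rax == -1^K == ~K,
// where K is the number of elements copied before the type check failed.
// For example, failing after K == 2 elements returns ~2 == -3, and the
// caller recovers K as ~rax.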
3020 setup_arg_regs(4); 3021 __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris 3022 __ jump(RuntimeAddress(checkcast_copy_entry)); 3023 } 3024 3025 __ BIND(L_failed); 3026 __ xorptr(rax, rax); 3027 __ notptr(rax); // return -1 3028 __ leave(); // required for proper stackwalking of RuntimeStub frame 3029 __ ret(0); 3030 3031 return start; 3032 } 3033 3034 address generate_data_cache_writeback() { 3035 const Register src = c_rarg0; // source address 3036 3037 __ align(CodeEntryAlignment); 3038 3039 StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback"); 3040 3041 address start = __ pc(); 3042 __ enter(); 3043 __ cache_wb(Address(src, 0)); 3044 __ leave(); 3045 __ ret(0); 3046 3047 return start; 3048 } 3049 3050 address generate_data_cache_writeback_sync() { 3051 const Register is_pre = c_rarg0; // pre or post sync 3052 3053 __ align(CodeEntryAlignment); 3054 3055 StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback_sync"); 3056 3057 // pre wbsync is a no-op 3058 // post wbsync translates to an sfence 3059 3060 Label skip; 3061 address start = __ pc(); 3062 __ enter(); 3063 __ cmpl(is_pre, 0); 3064 __ jcc(Assembler::notEqual, skip); 3065 __ cache_wbsync(false); 3066 __ bind(skip); 3067 __ leave(); 3068 __ ret(0); 3069 3070 return start; 3071 } 3072 3073 void generate_arraycopy_stubs() { 3074 address entry; 3075 address entry_jbyte_arraycopy; 3076 address entry_jshort_arraycopy; 3077 address entry_jint_arraycopy; 3078 address entry_oop_arraycopy; 3079 address entry_jlong_arraycopy; 3080 address entry_checkcast_arraycopy; 3081 3082 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, 3083 "jbyte_disjoint_arraycopy"); 3084 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy, 3085 "jbyte_arraycopy"); 3086 3087 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, 3088 "jshort_disjoint_arraycopy"); 3089 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy, 3090 "jshort_arraycopy"); 3091 3092 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry, 3093 "jint_disjoint_arraycopy"); 3094 StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry, 3095 &entry_jint_arraycopy, "jint_arraycopy"); 3096 3097 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry, 3098 "jlong_disjoint_arraycopy"); 3099 StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry, 3100 &entry_jlong_arraycopy, "jlong_arraycopy"); 3101 3102 3103 if (UseCompressedOops) { 3104 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry, 3105 "oop_disjoint_arraycopy"); 3106 StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry, 3107 &entry_oop_arraycopy, "oop_arraycopy"); 3108 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry, 3109 "oop_disjoint_arraycopy_uninit", 3110 /*dest_uninitialized*/true); 3111 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry, 3112 NULL, "oop_arraycopy_uninit", 3113 /*dest_uninitialized*/true); 3114 } else { 3115 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry, 3116 "oop_disjoint_arraycopy"); 3117 StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry, 
3118 &entry_oop_arraycopy, "oop_arraycopy"); 3119 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry, 3120 "oop_disjoint_arraycopy_uninit", 3121 /*dest_uninitialized*/true); 3122 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry, 3123 NULL, "oop_arraycopy_uninit", 3124 /*dest_uninitialized*/true); 3125 } 3126 3127 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 3128 StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, 3129 /*dest_uninitialized*/true); 3130 3131 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", 3132 entry_jbyte_arraycopy, 3133 entry_jshort_arraycopy, 3134 entry_jint_arraycopy, 3135 entry_jlong_arraycopy); 3136 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy", 3137 entry_jbyte_arraycopy, 3138 entry_jshort_arraycopy, 3139 entry_jint_arraycopy, 3140 entry_oop_arraycopy, 3141 entry_jlong_arraycopy, 3142 entry_checkcast_arraycopy); 3143 3144 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 3145 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 3146 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 3147 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); 3148 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 3149 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 3150 3151 // We don't generate specialized code for HeapWord-aligned source 3152 // arrays, so just use the code we've already generated 3153 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy; 3154 StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy; 3155 3156 StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy; 3157 StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy; 3158 3159 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy; 3160 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; 3161 3162 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy; 3163 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; 3164 3165 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; 3166 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; 3167 3168 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit; 3169 StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit; 3170 } 3171 3172 // AES intrinsic stubs 3173 enum {AESBlockSize = 16}; 3174 3175 address generate_key_shuffle_mask() { 3176 __ align(16); 3177 StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask"); 3178 address start = __ pc(); 3179 __ emit_data64( 0x0405060700010203, relocInfo::none ); 3180 __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none ); 3181 return start; 3182 } 3183 3184 address generate_counter_shuffle_mask() { 3185 __ align(16); 3186 StubCodeMark mark(this, "StubRoutines", "counter_shuffle_mask"); 3187 address start = __ pc(); 3188 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 3189 __ emit_data64(0x0001020304050607, relocInfo::none); 3190 return start; 3191 } 3192 3193 // Utility 
routine for loading a 128-bit key word in little endian format;
3194 // can optionally specify that the shuffle mask is already in an XMM register
3195 void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
3196 __ movdqu(xmmdst, Address(key, offset));
3197 if (xmm_shuf_mask != NULL) {
3198 __ pshufb(xmmdst, xmm_shuf_mask);
3199 } else {
3200 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
3201 }
3202 }
3203
3204 // Utility routine for incrementing a 128-bit counter (the iv in CTR mode)
3205 void inc_counter(Register reg, XMMRegister xmmdst, int inc_delta, Label& next_block) {
3206 __ pextrq(reg, xmmdst, 0x0);
3207 __ addq(reg, inc_delta);
3208 __ pinsrq(xmmdst, reg, 0x0);
3209 __ jcc(Assembler::carryClear, next_block); // jump if no carry
3210 __ pextrq(reg, xmmdst, 0x01); // Carry
3211 __ addq(reg, 0x01);
3212 __ pinsrq(xmmdst, reg, 0x01); // Carry end
3213 __ BIND(next_block); // next instruction
3214 }
3215
3216 // Arguments:
3217 //
3218 // Inputs:
3219 // c_rarg0 - source byte array address
3220 // c_rarg1 - destination byte array address
3221 // c_rarg2 - K (key) in little endian int array
3222 //
3223 address generate_aescrypt_encryptBlock() {
3224 assert(UseAES, "need AES instructions and misaligned SSE support");
3225 __ align(CodeEntryAlignment);
3226 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
3227 Label L_doLast;
3228 address start = __ pc();
3229
3230 const Register from = c_rarg0; // source array address
3231 const Register to = c_rarg1; // destination array address
3232 const Register key = c_rarg2; // key array address
3233 const Register keylen = rax;
3234
3235 const XMMRegister xmm_result = xmm0;
3236 const XMMRegister xmm_key_shuf_mask = xmm1;
3237 // On win64 xmm6-xmm15 must be preserved so don't use them.
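// Round-count arithmetic behind the keylen checks below (standard AES, not
// specific to this file): with Nk = key words (4/6/8), the round count is
// Nr = Nk + 6 (10/12/14), and the expanded key holds 4*(Nr+1) 32-bit words:
//
//   AES-128: 4*(10+1) = 44 words
//   AES-192: 4*(12+1) = 52 words
//   AES-256: 4*(14+1) = 60 words
//
// hence the comparisons of keylen against 44 and 52 to pick how many
// aesenc rounds to emit.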
3238 const XMMRegister xmm_temp1 = xmm2; 3239 const XMMRegister xmm_temp2 = xmm3; 3240 const XMMRegister xmm_temp3 = xmm4; 3241 const XMMRegister xmm_temp4 = xmm5; 3242 3243 __ enter(); // required for proper stackwalking of RuntimeStub frame 3244 3245 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3246 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3247 3248 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3249 __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input 3250 3251 // For encryption, the java expanded key ordering is just what we need 3252 // we don't know if the key is aligned, hence not using load-execute form 3253 3254 load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask); 3255 __ pxor(xmm_result, xmm_temp1); 3256 3257 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3258 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3259 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3260 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3261 3262 __ aesenc(xmm_result, xmm_temp1); 3263 __ aesenc(xmm_result, xmm_temp2); 3264 __ aesenc(xmm_result, xmm_temp3); 3265 __ aesenc(xmm_result, xmm_temp4); 3266 3267 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3268 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3269 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3270 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3271 3272 __ aesenc(xmm_result, xmm_temp1); 3273 __ aesenc(xmm_result, xmm_temp2); 3274 __ aesenc(xmm_result, xmm_temp3); 3275 __ aesenc(xmm_result, xmm_temp4); 3276 3277 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3278 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3279 3280 __ cmpl(keylen, 44); 3281 __ jccb(Assembler::equal, L_doLast); 3282 3283 __ aesenc(xmm_result, xmm_temp1); 3284 __ aesenc(xmm_result, xmm_temp2); 3285 3286 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3287 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3288 3289 __ cmpl(keylen, 52); 3290 __ jccb(Assembler::equal, L_doLast); 3291 3292 __ aesenc(xmm_result, xmm_temp1); 3293 __ aesenc(xmm_result, xmm_temp2); 3294 3295 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3296 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3297 3298 __ BIND(L_doLast); 3299 __ aesenc(xmm_result, xmm_temp1); 3300 __ aesenclast(xmm_result, xmm_temp2); 3301 __ movdqu(Address(to, 0), xmm_result); // store the result 3302 __ xorptr(rax, rax); // return 0 3303 __ leave(); // required for proper stackwalking of RuntimeStub frame 3304 __ ret(0); 3305 3306 return start; 3307 } 3308 3309 3310 // Arguments: 3311 // 3312 // Inputs: 3313 // c_rarg0 - source byte array address 3314 // c_rarg1 - destination byte array address 3315 // c_rarg2 - K (key) in little endian int array 3316 // 3317 address generate_aescrypt_decryptBlock() { 3318 assert(UseAES, "need AES instructions and misaligned SSE support"); 3319 __ align(CodeEntryAlignment); 3320 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); 3321 Label L_doLast; 3322 address start = __ pc(); 3323 3324 const Register from = c_rarg0; // source array address 3325 const Register to = c_rarg1; // destination array address 3326 const Register key = c_rarg2; // key array address 3327 const Register keylen = rax; 3328 3329 const XMMRegister xmm_result = xmm0; 3330 const XMMRegister xmm_key_shuf_mask = xmm1; 3331 // On win64 xmm6-xmm15 must be preserved so don't use them. 
3332 const XMMRegister xmm_temp1 = xmm2; 3333 const XMMRegister xmm_temp2 = xmm3; 3334 const XMMRegister xmm_temp3 = xmm4; 3335 const XMMRegister xmm_temp4 = xmm5; 3336 3337 __ enter(); // required for proper stackwalking of RuntimeStub frame 3338 3339 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3340 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3341 3342 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3343 __ movdqu(xmm_result, Address(from, 0)); 3344 3345 // for decryption java expanded key ordering is rotated one position from what we want 3346 // so we start from 0x10 here and hit 0x00 last 3347 // we don't know if the key is aligned, hence not using load-execute form 3348 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3349 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3350 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3351 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3352 3353 __ pxor (xmm_result, xmm_temp1); 3354 __ aesdec(xmm_result, xmm_temp2); 3355 __ aesdec(xmm_result, xmm_temp3); 3356 __ aesdec(xmm_result, xmm_temp4); 3357 3358 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3359 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3360 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3361 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3362 3363 __ aesdec(xmm_result, xmm_temp1); 3364 __ aesdec(xmm_result, xmm_temp2); 3365 __ aesdec(xmm_result, xmm_temp3); 3366 __ aesdec(xmm_result, xmm_temp4); 3367 3368 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3369 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3370 load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask); 3371 3372 __ cmpl(keylen, 44); 3373 __ jccb(Assembler::equal, L_doLast); 3374 3375 __ aesdec(xmm_result, xmm_temp1); 3376 __ aesdec(xmm_result, xmm_temp2); 3377 3378 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3379 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3380 3381 __ cmpl(keylen, 52); 3382 __ jccb(Assembler::equal, L_doLast); 3383 3384 __ aesdec(xmm_result, xmm_temp1); 3385 __ aesdec(xmm_result, xmm_temp2); 3386 3387 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3388 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3389 3390 __ BIND(L_doLast); 3391 __ aesdec(xmm_result, xmm_temp1); 3392 __ aesdec(xmm_result, xmm_temp2); 3393 3394 // for decryption the aesdeclast operation is always on key+0x00 3395 __ aesdeclast(xmm_result, xmm_temp3); 3396 __ movdqu(Address(to, 0), xmm_result); // store the result 3397 __ xorptr(rax, rax); // return 0 3398 __ leave(); // required for proper stackwalking of RuntimeStub frame 3399 __ ret(0); 3400 3401 return start; 3402 } 3403 3404 3405 // Arguments: 3406 // 3407 // Inputs: 3408 // c_rarg0 - source byte array address 3409 // c_rarg1 - destination byte array address 3410 // c_rarg2 - K (key) in little endian int array 3411 // c_rarg3 - r vector byte array address 3412 // c_rarg4 - input length 3413 // 3414 // Output: 3415 // rax - input length 3416 // 3417 address generate_cipherBlockChaining_encryptAESCrypt() { 3418 assert(UseAES, "need AES instructions and misaligned SSE support"); 3419 __ align(CodeEntryAlignment); 3420 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); 3421 address start = __ pc(); 3422 3423 Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256; 3424 const Register from = c_rarg0; // source array address 3425 const Register to = c_rarg1; 
// destination array address 3426 const Register key = c_rarg2; // key array address 3427 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3428 // and left with the results of the last encryption block 3429 #ifndef _WIN64 3430 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3431 #else 3432 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3433 const Register len_reg = r11; // pick the volatile windows register 3434 #endif 3435 const Register pos = rax; 3436 3437 // xmm register assignments for the loops below 3438 const XMMRegister xmm_result = xmm0; 3439 const XMMRegister xmm_temp = xmm1; 3440 // keys 0-10 preloaded into xmm2-xmm12 3441 const int XMM_REG_NUM_KEY_FIRST = 2; 3442 const int XMM_REG_NUM_KEY_LAST = 15; 3443 const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3444 const XMMRegister xmm_key10 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10); 3445 const XMMRegister xmm_key11 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11); 3446 const XMMRegister xmm_key12 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12); 3447 const XMMRegister xmm_key13 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13); 3448 3449 __ enter(); // required for proper stackwalking of RuntimeStub frame 3450 3451 #ifdef _WIN64 3452 // on win64, fill len_reg from stack position 3453 __ movl(len_reg, len_mem); 3454 #else 3455 __ push(len_reg); // Save 3456 #endif 3457 3458 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 3459 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3460 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0 3461 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) { 3462 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3463 offset += 0x10; 3464 } 3465 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 3466 3467 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3468 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3469 __ cmpl(rax, 44); 3470 __ jcc(Assembler::notEqual, L_key_192_256); 3471 3472 // 128 bit code follows here 3473 __ movptr(pos, 0); 3474 __ align(OptoLoopAlignment); 3475 3476 __ BIND(L_loopTop_128); 3477 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3478 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3479 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3480 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) { 3481 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3482 } 3483 __ aesenclast(xmm_result, xmm_key10); 3484 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3485 // no need to store r to memory until we exit 3486 __ addptr(pos, AESBlockSize); 3487 __ subptr(len_reg, AESBlockSize); 3488 __ jcc(Assembler::notEqual, L_loopTop_128); 3489 3490 __ BIND(L_exit); 3491 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object 3492 3493 #ifdef _WIN64 3494 __ movl(rax, len_mem); 3495 #else 3496 __ pop(rax); // return length 3497 #endif 3498 __ leave(); // required for proper stackwalking of RuntimeStub frame 3499 __ ret(0); 3500 3501 __ BIND(L_key_192_256); 3502 // here rax = len in ints of AESCrypt.KLE 
array (52=192, or 60=256)
3503 load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask);
3504 load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask);
3505 __ cmpl(rax, 52);
3506 __ jcc(Assembler::notEqual, L_key_256);
3507
3508 // 192-bit code follows here (could be changed to use more xmm registers)
3509 __ movptr(pos, 0);
3510 __ align(OptoLoopAlignment);
3511
3512 __ BIND(L_loopTop_192);
3513 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
3514 __ pxor (xmm_result, xmm_temp); // xor with the current r vector
3515 __ pxor (xmm_result, xmm_key0); // do the aes rounds
3516 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) {
3517 __ aesenc(xmm_result, as_XMMRegister(rnum));
3518 }
3519 __ aesenclast(xmm_result, xmm_key12);
3520 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3521 // no need to store r to memory until we exit
3522 __ addptr(pos, AESBlockSize);
3523 __ subptr(len_reg, AESBlockSize);
3524 __ jcc(Assembler::notEqual, L_loopTop_192);
3525 __ jmp(L_exit);
3526
3527 __ BIND(L_key_256);
3528 // 256-bit code follows here (could be changed to use more xmm registers)
3529 load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask);
3530 __ movptr(pos, 0);
3531 __ align(OptoLoopAlignment);
3532
3533 __ BIND(L_loopTop_256);
3534 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
3535 __ pxor (xmm_result, xmm_temp); // xor with the current r vector
3536 __ pxor (xmm_result, xmm_key0); // do the aes rounds
3537 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) {
3538 __ aesenc(xmm_result, as_XMMRegister(rnum));
3539 }
3540 load_key(xmm_temp, key, 0xe0);
3541 __ aesenclast(xmm_result, xmm_temp);
3542 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3543 // no need to store r to memory until we exit
3544 __ addptr(pos, AESBlockSize);
3545 __ subptr(len_reg, AESBlockSize);
3546 __ jcc(Assembler::notEqual, L_loopTop_256);
3547 __ jmp(L_exit);
3548
3549 return start;
3550 }
3551
3552 // SafeFetch stubs.
3553 void generate_safefetch(const char* name, int size, address* entry,
3554 address* fault_pc, address* continuation_pc) {
3555 // safefetch signatures:
3556 // int SafeFetch32(int* adr, int errValue);
3557 // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
3558 //
3559 // arguments:
3560 // c_rarg0 = adr
3561 // c_rarg1 = errValue
3562 //
3563 // result:
3564 // rax = *adr or errValue
3565
3566 StubCodeMark mark(this, "StubRoutines", name);
3567
3568 // Entry point, pc or function descriptor.
3569 *entry = __ pc();
3570
3571 // Load *adr into c_rarg1, may fault.
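// If the load below does fault, the VM's signal handler recognizes
// *fault_pc and resumes execution at *continuation_pc with c_rarg1
// still holding the caller-supplied errValue, so the stub ends up
// returning errValue instead of crashing. Conceptually (sketch only;
// 'readable' is a notional predicate, not a real function):
//
//   int SafeFetch32(int* adr, int errValue) {
//     return readable(adr) ? *adr : errValue;
//   }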
3572 *fault_pc = __ pc(); 3573 switch (size) { 3574 case 4: 3575 // int32_t 3576 __ movl(c_rarg1, Address(c_rarg0, 0)); 3577 break; 3578 case 8: 3579 // int64_t 3580 __ movq(c_rarg1, Address(c_rarg0, 0)); 3581 break; 3582 default: 3583 ShouldNotReachHere(); 3584 } 3585 3586 // return errValue or *adr 3587 *continuation_pc = __ pc(); 3588 __ movq(rax, c_rarg1); 3589 __ ret(0); 3590 } 3591 3592 // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time 3593 // to hide instruction latency 3594 // 3595 // Arguments: 3596 // 3597 // Inputs: 3598 // c_rarg0 - source byte array address 3599 // c_rarg1 - destination byte array address 3600 // c_rarg2 - K (key) in little endian int array 3601 // c_rarg3 - r vector byte array address 3602 // c_rarg4 - input length 3603 // 3604 // Output: 3605 // rax - input length 3606 // 3607 address generate_cipherBlockChaining_decryptAESCrypt_Parallel() { 3608 assert(UseAES, "need AES instructions and misaligned SSE support"); 3609 __ align(CodeEntryAlignment); 3610 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 3611 address start = __ pc(); 3612 3613 const Register from = c_rarg0; // source array address 3614 const Register to = c_rarg1; // destination array address 3615 const Register key = c_rarg2; // key array address 3616 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3617 // and left with the results of the last encryption block 3618 #ifndef _WIN64 3619 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3620 #else 3621 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3622 const Register len_reg = r11; // pick the volatile windows register 3623 #endif 3624 const Register pos = rax; 3625 3626 const int PARALLEL_FACTOR = 4; 3627 const int ROUNDS[3] = { 10, 12, 14 }; // aes rounds for key128, key192, key256 3628 3629 Label L_exit; 3630 Label L_singleBlock_loopTopHead[3]; // 128, 192, 256 3631 Label L_singleBlock_loopTopHead2[3]; // 128, 192, 256 3632 Label L_singleBlock_loopTop[3]; // 128, 192, 256 3633 Label L_multiBlock_loopTopHead[3]; // 128, 192, 256 3634 Label L_multiBlock_loopTop[3]; // 128, 192, 256 3635 3636 // keys 0-10 preloaded into xmm5-xmm15 3637 const int XMM_REG_NUM_KEY_FIRST = 5; 3638 const int XMM_REG_NUM_KEY_LAST = 15; 3639 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3640 const XMMRegister xmm_key_last = as_XMMRegister(XMM_REG_NUM_KEY_LAST); 3641 3642 __ enter(); // required for proper stackwalking of RuntimeStub frame 3643 3644 #ifdef _WIN64 3645 // on win64, fill len_reg from stack position 3646 __ movl(len_reg, len_mem); 3647 #else 3648 __ push(len_reg); // Save 3649 #endif 3650 __ push(rbx); 3651 // the java expanded key ordering is rotated one position from what we want 3652 // so we start from 0x10 here and hit 0x00 last 3653 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 3654 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3655 // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00 3656 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) { 3657 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3658 offset += 0x10; 3659 } 3660 load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask); 3661 3662 const XMMRegister xmm_prev_block_cipher = xmm1; // holds cipher of previous block 3663 3664 // registers holding the four results in 
the parallelized loop 3665 const XMMRegister xmm_result0 = xmm0; 3666 const XMMRegister xmm_result1 = xmm2; 3667 const XMMRegister xmm_result2 = xmm3; 3668 const XMMRegister xmm_result3 = xmm4; 3669 3670 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec 3671 3672 __ xorptr(pos, pos); 3673 3674 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3675 __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3676 __ cmpl(rbx, 52); 3677 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[1]); 3678 __ cmpl(rbx, 60); 3679 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[2]); 3680 3681 #define DoFour(opc, src_reg) \ 3682 __ opc(xmm_result0, src_reg); \ 3683 __ opc(xmm_result1, src_reg); \ 3684 __ opc(xmm_result2, src_reg); \ 3685 __ opc(xmm_result3, src_reg); \ 3686 3687 for (int k = 0; k < 3; ++k) { 3688 __ BIND(L_multiBlock_loopTopHead[k]); 3689 if (k != 0) { 3690 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left 3691 __ jcc(Assembler::less, L_singleBlock_loopTopHead2[k]); 3692 } 3693 if (k == 1) { 3694 __ subptr(rsp, 6 * wordSize); 3695 __ movdqu(Address(rsp, 0), xmm15); //save last_key from xmm15 3696 load_key(xmm15, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0 3697 __ movdqu(Address(rsp, 2 * wordSize), xmm15); 3698 load_key(xmm1, key, 0xc0); // 0xc0; 3699 __ movdqu(Address(rsp, 4 * wordSize), xmm1); 3700 } else if (k == 2) { 3701 __ subptr(rsp, 10 * wordSize); 3702 __ movdqu(Address(rsp, 0), xmm15); //save last_key from xmm15 3703 load_key(xmm15, key, 0xd0); // 0xd0; 256-bit key goes upto 0xe0 3704 __ movdqu(Address(rsp, 6 * wordSize), xmm15); 3705 load_key(xmm1, key, 0xe0); // 0xe0; 3706 __ movdqu(Address(rsp, 8 * wordSize), xmm1); 3707 load_key(xmm15, key, 0xb0); // 0xb0; 3708 __ movdqu(Address(rsp, 2 * wordSize), xmm15); 3709 load_key(xmm1, key, 0xc0); // 0xc0; 3710 __ movdqu(Address(rsp, 4 * wordSize), xmm1); 3711 } 3712 __ align(OptoLoopAlignment); 3713 __ BIND(L_multiBlock_loopTop[k]); 3714 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left 3715 __ jcc(Assembler::less, L_singleBlock_loopTopHead[k]); 3716 3717 if (k != 0) { 3718 __ movdqu(xmm15, Address(rsp, 2 * wordSize)); 3719 __ movdqu(xmm1, Address(rsp, 4 * wordSize)); 3720 } 3721 3722 __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); // get next 4 blocks into xmmresult registers 3723 __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 3724 __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 3725 __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize)); 3726 3727 DoFour(pxor, xmm_key_first); 3728 if (k == 0) { 3729 for (int rnum = 1; rnum < ROUNDS[k]; rnum++) { 3730 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3731 } 3732 DoFour(aesdeclast, xmm_key_last); 3733 } else if (k == 1) { 3734 for (int rnum = 1; rnum <= ROUNDS[k]-2; rnum++) { 3735 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3736 } 3737 __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again. 
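// Note: for 192/256-bit keys there are more round keys than free xmm
// registers, so the extra keys were spilled to the stack on loop entry
// and are reloaded into xmm15/xmm1 around their uses; xmm1 also serves
// as xmm_prev_block_cipher and is restored from rvec before the final
// xor of this iteration.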
3738 DoFour(aesdec, xmm1); // key : 0xc0 3739 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again 3740 DoFour(aesdeclast, xmm_key_last); 3741 } else if (k == 2) { 3742 for (int rnum = 1; rnum <= ROUNDS[k] - 4; rnum++) { 3743 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3744 } 3745 DoFour(aesdec, xmm1); // key : 0xc0 3746 __ movdqu(xmm15, Address(rsp, 6 * wordSize)); 3747 __ movdqu(xmm1, Address(rsp, 8 * wordSize)); 3748 DoFour(aesdec, xmm15); // key : 0xd0 3749 __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again. 3750 DoFour(aesdec, xmm1); // key : 0xe0 3751 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again 3752 DoFour(aesdeclast, xmm_key_last); 3753 } 3754 3755 // for each result, xor with the r vector of previous cipher block 3756 __ pxor(xmm_result0, xmm_prev_block_cipher); 3757 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 3758 __ pxor(xmm_result1, xmm_prev_block_cipher); 3759 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 3760 __ pxor(xmm_result2, xmm_prev_block_cipher); 3761 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 3762 __ pxor(xmm_result3, xmm_prev_block_cipher); 3763 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3 * AESBlockSize)); // this will carry over to next set of blocks 3764 if (k != 0) { 3765 __ movdqu(Address(rvec, 0x00), xmm_prev_block_cipher); 3766 } 3767 3768 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output 3769 __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1); 3770 __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2); 3771 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3); 3772 3773 __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); 3774 __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); 3775 __ jmp(L_multiBlock_loopTop[k]); 3776 3777 // registers used in the non-parallelized loops 3778 // xmm register assignments for the loops below 3779 const XMMRegister xmm_result = xmm0; 3780 const XMMRegister xmm_prev_block_cipher_save = xmm2; 3781 const XMMRegister xmm_key11 = xmm3; 3782 const XMMRegister xmm_key12 = xmm4; 3783 const XMMRegister key_tmp = xmm4; 3784 3785 __ BIND(L_singleBlock_loopTopHead[k]); 3786 if (k == 1) { 3787 __ addptr(rsp, 6 * wordSize); 3788 } else if (k == 2) { 3789 __ addptr(rsp, 10 * wordSize); 3790 } 3791 __ cmpptr(len_reg, 0); // any blocks left?? 
3792 __ jcc(Assembler::equal, L_exit); 3793 __ BIND(L_singleBlock_loopTopHead2[k]); 3794 if (k == 1) { 3795 load_key(xmm_key11, key, 0xb0); // 0xb0; 192-bit key goes upto 0xc0 3796 load_key(xmm_key12, key, 0xc0); // 0xc0; 192-bit key goes upto 0xc0 3797 } 3798 if (k == 2) { 3799 load_key(xmm_key11, key, 0xb0); // 0xb0; 256-bit key goes upto 0xe0 3800 } 3801 __ align(OptoLoopAlignment); 3802 __ BIND(L_singleBlock_loopTop[k]); 3803 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 3804 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector 3805 __ pxor(xmm_result, xmm_key_first); // do the aes dec rounds 3806 for (int rnum = 1; rnum <= 9 ; rnum++) { 3807 __ aesdec(xmm_result, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3808 } 3809 if (k == 1) { 3810 __ aesdec(xmm_result, xmm_key11); 3811 __ aesdec(xmm_result, xmm_key12); 3812 } 3813 if (k == 2) { 3814 __ aesdec(xmm_result, xmm_key11); 3815 load_key(key_tmp, key, 0xc0); 3816 __ aesdec(xmm_result, key_tmp); 3817 load_key(key_tmp, key, 0xd0); 3818 __ aesdec(xmm_result, key_tmp); 3819 load_key(key_tmp, key, 0xe0); 3820 __ aesdec(xmm_result, key_tmp); 3821 } 3822 3823 __ aesdeclast(xmm_result, xmm_key_last); // xmm15 always came from key+0 3824 __ pxor(xmm_result, xmm_prev_block_cipher); // xor with the current r vector 3825 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3826 // no need to store r to memory until we exit 3827 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block 3828 __ addptr(pos, AESBlockSize); 3829 __ subptr(len_reg, AESBlockSize); 3830 __ jcc(Assembler::notEqual, L_singleBlock_loopTop[k]); 3831 if (k != 2) { 3832 __ jmp(L_exit); 3833 } 3834 } //for 128/192/256 3835 3836 __ BIND(L_exit); 3837 __ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object 3838 __ pop(rbx); 3839 #ifdef _WIN64 3840 __ movl(rax, len_mem); 3841 #else 3842 __ pop(rax); // return length 3843 #endif 3844 __ leave(); // required for proper stackwalking of RuntimeStub frame 3845 __ ret(0); 3846 return start; 3847 } 3848 3849 address generate_electronicCodeBook_encryptAESCrypt() { 3850 __ align(CodeEntryAlignment); 3851 StubCodeMark mark(this, "StubRoutines", "electronicCodeBook_encryptAESCrypt"); 3852 address start = __ pc(); 3853 const Register from = c_rarg0; // source array address 3854 const Register to = c_rarg1; // destination array address 3855 const Register key = c_rarg2; // key array address 3856 const Register len = c_rarg3; // src len (must be multiple of blocksize 16) 3857 __ enter(); // required for proper stackwalking of RuntimeStub frame 3858 __ aesecb_encrypt(from, to, key, len); 3859 __ leave(); // required for proper stackwalking of RuntimeStub frame 3860 __ ret(0); 3861 return start; 3862 } 3863 3864 address generate_electronicCodeBook_decryptAESCrypt() { 3865 __ align(CodeEntryAlignment); 3866 StubCodeMark mark(this, "StubRoutines", "electronicCodeBook_decryptAESCrypt"); 3867 address start = __ pc(); 3868 const Register from = c_rarg0; // source array address 3869 const Register to = c_rarg1; // destination array address 3870 const Register key = c_rarg2; // key array address 3871 const Register len = c_rarg3; // src len (must be multiple of blocksize 16) 3872 __ enter(); // required for proper stackwalking of RuntimeStub frame 3873 __ aesecb_decrypt(from, to, key, len); 3874 __ 
leave(); // required for proper stackwalking of RuntimeStub frame
3875 __ ret(0);
3876 return start;
3877 }
3878
3879 address generate_upper_word_mask() {
3880 __ align(64);
3881 StubCodeMark mark(this, "StubRoutines", "upper_word_mask");
3882 address start = __ pc();
3883 __ emit_data64(0x0000000000000000, relocInfo::none);
3884 __ emit_data64(0xFFFFFFFF00000000, relocInfo::none);
3885 return start;
3886 }
3887
3888 address generate_shuffle_byte_flip_mask() {
3889 __ align(64);
3890 StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask");
3891 address start = __ pc();
3892 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
3893 __ emit_data64(0x0001020304050607, relocInfo::none);
3894 return start;
3895 }
3896
3897 // ofs and limit are used for the multi-block byte array.
3898 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
3899 address generate_sha1_implCompress(bool multi_block, const char *name) {
3900 __ align(CodeEntryAlignment);
3901 StubCodeMark mark(this, "StubRoutines", name);
3902 address start = __ pc();
3903
3904 Register buf = c_rarg0;
3905 Register state = c_rarg1;
3906 Register ofs = c_rarg2;
3907 Register limit = c_rarg3;
3908
3909 const XMMRegister abcd = xmm0;
3910 const XMMRegister e0 = xmm1;
3911 const XMMRegister e1 = xmm2;
3912 const XMMRegister msg0 = xmm3;
3913
3914 const XMMRegister msg1 = xmm4;
3915 const XMMRegister msg2 = xmm5;
3916 const XMMRegister msg3 = xmm6;
3917 const XMMRegister shuf_mask = xmm7;
3918
3919 __ enter();
3920
3921 __ subptr(rsp, 4 * wordSize);
3922
3923 __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask,
3924 buf, state, ofs, limit, rsp, multi_block);
3925
3926 __ addptr(rsp, 4 * wordSize);
3927
3928 __ leave();
3929 __ ret(0);
3930 return start;
3931 }
3932
3933 address generate_pshuffle_byte_flip_mask() {
3934 __ align(64);
3935 StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask");
3936 address start = __ pc();
3937 __ emit_data64(0x0405060700010203, relocInfo::none);
3938 __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
3939
3940 if (VM_Version::supports_avx2()) {
3941 __ emit_data64(0x0405060700010203, relocInfo::none); // second copy
3942 __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
3943 // _SHUF_00BA
3944 __ emit_data64(0x0b0a090803020100, relocInfo::none);
3945 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3946 __ emit_data64(0x0b0a090803020100, relocInfo::none);
3947 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3948 // _SHUF_DC00
3949 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3950 __ emit_data64(0x0b0a090803020100, relocInfo::none);
3951 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3952 __ emit_data64(0x0b0a090803020100, relocInfo::none);
3953 }
3954
3955 return start;
3956 }
3957
3958 // Mask for byte-swapping a pair of qwords in an XMM register using (v)pshufb.
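// For example, applying (v)pshufb with the mask emitted below reverses
// the bytes within each 8-byte lane, converting little-endian qwords to
// the big-endian order SHA-512 expects (a sketch, not emitted code):
//
//   in:  b0 b1 b2 b3 b4 b5 b6 b7 | b8 b9 ...
//   out: b7 b6 b5 b4 b3 b2 b1 b0 | b15 b14 ...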
3959 address generate_pshuffle_byte_flip_mask_sha512() {
3960 __ align(32);
3961 StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask_sha512");
3962 address start = __ pc();
3963 if (VM_Version::supports_avx2()) {
3964 __ emit_data64(0x0001020304050607, relocInfo::none); // PSHUFFLE_BYTE_FLIP_MASK
3965 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
3966 __ emit_data64(0x1011121314151617, relocInfo::none);
3967 __ emit_data64(0x18191a1b1c1d1e1f, relocInfo::none);
3968 __ emit_data64(0x0000000000000000, relocInfo::none); // MASK_YMM_LO
3969 __ emit_data64(0x0000000000000000, relocInfo::none);
3970 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3971 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3972 }
3973
3974 return start;
3975 }
3976
3977 // ofs and limit are used for the multi-block byte array.
3978 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
3979 address generate_sha256_implCompress(bool multi_block, const char *name) {
3980 assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), "");
3981 __ align(CodeEntryAlignment);
3982 StubCodeMark mark(this, "StubRoutines", name);
3983 address start = __ pc();
3984
3985 Register buf = c_rarg0;
3986 Register state = c_rarg1;
3987 Register ofs = c_rarg2;
3988 Register limit = c_rarg3;
3989
3990 const XMMRegister msg = xmm0;
3991 const XMMRegister state0 = xmm1;
3992 const XMMRegister state1 = xmm2;
3993 const XMMRegister msgtmp0 = xmm3;
3994
3995 const XMMRegister msgtmp1 = xmm4;
3996 const XMMRegister msgtmp2 = xmm5;
3997 const XMMRegister msgtmp3 = xmm6;
3998 const XMMRegister msgtmp4 = xmm7;
3999
4000 const XMMRegister shuf_mask = xmm8;
4001
4002 __ enter();
4003
4004 __ subptr(rsp, 4 * wordSize);
4005
4006 if (VM_Version::supports_sha()) {
4007 __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
4008 buf, state, ofs, limit, rsp, multi_block, shuf_mask);
4009 } else if (VM_Version::supports_avx2()) {
4010 __ sha256_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
4011 buf, state, ofs, limit, rsp, multi_block, shuf_mask);
4012 }
4013 __ addptr(rsp, 4 * wordSize);
4014 __ vzeroupper();
4015 __ leave();
4016 __ ret(0);
4017 return start;
4018 }
4019
4020 address generate_sha512_implCompress(bool multi_block, const char *name) {
4021 assert(VM_Version::supports_avx2(), "");
4022 assert(VM_Version::supports_bmi2(), "");
4023 __ align(CodeEntryAlignment);
4024 StubCodeMark mark(this, "StubRoutines", name);
4025 address start = __ pc();
4026
4027 Register buf = c_rarg0;
4028 Register state = c_rarg1;
4029 Register ofs = c_rarg2;
4030 Register limit = c_rarg3;
4031
4032 const XMMRegister msg = xmm0;
4033 const XMMRegister state0 = xmm1;
4034 const XMMRegister state1 = xmm2;
4035 const XMMRegister msgtmp0 = xmm3;
4036 const XMMRegister msgtmp1 = xmm4;
4037 const XMMRegister msgtmp2 = xmm5;
4038 const XMMRegister msgtmp3 = xmm6;
4039 const XMMRegister msgtmp4 = xmm7;
4040
4041 const XMMRegister shuf_mask = xmm8;
4042
4043 __ enter();
4044
4045 __ sha512_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
4046 buf, state, ofs, limit, rsp, multi_block, shuf_mask);
4047
4048 __ vzeroupper();
4049 __ leave();
4050 __ ret(0);
4051 return start;
4052 }
4053
4054 // This mask is used for incrementing the counter values (linc0, linc4, etc.)
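// Layout of the table emitted below, in 16-byte lanes relative to the
// start address (each 64-byte row is consumed as one 512-bit operand):
//   +0   : byte-swap mask for the big-endian counter, repeated 4x
//   +64  : linc0  = per-lane low-qword increments {0, 1, 2, 3}
//   +128 : linc4  = {4, 4, 4, 4}
//   +192 : linc8  = {8, 8, 8, 8}
//   +256 : linc32 = {32, 32, 32, 32}
//   +320 : linc16 = {16, 16, 16, 16}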
4055 address counter_mask_addr() { 4056 __ align(64); 4057 StubCodeMark mark(this, "StubRoutines", "counter_mask_addr"); 4058 address start = __ pc(); 4059 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);//lbswapmask 4060 __ emit_data64(0x0001020304050607, relocInfo::none); 4061 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 4062 __ emit_data64(0x0001020304050607, relocInfo::none); 4063 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 4064 __ emit_data64(0x0001020304050607, relocInfo::none); 4065 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 4066 __ emit_data64(0x0001020304050607, relocInfo::none); 4067 __ emit_data64(0x0000000000000000, relocInfo::none);//linc0 = counter_mask_addr+64 4068 __ emit_data64(0x0000000000000000, relocInfo::none); 4069 __ emit_data64(0x0000000000000001, relocInfo::none);//counter_mask_addr() + 80 4070 __ emit_data64(0x0000000000000000, relocInfo::none); 4071 __ emit_data64(0x0000000000000002, relocInfo::none); 4072 __ emit_data64(0x0000000000000000, relocInfo::none); 4073 __ emit_data64(0x0000000000000003, relocInfo::none); 4074 __ emit_data64(0x0000000000000000, relocInfo::none); 4075 __ emit_data64(0x0000000000000004, relocInfo::none);//linc4 = counter_mask_addr() + 128 4076 __ emit_data64(0x0000000000000000, relocInfo::none); 4077 __ emit_data64(0x0000000000000004, relocInfo::none); 4078 __ emit_data64(0x0000000000000000, relocInfo::none); 4079 __ emit_data64(0x0000000000000004, relocInfo::none); 4080 __ emit_data64(0x0000000000000000, relocInfo::none); 4081 __ emit_data64(0x0000000000000004, relocInfo::none); 4082 __ emit_data64(0x0000000000000000, relocInfo::none); 4083 __ emit_data64(0x0000000000000008, relocInfo::none);//linc8 = counter_mask_addr() + 192 4084 __ emit_data64(0x0000000000000000, relocInfo::none); 4085 __ emit_data64(0x0000000000000008, relocInfo::none); 4086 __ emit_data64(0x0000000000000000, relocInfo::none); 4087 __ emit_data64(0x0000000000000008, relocInfo::none); 4088 __ emit_data64(0x0000000000000000, relocInfo::none); 4089 __ emit_data64(0x0000000000000008, relocInfo::none); 4090 __ emit_data64(0x0000000000000000, relocInfo::none); 4091 __ emit_data64(0x0000000000000020, relocInfo::none);//linc32 = counter_mask_addr() + 256 4092 __ emit_data64(0x0000000000000000, relocInfo::none); 4093 __ emit_data64(0x0000000000000020, relocInfo::none); 4094 __ emit_data64(0x0000000000000000, relocInfo::none); 4095 __ emit_data64(0x0000000000000020, relocInfo::none); 4096 __ emit_data64(0x0000000000000000, relocInfo::none); 4097 __ emit_data64(0x0000000000000020, relocInfo::none); 4098 __ emit_data64(0x0000000000000000, relocInfo::none); 4099 __ emit_data64(0x0000000000000010, relocInfo::none);//linc16 = counter_mask_addr() + 320 4100 __ emit_data64(0x0000000000000000, relocInfo::none); 4101 __ emit_data64(0x0000000000000010, relocInfo::none); 4102 __ emit_data64(0x0000000000000000, relocInfo::none); 4103 __ emit_data64(0x0000000000000010, relocInfo::none); 4104 __ emit_data64(0x0000000000000000, relocInfo::none); 4105 __ emit_data64(0x0000000000000010, relocInfo::none); 4106 __ emit_data64(0x0000000000000000, relocInfo::none); 4107 return start; 4108 } 4109 4110 // Vector AES Counter implementation 4111 address generate_counterMode_VectorAESCrypt() { 4112 __ align(CodeEntryAlignment); 4113 StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt"); 4114 address start = __ pc(); 4115 const Register from = c_rarg0; // source array address 4116 const Register to = c_rarg1; // destination array address 4117 const Register key = 
c_rarg2; // key array address r8 4118 const Register counter = c_rarg3; // counter byte array initialized from counter array address 4119 // and updated with the incremented counter in the end 4120 #ifndef _WIN64 4121 const Register len_reg = c_rarg4; 4122 const Register saved_encCounter_start = c_rarg5; 4123 const Register used_addr = r10; 4124 const Address used_mem(rbp, 2 * wordSize); 4125 const Register used = r11; 4126 #else 4127 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 4128 const Address saved_encCounter_mem(rbp, 7 * wordSize); // saved encrypted counter is on stack on Win64 4129 const Address used_mem(rbp, 8 * wordSize); // used length is on stack on Win64 4130 const Register len_reg = r10; // pick the first volatile windows register 4131 const Register saved_encCounter_start = r11; 4132 const Register used_addr = r13; 4133 const Register used = r14; 4134 #endif 4135 __ enter(); 4136 // Save state before entering routine 4137 __ push(r12); 4138 __ push(r13); 4139 __ push(r14); 4140 __ push(r15); 4141 #ifdef _WIN64 4142 // on win64, fill len_reg from stack position 4143 __ movl(len_reg, len_mem); 4144 __ movptr(saved_encCounter_start, saved_encCounter_mem); 4145 __ movptr(used_addr, used_mem); 4146 __ movl(used, Address(used_addr, 0)); 4147 #else 4148 __ push(len_reg); // Save 4149 __ movptr(used_addr, used_mem); 4150 __ movl(used, Address(used_addr, 0)); 4151 #endif 4152 __ push(rbx); 4153 __ aesctr_encrypt(from, to, key, counter, len_reg, used, used_addr, saved_encCounter_start); 4154 // Restore state before leaving routine 4155 __ pop(rbx); 4156 #ifdef _WIN64 4157 __ movl(rax, len_mem); // return length 4158 #else 4159 __ pop(rax); // return length 4160 #endif 4161 __ pop(r15); 4162 __ pop(r14); 4163 __ pop(r13); 4164 __ pop(r12); 4165 4166 __ leave(); // required for proper stackwalking of RuntimeStub frame 4167 __ ret(0); 4168 return start; 4169 } 4170 4171 // This is a version of CTR/AES crypt which does 6 blocks in a loop at a time 4172 // to hide instruction latency 4173 // 4174 // Arguments: 4175 // 4176 // Inputs: 4177 // c_rarg0 - source byte array address 4178 // c_rarg1 - destination byte array address 4179 // c_rarg2 - K (key) in little endian int array 4180 // c_rarg3 - counter vector byte array address 4181 // Linux 4182 // c_rarg4 - input length 4183 // c_rarg5 - saved encryptedCounter start 4184 // rbp + 6 * wordSize - saved used length 4185 // Windows 4186 // rbp + 6 * wordSize - input length 4187 // rbp + 7 * wordSize - saved encryptedCounter start 4188 // rbp + 8 * wordSize - saved used length 4189 // 4190 // Output: 4191 // rax - input length 4192 // 4193 address generate_counterMode_AESCrypt_Parallel() { 4194 assert(UseAES, "need AES instructions and misaligned SSE support"); 4195 __ align(CodeEntryAlignment); 4196 StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt"); 4197 address start = __ pc(); 4198 const Register from = c_rarg0; // source array address 4199 const Register to = c_rarg1; // destination array address 4200 const Register key = c_rarg2; // key array address 4201 const Register counter = c_rarg3; // counter byte array initialized from counter array address 4202 // and updated with the incremented counter in the end 4203 #ifndef _WIN64 4204 const Register len_reg = c_rarg4; 4205 const Register saved_encCounter_start = c_rarg5; 4206 const Register used_addr = r10; 4207 const Address used_mem(rbp, 2 * wordSize); 4208 const Register used = r11; 4209 #else 4210 const Address len_mem(rbp, 6 * wordSize); // 
length is on stack on Win64
4211 const Address saved_encCounter_mem(rbp, 7 * wordSize); // saved encrypted counter is on stack on Win64
4212 const Address used_mem(rbp, 8 * wordSize); // used length is on stack on Win64
4213 const Register len_reg = r10; // pick the first volatile windows register
4214 const Register saved_encCounter_start = r11;
4215 const Register used_addr = r13;
4216 const Register used = r14;
4217 #endif
4218 const Register pos = rax;
4219
4220 const int PARALLEL_FACTOR = 6;
4221 const XMMRegister xmm_counter_shuf_mask = xmm0;
4222 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front
4223 const XMMRegister xmm_curr_counter = xmm2;
4224
4225 const XMMRegister xmm_key_tmp0 = xmm3;
4226 const XMMRegister xmm_key_tmp1 = xmm4;
4227
4228 // registers holding the six results in the parallelized loop
4229 const XMMRegister xmm_result0 = xmm5;
4230 const XMMRegister xmm_result1 = xmm6;
4231 const XMMRegister xmm_result2 = xmm7;
4232 const XMMRegister xmm_result3 = xmm8;
4233 const XMMRegister xmm_result4 = xmm9;
4234 const XMMRegister xmm_result5 = xmm10;
4235
4236 const XMMRegister xmm_from0 = xmm11;
4237 const XMMRegister xmm_from1 = xmm12;
4238 const XMMRegister xmm_from2 = xmm13;
4239 const XMMRegister xmm_from3 = xmm14; // the last one is xmm14; it must be preserved on WIN64
4240 const XMMRegister xmm_from4 = xmm3; // reuse xmm3 and xmm4; xmm_key_tmp0/1 are not needed while the input text is loaded
4241 const XMMRegister xmm_from5 = xmm4;
4242
4243 // for key_128, key_192, key_256
4244 const int rounds[3] = {10, 12, 14};
4245 Label L_exit_preLoop, L_preLoop_start;
4246 Label L_multiBlock_loopTop[3];
4247 Label L_singleBlockLoopTop[3];
4248 Label L__incCounter[3][6]; // for 6 blocks
4249 Label L__incCounter_single[3]; // for single block, key128, key192, key256
4250 Label L_processTail_insr[3], L_processTail_4_insr[3], L_processTail_2_insr[3], L_processTail_1_insr[3], L_processTail_exit_insr[3];
4251 Label L_processTail_4_extr[3], L_processTail_2_extr[3], L_processTail_1_extr[3], L_processTail_exit_extr[3];
4252
4253 Label L_exit;
4254
4255 __ enter(); // required for proper stackwalking of RuntimeStub frame
4256
4257 #ifdef _WIN64
4258 // allocate spill slots for r13, r14
4259 enum {
4260 saved_r13_offset,
4261 saved_r14_offset
4262 };
4263 __ subptr(rsp, 2 * wordSize);
4264 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
4265 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);
4266
4267 // on win64, fill len_reg from stack position
4268 __ movl(len_reg, len_mem);
4269 __ movptr(saved_encCounter_start, saved_encCounter_mem);
4270 __ movptr(used_addr, used_mem);
4271 __ movl(used, Address(used_addr, 0));
4272 #else
4273 __ push(len_reg); // Save
4274 __ movptr(used_addr, used_mem);
4275 __ movl(used, Address(used_addr, 0));
4276 #endif
4277
4278 __ push(rbx); // Save RBX
4279 __ movdqu(xmm_curr_counter, Address(counter, 0x00)); // initialize counter with initial counter
4280 __ movdqu(xmm_counter_shuf_mask, ExternalAddress(StubRoutines::x86::counter_shuffle_mask_addr()), pos); // pos as scratch
4281 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); // counter is shuffled
4282 __ movptr(pos, 0);
4283
4284 // Use the partially used encrypted counter from the last invocation
4285 __ BIND(L_preLoop_start);
4286 __ cmpptr(used, 16);
4287 __ jcc(Assembler::aboveEqual, L_exit_preLoop);
4288 __ cmpptr(len_reg, 0);
4289 __ jcc(Assembler::lessEqual, L_exit_preLoop);
4290 __ movb(rbx, Address(saved_encCounter_start, used));
4291 __ xorb(rbx, Address(from, pos));
4292 __ movb(Address(to, pos), rbx); 4293 __ addptr(pos, 1); 4294 __ addptr(used, 1); 4295 __ subptr(len_reg, 1); 4296 4297 __ jmp(L_preLoop_start); 4298 4299 __ BIND(L_exit_preLoop); 4300 __ movl(Address(used_addr, 0), used); 4301 4302 // key length could be only {11, 13, 15} * 4 = {44, 52, 60} 4303 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()), rbx); // rbx as scratch 4304 __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 4305 __ cmpl(rbx, 52); 4306 __ jcc(Assembler::equal, L_multiBlock_loopTop[1]); 4307 __ cmpl(rbx, 60); 4308 __ jcc(Assembler::equal, L_multiBlock_loopTop[2]); 4309 4310 #define CTR_DoSix(opc, src_reg) \ 4311 __ opc(xmm_result0, src_reg); \ 4312 __ opc(xmm_result1, src_reg); \ 4313 __ opc(xmm_result2, src_reg); \ 4314 __ opc(xmm_result3, src_reg); \ 4315 __ opc(xmm_result4, src_reg); \ 4316 __ opc(xmm_result5, src_reg); 4317 4318 // k == 0 : generate code for key_128 4319 // k == 1 : generate code for key_192 4320 // k == 2 : generate code for key_256 4321 for (int k = 0; k < 3; ++k) { 4322 //multi blocks starts here 4323 __ align(OptoLoopAlignment); 4324 __ BIND(L_multiBlock_loopTop[k]); 4325 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least PARALLEL_FACTOR blocks left 4326 __ jcc(Assembler::less, L_singleBlockLoopTop[k]); 4327 load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask); 4328 4329 //load, then increase counters 4330 CTR_DoSix(movdqa, xmm_curr_counter); 4331 inc_counter(rbx, xmm_result1, 0x01, L__incCounter[k][0]); 4332 inc_counter(rbx, xmm_result2, 0x02, L__incCounter[k][1]); 4333 inc_counter(rbx, xmm_result3, 0x03, L__incCounter[k][2]); 4334 inc_counter(rbx, xmm_result4, 0x04, L__incCounter[k][3]); 4335 inc_counter(rbx, xmm_result5, 0x05, L__incCounter[k][4]); 4336 inc_counter(rbx, xmm_curr_counter, 0x06, L__incCounter[k][5]); 4337 CTR_DoSix(pshufb, xmm_counter_shuf_mask); // after increased, shuffled counters back for PXOR 4338 CTR_DoSix(pxor, xmm_key_tmp0); //PXOR with Round 0 key 4339 4340 //load two ROUND_KEYs at a time 4341 for (int i = 1; i < rounds[k]; ) { 4342 load_key(xmm_key_tmp1, key, (0x10 * i), xmm_key_shuf_mask); 4343 load_key(xmm_key_tmp0, key, (0x10 * (i+1)), xmm_key_shuf_mask); 4344 CTR_DoSix(aesenc, xmm_key_tmp1); 4345 i++; 4346 if (i != rounds[k]) { 4347 CTR_DoSix(aesenc, xmm_key_tmp0); 4348 } else { 4349 CTR_DoSix(aesenclast, xmm_key_tmp0); 4350 } 4351 i++; 4352 } 4353 4354 // get next PARALLEL_FACTOR blocks into xmm_result registers 4355 __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 4356 __ movdqu(xmm_from1, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 4357 __ movdqu(xmm_from2, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 4358 __ movdqu(xmm_from3, Address(from, pos, Address::times_1, 3 * AESBlockSize)); 4359 __ movdqu(xmm_from4, Address(from, pos, Address::times_1, 4 * AESBlockSize)); 4360 __ movdqu(xmm_from5, Address(from, pos, Address::times_1, 5 * AESBlockSize)); 4361 4362 __ pxor(xmm_result0, xmm_from0); 4363 __ pxor(xmm_result1, xmm_from1); 4364 __ pxor(xmm_result2, xmm_from2); 4365 __ pxor(xmm_result3, xmm_from3); 4366 __ pxor(xmm_result4, xmm_from4); 4367 __ pxor(xmm_result5, xmm_from5); 4368 4369 // store 6 results into the next 64 bytes of output 4370 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); 4371 __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1); 4372 __ movdqu(Address(to, pos, 
Address::times_1, 2 * AESBlockSize), xmm_result2); 4373 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3); 4374 __ movdqu(Address(to, pos, Address::times_1, 4 * AESBlockSize), xmm_result4); 4375 __ movdqu(Address(to, pos, Address::times_1, 5 * AESBlockSize), xmm_result5); 4376 4377 __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); // increase the length of crypt text 4378 __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // decrease the remaining length 4379 __ jmp(L_multiBlock_loopTop[k]); 4380 4381 // singleBlock starts here 4382 __ align(OptoLoopAlignment); 4383 __ BIND(L_singleBlockLoopTop[k]); 4384 __ cmpptr(len_reg, 0); 4385 __ jcc(Assembler::lessEqual, L_exit); 4386 load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask); 4387 __ movdqa(xmm_result0, xmm_curr_counter); 4388 inc_counter(rbx, xmm_curr_counter, 0x01, L__incCounter_single[k]); 4389 __ pshufb(xmm_result0, xmm_counter_shuf_mask); 4390 __ pxor(xmm_result0, xmm_key_tmp0); 4391 for (int i = 1; i < rounds[k]; i++) { 4392 load_key(xmm_key_tmp0, key, (0x10 * i), xmm_key_shuf_mask); 4393 __ aesenc(xmm_result0, xmm_key_tmp0); 4394 } 4395 load_key(xmm_key_tmp0, key, (rounds[k] * 0x10), xmm_key_shuf_mask); 4396 __ aesenclast(xmm_result0, xmm_key_tmp0); 4397 __ cmpptr(len_reg, AESBlockSize); 4398 __ jcc(Assembler::less, L_processTail_insr[k]); 4399 __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 4400 __ pxor(xmm_result0, xmm_from0); 4401 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); 4402 __ addptr(pos, AESBlockSize); 4403 __ subptr(len_reg, AESBlockSize); 4404 __ jmp(L_singleBlockLoopTop[k]); 4405 __ BIND(L_processTail_insr[k]); // Process the tail part of the input array 4406 __ addptr(pos, len_reg); // 1. Insert bytes from src array into xmm_from0 register 4407 __ testptr(len_reg, 8); 4408 __ jcc(Assembler::zero, L_processTail_4_insr[k]); 4409 __ subptr(pos,8); 4410 __ pinsrq(xmm_from0, Address(from, pos), 0); 4411 __ BIND(L_processTail_4_insr[k]); 4412 __ testptr(len_reg, 4); 4413 __ jcc(Assembler::zero, L_processTail_2_insr[k]); 4414 __ subptr(pos,4); 4415 __ pslldq(xmm_from0, 4); 4416 __ pinsrd(xmm_from0, Address(from, pos), 0); 4417 __ BIND(L_processTail_2_insr[k]); 4418 __ testptr(len_reg, 2); 4419 __ jcc(Assembler::zero, L_processTail_1_insr[k]); 4420 __ subptr(pos, 2); 4421 __ pslldq(xmm_from0, 2); 4422 __ pinsrw(xmm_from0, Address(from, pos), 0); 4423 __ BIND(L_processTail_1_insr[k]); 4424 __ testptr(len_reg, 1); 4425 __ jcc(Assembler::zero, L_processTail_exit_insr[k]); 4426 __ subptr(pos, 1); 4427 __ pslldq(xmm_from0, 1); 4428 __ pinsrb(xmm_from0, Address(from, pos), 0); 4429 __ BIND(L_processTail_exit_insr[k]); 4430 4431 __ movdqu(Address(saved_encCounter_start, 0), xmm_result0); // 2. Perform pxor of the encrypted counter and plaintext Bytes. 4432 __ pxor(xmm_result0, xmm_from0); // Also the encrypted counter is saved for next invocation. 4433 4434 __ testptr(len_reg, 8); 4435 __ jcc(Assembler::zero, L_processTail_4_extr[k]); // 3. Extract bytes from xmm_result0 into the dest. 
array 4436 __ pextrq(Address(to, pos), xmm_result0, 0); 4437 __ psrldq(xmm_result0, 8); 4438 __ addptr(pos, 8); 4439 __ BIND(L_processTail_4_extr[k]); 4440 __ testptr(len_reg, 4); 4441 __ jcc(Assembler::zero, L_processTail_2_extr[k]); 4442 __ pextrd(Address(to, pos), xmm_result0, 0); 4443 __ psrldq(xmm_result0, 4); 4444 __ addptr(pos, 4); 4445 __ BIND(L_processTail_2_extr[k]); 4446 __ testptr(len_reg, 2); 4447 __ jcc(Assembler::zero, L_processTail_1_extr[k]); 4448 __ pextrw(Address(to, pos), xmm_result0, 0); 4449 __ psrldq(xmm_result0, 2); 4450 __ addptr(pos, 2); 4451 __ BIND(L_processTail_1_extr[k]); 4452 __ testptr(len_reg, 1); 4453 __ jcc(Assembler::zero, L_processTail_exit_extr[k]); 4454 __ pextrb(Address(to, pos), xmm_result0, 0); 4455 4456 __ BIND(L_processTail_exit_extr[k]); 4457 __ movl(Address(used_addr, 0), len_reg); 4458 __ jmp(L_exit); 4459 4460 } 4461 4462 __ BIND(L_exit); 4463 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); //counter is shuffled back. 4464 __ movdqu(Address(counter, 0), xmm_curr_counter); //save counter back 4465 __ pop(rbx); // pop the saved RBX. 4466 #ifdef _WIN64 4467 __ movl(rax, len_mem); 4468 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 4469 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 4470 __ addptr(rsp, 2 * wordSize); 4471 #else 4472 __ pop(rax); // return 'len' 4473 #endif 4474 __ leave(); // required for proper stackwalking of RuntimeStub frame 4475 __ ret(0); 4476 return start; 4477 } 4478 4479 void roundDec(XMMRegister xmm_reg) { 4480 __ vaesdec(xmm1, xmm1, xmm_reg, Assembler::AVX_512bit); 4481 __ vaesdec(xmm2, xmm2, xmm_reg, Assembler::AVX_512bit); 4482 __ vaesdec(xmm3, xmm3, xmm_reg, Assembler::AVX_512bit); 4483 __ vaesdec(xmm4, xmm4, xmm_reg, Assembler::AVX_512bit); 4484 __ vaesdec(xmm5, xmm5, xmm_reg, Assembler::AVX_512bit); 4485 __ vaesdec(xmm6, xmm6, xmm_reg, Assembler::AVX_512bit); 4486 __ vaesdec(xmm7, xmm7, xmm_reg, Assembler::AVX_512bit); 4487 __ vaesdec(xmm8, xmm8, xmm_reg, Assembler::AVX_512bit); 4488 } 4489 4490 void roundDeclast(XMMRegister xmm_reg) { 4491 __ vaesdeclast(xmm1, xmm1, xmm_reg, Assembler::AVX_512bit); 4492 __ vaesdeclast(xmm2, xmm2, xmm_reg, Assembler::AVX_512bit); 4493 __ vaesdeclast(xmm3, xmm3, xmm_reg, Assembler::AVX_512bit); 4494 __ vaesdeclast(xmm4, xmm4, xmm_reg, Assembler::AVX_512bit); 4495 __ vaesdeclast(xmm5, xmm5, xmm_reg, Assembler::AVX_512bit); 4496 __ vaesdeclast(xmm6, xmm6, xmm_reg, Assembler::AVX_512bit); 4497 __ vaesdeclast(xmm7, xmm7, xmm_reg, Assembler::AVX_512bit); 4498 __ vaesdeclast(xmm8, xmm8, xmm_reg, Assembler::AVX_512bit); 4499 } 4500 4501 void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask = NULL) { 4502 __ movdqu(xmmdst, Address(key, offset)); 4503 if (xmm_shuf_mask != NULL) { 4504 __ pshufb(xmmdst, xmm_shuf_mask); 4505 } else { 4506 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 4507 } 4508 __ evshufi64x2(xmmdst, xmmdst, xmmdst, 0x0, Assembler::AVX_512bit); 4509 4510 } 4511 4512 address generate_cipherBlockChaining_decryptVectorAESCrypt() { 4513 assert(VM_Version::supports_avx512_vaes(), "need AES instructions and misaligned SSE support"); 4514 __ align(CodeEntryAlignment); 4515 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 4516 address start = __ pc(); 4517 4518 const Register from = c_rarg0; // source array address 4519 const Register to = c_rarg1; // destination array address 4520 const Register key = c_rarg2; // key array address 4521 const Register rvec 
= c_rarg3; // r byte array initialized from initvector array address 4522 // and left with the results of the last encryption block 4523 #ifndef _WIN64 4524 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 4525 #else 4526 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 4527 const Register len_reg = r11; // pick the volatile windows register 4528 #endif 4529 4530 Label Loop, Loop1, L_128, L_256, L_192, KEY_192, KEY_256, Loop2, Lcbc_dec_rem_loop, 4531 Lcbc_dec_rem_last, Lcbc_dec_ret, Lcbc_dec_rem, Lcbc_exit; 4532 4533 __ enter(); 4534 4535 #ifdef _WIN64 4536 // on win64, fill len_reg from stack position 4537 __ movl(len_reg, len_mem); 4538 #else 4539 __ push(len_reg); // Save 4540 #endif 4541 __ push(rbx); 4542 __ vzeroupper(); 4543 4544 // Temporary variable declaration for swapping key bytes 4545 const XMMRegister xmm_key_shuf_mask = xmm1; 4546 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 4547 4548 // Calculate number of rounds from key size: 44 for 10-rounds, 52 for 12-rounds, 60 for 14-rounds 4549 const Register rounds = rbx; 4550 __ movl(rounds, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 4551 4552 const XMMRegister IV = xmm0; 4553 // Load IV and broadcast value to 512-bits 4554 __ evbroadcasti64x2(IV, Address(rvec, 0), Assembler::AVX_512bit); 4555 4556 // Temporary variables for storing round keys 4557 const XMMRegister RK0 = xmm30; 4558 const XMMRegister RK1 = xmm9; 4559 const XMMRegister RK2 = xmm18; 4560 const XMMRegister RK3 = xmm19; 4561 const XMMRegister RK4 = xmm20; 4562 const XMMRegister RK5 = xmm21; 4563 const XMMRegister RK6 = xmm22; 4564 const XMMRegister RK7 = xmm23; 4565 const XMMRegister RK8 = xmm24; 4566 const XMMRegister RK9 = xmm25; 4567 const XMMRegister RK10 = xmm26; 4568 4569 // Load and shuffle key 4570 // the java expanded key ordering is rotated one position from what we want 4571 // so we start from 1*16 here and hit 0*16 last 4572 ev_load_key(RK1, key, 1 * 16, xmm_key_shuf_mask); 4573 ev_load_key(RK2, key, 2 * 16, xmm_key_shuf_mask); 4574 ev_load_key(RK3, key, 3 * 16, xmm_key_shuf_mask); 4575 ev_load_key(RK4, key, 4 * 16, xmm_key_shuf_mask); 4576 ev_load_key(RK5, key, 5 * 16, xmm_key_shuf_mask); 4577 ev_load_key(RK6, key, 6 * 16, xmm_key_shuf_mask); 4578 ev_load_key(RK7, key, 7 * 16, xmm_key_shuf_mask); 4579 ev_load_key(RK8, key, 8 * 16, xmm_key_shuf_mask); 4580 ev_load_key(RK9, key, 9 * 16, xmm_key_shuf_mask); 4581 ev_load_key(RK10, key, 10 * 16, xmm_key_shuf_mask); 4582 ev_load_key(RK0, key, 0*16, xmm_key_shuf_mask); 4583 4584 // Variables for storing source cipher text 4585 const XMMRegister S0 = xmm10; 4586 const XMMRegister S1 = xmm11; 4587 const XMMRegister S2 = xmm12; 4588 const XMMRegister S3 = xmm13; 4589 const XMMRegister S4 = xmm14; 4590 const XMMRegister S5 = xmm15; 4591 const XMMRegister S6 = xmm16; 4592 const XMMRegister S7 = xmm17; 4593 4594 // Variables for storing decrypted text 4595 const XMMRegister B0 = xmm1; 4596 const XMMRegister B1 = xmm2; 4597 const XMMRegister B2 = xmm3; 4598 const XMMRegister B3 = xmm4; 4599 const XMMRegister B4 = xmm5; 4600 const XMMRegister B5 = xmm6; 4601 const XMMRegister B6 = xmm7; 4602 const XMMRegister B7 = xmm8; 4603 4604 __ cmpl(rounds, 44); 4605 __ jcc(Assembler::greater, KEY_192); 4606 __ jmp(Loop); 4607 4608 __ BIND(KEY_192); 4609 const XMMRegister RK11 = xmm27; 4610 const XMMRegister RK12 = xmm28; 4611 ev_load_key(RK11, key, 11*16, xmm_key_shuf_mask); 
4612 ev_load_key(RK12, key, 12*16, xmm_key_shuf_mask); 4613 4614 __ cmpl(rounds, 52); 4615 __ jcc(Assembler::greater, KEY_256); 4616 __ jmp(Loop); 4617 4618 __ BIND(KEY_256); 4619 const XMMRegister RK13 = xmm29; 4620 const XMMRegister RK14 = xmm31; 4621 ev_load_key(RK13, key, 13*16, xmm_key_shuf_mask); 4622 ev_load_key(RK14, key, 14*16, xmm_key_shuf_mask); 4623 4624 __ BIND(Loop); 4625 __ cmpl(len_reg, 512); 4626 __ jcc(Assembler::below, Lcbc_dec_rem); 4627 __ BIND(Loop1); 4628 __ subl(len_reg, 512); 4629 __ evmovdquq(S0, Address(from, 0 * 64), Assembler::AVX_512bit); 4630 __ evmovdquq(S1, Address(from, 1 * 64), Assembler::AVX_512bit); 4631 __ evmovdquq(S2, Address(from, 2 * 64), Assembler::AVX_512bit); 4632 __ evmovdquq(S3, Address(from, 3 * 64), Assembler::AVX_512bit); 4633 __ evmovdquq(S4, Address(from, 4 * 64), Assembler::AVX_512bit); 4634 __ evmovdquq(S5, Address(from, 5 * 64), Assembler::AVX_512bit); 4635 __ evmovdquq(S6, Address(from, 6 * 64), Assembler::AVX_512bit); 4636 __ evmovdquq(S7, Address(from, 7 * 64), Assembler::AVX_512bit); 4637 __ leaq(from, Address(from, 8 * 64)); 4638 4639 __ evpxorq(B0, S0, RK1, Assembler::AVX_512bit); 4640 __ evpxorq(B1, S1, RK1, Assembler::AVX_512bit); 4641 __ evpxorq(B2, S2, RK1, Assembler::AVX_512bit); 4642 __ evpxorq(B3, S3, RK1, Assembler::AVX_512bit); 4643 __ evpxorq(B4, S4, RK1, Assembler::AVX_512bit); 4644 __ evpxorq(B5, S5, RK1, Assembler::AVX_512bit); 4645 __ evpxorq(B6, S6, RK1, Assembler::AVX_512bit); 4646 __ evpxorq(B7, S7, RK1, Assembler::AVX_512bit); 4647 4648 __ evalignq(IV, S0, IV, 0x06); 4649 __ evalignq(S0, S1, S0, 0x06); 4650 __ evalignq(S1, S2, S1, 0x06); 4651 __ evalignq(S2, S3, S2, 0x06); 4652 __ evalignq(S3, S4, S3, 0x06); 4653 __ evalignq(S4, S5, S4, 0x06); 4654 __ evalignq(S5, S6, S5, 0x06); 4655 __ evalignq(S6, S7, S6, 0x06); 4656 4657 roundDec(RK2); 4658 roundDec(RK3); 4659 roundDec(RK4); 4660 roundDec(RK5); 4661 roundDec(RK6); 4662 roundDec(RK7); 4663 roundDec(RK8); 4664 roundDec(RK9); 4665 roundDec(RK10); 4666 4667 __ cmpl(rounds, 44); 4668 __ jcc(Assembler::belowEqual, L_128); 4669 roundDec(RK11); 4670 roundDec(RK12); 4671 4672 __ cmpl(rounds, 52); 4673 __ jcc(Assembler::belowEqual, L_192); 4674 roundDec(RK13); 4675 roundDec(RK14); 4676 4677 __ BIND(L_256); 4678 roundDeclast(RK0); 4679 __ jmp(Loop2); 4680 4681 __ BIND(L_128); 4682 roundDeclast(RK0); 4683 __ jmp(Loop2); 4684 4685 __ BIND(L_192); 4686 roundDeclast(RK0); 4687 4688 __ BIND(Loop2); 4689 __ evpxorq(B0, B0, IV, Assembler::AVX_512bit); 4690 __ evpxorq(B1, B1, S0, Assembler::AVX_512bit); 4691 __ evpxorq(B2, B2, S1, Assembler::AVX_512bit); 4692 __ evpxorq(B3, B3, S2, Assembler::AVX_512bit); 4693 __ evpxorq(B4, B4, S3, Assembler::AVX_512bit); 4694 __ evpxorq(B5, B5, S4, Assembler::AVX_512bit); 4695 __ evpxorq(B6, B6, S5, Assembler::AVX_512bit); 4696 __ evpxorq(B7, B7, S6, Assembler::AVX_512bit); 4697 __ evmovdquq(IV, S7, Assembler::AVX_512bit); 4698 4699 __ evmovdquq(Address(to, 0 * 64), B0, Assembler::AVX_512bit); 4700 __ evmovdquq(Address(to, 1 * 64), B1, Assembler::AVX_512bit); 4701 __ evmovdquq(Address(to, 2 * 64), B2, Assembler::AVX_512bit); 4702 __ evmovdquq(Address(to, 3 * 64), B3, Assembler::AVX_512bit); 4703 __ evmovdquq(Address(to, 4 * 64), B4, Assembler::AVX_512bit); 4704 __ evmovdquq(Address(to, 5 * 64), B5, Assembler::AVX_512bit); 4705 __ evmovdquq(Address(to, 6 * 64), B6, Assembler::AVX_512bit); 4706 __ evmovdquq(Address(to, 7 * 64), B7, Assembler::AVX_512bit); 4707 __ leaq(to, Address(to, 8 * 64)); 4708 __ jmp(Loop); 4709 4710 __ BIND(Lcbc_dec_rem); 
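// Fewer than 512 bytes remain, so fall back to one 16-byte block per
// iteration. Only the low 128-bit lane of the zmm registers matters
// from here on; the evshufi64x2 below moves the most recent ciphertext
// block (the running IV) into that lane.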
4711 __ evshufi64x2(IV, IV, IV, 0x03, Assembler::AVX_512bit); 4712 4713 __ BIND(Lcbc_dec_rem_loop); 4714 __ subl(len_reg, 16); 4715 __ jcc(Assembler::carrySet, Lcbc_dec_ret); 4716 4717 __ movdqu(S0, Address(from, 0)); 4718 __ evpxorq(B0, S0, RK1, Assembler::AVX_512bit); 4719 __ vaesdec(B0, B0, RK2, Assembler::AVX_512bit); 4720 __ vaesdec(B0, B0, RK3, Assembler::AVX_512bit); 4721 __ vaesdec(B0, B0, RK4, Assembler::AVX_512bit); 4722 __ vaesdec(B0, B0, RK5, Assembler::AVX_512bit); 4723 __ vaesdec(B0, B0, RK6, Assembler::AVX_512bit); 4724 __ vaesdec(B0, B0, RK7, Assembler::AVX_512bit); 4725 __ vaesdec(B0, B0, RK8, Assembler::AVX_512bit); 4726 __ vaesdec(B0, B0, RK9, Assembler::AVX_512bit); 4727 __ vaesdec(B0, B0, RK10, Assembler::AVX_512bit); 4728 __ cmpl(rounds, 44); 4729 __ jcc(Assembler::belowEqual, Lcbc_dec_rem_last); 4730 4731 __ vaesdec(B0, B0, RK11, Assembler::AVX_512bit); 4732 __ vaesdec(B0, B0, RK12, Assembler::AVX_512bit); 4733 __ cmpl(rounds, 52); 4734 __ jcc(Assembler::belowEqual, Lcbc_dec_rem_last); 4735 4736 __ vaesdec(B0, B0, RK13, Assembler::AVX_512bit); 4737 __ vaesdec(B0, B0, RK14, Assembler::AVX_512bit); 4738 4739 __ BIND(Lcbc_dec_rem_last); 4740 __ vaesdeclast(B0, B0, RK0, Assembler::AVX_512bit); 4741 4742 __ evpxorq(B0, B0, IV, Assembler::AVX_512bit); 4743 __ evmovdquq(IV, S0, Assembler::AVX_512bit); 4744 __ movdqu(Address(to, 0), B0); 4745 __ leaq(from, Address(from, 16)); 4746 __ leaq(to, Address(to, 16)); 4747 __ jmp(Lcbc_dec_rem_loop); 4748 4749 __ BIND(Lcbc_dec_ret); 4750 __ movdqu(Address(rvec, 0), IV); 4751 4752 // Zero out the round keys 4753 __ evpxorq(RK0, RK0, RK0, Assembler::AVX_512bit); 4754 __ evpxorq(RK1, RK1, RK1, Assembler::AVX_512bit); 4755 __ evpxorq(RK2, RK2, RK2, Assembler::AVX_512bit); 4756 __ evpxorq(RK3, RK3, RK3, Assembler::AVX_512bit); 4757 __ evpxorq(RK4, RK4, RK4, Assembler::AVX_512bit); 4758 __ evpxorq(RK5, RK5, RK5, Assembler::AVX_512bit); 4759 __ evpxorq(RK6, RK6, RK6, Assembler::AVX_512bit); 4760 __ evpxorq(RK7, RK7, RK7, Assembler::AVX_512bit); 4761 __ evpxorq(RK8, RK8, RK8, Assembler::AVX_512bit); 4762 __ evpxorq(RK9, RK9, RK9, Assembler::AVX_512bit); 4763 __ evpxorq(RK10, RK10, RK10, Assembler::AVX_512bit); 4764 __ cmpl(rounds, 44); 4765 __ jcc(Assembler::belowEqual, Lcbc_exit); 4766 __ evpxorq(RK11, RK11, RK11, Assembler::AVX_512bit); 4767 __ evpxorq(RK12, RK12, RK12, Assembler::AVX_512bit); 4768 __ cmpl(rounds, 52); 4769 __ jcc(Assembler::belowEqual, Lcbc_exit); 4770 __ evpxorq(RK13, RK13, RK13, Assembler::AVX_512bit); 4771 __ evpxorq(RK14, RK14, RK14, Assembler::AVX_512bit); 4772 4773 __ BIND(Lcbc_exit); 4774 __ pop(rbx); 4775 #ifdef _WIN64 4776 __ movl(rax, len_mem); 4777 #else 4778 __ pop(rax); // return length 4779 #endif 4780 __ leave(); // required for proper stackwalking of RuntimeStub frame 4781 __ ret(0); 4782 return start; 4783 } 4784 4785 // Polynomial x^128+x^127+x^126+x^121+1 4786 address ghash_polynomial_addr() { 4787 __ align(CodeEntryAlignment); 4788 StubCodeMark mark(this, "StubRoutines", "_ghash_poly_addr"); 4789 address start = __ pc(); 4790 __ emit_data64(0x0000000000000001, relocInfo::none); 4791 __ emit_data64(0xc200000000000000, relocInfo::none); 4792 return start; 4793 } 4794 4795 address ghash_shufflemask_addr() { 4796 __ align(CodeEntryAlignment); 4797 StubCodeMark mark(this, "StubRoutines", "_ghash_shuffmask_addr"); 4798 address start = __ pc(); 4799 __ emit_data64(0x0f0f0f0f0f0f0f0f, relocInfo::none); 4800 __ emit_data64(0x0f0f0f0f0f0f0f0f, relocInfo::none); 4801 return start; 4802 } 4803 4804 // Ghash single 
and multi-block operations using AVX instructions
4805 address generate_avx_ghash_processBlocks() {
4806 __ align(CodeEntryAlignment);
4807
4808 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
4809 address start = __ pc();
4810
4811 // arguments
4812 const Register state = c_rarg0;
4813 const Register htbl = c_rarg1;
4814 const Register data = c_rarg2;
4815 const Register blocks = c_rarg3;
4816 __ enter();
4817 // The entire GHASH update is performed by the assembler routine below
4818 __ avx_ghash(state, htbl, data, blocks);
4819 __ leave(); // required for proper stackwalking of RuntimeStub frame
4820 __ ret(0);
4821 return start;
4822 }
4823
4824 // byte swap x86 long
4825 address generate_ghash_long_swap_mask() {
4826 __ align(CodeEntryAlignment);
4827 StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask");
4828 address start = __ pc();
4829 __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none );
4830 __ emit_data64(0x0706050403020100, relocInfo::none );
4831 return start;
4832 }
4833
4834 // byte swap x86 byte array
4835 address generate_ghash_byte_swap_mask() {
4836 __ align(CodeEntryAlignment);
4837 StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask");
4838 address start = __ pc();
4839 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none );
4840 __ emit_data64(0x0001020304050607, relocInfo::none );
4841 return start;
4842 }
4843
4844 /* Single and multi-block ghash operations */
4845 address generate_ghash_processBlocks() {
4846 __ align(CodeEntryAlignment);
4847 Label L_ghash_loop, L_exit;
4848 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
4849 address start = __ pc();
4850
4851 const Register state = c_rarg0;
4852 const Register subkeyH = c_rarg1;
4853 const Register data = c_rarg2;
4854 const Register blocks = c_rarg3;
4855
4856 const XMMRegister xmm_temp0 = xmm0;
4857 const XMMRegister xmm_temp1 = xmm1;
4858 const XMMRegister xmm_temp2 = xmm2;
4859 const XMMRegister xmm_temp3 = xmm3;
4860 const XMMRegister xmm_temp4 = xmm4;
4861 const XMMRegister xmm_temp5 = xmm5;
4862 const XMMRegister xmm_temp6 = xmm6;
4863 const XMMRegister xmm_temp7 = xmm7;
4864 const XMMRegister xmm_temp8 = xmm8;
4865 const XMMRegister xmm_temp9 = xmm9;
4866 const XMMRegister xmm_temp10 = xmm10;
4867
4868 __ enter();
4869
4870 __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));
4871
4872 __ movdqu(xmm_temp0, Address(state, 0));
4873 __ pshufb(xmm_temp0, xmm_temp10);
4874
4875
4876 __ BIND(L_ghash_loop);
4877 __ movdqu(xmm_temp2, Address(data, 0));
4878 __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr()));
4879
4880 __ movdqu(xmm_temp1, Address(subkeyH, 0));
4881 __ pshufb(xmm_temp1, xmm_temp10);
4882
4883 __ pxor(xmm_temp0, xmm_temp2);
4884
4885 //
4886 // Multiply with the hash key
4887 //
4888 __ movdqu(xmm_temp3, xmm_temp0);
4889 __ pclmulqdq(xmm_temp3, xmm_temp1, 0); // xmm3 holds a0*b0
4890 __ movdqu(xmm_temp4, xmm_temp0);
4891 __ pclmulqdq(xmm_temp4, xmm_temp1, 16); // xmm4 holds a0*b1
4892
4893 __ movdqu(xmm_temp5, xmm_temp0);
4894 __ pclmulqdq(xmm_temp5, xmm_temp1, 1); // xmm5 holds a1*b0
4895 __ movdqu(xmm_temp6, xmm_temp0);
4896 __ pclmulqdq(xmm_temp6, xmm_temp1, 17); // xmm6 holds a1*b1
4897
4898 __ pxor(xmm_temp4, xmm_temp5); // xmm4 holds a0*b1 + a1*b0
4899
4900 __ movdqu(xmm_temp5, xmm_temp4); // move the contents of xmm4 to xmm5
4901 __ psrldq(xmm_temp4, 8); // shift xmm4 right by 64 bits
4902 __ pslldq(xmm_temp5, 8); // shift xmm5 left by 64 bits
4903 __ pxor(xmm_temp3, xmm_temp5);
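// Note on the pclmulqdq selector byte used above: imm8[0] picks the qword of
// the destination operand and imm8[4] picks the qword of the source operand,
// so the immediates 0 (0x00), 16 (0x10), 1 (0x01) and 17 (0x11) produce the
// four 64x64-bit partial products a0*b0, a0*b1, a1*b0 and a1*b1 of a
// schoolbook 128x128-bit carry-less multiply. The middle term in xmm4 is
// split into halves that are folded into the low (xmm3) and high (xmm6)
// 128-bit words of the 256-bit product.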
4904 __ pxor(xmm_temp6, xmm_temp4); // Register pair <xmm6:xmm3> holds the result
4905 // of the carry-less multiplication of
4906 // xmm0 by xmm1.
4907
4908 // We shift the result of the multiplication by one bit position
4909 // to the left to compensate for the fact that the bits are reversed.
4910 __ movdqu(xmm_temp7, xmm_temp3);
4911 __ movdqu(xmm_temp8, xmm_temp6);
4912 __ pslld(xmm_temp3, 1);
4913 __ pslld(xmm_temp6, 1);
4914 __ psrld(xmm_temp7, 31);
4915 __ psrld(xmm_temp8, 31);
4916 __ movdqu(xmm_temp9, xmm_temp7);
4917 __ pslldq(xmm_temp8, 4);
4918 __ pslldq(xmm_temp7, 4);
4919 __ psrldq(xmm_temp9, 12);
4920 __ por(xmm_temp3, xmm_temp7);
4921 __ por(xmm_temp6, xmm_temp8);
4922 __ por(xmm_temp6, xmm_temp9);
4923
4924 //
4925 // First phase of the reduction
4926 //
4927 // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts
4928 // independently.
4929 __ movdqu(xmm_temp7, xmm_temp3);
4930 __ movdqu(xmm_temp8, xmm_temp3);
4931 __ movdqu(xmm_temp9, xmm_temp3);
4932 __ pslld(xmm_temp7, 31); // packed left shift << 31
4933 __ pslld(xmm_temp8, 30); // packed left shift << 30
4934 __ pslld(xmm_temp9, 25); // packed left shift << 25
4935 __ pxor(xmm_temp7, xmm_temp8); // xor the shifted versions
4936 __ pxor(xmm_temp7, xmm_temp9);
4937 __ movdqu(xmm_temp8, xmm_temp7);
4938 __ pslldq(xmm_temp7, 12);
4939 __ psrldq(xmm_temp8, 4);
4940 __ pxor(xmm_temp3, xmm_temp7); // first phase of the reduction complete
4941
4942 //
4943 // Second phase of the reduction
4944 //
4945 // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these
4946 // shift operations.
4947 __ movdqu(xmm_temp2, xmm_temp3);
4948 __ movdqu(xmm_temp4, xmm_temp3);
4949 __ movdqu(xmm_temp5, xmm_temp3);
4950 __ psrld(xmm_temp2, 1); // packed right shift >> 1
4951 __ psrld(xmm_temp4, 2); // packed right shift >> 2
4952 __ psrld(xmm_temp5, 7); // packed right shift >> 7
4953 __ pxor(xmm_temp2, xmm_temp4); // xor the shifted versions
4954 __ pxor(xmm_temp2, xmm_temp5);
4955 __ pxor(xmm_temp2, xmm_temp8);
4956 __ pxor(xmm_temp3, xmm_temp2);
4957 __ pxor(xmm_temp6, xmm_temp3); // the result is in xmm6
4958
4959 __ decrement(blocks);
4960 __ jcc(Assembler::zero, L_exit);
4961 __ movdqu(xmm_temp0, xmm_temp6);
4962 __ addptr(data, 16);
4963 __ jmp(L_ghash_loop);
4964
4965 __ BIND(L_exit);
4966 __ pshufb(xmm_temp6, xmm_temp10); // Byte swap 16-byte result
4967 __ movdqu(Address(state, 0), xmm_temp6); // store the result
4968 __ leave();
4969 __ ret(0);
4970 return start;
4971 }
4972
4973 // base64 character set
4974 address base64_charset_addr() {
4975 __ align(CodeEntryAlignment);
4976 StubCodeMark mark(this, "StubRoutines", "base64_charset");
4977 address start = __ pc();
4978 __ emit_data64(0x0000004200000041, relocInfo::none);
4979 __ emit_data64(0x0000004400000043, relocInfo::none);
4980 __ emit_data64(0x0000004600000045, relocInfo::none);
4981 __ emit_data64(0x0000004800000047, relocInfo::none);
4982 __ emit_data64(0x0000004a00000049, relocInfo::none);
4983 __ emit_data64(0x0000004c0000004b, relocInfo::none);
4984 __ emit_data64(0x0000004e0000004d, relocInfo::none);
4985 __ emit_data64(0x000000500000004f, relocInfo::none);
4986 __ emit_data64(0x0000005200000051, relocInfo::none);
4987 __ emit_data64(0x0000005400000053, relocInfo::none);
4988 __ emit_data64(0x0000005600000055, relocInfo::none);
4989 __ emit_data64(0x0000005800000057, relocInfo::none);
4990 __ emit_data64(0x0000005a00000059, relocInfo::none);
4991 __ emit_data64(0x0000006200000061, relocInfo::none);
4992 __
emit_data64(0x0000006400000063, relocInfo::none); 4993 __ emit_data64(0x0000006600000065, relocInfo::none); 4994 __ emit_data64(0x0000006800000067, relocInfo::none); 4995 __ emit_data64(0x0000006a00000069, relocInfo::none); 4996 __ emit_data64(0x0000006c0000006b, relocInfo::none); 4997 __ emit_data64(0x0000006e0000006d, relocInfo::none); 4998 __ emit_data64(0x000000700000006f, relocInfo::none); 4999 __ emit_data64(0x0000007200000071, relocInfo::none); 5000 __ emit_data64(0x0000007400000073, relocInfo::none); 5001 __ emit_data64(0x0000007600000075, relocInfo::none); 5002 __ emit_data64(0x0000007800000077, relocInfo::none); 5003 __ emit_data64(0x0000007a00000079, relocInfo::none); 5004 __ emit_data64(0x0000003100000030, relocInfo::none); 5005 __ emit_data64(0x0000003300000032, relocInfo::none); 5006 __ emit_data64(0x0000003500000034, relocInfo::none); 5007 __ emit_data64(0x0000003700000036, relocInfo::none); 5008 __ emit_data64(0x0000003900000038, relocInfo::none); 5009 __ emit_data64(0x0000002f0000002b, relocInfo::none); 5010 return start; 5011 } 5012 5013 //base64 url character set 5014 address base64url_charset_addr() { 5015 __ align(CodeEntryAlignment); 5016 StubCodeMark mark(this, "StubRoutines", "base64url_charset"); 5017 address start = __ pc(); 5018 __ emit_data64(0x0000004200000041, relocInfo::none); 5019 __ emit_data64(0x0000004400000043, relocInfo::none); 5020 __ emit_data64(0x0000004600000045, relocInfo::none); 5021 __ emit_data64(0x0000004800000047, relocInfo::none); 5022 __ emit_data64(0x0000004a00000049, relocInfo::none); 5023 __ emit_data64(0x0000004c0000004b, relocInfo::none); 5024 __ emit_data64(0x0000004e0000004d, relocInfo::none); 5025 __ emit_data64(0x000000500000004f, relocInfo::none); 5026 __ emit_data64(0x0000005200000051, relocInfo::none); 5027 __ emit_data64(0x0000005400000053, relocInfo::none); 5028 __ emit_data64(0x0000005600000055, relocInfo::none); 5029 __ emit_data64(0x0000005800000057, relocInfo::none); 5030 __ emit_data64(0x0000005a00000059, relocInfo::none); 5031 __ emit_data64(0x0000006200000061, relocInfo::none); 5032 __ emit_data64(0x0000006400000063, relocInfo::none); 5033 __ emit_data64(0x0000006600000065, relocInfo::none); 5034 __ emit_data64(0x0000006800000067, relocInfo::none); 5035 __ emit_data64(0x0000006a00000069, relocInfo::none); 5036 __ emit_data64(0x0000006c0000006b, relocInfo::none); 5037 __ emit_data64(0x0000006e0000006d, relocInfo::none); 5038 __ emit_data64(0x000000700000006f, relocInfo::none); 5039 __ emit_data64(0x0000007200000071, relocInfo::none); 5040 __ emit_data64(0x0000007400000073, relocInfo::none); 5041 __ emit_data64(0x0000007600000075, relocInfo::none); 5042 __ emit_data64(0x0000007800000077, relocInfo::none); 5043 __ emit_data64(0x0000007a00000079, relocInfo::none); 5044 __ emit_data64(0x0000003100000030, relocInfo::none); 5045 __ emit_data64(0x0000003300000032, relocInfo::none); 5046 __ emit_data64(0x0000003500000034, relocInfo::none); 5047 __ emit_data64(0x0000003700000036, relocInfo::none); 5048 __ emit_data64(0x0000003900000038, relocInfo::none); 5049 __ emit_data64(0x0000005f0000002d, relocInfo::none); 5050 5051 return start; 5052 } 5053 5054 address base64_bswap_mask_addr() { 5055 __ align(CodeEntryAlignment); 5056 StubCodeMark mark(this, "StubRoutines", "bswap_mask_base64"); 5057 address start = __ pc(); 5058 __ emit_data64(0x0504038002010080, relocInfo::none); 5059 __ emit_data64(0x0b0a098008070680, relocInfo::none); 5060 __ emit_data64(0x0908078006050480, relocInfo::none); 5061 __ emit_data64(0x0f0e0d800c0b0a80, 
relocInfo::none); 5062 __ emit_data64(0x0605048003020180, relocInfo::none); 5063 __ emit_data64(0x0c0b0a8009080780, relocInfo::none); 5064 __ emit_data64(0x0504038002010080, relocInfo::none); 5065 __ emit_data64(0x0b0a098008070680, relocInfo::none); 5066 5067 return start; 5068 } 5069 5070 address base64_right_shift_mask_addr() { 5071 __ align(CodeEntryAlignment); 5072 StubCodeMark mark(this, "StubRoutines", "right_shift_mask"); 5073 address start = __ pc(); 5074 __ emit_data64(0x0006000400020000, relocInfo::none); 5075 __ emit_data64(0x0006000400020000, relocInfo::none); 5076 __ emit_data64(0x0006000400020000, relocInfo::none); 5077 __ emit_data64(0x0006000400020000, relocInfo::none); 5078 __ emit_data64(0x0006000400020000, relocInfo::none); 5079 __ emit_data64(0x0006000400020000, relocInfo::none); 5080 __ emit_data64(0x0006000400020000, relocInfo::none); 5081 __ emit_data64(0x0006000400020000, relocInfo::none); 5082 5083 return start; 5084 } 5085 5086 address base64_left_shift_mask_addr() { 5087 __ align(CodeEntryAlignment); 5088 StubCodeMark mark(this, "StubRoutines", "left_shift_mask"); 5089 address start = __ pc(); 5090 __ emit_data64(0x0000000200040000, relocInfo::none); 5091 __ emit_data64(0x0000000200040000, relocInfo::none); 5092 __ emit_data64(0x0000000200040000, relocInfo::none); 5093 __ emit_data64(0x0000000200040000, relocInfo::none); 5094 __ emit_data64(0x0000000200040000, relocInfo::none); 5095 __ emit_data64(0x0000000200040000, relocInfo::none); 5096 __ emit_data64(0x0000000200040000, relocInfo::none); 5097 __ emit_data64(0x0000000200040000, relocInfo::none); 5098 5099 return start; 5100 } 5101 5102 address base64_and_mask_addr() { 5103 __ align(CodeEntryAlignment); 5104 StubCodeMark mark(this, "StubRoutines", "and_mask"); 5105 address start = __ pc(); 5106 __ emit_data64(0x3f003f003f000000, relocInfo::none); 5107 __ emit_data64(0x3f003f003f000000, relocInfo::none); 5108 __ emit_data64(0x3f003f003f000000, relocInfo::none); 5109 __ emit_data64(0x3f003f003f000000, relocInfo::none); 5110 __ emit_data64(0x3f003f003f000000, relocInfo::none); 5111 __ emit_data64(0x3f003f003f000000, relocInfo::none); 5112 __ emit_data64(0x3f003f003f000000, relocInfo::none); 5113 __ emit_data64(0x3f003f003f000000, relocInfo::none); 5114 return start; 5115 } 5116 5117 address base64_gather_mask_addr() { 5118 __ align(CodeEntryAlignment); 5119 StubCodeMark mark(this, "StubRoutines", "gather_mask"); 5120 address start = __ pc(); 5121 __ emit_data64(0xffffffffffffffff, relocInfo::none); 5122 return start; 5123 } 5124 5125 // Code for generating Base64 encoding. 
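// A worked example of the 3-byte to 4-character expansion implemented below:
// the input "Man" = 0x4D 0x61 0x6E is the bit string
// 010011 010110 000101 101110, i.e. the 6-bit indices 19, 22, 5, 46, which
// select "TWFu" from the character set. In the bswap mask above, bytes with
// the high bit set (0x80) zero the corresponding destination byte of
// pshufb/vpshufb; the right/left shift masks hold per-16-bit-lane shift
// counts for evpsrlvw/evpsllvw, and the and mask isolates the 6-bit groups.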
5126 // Intrinsic function prototype in Base64.java:
5127 // private void encodeBlock(byte[] src, int sp, int sl, byte[] dst, int dp, boolean isURL) {
5128 address generate_base64_encodeBlock() {
5129 __ align(CodeEntryAlignment);
5130 StubCodeMark mark(this, "StubRoutines", "implEncode");
5131 address start = __ pc();
5132 __ enter();
5133
5134 // Save callee-saved registers before using them
5135 __ push(r12);
5136 __ push(r13);
5137 __ push(r14);
5138 __ push(r15);
5139
5140 // arguments
5141 const Register source = c_rarg0; // Source Array
5142 const Register start_offset = c_rarg1; // start offset
5143 const Register end_offset = c_rarg2; // end offset
5144 const Register dest = c_rarg3; // destination array
5145
5146 #ifndef _WIN64
5147 const Register dp = c_rarg4; // Position for writing to dest array
5148 const Register isURL = c_rarg5; // Base64 or URL character set
5149 #else
5150 const Address dp_mem(rbp, 6 * wordSize); // dp is passed on the stack on Win64
5151 const Address isURL_mem(rbp, 7 * wordSize);
5152 const Register isURL = r10; // pick a volatile Windows register
5153 const Register dp = r12;
5154 __ movl(dp, dp_mem);
5155 __ movl(isURL, isURL_mem);
5156 #endif
5157
5158 const Register length = r14;
5159 Label L_process80, L_process32, L_process3, L_exit, L_processdata;
5160
5161 // calculate length from offsets
5162 __ movl(length, end_offset);
5163 __ subl(length, start_offset);
5164 __ cmpl(length, 0);
5165 __ jcc(Assembler::lessEqual, L_exit);
5166
5167 __ lea(r11, ExternalAddress(StubRoutines::x86::base64_charset_addr()));
5168 // check whether the base64 charset (isURL=0) or the base64 URL charset (isURL=1) needs to be loaded
5169 __ cmpl(isURL, 0);
5170 __ jcc(Assembler::equal, L_processdata);
5171 __ lea(r11, ExternalAddress(StubRoutines::x86::base64url_charset_addr()));
5172
5173 // load masks required for encoding data
5174 __ BIND(L_processdata);
5175 __ movdqu(xmm16, ExternalAddress(StubRoutines::x86::base64_gather_mask_addr()));
5176 // Set all 64 bits of the k3 mask register.
5177 __ evpcmpeqb(k3, xmm16, xmm16, Assembler::AVX_512bit);
5178 __ evmovdquq(xmm12, ExternalAddress(StubRoutines::x86::base64_bswap_mask_addr()), Assembler::AVX_256bit, r13);
5179 __ evmovdquq(xmm13, ExternalAddress(StubRoutines::x86::base64_right_shift_mask_addr()), Assembler::AVX_512bit, r13);
5180 __ evmovdquq(xmm14, ExternalAddress(StubRoutines::x86::base64_left_shift_mask_addr()), Assembler::AVX_512bit, r13);
5181 __ evmovdquq(xmm15, ExternalAddress(StubRoutines::x86::base64_and_mask_addr()), Assembler::AVX_512bit, r13);
5182
5183 // Vector Base64 implementation, producing 96 bytes of encoded data
5184 __ BIND(L_process80);
5185 __ cmpl(length, 80);
5186 __ jcc(Assembler::below, L_process32);
5187 __ evmovdquq(xmm0, Address(source, start_offset, Address::times_1, 0), Assembler::AVX_256bit);
5188 __ evmovdquq(xmm1, Address(source, start_offset, Address::times_1, 24), Assembler::AVX_256bit);
5189 __ evmovdquq(xmm2, Address(source, start_offset, Address::times_1, 48), Assembler::AVX_256bit);
5190
5191 // permute the input data so that each 128-bit lane gets a contiguous run of the source
5192 __ vpermq(xmm3, xmm0, 148, Assembler::AVX_256bit);
5193 __ vpermq(xmm4, xmm1, 148, Assembler::AVX_256bit);
5194 __ vpermq(xmm5, xmm2, 148, Assembler::AVX_256bit);
5195
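// The permute constant 148 (0b10'01'01'00) arranges the source qwords as
// {q0, q1, q1, q2}, so the low 128-bit lane holds source bytes 0..11 and the
// high lane holds bytes 12..23: each of the three 32-byte loads above thus
// contributes 24 bytes of input, for 72 bytes per iteration.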
5196 // shuffle the input, grouping 3 source bytes per dword with 0 as the 4th byte;
5197 // we can deal with 12 bytes at a time in each 128-bit register
5198 __ vpshufb(xmm3, xmm3, xmm12, Assembler::AVX_256bit);
5199 __ vpshufb(xmm4, xmm4, xmm12, Assembler::AVX_256bit);
5200 __ vpshufb(xmm5, xmm5, xmm12, Assembler::AVX_256bit);
5201
5202 // convert byte to word; each 128-bit register will have 6 bytes for processing
5203 __ vpmovzxbw(xmm3, xmm3, Assembler::AVX_512bit);
5204 __ vpmovzxbw(xmm4, xmm4, Assembler::AVX_512bit);
5205 __ vpmovzxbw(xmm5, xmm5, Assembler::AVX_512bit);
5206
5207 // Extract bits in the pattern 6, 4+2, 2+4, 6 to convert three 8-bit bytes into four 6-bit values
5208 __ evpsrlvw(xmm0, xmm3, xmm13, Assembler::AVX_512bit);
5209 __ evpsrlvw(xmm1, xmm4, xmm13, Assembler::AVX_512bit);
5210 __ evpsrlvw(xmm2, xmm5, xmm13, Assembler::AVX_512bit);
5211
5212 __ evpsllvw(xmm3, xmm3, xmm14, Assembler::AVX_512bit);
5213 __ evpsllvw(xmm4, xmm4, xmm14, Assembler::AVX_512bit);
5214 __ evpsllvw(xmm5, xmm5, xmm14, Assembler::AVX_512bit);
5215
5216 __ vpsrlq(xmm0, xmm0, 8, Assembler::AVX_512bit);
5217 __ vpsrlq(xmm1, xmm1, 8, Assembler::AVX_512bit);
5218 __ vpsrlq(xmm2, xmm2, 8, Assembler::AVX_512bit);
5219
5220 __ vpsllq(xmm3, xmm3, 8, Assembler::AVX_512bit);
5221 __ vpsllq(xmm4, xmm4, 8, Assembler::AVX_512bit);
5222 __ vpsllq(xmm5, xmm5, 8, Assembler::AVX_512bit);
5223
5224 __ vpandq(xmm3, xmm3, xmm15, Assembler::AVX_512bit);
5225 __ vpandq(xmm4, xmm4, xmm15, Assembler::AVX_512bit);
5226 __ vpandq(xmm5, xmm5, xmm15, Assembler::AVX_512bit);
5227
5228 // Get the final 4*6 bits base64 encoding
5229 __ vporq(xmm3, xmm3, xmm0, Assembler::AVX_512bit);
5230 __ vporq(xmm4, xmm4, xmm1, Assembler::AVX_512bit);
5231 __ vporq(xmm5, xmm5, xmm2, Assembler::AVX_512bit);
5232
5233 // Shift
5234 __ vpsrlq(xmm3, xmm3, 8, Assembler::AVX_512bit);
5235 __ vpsrlq(xmm4, xmm4, 8, Assembler::AVX_512bit);
5236 __ vpsrlq(xmm5, xmm5, 8, Assembler::AVX_512bit);
5237
5238 // look up each 6-bit value in the base64 character set to fetch its encoding;
5239 // words are widened to dwords because gather instructions need dword indices
5240 __ vextracti64x4(xmm6, xmm3, 0);
5241 __ vpmovzxwd(xmm0, xmm6, Assembler::AVX_512bit);
5242 __ vextracti64x4(xmm6, xmm3, 1);
5243 __ vpmovzxwd(xmm1, xmm6, Assembler::AVX_512bit);
5244
5245 __ vextracti64x4(xmm6, xmm4, 0);
5246 __ vpmovzxwd(xmm2, xmm6, Assembler::AVX_512bit);
5247 __ vextracti64x4(xmm6, xmm4, 1);
5248 __ vpmovzxwd(xmm3, xmm6, Assembler::AVX_512bit);
5249
5250 __ vextracti64x4(xmm4, xmm5, 0);
5251 __ vpmovzxwd(xmm6, xmm4, Assembler::AVX_512bit);
5252
5253 __ vextracti64x4(xmm4, xmm5, 1);
5254 __ vpmovzxwd(xmm7, xmm4, Assembler::AVX_512bit);
5255
5256 __ kmovql(k2, k3);
5257 __ evpgatherdd(xmm4, k2, Address(r11, xmm0, Address::times_4, 0), Assembler::AVX_512bit);
5258 __ kmovql(k2, k3);
5259 __ evpgatherdd(xmm5, k2, Address(r11, xmm1, Address::times_4, 0), Assembler::AVX_512bit);
5260 __ kmovql(k2, k3);
5261 __ evpgatherdd(xmm8, k2, Address(r11, xmm2, Address::times_4, 0), Assembler::AVX_512bit);
5262 __ kmovql(k2, k3);
5263 __ evpgatherdd(xmm9, k2, Address(r11, xmm3, Address::times_4, 0), Assembler::AVX_512bit);
5264 __ kmovql(k2, k3);
5265 __ evpgatherdd(xmm10, k2, Address(r11, xmm6, Address::times_4, 0), Assembler::AVX_512bit);
5266 __ kmovql(k2, k3);
5267 __ evpgatherdd(xmm11, k2, Address(r11, xmm7, Address::times_4, 0), Assembler::AVX_512bit);
5268
5269 // Down-convert dword to byte; the final output is 16*6 = 96 bytes long
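// Each evpgatherdd above consumes its write mask, which is why k3 is copied
// into k2 before every gather. evpmovdb truncates each of the 16 dword lanes
// of a zmm register to its low byte, so each store below emits 16 encoded
// characters.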
5270 __ evpmovdb(Address(dest, dp, Address::times_1, 0), xmm4, Assembler::AVX_512bit);
5271 __ evpmovdb(Address(dest, dp, Address::times_1, 16), xmm5, Assembler::AVX_512bit);
5272 __ evpmovdb(Address(dest, dp, Address::times_1, 32), xmm8, Assembler::AVX_512bit);
5273 __ evpmovdb(Address(dest, dp, Address::times_1, 48), xmm9, Assembler::AVX_512bit);
5274 __ evpmovdb(Address(dest, dp, Address::times_1, 64), xmm10, Assembler::AVX_512bit);
5275 __ evpmovdb(Address(dest, dp, Address::times_1, 80), xmm11, Assembler::AVX_512bit);
5276
5277 __ addq(dest, 96);
5278 __ addq(source, 72);
5279 __ subq(length, 72);
5280 __ jmp(L_process80);
5281
5282 // Vector Base64 implementation generating 32 bytes of encoded data
5283 __ BIND(L_process32);
5284 __ cmpl(length, 32);
5285 __ jcc(Assembler::below, L_process3);
5286 __ evmovdquq(xmm0, Address(source, start_offset), Assembler::AVX_256bit);
5287 __ vpermq(xmm0, xmm0, 148, Assembler::AVX_256bit);
5288 __ vpshufb(xmm6, xmm0, xmm12, Assembler::AVX_256bit);
5289 __ vpmovzxbw(xmm6, xmm6, Assembler::AVX_512bit);
5290 __ evpsrlvw(xmm2, xmm6, xmm13, Assembler::AVX_512bit);
5291 __ evpsllvw(xmm3, xmm6, xmm14, Assembler::AVX_512bit);
5292
5293 __ vpsrlq(xmm2, xmm2, 8, Assembler::AVX_512bit);
5294 __ vpsllq(xmm3, xmm3, 8, Assembler::AVX_512bit);
5295 __ vpandq(xmm3, xmm3, xmm15, Assembler::AVX_512bit);
5296 __ vporq(xmm1, xmm2, xmm3, Assembler::AVX_512bit);
5297 __ vpsrlq(xmm1, xmm1, 8, Assembler::AVX_512bit);
5298 __ vextracti64x4(xmm9, xmm1, 0);
5299 __ vpmovzxwd(xmm6, xmm9, Assembler::AVX_512bit);
5300 __ vextracti64x4(xmm9, xmm1, 1);
5301 __ vpmovzxwd(xmm5, xmm9, Assembler::AVX_512bit);
5302 __ kmovql(k2, k3);
5303 __ evpgatherdd(xmm8, k2, Address(r11, xmm6, Address::times_4, 0), Assembler::AVX_512bit);
5304 __ kmovql(k2, k3);
5305 __ evpgatherdd(xmm10, k2, Address(r11, xmm5, Address::times_4, 0), Assembler::AVX_512bit);
5306 __ evpmovdb(Address(dest, dp, Address::times_1, 0), xmm8, Assembler::AVX_512bit);
5307 __ evpmovdb(Address(dest, dp, Address::times_1, 16), xmm10, Assembler::AVX_512bit);
5308 __ subq(length, 24);
5309 __ addq(dest, 32);
5310 __ addq(source, 24);
5311 __ jmp(L_process32);
5312
5313 // Scalar data processing takes 3 bytes at a time and produces 4 bytes of encoded data
5314 /* This code corresponds to the scalar version of the following snippet in Base64.java
5315 ** int bits = (src[sp0++] & 0xff) << 16 | (src[sp0++] & 0xff) << 8 | (src[sp0++] & 0xff);
5316 ** dst[dp0++] = (byte)base64[(bits >>> 18) & 0x3f];
5317 ** dst[dp0++] = (byte)base64[(bits >>> 12) & 0x3f];
5318 ** dst[dp0++] = (byte)base64[(bits >>> 6) & 0x3f];
5319 ** dst[dp0++] = (byte)base64[bits & 0x3f]; */
5320 __ BIND(L_process3);
5321 __ cmpl(length, 3);
5322 __ jcc(Assembler::below, L_exit);
5323 // Read 1 byte at a time
5324 __ movzbl(rax, Address(source, start_offset));
5325 __ shll(rax, 0x10);
5326 __ movl(r15, rax);
5327 __ movzbl(rax, Address(source, start_offset, Address::times_1, 1));
5328 __ shll(rax, 0x8);
5329 __ movzwl(rax, rax);
5330 __ orl(r15, rax);
5331 __ movzbl(rax, Address(source, start_offset, Address::times_1, 2));
5332 __ orl(rax, r15);
5333 // Save 3 bytes read in r15
5334 __ movl(r15, rax);
5335 __ shrl(rax, 0x12);
5336 __ andl(rax, 0x3f);
5337 // rax contains the index, r11 contains the base64 lookup table
5338 __ movb(rax, Address(r11, rax, Address::times_4));
5339 // Write the encoded byte to the destination
5340 __ movb(Address(dest, dp, Address::times_1, 0), rax);
5341 __ movl(rax, r15);
5342 __ shrl(rax, 0xc);
5343 __ andl(rax, 0x3f);
5344 __ movb(rax, Address(r11, rax, Address::times_4));
5345 __ movb(Address(dest, dp, Address::times_1, 1), rax);
5346 __ movl(rax, r15);
5347 __ shrl(rax, 0x6);
5348 __ andl(rax, 0x3f);
5349 __ movb(rax, Address(r11, rax, Address::times_4));
5350 __ movb(Address(dest, dp, Address::times_1, 2), rax);
5351 __ movl(rax, r15);
5352 __ andl(rax, 0x3f);
5353 __ movb(rax, Address(r11, rax, Address::times_4));
5354 __ movb(Address(dest, dp, Address::times_1, 3), rax);
5355 __ subl(length, 3);
5356 __ addq(dest, 4);
5357 __ addq(source, 3);
5358 __ jmp(L_process3);
5359 __ BIND(L_exit);
5360 __ pop(r15);
5361 __ pop(r14);
5362 __ pop(r13);
5363 __ pop(r12);
5364 __ leave();
5365 __ ret(0);
5366 return start;
5367 }
5368
5369 /**
5370 * Arguments:
5371 *
5372 * Inputs:
5373 * c_rarg0 - int crc
5374 * c_rarg1 - byte* buf
5375 * c_rarg2 - int length
5376 *
5377 * Output:
5378 * rax - int crc result
5379 */
5380 address generate_updateBytesCRC32() {
5381 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");
5382
5383 __ align(CodeEntryAlignment);
5384 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");
5385
5386 address start = __ pc();
5387 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
5388 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
5389 // rscratch1: r10
5390 const Register crc = c_rarg0; // crc
5391 const Register buf = c_rarg1; // source java byte array address
5392 const Register len = c_rarg2; // length
5393 const Register table = c_rarg3; // crc_table address (reuse register)
5394 const Register tmp1 = r11;
5395 const Register tmp2 = r10;
5396 assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax);
5397
5398 BLOCK_COMMENT("Entry:");
5399 __ enter(); // required for proper stackwalking of RuntimeStub frame
5400
5401 if (VM_Version::supports_sse4_1() && VM_Version::supports_avx512_vpclmulqdq() &&
5402 VM_Version::supports_avx512bw() &&
5403 VM_Version::supports_avx512vl()) {
5404 __ kernel_crc32_avx512(crc, buf, len, table, tmp1, tmp2);
5405 } else {
5406 __ kernel_crc32(crc, buf, len, table, tmp1);
5407 }
5408
5409 __ movl(rax, crc);
5410 __ vzeroupper();
5411 __ leave(); // required for proper stackwalking of RuntimeStub frame
5412 __ ret(0);
5413
5414 return start;
5415 }
5416
5417 /**
5418 * Arguments:
5419 *
5420 * Inputs:
5421 * c_rarg0 - int crc
5422 * c_rarg1 - byte* buf
5423 * c_rarg2 - long length
5424 * c_rarg3 - table_start - optional (present only when doing a library_call,
5425 * not used by x86 algorithm)
5426 *
5427 * Output:
5428 * rax - int crc result
5429 */
5430 address generate_updateBytesCRC32C(bool is_pclmulqdq_supported) {
5431 assert(UseCRC32CIntrinsics, "need SSE4_2");
5432 __ align(CodeEntryAlignment);
5433 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C");
5434 address start = __ pc();
5435 // reg.arg   int#0  int#1  int#2  int#3  int#4  int#5  float regs
5436 // Windows   RCX    RDX    R8     R9     none   none   XMM0..XMM3
5437 // Lin / Sol RDI    RSI    RDX    RCX    R8     R9     XMM0..XMM7
5438 const Register crc = c_rarg0; // crc
5439 const Register buf = c_rarg1; // source java byte array address
5440 const Register len = c_rarg2; // length
5441 const Register a = rax;
5442 const Register j = r9;
5443 const Register k = r10;
5444 const Register l = r11;
5445 #ifdef _WIN64
5446 const Register y = rdi;
5447 const Register z = rsi;
5448 #else
5449 const Register y = rcx;
5450 const Register z = r8;
5451 #endif
5452 assert_different_registers(crc, buf, len, a, j, k, l, y, z);
5453
5454 BLOCK_COMMENT("Entry:");
5455 __ enter(); // required
for proper stackwalking of RuntimeStub frame 5456 #ifdef _WIN64 5457 __ push(y); 5458 __ push(z); 5459 #endif 5460 __ crc32c_ipl_alg2_alt2(crc, buf, len, 5461 a, j, k, 5462 l, y, z, 5463 c_farg0, c_farg1, c_farg2, 5464 is_pclmulqdq_supported); 5465 __ movl(rax, crc); 5466 #ifdef _WIN64 5467 __ pop(z); 5468 __ pop(y); 5469 #endif 5470 __ vzeroupper(); 5471 __ leave(); // required for proper stackwalking of RuntimeStub frame 5472 __ ret(0); 5473 5474 return start; 5475 } 5476 5477 /** 5478 * Arguments: 5479 * 5480 * Input: 5481 * c_rarg0 - x address 5482 * c_rarg1 - x length 5483 * c_rarg2 - y address 5484 * c_rarg3 - y length 5485 * not Win64 5486 * c_rarg4 - z address 5487 * c_rarg5 - z length 5488 * Win64 5489 * rsp+40 - z address 5490 * rsp+48 - z length 5491 */ 5492 address generate_multiplyToLen() { 5493 __ align(CodeEntryAlignment); 5494 StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); 5495 5496 address start = __ pc(); 5497 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 5498 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 5499 const Register x = rdi; 5500 const Register xlen = rax; 5501 const Register y = rsi; 5502 const Register ylen = rcx; 5503 const Register z = r8; 5504 const Register zlen = r11; 5505 5506 // Next registers will be saved on stack in multiply_to_len(). 5507 const Register tmp1 = r12; 5508 const Register tmp2 = r13; 5509 const Register tmp3 = r14; 5510 const Register tmp4 = r15; 5511 const Register tmp5 = rbx; 5512 5513 BLOCK_COMMENT("Entry:"); 5514 __ enter(); // required for proper stackwalking of RuntimeStub frame 5515 5516 #ifndef _WIN64 5517 __ movptr(zlen, r9); // Save r9 in r11 - zlen 5518 #endif 5519 setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx 5520 // ylen => rcx, z => r8, zlen => r11 5521 // r9 and r10 may be used to save non-volatile registers 5522 #ifdef _WIN64 5523 // last 2 arguments (#4, #5) are on stack on Win64 5524 __ movptr(z, Address(rsp, 6 * wordSize)); 5525 __ movptr(zlen, Address(rsp, 7 * wordSize)); 5526 #endif 5527 5528 __ movptr(xlen, rsi); 5529 __ movptr(y, rdx); 5530 __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5); 5531 5532 restore_arg_regs(); 5533 5534 __ leave(); // required for proper stackwalking of RuntimeStub frame 5535 __ ret(0); 5536 5537 return start; 5538 } 5539 5540 /** 5541 * Arguments: 5542 * 5543 * Input: 5544 * c_rarg0 - obja address 5545 * c_rarg1 - objb address 5546 * c_rarg3 - length length 5547 * c_rarg4 - scale log2_array_indxscale 5548 * 5549 * Output: 5550 * rax - int >= mismatched index, < 0 bitwise complement of tail 5551 */ 5552 address generate_vectorizedMismatch() { 5553 __ align(CodeEntryAlignment); 5554 StubCodeMark mark(this, "StubRoutines", "vectorizedMismatch"); 5555 address start = __ pc(); 5556 5557 BLOCK_COMMENT("Entry:"); 5558 __ enter(); 5559 5560 #ifdef _WIN64 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 5561 const Register scale = c_rarg0; //rcx, will exchange with r9 5562 const Register objb = c_rarg1; //rdx 5563 const Register length = c_rarg2; //r8 5564 const Register obja = c_rarg3; //r9 5565 __ xchgq(obja, scale); //now obja and scale contains the correct contents 5566 5567 const Register tmp1 = r10; 5568 const Register tmp2 = r11; 5569 #endif 5570 #ifndef _WIN64 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 
5571 const Register obja = c_rarg0; // U: rdi
5572 const Register objb = c_rarg1; // U: rsi
5573 const Register length = c_rarg2; // U: rdx
5574 const Register scale = c_rarg3; // U: rcx
5575 const Register tmp1 = r8;
5576 const Register tmp2 = r9;
5577 #endif
5578 const Register result = rax; // return value
5579 const XMMRegister vec0 = xmm0;
5580 const XMMRegister vec1 = xmm1;
5581 const XMMRegister vec2 = xmm2;
5582
5583 __ vectorized_mismatch(obja, objb, length, scale, result, tmp1, tmp2, vec0, vec1, vec2);
5584
5585 __ vzeroupper();
5586 __ leave();
5587 __ ret(0);
5588
5589 return start;
5590 }
5591
5592 /**
5593 * Arguments:
5594 *
5595 * Input:
5596 * c_rarg0 - x address
5597 * c_rarg1 - x length
5598 * c_rarg2 - z address
5599 * c_rarg3 - z length
5600 *
5601 */
5602 address generate_squareToLen() {
5603
5604 __ align(CodeEntryAlignment);
5605 StubCodeMark mark(this, "StubRoutines", "squareToLen");
5606
5607 address start = __ pc();
5608 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
5609 // Unix: rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...)
5610 const Register x = rdi;
5611 const Register len = rsi;
5612 const Register z = r8;
5613 const Register zlen = rcx;
5614
5615 const Register tmp1 = r12;
5616 const Register tmp2 = r13;
5617 const Register tmp3 = r14;
5618 const Register tmp4 = r15;
5619 const Register tmp5 = rbx;
5620
5621 BLOCK_COMMENT("Entry:");
5622 __ enter(); // required for proper stackwalking of RuntimeStub frame
5623
5624 setup_arg_regs(4); // x => rdi, len => rsi, z => rdx
5625 // zlen => rcx
5626 // r9 and r10 may be used to save non-volatile registers
5627 __ movptr(r8, rdx);
5628 __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax);
5629
5630 restore_arg_regs();
5631
5632 __ leave(); // required for proper stackwalking of RuntimeStub frame
5633 __ ret(0);
5634
5635 return start;
5636 }
5637
5638 address generate_method_entry_barrier() {
5639 __ align(CodeEntryAlignment);
5640 StubCodeMark mark(this, "StubRoutines", "nmethod_entry_barrier");
5641
5642 Label deoptimize_label;
5643
5644 address start = __ pc();
5645
5646 __ push(-1); // cookie, this is used for writing the new rsp when deoptimizing
5647
5648 BLOCK_COMMENT("Entry:");
5649 __ enter(); // save rbp
5650
5651 // Save c_rarg0 because we want to use that value.
5652 // We could do without it, but then we would depend on the number of slots used by pusha.
5653 __ push(c_rarg0);
5654
5655 __ lea(c_rarg0, Address(rsp, wordSize * 3)); // 1 for cookie, 1 for rbp, 1 for c_rarg0 - this should be the return address
5656
5657 __ pusha();
5658
5659 // The method may have floats as arguments, and we must spill them before calling
5660 // the VM runtime.
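// Java passes at most 8 floating-point arguments in xmm0-xmm7 (asserted
// below), and a float or double argument only occupies the low bits of its
// register, so spilling 16 bytes per register is sufficient here.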
5661 assert(Argument::n_float_register_parameters_j == 8, "Assumption");
5662 const int xmm_size = wordSize * 2;
5663 const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j;
5664 __ subptr(rsp, xmm_spill_size);
5665 __ movdqu(Address(rsp, xmm_size * 7), xmm7);
5666 __ movdqu(Address(rsp, xmm_size * 6), xmm6);
5667 __ movdqu(Address(rsp, xmm_size * 5), xmm5);
5668 __ movdqu(Address(rsp, xmm_size * 4), xmm4);
5669 __ movdqu(Address(rsp, xmm_size * 3), xmm3);
5670 __ movdqu(Address(rsp, xmm_size * 2), xmm2);
5671 __ movdqu(Address(rsp, xmm_size * 1), xmm1);
5672 __ movdqu(Address(rsp, xmm_size * 0), xmm0);
5673
5674 __ call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(address*)>(BarrierSetNMethod::nmethod_stub_entry_barrier)), 1);
5675
5676 __ movdqu(xmm0, Address(rsp, xmm_size * 0));
5677 __ movdqu(xmm1, Address(rsp, xmm_size * 1));
5678 __ movdqu(xmm2, Address(rsp, xmm_size * 2));
5679 __ movdqu(xmm3, Address(rsp, xmm_size * 3));
5680 __ movdqu(xmm4, Address(rsp, xmm_size * 4));
5681 __ movdqu(xmm5, Address(rsp, xmm_size * 5));
5682 __ movdqu(xmm6, Address(rsp, xmm_size * 6));
5683 __ movdqu(xmm7, Address(rsp, xmm_size * 7));
5684 __ addptr(rsp, xmm_spill_size);
5685
5686 __ cmpl(rax, 1); // 1 means deoptimize
5687 __ jcc(Assembler::equal, deoptimize_label);
5688
5689 __ popa();
5690 __ pop(c_rarg0);
5691
5692 __ leave();
5693
5694 __ addptr(rsp, 1 * wordSize); // cookie
5695 __ ret(0);
5696
5697
5698 __ BIND(deoptimize_label);
5699
5700 __ popa();
5701 __ pop(c_rarg0);
5702
5703 __ leave();
5704
5705 // This could be taken out, but it is good for verification purposes:
5706 // getting a SIGSEGV here while still having a correct stack is valuable.
5707 __ testptr(rsp, Address(rsp, 0));
5708
5709 __ movptr(rsp, Address(rsp, 0)); // new rsp was written in the barrier
5710 __ jmp(Address(rsp, -1 * wordSize)); // jmp target should be the caller's verified_entry_point
5711
5712 return start;
5713 }
5714
5715 /**
5716 * Arguments:
5717 *
5718 * Input:
5719 * c_rarg0 - out address
5720 * c_rarg1 - in address
5721 * c_rarg2 - offset
5722 * c_rarg3 - len
5723 * not Win64
5724 * c_rarg4 - k
5725 * Win64
5726 * rsp+40 - k
5727 */
5728 address generate_mulAdd() {
5729 __ align(CodeEntryAlignment);
5730 StubCodeMark mark(this, "StubRoutines", "mulAdd");
5731
5732 address start = __ pc();
5733 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
5734 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
5735 const Register out = rdi;
5736 const Register in = rsi;
5737 const Register offset = r11;
5738 const Register len = rcx;
5739 const Register k = r8;
5740
5741 // Next registers will be saved on stack in mul_add().
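// (For context: mul_add backs the BigInteger.implMulAdd intrinsic, which,
// roughly, multiplies the first len words of in[] by the single word k, adds
// the product into out[] at the given offset, and returns the carry in rax;
// this is a sketch of the contract, see BigInteger.mulAdd for the details.)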
5742 const Register tmp1 = r12;
5743 const Register tmp2 = r13;
5744 const Register tmp3 = r14;
5745 const Register tmp4 = r15;
5746 const Register tmp5 = rbx;
5747
5748 BLOCK_COMMENT("Entry:");
5749 __ enter(); // required for proper stackwalking of RuntimeStub frame
5750
5751 setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx
5752 // len => rcx, k => r8
5753 // r9 and r10 may be used to save non-volatile registers
5754 #ifdef _WIN64
5755 // last argument is on stack on Win64
5756 __ movl(k, Address(rsp, 6 * wordSize));
5757 #endif
5758 __ movptr(r11, rdx); // move offset in rdx to offset(r11)
5759 __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax);
5760
5761 restore_arg_regs();
5762
5763 __ leave(); // required for proper stackwalking of RuntimeStub frame
5764 __ ret(0);
5765
5766 return start;
5767 }
5768
5769 address generate_bigIntegerRightShift() {
5770 __ align(CodeEntryAlignment);
5771 StubCodeMark mark(this, "StubRoutines", "bigIntegerRightShiftWorker");
5772
5773 address start = __ pc();
5774 Label Shift512Loop, ShiftTwo, ShiftTwoLoop, ShiftOne, Exit;
5775 // For Unix, the arguments are as follows: rdi, rsi, rdx, rcx, r8.
5776 const Register newArr = rdi;
5777 const Register oldArr = rsi;
5778 const Register newIdx = rdx;
5779 const Register shiftCount = rcx; // shiftCount is deliberately kept in rcx, since the shift instructions use cl implicitly.
5780 const Register totalNumIter = r8;
5781
5782 // For Windows, we use r9 and r10 as temps to save rdi and rsi, so we cannot allocate them for our temps.
5783 // For everything else, we prefer r9 and r10 since we do not have to save them before use.
5784 const Register tmp1 = r11; // Caller save.
5785 const Register tmp2 = rax; // Caller save.
5786 const Register tmp3 = WINDOWS_ONLY(r12) NOT_WINDOWS(r9); // Windows: Callee save. Linux: Caller save.
5787 const Register tmp4 = WINDOWS_ONLY(r13) NOT_WINDOWS(r10); // Windows: Callee save. Linux: Caller save.
5788 const Register tmp5 = r14; // Callee save.
5789 const Register tmp6 = r15;
5790
5791 const XMMRegister x0 = xmm0;
5792 const XMMRegister x1 = xmm1;
5793 const XMMRegister x2 = xmm2;
5794
5795 BLOCK_COMMENT("Entry:");
5796 __ enter(); // required for proper stackwalking of RuntimeStub frame
5797
5798 #ifdef _WINDOWS
5799 setup_arg_regs(4);
5800 // For Windows, since the last argument is on the stack, we need to move it to the appropriate register.
5801 __ movl(totalNumIter, Address(rsp, 6 * wordSize));
5802 // Save callee save registers.
5803 __ push(tmp3);
5804 __ push(tmp4);
5805 #endif
5806 __ push(tmp5);
5807
5808 // Rename temps used throughout the code.
5809 const Register idx = tmp1;
5810 const Register nIdx = tmp2;
5811
5812 __ xorl(idx, idx);
5813
5814 // Start right shift from end of the array.
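// Each destination word combines two adjacent source words via a funnel
// shift: shrdl(dst, src) shifts dst right by cl bits and fills the vacated
// high bits from src, i.e. dst = low32((src:dst) >> cl); vpshrdvd is the
// AVX512-VBMI2 vectorized form of the same concatenate-and-shift operation.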
5815 // For example, if #iteration = 4 and newIdx = 1
5816 // then dest[4] = src[4] >> shiftCount | src[3] << (32 - shiftCount)
5817 // if #iteration = 4 and newIdx = 0
5818 // then dest[3] = src[4] >> shiftCount | src[3] << (32 - shiftCount)
5819 __ movl(idx, totalNumIter);
5820 __ movl(nIdx, idx);
5821 __ addl(nIdx, newIdx);
5822
5823 // If vectorization is enabled, check if the number of iterations is at least AVX3Threshold/64
5824 // (64 with the default threshold). If not, go to ShiftTwo, which processes 2 iterations at a time.
5825 if (VM_Version::supports_avx512_vbmi2()) {
5826 __ cmpptr(totalNumIter, (AVX3Threshold/64));
5827 __ jcc(Assembler::less, ShiftTwo);
5828
5829 if (AVX3Threshold < 16 * 64) {
5830 __ cmpl(totalNumIter, 16);
5831 __ jcc(Assembler::less, ShiftTwo);
5832 }
5833 __ evpbroadcastd(x0, shiftCount, Assembler::AVX_512bit);
5834 __ subl(idx, 16);
5835 __ subl(nIdx, 16);
5836 __ BIND(Shift512Loop);
5837 __ evmovdqul(x2, Address(oldArr, idx, Address::times_4, 4), Assembler::AVX_512bit);
5838 __ evmovdqul(x1, Address(oldArr, idx, Address::times_4), Assembler::AVX_512bit);
5839 __ vpshrdvd(x2, x1, x0, Assembler::AVX_512bit);
5840 __ evmovdqul(Address(newArr, nIdx, Address::times_4), x2, Assembler::AVX_512bit);
5841 __ subl(nIdx, 16);
5842 __ subl(idx, 16);
5843 __ jcc(Assembler::greaterEqual, Shift512Loop);
5844 __ addl(idx, 16);
5845 __ addl(nIdx, 16);
5846 }
5847 __ BIND(ShiftTwo);
5848 __ cmpl(idx, 2);
5849 __ jcc(Assembler::less, ShiftOne);
5850 __ subl(idx, 2);
5851 __ subl(nIdx, 2);
5852 __ BIND(ShiftTwoLoop);
5853 __ movl(tmp5, Address(oldArr, idx, Address::times_4, 8));
5854 __ movl(tmp4, Address(oldArr, idx, Address::times_4, 4));
5855 __ movl(tmp3, Address(oldArr, idx, Address::times_4));
5856 __ shrdl(tmp5, tmp4);
5857 __ shrdl(tmp4, tmp3);
5858 __ movl(Address(newArr, nIdx, Address::times_4, 4), tmp5);
5859 __ movl(Address(newArr, nIdx, Address::times_4), tmp4);
5860 __ subl(nIdx, 2);
5861 __ subl(idx, 2);
5862 __ jcc(Assembler::greaterEqual, ShiftTwoLoop);
5863 __ addl(idx, 2);
5864 __ addl(nIdx, 2);
5865
5866 // Do the last iteration
5867 __ BIND(ShiftOne);
5868 __ cmpl(idx, 1);
5869 __ jcc(Assembler::less, Exit);
5870 __ subl(idx, 1);
5871 __ subl(nIdx, 1);
5872 __ movl(tmp4, Address(oldArr, idx, Address::times_4, 4));
5873 __ movl(tmp3, Address(oldArr, idx, Address::times_4));
5874 __ shrdl(tmp4, tmp3);
5875 __ movl(Address(newArr, nIdx, Address::times_4), tmp4);
5876 __ BIND(Exit);
5877 // Restore callee save registers.
5878 __ pop(tmp5);
5879 #ifdef _WINDOWS
5880 __ pop(tmp4);
5881 __ pop(tmp3);
5882 restore_arg_regs();
5883 #endif
5884 __ leave(); // required for proper stackwalking of RuntimeStub frame
5885 __ ret(0);
5886 return start;
5887 }
5888
5889 /**
5890 * Arguments:
5891 *
5892 * Input:
5893 * c_rarg0 - newArr address
5894 * c_rarg1 - oldArr address
5895 * c_rarg2 - newIdx
5896 * c_rarg3 - shiftCount
5897 * not Win64
5898 * c_rarg4 - numIter
5899 * Win64
5900 * rsp+40 - numIter
5901 */
5902 address generate_bigIntegerLeftShift() {
5903 __ align(CodeEntryAlignment);
5904 StubCodeMark mark(this, "StubRoutines", "bigIntegerLeftShiftWorker");
5905 address start = __ pc();
5906 Label Shift512Loop, ShiftTwo, ShiftTwoLoop, ShiftOne, Exit;
5907 // For Unix, the arguments are as follows: rdi, rsi, rdx, rcx, r8.
5908 const Register newArr = rdi;
5909 const Register oldArr = rsi;
5910 const Register newIdx = rdx;
5911 const Register shiftCount = rcx; // shiftCount is deliberately kept in rcx, since the shift instructions use cl implicitly.
5912 const Register totalNumIter = r8;
5913 // For Windows, we use r9 and r10 as temps to save rdi and rsi, so we cannot allocate them for our temps.
5914 // For everything else, we prefer r9 and r10 since we do not have to save them before use.
5915 const Register tmp1 = r11; // Caller save.
5916 const Register tmp2 = rax; // Caller save.
5917 const Register tmp3 = WINDOWS_ONLY(r12) NOT_WINDOWS(r9); // Windows: Callee save. Linux: Caller save.
5918 const Register tmp4 = WINDOWS_ONLY(r13) NOT_WINDOWS(r10); // Windows: Callee save. Linux: Caller save.
5919 const Register tmp5 = r14; // Callee save.
5920
5921 const XMMRegister x0 = xmm0;
5922 const XMMRegister x1 = xmm1;
5923 const XMMRegister x2 = xmm2;
5924 BLOCK_COMMENT("Entry:");
5925 __ enter(); // required for proper stackwalking of RuntimeStub frame
5926
5927 #ifdef _WINDOWS
5928 setup_arg_regs(4);
5929 // For Windows, since the last argument is on the stack, we need to move it to the appropriate register.
5930 __ movl(totalNumIter, Address(rsp, 6 * wordSize));
5931 // Save callee save registers.
5932 __ push(tmp3);
5933 __ push(tmp4);
5934 #endif
5935 __ push(tmp5);
5936
5937 // Rename temps used throughout the code.
5938 const Register idx = tmp1;
5939 const Register numIterTmp = tmp2;
5940
5941 // Start idx from zero.
5942 __ xorl(idx, idx);
5943 // Compute an interior pointer for the new array so that we can use the same index for both old and new arrays.
5944 __ lea(newArr, Address(newArr, newIdx, Address::times_4));
5945 __ movl(numIterTmp, totalNumIter);
5946
5947 // If vectorization is enabled, check if the number of iterations is at least AVX3Threshold/64
5948 // (64 with the default threshold). If not, go to ShiftTwo, which shifts two numbers at a time.
5949 if (VM_Version::supports_avx512_vbmi2()) {
5950 __ cmpl(totalNumIter, (AVX3Threshold/64));
5951 __ jcc(Assembler::less, ShiftTwo);
5952
5953 if (AVX3Threshold < 16 * 64) {
5954 __ cmpl(totalNumIter, 16);
5955 __ jcc(Assembler::less, ShiftTwo);
5956 }
5957 __ evpbroadcastd(x0, shiftCount, Assembler::AVX_512bit);
5958 __ subl(numIterTmp, 16);
5959 __ BIND(Shift512Loop);
5960 __ evmovdqul(x1, Address(oldArr, idx, Address::times_4), Assembler::AVX_512bit);
5961 __ evmovdqul(x2, Address(oldArr, idx, Address::times_4, 0x4), Assembler::AVX_512bit);
5962 __ vpshldvd(x1, x2, x0, Assembler::AVX_512bit);
5963 __ evmovdqul(Address(newArr, idx, Address::times_4), x1, Assembler::AVX_512bit);
5964 __ addl(idx, 16);
5965 __ subl(numIterTmp, 16);
5966 __ jcc(Assembler::greaterEqual, Shift512Loop);
5967 __ addl(numIterTmp, 16);
5968 }
5969 __ BIND(ShiftTwo);
5970 __ cmpl(totalNumIter, 1);
5971 __ jcc(Assembler::less, Exit);
5972 __ movl(tmp3, Address(oldArr, idx, Address::times_4));
5973 __ subl(numIterTmp, 2);
5974 __ jcc(Assembler::less, ShiftOne);
5975
5976 __ BIND(ShiftTwoLoop);
5977 __ movl(tmp4, Address(oldArr, idx, Address::times_4, 0x4));
5978 __ movl(tmp5, Address(oldArr, idx, Address::times_4, 0x8));
5979 __ shldl(tmp3, tmp4);
5980 __ shldl(tmp4, tmp5);
5981 __ movl(Address(newArr, idx, Address::times_4), tmp3);
5982 __ movl(Address(newArr, idx, Address::times_4, 0x4), tmp4);
5983 __ movl(tmp3, tmp5);
5984 __ addl(idx, 2);
5985 __ subl(numIterTmp, 2);
5986 __ jcc(Assembler::greaterEqual, ShiftTwoLoop);
5987
5988 // Do the last iteration
5989 __ BIND(ShiftOne);
5990 __ addl(numIterTmp, 2);
5991 __ cmpl(numIterTmp, 1);
5992 __ jcc(Assembler::less, Exit);
5993 __ movl(tmp4, Address(oldArr, idx, Address::times_4, 0x4));
5994 __ shldl(tmp3, tmp4);
5995 __ movl(Address(newArr, idx, Address::times_4), tmp3);
5996
5997 __ BIND(Exit);
5998 // Restore callee save registers. 5999 __ pop(tmp5); 6000 #ifdef _WINDOWS 6001 __ pop(tmp4); 6002 __ pop(tmp3); 6003 restore_arg_regs(); 6004 #endif 6005 __ leave(); // required for proper stackwalking of RuntimeStub frame 6006 __ ret(0); 6007 return start; 6008 } 6009 6010 address generate_libmExp() { 6011 StubCodeMark mark(this, "StubRoutines", "libmExp"); 6012 6013 address start = __ pc(); 6014 6015 const XMMRegister x0 = xmm0; 6016 const XMMRegister x1 = xmm1; 6017 const XMMRegister x2 = xmm2; 6018 const XMMRegister x3 = xmm3; 6019 6020 const XMMRegister x4 = xmm4; 6021 const XMMRegister x5 = xmm5; 6022 const XMMRegister x6 = xmm6; 6023 const XMMRegister x7 = xmm7; 6024 6025 const Register tmp = r11; 6026 6027 BLOCK_COMMENT("Entry:"); 6028 __ enter(); // required for proper stackwalking of RuntimeStub frame 6029 6030 __ fast_exp(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 6031 6032 __ leave(); // required for proper stackwalking of RuntimeStub frame 6033 __ ret(0); 6034 6035 return start; 6036 6037 } 6038 6039 address generate_libmLog() { 6040 StubCodeMark mark(this, "StubRoutines", "libmLog"); 6041 6042 address start = __ pc(); 6043 6044 const XMMRegister x0 = xmm0; 6045 const XMMRegister x1 = xmm1; 6046 const XMMRegister x2 = xmm2; 6047 const XMMRegister x3 = xmm3; 6048 6049 const XMMRegister x4 = xmm4; 6050 const XMMRegister x5 = xmm5; 6051 const XMMRegister x6 = xmm6; 6052 const XMMRegister x7 = xmm7; 6053 6054 const Register tmp1 = r11; 6055 const Register tmp2 = r8; 6056 6057 BLOCK_COMMENT("Entry:"); 6058 __ enter(); // required for proper stackwalking of RuntimeStub frame 6059 6060 __ fast_log(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2); 6061 6062 __ leave(); // required for proper stackwalking of RuntimeStub frame 6063 __ ret(0); 6064 6065 return start; 6066 6067 } 6068 6069 address generate_libmLog10() { 6070 StubCodeMark mark(this, "StubRoutines", "libmLog10"); 6071 6072 address start = __ pc(); 6073 6074 const XMMRegister x0 = xmm0; 6075 const XMMRegister x1 = xmm1; 6076 const XMMRegister x2 = xmm2; 6077 const XMMRegister x3 = xmm3; 6078 6079 const XMMRegister x4 = xmm4; 6080 const XMMRegister x5 = xmm5; 6081 const XMMRegister x6 = xmm6; 6082 const XMMRegister x7 = xmm7; 6083 6084 const Register tmp = r11; 6085 6086 BLOCK_COMMENT("Entry:"); 6087 __ enter(); // required for proper stackwalking of RuntimeStub frame 6088 6089 __ fast_log10(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 6090 6091 __ leave(); // required for proper stackwalking of RuntimeStub frame 6092 __ ret(0); 6093 6094 return start; 6095 6096 } 6097 6098 address generate_libmPow() { 6099 StubCodeMark mark(this, "StubRoutines", "libmPow"); 6100 6101 address start = __ pc(); 6102 6103 const XMMRegister x0 = xmm0; 6104 const XMMRegister x1 = xmm1; 6105 const XMMRegister x2 = xmm2; 6106 const XMMRegister x3 = xmm3; 6107 6108 const XMMRegister x4 = xmm4; 6109 const XMMRegister x5 = xmm5; 6110 const XMMRegister x6 = xmm6; 6111 const XMMRegister x7 = xmm7; 6112 6113 const Register tmp1 = r8; 6114 const Register tmp2 = r9; 6115 const Register tmp3 = r10; 6116 const Register tmp4 = r11; 6117 6118 BLOCK_COMMENT("Entry:"); 6119 __ enter(); // required for proper stackwalking of RuntimeStub frame 6120 6121 __ fast_pow(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 6122 6123 __ leave(); // required for proper stackwalking of RuntimeStub frame 6124 __ ret(0); 6125 6126 return start; 6127 6128 } 6129 6130 address generate_libmSin() { 6131 StubCodeMark mark(this, 
"StubRoutines", "libmSin"); 6132 6133 address start = __ pc(); 6134 6135 const XMMRegister x0 = xmm0; 6136 const XMMRegister x1 = xmm1; 6137 const XMMRegister x2 = xmm2; 6138 const XMMRegister x3 = xmm3; 6139 6140 const XMMRegister x4 = xmm4; 6141 const XMMRegister x5 = xmm5; 6142 const XMMRegister x6 = xmm6; 6143 const XMMRegister x7 = xmm7; 6144 6145 const Register tmp1 = r8; 6146 const Register tmp2 = r9; 6147 const Register tmp3 = r10; 6148 const Register tmp4 = r11; 6149 6150 BLOCK_COMMENT("Entry:"); 6151 __ enter(); // required for proper stackwalking of RuntimeStub frame 6152 6153 #ifdef _WIN64 6154 __ push(rsi); 6155 __ push(rdi); 6156 #endif 6157 __ fast_sin(x0, x1, x2, x3, x4, x5, x6, x7, rax, rbx, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 6158 6159 #ifdef _WIN64 6160 __ pop(rdi); 6161 __ pop(rsi); 6162 #endif 6163 6164 __ leave(); // required for proper stackwalking of RuntimeStub frame 6165 __ ret(0); 6166 6167 return start; 6168 6169 } 6170 6171 address generate_libmCos() { 6172 StubCodeMark mark(this, "StubRoutines", "libmCos"); 6173 6174 address start = __ pc(); 6175 6176 const XMMRegister x0 = xmm0; 6177 const XMMRegister x1 = xmm1; 6178 const XMMRegister x2 = xmm2; 6179 const XMMRegister x3 = xmm3; 6180 6181 const XMMRegister x4 = xmm4; 6182 const XMMRegister x5 = xmm5; 6183 const XMMRegister x6 = xmm6; 6184 const XMMRegister x7 = xmm7; 6185 6186 const Register tmp1 = r8; 6187 const Register tmp2 = r9; 6188 const Register tmp3 = r10; 6189 const Register tmp4 = r11; 6190 6191 BLOCK_COMMENT("Entry:"); 6192 __ enter(); // required for proper stackwalking of RuntimeStub frame 6193 6194 #ifdef _WIN64 6195 __ push(rsi); 6196 __ push(rdi); 6197 #endif 6198 __ fast_cos(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 6199 6200 #ifdef _WIN64 6201 __ pop(rdi); 6202 __ pop(rsi); 6203 #endif 6204 6205 __ leave(); // required for proper stackwalking of RuntimeStub frame 6206 __ ret(0); 6207 6208 return start; 6209 6210 } 6211 6212 address generate_libmTan() { 6213 StubCodeMark mark(this, "StubRoutines", "libmTan"); 6214 6215 address start = __ pc(); 6216 6217 const XMMRegister x0 = xmm0; 6218 const XMMRegister x1 = xmm1; 6219 const XMMRegister x2 = xmm2; 6220 const XMMRegister x3 = xmm3; 6221 6222 const XMMRegister x4 = xmm4; 6223 const XMMRegister x5 = xmm5; 6224 const XMMRegister x6 = xmm6; 6225 const XMMRegister x7 = xmm7; 6226 6227 const Register tmp1 = r8; 6228 const Register tmp2 = r9; 6229 const Register tmp3 = r10; 6230 const Register tmp4 = r11; 6231 6232 BLOCK_COMMENT("Entry:"); 6233 __ enter(); // required for proper stackwalking of RuntimeStub frame 6234 6235 #ifdef _WIN64 6236 __ push(rsi); 6237 __ push(rdi); 6238 #endif 6239 __ fast_tan(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 6240 6241 #ifdef _WIN64 6242 __ pop(rdi); 6243 __ pop(rsi); 6244 #endif 6245 6246 __ leave(); // required for proper stackwalking of RuntimeStub frame 6247 __ ret(0); 6248 6249 return start; 6250 6251 } 6252 6253 #undef __ 6254 #define __ masm-> 6255 6256 // Continuation point for throwing of implicit exceptions that are 6257 // not handled in the current activation. Fabricates an exception 6258 // oop and initiates normal exception dispatching in this 6259 // frame. Since we need to preserve callee-saved values (currently 6260 // only for C2, but done for C1 as well) we need a callee-saved oop 6261 // map and therefore have to make these stubs into RuntimeStubs 6262 // rather than BufferBlobs. 
If the compiler needs all registers to 6263 // be preserved between the fault point and the exception handler 6264 // then it must assume responsibility for that in 6265 // AbstractCompiler::continuation_for_implicit_null_exception or 6266 // continuation_for_implicit_division_by_zero_exception. All other 6267 // implicit exceptions (e.g., NullPointerException or 6268 // AbstractMethodError on entry) are either at call sites or 6269 // otherwise assume that stack unwinding will be initiated, so 6270 // caller saved registers were assumed volatile in the compiler. 6271 address generate_throw_exception(const char* name, 6272 address runtime_entry, 6273 Register arg1 = noreg, 6274 Register arg2 = noreg) { 6275 // Information about frame layout at time of blocking runtime call. 6276 // Note that we only have to preserve callee-saved registers since 6277 // the compilers are responsible for supplying a continuation point 6278 // if they expect all registers to be preserved. 6279 enum layout { 6280 rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt, 6281 rbp_off2, 6282 return_off, 6283 return_off2, 6284 framesize // inclusive of return address 6285 }; 6286 6287 int insts_size = 512; 6288 int locs_size = 64; 6289 6290 CodeBuffer code(name, insts_size, locs_size); 6291 OopMapSet* oop_maps = new OopMapSet(); 6292 MacroAssembler* masm = new MacroAssembler(&code); 6293 6294 address start = __ pc(); 6295 6296 // This is an inlined and slightly modified version of call_VM 6297 // which has the ability to fetch the return PC out of 6298 // thread-local storage and also sets up last_Java_sp slightly 6299 // differently than the real call_VM 6300 6301 __ enter(); // required for proper stackwalking of RuntimeStub frame 6302 6303 assert(is_even(framesize/2), "sp not 16-byte aligned"); 6304 6305 // return address and rbp are already in place 6306 __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog 6307 6308 int frame_complete = __ pc() - start; 6309 6310 // Set up last_Java_sp and last_Java_fp 6311 address the_pc = __ pc(); 6312 __ set_last_Java_frame(rsp, rbp, the_pc); 6313 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack 6314 6315 // Call runtime 6316 if (arg1 != noreg) { 6317 assert(arg2 != c_rarg1, "clobbered"); 6318 __ movptr(c_rarg1, arg1); 6319 } 6320 if (arg2 != noreg) { 6321 __ movptr(c_rarg2, arg2); 6322 } 6323 __ movptr(c_rarg0, r15_thread); 6324 BLOCK_COMMENT("call runtime_entry"); 6325 __ call(RuntimeAddress(runtime_entry)); 6326 6327 // Generate oop map 6328 OopMap* map = new OopMap(framesize, 0); 6329 6330 oop_maps->add_gc_map(the_pc - start, map); 6331 6332 __ reset_last_Java_frame(true); 6333 6334 __ leave(); // required for proper stackwalking of RuntimeStub frame 6335 6336 // check for pending exceptions 6337 #ifdef ASSERT 6338 Label L; 6339 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), 6340 (int32_t) NULL_WORD); 6341 __ jcc(Assembler::notEqual, L); 6342 __ should_not_reach_here(); 6343 __ bind(L); 6344 #endif // ASSERT 6345 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 6346 6347 6348 // codeBlob framesize is in words (not VMRegImpl::slot_size) 6349 RuntimeStub* stub = 6350 RuntimeStub::new_runtime_stub(name, 6351 &code, 6352 frame_complete, 6353 (framesize >> (LogBytesPerWord - LogBytesPerInt)), 6354 oop_maps, false); 6355 return stub->entry_point(); 6356 } 6357 6358 void create_control_words() { 6359 // Round to nearest, 53-bit mode, exceptions masked 6360 StubRoutines::_fpu_cntrl_wrd_std = 0x027F; 6361 // Round to zero, 53-bit 
  void create_control_words() {
    // Round to nearest, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_std   = 0x027F;
    // Round to zero, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
    // Round to nearest, 24-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_24    = 0x007F;
    // Round to nearest, all exceptions masked
    StubRoutines::_mxcsr_std           = 0x1F80;
    // Note: the following two constants are 80-bit values;
    // their layout is critical for correct loading by the FPU.
    // Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias1[0] = 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias1[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias1[2] = 0x03ff;
    // Un-bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias2[0] = 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias2[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias2[2] = 0x7bff;
  }

  // Initialization
  void generate_initial() {
    // Generates all stubs and initializes the entry points

    // These platform-specific settings are needed by generate_call_stub()
    create_control_words();

    // Entry points that exist on all platforms.  Note: this is code
    // that could be shared among different platforms - however the
    // benefit seems to be smaller than the disadvantage of having a
    // much more complicated generator structure. See also comment in
    // stubRoutines.hpp.

    StubRoutines::_forward_exception_entry = generate_forward_exception();

    StubRoutines::_call_stub_entry =
      generate_call_stub(StubRoutines::_call_stub_return_address);

    // is referenced by megamorphic call
    StubRoutines::_catch_exception_entry = generate_catch_exception();

    // atomic calls
    StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
    StubRoutines::_atomic_xchg_long_entry    = generate_atomic_xchg_long();
    StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
    StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
    StubRoutines::_atomic_add_entry          = generate_atomic_add();
    StubRoutines::_atomic_add_long_entry     = generate_atomic_add_long();
    StubRoutines::_fence_entry               = generate_orderaccess_fence();

    // platform dependent
    StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
    StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();

    StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();

    StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
    StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
    StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
    StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();

    StubRoutines::x86::_float_sign_mask  = generate_fp_mask("float_sign_mask",  0x7FFFFFFF7FFFFFFF);
    StubRoutines::x86::_float_sign_flip  = generate_fp_mask("float_sign_flip",  0x8000000080000000);
    StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
    StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);
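    // Note (added): each generate_fp_mask value above is a 32- or 64-bit
    // pattern doubled up so a single 128-bit constant serves SSE code;
    // e.g. the compilers implement float abs as 'andps xmm, [float_sign_mask]'
    // (clearing the sign bits) and float negate as 'xorps xmm,
    // [float_sign_flip]' (flipping them), with the double variants using
    // andpd/xorpd on the 64-bit masks.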
    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_StackOverflowError));
    StubRoutines::_throw_delayed_StackOverflowError_entry =
      generate_throw_exception("delayed StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_delayed_StackOverflowError));
    if (UseCRC32Intrinsics) {
      // set table address before generating the stub which uses it
      StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
    }

    if (UseCRC32CIntrinsics) {
      bool supports_clmul = VM_Version::supports_clmul();
      StubRoutines::x86::generate_CRC32C_table(supports_clmul);
      StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
      StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
    }
    if (UseLibmIntrinsic && InlineIntrinsics) {
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::x86::_ONEHALF_adr      = (address)StubRoutines::x86::_ONEHALF;
        StubRoutines::x86::_P_2_adr          = (address)StubRoutines::x86::_P_2;
        StubRoutines::x86::_SC_4_adr         = (address)StubRoutines::x86::_SC_4;
        StubRoutines::x86::_Ctable_adr       = (address)StubRoutines::x86::_Ctable;
        StubRoutines::x86::_SC_2_adr         = (address)StubRoutines::x86::_SC_2;
        StubRoutines::x86::_SC_3_adr         = (address)StubRoutines::x86::_SC_3;
        StubRoutines::x86::_SC_1_adr         = (address)StubRoutines::x86::_SC_1;
        StubRoutines::x86::_PI_INV_TABLE_adr = (address)StubRoutines::x86::_PI_INV_TABLE;
        StubRoutines::x86::_PI_4_adr         = (address)StubRoutines::x86::_PI_4;
        StubRoutines::x86::_PI32INV_adr      = (address)StubRoutines::x86::_PI32INV;
        StubRoutines::x86::_SIGN_MASK_adr    = (address)StubRoutines::x86::_SIGN_MASK;
        StubRoutines::x86::_P_1_adr          = (address)StubRoutines::x86::_P_1;
        StubRoutines::x86::_P_3_adr          = (address)StubRoutines::x86::_P_3;
        StubRoutines::x86::_NEG_ZERO_adr     = (address)StubRoutines::x86::_NEG_ZERO;
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dexp)) {
        StubRoutines::_dexp = generate_libmExp();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
        StubRoutines::_dlog = generate_libmLog();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) {
        StubRoutines::_dlog10 = generate_libmLog10();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) {
        StubRoutines::_dpow = generate_libmPow();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
        StubRoutines::_dsin = generate_libmSin();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
        StubRoutines::_dcos = generate_libmCos();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::_dtan = generate_libmTan();
      }
    }
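    // Note (added): the *_adr fields published above are 'address'-typed
    // aliases of the constant tables (juint arrays) defined in
    // stubRoutines_x86.cpp; the sin/cos/tan assembly references those
    // tables through ExternalAddress, which takes an address, so the
    // aliases must be in place before the libm stubs are generated below.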
    // Safefetch stubs.
    generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
                                                       &StubRoutines::_safefetch32_fault_pc,
                                                       &StubRoutines::_safefetch32_continuation_pc);
    generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                                                       &StubRoutines::_safefetchN_fault_pc,
                                                       &StubRoutines::_safefetchN_continuation_pc);
  }

  void generate_all() {
    // Generates all stubs and initializes the entry points

    // These entry points require SharedInfo::stack0 to be set up in
    // non-core builds and need to be relocatable, so they each
    // fabricate a RuntimeStub internally.
    StubRoutines::_throw_AbstractMethodError_entry =
      generate_throw_exception("AbstractMethodError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_AbstractMethodError));

    StubRoutines::_throw_IncompatibleClassChangeError_entry =
      generate_throw_exception("IncompatibleClassChangeError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_IncompatibleClassChangeError));

    StubRoutines::_throw_NullPointerException_at_call_entry =
      generate_throw_exception("NullPointerException at call throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_NullPointerException_at_call));

    // entry points that are platform specific
    StubRoutines::x86::_vector_float_sign_mask     = generate_vector_mask("vector_float_sign_mask",     0x7FFFFFFF7FFFFFFF);
    StubRoutines::x86::_vector_float_sign_flip     = generate_vector_mask("vector_float_sign_flip",     0x8000000080000000);
    StubRoutines::x86::_vector_double_sign_mask    = generate_vector_mask("vector_double_sign_mask",    0x7FFFFFFFFFFFFFFF);
    StubRoutines::x86::_vector_double_sign_flip    = generate_vector_mask("vector_double_sign_flip",    0x8000000000000000);
    StubRoutines::x86::_vector_all_bits_set        = generate_vector_mask("vector_all_bits_set",        0xFFFFFFFFFFFFFFFF);
    StubRoutines::x86::_vector_short_to_byte_mask  = generate_vector_mask("vector_short_to_byte_mask",  0x00ff00ff00ff00ff);
    StubRoutines::x86::_vector_byte_perm_mask      = generate_vector_byte_perm_mask("vector_byte_perm_mask");
    StubRoutines::x86::_vector_int_to_byte_mask    = generate_vector_mask("vector_int_to_byte_mask",    0x000000ff000000ff);
    StubRoutines::x86::_vector_int_to_short_mask   = generate_vector_mask("vector_int_to_short_mask",   0x0000ffff0000ffff);
    StubRoutines::x86::_vector_32_bit_mask         = generate_vector_custom_i32("vector_32_bit_mask", Assembler::AVX_512bit,
                                                                                0xFFFFFFFF, 0, 0, 0);
    StubRoutines::x86::_vector_64_bit_mask         = generate_vector_custom_i32("vector_64_bit_mask", Assembler::AVX_512bit,
                                                                                0xFFFFFFFF, 0xFFFFFFFF, 0, 0);
    StubRoutines::x86::_vector_int_shuffle_mask    = generate_vector_mask("vector_int_shuffle_mask",    0x0302010003020100);
    StubRoutines::x86::_vector_short_shuffle_mask  = generate_vector_mask("vector_short_shuffle_mask",  0x0100010001000100);
    StubRoutines::x86::_vector_long_shuffle_mask   = generate_vector_mask("vector_long_shuffle_mask",   0x0000000100000000);
    StubRoutines::x86::_vector_long_sign_mask      = generate_vector_mask("vector_long_sign_mask",      0x8000000000000000);
    StubRoutines::x86::_vector_iota_indices        = generate_iota_indices("iota_indices");
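    // Note (added): the shuffle-mask constants above are byte patterns for
    // pshufb-style permutes used when the compilers materialize vector
    // shuffles; e.g. in vector_int_shuffle_mask the value 0x0302010003020100
    // selects bytes 0..3 in order (the identity permutation for one 32-bit
    // lane), repeated for each 64-bit half of the constant.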
    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

    // data cache line writeback
    StubRoutines::_data_cache_writeback      = generate_data_cache_writeback();
    StubRoutines::_data_cache_writeback_sync = generate_data_cache_writeback_sync();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    // don't bother generating these AES intrinsic stubs unless global flag is set
    if (UseAESIntrinsics) {
      StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others
      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
      if (VM_Version::supports_avx512_vaes() && VM_Version::supports_avx512vl() && VM_Version::supports_avx512dq()) {
        StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptVectorAESCrypt();
        StubRoutines::_electronicCodeBook_encryptAESCrypt = generate_electronicCodeBook_encryptAESCrypt();
        StubRoutines::_electronicCodeBook_decryptAESCrypt = generate_electronicCodeBook_decryptAESCrypt();
      } else {
        StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
      }
    }
    if (UseAESCTRIntrinsics) {
      if (VM_Version::supports_avx512_vaes() && VM_Version::supports_avx512bw() && VM_Version::supports_avx512vl()) {
        StubRoutines::x86::_counter_mask_addr = counter_mask_addr();
        StubRoutines::_counterMode_AESCrypt = generate_counterMode_VectorAESCrypt();
      } else {
        StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask();
        StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
      }
    }

    if (UseSHA1Intrinsics) {
      StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
      StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
      StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress");
      StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB");
    }
    if (UseSHA256Intrinsics) {
      StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
      char* dst = (char*)StubRoutines::x86::_k256_W;
      char* src = (char*)StubRoutines::x86::_k256;
      for (int ii = 0; ii < 16; ++ii) {
        memcpy(dst + 32 * ii,      src + 16 * ii, 16);
        memcpy(dst + 32 * ii + 16, src + 16 * ii, 16);
      }
      StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask();
      StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress");
      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB");
    }
    if (UseSHA512Intrinsics) {
      StubRoutines::x86::_k512_W_addr = (address)StubRoutines::x86::_k512_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = generate_pshuffle_byte_flip_mask_sha512();
      StubRoutines::_sha512_implCompress = generate_sha512_implCompress(false, "sha512_implCompress");
      StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB");
    }
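    // Note (added): the memcpy loop in the UseSHA256Intrinsics block above
    // widens the 64-entry _k256 round-constant table (16 groups of 16
    // bytes) into _k256_W by duplicating each 16-byte group into both
    // halves of a 32-byte slot, so the AVX2 SHA-256 code can pull the same
    // four round constants into both 128-bit lanes with one ymm load.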
    // Generate GHASH intrinsics code
    if (UseGHASHIntrinsics) {
      StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();
      StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask();
      if (VM_Version::supports_avx()) {
        StubRoutines::x86::_ghash_shuffmask_addr = ghash_shufflemask_addr();
        StubRoutines::x86::_ghash_poly_addr = ghash_polynomial_addr();
        StubRoutines::_ghash_processBlocks = generate_avx_ghash_processBlocks();
      } else {
        StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
      }
    }

    if (UseBASE64Intrinsics) {
      StubRoutines::x86::_and_mask = base64_and_mask_addr();
      StubRoutines::x86::_bswap_mask = base64_bswap_mask_addr();
      StubRoutines::x86::_base64_charset = base64_charset_addr();
      StubRoutines::x86::_url_charset = base64url_charset_addr();
      StubRoutines::x86::_gather_mask = base64_gather_mask_addr();
      StubRoutines::x86::_left_shift_mask = base64_left_shift_mask_addr();
      StubRoutines::x86::_right_shift_mask = base64_right_shift_mask_addr();
      StubRoutines::_base64_encodeBlock = generate_base64_encodeBlock();
    }

    BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
    if (bs_nm != NULL) {
      StubRoutines::x86::_method_entry_barrier = generate_method_entry_barrier();
    }
#ifdef COMPILER2
    if (UseMultiplyToLenIntrinsic) {
      StubRoutines::_multiplyToLen = generate_multiplyToLen();
    }
    if (UseSquareToLenIntrinsic) {
      StubRoutines::_squareToLen = generate_squareToLen();
    }
    if (UseMulAddIntrinsic) {
      StubRoutines::_mulAdd = generate_mulAdd();
    }
    if (VM_Version::supports_avx512_vbmi2()) {
      StubRoutines::_bigIntegerRightShiftWorker = generate_bigIntegerRightShift();
      StubRoutines::_bigIntegerLeftShiftWorker = generate_bigIntegerLeftShift();
    }
    if (UseMontgomeryMultiplyIntrinsic) {
      StubRoutines::_montgomeryMultiply
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
    }
    if (UseMontgomerySquareIntrinsic) {
      StubRoutines::_montgomerySquare
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
    }
#endif // COMPILER2

    if (UseVectorizedMismatchIntrinsic) {
      StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch();
    }
  }

 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
}; // end class declaration

#define UCM_TABLE_MAX_ENTRIES 16
void StubGenerator_generate(CodeBuffer* code, bool all) {
  if (UnsafeCopyMemory::_table == NULL) {
    UnsafeCopyMemory::create_table(UCM_TABLE_MAX_ENTRIES);
  }
  StubGenerator g(code, all);
}
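// Note (added): StubGenerator_generate runs twice during VM startup: once
// early with all == false (generate_initial: the call stub, control words,
// and other entries the interpreter needs) and once after universe_init
// with all == true (generate_all: compiler intrinsics, arraycopy stubs,
// verify_oop).  The UnsafeCopyMemory table is created before the first
// pass, capped at UCM_TABLE_MAX_ENTRIES, because the arraycopy stubs
// register their unsafe-copy code regions in it.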