/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/zThreadLocalData.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions

// Stub Code definitions

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    // This can destroy rscratch1 if counter is far from the code cache
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter)                   \
  BLOCK_COMMENT("inc_counter " #counter);         \
  inc_counter_np_(counter);
#endif

  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    c_rarg4:   (interpreter) entry point              address
  //    c_rarg5:   parameters                             intptr_t*
  //    16(rbp): parameter size (in words)                int
  //    24(rbp): thread                                   Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -12 [ argument word 1      ]
  // -11 [ saved r15            ] <--- rsp_after_call
  // -10 [ saved r14            ]
  //  -9 [ saved r13            ]
  //  -8 [ saved r12            ]
  //  -7 [ saved rbx            ]
  //  -6 [ call wrapper         ]
  //  -5 [ result               ]
  //  -4 [ result type          ]
  //  -3 [ method               ]
  //  -2 [ entry point          ]
  //  -1 [ parameters           ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ parameter size       ]
  //   3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    48(rbp): (interpreter) entry point                address
  //    56(rbp): parameters                               intptr_t*
  //    64(rbp): parameter size (in words)                int
  //    72(rbp): thread                                   Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -60 [ argument word 1      ]
  // -59 [ saved xmm31          ] <--- rsp_after_call
  //     [ saved xmm16-xmm30    ] (EVEX enabled, else the space is blank)
  // -27 [ saved xmm15          ]
  //     [ saved xmm7-xmm14     ]
  //  -9 [ saved xmm6           ] (each xmm register takes 2 slots)
  //  -7 [ saved r15            ]
  //  -6 [ saved r14            ]
  //  -5 [ saved r13            ]
  //  -4 [ saved r12            ]
  //  -3 [ saved rdi            ]
  //  -2 [ saved rsi            ]
  //  -1 [ saved rbx            ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ call wrapper         ]
  //   3 [ result               ]
  //   4 [ result type          ]
  //   5 [ method               ]
  //   6 [ entry point          ]
  //   7 [ parameters           ]
  //   8 [ parameter size       ]
  //   9 [ thread               ]
  //
  //    Windows reserves the caller's stack space for arguments 1-4.
  //    We spill c_rarg0-c_rarg3 to this space.

  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    xmm_save_first     = 6,  // save from xmm6
    xmm_save_last      = 31, // to xmm31
    xmm_save_base      = -9,
    rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -59
    r15_off            = -7,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    call_wrapper_off   =  2,
    result_off         =  3,
    result_type_off    =  4,
    method_off         =  5,
    entry_point_off    =  6,
    parameters_off     =  7,
    parameter_size_off =  8,
    thread_off         =  9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    parameter_size_off =  2,
    thread_off         =  3
#endif
  };

#ifdef _WIN64
  Address xmm_save(int reg) {
    assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
    return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
  }
#endif
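
  // Editorial worked example (not in the original source): with
  // xmm_save_base = -9 and two 8-byte slots per XMM register,
  // xmm_save(reg) resolves to rbp + (-9 - (reg - 6) * 2) * wordSize,
  // so xmm_save(6) is rbp - 9*wordSize and xmm_save(31) is
  // rbp - 59*wordSize, matching rsp_after_call_off (-59) in the
  // layout diagram above.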

  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,   c_rarg5); // parameters
    __ movptr(entry_point,  c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);
    if (UseAVX > 2) {
      __ movl(rbx, 0xffff);
      __ kmovwl(k1, rbx);
    }
#ifdef _WIN64
    int last_reg = 15;
    if (UseAVX > 2) {
      last_reg = 31;
    }
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vextractf32x4(xmm_save(i), as_XMMRegister(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(xmm_save(i), as_XMMRegister(i));
      }
    }

    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);      // parameter pointer
    __ movl(c_rarg1, c_rarg3);           // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0)); // get parameter
    __ addptr(c_rarg2, wordSize);        // advance to next parameter
    __ decrementl(c_rarg1);              // decrement counter
    __ push(rax);                        // pass parameter
    __ jcc(Assembler::notZero, loop);

    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);              // get Method*
    __ movptr(c_rarg1, entry_point);     // get entry_point
    __ mov(r13, rsp);                    // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::call_stub: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::call_stub: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L3);
    }
#endif

    // restore regs belonging to calling function
#ifdef _WIN64
    // emit the restores for xmm regs
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vinsertf32x4(as_XMMRegister(i), as_XMMRegister(i), xmm_save(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(as_XMMRegister(i), xmm_save(i));
      }
    }
#endif
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ vzeroupper();
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }
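
  // A rough C-level sketch of the caller side (editorial; the actual call
  // site lives in JavaCalls::call_helper() and the CallStub typedef in
  // stubRoutines.hpp -- the local names used here are illustrative only):
  //
  //   StubRoutines::call_stub()(
  //       (address)&link,        // call wrapper
  //       result_ptr,            // where rax / xmm0 gets stored
  //       result_type,           // BasicType of the result
  //       method(),              // Method* to invoke
  //       entry_point,           // (interpreter) entry point
  //       args,                  // intptr_t* parameter block
  //       args_size_in_words,    // parameter size (in words)
  //       thread);               // current Thread*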

  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to set up the
  // rsp.
  //
  // rax: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    address start = __ pc();

    // same as in generate_call_stub():
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
    const Address thread        (rbp, thread_off         * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::catch_exception: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::catch_exception: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L3);
    }
#endif

    // set pending exception
    __ verify_oop(rax);

    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
    __ lea(rscratch1, ExternalAddress((address)__FILE__));
    __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
    __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL,
           "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }
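
  // Editorial note: after the jump above, execution resumes in the code
  // generated by generate_call_stub() at _call_stub_return_address, the
  // entry frame unwinds normally, and the C++ caller observes the oop via
  // the usual JavaThread test, conceptually:
  //
  //   if (thread->has_pending_exception()) { /* handle or propagate */ }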

  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }

  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::xchg_long(jlong exchange_value, volatile jlong* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }
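
  // Equivalent C semantics of the cmpxchg stubs, shown for clarity only
  // (hypothetical helper, not part of this file).  LOCK CMPXCHG compares
  // rax (loaded with compare_value above) against *dest and, atomically,
  // either installs the new value or loads the current one into rax, so
  // the old value of *dest is returned either way:
  //
  //   jint cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
  //     jint old = *dest;                 // read-modify-write is one atomic step
  //     if (old == compare_value) {
  //       *dest = exchange_value;
  //     }
  //     return old;
  //   }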

  // Support for int8_t atomic::atomic_cmpxchg(int8_t exchange_value, volatile int8_t* dest,
  //                                           int8_t compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_byte() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
    address start = __ pc();

    __ movsbq(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for int64_t atomic::atomic_cmpxchg(int64_t exchange_value,
  //                                            volatile int64_t* dest,
  //                                            int64_t compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jint atomic::add(jint add_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    if ( os::is_MP() ) __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);
    __ addl(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
    if ( os::is_MP() ) __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
    __ addptr(rax, c_rarg0);
    __ ret(0);

    return start;
  }
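
  // Editorial note on the add stubs above: XADD atomically stores the sum
  // and leaves the *previous* value of *dest in c_rarg0; adding that into
  // rax (which still holds add_value) yields the *new* value, conceptually
  // (hypothetical helper, not part of this file):
  //
  //   jint atomic_add(jint add_value, volatile jint* dest) {
  //     jint old = *dest;                 // read-modify-write is one atomic step
  //     *dest = old + add_value;
  //     return old + add_value;
  //   }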

  // Support for intptr_t OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess).  This is used as part of debugging
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp(rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // caller's fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_sp()
  //
  // This routine is used to find the previous stack pointer for the
  // caller.
  address generate_get_previous_sp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_sp");
    address start = __ pc();

    __ movptr(rax, rsp);
    __ addptr(rax, 8); // return address is at the top of the stack.
    __ ret(0);

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize); // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK); // Only check control and mask bits
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }

  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
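
  // Editorial background for the fixup stubs: CVTTSS2SI/CVTTSD2SI return
  // the "integer indefinite" value (0x80000000 or 0x8000000000000000) when
  // the input is NaN or out of range.  Compiled code then calls these
  // stubs, which re-inspect the original float bits left on the stack and
  // rewrite the result to 0 for NaN, min_jint for negative overflow, or
  // max_jint for positive overflow, matching Java's (int)f semantics.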

  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64( mask, relocInfo::none );
    __ emit_data64( mask, relocInfo::none );

    return start;
  }
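
  // Usage sketch (editorial, based on other x86 stubs that consume these
  // constants): the 16-byte mask emitted above is intended for branch-free
  // SSE abs/neg, e.g. ANDPD with 0x7FFFFFFFFFFFFFFF clears the sign bit
  // (fabs) while XORPD with 0x8000000000000000 flips it (negate).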

  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,
      saved_r10     = 8 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr   = 16 * wordSize,
      error_msg     = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit); // if obj is NULL it is OK

#if INCLUDE_ZGC
    if (UseZGC) {
      // Check if metadata bits indicate a bad oop
      __ testptr(rax, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
      __ jcc(Assembler::notZero, error);
    }
#endif

    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // set r12 to heapbase for load_klass()
    __ reinit_heapbase();

    // make sure klass is 'reasonable', i.e., non-zero.
    __ load_klass(rax, rax); // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // restore c_rarg3
    __ pop(c_rarg2);                               // restore c_rarg2
    __ pop(r12);                                   // restore r12
    __ popf();                                     // restore flags
    __ ret(4 * wordSize);                          // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // get saved c_rarg3 back
    __ pop(c_rarg2);                               // get saved c_rarg2 back
    __ pop(r12);                                   // get saved r12 back
    __ popf();                                     // get saved flags off stack --
                                                   // will be ignored

    __ pusha();                                    // push registers
                                                   // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //     [tos +  0] 16 saved registers
    //     [tos + 16] return address
    //   * [tos + 17] error message (char*)
    //   * [tos + 18] object to verify (oop)
    //   * [tos + 19] saved rax - saved by caller and bashed
    //   * [tos + 20] saved r10 (rscratch1) - saved by caller
    //   * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ mov(rsp, r12);                               // restore rsp
    __ popa();                                      // pop registers (includes r12)
    __ ret(4 * wordSize);                           // pop caller saved stuff

    return start;
  }
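
  // Editorial sketch of the caller side (inferred from the stack layout
  // documented above): MacroAssembler::verify_oop pushes rscratch1, rax,
  // the oop, and the error-message pointer before calling this stub, which
  // is why ret(4 * wordSize) pops exactly those four caller-saved words.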

  //
  // Verify that a register contains a clean 32-bit positive value
  // (high 32 bits are 0) so it can be used in 64-bit shifts.
  //
  // Input:
  //    Rint  -  32-bit value
  //    Rtmp  -  scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#ifdef ASSERT
    Label L;
    assert_different_registers(Rtmp, Rint);
    __ movslq(Rtmp, Rint);
    __ cmpq(Rtmp, Rint);
    __ jcc(Assembler::equal, L);
    __ stop("high 32-bits of int value are not 0");
    __ bind(L);
#endif
  }

  // Generate overlap test for array copy stubs
  //
  // Input:
  //     c_rarg0 - from
  //     c_rarg1 - to
  //     c_rarg2 - element count
  //
  // Output:
  //     rax   - &from[element count - 1]
  //
  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(NULL, &L_no_overlap, sf);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
    const Register from     = c_rarg0;
    const Register to       = c_rarg1;
    const Register count    = c_rarg2;
    const Register end_from = rax;

    __ cmpptr(to, from);
    __ lea(end_from, Address(from, count, sf, 0));
    if (NOLp == NULL) {
      ExternalAddress no_overlap(no_overlap_target);
      __ jump_cc(Assembler::belowEqual, no_overlap);
      __ cmpptr(to, end_from);
      __ jump_cc(Assembler::aboveEqual, no_overlap);
    } else {
      __ jcc(Assembler::belowEqual, (*NOLp));
      __ cmpptr(to, end_from);
      __ jcc(Assembler::aboveEqual, (*NOLp));
    }
  }

  // Shuffle first three arg regs on Windows into Linux/Solaris locations.
  //
  // Outputs:
  //    rdi - rcx
  //    rsi - rdx
  //    rdx - r8
  //    rcx - r9
  //
  // Registers r9 and r10 are used to save rdi and rsi on Windows, where they
  // are non-volatile.  r9 and r10 should not be used by the caller.
  //
  DEBUG_ONLY(bool regs_in_thread;)

  void setup_arg_regs(int nargs = 3) {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
    assert(nargs == 3 || nargs == 4, "else fix");
#ifdef _WIN64
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    if (nargs >= 4)
      __ mov(rax, r9);  // r9 is also saved_rdi
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
    if (nargs >= 4)
      __ mov(rcx, rax); // c_rarg3 (via rax)
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
    DEBUG_ONLY(regs_in_thread = false;)
  }

  void restore_arg_regs() {
    assert(!regs_in_thread, "wrong call to restore_arg_regs");
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
#ifdef _WIN64
    __ movptr(rdi, saved_rdi);
    __ movptr(rsi, saved_rsi);
#endif
  }
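
  // For reference (editorial): the shuffle above maps the Win64 integer
  // argument registers onto the System V ones so the copy stubs can use a
  // single register assignment on both ABIs:
  //
  //   System V: c_rarg0..c_rarg3 = rdi, rsi, rdx, rcx
  //   Win64:    c_rarg0..c_rarg3 = rcx, rdx, r8,  r9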

  // This is used in places where r10 is a scratch register, and can
  // be adapted if r9 is needed also.
  void setup_arg_regs_using_thread() {
    const Register saved_r15 = r9;
#ifdef _WIN64
    __ mov(saved_r15, r15); // r15 is callee saved and needs to be restored
    __ get_thread(r15_thread);
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    __ movptr(Address(r15_thread, in_bytes(JavaThread::windows_saved_rdi_offset())), rdi);
    __ movptr(Address(r15_thread, in_bytes(JavaThread::windows_saved_rsi_offset())), rsi);

    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
    DEBUG_ONLY(regs_in_thread = true;)
  }

  void restore_arg_regs_using_thread() {
    assert(regs_in_thread, "wrong call to restore_arg_regs");
    const Register saved_r15 = r9;
#ifdef _WIN64
    __ get_thread(r15_thread);
    __ movptr(rsi, Address(r15_thread, in_bytes(JavaThread::windows_saved_rsi_offset())));
    __ movptr(rdi, Address(r15_thread, in_bytes(JavaThread::windows_saved_rdi_offset())));
    __ mov(r15, saved_r15); // r15 is callee saved and needs to be restored
#endif
  }

  // Copy big chunks forward
  //
  // Inputs:
  //   end_from     - source array's end address
  //   end_to       - destination array's end address
  //   qword_count  - 64-bit element count, negative
  //   to           - scratch
  //   L_copy_bytes - entry label
  //   L_copy_8_bytes  - exit label
  //
  void copy_bytes_forward(Register end_from, Register end_to,
                          Register qword_count, Register to,
                          Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      if (UseAVX > 2) {
        __ movl(to, 0xffff);
        __ kmovwl(k1, to);
      }
      // Copy 64 bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit);
        __ evmovdqul(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
        __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
        __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 8);
      __ jcc(Assembler::lessEqual, L_loop);
      __ subptr(qword_count, 4);  // sub(8) and add(4)
      __ jccb(Assembler::greater, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
      }
      __ addptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32 bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
      __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
      __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
      __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
      __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);

      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 4);
      __ jcc(Assembler::lessEqual, L_loop);
    }
    __ subptr(qword_count, 4);
    __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
  }
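
  // Worked example (editorial): for a forward copy, qword_count enters as
  // -N and end_from/end_to point at the last qword, so
  // Address(end_from, qword_count, times_8, -56) addresses the front of the
  // remaining chunk.  A single addptr/jcc pair both advances the (negative)
  // index toward zero and tests for loop exit.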

  // Copy big chunks backward
  //
  // Inputs:
  //   from         - source array's address
  //   dest         - destination array's address
  //   qword_count  - 64-bit element count
  //   to           - scratch
  //   L_copy_bytes - entry label
  //   L_copy_8_bytes  - exit label
  //
  void copy_bytes_backward(Register from, Register dest,
                           Register qword_count, Register to,
                           Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      if (UseAVX > 2) {
        __ movl(to, 0xffff);
        __ kmovwl(k1, to);
      }
      // Copy 64 bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 0), Assembler::AVX_512bit);
        __ evmovdqul(Address(dest, qword_count, Address::times_8, 0), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
        __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8,  0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8,  0), xmm1);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
        __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
        __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
        __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
        __ movdqu(xmm3, Address(from, qword_count, Address::times_8,  0));
        __ movdqu(Address(dest, qword_count, Address::times_8,  0), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 8);
      __ jcc(Assembler::greaterEqual, L_loop);

      __ addptr(qword_count, 4);  // add(8) and sub(4)
      __ jccb(Assembler::less, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8,  0));
        __ movdqu(Address(dest, qword_count, Address::times_8,  0), xmm1);
      }
      __ subptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32 bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(from, qword_count, Address::times_8, 24));
      __ movq(Address(dest, qword_count, Address::times_8, 24), to);
      __ movq(to, Address(from, qword_count, Address::times_8, 16));
      __ movq(Address(dest, qword_count, Address::times_8, 16), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  8));
      __ movq(Address(dest, qword_count, Address::times_8,  8), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  0));
      __ movq(Address(dest, qword_count, Address::times_8,  0), to);

      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 4);
      __ jcc(Assembler::greaterEqual, L_loop);
    }
    __ addptr(qword_count, 4);
    __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
  }
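
  // Editorial note: copy_bytes_backward is the mirror image.  qword_count
  // stays positive and counts down toward zero, so
  // Address(from, qword_count, times_8, 0) walks from the back of the
  // array, which is what a conjoint (potentially overlapping) copy with
  // to > from requires.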


  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_byte_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_byte_copy().
  //
  address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    Label L_copy_byte, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count); // make the count negative
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jccb(Assembler::zero, L_copy_byte);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ addptr(end_from, 2);
    __ addptr(end_to, 2);

    // Check for and copy trailing byte
    __ BIND(L_copy_byte);
    __ testl(byte_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movb(rax, Address(end_from, 8));
    __ movb(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-bytes chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }
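
  // Worked example for the tail handling above (editorial): for a 13-byte
  // copy, byte_count = 13 and qword_count = 13 >> 3 = 1, so one qword is
  // copied in bulk; then bit 2 of byte_count (value 4) selects a trailing
  // dword copy, bit 1 (value 2, clear here) a trailing word, and bit 0 the
  // final byte: 8 + 4 + 1 = 13 bytes.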

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from high to low addresses.

    // Check for and copy trailing byte
    __ testl(byte_count, 1);
    __ jcc(Assembler::zero, L_copy_2_bytes);
    __ movb(rax, Address(from, byte_count, Address::times_1, -1));
    __ movb(Address(to, byte_count, Address::times_1, -1), rax);
    __ decrement(byte_count); // Adjust for possible trailing word

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jcc(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, byte_count, Address::times_1, -2));
    __ movw(Address(to, byte_count, Address::times_1, -2), rax);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-bytes chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
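
  // Editorial note: the conjoint stub above peels the odd tail (byte, word,
  // dword) off the high end *before* entering the backward qword loop, so
  // the remaining count is a whole number of qwords; because the copy runs
  // high-to-low, those tail source bytes have not yet been overwritten.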

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_short_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_short_copy().
  //
  address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Original 'dest' is trashed, so we can't use it as a
    // base register for a possible trailing word copy

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-bytes chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }

  address generate_fill(BasicType t, bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    BLOCK_COMMENT("Entry:");

    const Register to    = c_rarg0; // destination array address
    const Register value = c_rarg1; // value
    const Register count = c_rarg2; // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ generate_fill(t, aligned, to, value, count, rax, xmm0);

    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }
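
  // Usage sketch (editorial assumption): the stub generated above backs
  // StubRoutines::_jbyte_fill / _jshort_fill / _jint_fill, which the
  // compilers use for Arrays.fill-style loops; conceptually
  //
  //   jint_fill(to, value, count);   // to[0 .. count-1] = (jint)value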
  address generate_fill(BasicType t, bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    BLOCK_COMMENT("Entry:");

    const Register to    = c_rarg0;  // destination array address
    const Register value = c_rarg1;  // value
    const Register count = c_rarg2;  // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ generate_fill(t, aligned, to, value, count, rax, xmm0);

    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
                                       address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_2);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2); // count => qword_count

    // Copy from high to low addresses.  Use 'to' as scratch.

    // Check for and copy trailing word
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, word_count, Address::times_2, -2));
    __ movw(Address(to, word_count, Address::times_2, -2), rax);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
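  // Note on the conjoint stub above: array_overlap_test tail-calls the
  // disjoint (forward) stub whenever a forward copy is safe, so only
  // truly overlapping moves run the backward code. Roughly (illustration
  // only, not HotSpot code):
  //
  //   if ((size_t)(to - from) >= n * sizeof(jshort))   // unsigned compare
  //     return disjoint_short_copy(from, to, n);       // forward is safe
  //   ...otherwise copy backward, highest address first...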
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  // Side Effects:
  //   disjoint_int_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_int_oop_copy().
  //
  address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry,
                                         const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx
                                   // r9 is used to save r15_thread

    DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT;
    if (dest_uninitialized) {
      decorators |= IS_DEST_UNINITIALIZED;
    }
    if (aligned) {
      decorators |= ARRAYCOPY_ALIGNED;
    }

    BasicType type = is_oop ? T_OBJECT : T_INT;
    BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->arraycopy_prologue(_masm, decorators, type, from, to, count);
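    // For is_oop copies the BarrierSetAssembler brackets the copy with
    // GC barriers; schematically (illustration only):
    //
    //   bs->arraycopy_prologue(...);  // e.g. pre-barrier work for SATB collectors
    //   ...copy all elements...
    //   bs->arraycopy_epilogue(...);  // e.g. card-mark the copied destination range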
    // 'from', 'to' and 'count' are now valid
    __ movptr(dword_count, count);
    __ shrptr(count, 1); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
    __ jccb(Assembler::zero, L_exit);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ BIND(L_exit);
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count);
    restore_arg_regs_using_thread();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ vzeroupper();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
                                         address *entry, const char *name,
                                         bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_4);
    setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx
                                   // r9 is used to save r15_thread

    DecoratorSet decorators = IN_HEAP | IS_ARRAY;
    if (dest_uninitialized) {
      decorators |= IS_DEST_UNINITIALIZED;
    }
    if (aligned) {
      decorators |= ARRAYCOPY_ALIGNED;
    }

    BasicType type = is_oop ? T_OBJECT : T_INT;
    BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
    // no registers are destroyed by this call
    bs->arraycopy_prologue(_masm, decorators, type, from, to, count);

    assert_clean_int(count, rax); // Make sure 'count' is clean int.
    // 'from', 'to' and 'count' are now valid
    __ movptr(dword_count, count);
    __ shrptr(count, 1); // count => qword_count

    // Copy from high to low addresses.  Use 'to' as scratch.

    // Check for and copy trailing dword
    __ testl(dword_count, 1);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, dword_count, Address::times_4, -4));
    __ movl(Address(to, dword_count, Address::times_4, -4), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    }
    restore_arg_regs_using_thread();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    __ BIND(L_exit);
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count);
    restore_arg_regs_using_thread();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
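  // The backward copies above peel the unaligned tail first: the odd
  // trailing dword (if any) is moved from the top of the range, then
  // whole qwords descend toward the base. Roughly (illustration only):
  //
  //   if (dword_count & 1) to[dword_count - 1] = from[dword_count - 1];
  //   for (size_t q = dword_count / 2; q > 0; q--)
  //     ((jlong*)to)[q - 1] = ((jlong*)from)[q - 1];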
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // Side Effects:
  //   disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
  //   no-overlap entry point used by generate_conjoint_long_oop_copy().
  //
  address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry,
                                          const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register qword_count = rdx;  // elements count
    const Register end_from    = from; // source array end address
    const Register end_to      = rcx;  // destination array end address
    const Register saved_count = r11;
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    // Save no-overlap entry point for generate_conjoint_long_oop_copy()
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx
                                   // r9 is used to save r15_thread
    // 'from', 'to' and 'qword_count' are now valid

    DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT;
    if (dest_uninitialized) {
      decorators |= IS_DEST_UNINITIALIZED;
    }
    if (aligned) {
      decorators |= ARRAYCOPY_ALIGNED;
    }

    BasicType type = is_oop ? T_OBJECT : T_LONG;
    BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count);

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    } else {
      restore_arg_regs_using_thread();
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
      __ xorptr(rax, rax); // return 0
      __ vzeroupper();
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    __ BIND(L_exit);
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count);
    restore_arg_regs_using_thread();
    if (is_oop) {
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
    }
    __ vzeroupper();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  address generate_conjoint_long_oop_copy(bool aligned, bool is_oop,
                                          address nooverlap_target, address *entry,
                                          const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register qword_count = rdx;  // elements count
    const Register saved_count = rcx;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_8);
    setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx
                                   // r9 is used to save r15_thread
    // 'from', 'to' and 'qword_count' are now valid

    // This stub handles overlapping arrays, so ARRAYCOPY_DISJOINT must
    // not be claimed here.
    DecoratorSet decorators = IN_HEAP | IS_ARRAY;
    if (dest_uninitialized) {
      decorators |= IS_DEST_UNINITIALIZED;
    }
    if (aligned) {
      decorators |= ARRAYCOPY_ALIGNED;
    }

    BasicType type = is_oop ? T_OBJECT : T_LONG;
    BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count);

    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    } else {
      restore_arg_regs_using_thread();
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
      __ xorptr(rax, rax); // return 0
      __ vzeroupper();
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    __ BIND(L_exit);
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count);
    restore_arg_regs_using_thread();
    if (is_oop) {
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
    }
    __ vzeroupper();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }


  // Helper for generating a dynamic type check.
  // Smashes no registers.
  void generate_type_check(Register sub_klass,
                           Register super_check_offset,
                           Register super_klass,
                           Label& L_success) {
    assert_different_registers(sub_klass, super_check_offset, super_klass);

    BLOCK_COMMENT("type_check:");

    Label L_miss;

    __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL,
                                     super_check_offset);
    __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL);

    // Fall through on failure!
    __ BIND(L_miss);
  }
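  // The fast/slow paths emitted above implement HotSpot's standard
  // subtype check; approximately (illustration only):
  //
  //   if (sub_klass == super_klass) hit;
  //   if (*(Klass**)((address)sub_klass + super_check_offset) == super_klass) hit;
  //   else if (super_check_offset != secondary_super_cache_offset) miss;
  //   else scan sub_klass's secondary supers; cache and hit if found.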
  //
  //  Generate checkcasting array copy stub
  //
  //  Input:
  //    c_rarg0   - source array address
  //    c_rarg1   - destination array address
  //    c_rarg2   - element count, treated as ssize_t, can be zero
  //    c_rarg3   - size_t ckoff (super_check_offset)
  // not Win64
  //    c_rarg4   - oop ckval (super_klass)
  // Win64
  //    rsp+40    - oop ckval (super_klass)
  //
  //  Output:
  //    rax ==  0  -  success
  //    rax == -1^K - failure, where K is partial transfer count
  //
  address generate_checkcast_copy(const char *name, address *entry,
                                  bool dest_uninitialized = false) {

    Label L_load_element, L_store_element, L_do_card_marks, L_done;

    // Input registers (after setup_arg_regs)
    const Register from        = rdi;   // source array address
    const Register to          = rsi;   // destination array address
    const Register length      = rdx;   // elements count
    const Register ckoff       = rcx;   // super_check_offset
    const Register ckval       = r8;    // super_klass

    // Registers used as temps (r13, r14 are save-on-entry)
    const Register end_from    = from;  // source array end address
    const Register end_to      = r13;   // destination array end address
    const Register count       = rdx;   // -(count_remaining)
    const Register r14_length  = r14;   // saved copy of length
    // End pointers are inclusive, and if length is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    const Register rax_oop     = rax;   // actual oop copied
    const Register r11_klass   = r11;   // oop._klass

    //---------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the two arrays are subtypes of Object[] but the
    // destination array type is not equal to or a supertype
    // of the source type.  Each element must be separately
    // checked.

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef ASSERT
    // caller guarantees that the arrays really are different
    // otherwise, we would have to make conjoint checks
    { Label L;
      array_overlap_test(L, TIMES_OOP);
      __ stop("checkcast_copy within a single array");
      __ bind(L);
    }
#endif //ASSERT

    setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
                       // ckoff => rcx, ckval => r8
                       // r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
    // last argument (#4) is on stack on Win64
    __ movptr(ckval, Address(rsp, 6 * wordSize));
#endif

    // Caller of this entry point must set up the argument registers.
    if (entry != NULL) {
      *entry = __ pc();
      BLOCK_COMMENT("Entry:");
    }

    // allocate spill slots for r13, r14
    enum {
      saved_r13_offset,
      saved_r14_offset,
      saved_r10_offset,
      saved_rbp_offset
    };
    __ subptr(rsp, saved_rbp_offset * wordSize);
    __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
    __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);
    __ movptr(Address(rsp, saved_r10_offset * wordSize), r10);

#ifdef ASSERT
    Label L2;
    __ get_thread(r14);
    __ cmpptr(r15_thread, r14);
    __ jcc(Assembler::equal, L2);
    __ stop("StubRoutines::checkcast_copy: r15_thread is modified by call");
    __ bind(L2);
#endif // ASSERT

    // check that int operands are properly extended to size_t
    assert_clean_int(length, rax);
    assert_clean_int(ckoff, rax);

#ifdef ASSERT
    BLOCK_COMMENT("assert consistent ckoff/ckval");
    // The ckoff and ckval must be mutually consistent,
    // even though caller generates both.
    { Label L;
      int sco_offset = in_bytes(Klass::super_check_offset_offset());
      __ cmpl(ckoff, Address(ckval, sco_offset));
      __ jcc(Assembler::equal, L);
      __ stop("super_check_offset inconsistent");
      __ bind(L);
    }
#endif //ASSERT

    // Loop-invariant addresses.  They are exclusive end pointers.
    Address end_from_addr(from, length, TIMES_OOP, 0);
    Address   end_to_addr(to,   length, TIMES_OOP, 0);
    // Loop-variant addresses.  They assume post-incremented count < 0.
    Address from_element_addr(end_from, count, TIMES_OOP, 0);
    Address   to_element_addr(end_to,   count, TIMES_OOP, 0);

    DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST;
    if (dest_uninitialized) {
      decorators |= IS_DEST_UNINITIALIZED;
    }

    BasicType type = T_OBJECT;
    BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->arraycopy_prologue(_masm, decorators, type, from, to, count);

    // Copy from low to high addresses, indexed from the end of each array.
    __ lea(end_from, end_from_addr);
    __ lea(end_to, end_to_addr);
    __ movptr(r14_length, length); // save a copy of the length
    assert(length == count, "");   // else fix next line:
    __ negptr(count);              // negate and test the length
    __ jcc(Assembler::notZero, L_load_element);

    // Empty array:  Nothing to do.
    __ xorptr(rax, rax); // return 0 on (trivial) success
    __ jmp(L_done);

    // ======== begin loop ========
    // (Loop is rotated; its entry is L_load_element.)
    // Loop control:
    //   for (count = -count; count != 0; count++)
    // Base pointers src, dst are biased by 8*(count-1), to last element.
    __ align(OptoLoopAlignment);

    __ BIND(L_store_element);
    __ store_heap_oop(to_element_addr, rax_oop, noreg, noreg, AS_RAW);  // store the oop
    __ increment(count);           // increment the count toward zero
    __ jcc(Assembler::zero, L_do_card_marks);

    // ======== loop entry is here ========
    __ BIND(L_load_element);
    __ load_heap_oop(rax_oop, from_element_addr, noreg, noreg, AS_RAW); // load the oop
    __ testptr(rax_oop, rax_oop);
    __ jcc(Assembler::zero, L_store_element);

    __ load_klass(r11_klass, rax_oop); // query the object klass
    generate_type_check(r11_klass, ckoff, ckval, L_store_element);
    // ======== end loop ========

    // It was a real error; we must depend on the caller to finish the job.
    // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
    // Emit GC store barriers for the oops we have copied (r14 + rdx),
    // and report their number to the caller.
    assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1);
    Label L_post_barrier;
    __ addptr(r14_length, count);  // K = (original - remaining) oops
    __ movptr(rax, r14_length);    // save the value
    __ notptr(rax);                // report (-1^K) to caller (does not affect flags)
    __ jccb(Assembler::notZero, L_post_barrier);
    __ jmp(L_done); // K == 0, nothing was copied, skip post barrier

    // Come here on success only.
    __ BIND(L_do_card_marks);
    __ xorptr(rax, rax); // return 0 on success

    __ BIND(L_post_barrier);
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, r14_length);

    // Common exit point (success or failure).
    __ BIND(L_done);
    __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
    __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
    __ movptr(r10, Address(rsp, saved_r10_offset * wordSize));
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
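  // Return-value convention of the checkcast stub above (illustration
  // only): 0 on success, otherwise ~K (== -1^K) where K elements were
  // copied before the failing one:
  //
  //   ssize_t checkcast_copy(oop* from, oop* to, size_t n /*, ckoff, ckval */) {
  //     for (size_t i = 0; i < n; i++) {
  //       oop e = from[i];
  //       if (e != NULL && !is_subtype_of(klass_of(e), dest_elem_klass))
  //         return ~(ssize_t)i;          // i elements were transferred
  //       to[i] = e;                     // with the appropriate GC barriers
  //     }
  //     return 0;
  //   }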
  //
  //  Generate 'unsafe' array copy stub
  //  Though just as safe as the other stubs, it takes an unscaled
  //  size_t argument instead of an element count.
  //
  //  Input:
  //    c_rarg0   - source array address
  //    c_rarg1   - destination array address
  //    c_rarg2   - byte count, treated as ssize_t, can be zero
  //
  // Examines the alignment of the operands and dispatches
  // to a long, int, short, or byte copy loop.
  //
  address generate_unsafe_copy(const char *name,
                               address byte_copy_entry, address short_copy_entry,
                               address int_copy_entry, address long_copy_entry) {

    Label L_long_aligned, L_int_aligned, L_short_aligned;

    // Input registers (before setup_arg_regs)
    const Register from = c_rarg0;  // source array address
    const Register to   = c_rarg1;  // destination array address
    const Register size = c_rarg2;  // byte count (size_t)

    // Register used as a temp
    const Register bits = rax;      // test copy of low bits

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);

    __ mov(bits, from);
    __ orptr(bits, to);
    __ orptr(bits, size);

    __ testb(bits, BytesPerLong-1);
    __ jccb(Assembler::zero, L_long_aligned);

    __ testb(bits, BytesPerInt-1);
    __ jccb(Assembler::zero, L_int_aligned);

    __ testb(bits, BytesPerShort-1);
    __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));

    __ BIND(L_short_aligned);
    __ shrptr(size, LogBytesPerShort); // size => short_count
    __ jump(RuntimeAddress(short_copy_entry));

    __ BIND(L_int_aligned);
    __ shrptr(size, LogBytesPerInt); // size => int_count
    __ jump(RuntimeAddress(int_copy_entry));

    __ BIND(L_long_aligned);
    __ shrptr(size, LogBytesPerLong); // size => qword_count
    __ jump(RuntimeAddress(long_copy_entry));

    return start;
  }
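  // Dispatch logic of the unsafe-copy stub above (illustration only):
  //
  //   uintptr_t bits = (uintptr_t)from | (uintptr_t)to | size;
  //   if      ((bits & 7) == 0) long_copy (from, to, size >> 3);
  //   else if ((bits & 3) == 0) int_copy  (from, to, size >> 2);
  //   else if ((bits & 1) == 0) short_copy(from, to, size >> 1);
  //   else                      byte_copy (from, to, size);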
  // Perform range checks on the proposed arraycopy.
  // Kills temp, but nothing else.
  // Also, clean the sign bits of src_pos and dst_pos.
  void arraycopy_range_checks(Register src,     // source array oop (c_rarg0)
                              Register src_pos, // source position (c_rarg1)
                              Register dst,     // destination array oop (c_rarg2)
                              Register dst_pos, // destination position (c_rarg3)
                              Register length,
                              Register temp,
                              Label& L_failed) {
    BLOCK_COMMENT("arraycopy_range_checks:");

    //  if (src_pos + length > arrayOop(src)->length())  FAIL;
    __ movl(temp, length);
    __ addl(temp, src_pos);             // src_pos + length
    __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    //  if (dst_pos + length > arrayOop(dst)->length())  FAIL;
    __ movl(temp, length);
    __ addl(temp, dst_pos);             // dst_pos + length
    __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
    // Move with sign extension can be used since they are positive.
    __ movslq(src_pos, src_pos);
    __ movslq(dst_pos, dst_pos);

    BLOCK_COMMENT("arraycopy_range_checks done");
  }
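  // Note: the 'above' (unsigned) compares make the checks robust for any
  // non-negative src_pos/dst_pos/length validated by the caller; in C
  // terms (illustration only):
  //
  //   if ((juint)(src_pos + length) > (juint)arrayOop(src)->length()) goto fail;
  //   if ((juint)(dst_pos + length) > (juint)arrayOop(dst)->length()) goto fail;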
  //
  //  Generate generic array copy stubs
  //
  //  Input:
  //    c_rarg0    -  src oop
  //    c_rarg1    -  src_pos (32-bits)
  //    c_rarg2    -  dst oop
  //    c_rarg3    -  dst_pos (32-bits)
  // not Win64
  //    c_rarg4    -  element count (32-bits)
  // Win64
  //    rsp+40     -  element count (32-bits)
  //
  //  Output:
  //    rax ==  0  -  success
  //    rax == -1^K - failure, where K is partial transfer count
  //
  address generate_generic_copy(const char *name,
                                address byte_copy_entry, address short_copy_entry,
                                address int_copy_entry, address oop_copy_entry,
                                address long_copy_entry, address checkcast_copy_entry) {

    Label L_failed, L_failed_0, L_objArray;
    Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;

    // Input registers
    const Register src     = c_rarg0;  // source array oop
    const Register src_pos = c_rarg1;  // source position
    const Register dst     = c_rarg2;  // destination array oop
    const Register dst_pos = c_rarg3;  // destination position
#ifndef _WIN64
    const Register length  = c_rarg4;
#else
    const Address  length(rsp, 6 * wordSize); // elements count is on stack on Win64
#endif

    { int modulus = CodeEntryAlignment;
      int target  = modulus - 5; // 5 = sizeof jmp(L_failed)
      int advance = target - (__ offset() % modulus);
      if (advance < 0)  advance += modulus;
      if (advance > 0)  __ nop(advance);
    }
    StubCodeMark mark(this, "StubRoutines", name);

    // Short-hop target to L_failed.  Makes for denser prologue code.
    __ BIND(L_failed_0);
    __ jmp(L_failed);
    assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");

    __ align(CodeEntryAlignment);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_generic_array_copy_ctr);

    //-----------------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the following conditions are met:
    //
    // (1) src and dst must not be null.
    // (2) src_pos must not be negative.
    // (3) dst_pos must not be negative.
    // (4) length  must not be negative.
    // (5) src klass and dst klass should be the same and not NULL.
    // (6) src and dst should be arrays.
    // (7) src_pos + length must not exceed length of src.
    // (8) dst_pos + length must not exceed length of dst.
    //

    //  if (src == NULL) return -1;
    __ testptr(src, src);         // src oop
    size_t j1off = __ offset();
    __ jccb(Assembler::zero, L_failed_0);

    //  if (src_pos < 0) return -1;
    __ testl(src_pos, src_pos);   // src_pos (32-bits)
    __ jccb(Assembler::negative, L_failed_0);

    //  if (dst == NULL) return -1;
    __ testptr(dst, dst);         // dst oop
    __ jccb(Assembler::zero, L_failed_0);

    //  if (dst_pos < 0) return -1;
    __ testl(dst_pos, dst_pos);   // dst_pos (32-bits)
    size_t j4off = __ offset();
    __ jccb(Assembler::negative, L_failed_0);

    // The first four tests are very dense code,
    // but not quite dense enough to put four
    // jumps in a 16-byte instruction fetch buffer.
    // That's good, because some branch predictors
    // do not like jumps so close together.
    // Make sure of this.
    guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");

    // registers used as temp
    const Register r11_length    = r11; // elements count to copy
    const Register r10_src_klass = r10; // array klass

    //  if (length < 0) return -1;
    __ movl(r11_length, length);        // length (elements count, 32-bits value)
    __ testl(r11_length, r11_length);
    __ jccb(Assembler::negative, L_failed_0);

    __ load_klass(r10_src_klass, src);
#ifdef ASSERT
    //  assert(src->klass() != NULL);
    {
      BLOCK_COMMENT("assert klasses not null {");
      Label L1, L2;
      __ testptr(r10_src_klass, r10_src_klass);
      __ jcc(Assembler::notZero, L2);   // it is broken if klass is NULL
      __ bind(L1);
      __ stop("broken null klass");
      __ bind(L2);
      __ load_klass(rax, dst);
      __ cmpq(rax, 0);
      __ jcc(Assembler::equal, L1);     // this would be broken also
      BLOCK_COMMENT("} assert klasses not null done");
    }
#endif

    // Load layout helper (32-bits)
    //
    //  |array_tag|     | header_size | element_type |     |log2_element_size|
    // 32        30    24            16              8     2                 0
    //
    //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
    //

    const int lh_offset = in_bytes(Klass::layout_helper_offset());

    // Handle objArrays completely differently...
    const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
    __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh);
    __ jcc(Assembler::equal, L_objArray);

    //  if (src->klass() != dst->klass()) return -1;
    __ load_klass(rax, dst);
    __ cmpq(r10_src_klass, rax);
    __ jcc(Assembler::notEqual, L_failed);

    const Register rax_lh = rax;  // layout helper
    __ movl(rax_lh, Address(r10_src_klass, lh_offset));

    //  if (!src->is_Array()) return -1;
    __ cmpl(rax_lh, Klass::_lh_neutral_value);
    __ jcc(Assembler::greaterEqual, L_failed);

    // At this point, it is known to be a typeArray (array_tag 0x3).
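    // Decoding of the layout helper used below (illustration only):
    //
    //   int lh          = klass->layout_helper();
    //   int header_size = (lh >> _lh_header_size_shift) & _lh_header_size_mask;
    //   int log2_elsize = lh & _lh_log2_element_size_mask;
    //   src_addr = (char*)src + header_size + ((size_t)src_pos << log2_elsize);
    //   dst_addr = (char*)dst + header_size + ((size_t)dst_pos << log2_elsize);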
#ifdef ASSERT
    {
      BLOCK_COMMENT("assert primitive array {");
      Label L;
      __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
      __ jcc(Assembler::greaterEqual, L);
      __ stop("must be a primitive array");
      __ bind(L);
      BLOCK_COMMENT("} assert primitive array done");
    }
#endif

    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                           r10, L_failed);

    // TypeArrayKlass
    //
    // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
    // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
    //

    const Register r10_offset = r10;    // array offset
    const Register rax_elsize = rax_lh; // element size

    __ movl(r10_offset, rax_lh);
    __ shrl(r10_offset, Klass::_lh_header_size_shift);
    __ andptr(r10_offset, Klass::_lh_header_size_mask);   // array_offset
    __ addptr(src, r10_offset);         // src array offset
    __ addptr(dst, r10_offset);         // dst array offset
    BLOCK_COMMENT("choose copy loop based on element size");
    __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize

    // next registers should be set before the jump to corresponding stub
    const Register from  = c_rarg0;  // source array address
    const Register to    = c_rarg1;  // destination array address
    const Register count = c_rarg2;  // elements count

    // 'from', 'to' and 'count' must be assigned in this order, since
    // they occupy the same registers as 'src', 'src_pos' and 'dst'.

    __ BIND(L_copy_bytes);
    __ cmpl(rax_elsize, 0);
    __ jccb(Assembler::notEqual, L_copy_shorts);
    __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_1, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(byte_copy_entry));

    __ BIND(L_copy_shorts);
    __ cmpl(rax_elsize, LogBytesPerShort);
    __ jccb(Assembler::notEqual, L_copy_ints);
    __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_2, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(short_copy_entry));

    __ BIND(L_copy_ints);
    __ cmpl(rax_elsize, LogBytesPerInt);
    __ jccb(Assembler::notEqual, L_copy_longs);
    __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_4, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(int_copy_entry));

    __ BIND(L_copy_longs);
#ifdef ASSERT
    {
      BLOCK_COMMENT("assert long copy {");
      Label L;
      __ cmpl(rax_elsize, LogBytesPerLong);
      __ jcc(Assembler::equal, L);
      __ stop("must be long copy, but elsize is wrong");
      __ bind(L);
      BLOCK_COMMENT("} assert long copy done");
    }
#endif
    __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_8, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(long_copy_entry));

    // ObjArrayKlass
    __ BIND(L_objArray);
    // live at this point:  r10_src_klass, r11_length, src[_pos], dst[_pos]

    Label L_plain_copy, L_checkcast_copy;
    //  test array classes for subtyping
    __ load_klass(rax, dst);
    __ cmpq(r10_src_klass, rax); // usual case is exact equality
    __ jcc(Assembler::notEqual, L_checkcast_copy);
    // Identically typed arrays can be copied without element-wise checks.
    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                           r10, L_failed);

    __ lea(from, Address(src, src_pos, TIMES_OOP,
                 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
    __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
                 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
    __ movl2ptr(count, r11_length); // length
    __ BIND(L_plain_copy);
    __ jump(RuntimeAddress(oop_copy_entry));

    __ BIND(L_checkcast_copy);
    // live at this point:  r10_src_klass, r11_length, rax (dst_klass)
    {
      // Before looking at dst.length, make sure dst is also an objArray.
      __ cmpl(Address(rax, lh_offset), objArray_lh);
      __ jcc(Assembler::notEqual, L_failed);

      // It is safe to examine both src.length and dst.length.
      arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                             rax, L_failed);

      const Register r11_dst_klass = r11;
      __ load_klass(r11_dst_klass, dst); // reload

      // Marshal the base address arguments now, freeing registers.
      __ lea(from, Address(src, src_pos, TIMES_OOP,
                   arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
                   arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ movl(count, length);           // length (reloaded)
      Register sco_temp = c_rarg3;      // this register is free now
      assert_different_registers(from, to, count, sco_temp,
                                 r11_dst_klass, r10_src_klass);
      assert_clean_int(count, sco_temp);

      // Generate the type check.
      const int sco_offset = in_bytes(Klass::super_check_offset_offset());
      __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
      assert_clean_int(sco_temp, rax);
      generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);

      // Fetch destination element klass from the ObjArrayKlass header.
      int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
      __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
      __ movl(  sco_temp,      Address(r11_dst_klass, sco_offset));
      assert_clean_int(sco_temp, rax);

      // the checkcast_copy loop needs two extra arguments:
      assert(c_rarg3 == sco_temp, "#3 already in place");
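      // At this point c_rarg3 holds the super_check_offset of the
      // destination element klass; the hand-off below amounts to
      // (illustration only):
      //
      //   Klass* dst_elem = ObjArrayKlass::cast(dst_klass)->element_klass();
      //   return checkcast_copy(from, to, count, sco, dst_elem);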
      // Set up arguments for checkcast_copy_entry.
      setup_arg_regs(4);
      __ movptr(r8, r11_dst_klass);  // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris
      __ jump(RuntimeAddress(checkcast_copy_entry));
    }

    __ BIND(L_failed);
    __ xorptr(rax, rax);
    __ notptr(rax); // return -1
    __ leave();   // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  void generate_arraycopy_stubs() {
    address entry;
    address entry_jbyte_arraycopy;
    address entry_jshort_arraycopy;
    address entry_jint_arraycopy;
    address entry_oop_arraycopy;
    address entry_jlong_arraycopy;
    address entry_checkcast_arraycopy;

    StubRoutines::_jbyte_disjoint_arraycopy  = generate_disjoint_byte_copy(false, &entry,
                                                                           "jbyte_disjoint_arraycopy");
    StubRoutines::_jbyte_arraycopy           = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy,
                                                                           "jbyte_arraycopy");

    StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry,
                                                                            "jshort_disjoint_arraycopy");
    StubRoutines::_jshort_arraycopy          = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy,
                                                                            "jshort_arraycopy");

    StubRoutines::_jint_disjoint_arraycopy   = generate_disjoint_int_oop_copy(false, false, &entry,
                                                                              "jint_disjoint_arraycopy");
    StubRoutines::_jint_arraycopy            = generate_conjoint_int_oop_copy(false, false, entry,
                                                                              &entry_jint_arraycopy, "jint_arraycopy");

    StubRoutines::_jlong_disjoint_arraycopy  = generate_disjoint_long_oop_copy(false, false, &entry,
                                                                               "jlong_disjoint_arraycopy");
    StubRoutines::_jlong_arraycopy           = generate_conjoint_long_oop_copy(false, false, entry,
                                                                               &entry_jlong_arraycopy, "jlong_arraycopy");


    if (UseCompressedOops) {
      StubRoutines::_oop_disjoint_arraycopy  = generate_disjoint_int_oop_copy(false, true, &entry,
                                                                              "oop_disjoint_arraycopy");
      StubRoutines::_oop_arraycopy           = generate_conjoint_int_oop_copy(false, true, entry,
                                                                              &entry_oop_arraycopy, "oop_arraycopy");
      StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry,
                                                                                    "oop_disjoint_arraycopy_uninit",
                                                                                    /*dest_uninitialized*/true);
      StubRoutines::_oop_arraycopy_uninit          = generate_conjoint_int_oop_copy(false, true, entry,
                                                                                    NULL, "oop_arraycopy_uninit",
                                                                                    /*dest_uninitialized*/true);
    } else {
      StubRoutines::_oop_disjoint_arraycopy  = generate_disjoint_long_oop_copy(false, true, &entry,
                                                                               "oop_disjoint_arraycopy");
      StubRoutines::_oop_arraycopy           = generate_conjoint_long_oop_copy(false, true, entry,
                                                                               &entry_oop_arraycopy, "oop_arraycopy");
      StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry,
                                                                                     "oop_disjoint_arraycopy_uninit",
                                                                                     /*dest_uninitialized*/true);
      StubRoutines::_oop_arraycopy_uninit          = generate_conjoint_long_oop_copy(false, true, entry,
                                                                                     NULL, "oop_arraycopy_uninit",
                                                                                     /*dest_uninitialized*/true);
    }
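    // With compressed oops an in-heap oop slot is 4 bytes, so the oop
    // stubs reuse the int copy generators; with uncompressed oops a slot
    // is 8 bytes and the long generators are reused instead (is_oop ==
    // true adds the GC barriers in both cases).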
    StubRoutines::_checkcast_arraycopy        = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
    StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
                                                                        /*dest_uninitialized*/true);

    StubRoutines::_unsafe_arraycopy  = generate_unsafe_copy("unsafe_arraycopy",
                                                            entry_jbyte_arraycopy,
                                                            entry_jshort_arraycopy,
                                                            entry_jint_arraycopy,
                                                            entry_jlong_arraycopy);
    StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
                                                             entry_jbyte_arraycopy,
                                                             entry_jshort_arraycopy,
                                                             entry_jint_arraycopy,
                                                             entry_oop_arraycopy,
                                                             entry_jlong_arraycopy,
                                                             entry_checkcast_arraycopy);

    StubRoutines::_jbyte_fill          = generate_fill(T_BYTE,  false, "jbyte_fill");
    StubRoutines::_jshort_fill         = generate_fill(T_SHORT, false, "jshort_fill");
    StubRoutines::_jint_fill           = generate_fill(T_INT,   false, "jint_fill");
    StubRoutines::_arrayof_jbyte_fill  = generate_fill(T_BYTE,  true,  "arrayof_jbyte_fill");
    StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true,  "arrayof_jshort_fill");
    StubRoutines::_arrayof_jint_fill   = generate_fill(T_INT,   true,  "arrayof_jint_fill");

    // We don't generate specialized code for HeapWord-aligned source
    // arrays, so just use the code we've already generated
    StubRoutines::_arrayof_jbyte_disjoint_arraycopy  = StubRoutines::_jbyte_disjoint_arraycopy;
    StubRoutines::_arrayof_jbyte_arraycopy           = StubRoutines::_jbyte_arraycopy;

    StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy;
    StubRoutines::_arrayof_jshort_arraycopy          = StubRoutines::_jshort_arraycopy;

    StubRoutines::_arrayof_jint_disjoint_arraycopy   = StubRoutines::_jint_disjoint_arraycopy;
    StubRoutines::_arrayof_jint_arraycopy            = StubRoutines::_jint_arraycopy;

    StubRoutines::_arrayof_jlong_disjoint_arraycopy  = StubRoutines::_jlong_disjoint_arraycopy;
    StubRoutines::_arrayof_jlong_arraycopy           = StubRoutines::_jlong_arraycopy;

    StubRoutines::_arrayof_oop_disjoint_arraycopy    = StubRoutines::_oop_disjoint_arraycopy;
    StubRoutines::_arrayof_oop_arraycopy             = StubRoutines::_oop_arraycopy;

    StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit;
    StubRoutines::_arrayof_oop_arraycopy_uninit          = StubRoutines::_oop_arraycopy_uninit;
  }

  // AES intrinsic stubs
  enum {AESBlockSize = 16};

  address generate_key_shuffle_mask() {
    __ align(16);
    StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask");
    address start = __ pc();
    __ emit_data64(0x0405060700010203, relocInfo::none);
    __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
    return start;
  }

  address generate_counter_shuffle_mask() {
    __ align(16);
    StubCodeMark mark(this, "StubRoutines", "counter_shuffle_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }

  // Utility routine for loading a 128-bit key word in little endian format;
  // can optionally specify that the shuffle mask is already in an XMM register
  void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
    __ movdqu(xmmdst, Address(key, offset));
    if (xmm_shuf_mask != NULL) {
      __ pshufb(xmmdst, xmm_shuf_mask);
    } else {
      __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    }
  }
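  // In effect the key shuffle mask selects bytes 3,2,1,0 within each
  // dword, so load_key byte-swaps every 32-bit word of the round key,
  // converting the Java int[] key layout into the byte order AES-NI
  // expects. Roughly (illustration only):
  //
  //   for (int i = 0; i < 4; i++) dst.dword[i] = bswap32(src.dword[i]);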
  // Utility routine for increasing the 128-bit counter (iv in CTR mode)
  void inc_counter(Register reg, XMMRegister xmmdst, int inc_delta, Label& next_block) {
    __ pextrq(reg, xmmdst, 0x0);
    __ addq(reg, inc_delta);
    __ pinsrq(xmmdst, reg, 0x0);
    __ jcc(Assembler::carryClear, next_block); // jump if no carry
    __ pextrq(reg, xmmdst, 0x01); // Carry
    __ addq(reg, 0x01);
    __ pinsrq(xmmdst, reg, 0x01); // Carry end
    __ BIND(next_block);          // next instruction
  }

  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //
  address generate_aescrypt_encryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
    Label L_doLast;
    address start = __ pc();

    const Register from   = c_rarg0;  // source array address
    const Register to     = c_rarg1;  // destination array address
    const Register key    = c_rarg2;  // key array address
    const Register keylen = rax;

    const XMMRegister xmm_result = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;
    // On win64 xmm6-xmm15 must be preserved so don't use them.
    const XMMRegister xmm_temp1 = xmm2;
    const XMMRegister xmm_temp2 = xmm3;
    const XMMRegister xmm_temp3 = xmm4;
    const XMMRegister xmm_temp4 = xmm5;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    __ movdqu(xmm_result, Address(from, 0));  // get 16 bytes of input

    // For encryption, the java expanded key ordering is just what we need;
    // we don't know if the key is aligned, hence not using load-execute form

    load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask);
    __ pxor(xmm_result, xmm_temp1);

    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);
    __ aesenc(xmm_result, xmm_temp3);
    __ aesenc(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);
    __ aesenc(xmm_result, xmm_temp3);
    __ aesenc(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);

    __ cmpl(keylen, 44);
    __ jccb(Assembler::equal, L_doLast);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);

    __ cmpl(keylen, 52);
    __ jccb(Assembler::equal, L_doLast);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);
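    // Round count by key size (illustration only): keylen is the
    // expanded key length in ints, so 44/52/60 ints correspond to
    // 10/12/14 AES rounds, i.e. nrounds == keylen/4 - 1; the branches
    // above exit early for the shorter keys, and the final round always
    // uses AESENCLAST.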
    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);

    __ BIND(L_doLast);
    __ aesenc(xmm_result, xmm_temp1);
    __ aesenclast(xmm_result, xmm_temp2);
    __ movdqu(Address(to, 0), xmm_result); // store the result
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }


  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //
  address generate_aescrypt_decryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
    Label L_doLast;
    address start = __ pc();

    const Register from   = c_rarg0;  // source array address
    const Register to     = c_rarg1;  // destination array address
    const Register key    = c_rarg2;  // key array address
    const Register keylen = rax;

    const XMMRegister xmm_result = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;
    // On win64 xmm6-xmm15 must be preserved so don't use them.
    const XMMRegister xmm_temp1 = xmm2;
    const XMMRegister xmm_temp2 = xmm3;
    const XMMRegister xmm_temp3 = xmm4;
    const XMMRegister xmm_temp4 = xmm5;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    __ movdqu(xmm_result, Address(from, 0));

    // for decryption java expanded key ordering is rotated one position from what we want
    // so we start from 0x10 here and hit 0x00 last
    // we don't know if the key is aligned, hence not using load-execute form
    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);

    __ pxor  (xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);
    __ aesdec(xmm_result, xmm_temp3);
    __ aesdec(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);
    __ aesdec(xmm_result, xmm_temp3);
    __ aesdec(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask);

    __ cmpl(keylen, 44);
    __ jccb(Assembler::equal, L_doLast);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);
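    // Overall structure of the decrypt flow (illustration only), with
    // K[i] the 16-byte word at key + 0x10*i and n the round count:
    //
    //   state ^= K[1];
    //   for (int i = 2; i <= n; i++) state = aesdec(state, K[i]);
    //   state = aesdeclast(state, K[0]);   // rotated: slot 0 comes last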
    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);

    __ cmpl(keylen, 52);
    __ jccb(Assembler::equal, L_doLast);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);

    __ BIND(L_doLast);
    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    // for decryption the aesdeclast operation is always on key+0x00
    __ aesdeclast(xmm_result, xmm_temp3);
    __ movdqu(Address(to, 0), xmm_result); // store the result
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }


  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - r vector byte array address
  //   c_rarg4   - input length
  //
  // Output:
  //   rax       - input length
  //
  address generate_cipherBlockChaining_encryptAESCrypt() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
    address start = __ pc();

    Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256;
    const Register from = c_rarg0;  // source array address
    const Register to   = c_rarg1;  // destination array address
    const Register key  = c_rarg2;  // key array address
    const Register rvec = c_rarg3;  // r byte array initialized from initvector array address
                                    // and left with the results of the last encryption block
#ifndef _WIN64
    const Register len_reg = c_rarg4;  // src len (must be multiple of blocksize 16)
#else
    const Address  len_mem(rbp, 6 * wordSize);  // length is on stack on Win64
    const Register len_reg = r11;      // pick the volatile windows register
#endif
    const Register pos = rax;

    // xmm register assignments for the loops below
    const XMMRegister xmm_result = xmm0;
    const XMMRegister xmm_temp   = xmm1;
    // keys 0-10 preloaded into xmm2-xmm12
    const int XMM_REG_NUM_KEY_FIRST = 2;
    const int XMM_REG_NUM_KEY_LAST  = 15;
    const XMMRegister xmm_key0  = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
    const XMMRegister xmm_key10 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10);
    const XMMRegister xmm_key11 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11);
    const XMMRegister xmm_key12 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12);
    const XMMRegister xmm_key13 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13);

    __ enter(); // required for proper stackwalking of RuntimeStub frame
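    // CBC encryption is inherently serial: each block is XORed with the
    // previous ciphertext (initially the r vector) before encryption.
    // In C terms (illustration only):
    //
    //   r = load16(rvec);
    //   for (size_t p = 0; p < len; p += 16) {
    //     r = AES_encrypt(r ^ load16(from + p), key);
    //     store16(to + p, r);
    //   }
    //   store16(rvec, r);   // final r becomes the next IV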
    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

#ifdef _WIN64
    // on win64, fill len_reg from stack position
    __ movl(len_reg, len_mem);
#else
    __ push(len_reg);  // Save
#endif

    const XMMRegister xmm_key_shuf_mask = xmm_temp;  // used temporarily to swap key bytes up front
    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0
    for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) {
      load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
      offset += 0x10;
    }
    __ movdqu(xmm_result, Address(rvec, 0x00));  // initialize xmm_result with r vec

    // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256))
    __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
    __ cmpl(rax, 44);
    __ jcc(Assembler::notEqual, L_key_192_256);

    // 128-bit key code follows here
    __ movptr(pos, 0);
    __ align(OptoLoopAlignment);

    __ BIND(L_loopTop_128);
    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));  // get next 16 bytes of input
    __ pxor  (xmm_result, xmm_temp);  // xor with the current r vector
    __ pxor  (xmm_result, xmm_key0);  // do the aes rounds
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) {
      __ aesenc(xmm_result, as_XMMRegister(rnum));
    }
    __ aesenclast(xmm_result, xmm_key10);
    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);  // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_loopTop_128);

    __ BIND(L_exit);
    __ movdqu(Address(rvec, 0), xmm_result);  // final value of r stored in rvec of CipherBlockChaining object

#ifdef _WIN64
    __ movl(rax, len_mem);
#else
    __ pop(rax);  // return length
#endif
    __ leave();   // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    __ BIND(L_key_192_256);
    // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
    load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask);
    __ cmpl(rax, 52);
    __ jcc(Assembler::notEqual, L_key_256);

    // 192-bit key code follows here (could be changed to use more xmm registers)
    __ movptr(pos, 0);
    __ align(OptoLoopAlignment);

    __ BIND(L_loopTop_192);
    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));  // get next 16 bytes of input
    __ pxor  (xmm_result, xmm_temp);  // xor with the current r vector
    __ pxor  (xmm_result, xmm_key0);  // do the aes rounds
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) {
      __ aesenc(xmm_result, as_XMMRegister(rnum));
    }
    __ aesenclast(xmm_result, xmm_key12);
    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);  // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_loopTop_192);
    __ jmp(L_exit);

    __ BIND(L_key_256);
    // 256-bit key code follows here (could be changed to use more xmm registers)
    load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask);
    __ movptr(pos, 0);
    __ align(OptoLoopAlignment);

    __ BIND(L_loopTop_256);
    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));  // get next 16 bytes of input
    __ pxor  (xmm_result, xmm_temp);  // xor with the current r vector
    __ pxor  (xmm_result, xmm_key0);  // do the aes rounds
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) {
      __ aesenc(xmm_result, as_XMMRegister(rnum));
    }
    load_key(xmm_temp, key, 0xe0);
    __ aesenclast(xmm_result, xmm_temp);
    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);  // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_loopTop_256);
    __ jmp(L_exit);

    return start;
  }

  // Safefetch stubs.
  void generate_safefetch(const char* name, int size, address* entry,
                          address* fault_pc, address* continuation_pc) {
    // safefetch signatures:
    //   int      SafeFetch32(int*      adr, int      errValue);
    //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
    //
    // arguments:
    //   c_rarg0 = adr
    //   c_rarg1 = errValue
    //
    // result:
    //   rax = *adr or errValue

    StubCodeMark mark(this, "StubRoutines", name);

    // Entry point, pc or function descriptor.
    *entry = __ pc();

    // Load *adr into c_rarg1, may fault.
    *fault_pc = __ pc();
    switch (size) {
      case 4:
        // int32_t
        __ movl(c_rarg1, Address(c_rarg0, 0));
        break;
      case 8:
        // int64_t
        __ movq(c_rarg1, Address(c_rarg0, 0));
        break;
      default:
        ShouldNotReachHere();
    }

    // return errValue or *adr
    *continuation_pc = __ pc();
    __ movq(rax, c_rarg1);
    __ ret(0);
  }
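  // Usage sketch (illustrative only):
  //
  //   int v = SafeFetch32(addr, -1);   // yields *addr, or -1 if addr is unmapped
  //
  // The VM's fault handler is expected to recognize a trap at *fault_pc and
  // resume execution at *continuation_pc, where the errValue already sitting
  // in c_rarg1 is copied to rax as the return value.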
  // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time
  // to hide instruction latency
  //
  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - r vector byte array address
  //   c_rarg4   - input length
  //
  // Output:
  //   rax       - input length
  //
  address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
    address start = __ pc();

    const Register from = c_rarg0;  // source array address
    const Register to   = c_rarg1;  // destination array address
    const Register key  = c_rarg2;  // key array address
    const Register rvec = c_rarg3;  // r byte array initialized from initvector array address
                                    // and left with the results of the last encryption block
#ifndef _WIN64
    const Register len_reg = c_rarg4;  // src len (must be multiple of blocksize 16)
#else
    const Address  len_mem(rbp, 6 * wordSize);  // length is on stack on Win64
    const Register len_reg = r11;               // pick the volatile windows register
#endif
    const Register pos = rax;

    const int PARALLEL_FACTOR = 4;
    const int ROUNDS[3] = { 10, 12, 14 };  // aes rounds for key128, key192, key256

    Label L_exit;
    Label L_singleBlock_loopTopHead[3];   // 128, 192, 256
    Label L_singleBlock_loopTopHead2[3];  // 128, 192, 256
    Label L_singleBlock_loopTop[3];       // 128, 192, 256
    Label L_multiBlock_loopTopHead[3];    // 128, 192, 256
    Label L_multiBlock_loopTop[3];        // 128, 192, 256

    // keys 0-10 preloaded into xmm5-xmm15
    const int XMM_REG_NUM_KEY_FIRST = 5;
    const int XMM_REG_NUM_KEY_LAST  = 15;
    const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
    const XMMRegister xmm_key_last  = as_XMMRegister(XMM_REG_NUM_KEY_LAST);

    __ enter();  // required for proper stackwalking of RuntimeStub frame

    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

#ifdef _WIN64
    // on win64, fill len_reg from stack position
    __ movl(len_reg, len_mem);
#else
    __ push(len_reg);  // Save
#endif
    __ push(rbx);
    // the java expanded key ordering is rotated one position from what we want
    // so we start from 0x10 here and hit 0x00 last
    const XMMRegister xmm_key_shuf_mask = xmm1;  // used temporarily to swap key bytes up front
    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00
    for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) {
      load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
      offset += 0x10;
    }
    load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask);

    const XMMRegister xmm_prev_block_cipher = xmm1;  // holds cipher of previous block

    // registers holding the four results in the parallelized loop
    const XMMRegister xmm_result0 = xmm0;
    const XMMRegister xmm_result1 = xmm2;
    const XMMRegister xmm_result2 = xmm3;
    const XMMRegister xmm_result3 = xmm4;

    __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00));  // initialize with initial rvec

    __ xorptr(pos, pos);

    // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256))
    __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
    __ cmpl(rbx, 52);
    __ jcc(Assembler::equal, L_multiBlock_loopTopHead[1]);
    __ cmpl(rbx, 60);
    __ jcc(Assembler::equal, L_multiBlock_loopTopHead[2]);

#define DoFour(opc, src_reg)           \
  __ opc(xmm_result0, src_reg);        \
  __ opc(xmm_result1, src_reg);        \
  __ opc(xmm_result2, src_reg);        \
  __ opc(xmm_result3, src_reg);
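    // Unlike CBC encryption, CBC decryption parallelizes cleanly, since
    //   P[i] = D_K(C[i]) ^ C[i-1]
    // depends only on ciphertext that is already in hand. The loop below
    // therefore pushes PARALLEL_FACTOR = 4 blocks through the AES rounds
    // together (DoFour keeps four independent dependency chains in flight to
    // hide aesdec latency) and only afterwards xors each result with its
    // predecessor ciphertext block.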
    for (int k = 0; k < 3; ++k) {
      __ BIND(L_multiBlock_loopTopHead[k]);
      if (k != 0) {
        __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize);  // see if at least 4 blocks left
        __ jcc(Assembler::less, L_singleBlock_loopTopHead2[k]);
      }
      if (k == 1) {
        __ subptr(rsp, 6 * wordSize);
        __ movdqu(Address(rsp, 0), xmm15);  // save last_key from xmm15
        load_key(xmm15, key, 0xb0);         // 0xb0; 192-bit key goes up to 0xc0
        __ movdqu(Address(rsp, 2 * wordSize), xmm15);
        load_key(xmm1, key, 0xc0);          // 0xc0;
        __ movdqu(Address(rsp, 4 * wordSize), xmm1);
      } else if (k == 2) {
        __ subptr(rsp, 10 * wordSize);
        __ movdqu(Address(rsp, 0), xmm15);  // save last_key from xmm15
        load_key(xmm15, key, 0xd0);         // 0xd0; 256-bit key goes up to 0xe0
        __ movdqu(Address(rsp, 6 * wordSize), xmm15);
        load_key(xmm1, key, 0xe0);          // 0xe0;
        __ movdqu(Address(rsp, 8 * wordSize), xmm1);
        load_key(xmm15, key, 0xb0);         // 0xb0;
        __ movdqu(Address(rsp, 2 * wordSize), xmm15);
        load_key(xmm1, key, 0xc0);          // 0xc0;
        __ movdqu(Address(rsp, 4 * wordSize), xmm1);
      }
      __ align(OptoLoopAlignment);
      __ BIND(L_multiBlock_loopTop[k]);
      __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize);  // see if at least 4 blocks left
      __ jcc(Assembler::less, L_singleBlock_loopTopHead[k]);

      if (k != 0) {
        __ movdqu(xmm15, Address(rsp, 2 * wordSize));
        __ movdqu(xmm1, Address(rsp, 4 * wordSize));
      }

      __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize));  // get next 4 blocks into xmm_result registers
      __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize));

      DoFour(pxor, xmm_key_first);
      if (k == 0) {
        for (int rnum = 1; rnum < ROUNDS[k]; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        DoFour(aesdeclast, xmm_key_last);
      } else if (k == 1) {
        for (int rnum = 1; rnum <= ROUNDS[k]-2; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        __ movdqu(xmm_key_last, Address(rsp, 0));  // xmm15 needs to be loaded again.
        DoFour(aesdec, xmm1);  // key : 0xc0
        __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00));  // xmm1 needs to be loaded again
        DoFour(aesdeclast, xmm_key_last);
      } else if (k == 2) {
        for (int rnum = 1; rnum <= ROUNDS[k] - 4; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        DoFour(aesdec, xmm1);  // key : 0xc0
        __ movdqu(xmm15, Address(rsp, 6 * wordSize));
        __ movdqu(xmm1, Address(rsp, 8 * wordSize));
        DoFour(aesdec, xmm15);  // key : 0xd0
        __ movdqu(xmm_key_last, Address(rsp, 0));  // xmm15 needs to be loaded again.
        DoFour(aesdec, xmm1);  // key : 0xe0
        __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00));  // xmm1 needs to be loaded again
        DoFour(aesdeclast, xmm_key_last);
      }

      // for each result, xor with the r vector of previous cipher block
      __ pxor(xmm_result0, xmm_prev_block_cipher);
      __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ pxor(xmm_result1, xmm_prev_block_cipher);
      __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ pxor(xmm_result2, xmm_prev_block_cipher);
      __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ pxor(xmm_result3, xmm_prev_block_cipher);
      __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3 * AESBlockSize));  // this will carry over to next set of blocks
      if (k != 0) {
        __ movdqu(Address(rvec, 0x00), xmm_prev_block_cipher);
      }

      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);  // store 4 results into the next 64 bytes of output
      __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1);
      __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2);
      __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3);

      __ addptr(pos, PARALLEL_FACTOR * AESBlockSize);
      __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize);
      __ jmp(L_multiBlock_loopTop[k]);

      // registers used in the non-parallelized loops
      // xmm register assignments for the loops below
      const XMMRegister xmm_result = xmm0;
      const XMMRegister xmm_prev_block_cipher_save = xmm2;
      const XMMRegister xmm_key11 = xmm3;
      const XMMRegister xmm_key12 = xmm4;
      const XMMRegister key_tmp   = xmm4;

      __ BIND(L_singleBlock_loopTopHead[k]);
      if (k == 1) {
        __ addptr(rsp, 6 * wordSize);
      } else if (k == 2) {
        __ addptr(rsp, 10 * wordSize);
      }
      __ cmpptr(len_reg, 0);  // any blocks left?
      __ jcc(Assembler::equal, L_exit);
      __ BIND(L_singleBlock_loopTopHead2[k]);
      if (k == 1) {
        load_key(xmm_key11, key, 0xb0);  // 0xb0; 192-bit key goes up to 0xc0
        load_key(xmm_key12, key, 0xc0);  // 0xc0; 192-bit key goes up to 0xc0
      }
      if (k == 2) {
        load_key(xmm_key11, key, 0xb0);  // 0xb0; 256-bit key goes up to 0xe0
      }
      __ align(OptoLoopAlignment);
      __ BIND(L_singleBlock_loopTop[k]);
      __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));  // get next 16 bytes of cipher input
      __ movdqa(xmm_prev_block_cipher_save, xmm_result);  // save for next r vector
      __ pxor(xmm_result, xmm_key_first);  // do the aes dec rounds
      for (int rnum = 1; rnum <= 9; rnum++) {
        __ aesdec(xmm_result, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
      }
      if (k == 1) {
        __ aesdec(xmm_result, xmm_key11);
        __ aesdec(xmm_result, xmm_key12);
      }
      if (k == 2) {
        __ aesdec(xmm_result, xmm_key11);
        load_key(key_tmp, key, 0xc0);
        __ aesdec(xmm_result, key_tmp);
        load_key(key_tmp, key, 0xd0);
        __ aesdec(xmm_result, key_tmp);
        load_key(key_tmp, key, 0xe0);
        __ aesdec(xmm_result, key_tmp);
      }

      __ aesdeclast(xmm_result, xmm_key_last);     // xmm15 always came from key+0
      __ pxor(xmm_result, xmm_prev_block_cipher);  // xor with the current r vector
      __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);  // store into the next 16 bytes of output
      // no need to store r to memory until we exit
      __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save);  // set up next r vector with cipher input from this block
      __ addptr(pos, AESBlockSize);
      __ subptr(len_reg, AESBlockSize);
      __ jcc(Assembler::notEqual, L_singleBlock_loopTop[k]);
      if (k != 2) {
        __ jmp(L_exit);
      }
    }  // for 128/192/256

    __ BIND(L_exit);
    __ movdqu(Address(rvec, 0), xmm_prev_block_cipher);  // final value of r stored in rvec of CipherBlockChaining object
    __ pop(rbx);
#ifdef _WIN64
    __ movl(rax, len_mem);
#else
    __ pop(rax);  // return length
#endif
    __ leave();   // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  address generate_upper_word_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "upper_word_mask");
    address start = __ pc();
    __ emit_data64(0x0000000000000000, relocInfo::none);
    __ emit_data64(0xFFFFFFFF00000000, relocInfo::none);
    return start;
  }

  address generate_shuffle_byte_flip_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }
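  // The two qwords just emitted form a pshufb control whose byte i selects
  // source byte 15 - i, i.e. it reverses all 16 bytes of an XMM register
  // (a sketch of the effect):
  //
  //   dst[i] = src[15 - i]   for i = 0..15
  //
  // SHA-1/SHA-256 are specified over big-endian 32-bit words, so each message
  // chunk is byte-flipped with a mask like this before the compression rounds.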
  // ofs and limit are used for the multi-block byte array.
  // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
  address generate_sha1_implCompress(bool multi_block, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf   = c_rarg0;
    Register state = c_rarg1;
    Register ofs   = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister abcd = xmm0;
    const XMMRegister e0   = xmm1;
    const XMMRegister e1   = xmm2;
    const XMMRegister msg0 = xmm3;

    const XMMRegister msg1 = xmm4;
    const XMMRegister msg2 = xmm5;
    const XMMRegister msg3 = xmm6;
    const XMMRegister shuf_mask = xmm7;

    __ enter();

    __ subptr(rsp, 4 * wordSize);

    __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask,
                 buf, state, ofs, limit, rsp, multi_block);

    __ addptr(rsp, 4 * wordSize);

    __ leave();
    __ ret(0);
    return start;
  }

  address generate_pshuffle_byte_flip_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask");
    address start = __ pc();
    __ emit_data64(0x0405060700010203, relocInfo::none);
    __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);

    if (VM_Version::supports_avx2()) {
      __ emit_data64(0x0405060700010203, relocInfo::none);  // second copy
      __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
      // _SHUF_00BA
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      // _SHUF_DC00
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
    }

    return start;
  }

  // Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
  address generate_pshuffle_byte_flip_mask_sha512() {
    __ align(32);
    StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask_sha512");
    address start = __ pc();
    if (VM_Version::supports_avx2()) {
      __ emit_data64(0x0001020304050607, relocInfo::none);  // PSHUFFLE_BYTE_FLIP_MASK
      __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
      __ emit_data64(0x1011121314151617, relocInfo::none);
      __ emit_data64(0x18191a1b1c1d1e1f, relocInfo::none);
      __ emit_data64(0x0000000000000000, relocInfo::none);  // MASK_YMM_LO
      __ emit_data64(0x0000000000000000, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
    }

    return start;
  }
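  // Note (comment only): the SHA-256 stub below carries two implementations
  // and picks between them at generation time. fast_sha256() uses the CPU's
  // SHA extensions when VM_Version::supports_sha(), otherwise sha256_AVX2()
  // computes the rounds with generic AVX2 integer instructions; the assert
  // documents that at least one of the two must be available.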
  // ofs and limit are used for the multi-block byte array.
  // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
  address generate_sha256_implCompress(bool multi_block, const char *name) {
    assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), "");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf   = c_rarg0;
    Register state = c_rarg1;
    Register ofs   = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister msg     = xmm0;
    const XMMRegister state0  = xmm1;
    const XMMRegister state1  = xmm2;
    const XMMRegister msgtmp0 = xmm3;

    const XMMRegister msgtmp1 = xmm4;
    const XMMRegister msgtmp2 = xmm5;
    const XMMRegister msgtmp3 = xmm6;
    const XMMRegister msgtmp4 = xmm7;

    const XMMRegister shuf_mask = xmm8;

    __ enter();

    __ subptr(rsp, 4 * wordSize);

    if (VM_Version::supports_sha()) {
      __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
                     buf, state, ofs, limit, rsp, multi_block, shuf_mask);
    } else if (VM_Version::supports_avx2()) {
      __ sha256_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
                     buf, state, ofs, limit, rsp, multi_block, shuf_mask);
    }
    __ addptr(rsp, 4 * wordSize);
    __ vzeroupper();
    __ leave();
    __ ret(0);
    return start;
  }

  address generate_sha512_implCompress(bool multi_block, const char *name) {
    assert(VM_Version::supports_avx2(), "");
    assert(VM_Version::supports_bmi2(), "");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf   = c_rarg0;
    Register state = c_rarg1;
    Register ofs   = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister msg     = xmm0;
    const XMMRegister state0  = xmm1;
    const XMMRegister state1  = xmm2;
    const XMMRegister msgtmp0 = xmm3;
    const XMMRegister msgtmp1 = xmm4;
    const XMMRegister msgtmp2 = xmm5;
    const XMMRegister msgtmp3 = xmm6;
    const XMMRegister msgtmp4 = xmm7;

    const XMMRegister shuf_mask = xmm8;

    __ enter();

    __ sha512_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
                   buf, state, ofs, limit, rsp, multi_block, shuf_mask);

    __ vzeroupper();
    __ leave();
    __ ret(0);
    return start;
  }

  // This is a version of CTR/AES crypt which does 6 blocks in a loop at a time
  // to hide instruction latency
  //
  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - counter vector byte array address
  //   Linux
  //     c_rarg4            - input length
  //     c_rarg5            - saved encryptedCounter start
  //     rbp + 6 * wordSize - saved used length
  //   Windows
  //     rbp + 6 * wordSize - input length
  //     rbp + 7 * wordSize - saved encryptedCounter start
  //     rbp + 8 * wordSize - saved used length
  //
  // Output:
  //   rax       - input length
  //
  address generate_counterMode_AESCrypt_Parallel() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt");
    address start = __ pc();
    const Register from = c_rarg0;  // source array address
    const Register to   = c_rarg1;  // destination array address
    const Register key     = c_rarg2;  // key array address
    const Register counter = c_rarg3;  // counter byte array initialized from counter array address
                                       // and updated with the incremented counter in the end
#ifndef _WIN64
    const Register len_reg = c_rarg4;
    const Register saved_encCounter_start = c_rarg5;
    const Register used_addr = r10;
    const Address  used_mem(rbp, 2 * wordSize);
    const Register used = r11;
#else
    const Address len_mem(rbp, 6 * wordSize);               // length is on stack on Win64
    const Address saved_encCounter_mem(rbp, 7 * wordSize);  // saved encryptedCounter start is on stack on Win64
    const Address used_mem(rbp, 8 * wordSize);              // used length is on stack on Win64
    const Register len_reg = r10;  // pick the first volatile windows register
    const Register saved_encCounter_start = r11;
    const Register used_addr = r13;
    const Register used = r14;
#endif
    const Register pos = rax;

    const int PARALLEL_FACTOR = 6;
    const XMMRegister xmm_counter_shuf_mask = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;  // used temporarily to swap key bytes up front
    const XMMRegister xmm_curr_counter = xmm2;

    const XMMRegister xmm_key_tmp0 = xmm3;
    const XMMRegister xmm_key_tmp1 = xmm4;

    // registers holding the six results in the parallelized loop
    const XMMRegister xmm_result0 = xmm5;
    const XMMRegister xmm_result1 = xmm6;
    const XMMRegister xmm_result2 = xmm7;
    const XMMRegister xmm_result3 = xmm8;
    const XMMRegister xmm_result4 = xmm9;
    const XMMRegister xmm_result5 = xmm10;

    const XMMRegister xmm_from0 = xmm11;
    const XMMRegister xmm_from1 = xmm12;
    const XMMRegister xmm_from2 = xmm13;
    const XMMRegister xmm_from3 = xmm14;  // the last one is xmm14; we have to preserve it on WIN64.
    const XMMRegister xmm_from4 = xmm3;   // reuse xmm3~4; xmm_key_tmp0~1 are not needed once the input text is loaded
    const XMMRegister xmm_from5 = xmm4;

    // for key_128, key_192, key_256
    const int rounds[3] = {10, 12, 14};
    Label L_exit_preLoop, L_preLoop_start;
    Label L_multiBlock_loopTop[3];
    Label L_singleBlockLoopTop[3];
    Label L__incCounter[3][6];      // for 6 blocks
    Label L__incCounter_single[3];  // for single block, key128, key192, key256
    Label L_processTail_insr[3], L_processTail_4_insr[3], L_processTail_2_insr[3], L_processTail_1_insr[3], L_processTail_exit_insr[3];
    Label L_processTail_extr[3], L_processTail_4_extr[3], L_processTail_2_extr[3], L_processTail_1_extr[3], L_processTail_exit_extr[3];

    Label L_exit;

    __ enter();  // required for proper stackwalking of RuntimeStub frame
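    // CTR mode reference sketch (comment only): the cipher is applied to a
    // counter, never to the data, producing a keystream that is xor-ed with
    // the input:
    //
    //   for each 16-byte block i:
    //     keystream = block_encrypt(key, counter); counter++;
    //     to[i] = from[i] ^ keystream;
    //
    // Because a call may end mid-block, the stub first drains up to 15
    // leftover keystream bytes saved by the previous invocation (the
    // used/saved_encCounter_start state) before entering the block loops.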
    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

#ifdef _WIN64
    // allocate spill slots for r13, r14
    enum {
      saved_r13_offset,
      saved_r14_offset
    };
    __ subptr(rsp, 2 * wordSize);
    __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
    __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);

    // on win64, fill len_reg from stack position
    __ movl(len_reg, len_mem);
    __ movptr(saved_encCounter_start, saved_encCounter_mem);
    __ movptr(used_addr, used_mem);
    __ movl(used, Address(used_addr, 0));
#else
    __ push(len_reg);  // Save
    __ movptr(used_addr, used_mem);
    __ movl(used, Address(used_addr, 0));
#endif

    __ push(rbx);  // Save RBX
    __ movdqu(xmm_curr_counter, Address(counter, 0x00));  // initialize counter with initial counter
    __ movdqu(xmm_counter_shuf_mask, ExternalAddress(StubRoutines::x86::counter_shuffle_mask_addr()), pos);  // pos as scratch
    __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask);   // counter is shuffled
    __ movptr(pos, 0);

    // Use the partially used encrypted counter from the last invocation
    __ BIND(L_preLoop_start);
    __ cmpptr(used, 16);
    __ jcc(Assembler::aboveEqual, L_exit_preLoop);
    __ cmpptr(len_reg, 0);
    __ jcc(Assembler::lessEqual, L_exit_preLoop);
    __ movb(rbx, Address(saved_encCounter_start, used));
    __ xorb(rbx, Address(from, pos));
    __ movb(Address(to, pos), rbx);
    __ addptr(pos, 1);
    __ addptr(used, 1);
    __ subptr(len_reg, 1);

    __ jmp(L_preLoop_start);

    __ BIND(L_exit_preLoop);
    __ movl(Address(used_addr, 0), used);

    // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()), rbx);  // rbx as scratch
    __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
    __ cmpl(rbx, 52);
    __ jcc(Assembler::equal, L_multiBlock_loopTop[1]);
    __ cmpl(rbx, 60);
    __ jcc(Assembler::equal, L_multiBlock_loopTop[2]);

#define CTR_DoSix(opc, src_reg)        \
  __ opc(xmm_result0, src_reg);        \
  __ opc(xmm_result1, src_reg);        \
  __ opc(xmm_result2, src_reg);        \
  __ opc(xmm_result3, src_reg);        \
  __ opc(xmm_result4, src_reg);        \
  __ opc(xmm_result5, src_reg);

    // k == 0 : generate code for key_128
    // k == 1 : generate code for key_192
    // k == 2 : generate code for key_256
    for (int k = 0; k < 3; ++k) {
      // multi-block code starts here
      __ align(OptoLoopAlignment);
      __ BIND(L_multiBlock_loopTop[k]);
      __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize);  // see if at least PARALLEL_FACTOR blocks left
      __ jcc(Assembler::less, L_singleBlockLoopTop[k]);
      load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);

      // load, then increase counters
      CTR_DoSix(movdqa, xmm_curr_counter);
      inc_counter(rbx, xmm_result1, 0x01, L__incCounter[k][0]);
      inc_counter(rbx, xmm_result2, 0x02, L__incCounter[k][1]);
      inc_counter(rbx, xmm_result3, 0x03, L__incCounter[k][2]);
      inc_counter(rbx, xmm_result4, 0x04, L__incCounter[k][3]);
      inc_counter(rbx, xmm_result5, 0x05, L__incCounter[k][4]);
      inc_counter(rbx, xmm_curr_counter, 0x06, L__incCounter[k][5]);
      CTR_DoSix(pshufb, xmm_counter_shuf_mask);  // after increment, shuffle the counters back for the PXOR
      CTR_DoSix(pxor, xmm_key_tmp0);             // PXOR with round 0 key
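      // The round loop below double-buffers the key schedule through
      // xmm_key_tmp1/xmm_key_tmp0: two round keys are loaded per iteration so
      // that the load of one key overlaps the six aesenc ops using the other,
      // finishing with aesenclast when the final round key is reached.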
      // load two ROUND_KEYs at a time
      for (int i = 1; i < rounds[k]; ) {
        load_key(xmm_key_tmp1, key, (0x10 * i), xmm_key_shuf_mask);
        load_key(xmm_key_tmp0, key, (0x10 * (i+1)), xmm_key_shuf_mask);
        CTR_DoSix(aesenc, xmm_key_tmp1);
        i++;
        if (i != rounds[k]) {
          CTR_DoSix(aesenc, xmm_key_tmp0);
        } else {
          CTR_DoSix(aesenclast, xmm_key_tmp0);
        }
        i++;
      }

      // get next PARALLEL_FACTOR blocks into xmm_result registers
      __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ movdqu(xmm_from1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ movdqu(xmm_from2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ movdqu(xmm_from3, Address(from, pos, Address::times_1, 3 * AESBlockSize));
      __ movdqu(xmm_from4, Address(from, pos, Address::times_1, 4 * AESBlockSize));
      __ movdqu(xmm_from5, Address(from, pos, Address::times_1, 5 * AESBlockSize));

      __ pxor(xmm_result0, xmm_from0);
      __ pxor(xmm_result1, xmm_from1);
      __ pxor(xmm_result2, xmm_from2);
      __ pxor(xmm_result3, xmm_from3);
      __ pxor(xmm_result4, xmm_from4);
      __ pxor(xmm_result5, xmm_from5);

      // store 6 results into the next 96 bytes of output
      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
      __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1);
      __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2);
      __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3);
      __ movdqu(Address(to, pos, Address::times_1, 4 * AESBlockSize), xmm_result4);
      __ movdqu(Address(to, pos, Address::times_1, 5 * AESBlockSize), xmm_result5);

      __ addptr(pos, PARALLEL_FACTOR * AESBlockSize);      // advance the position in the crypt text
      __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize);  // decrease the remaining length
      __ jmp(L_multiBlock_loopTop[k]);

      // singleBlock starts here
      __ align(OptoLoopAlignment);
      __ BIND(L_singleBlockLoopTop[k]);
      __ cmpptr(len_reg, 0);
      __ jcc(Assembler::lessEqual, L_exit);
      load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);
      __ movdqa(xmm_result0, xmm_curr_counter);
      inc_counter(rbx, xmm_curr_counter, 0x01, L__incCounter_single[k]);
      __ pshufb(xmm_result0, xmm_counter_shuf_mask);
      __ pxor(xmm_result0, xmm_key_tmp0);
      for (int i = 1; i < rounds[k]; i++) {
        load_key(xmm_key_tmp0, key, (0x10 * i), xmm_key_shuf_mask);
        __ aesenc(xmm_result0, xmm_key_tmp0);
      }
      load_key(xmm_key_tmp0, key, (rounds[k] * 0x10), xmm_key_shuf_mask);
      __ aesenclast(xmm_result0, xmm_key_tmp0);
      __ cmpptr(len_reg, AESBlockSize);
      __ jcc(Assembler::less, L_processTail_insr[k]);
      __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ pxor(xmm_result0, xmm_from0);
      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
      __ addptr(pos, AESBlockSize);
      __ subptr(len_reg, AESBlockSize);
      __ jmp(L_singleBlockLoopTop[k]);
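      // Tail handling sketch: a final partial block of len < 16 bytes is
      // assembled in xmm_from0 by reading backwards from the end of the input
      // with pinsrq/pinsrd/pinsrw/pinsrb keyed off the bits of len (8, 4, 2, 1),
      // xor-ed against the encrypted counter, and written out the same way
      // with pextr*. The whole encrypted counter is stored to
      // saved_encCounter_start so the unused keystream bytes can finish the
      // block on the next call.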
      __ BIND(L_processTail_insr[k]);  // Process the tail part of the input array
      __ addptr(pos, len_reg);         // 1. Insert bytes from src array into xmm_from0 register
      __ testptr(len_reg, 8);
      __ jcc(Assembler::zero, L_processTail_4_insr[k]);
      __ subptr(pos, 8);
      __ pinsrq(xmm_from0, Address(from, pos), 0);
      __ BIND(L_processTail_4_insr[k]);
      __ testptr(len_reg, 4);
      __ jcc(Assembler::zero, L_processTail_2_insr[k]);
      __ subptr(pos, 4);
      __ pslldq(xmm_from0, 4);
      __ pinsrd(xmm_from0, Address(from, pos), 0);
      __ BIND(L_processTail_2_insr[k]);
      __ testptr(len_reg, 2);
      __ jcc(Assembler::zero, L_processTail_1_insr[k]);
      __ subptr(pos, 2);
      __ pslldq(xmm_from0, 2);
      __ pinsrw(xmm_from0, Address(from, pos), 0);
      __ BIND(L_processTail_1_insr[k]);
      __ testptr(len_reg, 1);
      __ jcc(Assembler::zero, L_processTail_exit_insr[k]);
      __ subptr(pos, 1);
      __ pslldq(xmm_from0, 1);
      __ pinsrb(xmm_from0, Address(from, pos), 0);
      __ BIND(L_processTail_exit_insr[k]);

      __ movdqu(Address(saved_encCounter_start, 0), xmm_result0);  // 2. Perform pxor of the encrypted counter and plaintext bytes.
      __ pxor(xmm_result0, xmm_from0);                             //    Also the encrypted counter is saved for the next invocation.

      __ testptr(len_reg, 8);
      __ jcc(Assembler::zero, L_processTail_4_extr[k]);  // 3. Extract bytes from xmm_result0 into the dest. array
      __ pextrq(Address(to, pos), xmm_result0, 0);
      __ psrldq(xmm_result0, 8);
      __ addptr(pos, 8);
      __ BIND(L_processTail_4_extr[k]);
      __ testptr(len_reg, 4);
      __ jcc(Assembler::zero, L_processTail_2_extr[k]);
      __ pextrd(Address(to, pos), xmm_result0, 0);
      __ psrldq(xmm_result0, 4);
      __ addptr(pos, 4);
      __ BIND(L_processTail_2_extr[k]);
      __ testptr(len_reg, 2);
      __ jcc(Assembler::zero, L_processTail_1_extr[k]);
      __ pextrw(Address(to, pos), xmm_result0, 0);
      __ psrldq(xmm_result0, 2);
      __ addptr(pos, 2);
      __ BIND(L_processTail_1_extr[k]);
      __ testptr(len_reg, 1);
      __ jcc(Assembler::zero, L_processTail_exit_extr[k]);
      __ pextrb(Address(to, pos), xmm_result0, 0);

      __ BIND(L_processTail_exit_extr[k]);
      __ movl(Address(used_addr, 0), len_reg);
      __ jmp(L_exit);

    }

    __ BIND(L_exit);
    __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask);  // counter is shuffled back
    __ movdqu(Address(counter, 0), xmm_curr_counter);    // save counter back
    __ pop(rbx);  // pop the saved RBX
#ifdef _WIN64
    __ movl(rax, len_mem);
    __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
    __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
    __ addptr(rsp, 2 * wordSize);
#else
    __ pop(rax);  // return 'len'
#endif
    __ leave();   // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  void roundDec(XMMRegister xmm_reg) {
    __ vaesdec(xmm1, xmm1, xmm_reg, Assembler::AVX_512bit);
    __ vaesdec(xmm2, xmm2, xmm_reg, Assembler::AVX_512bit);
    __ vaesdec(xmm3, xmm3, xmm_reg, Assembler::AVX_512bit);
    __ vaesdec(xmm4, xmm4, xmm_reg, Assembler::AVX_512bit);
    __ vaesdec(xmm5, xmm5, xmm_reg, Assembler::AVX_512bit);
    __ vaesdec(xmm6, xmm6, xmm_reg, Assembler::AVX_512bit);
    __ vaesdec(xmm7, xmm7, xmm_reg, Assembler::AVX_512bit);
    __ vaesdec(xmm8, xmm8, xmm_reg, Assembler::AVX_512bit);
  }

  void roundDeclast(XMMRegister xmm_reg) {
    __ vaesdeclast(xmm1, xmm1, xmm_reg, Assembler::AVX_512bit);
    __ vaesdeclast(xmm2, xmm2, xmm_reg, Assembler::AVX_512bit);
    __ vaesdeclast(xmm3, xmm3, xmm_reg, Assembler::AVX_512bit);
    __ vaesdeclast(xmm4, xmm4, xmm_reg, Assembler::AVX_512bit);
    __ vaesdeclast(xmm5, xmm5, xmm_reg, Assembler::AVX_512bit);
    __ vaesdeclast(xmm6, xmm6, xmm_reg, Assembler::AVX_512bit);
    __ vaesdeclast(xmm7, xmm7, xmm_reg, Assembler::AVX_512bit);
    __ vaesdeclast(xmm8, xmm8, xmm_reg, Assembler::AVX_512bit);
  }

  void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask = NULL) {
    __ movdqu(xmmdst, Address(key, offset));
    if (xmm_shuf_mask != NULL) {
      __ pshufb(xmmdst, xmm_shuf_mask);
    } else {
      __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    }
    __ evshufi64x2(xmmdst, xmmdst, xmmdst, 0x0, Assembler::AVX_512bit);
  }
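  // With VAES, vaesdec operates on all four 128-bit lanes of a ZMM register,
  // so the eight B0..B7 registers in the stub below carry 8 * 4 = 32 AES
  // blocks (32 * 16 = 512 bytes) through the rounds per iteration of Loop1.
  // That is why the main loop peels off 512 bytes at a time and falls back to
  // a single-block remainder loop below that threshold.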
  address generate_cipherBlockChaining_decryptVectorAESCrypt() {
    assert(VM_Version::supports_vaes(), "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
    address start = __ pc();

    const Register from = c_rarg0;  // source array address
    const Register to   = c_rarg1;  // destination array address
    const Register key  = c_rarg2;  // key array address
    const Register rvec = c_rarg3;  // r byte array initialized from initvector array address
                                    // and left with the results of the last encryption block
#ifndef _WIN64
    const Register len_reg = c_rarg4;  // src len (must be multiple of blocksize 16)
#else
    const Address  len_mem(rbp, 6 * wordSize);  // length is on stack on Win64
    const Register len_reg = r11;               // pick the volatile windows register
#endif

    Label Loop, Loop1, L_128, L_256, L_192, KEY_192, KEY_256, Loop2, Lcbc_dec_rem_loop,
          Lcbc_dec_rem_last, Lcbc_dec_ret, Lcbc_dec_rem, Lcbc_exit;

    __ enter();

#ifdef _WIN64
    // on win64, fill len_reg from stack position
    __ movl(len_reg, len_mem);
#else
    __ push(len_reg);  // Save
#endif
    __ push(rbx);
    __ vzeroupper();

    // Temporary variable declaration for swapping key bytes
    const XMMRegister xmm_key_shuf_mask = xmm1;
    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));

    // Calculate number of rounds from key size: 44 for 10-rounds, 52 for 12-rounds, 60 for 14-rounds
    const Register rounds = rbx;
    __ movl(rounds, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    const XMMRegister IV = xmm0;
    // Load IV and broadcast value to 512-bits
    __ evbroadcasti64x2(IV, Address(rvec, 0), Assembler::AVX_512bit);

    // Temporary variables for storing round keys
    const XMMRegister RK0  = xmm30;
    const XMMRegister RK1  = xmm9;
    const XMMRegister RK2  = xmm18;
    const XMMRegister RK3  = xmm19;
    const XMMRegister RK4  = xmm20;
    const XMMRegister RK5  = xmm21;
    const XMMRegister RK6  = xmm22;
    const XMMRegister RK7  = xmm23;
    const XMMRegister RK8  = xmm24;
    const XMMRegister RK9  = xmm25;
    const XMMRegister RK10 = xmm26;

    // Load and shuffle key
    // the java expanded key ordering is rotated one position from what we want
    // so we start from 1*16 here and hit 0*16 last
    ev_load_key(RK1, key, 1 * 16, xmm_key_shuf_mask);
    ev_load_key(RK2, key, 2 * 16, xmm_key_shuf_mask);
    ev_load_key(RK3, key, 3 * 16, xmm_key_shuf_mask);
    ev_load_key(RK4, key, 4 * 16, xmm_key_shuf_mask);
    ev_load_key(RK5, key, 5 * 16, xmm_key_shuf_mask);
    ev_load_key(RK6, key, 6 * 16, xmm_key_shuf_mask);
    ev_load_key(RK7, key, 7 * 16, xmm_key_shuf_mask);
    ev_load_key(RK8, key, 8 * 16, xmm_key_shuf_mask);
    ev_load_key(RK9, key, 9 * 16, xmm_key_shuf_mask);
    ev_load_key(RK10, key, 10 * 16, xmm_key_shuf_mask);
    ev_load_key(RK0, key, 0 * 16, xmm_key_shuf_mask);

    // Variables for storing source cipher text
    const XMMRegister S0 = xmm10;
    const XMMRegister S1 = xmm11;
    const XMMRegister S2 = xmm12;
    const XMMRegister S3 = xmm13;
    const XMMRegister S4 = xmm14;
    const XMMRegister S5 = xmm15;
    const XMMRegister S6 = xmm16;
    const XMMRegister S7 = xmm17;

    // Variables for storing decrypted text
    const XMMRegister B0 = xmm1;
    const XMMRegister B1 = xmm2;
    const XMMRegister B2 = xmm3;
    const XMMRegister B3 = xmm4;
    const XMMRegister B4 = xmm5;
    const XMMRegister B5 = xmm6;
    const XMMRegister B6 = xmm7;
    const XMMRegister B7 = xmm8;

    __ cmpl(rounds, 44);
    __ jcc(Assembler::greater, KEY_192);
    __ jmp(Loop);

    __ BIND(KEY_192);
    const XMMRegister RK11 = xmm27;
    const XMMRegister RK12 = xmm28;
    ev_load_key(RK11, key, 11 * 16, xmm_key_shuf_mask);
    ev_load_key(RK12, key, 12 * 16, xmm_key_shuf_mask);

    __ cmpl(rounds, 52);
    __ jcc(Assembler::greater, KEY_256);
    __ jmp(Loop);

    __ BIND(KEY_256);
    const XMMRegister RK13 = xmm29;
    const XMMRegister RK14 = xmm31;
    ev_load_key(RK13, key, 13 * 16, xmm_key_shuf_mask);
    ev_load_key(RK14, key, 14 * 16, xmm_key_shuf_mask);

    __ BIND(Loop);
    __ cmpl(len_reg, 512);
    __ jcc(Assembler::below, Lcbc_dec_rem);
    __ BIND(Loop1);
    __ subl(len_reg, 512);
    __ evmovdquq(S0, Address(from, 0 * 64), Assembler::AVX_512bit);
    __ evmovdquq(S1, Address(from, 1 * 64), Assembler::AVX_512bit);
    __ evmovdquq(S2, Address(from, 2 * 64), Assembler::AVX_512bit);
    __ evmovdquq(S3, Address(from, 3 * 64), Assembler::AVX_512bit);
    __ evmovdquq(S4, Address(from, 4 * 64), Assembler::AVX_512bit);
    __ evmovdquq(S5, Address(from, 5 * 64), Assembler::AVX_512bit);
    __ evmovdquq(S6, Address(from, 6 * 64), Assembler::AVX_512bit);
    __ evmovdquq(S7, Address(from, 7 * 64), Assembler::AVX_512bit);
    __ leaq(from, Address(from, 8 * 64));
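    // Comment only: the evpxorq chain below applies round key 1 to all 32
    // lanes, and the evalignq chain then builds, for each 512-bit register,
    // the vector of preceding ciphertext blocks that CBC needs:
    // evalignq(dst, hi, lo, 0x06) selects qwords 6..13 of the hi:lo
    // concatenation, i.e. dst = { lo's top 128-bit block, hi's lower three
    // blocks }, so after the chain IV and S0..S6 hold each block's
    // predecessor, ready for the final xor in Loop2.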
    __ evpxorq(B0, S0, RK1, Assembler::AVX_512bit);
    __ evpxorq(B1, S1, RK1, Assembler::AVX_512bit);
    __ evpxorq(B2, S2, RK1, Assembler::AVX_512bit);
    __ evpxorq(B3, S3, RK1, Assembler::AVX_512bit);
    __ evpxorq(B4, S4, RK1, Assembler::AVX_512bit);
    __ evpxorq(B5, S5, RK1, Assembler::AVX_512bit);
    __ evpxorq(B6, S6, RK1, Assembler::AVX_512bit);
    __ evpxorq(B7, S7, RK1, Assembler::AVX_512bit);

    __ evalignq(IV, S0, IV, 0x06);
    __ evalignq(S0, S1, S0, 0x06);
    __ evalignq(S1, S2, S1, 0x06);
    __ evalignq(S2, S3, S2, 0x06);
    __ evalignq(S3, S4, S3, 0x06);
    __ evalignq(S4, S5, S4, 0x06);
    __ evalignq(S5, S6, S5, 0x06);
    __ evalignq(S6, S7, S6, 0x06);

    roundDec(RK2);
    roundDec(RK3);
    roundDec(RK4);
    roundDec(RK5);
    roundDec(RK6);
    roundDec(RK7);
    roundDec(RK8);
    roundDec(RK9);
    roundDec(RK10);

    __ cmpl(rounds, 44);
    __ jcc(Assembler::belowEqual, L_128);
    roundDec(RK11);
    roundDec(RK12);

    __ cmpl(rounds, 52);
    __ jcc(Assembler::belowEqual, L_192);
    roundDec(RK13);
    roundDec(RK14);

    __ BIND(L_256);
    roundDeclast(RK0);
    __ jmp(Loop2);

    __ BIND(L_128);
    roundDeclast(RK0);
    __ jmp(Loop2);

    __ BIND(L_192);
    roundDeclast(RK0);

    __ BIND(Loop2);
    __ evpxorq(B0, B0, IV, Assembler::AVX_512bit);
    __ evpxorq(B1, B1, S0, Assembler::AVX_512bit);
    __ evpxorq(B2, B2, S1, Assembler::AVX_512bit);
    __ evpxorq(B3, B3, S2, Assembler::AVX_512bit);
    __ evpxorq(B4, B4, S3, Assembler::AVX_512bit);
    __ evpxorq(B5, B5, S4, Assembler::AVX_512bit);
    __ evpxorq(B6, B6, S5, Assembler::AVX_512bit);
    __ evpxorq(B7, B7, S6, Assembler::AVX_512bit);
    __ evmovdquq(IV, S7, Assembler::AVX_512bit);

    __ evmovdquq(Address(to, 0 * 64), B0, Assembler::AVX_512bit);
    __ evmovdquq(Address(to, 1 * 64), B1, Assembler::AVX_512bit);
    __ evmovdquq(Address(to, 2 * 64), B2, Assembler::AVX_512bit);
    __ evmovdquq(Address(to, 3 * 64), B3, Assembler::AVX_512bit);
    __ evmovdquq(Address(to, 4 * 64), B4, Assembler::AVX_512bit);
    __ evmovdquq(Address(to, 5 * 64), B5, Assembler::AVX_512bit);
    __ evmovdquq(Address(to, 6 * 64), B6, Assembler::AVX_512bit);
    __ evmovdquq(Address(to, 7 * 64), B7, Assembler::AVX_512bit);
    __ leaq(to, Address(to, 8 * 64));
    __ jmp(Loop);

    __ BIND(Lcbc_dec_rem);
    __ evshufi64x2(IV, IV, IV, 0x03, Assembler::AVX_512bit);

    __ BIND(Lcbc_dec_rem_loop);
    __ subl(len_reg, 16);
    __ jcc(Assembler::carrySet, Lcbc_dec_ret);

    __ movdqu(S0, Address(from, 0));
    __ evpxorq(B0, S0, RK1, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK2, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK3, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK4, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK5, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK6, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK7, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK8, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK9, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK10, Assembler::AVX_512bit);
    __ cmpl(rounds, 44);
    __ jcc(Assembler::belowEqual, Lcbc_dec_rem_last);

    __ vaesdec(B0, B0, RK11, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK12, Assembler::AVX_512bit);
    __ cmpl(rounds, 52);
    __ jcc(Assembler::belowEqual, Lcbc_dec_rem_last);

    __ vaesdec(B0, B0, RK13, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK14, Assembler::AVX_512bit);

    __ BIND(Lcbc_dec_rem_last);
    __ vaesdeclast(B0, B0, RK0, Assembler::AVX_512bit);

    __ evpxorq(B0, B0, IV, Assembler::AVX_512bit);
    __ evmovdquq(IV, S0, Assembler::AVX_512bit);
    __ movdqu(Address(to, 0), B0);
    __ leaq(from, Address(from, 16));
    __ leaq(to, Address(to, 16));
    __ jmp(Lcbc_dec_rem_loop);

    __ BIND(Lcbc_dec_ret);
    __ movdqu(Address(rvec, 0), IV);

    // Zero out the round keys
    __ evpxorq(RK0, RK0, RK0, Assembler::AVX_512bit);
    __ evpxorq(RK1, RK1, RK1, Assembler::AVX_512bit);
    __ evpxorq(RK2, RK2, RK2, Assembler::AVX_512bit);
    __ evpxorq(RK3, RK3, RK3, Assembler::AVX_512bit);
    __ evpxorq(RK4, RK4, RK4, Assembler::AVX_512bit);
    __ evpxorq(RK5, RK5, RK5, Assembler::AVX_512bit);
    __ evpxorq(RK6, RK6, RK6, Assembler::AVX_512bit);
    __ evpxorq(RK7, RK7, RK7, Assembler::AVX_512bit);
    __ evpxorq(RK8, RK8, RK8, Assembler::AVX_512bit);
    __ evpxorq(RK9, RK9, RK9, Assembler::AVX_512bit);
    __ evpxorq(RK10, RK10, RK10, Assembler::AVX_512bit);
    __ cmpl(rounds, 44);
    __ jcc(Assembler::belowEqual, Lcbc_exit);
    __ evpxorq(RK11, RK11, RK11, Assembler::AVX_512bit);
    __ evpxorq(RK12, RK12, RK12, Assembler::AVX_512bit);
    __ cmpl(rounds, 52);
    __ jcc(Assembler::belowEqual, Lcbc_exit);
    __ evpxorq(RK13, RK13, RK13, Assembler::AVX_512bit);
    __ evpxorq(RK14, RK14, RK14, Assembler::AVX_512bit);

    __ BIND(Lcbc_exit);
    __ pop(rbx);
#ifdef _WIN64
    __ movl(rax, len_mem);
#else
    __ pop(rax);  // return length
#endif
    __ leave();   // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  // byte swap x86 long
  address generate_ghash_long_swap_mask() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask");
    address start = __ pc();
    __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none);
    __ emit_data64(0x0706050403020100, relocInfo::none);
    return start;
  }

  // byte swap x86 byte array
  address generate_ghash_byte_swap_mask() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }

  /* Single and multi-block ghash operations */
  address generate_ghash_processBlocks() {
    __ align(CodeEntryAlignment);
    Label L_ghash_loop, L_exit;
    StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
    address start = __ pc();

    const Register state   = c_rarg0;
    const Register subkeyH = c_rarg1;
    const Register data    = c_rarg2;
    const Register blocks  = c_rarg3;

    const XMMRegister xmm_temp0  = xmm0;
    const XMMRegister xmm_temp1  = xmm1;
    const XMMRegister xmm_temp2  = xmm2;
    const XMMRegister xmm_temp3  = xmm3;
    const XMMRegister xmm_temp4  = xmm4;
    const XMMRegister xmm_temp5  = xmm5;
    const XMMRegister xmm_temp6  = xmm6;
    const XMMRegister xmm_temp7  = xmm7;
    const XMMRegister xmm_temp8  = xmm8;
    const XMMRegister xmm_temp9  = xmm9;
    const XMMRegister xmm_temp10 = xmm10;

    __ enter();
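    // GHASH reference (comment only): over GF(2^128) the loop below computes
    //
    //   state = (state ^ data[i]) * subkeyH    for each 16-byte block,
    //
    // using pclmulqdq for the 128x128 -> 256-bit carry-less multiply and the
    // two shift/xor phases further down to reduce the product modulo the
    // GHASH polynomial x^128 + x^7 + x^2 + x + 1. The pshufb byte swaps
    // convert between memory order and the bit-reflected representation the
    // algorithm is defined in.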
    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

    __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));

    __ movdqu(xmm_temp0, Address(state, 0));
    __ pshufb(xmm_temp0, xmm_temp10);


    __ BIND(L_ghash_loop);
    __ movdqu(xmm_temp2, Address(data, 0));
    __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr()));

    __ movdqu(xmm_temp1, Address(subkeyH, 0));
    __ pshufb(xmm_temp1, xmm_temp10);

    __ pxor(xmm_temp0, xmm_temp2);

    //
    // Multiply with the hash key
    //
    __ movdqu(xmm_temp3, xmm_temp0);
    __ pclmulqdq(xmm_temp3, xmm_temp1, 0);   // xmm3 holds a0*b0
    __ movdqu(xmm_temp4, xmm_temp0);
    __ pclmulqdq(xmm_temp4, xmm_temp1, 16);  // xmm4 holds a0*b1

    __ movdqu(xmm_temp5, xmm_temp0);
    __ pclmulqdq(xmm_temp5, xmm_temp1, 1);   // xmm5 holds a1*b0
    __ movdqu(xmm_temp6, xmm_temp0);
    __ pclmulqdq(xmm_temp6, xmm_temp1, 17);  // xmm6 holds a1*b1

    __ pxor(xmm_temp4, xmm_temp5);    // xmm4 holds a0*b1 + a1*b0

    __ movdqu(xmm_temp5, xmm_temp4);  // move the contents of xmm4 to xmm5
    __ psrldq(xmm_temp4, 8);          // shift xmm4 by 64 bits to the right
    __ pslldq(xmm_temp5, 8);          // shift xmm5 by 64 bits to the left
    __ pxor(xmm_temp3, xmm_temp5);
    __ pxor(xmm_temp6, xmm_temp4);    // Register pair <xmm6:xmm3> holds the result
                                      // of the carry-less multiplication of
                                      // xmm0 by xmm1.

    // We shift the result of the multiplication by one bit position
    // to the left to cope for the fact that the bits are reversed.
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp8, xmm_temp6);
    __ pslld(xmm_temp3, 1);
    __ pslld(xmm_temp6, 1);
    __ psrld(xmm_temp7, 31);
    __ psrld(xmm_temp8, 31);
    __ movdqu(xmm_temp9, xmm_temp7);
    __ pslldq(xmm_temp8, 4);
    __ pslldq(xmm_temp7, 4);
    __ psrldq(xmm_temp9, 12);
    __ por(xmm_temp3, xmm_temp7);
    __ por(xmm_temp6, xmm_temp8);
    __ por(xmm_temp6, xmm_temp9);

    //
    // First phase of the reduction
    //
    // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts
    // independently.
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp8, xmm_temp3);
    __ movdqu(xmm_temp9, xmm_temp3);
    __ pslld(xmm_temp7, 31);        // packed left shift by 31
    __ pslld(xmm_temp8, 30);        // packed left shift by 30
    __ pslld(xmm_temp9, 25);        // packed left shift by 25
    __ pxor(xmm_temp7, xmm_temp8);  // xor the shifted versions
    __ pxor(xmm_temp7, xmm_temp9);
    __ movdqu(xmm_temp8, xmm_temp7);
    __ pslldq(xmm_temp7, 12);
    __ psrldq(xmm_temp8, 4);
    __ pxor(xmm_temp3, xmm_temp7);  // first phase of the reduction complete

    //
    // Second phase of the reduction
    //
    // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these
    // shift operations.
    __ movdqu(xmm_temp2, xmm_temp3);
    __ movdqu(xmm_temp4, xmm_temp3);
    __ movdqu(xmm_temp5, xmm_temp3);
    __ psrld(xmm_temp2, 1);         // packed right shift by 1
    __ psrld(xmm_temp4, 2);         // packed right shift by 2
    __ psrld(xmm_temp5, 7);         // packed right shift by 7
    __ pxor(xmm_temp2, xmm_temp4);  // xor the shifted versions
    __ pxor(xmm_temp2, xmm_temp5);
    __ pxor(xmm_temp2, xmm_temp8);
    __ pxor(xmm_temp3, xmm_temp2);
    __ pxor(xmm_temp6, xmm_temp3);  // the result is in xmm6

    __ decrement(blocks);
    __ jcc(Assembler::zero, L_exit);
    __ movdqu(xmm_temp0, xmm_temp6);
    __ addptr(data, 16);
    __ jmp(L_ghash_loop);

    __ BIND(L_exit);
    __ pshufb(xmm_temp6, xmm_temp10);         // Byte swap 16-byte result
    __ movdqu(Address(state, 0), xmm_temp6);  // store the result
    __ leave();
    __ ret(0);
    return start;
  }

  // Base64 character set
  address base64_charset_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "base64_charset");
    address start = __ pc();
    __ emit_data64(0x0000004200000041, relocInfo::none);
    __ emit_data64(0x0000004400000043, relocInfo::none);
    __ emit_data64(0x0000004600000045, relocInfo::none);
    __ emit_data64(0x0000004800000047, relocInfo::none);
    __ emit_data64(0x0000004a00000049, relocInfo::none);
    __ emit_data64(0x0000004c0000004b, relocInfo::none);
    __ emit_data64(0x0000004e0000004d, relocInfo::none);
    __ emit_data64(0x000000500000004f, relocInfo::none);
    __ emit_data64(0x0000005200000051, relocInfo::none);
    __ emit_data64(0x0000005400000053, relocInfo::none);
    __ emit_data64(0x0000005600000055, relocInfo::none);
    __ emit_data64(0x0000005800000057, relocInfo::none);
    __ emit_data64(0x0000005a00000059, relocInfo::none);
    __ emit_data64(0x0000006200000061, relocInfo::none);
    __ emit_data64(0x0000006400000063, relocInfo::none);
    __ emit_data64(0x0000006600000065, relocInfo::none);
    __ emit_data64(0x0000006800000067, relocInfo::none);
    __ emit_data64(0x0000006a00000069, relocInfo::none);
    __ emit_data64(0x0000006c0000006b, relocInfo::none);
    __ emit_data64(0x0000006e0000006d, relocInfo::none);
    __ emit_data64(0x000000700000006f, relocInfo::none);
    __ emit_data64(0x0000007200000071, relocInfo::none);
    __ emit_data64(0x0000007400000073, relocInfo::none);
    __ emit_data64(0x0000007600000075, relocInfo::none);
    __ emit_data64(0x0000007800000077, relocInfo::none);
    __ emit_data64(0x0000007a00000079, relocInfo::none);
    __ emit_data64(0x0000003100000030, relocInfo::none);
    __ emit_data64(0x0000003300000032, relocInfo::none);
    __ emit_data64(0x0000003500000034, relocInfo::none);
    __ emit_data64(0x0000003700000036, relocInfo::none);
    __ emit_data64(0x0000003900000038, relocInfo::none);
    __ emit_data64(0x0000002f0000002b, relocInfo::none);
    return start;
  }
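  // Table layout note: each Base64 output character is stored as a 32-bit
  // int, so every emit_data64 above packs two table entries; for example
  // 0x0000004200000041 is 'A' (0x41) followed by 'B' (0x42). The 64 entries
  // map the 6-bit values 0-63 to "A-Za-z0-9+/", and the URL-safe variant
  // below differs only in the last two characters ('-' and '_').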
emit_data64(0x0000004e0000004d, relocInfo::none); 4650 __ emit_data64(0x000000500000004f, relocInfo::none); 4651 __ emit_data64(0x0000005200000051, relocInfo::none); 4652 __ emit_data64(0x0000005400000053, relocInfo::none); 4653 __ emit_data64(0x0000005600000055, relocInfo::none); 4654 __ emit_data64(0x0000005800000057, relocInfo::none); 4655 __ emit_data64(0x0000005a00000059, relocInfo::none); 4656 __ emit_data64(0x0000006200000061, relocInfo::none); 4657 __ emit_data64(0x0000006400000063, relocInfo::none); 4658 __ emit_data64(0x0000006600000065, relocInfo::none); 4659 __ emit_data64(0x0000006800000067, relocInfo::none); 4660 __ emit_data64(0x0000006a00000069, relocInfo::none); 4661 __ emit_data64(0x0000006c0000006b, relocInfo::none); 4662 __ emit_data64(0x0000006e0000006d, relocInfo::none); 4663 __ emit_data64(0x000000700000006f, relocInfo::none); 4664 __ emit_data64(0x0000007200000071, relocInfo::none); 4665 __ emit_data64(0x0000007400000073, relocInfo::none); 4666 __ emit_data64(0x0000007600000075, relocInfo::none); 4667 __ emit_data64(0x0000007800000077, relocInfo::none); 4668 __ emit_data64(0x0000007a00000079, relocInfo::none); 4669 __ emit_data64(0x0000003100000030, relocInfo::none); 4670 __ emit_data64(0x0000003300000032, relocInfo::none); 4671 __ emit_data64(0x0000003500000034, relocInfo::none); 4672 __ emit_data64(0x0000003700000036, relocInfo::none); 4673 __ emit_data64(0x0000003900000038, relocInfo::none); 4674 __ emit_data64(0x0000005f0000002d, relocInfo::none); 4675 4676 return start; 4677 } 4678 4679 address base64_bswap_mask_addr() { 4680 __ align(CodeEntryAlignment); 4681 StubCodeMark mark(this, "StubRoutines", "bswap_mask_base64"); 4682 address start = __ pc(); 4683 __ emit_data64(0x0504038002010080, relocInfo::none); 4684 __ emit_data64(0x0b0a098008070680, relocInfo::none); 4685 __ emit_data64(0x0908078006050480, relocInfo::none); 4686 __ emit_data64(0x0f0e0d800c0b0a80, relocInfo::none); 4687 __ emit_data64(0x0605048003020180, relocInfo::none); 4688 __ emit_data64(0x0c0b0a8009080780, relocInfo::none); 4689 __ emit_data64(0x0504038002010080, relocInfo::none); 4690 __ emit_data64(0x0b0a098008070680, relocInfo::none); 4691 4692 return start; 4693 } 4694 4695 address base64_right_shift_mask_addr() { 4696 __ align(CodeEntryAlignment); 4697 StubCodeMark mark(this, "StubRoutines", "right_shift_mask"); 4698 address start = __ pc(); 4699 __ emit_data64(0x0006000400020000, relocInfo::none); 4700 __ emit_data64(0x0006000400020000, relocInfo::none); 4701 __ emit_data64(0x0006000400020000, relocInfo::none); 4702 __ emit_data64(0x0006000400020000, relocInfo::none); 4703 __ emit_data64(0x0006000400020000, relocInfo::none); 4704 __ emit_data64(0x0006000400020000, relocInfo::none); 4705 __ emit_data64(0x0006000400020000, relocInfo::none); 4706 __ emit_data64(0x0006000400020000, relocInfo::none); 4707 4708 return start; 4709 } 4710 4711 address base64_left_shift_mask_addr() { 4712 __ align(CodeEntryAlignment); 4713 StubCodeMark mark(this, "StubRoutines", "left_shift_mask"); 4714 address start = __ pc(); 4715 __ emit_data64(0x0000000200040000, relocInfo::none); 4716 __ emit_data64(0x0000000200040000, relocInfo::none); 4717 __ emit_data64(0x0000000200040000, relocInfo::none); 4718 __ emit_data64(0x0000000200040000, relocInfo::none); 4719 __ emit_data64(0x0000000200040000, relocInfo::none); 4720 __ emit_data64(0x0000000200040000, relocInfo::none); 4721 __ emit_data64(0x0000000200040000, relocInfo::none); 4722 __ emit_data64(0x0000000200040000, relocInfo::none); 4723 4724 return start; 4725 } 
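// How the shift and mask tables above are used (a sketch of the encode data
// flow, see generate_base64_encodeBlock below): after the byte-swap shuffle
// and vpmovzxbw, each 64-bit lane holds the words {0, b0, b1, b2} for three
// source bytes. The right-shift mask supplies the per-word shift counts
// {0, 2, 4, 6}, the left-shift mask supplies {0, 4, 2, 0}, and the 0x3f
// and-mask defined below keeps six bits per word, so a variable shift, an AND
// and an OR slice each 24-bit group into four 6-bit indices (the
// 6, 4+2, 2+4, 6 pattern noted in the encoder).
// Worked example: "Man" = 0x4D,0x61,0x6E -> 010011 010110 000101 101110
// -> indices 19, 22, 5, 46 -> "TWFu".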
4726 4727 address base64_and_mask_addr() { 4728 __ align(CodeEntryAlignment); 4729 StubCodeMark mark(this, "StubRoutines", "and_mask"); 4730 address start = __ pc(); 4731 __ emit_data64(0x3f003f003f000000, relocInfo::none); 4732 __ emit_data64(0x3f003f003f000000, relocInfo::none); 4733 __ emit_data64(0x3f003f003f000000, relocInfo::none); 4734 __ emit_data64(0x3f003f003f000000, relocInfo::none); 4735 __ emit_data64(0x3f003f003f000000, relocInfo::none); 4736 __ emit_data64(0x3f003f003f000000, relocInfo::none); 4737 __ emit_data64(0x3f003f003f000000, relocInfo::none); 4738 __ emit_data64(0x3f003f003f000000, relocInfo::none); 4739 return start; 4740 } 4741 4742 address base64_gather_mask_addr() { 4743 __ align(CodeEntryAlignment); 4744 StubCodeMark mark(this, "StubRoutines", "gather_mask"); 4745 address start = __ pc(); 4746 __ emit_data64(0xffffffffffffffff, relocInfo::none); 4747 return start; 4748 } 4749 4750 // Code for generating Base64 encoding. 4751 // Intrinsic function prototype in Base64.java: 4752 // private void encodeBlock(byte[] src, int sp, int sl, byte[] dst, int dp, boolean isURL) { 4753 address generate_base64_encodeBlock() { 4754 __ align(CodeEntryAlignment); 4755 StubCodeMark mark(this, "StubRoutines", "implEncode"); 4756 address start = __ pc(); 4757 __ enter(); 4758 4759 // Save callee-saved registers before using them 4760 __ push(r12); 4761 __ push(r13); 4762 __ push(r14); 4763 __ push(r15); 4764 __ push(rbx); 4765 4766 // arguments 4767 const Register source = c_rarg0; // Source Array 4768 const Register start_offset = c_rarg1; // start offset 4769 const Register end_offset = c_rarg2; // end offset 4770 const Register dest = c_rarg3; // destination array 4771 4772 #ifndef _WIN64 4773 const Register dp = c_rarg4; // Position for writing to dest array 4774 const Register isURL = c_rarg5; // Base64 or URL character set 4775 #else 4776 const Address dp_mem(rbp, 6 * wordSize); // dp and isURL are passed on the stack on Win64 4777 const Address isURL_mem(rbp, 7 * wordSize); 4778 const Register isURL = r10; // pick a volatile Windows register 4779 const Register dp = r12; 4780 __ movl(dp, dp_mem); 4781 __ movl(isURL, isURL_mem); 4782 #endif 4783 4784 const Register length = r14; 4785 Label L_process80, L_process32, L_process3, L_exit, L_processdata; 4786 4787 // calculate length from offsets 4788 __ movl(length, end_offset); 4789 __ subl(length, start_offset); 4790 __ cmpl(length, 0); 4791 __ jcc(Assembler::lessEqual, L_exit); 4792 4793 // Save k1 value in rbx 4794 __ kmovql(rbx, k1); 4795 __ lea(r11, ExternalAddress(StubRoutines::x86::base64_charset_addr())); 4796 // check whether the Base64 charset (isURL=0) or the Base64 URL charset (isURL=1) is needed 4797 __ cmpl(isURL, 0); 4798 __ jcc(Assembler::equal, L_processdata); 4799 __ lea(r11, ExternalAddress(StubRoutines::x86::base64url_charset_addr())); 4800 4801 // load masks required for encoding data 4802 __ BIND(L_processdata); 4803 __ movdqu(xmm16, ExternalAddress(StubRoutines::x86::base64_gather_mask_addr())); 4804 // Set 64 bits of K register.
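// Comparing xmm16 with itself makes every byte lane equal, so the evpcmpeqb
// below sets all 64 bits of k1. k1 is then copied into k2 before each gather
// because AVX-512 gather instructions clear their mask register as elements
// complete, so the full mask must be re-established for every evpgatherdd.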
4805 __ evpcmpeqb(k1, xmm16, xmm16, Assembler::AVX_512bit); 4806 __ evmovdquq(xmm12, ExternalAddress(StubRoutines::x86::base64_bswap_mask_addr()), Assembler::AVX_256bit, r13); 4807 __ evmovdquq(xmm13, ExternalAddress(StubRoutines::x86::base64_right_shift_mask_addr()), Assembler::AVX_512bit, r13); 4808 __ evmovdquq(xmm14, ExternalAddress(StubRoutines::x86::base64_left_shift_mask_addr()), Assembler::AVX_512bit, r13); 4809 __ evmovdquq(xmm15, ExternalAddress(StubRoutines::x86::base64_and_mask_addr()), Assembler::AVX_512bit, r13); 4810 4811 // Vector Base64 implementation, producing 96 bytes of encoded data 4812 __ BIND(L_process80); 4813 __ cmpl(length, 80); 4814 __ jcc(Assembler::below, L_process32); 4815 __ evmovdquq(xmm0, Address(source, start_offset, Address::times_1, 0), Assembler::AVX_256bit); 4816 __ evmovdquq(xmm1, Address(source, start_offset, Address::times_1, 24), Assembler::AVX_256bit); 4817 __ evmovdquq(xmm2, Address(source, start_offset, Address::times_1, 48), Assembler::AVX_256bit); 4818 4819 // permute the input so that the source bytes are contiguous 4820 __ vpermq(xmm3, xmm0, 148, Assembler::AVX_256bit); 4821 __ vpermq(xmm4, xmm1, 148, Assembler::AVX_256bit); 4822 __ vpermq(xmm5, xmm2, 148, Assembler::AVX_256bit); 4823 4824 // shuffle the input, grouping 3 bytes of data with a zero as the 4th byte; 4825 // we can process 12 bytes at a time in a 128-bit register 4826 __ vpshufb(xmm3, xmm3, xmm12, Assembler::AVX_256bit); 4827 __ vpshufb(xmm4, xmm4, xmm12, Assembler::AVX_256bit); 4828 __ vpshufb(xmm5, xmm5, xmm12, Assembler::AVX_256bit); 4829 4830 // convert bytes to words; each 128-bit lane then carries 6 data bytes 4831 __ vpmovzxbw(xmm3, xmm3, Assembler::AVX_512bit); 4832 __ vpmovzxbw(xmm4, xmm4, Assembler::AVX_512bit); 4833 __ vpmovzxbw(xmm5, xmm5, Assembler::AVX_512bit); 4834 4835 // Extract bits in the pattern 6, 4+2, 2+4, 6 to convert three 8-bit bytes into four 6-bit numbers 4836 __ evpsrlvw(xmm0, xmm3, xmm13, Assembler::AVX_512bit); 4837 __ evpsrlvw(xmm1, xmm4, xmm13, Assembler::AVX_512bit); 4838 __ evpsrlvw(xmm2, xmm5, xmm13, Assembler::AVX_512bit); 4839 4840 __ evpsllvw(xmm3, xmm3, xmm14, Assembler::AVX_512bit); 4841 __ evpsllvw(xmm4, xmm4, xmm14, Assembler::AVX_512bit); 4842 __ evpsllvw(xmm5, xmm5, xmm14, Assembler::AVX_512bit); 4843 4844 __ vpsrlq(xmm0, xmm0, 8, Assembler::AVX_512bit); 4845 __ vpsrlq(xmm1, xmm1, 8, Assembler::AVX_512bit); 4846 __ vpsrlq(xmm2, xmm2, 8, Assembler::AVX_512bit); 4847 4848 __ vpsllq(xmm3, xmm3, 8, Assembler::AVX_512bit); 4849 __ vpsllq(xmm4, xmm4, 8, Assembler::AVX_512bit); 4850 __ vpsllq(xmm5, xmm5, 8, Assembler::AVX_512bit); 4851 4852 __ vpandq(xmm3, xmm3, xmm15, Assembler::AVX_512bit); 4853 __ vpandq(xmm4, xmm4, xmm15, Assembler::AVX_512bit); 4854 __ vpandq(xmm5, xmm5, xmm15, Assembler::AVX_512bit); 4855 4856 // Get the final 4*6-bit Base64 encoding 4857 __ vporq(xmm3, xmm3, xmm0, Assembler::AVX_512bit); 4858 __ vporq(xmm4, xmm4, xmm1, Assembler::AVX_512bit); 4859 __ vporq(xmm5, xmm5, xmm2, Assembler::AVX_512bit); 4860 4861 // Shift 4862 __ vpsrlq(xmm3, xmm3, 8, Assembler::AVX_512bit); 4863 __ vpsrlq(xmm4, xmm4, 8, Assembler::AVX_512bit); 4864 __ vpsrlq(xmm5, xmm5, 8, Assembler::AVX_512bit); 4865 4866 // look up each 6-bit index in the Base64 character set to fetch its encoding; 4867 // words are widened to dwords because gather instructions need dword indices 4868 __ vextracti64x4(xmm6, xmm3, 0); 4869 __ vpmovzxwd(xmm0, xmm6, Assembler::AVX_512bit); 4870 __ vextracti64x4(xmm6, xmm3, 1);
4871 __ vpmovzxwd(xmm1, xmm6, Assembler::AVX_512bit); 4872 4873 __ vextracti64x4(xmm6, xmm4, 0); 4874 __ vpmovzxwd(xmm2, xmm6, Assembler::AVX_512bit); 4875 __ vextracti64x4(xmm6, xmm4, 1); 4876 __ vpmovzxwd(xmm3, xmm6, Assembler::AVX_512bit); 4877 4878 __ vextracti64x4(xmm4, xmm5, 0); 4879 __ vpmovzxwd(xmm6, xmm4, Assembler::AVX_512bit); 4880 4881 __ vextracti64x4(xmm4, xmm5, 1); 4882 __ vpmovzxwd(xmm7, xmm4, Assembler::AVX_512bit); 4883 4884 __ kmovql(k2, k1); 4885 __ evpgatherdd(xmm4, k2, Address(r11, xmm0, Address::times_4, 0), Assembler::AVX_512bit); 4886 __ kmovql(k2, k1); 4887 __ evpgatherdd(xmm5, k2, Address(r11, xmm1, Address::times_4, 0), Assembler::AVX_512bit); 4888 __ kmovql(k2, k1); 4889 __ evpgatherdd(xmm8, k2, Address(r11, xmm2, Address::times_4, 0), Assembler::AVX_512bit); 4890 __ kmovql(k2, k1); 4891 __ evpgatherdd(xmm9, k2, Address(r11, xmm3, Address::times_4, 0), Assembler::AVX_512bit); 4892 __ kmovql(k2, k1); 4893 __ evpgatherdd(xmm10, k2, Address(r11, xmm6, Address::times_4, 0), Assembler::AVX_512bit); 4894 __ kmovql(k2, k1); 4895 __ evpgatherdd(xmm11, k2, Address(r11, xmm7, Address::times_4, 0), Assembler::AVX_512bit); 4896 4897 //Down convert dword to byte. Final output is 16*6 = 96 bytes long 4898 __ evpmovdb(Address(dest, dp, Address::times_1, 0), xmm4, Assembler::AVX_512bit); 4899 __ evpmovdb(Address(dest, dp, Address::times_1, 16), xmm5, Assembler::AVX_512bit); 4900 __ evpmovdb(Address(dest, dp, Address::times_1, 32), xmm8, Assembler::AVX_512bit); 4901 __ evpmovdb(Address(dest, dp, Address::times_1, 48), xmm9, Assembler::AVX_512bit); 4902 __ evpmovdb(Address(dest, dp, Address::times_1, 64), xmm10, Assembler::AVX_512bit); 4903 __ evpmovdb(Address(dest, dp, Address::times_1, 80), xmm11, Assembler::AVX_512bit); 4904 4905 __ addq(dest, 96); 4906 __ addq(source, 72); 4907 __ subq(length, 72); 4908 __ jmp(L_process80); 4909 4910 // Vector Base64 implementation generating 32 bytes of encoded data 4911 __ BIND(L_process32); 4912 __ cmpl(length, 32); 4913 __ jcc(Assembler::below, L_process3); 4914 __ evmovdquq(xmm0, Address(source, start_offset), Assembler::AVX_256bit); 4915 __ vpermq(xmm0, xmm0, 148, Assembler::AVX_256bit); 4916 __ vpshufb(xmm6, xmm0, xmm12, Assembler::AVX_256bit); 4917 __ vpmovzxbw(xmm6, xmm6, Assembler::AVX_512bit); 4918 __ evpsrlvw(xmm2, xmm6, xmm13, Assembler::AVX_512bit); 4919 __ evpsllvw(xmm3, xmm6, xmm14, Assembler::AVX_512bit); 4920 4921 __ vpsrlq(xmm2, xmm2, 8, Assembler::AVX_512bit); 4922 __ vpsllq(xmm3, xmm3, 8, Assembler::AVX_512bit); 4923 __ vpandq(xmm3, xmm3, xmm15, Assembler::AVX_512bit); 4924 __ vporq(xmm1, xmm2, xmm3, Assembler::AVX_512bit); 4925 __ vpsrlq(xmm1, xmm1, 8, Assembler::AVX_512bit); 4926 __ vextracti64x4(xmm9, xmm1, 0); 4927 __ vpmovzxwd(xmm6, xmm9, Assembler::AVX_512bit); 4928 __ vextracti64x4(xmm9, xmm1, 1); 4929 __ vpmovzxwd(xmm5, xmm9, Assembler::AVX_512bit); 4930 __ kmovql(k2, k1); 4931 __ evpgatherdd(xmm8, k2, Address(r11, xmm6, Address::times_4, 0), Assembler::AVX_512bit); 4932 __ kmovql(k2, k1); 4933 __ evpgatherdd(xmm10, k2, Address(r11, xmm5, Address::times_4, 0), Assembler::AVX_512bit); 4934 __ evpmovdb(Address(dest, dp, Address::times_1, 0), xmm8, Assembler::AVX_512bit); 4935 __ evpmovdb(Address(dest, dp, Address::times_1, 16), xmm10, Assembler::AVX_512bit); 4936 __ subq(length, 24); 4937 __ addq(dest, 32); 4938 __ addq(source, 24); 4939 __ jmp(L_process32); 4940 4941 // Scalar data processing takes 3 bytes at a time and produces 4 bytes of encoded data 4942 /* This code corresponds to the scalar version of the 
following snippet in Base64.java 4943 ** int bits = (src[sp0++] & 0xff) << 16 | (src[sp0++] & 0xff) << 8 | (src[sp0++] & 0xff); 4944 ** dst[dp0++] = (byte)base64[(bits >>> 18) & 0x3f]; 4945 ** dst[dp0++] = (byte)base64[(bits >>> 12) & 0x3f]; 4946 ** dst[dp0++] = (byte)base64[(bits >>> 6) & 0x3f]; 4947 ** dst[dp0++] = (byte)base64[bits & 0x3f];*/ 4948 __ BIND(L_process3); 4949 __ cmpl(length, 3); 4950 __ jcc(Assembler::below, L_exit); 4951 // Read 1 byte at a time 4952 __ movzbl(rax, Address(source, start_offset)); 4953 __ shll(rax, 0x10); 4954 __ movl(r15, rax); 4955 __ movzbl(rax, Address(source, start_offset, Address::times_1, 1)); 4956 __ shll(rax, 0x8); 4957 __ movzwl(rax, rax); 4958 __ orl(r15, rax); 4959 __ movzbl(rax, Address(source, start_offset, Address::times_1, 2)); 4960 __ orl(rax, r15); 4961 // Save 3 bytes read in r15 4962 __ movl(r15, rax); 4963 __ shrl(rax, 0x12); 4964 __ andl(rax, 0x3f); 4965 // rax contains the index, r11 contains base64 lookup table 4966 __ movb(rax, Address(r11, rax, Address::times_4)); 4967 // Write the encoded byte to destination 4968 __ movb(Address(dest, dp, Address::times_1, 0), rax); 4969 __ movl(rax, r15); 4970 __ shrl(rax, 0xc); 4971 __ andl(rax, 0x3f); 4972 __ movb(rax, Address(r11, rax, Address::times_4)); 4973 __ movb(Address(dest, dp, Address::times_1, 1), rax); 4974 __ movl(rax, r15); 4975 __ shrl(rax, 0x6); 4976 __ andl(rax, 0x3f); 4977 __ movb(rax, Address(r11, rax, Address::times_4)); 4978 __ movb(Address(dest, dp, Address::times_1, 2), rax); 4979 __ movl(rax, r15); 4980 __ andl(rax, 0x3f); 4981 __ movb(rax, Address(r11, rax, Address::times_4)); 4982 __ movb(Address(dest, dp, Address::times_1, 3), rax); 4983 __ subl(length, 3); 4984 __ addq(dest, 4); 4985 __ addq(source, 3); 4986 __ jmp(L_process3); 4987 __ BIND(L_exit); 4988 // restore k1 register value 4989 __ kmovql(k1, rbx); 4990 __ pop(rbx); 4991 __ pop(r15); 4992 __ pop(r14); 4993 __ pop(r13); 4994 __ pop(r12); 4995 __ leave(); 4996 __ ret(0); 4997 return start; 4998 } 4999 5000 /** 5001 * Arguments: 5002 * 5003 * Inputs: 5004 * c_rarg0 - int crc 5005 * c_rarg1 - byte* buf 5006 * c_rarg2 - int length 5007 * 5008 * Output: 5009 * rax - int crc result 5010 */ 5011 address generate_updateBytesCRC32() { 5012 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions"); 5013 5014 __ align(CodeEntryAlignment); 5015 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32"); 5016 5017 address start = __ pc(); 5018 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 5019 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
5020 // rscratch1: r10 5021 const Register crc = c_rarg0; // crc 5022 const Register buf = c_rarg1; // source java byte array address 5023 const Register len = c_rarg2; // length 5024 const Register table = c_rarg3; // crc_table address (reuse register) 5025 const Register tmp = r11; 5026 assert_different_registers(crc, buf, len, table, tmp, rax); 5027 5028 BLOCK_COMMENT("Entry:"); 5029 __ enter(); // required for proper stackwalking of RuntimeStub frame 5030 5031 __ kernel_crc32(crc, buf, len, table, tmp); 5032 5033 __ movl(rax, crc); 5034 __ vzeroupper(); 5035 __ leave(); // required for proper stackwalking of RuntimeStub frame 5036 __ ret(0); 5037 5038 return start; 5039 } 5040 5041 /** 5042 * Arguments: 5043 * 5044 * Inputs: 5045 * c_rarg0 - int crc 5046 * c_rarg1 - byte* buf 5047 * c_rarg2 - long length 5048 * c_rarg3 - table_start - optional (present only when doing a library_call, 5049 * not used by x86 algorithm) 5050 * 5051 * Output: 5052 * rax - int crc result 5053 */ 5054 address generate_updateBytesCRC32C(bool is_pclmulqdq_supported) { 5055 assert(UseCRC32CIntrinsics, "need SSE4_2"); 5056 __ align(CodeEntryAlignment); 5057 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C"); 5058 address start = __ pc(); 5059 //reg.arg int#0 int#1 int#2 int#3 int#4 int#5 float regs 5060 //Windows RCX RDX R8 R9 none none XMM0..XMM3 5061 //Lin / Sol RDI RSI RDX RCX R8 R9 XMM0..XMM7 5062 const Register crc = c_rarg0; // crc 5063 const Register buf = c_rarg1; // source java byte array address 5064 const Register len = c_rarg2; // length 5065 const Register a = rax; 5066 const Register j = r9; 5067 const Register k = r10; 5068 const Register l = r11; 5069 #ifdef _WIN64 5070 const Register y = rdi; 5071 const Register z = rsi; 5072 #else 5073 const Register y = rcx; 5074 const Register z = r8; 5075 #endif 5076 assert_different_registers(crc, buf, len, a, j, k, l, y, z); 5077 5078 BLOCK_COMMENT("Entry:"); 5079 __ enter(); // required for proper stackwalking of RuntimeStub frame 5080 #ifdef _WIN64 5081 __ push(y); 5082 __ push(z); 5083 #endif 5084 __ crc32c_ipl_alg2_alt2(crc, buf, len, 5085 a, j, k, 5086 l, y, z, 5087 c_farg0, c_farg1, c_farg2, 5088 is_pclmulqdq_supported); 5089 __ movl(rax, crc); 5090 #ifdef _WIN64 5091 __ pop(z); 5092 __ pop(y); 5093 #endif 5094 __ vzeroupper(); 5095 __ leave(); // required for proper stackwalking of RuntimeStub frame 5096 __ ret(0); 5097 5098 return start; 5099 } 5100 5101 /** 5102 * Arguments: 5103 * 5104 * Input: 5105 * c_rarg0 - x address 5106 * c_rarg1 - x length 5107 * c_rarg2 - y address 5108 * c_rarg3 - y length 5109 * not Win64 5110 * c_rarg4 - z address 5111 * c_rarg5 - z length 5112 * Win64 5113 * rsp+40 - z address 5114 * rsp+48 - z length 5115 */ 5116 address generate_multiplyToLen() { 5117 __ align(CodeEntryAlignment); 5118 StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); 5119 5120 address start = __ pc(); 5121 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 5122 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 5123 const Register x = rdi; 5124 const Register xlen = rax; 5125 const Register y = rsi; 5126 const Register ylen = rcx; 5127 const Register z = r8; 5128 const Register zlen = r11; 5129 5130 // Next registers will be saved on the stack in multiply_to_len().
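// (For orientation, a scalar sketch of what multiply_to_len computes,
// mirroring the schoolbook multiplication of BigInteger.multiplyToLen:
// 32-bit limbs, most significant word first. This simplified form assumes
// z[] is zero-initialized, which the real code does not require.
//
//   for (int i = xlen - 1; i >= 0; i--) {
//     uint64_t carry = 0;
//     for (int j = ylen - 1, k = i + j + 1; j >= 0; j--, k--) {
//       uint64_t product = (uint64_t)x[i] * y[j] + z[k] + carry;
//       z[k] = (uint32_t)product;  // low 32 bits stay in this column
//       carry = product >> 32;     // high 32 bits carry into the next column
//     }
//     z[i] = (uint32_t)carry;
//   }
// )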
5131 const Register tmp1 = r12; 5132 const Register tmp2 = r13; 5133 const Register tmp3 = r14; 5134 const Register tmp4 = r15; 5135 const Register tmp5 = rbx; 5136 5137 BLOCK_COMMENT("Entry:"); 5138 __ enter(); // required for proper stackwalking of RuntimeStub frame 5139 5140 #ifndef _WIN64 5141 __ movptr(zlen, r9); // Save r9 in r11 - zlen 5142 #endif 5143 setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx 5144 // ylen => rcx, z => r8, zlen => r11 5145 // r9 and r10 may be used to save non-volatile registers 5146 #ifdef _WIN64 5147 // last 2 arguments (#4, #5) are on stack on Win64 5148 __ movptr(z, Address(rsp, 6 * wordSize)); 5149 __ movptr(zlen, Address(rsp, 7 * wordSize)); 5150 #endif 5151 5152 __ movptr(xlen, rsi); 5153 __ movptr(y, rdx); 5154 __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5); 5155 5156 restore_arg_regs(); 5157 5158 __ leave(); // required for proper stackwalking of RuntimeStub frame 5159 __ ret(0); 5160 5161 return start; 5162 } 5163 5164 /** 5165 * Arguments: 5166 * 5167 * Input: 5168 * c_rarg0 - obja address 5169 * c_rarg1 - objb address 5170 * c_rarg2 - length length 5171 * c_rarg3 - scale log2_array_indxscale 5172 * 5173 * Output: 5174 * rax - int; >= 0: index of the first mismatch, < 0: bitwise complement of the number of remaining tail elements 5175 */ 5176 address generate_vectorizedMismatch() { 5177 __ align(CodeEntryAlignment); 5178 StubCodeMark mark(this, "StubRoutines", "vectorizedMismatch"); 5179 address start = __ pc(); 5180 5181 BLOCK_COMMENT("Entry:"); 5182 __ enter(); 5183 5184 #ifdef _WIN64 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 5185 const Register scale = c_rarg0; //rcx, will exchange with r9 5186 const Register objb = c_rarg1; //rdx 5187 const Register length = c_rarg2; //r8 5188 const Register obja = c_rarg3; //r9 5189 __ xchgq(obja, scale); //now obja and scale contain the correct contents 5190 5191 const Register tmp1 = r10; 5192 const Register tmp2 = r11; 5193 #endif 5194 #ifndef _WIN64 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 5195 const Register obja = c_rarg0; //U:rdi 5196 const Register objb = c_rarg1; //U:rsi 5197 const Register length = c_rarg2; //U:rdx 5198 const Register scale = c_rarg3; //U:rcx 5199 const Register tmp1 = r8; 5200 const Register tmp2 = r9; 5201 #endif 5202 const Register result = rax; //return value 5203 const XMMRegister vec0 = xmm0; 5204 const XMMRegister vec1 = xmm1; 5205 const XMMRegister vec2 = xmm2; 5206 5207 __ vectorized_mismatch(obja, objb, length, scale, result, tmp1, tmp2, vec0, vec1, vec2); 5208 5209 __ vzeroupper(); 5210 __ leave(); 5211 __ ret(0); 5212 5213 return start; 5214 } 5215 5216 /** 5217 * Arguments: 5218 * 5219 * Input: 5220 * c_rarg0 - x address 5221 * c_rarg1 - x length 5222 * c_rarg2 - z address 5223 * c_rarg3 - z length 5224 * 5225 */ 5226 address generate_squareToLen() { 5227 5228 __ align(CodeEntryAlignment); 5229 StubCodeMark mark(this, "StubRoutines", "squareToLen"); 5230 5231 address start = __ pc(); 5232 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 5233 // Unix: rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...)
5234 const Register x = rdi; 5235 const Register len = rsi; 5236 const Register z = r8; 5237 const Register zlen = rcx; 5238 5239 const Register tmp1 = r12; 5240 const Register tmp2 = r13; 5241 const Register tmp3 = r14; 5242 const Register tmp4 = r15; 5243 const Register tmp5 = rbx; 5244 5245 BLOCK_COMMENT("Entry:"); 5246 __ enter(); // required for proper stackwalking of RuntimeStub frame 5247 5248 setup_arg_regs(4); // x => rdi, len => rsi, z => rdx 5249 // zlen => rcx 5250 // r9 and r10 may be used to save non-volatile registers 5251 __ movptr(r8, rdx); 5252 __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 5253 5254 restore_arg_regs(); 5255 5256 __ leave(); // required for proper stackwalking of RuntimeStub frame 5257 __ ret(0); 5258 5259 return start; 5260 } 5261 5262 /** 5263 * Arguments: 5264 * 5265 * Input: 5266 * c_rarg0 - out address 5267 * c_rarg1 - in address 5268 * c_rarg2 - offset 5269 * c_rarg3 - len 5270 * not Win64 5271 * c_rarg4 - k 5272 * Win64 5273 * rsp+40 - k 5274 */ 5275 address generate_mulAdd() { 5276 __ align(CodeEntryAlignment); 5277 StubCodeMark mark(this, "StubRoutines", "mulAdd"); 5278 5279 address start = __ pc(); 5280 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 5281 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 5282 const Register out = rdi; 5283 const Register in = rsi; 5284 const Register offset = r11; 5285 const Register len = rcx; 5286 const Register k = r8; 5287 5288 // Next registers will be saved on stack in mul_add(). 5289 const Register tmp1 = r12; 5290 const Register tmp2 = r13; 5291 const Register tmp3 = r14; 5292 const Register tmp4 = r15; 5293 const Register tmp5 = rbx; 5294 5295 BLOCK_COMMENT("Entry:"); 5296 __ enter(); // required for proper stackwalking of RuntimeStub frame 5297 5298 setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx 5299 // len => rcx, k => r8 5300 // r9 and r10 may be used to save non-volatile registers 5301 #ifdef _WIN64 5302 // last argument is on stack on Win64 5303 __ movl(k, Address(rsp, 6 * wordSize)); 5304 #endif 5305 __ movptr(r11, rdx); // move offset in rdx to offset(r11) 5306 __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 5307 5308 restore_arg_regs(); 5309 5310 __ leave(); // required for proper stackwalking of RuntimeStub frame 5311 __ ret(0); 5312 5313 return start; 5314 } 5315 5316 address generate_libmExp() { 5317 StubCodeMark mark(this, "StubRoutines", "libmExp"); 5318 5319 address start = __ pc(); 5320 5321 const XMMRegister x0 = xmm0; 5322 const XMMRegister x1 = xmm1; 5323 const XMMRegister x2 = xmm2; 5324 const XMMRegister x3 = xmm3; 5325 5326 const XMMRegister x4 = xmm4; 5327 const XMMRegister x5 = xmm5; 5328 const XMMRegister x6 = xmm6; 5329 const XMMRegister x7 = xmm7; 5330 5331 const Register tmp = r11; 5332 5333 BLOCK_COMMENT("Entry:"); 5334 __ enter(); // required for proper stackwalking of RuntimeStub frame 5335 5336 __ fast_exp(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 5337 5338 __ leave(); // required for proper stackwalking of RuntimeStub frame 5339 __ ret(0); 5340 5341 return start; 5342 5343 } 5344 5345 address generate_libmLog() { 5346 StubCodeMark mark(this, "StubRoutines", "libmLog"); 5347 5348 address start = __ pc(); 5349 5350 const XMMRegister x0 = xmm0; 5351 const XMMRegister x1 = xmm1; 5352 const XMMRegister x2 = xmm2; 5353 const XMMRegister x3 = xmm3; 5354 5355 const XMMRegister x4 = xmm4; 5356 const XMMRegister x5 = xmm5; 5357 const XMMRegister x6 = xmm6; 5358 const XMMRegister x7 = xmm7; 
5359 5360 const Register tmp1 = r11; 5361 const Register tmp2 = r8; 5362 5363 BLOCK_COMMENT("Entry:"); 5364 __ enter(); // required for proper stackwalking of RuntimeStub frame 5365 5366 __ fast_log(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2); 5367 5368 __ leave(); // required for proper stackwalking of RuntimeStub frame 5369 __ ret(0); 5370 5371 return start; 5372 5373 } 5374 5375 address generate_libmLog10() { 5376 StubCodeMark mark(this, "StubRoutines", "libmLog10"); 5377 5378 address start = __ pc(); 5379 5380 const XMMRegister x0 = xmm0; 5381 const XMMRegister x1 = xmm1; 5382 const XMMRegister x2 = xmm2; 5383 const XMMRegister x3 = xmm3; 5384 5385 const XMMRegister x4 = xmm4; 5386 const XMMRegister x5 = xmm5; 5387 const XMMRegister x6 = xmm6; 5388 const XMMRegister x7 = xmm7; 5389 5390 const Register tmp = r11; 5391 5392 BLOCK_COMMENT("Entry:"); 5393 __ enter(); // required for proper stackwalking of RuntimeStub frame 5394 5395 __ fast_log10(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 5396 5397 __ leave(); // required for proper stackwalking of RuntimeStub frame 5398 __ ret(0); 5399 5400 return start; 5401 5402 } 5403 5404 address generate_libmPow() { 5405 StubCodeMark mark(this, "StubRoutines", "libmPow"); 5406 5407 address start = __ pc(); 5408 5409 const XMMRegister x0 = xmm0; 5410 const XMMRegister x1 = xmm1; 5411 const XMMRegister x2 = xmm2; 5412 const XMMRegister x3 = xmm3; 5413 5414 const XMMRegister x4 = xmm4; 5415 const XMMRegister x5 = xmm5; 5416 const XMMRegister x6 = xmm6; 5417 const XMMRegister x7 = xmm7; 5418 5419 const Register tmp1 = r8; 5420 const Register tmp2 = r9; 5421 const Register tmp3 = r10; 5422 const Register tmp4 = r11; 5423 5424 BLOCK_COMMENT("Entry:"); 5425 __ enter(); // required for proper stackwalking of RuntimeStub frame 5426 5427 __ fast_pow(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 5428 5429 __ leave(); // required for proper stackwalking of RuntimeStub frame 5430 __ ret(0); 5431 5432 return start; 5433 5434 } 5435 5436 address generate_libmSin() { 5437 StubCodeMark mark(this, "StubRoutines", "libmSin"); 5438 5439 address start = __ pc(); 5440 5441 const XMMRegister x0 = xmm0; 5442 const XMMRegister x1 = xmm1; 5443 const XMMRegister x2 = xmm2; 5444 const XMMRegister x3 = xmm3; 5445 5446 const XMMRegister x4 = xmm4; 5447 const XMMRegister x5 = xmm5; 5448 const XMMRegister x6 = xmm6; 5449 const XMMRegister x7 = xmm7; 5450 5451 const Register tmp1 = r8; 5452 const Register tmp2 = r9; 5453 const Register tmp3 = r10; 5454 const Register tmp4 = r11; 5455 5456 BLOCK_COMMENT("Entry:"); 5457 __ enter(); // required for proper stackwalking of RuntimeStub frame 5458 5459 #ifdef _WIN64 5460 __ push(rsi); 5461 __ push(rdi); 5462 #endif 5463 __ fast_sin(x0, x1, x2, x3, x4, x5, x6, x7, rax, rbx, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 5464 5465 #ifdef _WIN64 5466 __ pop(rdi); 5467 __ pop(rsi); 5468 #endif 5469 5470 __ leave(); // required for proper stackwalking of RuntimeStub frame 5471 __ ret(0); 5472 5473 return start; 5474 5475 } 5476 5477 address generate_libmCos() { 5478 StubCodeMark mark(this, "StubRoutines", "libmCos"); 5479 5480 address start = __ pc(); 5481 5482 const XMMRegister x0 = xmm0; 5483 const XMMRegister x1 = xmm1; 5484 const XMMRegister x2 = xmm2; 5485 const XMMRegister x3 = xmm3; 5486 5487 const XMMRegister x4 = xmm4; 5488 const XMMRegister x5 = xmm5; 5489 const XMMRegister x6 = xmm6; 5490 const XMMRegister x7 = xmm7; 5491 5492 const Register tmp1 = r8; 5493 const Register tmp2 = r9; 5494 const 
Register tmp3 = r10; 5495 const Register tmp4 = r11; 5496 5497 BLOCK_COMMENT("Entry:"); 5498 __ enter(); // required for proper stackwalking of RuntimeStub frame 5499 5500 #ifdef _WIN64 5501 __ push(rsi); 5502 __ push(rdi); 5503 #endif 5504 __ fast_cos(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 5505 5506 #ifdef _WIN64 5507 __ pop(rdi); 5508 __ pop(rsi); 5509 #endif 5510 5511 __ leave(); // required for proper stackwalking of RuntimeStub frame 5512 __ ret(0); 5513 5514 return start; 5515 5516 } 5517 5518 address generate_libmTan() { 5519 StubCodeMark mark(this, "StubRoutines", "libmTan"); 5520 5521 address start = __ pc(); 5522 5523 const XMMRegister x0 = xmm0; 5524 const XMMRegister x1 = xmm1; 5525 const XMMRegister x2 = xmm2; 5526 const XMMRegister x3 = xmm3; 5527 5528 const XMMRegister x4 = xmm4; 5529 const XMMRegister x5 = xmm5; 5530 const XMMRegister x6 = xmm6; 5531 const XMMRegister x7 = xmm7; 5532 5533 const Register tmp1 = r8; 5534 const Register tmp2 = r9; 5535 const Register tmp3 = r10; 5536 const Register tmp4 = r11; 5537 5538 BLOCK_COMMENT("Entry:"); 5539 __ enter(); // required for proper stackwalking of RuntimeStub frame 5540 5541 #ifdef _WIN64 5542 __ push(rsi); 5543 __ push(rdi); 5544 #endif 5545 __ fast_tan(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 5546 5547 #ifdef _WIN64 5548 __ pop(rdi); 5549 __ pop(rsi); 5550 #endif 5551 5552 __ leave(); // required for proper stackwalking of RuntimeStub frame 5553 __ ret(0); 5554 5555 return start; 5556 5557 } 5558 5559 #undef __ 5560 #define __ masm-> 5561 5562 // Continuation point for throwing of implicit exceptions that are 5563 // not handled in the current activation. Fabricates an exception 5564 // oop and initiates normal exception dispatching in this 5565 // frame. Since we need to preserve callee-saved values (currently 5566 // only for C2, but done for C1 as well) we need a callee-saved oop 5567 // map and therefore have to make these stubs into RuntimeStubs 5568 // rather than BufferBlobs. If the compiler needs all registers to 5569 // be preserved between the fault point and the exception handler 5570 // then it must assume responsibility for that in 5571 // AbstractCompiler::continuation_for_implicit_null_exception or 5572 // continuation_for_implicit_division_by_zero_exception. All other 5573 // implicit exceptions (e.g., NullPointerException or 5574 // AbstractMethodError on entry) are either at call sites or 5575 // otherwise assume that stack unwinding will be initiated, so 5576 // caller saved registers were assumed volatile in the compiler. 5577 address generate_throw_exception(const char* name, 5578 address runtime_entry, 5579 Register arg1 = noreg, 5580 Register arg2 = noreg) { 5581 // Information about frame layout at time of blocking runtime call. 5582 // Note that we only have to preserve callee-saved registers since 5583 // the compilers are responsible for supplying a continuation point 5584 // if they expect all registers to be preserved. 
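// A worked instance of the layout below, assuming frame::arg_reg_save_area_bytes
// is 32 on Win64 and 0 on Linux (see frame_x86.hpp): on Win64, rbp_off = 32/4 = 8
// and framesize = 12 four-byte slots, so the prolog's subptr reserves
// (12 - 4) << LogBytesPerInt = 32 bytes of register-argument save area; on Linux
// framesize = 4 and nothing extra is reserved. Either way framesize/2 is even,
// which keeps rsp 16-byte aligned (see the assert in the body).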
5585 enum layout { 5586 rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt, 5587 rbp_off2, 5588 return_off, 5589 return_off2, 5590 framesize // inclusive of return address 5591 }; 5592 5593 int insts_size = 512; 5594 int locs_size = 64; 5595 5596 CodeBuffer code(name, insts_size, locs_size); 5597 OopMapSet* oop_maps = new OopMapSet(); 5598 MacroAssembler* masm = new MacroAssembler(&code); 5599 5600 address start = __ pc(); 5601 5602 // This is an inlined and slightly modified version of call_VM 5603 // which has the ability to fetch the return PC out of 5604 // thread-local storage and also sets up last_Java_sp slightly 5605 // differently than the real call_VM 5606 5607 __ enter(); // required for proper stackwalking of RuntimeStub frame 5608 5609 assert(is_even(framesize/2), "sp not 16-byte aligned"); 5610 5611 // return address and rbp are already in place 5612 __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog 5613 5614 int frame_complete = __ pc() - start; 5615 5616 // Set up last_Java_sp and last_Java_fp 5617 address the_pc = __ pc(); 5618 __ set_last_Java_frame(rsp, rbp, the_pc); 5619 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack 5620 5621 // Call runtime 5622 if (arg1 != noreg) { 5623 assert(arg2 != c_rarg1, "clobbered"); 5624 __ movptr(c_rarg1, arg1); 5625 } 5626 if (arg2 != noreg) { 5627 __ movptr(c_rarg2, arg2); 5628 } 5629 __ movptr(c_rarg0, r15_thread); 5630 BLOCK_COMMENT("call runtime_entry"); 5631 __ call(RuntimeAddress(runtime_entry)); 5632 5633 // Generate oop map 5634 OopMap* map = new OopMap(framesize, 0); 5635 5636 oop_maps->add_gc_map(the_pc - start, map); 5637 5638 __ reset_last_Java_frame(true); 5639 5640 __ leave(); // required for proper stackwalking of RuntimeStub frame 5641 5642 // check for pending exceptions 5643 #ifdef ASSERT 5644 Label L; 5645 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), 5646 (int32_t) NULL_WORD); 5647 __ jcc(Assembler::notEqual, L); 5648 __ should_not_reach_here(); 5649 __ bind(L); 5650 #endif // ASSERT 5651 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 5652 5653 5654 // codeBlob framesize is in words (not VMRegImpl::slot_size) 5655 RuntimeStub* stub = 5656 RuntimeStub::new_runtime_stub(name, 5657 &code, 5658 frame_complete, 5659 (framesize >> (LogBytesPerWord - LogBytesPerInt)), 5660 oop_maps, false); 5661 return stub->entry_point(); 5662 } 5663 5664 void create_control_words() { 5665 // Round to nearest, 53-bit mode, exceptions masked 5666 StubRoutines::_fpu_cntrl_wrd_std = 0x027F; 5667 // Round to zero, 53-bit mode, exceptions masked 5668 StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F; 5669 // Round to nearest, 24-bit mode, exceptions masked 5670 StubRoutines::_fpu_cntrl_wrd_24 = 0x007F; 5671 // Round to nearest, 64-bit mode, exceptions masked 5672 StubRoutines::_fpu_cntrl_wrd_64 = 0x037F; 5673 // Round to nearest, all exceptions masked (MXCSR default) 5674 StubRoutines::_mxcsr_std = 0x1F80; 5675 // Note: the following two constants are 80-bit values 5676 // layout is critical for correct loading by FPU.
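// The 80-bit x87 extended format is: a 64-bit significand with an explicit
// integer bit, a 15-bit exponent biased by 16383, and a sign bit. So
// {0x00000000, 0x80000000, 0x03ff} decodes as 1.0 * 2^(0x03ff - 16383) =
// 2^-15360, and the 0x7bff exponent below gives 1.0 * 2^(0x7bff - 16383) =
// 2^+15360, matching the inline comments.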
5677 // Bias for strict fp multiply/divide 5678 StubRoutines::_fpu_subnormal_bias1[0]= 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000 5679 StubRoutines::_fpu_subnormal_bias1[1]= 0x80000000; 5680 StubRoutines::_fpu_subnormal_bias1[2]= 0x03ff; 5681 // Un-Bias for strict fp multiply/divide 5682 StubRoutines::_fpu_subnormal_bias2[0]= 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000 5683 StubRoutines::_fpu_subnormal_bias2[1]= 0x80000000; 5684 StubRoutines::_fpu_subnormal_bias2[2]= 0x7bff; 5685 } 5686 5687 // Initialization 5688 void generate_initial() { 5689 // Generates all stubs and initializes the entry points 5690 5691 // These platform-specific settings are needed by generate_call_stub() 5692 create_control_words(); 5693 5694 // entry points that exist on all platforms. Note: this is code 5695 // that could be shared among different platforms - however the 5696 // benefit seems to be smaller than the disadvantage of having a 5697 // much more complicated generator structure. See also comment in 5698 // stubRoutines.hpp. 5699 5700 StubRoutines::_forward_exception_entry = generate_forward_exception(); 5701 5702 StubRoutines::_call_stub_entry = 5703 generate_call_stub(StubRoutines::_call_stub_return_address); 5704 5705 // is referenced by megamorphic call 5706 StubRoutines::_catch_exception_entry = generate_catch_exception(); 5707 5708 // atomic calls 5709 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg(); 5710 StubRoutines::_atomic_xchg_long_entry = generate_atomic_xchg_long(); 5711 StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg(); 5712 StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte(); 5713 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long(); 5714 StubRoutines::_atomic_add_entry = generate_atomic_add(); 5715 StubRoutines::_atomic_add_long_entry = generate_atomic_add_long(); 5716 StubRoutines::_fence_entry = generate_orderaccess_fence(); 5717 5718 // platform dependent 5719 StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp(); 5720 StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp(); 5721 5722 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr(); 5723 5724 // Build this early so it's available for the interpreter.
5725 StubRoutines::_throw_StackOverflowError_entry = 5726 generate_throw_exception("StackOverflowError throw_exception", 5727 CAST_FROM_FN_PTR(address, 5728 SharedRuntime:: 5729 throw_StackOverflowError)); 5730 StubRoutines::_throw_delayed_StackOverflowError_entry = 5731 generate_throw_exception("delayed StackOverflowError throw_exception", 5732 CAST_FROM_FN_PTR(address, 5733 SharedRuntime:: 5734 throw_delayed_StackOverflowError)); 5735 if (UseCRC32Intrinsics) { 5736 // set the table address before generating the stubs that use it 5737 StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table; 5738 StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32(); 5739 } 5740 5741 if (UseCRC32CIntrinsics) { 5742 bool supports_clmul = VM_Version::supports_clmul(); 5743 StubRoutines::x86::generate_CRC32C_table(supports_clmul); 5744 StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table; 5745 StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul); 5746 } 5747 if (VM_Version::supports_sse2() && UseLibmIntrinsic && InlineIntrinsics) { 5748 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) || 5749 vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos) || 5750 vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) { 5751 StubRoutines::x86::_ONEHALF_adr = (address)StubRoutines::x86::_ONEHALF; 5752 StubRoutines::x86::_P_2_adr = (address)StubRoutines::x86::_P_2; 5753 StubRoutines::x86::_SC_4_adr = (address)StubRoutines::x86::_SC_4; 5754 StubRoutines::x86::_Ctable_adr = (address)StubRoutines::x86::_Ctable; 5755 StubRoutines::x86::_SC_2_adr = (address)StubRoutines::x86::_SC_2; 5756 StubRoutines::x86::_SC_3_adr = (address)StubRoutines::x86::_SC_3; 5757 StubRoutines::x86::_SC_1_adr = (address)StubRoutines::x86::_SC_1; 5758 StubRoutines::x86::_PI_INV_TABLE_adr = (address)StubRoutines::x86::_PI_INV_TABLE; 5759 StubRoutines::x86::_PI_4_adr = (address)StubRoutines::x86::_PI_4; 5760 StubRoutines::x86::_PI32INV_adr = (address)StubRoutines::x86::_PI32INV; 5761 StubRoutines::x86::_SIGN_MASK_adr = (address)StubRoutines::x86::_SIGN_MASK; 5762 StubRoutines::x86::_P_1_adr = (address)StubRoutines::x86::_P_1; 5763 StubRoutines::x86::_P_3_adr = (address)StubRoutines::x86::_P_3; 5764 StubRoutines::x86::_NEG_ZERO_adr = (address)StubRoutines::x86::_NEG_ZERO; 5765 } 5766 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dexp)) { 5767 StubRoutines::_dexp = generate_libmExp(); 5768 } 5769 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) { 5770 StubRoutines::_dlog = generate_libmLog(); 5771 } 5772 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) { 5773 StubRoutines::_dlog10 = generate_libmLog10(); 5774 } 5775 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) { 5776 StubRoutines::_dpow = generate_libmPow(); 5777 } 5778 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) { 5779 StubRoutines::_dsin = generate_libmSin(); 5780 } 5781 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) { 5782 StubRoutines::_dcos = generate_libmCos(); 5783 } 5784 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) { 5785 StubRoutines::_dtan = generate_libmTan(); 5786 } 5787 } 5788 } 5789 5790 void generate_all() { 5791 // Generates all stubs and initializes the entry points 5792 5793 // These entry points require SharedInfo::stack0 to be set up in 5794 // non-core builds and need to be relocatable, so they each 5795 // fabricate a RuntimeStub internally.
5796 StubRoutines::_throw_AbstractMethodError_entry = 5797 generate_throw_exception("AbstractMethodError throw_exception", 5798 CAST_FROM_FN_PTR(address, 5799 SharedRuntime:: 5800 throw_AbstractMethodError)); 5801 5802 StubRoutines::_throw_IncompatibleClassChangeError_entry = 5803 generate_throw_exception("IncompatibleClassChangeError throw_exception", 5804 CAST_FROM_FN_PTR(address, 5805 SharedRuntime:: 5806 throw_IncompatibleClassChangeError)); 5807 5808 StubRoutines::_throw_NullPointerException_at_call_entry = 5809 generate_throw_exception("NullPointerException at call throw_exception", 5810 CAST_FROM_FN_PTR(address, 5811 SharedRuntime:: 5812 throw_NullPointerException_at_call)); 5813 5814 // entry points that are platform specific 5815 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup(); 5816 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup(); 5817 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup(); 5818 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup(); 5819 5820 StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF); 5821 StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000); 5822 StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF); 5823 StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000); 5824 5825 // support for verify_oop (must happen after universe_init) 5826 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); 5827 5828 // arraycopy stubs used by compilers 5829 generate_arraycopy_stubs(); 5830 5831 // don't bother generating these AES intrinsic stubs unless global flag is set 5832 if (UseAESIntrinsics) { 5833 StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others 5834 StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock(); 5835 StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock(); 5836 StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt(); 5837 if (VM_Version::supports_vaes() && VM_Version::supports_avx512vl() && VM_Version::supports_avx512dq() ) { 5838 StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptVectorAESCrypt(); 5839 } else { 5840 StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel(); 5841 } 5842 } 5843 if (UseAESCTRIntrinsics){ 5844 StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask(); 5845 StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel(); 5846 } 5847 5848 if (UseSHA1Intrinsics) { 5849 StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask(); 5850 StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask(); 5851 StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress"); 5852 StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB"); 5853 } 5854 if (UseSHA256Intrinsics) { 5855 StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256; 5856 char* dst = (char*)StubRoutines::x86::_k256_W; 5857 char* src = (char*)StubRoutines::x86::_k256; 5858 for (int ii = 0; ii < 16; ++ii) { 5859 memcpy(dst + 32 * ii, src + 16 * ii, 16); 5860 memcpy(dst + 32 * ii + 16, src + 16 * ii, 16); 5861 } 5862 StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W; 5863 StubRoutines::x86::_pshuffle_byte_flip_mask_addr = 
generate_pshuffle_byte_flip_mask(); 5864 StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress"); 5865 StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB"); 5866 } 5867 if (UseSHA512Intrinsics) { 5868 StubRoutines::x86::_k512_W_addr = (address)StubRoutines::x86::_k512_W; 5869 StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = generate_pshuffle_byte_flip_mask_sha512(); 5870 StubRoutines::_sha512_implCompress = generate_sha512_implCompress(false, "sha512_implCompress"); 5871 StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB"); 5872 } 5873 5874 // Generate GHASH intrinsics code 5875 if (UseGHASHIntrinsics) { 5876 StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask(); 5877 StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask(); 5878 StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks(); 5879 } 5880 5881 if (UseBASE64Intrinsics) { 5882 StubRoutines::x86::_and_mask = base64_and_mask_addr(); 5883 StubRoutines::x86::_bswap_mask = base64_bswap_mask_addr(); 5884 StubRoutines::x86::_base64_charset = base64_charset_addr(); 5885 StubRoutines::x86::_url_charset = base64url_charset_addr(); 5886 StubRoutines::x86::_gather_mask = base64_gather_mask_addr(); 5887 StubRoutines::x86::_left_shift_mask = base64_left_shift_mask_addr(); 5888 StubRoutines::x86::_right_shift_mask = base64_right_shift_mask_addr(); 5889 StubRoutines::_base64_encodeBlock = generate_base64_encodeBlock(); 5890 } 5891 5892 // Safefetch stubs. 5893 generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry, 5894 &StubRoutines::_safefetch32_fault_pc, 5895 &StubRoutines::_safefetch32_continuation_pc); 5896 generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry, 5897 &StubRoutines::_safefetchN_fault_pc, 5898 &StubRoutines::_safefetchN_continuation_pc); 5899 #ifdef COMPILER2 5900 if (UseMultiplyToLenIntrinsic) { 5901 StubRoutines::_multiplyToLen = generate_multiplyToLen(); 5902 } 5903 if (UseSquareToLenIntrinsic) { 5904 StubRoutines::_squareToLen = generate_squareToLen(); 5905 } 5906 if (UseMulAddIntrinsic) { 5907 StubRoutines::_mulAdd = generate_mulAdd(); 5908 } 5909 #ifndef _WINDOWS 5910 if (UseMontgomeryMultiplyIntrinsic) { 5911 StubRoutines::_montgomeryMultiply 5912 = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply); 5913 } 5914 if (UseMontgomerySquareIntrinsic) { 5915 StubRoutines::_montgomerySquare 5916 = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square); 5917 } 5918 #endif // WINDOWS 5919 #endif // COMPILER2 5920 5921 if (UseVectorizedMismatchIntrinsic) { 5922 StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch(); 5923 } 5924 } 5925 5926 public: 5927 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { 5928 if (all) { 5929 generate_all(); 5930 } else { 5931 generate_initial(); 5932 } 5933 } 5934 }; // end class declaration 5935 5936 void StubGenerator_generate(CodeBuffer* code, bool all) { 5937 StubGenerator g(code, all); 5938 }
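// Note: StubGenerator_generate is called twice during VM startup - once with
// all == false for the stubs the interpreter needs early (generate_initial)
// and once with all == true for the remaining stubs (generate_all); see the
// initialization code in stubRoutines.cpp.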