/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/zThreadLocalData.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions

// Stub Code definitions

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    // This can destroy rscratch1 if counter is far from the code cache
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter) \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif

  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    c_rarg4:   (interpreter) entry point              address
  //    c_rarg5:   parameters                             intptr_t*
  //    16(rbp):   parameter size (in words)              int
  //    24(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -12 [ argument word 1      ]
  // -11 [ saved r15            ] <--- rsp_after_call
  // -10 [ saved r14            ]
  //  -9 [ saved r13            ]
  //  -8 [ saved r12            ]
  //  -7 [ saved rbx            ]
  //  -6 [ call wrapper         ]
  //  -5 [ result               ]
  //  -4 [ result type          ]
  //  -3 [ method               ]
  //  -2 [ entry point          ]
  //  -1 [ parameters           ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ parameter size       ]
  //   3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    48(rbp):   (interpreter) entry point              address
  //    56(rbp):   parameters                             intptr_t*
  //    64(rbp):   parameter size (in words)              int
  //    72(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -60 [ argument word 1      ]
  // -59 [ saved xmm31          ] <--- rsp_after_call
  //     [ saved xmm16-xmm30    ] (EVEX enabled, else the space is blank)
  // -27 [ saved xmm15          ]
  //     [ saved xmm7-xmm14     ]
  //  -9 [ saved xmm6           ] (each xmm register takes 2 slots)
  //  -7 [ saved r15            ]
  //  -6 [ saved r14            ]
  //  -5 [ saved r13            ]
  //  -4 [ saved r12            ]
  //  -3 [ saved rdi            ]
  //  -2 [ saved rsi            ]
  //  -1 [ saved rbx            ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ call wrapper         ]
  //   3 [ result               ]
  //   4 [ result type          ]
  //   5 [ method               ]
  //   6 [ entry point          ]
  //   7 [ parameters           ]
  //   8 [ parameter size       ]
  //   9 [ thread               ]
  //
  //    Windows reserves the caller's stack space for arguments 1-4.
  //    We spill c_rarg0-c_rarg3 to this space.

  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    xmm_save_first     = 6,  // save from xmm6
    xmm_save_last      = 31, // to xmm31
    xmm_save_base      = -9,
    rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -27
    r15_off            = -7,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    call_wrapper_off   =  2,
    result_off         =  3,
    result_type_off    =  4,
    method_off         =  5,
    entry_point_off    =  6,
    parameters_off     =  7,
    parameter_size_off =  8,
    thread_off         =  9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    parameter_size_off =  2,
    thread_off         =  3
#endif
  };

#ifdef _WIN64
  Address xmm_save(int reg) {
    assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
    return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
  }
#endif
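  // For orientation, the C-level view of this stub: stubRoutines.hpp declares
  // a CallStub function-pointer type that the VM casts the generated code to.
  // A rough sketch (see StubRoutines::call_stub() for the authoritative
  // typedef; parameter names here are illustrative):
  //
  //   typedef void (*CallStub)(address   link,               // call wrapper
  //                            intptr_t* result,
  //                            BasicType result_type,
  //                            Method*   method,
  //                            address   entry_point,
  //                            intptr_t* parameters,
  //                            int       size_of_parameters, // in words
  //                            Thread*   thread);
  //
  // The first six arguments arrive in c_rarg0-c_rarg5 on Linux; on Windows
  // the last four arrive on the stack at 48(rbp)..72(rbp), as diagrammed above.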
  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,   c_rarg5); // parameters
    __ movptr(entry_point,  c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);

#ifdef _WIN64
    int last_reg = 15;
    if (UseAVX > 2) {
      last_reg = 31;
    }
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vextractf32x4(xmm_save(i), as_XMMRegister(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(xmm_save(i), as_XMMRegister(i));
      }
    }

    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);     // parameter pointer
    __ movl(c_rarg1, c_rarg3);          // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0));// get parameter
    __ addptr(c_rarg2, wordSize);       // advance to next parameter
    __ decrementl(c_rarg1);             // decrement counter
    __ push(rax);                       // pass parameter
    __ jcc(Assembler::notZero, loop);
    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);             // get Method*
    __ movptr(c_rarg1, entry_point);    // get entry_point
    __ mov(r13, rsp);                   // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::call_stub: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::call_stub: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L3);
    }
#endif

    // restore regs belonging to calling function
#ifdef _WIN64
    // emit the restores for xmm regs
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vinsertf32x4(as_XMMRegister(i), as_XMMRegister(i), xmm_save(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(as_XMMRegister(i), xmm_save(i));
      }
    }
#endif
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ vzeroupper();
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }
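  // For context, JavaCalls::call_helper() is the caller of this stub; a
  // simplified sketch of that call site, assuming the CallStub typedef above
  // (the exact argument expressions differ in javaCalls.cpp):
  //
  //   StubRoutines::call_stub()(
  //     (address)&link,          // JavaCallWrapper on the C stack
  //     result_val_address,      // where the switch above stores the result
  //     result->get_type(),      // BasicType tested against T_OBJECT etc.
  //     method(), entry_point,
  //     args->parameters(), args->size_of_parameters(),
  //     thread);
  //
  // The point is that the stub is an ordinary C-callable function whose frame
  // is laid out exactly as in the diagrams preceding generate_call_stub().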
  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to setup the
  // rsp.
  //
  // rax: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    address start = __ pc();

    // same as in generate_call_stub():
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
    const Address thread        (rbp, thread_off         * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::catch_exception: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::catch_exception: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L3);
    }
#endif

    // set pending exception
    __ verify_oop(rax);

    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
    __ lea(rscratch1, ExternalAddress((address)__FILE__));
    __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
    __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL,
           "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }

  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }

  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::xchg_long(jlong exchange_value, volatile jlong* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else
  //       return *dest;
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }
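  // The cmpxchg stubs here all implement the same contract at different
  // widths; a C sketch of the jint flavor (names ours, illustrative only):
  //
  //   jint cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
  //     jint old = *dest;                    // atomic with the store below
  //     if (old == compare_value) *dest = exchange_value;
  //     return old;                          // caller compares to compare_value
  //   }
  //
  // LOCK CMPXCHG leaves the old value in rax either way, which is exactly the
  // return-register convention, so each stub is just mov + lock cmpxchg + ret.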
  // Support for int8_t atomic::atomic_cmpxchg(int8_t exchange_value, volatile int8_t* dest,
  //                                           int8_t compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else
  //       return *dest;
  address generate_atomic_cmpxchg_byte() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
    address start = __ pc();

    __ movsbq(rax, c_rarg2);
    __ lock();
    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for int64_t atomic::atomic_cmpxchg(int64_t exchange_value,
  //                                            volatile int64_t* dest,
  //                                            int64_t compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else
  //       return *dest;
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);
    __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jint atomic::add(jint add_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);
    __ addl(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
    __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
    __ addptr(rax, c_rarg0);
    __ ret(0);

    return start;
  }
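  // Both add stubs rely on XADD's exchange behavior: after "lock xadd [dest],
  // reg", the register holds the *old* value of *dest, so adding add_value
  // once more yields the new value the contract requires. In C terms (a
  // sketch, names ours):
  //
  //   jint atomic_add(jint add_value, volatile jint* dest) {
  //     jint old = fetch_and_add(dest, add_value); // lock xadd
  //     return old + add_value;                    // the trailing addl
  //   }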
  // Support for intptr_t OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess). This is used as part of debugging
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp(rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // callers fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_sp()
  //
  // This routine is used to find the previous stack pointer for the
  // caller.
  address generate_get_previous_sp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_sp");
    address start = __ pc();

    __ movptr(rax, rsp);
    __ addptr(rax, 8); // return address is at the top of the stack.
    __ ret(0);

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize);    // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }

  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
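  // This and the three following *_fixup stubs patch up CVTTSS2SI/CVTTSD2SI
  // results: the hardware produces the "integer indefinite" value
  // (min_jint/min_jlong) for NaN and out-of-range inputs, while Java requires
  // NaN -> 0 and saturation to the type's min/max. In effect f2i must behave
  // like this sketch (written with HotSpot's jint/max_jint names):
  //
  //   static jint f2i(float f) {
  //     if (f != f)               return 0;         // NaN
  //     if (f >= (float)max_jint) return max_jint;  // saturate high
  //     if (f <= (float)min_jint) return min_jint;  // saturate low
  //     return (jint)f;                             // in-range: truncate
  //   }
  //
  // The stubs are only reached on the slow path, after compiled code has
  // already seen the indefinite value and needs to pick 0, min, or max.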
  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64( mask, relocInfo::none );
    __ emit_data64( mask, relocInfo::none );

    return start;
  }
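  // generate_fp_mask emits a 16-byte constant (the 64-bit mask twice), sized
  // to serve as a PAND/PXOR operand; it is used to build the float/double
  // sign-mask and flip constants installed in StubRoutines (e.g.
  // StubRoutines::x86::_float_sign_mask, built from 0x7FFFFFFF7FFFFFFF).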
  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,
      saved_r10     = 8 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr   = 16 * wordSize,
      error_msg     = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit); // if obj is NULL it is OK

#if INCLUDE_ZGC
    if (UseZGC) {
      // Check if metadata bits indicate a bad oop
      __ testptr(rax, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
      __ jcc(Assembler::notZero, error);
    }
#endif

    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // set r12 to heapbase for load_klass()
    __ reinit_heapbase();

    // make sure klass is 'reasonable', which is not zero.
    __ load_klass(rax, rax);  // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // restore c_rarg3
    __ pop(c_rarg2);                               // restore c_rarg2
    __ pop(r12);                                   // restore r12
    __ popf();                                     // restore flags
    __ ret(4 * wordSize);                          // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // get saved c_rarg3 back
    __ pop(c_rarg2);                               // get saved c_rarg2 back
    __ pop(r12);                                   // get saved r12 back
    __ popf();                                     // get saved flags off stack --
                                                   // will be ignored

    __ pusha();                                    // push registers
                                                   // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //     [tos +  0] 16 saved registers
    //     [tos + 16] return address
    //   * [tos + 17] error message (char*)
    //   * [tos + 18] object to verify (oop)
    //   * [tos + 19] saved rax - saved by caller and bashed
    //   * [tos + 20] saved r10 (rscratch1) - saved by caller
    //   * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ mov(rsp, r12);                               // restore rsp
    __ popa();                                      // pop registers (includes r12)
    __ ret(4 * wordSize);                           // pop caller saved stuff

    return start;
  }
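  // This stub's address is installed as StubRoutines::_verify_oop_subroutine_entry
  // and is reached via MacroAssembler::verify_oop() when -XX:+VerifyOops is on;
  // that caller-side code pushes r10, rax, the oop and the message address
  // before the call, which is what the [tos + 5..8] slots above refer to.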
  //
  // Verify that a register contains a clean 32-bit positive value
  // (high 32 bits are 0) so it can be used in 64-bit shifts.
  //
  //  Input:
  //    Rint  -  32-bit value
  //    Rtmp  -  scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#ifdef ASSERT
    Label L;
    assert_different_registers(Rtmp, Rint);
    __ movslq(Rtmp, Rint);
    __ cmpq(Rtmp, Rint);
    __ jcc(Assembler::equal, L);
    __ stop("high 32-bits of int value are not 0");
    __ bind(L);
#endif
  }

  //  Generate overlap test for array copy stubs
  //
  //  Input:
  //     c_rarg0 - from
  //     c_rarg1 - to
  //     c_rarg2 - element count
  //
  //  Output:
  //     rax   - &from[element count - 1]
  //
  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(NULL, &L_no_overlap, sf);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
    const Register from     = c_rarg0;
    const Register to       = c_rarg1;
    const Register count    = c_rarg2;
    const Register end_from = rax;

    __ cmpptr(to, from);
    __ lea(end_from, Address(from, count, sf, 0));
    if (NOLp == NULL) {
      ExternalAddress no_overlap(no_overlap_target);
      __ jump_cc(Assembler::belowEqual, no_overlap);
      __ cmpptr(to, end_from);
      __ jump_cc(Assembler::aboveEqual, no_overlap);
    } else {
      __ jcc(Assembler::belowEqual, (*NOLp));
      __ cmpptr(to, end_from);
      __ jcc(Assembler::aboveEqual, (*NOLp));
    }
  }
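  // The test above branches to the disjoint (forward-copy) entry whenever a
  // forward copy cannot corrupt the source, i.e. roughly (C sketch, with sf
  // the element scale factor):
  //
  //   if (to <= from || to >= from + (count << sf)) goto no_overlap;
  //
  // Otherwise execution falls through into the conjoint (backward) copy.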
  // Shuffle first three arg regs on Windows into Linux/Solaris locations.
  //
  // Outputs:
  //    rdi - rcx
  //    rsi - rdx
  //    rdx - r8
  //    rcx - r9
  //
  // Registers r9 and r10 are used to save rdi and rsi on Windows, where
  // those registers are non-volatile. The caller must therefore not use
  // r9 and r10.
  //
  DEBUG_ONLY(bool regs_in_thread;)

  void setup_arg_regs(int nargs = 3) {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
    assert(nargs == 3 || nargs == 4, "else fix");
#ifdef _WIN64
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    if (nargs >= 4)
      __ mov(rax, r9);  // r9 is also saved_rdi
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
    if (nargs >= 4)
      __ mov(rcx, rax); // c_rarg3 (via rax)
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
    DEBUG_ONLY(regs_in_thread = false;)
  }

  void restore_arg_regs() {
    assert(!regs_in_thread, "wrong call to restore_arg_regs");
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
#ifdef _WIN64
    __ movptr(rdi, saved_rdi);
    __ movptr(rsi, saved_rsi);
#endif
  }

  // This is used in places where r10 is a scratch register, and can
  // be adapted if r9 is needed also.
  void setup_arg_regs_using_thread() {
    const Register saved_r15 = r9;
#ifdef _WIN64
    __ mov(saved_r15, r15);  // r15 is callee saved and needs to be restored
    __ get_thread(r15_thread);
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    __ movptr(Address(r15_thread, in_bytes(JavaThread::windows_saved_rdi_offset())), rdi);
    __ movptr(Address(r15_thread, in_bytes(JavaThread::windows_saved_rsi_offset())), rsi);

    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
    DEBUG_ONLY(regs_in_thread = true;)
  }

  void restore_arg_regs_using_thread() {
    assert(regs_in_thread, "wrong call to restore_arg_regs");
    const Register saved_r15 = r9;
#ifdef _WIN64
    __ get_thread(r15_thread);
    __ movptr(rsi, Address(r15_thread, in_bytes(JavaThread::windows_saved_rsi_offset())));
    __ movptr(rdi, Address(r15_thread, in_bytes(JavaThread::windows_saved_rdi_offset())));
    __ mov(r15, saved_r15);  // r15 is callee saved and needs to be restored
#endif
  }

  // Copy big chunks forward
  //
  // Inputs:
  //   end_from     - source array end address
  //   end_to       - destination array end address
  //   qword_count  - 64-bit element count, negative
  //   to           - scratch
  //   L_copy_bytes - entry label
  //   L_copy_8_bytes  - exit  label
  //
  void copy_bytes_forward(Register end_from, Register end_to,
                          Register qword_count, Register to,
                          Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      // Copy 64-bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit);
        __ evmovdqul(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
        __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
        __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 8);
      __ jcc(Assembler::lessEqual, L_loop);
      __ subptr(qword_count, 4);  // sub(8) and add(4)
      __ jccb(Assembler::greater, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
      }
      __ addptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32-bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
      __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
      __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
      __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
      __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);

      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 4);
      __ jcc(Assembler::lessEqual, L_loop);
    }
    __ subptr(qword_count, 4);
    __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
  }
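  // The forward copier biases 'end_from'/'end_to' to the last qword and runs
  // qword_count from -n toward 0, so the induction variable doubles as the
  // loop-termination test (add + jcc) and no extra limit register is needed.
  // Roughly, in C (illustrative only; i counts qwords, addresses scale by 8):
  //
  //   intptr_t i = -(intptr_t)n;                 // negptr(qword_count)
  //   while ((i += 8) <= 0)                      // addptr + jcc(lessEqual)
  //     copy64(end_to + i, end_from + i);        // the -56..-8 displacements
  //
  // with the +4/-4 adjustments handling a trailing 32-byte chunk and the
  // L_copy_8_bytes exit finishing the last few qwords one at a time.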
  // Copy big chunks backward
  //
  // Inputs:
  //   from         - source array address
  //   dest         - destination array address
  //   qword_count  - 64-bit element count
  //   to           - scratch
  //   L_copy_bytes - entry label
  //   L_copy_8_bytes  - exit  label
  //
  void copy_bytes_backward(Register from, Register dest,
                           Register qword_count, Register to,
                           Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      // Copy 64-bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 0), Assembler::AVX_512bit);
        __ evmovdqul(Address(dest, qword_count, Address::times_8, 0), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
        __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
        __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
        __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
        __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
        __ movdqu(xmm3, Address(from, qword_count, Address::times_8, 0));
        __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 8);
      __ jcc(Assembler::greaterEqual, L_loop);

      __ addptr(qword_count, 4);  // add(8) and sub(4)
      __ jccb(Assembler::less, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
        __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
      }
      __ subptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32-bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(from, qword_count, Address::times_8, 24));
      __ movq(Address(dest, qword_count, Address::times_8, 24), to);
      __ movq(to, Address(from, qword_count, Address::times_8, 16));
      __ movq(Address(dest, qword_count, Address::times_8, 16), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  8));
      __ movq(Address(dest, qword_count, Address::times_8,  8), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  0));
      __ movq(Address(dest, qword_count, Address::times_8,  0), to);

      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 4);
      __ jcc(Assembler::greaterEqual, L_loop);
    }
    __ addptr(qword_count, 4);
    __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_byte_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_byte_copy().
  //
  address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    Label L_copy_byte, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count); // make the count negative
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jccb(Assembler::zero, L_copy_byte);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ addptr(end_from, 2);
    __ addptr(end_to, 2);

    // Check for and copy trailing byte
    __ BIND(L_copy_byte);
    __ testl(byte_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movb(rax, Address(end_from, 8));
    __ movb(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-bytes chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }
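  // The tail handling above decomposes (byte_count % 8) bit by bit: bit 2
  // selects a trailing dword, bit 1 a word, bit 0 a byte. A C sketch:
  //
  //   if (byte_count & 4) { copy4(); from += 4; to += 4; }
  //   if (byte_count & 2) { copy2(); from += 2; to += 2; }
  //   if (byte_count & 1) { copy1(); }
  //
  // so at most three extra memory moves finish any misaligned tail.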

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from high to low addresses.

    // Check for and copy trailing byte
    __ testl(byte_count, 1);
    __ jcc(Assembler::zero, L_copy_2_bytes);
    __ movb(rax, Address(from, byte_count, Address::times_1, -1));
    __ movb(Address(to, byte_count, Address::times_1, -1), rax);
    __ decrement(byte_count); // Adjust for possible trailing word

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jcc(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, byte_count, Address::times_1, -2));
    __ movw(Address(to, byte_count, Address::times_1, -2), rax);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-bytes chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_short_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_short_copy().
  //
  address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Original 'dest' is trashed, so we can't use it as a
    // base register for a possible trailing word copy

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-bytes chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }

  address generate_fill(BasicType t, bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    BLOCK_COMMENT("Entry:");

    const Register to    = c_rarg0;  // destination array address
    const Register value = c_rarg1;  // value
    const Register count = c_rarg2;  // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ generate_fill(t, aligned, to, value, count, rax, xmm0);

    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }
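  // The fill stubs generated here are installed as StubRoutines::_jbyte_fill,
  // _jshort_fill, _jint_fill and their aligned "arrayof" variants (see the
  // StubRoutines wiring elsewhere in this file); the compilers call them for
  // Arrays.fill-style initialization loops. The actual fill loop lives in
  // MacroAssembler::generate_fill().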
1739 //
1740 address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
1741 address *entry, const char *name) {
1742 __ align(CodeEntryAlignment);
1743 StubCodeMark mark(this, "StubRoutines", name);
1744 address start = __ pc();
1745
1746 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes;
1747 const Register from = rdi; // source array address
1748 const Register to = rsi; // destination array address
1749 const Register count = rdx; // elements count
1750 const Register word_count = rcx;
1751 const Register qword_count = count;
1752
1753 __ enter(); // required for proper stackwalking of RuntimeStub frame
1754 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1755
1756 if (entry != NULL) {
1757 *entry = __ pc();
1758 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1759 BLOCK_COMMENT("Entry:");
1760 }
1761
1762 array_overlap_test(nooverlap_target, Address::times_2);
1763 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1764 // r9 and r10 may be used to save non-volatile registers
1765
1766 // 'from', 'to' and 'count' are now valid
1767 __ movptr(word_count, count);
1768 __ shrptr(count, 2); // count => qword_count
1769
1770 // Copy from high to low addresses. Use 'to' as scratch.
1771
1772 // Check for and copy trailing word
1773 __ testl(word_count, 1);
1774 __ jccb(Assembler::zero, L_copy_4_bytes);
1775 __ movw(rax, Address(from, word_count, Address::times_2, -2));
1776 __ movw(Address(to, word_count, Address::times_2, -2), rax);
1777
1778 // Check for and copy trailing dword
1779 __ BIND(L_copy_4_bytes);
1780 __ testl(word_count, 2);
1781 __ jcc(Assembler::zero, L_copy_bytes);
1782 __ movl(rax, Address(from, qword_count, Address::times_8));
1783 __ movl(Address(to, qword_count, Address::times_8), rax);
1784 __ jmp(L_copy_bytes);
1785
1786 // Copy trailing qwords
1787 __ BIND(L_copy_8_bytes);
1788 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1789 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1790 __ decrement(qword_count);
1791 __ jcc(Assembler::notZero, L_copy_8_bytes);
1792
1793 restore_arg_regs();
1794 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
1795 __ xorptr(rax, rax); // return 0
1796 __ vzeroupper();
1797 __ leave(); // required for proper stackwalking of RuntimeStub frame
1798 __ ret(0);
1799
1800 // Copy in multi-byte chunks
1801 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
1802
1803 restore_arg_regs();
1804 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
1805 __ xorptr(rax, rax); // return 0
1806 __ vzeroupper();
1807 __ leave(); // required for proper stackwalking of RuntimeStub frame
1808 __ ret(0);
1809
1810 return start;
1811 }
1812
1813 // Arguments:
1814 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1815 // ignored
1816 // is_oop - true => oop array, so generate store check code
1817 // name - stub name string
1818 //
1819 // Inputs:
1820 // c_rarg0 - source array address
1821 // c_rarg1 - destination array address
1822 // c_rarg2 - element count, treated as ssize_t, can be zero
1823 //
1824 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
1825 // the hardware handle it. The two dwords within qwords that span
1826 // cache line boundaries will still be loaded and stored atomically.
1827 // 1828 // Side Effects: 1829 // disjoint_int_copy_entry is set to the no-overlap entry point 1830 // used by generate_conjoint_int_oop_copy(). 1831 // 1832 address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry, 1833 const char *name, bool dest_uninitialized = false) { 1834 __ align(CodeEntryAlignment); 1835 StubCodeMark mark(this, "StubRoutines", name); 1836 address start = __ pc(); 1837 1838 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit; 1839 const Register from = rdi; // source array address 1840 const Register to = rsi; // destination array address 1841 const Register count = rdx; // elements count 1842 const Register dword_count = rcx; 1843 const Register qword_count = count; 1844 const Register end_from = from; // source array end address 1845 const Register end_to = to; // destination array end address 1846 // End pointers are inclusive, and if count is not zero they point 1847 // to the last unit copied: end_to[0] := end_from[0] 1848 1849 __ enter(); // required for proper stackwalking of RuntimeStub frame 1850 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1851 1852 if (entry != NULL) { 1853 *entry = __ pc(); 1854 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1855 BLOCK_COMMENT("Entry:"); 1856 } 1857 1858 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx 1859 // r9 is used to save r15_thread 1860 1861 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT; 1862 if (dest_uninitialized) { 1863 decorators |= IS_DEST_UNINITIALIZED; 1864 } 1865 if (aligned) { 1866 decorators |= ARRAYCOPY_ALIGNED; 1867 } 1868 1869 BasicType type = is_oop ? T_OBJECT : T_INT; 1870 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 1871 bs->arraycopy_prologue(_masm, decorators, type, from, to, count); 1872 1873 // 'from', 'to' and 'count' are now valid 1874 __ movptr(dword_count, count); 1875 __ shrptr(count, 1); // count => qword_count 1876 1877 // Copy from low to high addresses. Use 'to' as scratch. 
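// (sketch) Just above, the 4-byte element count was split as
//   qword_count = dword_count >> 1;   // pairs of dwords, moved 8 bytes at a time
// with (dword_count & 1) handled as a single trailing dword at L_copy_4_bytes.
// The qword loop below then uses the same biased negative index as the short
// copy stub: *(uint64_t*)(end_to + 8*i + 8) = *(uint64_t*)(end_from + 8*i + 8).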
1878 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1879 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
1880 __ negptr(qword_count);
1881 __ jmp(L_copy_bytes);
1882
1883 // Copy trailing qwords
1884 __ BIND(L_copy_8_bytes);
1885 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1886 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1887 __ increment(qword_count);
1888 __ jcc(Assembler::notZero, L_copy_8_bytes);
1889
1890 // Check for and copy trailing dword
1891 __ BIND(L_copy_4_bytes);
1892 __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
1893 __ jccb(Assembler::zero, L_exit);
1894 __ movl(rax, Address(end_from, 8));
1895 __ movl(Address(end_to, 8), rax);
1896
1897 __ BIND(L_exit);
1898 bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count);
1899 restore_arg_regs_using_thread();
1900 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
1901 __ vzeroupper();
1902 __ xorptr(rax, rax); // return 0
1903 __ leave(); // required for proper stackwalking of RuntimeStub frame
1904 __ ret(0);
1905
1906 // Copy in multi-byte chunks
1907 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
1908 __ jmp(L_copy_4_bytes);
1909
1910 return start;
1911 }
1912
1913 // Arguments:
1914 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1915 // ignored
1916 // is_oop - true => oop array, so generate store check code
1917 // name - stub name string
1918 //
1919 // Inputs:
1920 // c_rarg0 - source array address
1921 // c_rarg1 - destination array address
1922 // c_rarg2 - element count, treated as ssize_t, can be zero
1923 //
1924 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
1925 // the hardware handle it. The two dwords within qwords that span
1926 // cache line boundaries will still be loaded and stored atomically.
1927 //
1928 address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
1929 address *entry, const char *name,
1930 bool dest_uninitialized = false) {
1931 __ align(CodeEntryAlignment);
1932 StubCodeMark mark(this, "StubRoutines", name);
1933 address start = __ pc();
1934
1935 Label L_copy_bytes, L_copy_8_bytes, L_exit;
1936 const Register from = rdi; // source array address
1937 const Register to = rsi; // destination array address
1938 const Register count = rdx; // elements count
1939 const Register dword_count = rcx;
1940 const Register qword_count = count;
1941
1942 __ enter(); // required for proper stackwalking of RuntimeStub frame
1943 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1944
1945 if (entry != NULL) {
1946 *entry = __ pc();
1947 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1948 BLOCK_COMMENT("Entry:");
1949 }
1950
1951 array_overlap_test(nooverlap_target, Address::times_4);
1952 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx
1953 // r9 is used to save r15_thread
1954
1955 DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1956 if (dest_uninitialized) {
1957 decorators |= IS_DEST_UNINITIALIZED;
1958 }
1959 if (aligned) {
1960 decorators |= ARRAYCOPY_ALIGNED;
1961 }
1962
1963 BasicType type = is_oop ?
T_OBJECT : T_INT;
1964 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
1965 // no registers are destroyed by this call
1966 bs->arraycopy_prologue(_masm, decorators, type, from, to, count);
1967
1968 assert_clean_int(count, rax); // Make sure 'count' is clean int.
1969 // 'from', 'to' and 'count' are now valid
1970 __ movptr(dword_count, count);
1971 __ shrptr(count, 1); // count => qword_count
1972
1973 // Copy from high to low addresses. Use 'to' as scratch.
1974
1975 // Check for and copy trailing dword
1976 __ testl(dword_count, 1);
1977 __ jcc(Assembler::zero, L_copy_bytes);
1978 __ movl(rax, Address(from, dword_count, Address::times_4, -4));
1979 __ movl(Address(to, dword_count, Address::times_4, -4), rax);
1980 __ jmp(L_copy_bytes);
1981
1982 // Copy trailing qwords
1983 __ BIND(L_copy_8_bytes);
1984 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1985 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1986 __ decrement(qword_count);
1987 __ jcc(Assembler::notZero, L_copy_8_bytes);
1988
1989 if (is_oop) {
1990 __ jmp(L_exit);
1991 }
1992 restore_arg_regs_using_thread();
1993 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
1994 __ xorptr(rax, rax); // return 0
1995 __ vzeroupper();
1996 __ leave(); // required for proper stackwalking of RuntimeStub frame
1997 __ ret(0);
1998
1999 // Copy in multi-byte chunks
2000 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
2001
2002 __ BIND(L_exit);
2003 bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count);
2004 restore_arg_regs_using_thread();
2005 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
2006 __ xorptr(rax, rax); // return 0
2007 __ vzeroupper();
2008 __ leave(); // required for proper stackwalking of RuntimeStub frame
2009 __ ret(0);
2010
2011 return start;
2012 }
2013
2014 // Arguments:
2015 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
2016 // ignored
2017 // is_oop - true => oop array, so generate store check code
2018 // name - stub name string
2019 //
2020 // Inputs:
2021 // c_rarg0 - source array address
2022 // c_rarg1 - destination array address
2023 // c_rarg2 - element count, treated as ssize_t, can be zero
2024 //
2025 // Side Effects:
2026 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
2027 // no-overlap entry point used by generate_conjoint_long_oop_copy().
2028 //
2029 address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry,
2030 const char *name, bool dest_uninitialized = false) {
2031 __ align(CodeEntryAlignment);
2032 StubCodeMark mark(this, "StubRoutines", name);
2033 address start = __ pc();
2034
2035 Label L_copy_bytes, L_copy_8_bytes, L_exit;
2036 const Register from = rdi; // source array address
2037 const Register to = rsi; // destination array address
2038 const Register qword_count = rdx; // elements count
2039 const Register end_from = from; // source array end address
2040 const Register end_to = rcx; // destination array end address
2041 const Register saved_count = r11;
2042 // End pointers are inclusive, and if count is not zero they point
2043 // to the last unit copied: end_to[0] := end_from[0]
2044
2045 __ enter(); // required for proper stackwalking of RuntimeStub frame
2046 // Save no-overlap entry point for generate_conjoint_long_oop_copy()
2047 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
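// (sketch) assert_clean_int verifies that the 32-bit count arrived properly
// sign-extended in its 64-bit register, roughly:
//   assert((int64_t)(int32_t)count == (int64_t)count, "invalid int value");
// so the scaled addressing below can use the full register safely.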
2048
2049 if (entry != NULL) {
2050 *entry = __ pc();
2051 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2052 BLOCK_COMMENT("Entry:");
2053 }
2054
2055 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx
2056 // r9 is used to save r15_thread
2057 // 'from', 'to' and 'qword_count' are now valid
2058
2059 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT;
2060 if (dest_uninitialized) {
2061 decorators |= IS_DEST_UNINITIALIZED;
2062 }
2063 if (aligned) {
2064 decorators |= ARRAYCOPY_ALIGNED;
2065 }
2066
2067 BasicType type = is_oop ? T_OBJECT : T_LONG;
2068 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
2069 bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count);
2070
2071 // Copy from low to high addresses. Use 'to' as scratch.
2072 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
2073 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
2074 __ negptr(qword_count);
2075 __ jmp(L_copy_bytes);
2076
2077 // Copy trailing qwords
2078 __ BIND(L_copy_8_bytes);
2079 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
2080 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
2081 __ increment(qword_count);
2082 __ jcc(Assembler::notZero, L_copy_8_bytes);
2083
2084 if (is_oop) {
2085 __ jmp(L_exit);
2086 } else {
2087 restore_arg_regs_using_thread();
2088 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
2089 __ xorptr(rax, rax); // return 0
2090 __ vzeroupper();
2091 __ leave(); // required for proper stackwalking of RuntimeStub frame
2092 __ ret(0);
2093 }
2094
2095 // Copy in multi-byte chunks
2096 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
2097
2098 __ BIND(L_exit);
2099 bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count);
2100 restore_arg_regs_using_thread();
2101 if (is_oop) {
2102 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
2103 } else {
2104 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
2105 }
2106 __ vzeroupper();
2107 __ xorptr(rax, rax); // return 0
2108 __ leave(); // required for proper stackwalking of RuntimeStub frame
2109 __ ret(0);
2110
2111 return start;
2112 }
2113
2114 // Arguments:
2115 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
2116 // ignored
2117 // is_oop - true => oop array, so generate store check code
2118 // name - stub name string
2119 //
2120 // Inputs:
2121 // c_rarg0 - source array address
2122 // c_rarg1 - destination array address
2123 // c_rarg2 - element count, treated as ssize_t, can be zero
2124 //
2125 address generate_conjoint_long_oop_copy(bool aligned, bool is_oop,
2126 address nooverlap_target, address *entry,
2127 const char *name, bool dest_uninitialized = false) {
2128 __ align(CodeEntryAlignment);
2129 StubCodeMark mark(this, "StubRoutines", name);
2130 address start = __ pc();
2131
2132 Label L_copy_bytes, L_copy_8_bytes, L_exit;
2133 const Register from = rdi; // source array address
2134 const Register to = rsi; // destination array address
2135 const Register qword_count = rdx; // elements count
2136 const Register saved_count = rcx;
2137
2138 __ enter(); // required for proper stackwalking of RuntimeStub frame
2139 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
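// (sketch) This is the conjoint variant, so the qword loop below must run from
// high to low addresses; when the regions overlap with 'to' above 'from',
// copying backward is what keeps the not-yet-copied source bytes intact:
//   for (int64_t i = (int64_t)qword_count; i != 0; i--)
//     *(uint64_t*)(to + 8*i - 8) = *(uint64_t*)(from + 8*i - 8);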
2140
2141 if (entry != NULL) {
2142 *entry = __ pc();
2143 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2144 BLOCK_COMMENT("Entry:");
2145 }
2146
2147 array_overlap_test(nooverlap_target, Address::times_8);
2148 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx
2149 // r9 is used to save r15_thread
2150 // 'from', 'to' and 'qword_count' are now valid
2151
2152 DecoratorSet decorators = IN_HEAP | IS_ARRAY;
2153 if (dest_uninitialized) {
2154 decorators |= IS_DEST_UNINITIALIZED;
2155 }
2156 if (aligned) {
2157 decorators |= ARRAYCOPY_ALIGNED;
2158 }
2159
2160 BasicType type = is_oop ? T_OBJECT : T_LONG;
2161 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
2162 bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count);
2163
2164 __ jmp(L_copy_bytes);
2165
2166 // Copy trailing qwords
2167 __ BIND(L_copy_8_bytes);
2168 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
2169 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
2170 __ decrement(qword_count);
2171 __ jcc(Assembler::notZero, L_copy_8_bytes);
2172
2173 if (is_oop) {
2174 __ jmp(L_exit);
2175 } else {
2176 restore_arg_regs_using_thread();
2177 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
2178 __ xorptr(rax, rax); // return 0
2179 __ vzeroupper();
2180 __ leave(); // required for proper stackwalking of RuntimeStub frame
2181 __ ret(0);
2182 }
2183
2184 // Copy in multi-byte chunks
2185 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
2186
2187 __ BIND(L_exit);
2188 bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count);
2189 restore_arg_regs_using_thread();
2190 if (is_oop) {
2191 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
2192 } else {
2193 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
2194 }
2195 __ vzeroupper();
2196 __ xorptr(rax, rax); // return 0
2197 __ leave(); // required for proper stackwalking of RuntimeStub frame
2198 __ ret(0);
2199
2200 return start;
2201 }
2202
2203
2204 // Helper for generating a dynamic type check.
2205 // Smashes no registers.
2206 void generate_type_check(Register sub_klass,
2207 Register super_check_offset,
2208 Register super_klass,
2209 Label& L_success) {
2210 assert_different_registers(sub_klass, super_check_offset, super_klass);
2211
2212 BLOCK_COMMENT("type_check:");
2213
2214 Label L_miss;
2215
2216 __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL,
2217 super_check_offset);
2218 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL);
2219
2220 // Fall through on failure!
2221 __ BIND(L_miss); 2222 } 2223 2224 // 2225 // Generate checkcasting array copy stub 2226 // 2227 // Input: 2228 // c_rarg0 - source array address 2229 // c_rarg1 - destination array address 2230 // c_rarg2 - element count, treated as ssize_t, can be zero 2231 // c_rarg3 - size_t ckoff (super_check_offset) 2232 // not Win64 2233 // c_rarg4 - oop ckval (super_klass) 2234 // Win64 2235 // rsp+40 - oop ckval (super_klass) 2236 // 2237 // Output: 2238 // rax == 0 - success 2239 // rax == -1^K - failure, where K is partial transfer count 2240 // 2241 address generate_checkcast_copy(const char *name, address *entry, 2242 bool dest_uninitialized = false) { 2243 2244 Label L_load_element, L_store_element, L_do_card_marks, L_done; 2245 2246 // Input registers (after setup_arg_regs) 2247 const Register from = rdi; // source array address 2248 const Register to = rsi; // destination array address 2249 const Register length = rdx; // elements count 2250 const Register ckoff = rcx; // super_check_offset 2251 const Register ckval = r8; // super_klass 2252 2253 // Registers used as temps (r13, r14 are save-on-entry) 2254 const Register end_from = from; // source array end address 2255 const Register end_to = r13; // destination array end address 2256 const Register count = rdx; // -(count_remaining) 2257 const Register r14_length = r14; // saved copy of length 2258 // End pointers are inclusive, and if length is not zero they point 2259 // to the last unit copied: end_to[0] := end_from[0] 2260 2261 const Register rax_oop = rax; // actual oop copied 2262 const Register r11_klass = r11; // oop._klass 2263 2264 //--------------------------------------------------------------- 2265 // Assembler stub will be used for this call to arraycopy 2266 // if the two arrays are subtypes of Object[] but the 2267 // destination array type is not equal to or a supertype 2268 // of the source type. Each element must be separately 2269 // checked. 2270 2271 __ align(CodeEntryAlignment); 2272 StubCodeMark mark(this, "StubRoutines", name); 2273 address start = __ pc(); 2274 2275 __ enter(); // required for proper stackwalking of RuntimeStub frame 2276 2277 #ifdef ASSERT 2278 // caller guarantees that the arrays really are different 2279 // otherwise, we would have to make conjoint checks 2280 { Label L; 2281 array_overlap_test(L, TIMES_OOP); 2282 __ stop("checkcast_copy within a single array"); 2283 __ bind(L); 2284 } 2285 #endif //ASSERT 2286 2287 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx 2288 // ckoff => rcx, ckval => r8 2289 // r9 and r10 may be used to save non-volatile registers 2290 #ifdef _WIN64 2291 // last argument (#4) is on stack on Win64 2292 __ movptr(ckval, Address(rsp, 6 * wordSize)); 2293 #endif 2294 2295 // Caller of this entry point must set up the argument registers. 
2296 if (entry != NULL) {
2297 *entry = __ pc();
2298 BLOCK_COMMENT("Entry:");
2299 }
2300
2301 // allocate spill slots for r13, r14
2302 enum {
2303 saved_r13_offset,
2304 saved_r14_offset,
2305 saved_r10_offset,
2306 saved_rbp_offset
2307 };
2308 __ subptr(rsp, saved_rbp_offset * wordSize);
2309 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
2310 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);
2311 __ movptr(Address(rsp, saved_r10_offset * wordSize), r10);
2312
2313 #ifdef ASSERT
2314 Label L2;
2315 __ get_thread(r14);
2316 __ cmpptr(r15_thread, r14);
2317 __ jcc(Assembler::equal, L2);
2318 __ stop("StubRoutines::checkcast_copy: r15_thread is modified by call");
2319 __ bind(L2);
2320 #endif // ASSERT
2321
2322 // check that int operands are properly extended to size_t
2323 assert_clean_int(length, rax);
2324 assert_clean_int(ckoff, rax);
2325
2326 #ifdef ASSERT
2327 BLOCK_COMMENT("assert consistent ckoff/ckval");
2328 // The ckoff and ckval must be mutually consistent,
2329 // even though caller generates both.
2330 { Label L;
2331 int sco_offset = in_bytes(Klass::super_check_offset_offset());
2332 __ cmpl(ckoff, Address(ckval, sco_offset));
2333 __ jcc(Assembler::equal, L);
2334 __ stop("super_check_offset inconsistent");
2335 __ bind(L);
2336 }
2337 #endif //ASSERT
2338
2339 // Loop-invariant addresses. They are exclusive end pointers.
2340 Address end_from_addr(from, length, TIMES_OOP, 0);
2341 Address end_to_addr(to, length, TIMES_OOP, 0);
2342 // Loop-variant addresses. They assume post-incremented count < 0.
2343 Address from_element_addr(end_from, count, TIMES_OOP, 0);
2344 Address to_element_addr(end_to, count, TIMES_OOP, 0);
2345
2346 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST;
2347 if (dest_uninitialized) {
2348 decorators |= IS_DEST_UNINITIALIZED;
2349 }
2350
2351 BasicType type = T_OBJECT;
2352 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
2353 bs->arraycopy_prologue(_masm, decorators, type, from, to, count);
2354
2355 // Copy from low to high addresses, indexed from the end of each array.
2356 __ lea(end_from, end_from_addr);
2357 __ lea(end_to, end_to_addr);
2358 __ movptr(r14_length, length); // save a copy of the length
2359 assert(length == count, ""); // else fix next line:
2360 __ negptr(count); // negate and test the length
2361 __ jcc(Assembler::notZero, L_load_element);
2362
2363 // Empty array: Nothing to do.
2364 __ xorptr(rax, rax); // return 0 on (trivial) success
2365 __ jmp(L_done);
2366
2367 // ======== begin loop ========
2368 // (Loop is rotated; its entry is L_load_element.)
2369 // Loop control:
2370 // for (count = -count; count != 0; count++)
2371 // Base pointers src, dst are biased by 8*(count-1), to the last element.
2372 __ align(OptoLoopAlignment);
2373
2374 __ BIND(L_store_element);
2375 __ store_heap_oop(to_element_addr, rax_oop, noreg, noreg, AS_RAW); // store the oop
2376 __ increment(count); // increment the count toward zero
2377 __ jcc(Assembler::zero, L_do_card_marks);
2378
2379 // ======== loop entry is here ========
2380 __ BIND(L_load_element);
2381 __ load_heap_oop(rax_oop, from_element_addr, noreg, noreg, AS_RAW); // load the oop
2382 __ testptr(rax_oop, rax_oop);
2383 __ jcc(Assembler::zero, L_store_element);
2384
2385 __ load_klass(r11_klass, rax_oop); // query the object klass
2386 generate_type_check(r11_klass, ckoff, ckval, L_store_element);
2387 // ======== end loop ========
2388
2389 // It was a real error; we must depend on the caller to finish the job.
2390 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops. 2391 // Emit GC store barriers for the oops we have copied (r14 + rdx), 2392 // and report their number to the caller. 2393 assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1); 2394 Label L_post_barrier; 2395 __ addptr(r14_length, count); // K = (original - remaining) oops 2396 __ movptr(rax, r14_length); // save the value 2397 __ notptr(rax); // report (-1^K) to caller (does not affect flags) 2398 __ jccb(Assembler::notZero, L_post_barrier); 2399 __ jmp(L_done); // K == 0, nothing was copied, skip post barrier 2400 2401 // Come here on success only. 2402 __ BIND(L_do_card_marks); 2403 __ xorptr(rax, rax); // return 0 on success 2404 2405 __ BIND(L_post_barrier); 2406 bs->arraycopy_epilogue(_masm, decorators, type, from, to, r14_length); 2407 2408 // Common exit point (success or failure). 2409 __ BIND(L_done); 2410 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 2411 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 2412 __ movptr(r10, Address(rsp, saved_r10_offset * wordSize)); 2413 restore_arg_regs(); 2414 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free 2415 __ leave(); // required for proper stackwalking of RuntimeStub frame 2416 __ ret(0); 2417 2418 return start; 2419 } 2420 2421 // 2422 // Generate 'unsafe' array copy stub 2423 // Though just as safe as the other stubs, it takes an unscaled 2424 // size_t argument instead of an element count. 2425 // 2426 // Input: 2427 // c_rarg0 - source array address 2428 // c_rarg1 - destination array address 2429 // c_rarg2 - byte count, treated as ssize_t, can be zero 2430 // 2431 // Examines the alignment of the operands and dispatches 2432 // to a long, int, short, or byte copy loop. 
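// (sketch) The alignment dispatch below is, in effect:
//   bits = (uintptr_t)from | (uintptr_t)to | size;
//   if      ((bits & 7) == 0) goto long_copy;   // count = size >> 3
//   else if ((bits & 3) == 0) goto int_copy;    // count = size >> 2
//   else if ((bits & 1) == 0) goto short_copy;  // count = size >> 1
//   else                      goto byte_copy;   // count = size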
2433 //
2434 address generate_unsafe_copy(const char *name,
2435 address byte_copy_entry, address short_copy_entry,
2436 address int_copy_entry, address long_copy_entry) {
2437
2438 Label L_long_aligned, L_int_aligned, L_short_aligned;
2439
2440 // Input registers (before setup_arg_regs)
2441 const Register from = c_rarg0; // source array address
2442 const Register to = c_rarg1; // destination array address
2443 const Register size = c_rarg2; // byte count (size_t)
2444
2445 // Register used as a temp
2446 const Register bits = rax; // test copy of low bits
2447
2448 __ align(CodeEntryAlignment);
2449 StubCodeMark mark(this, "StubRoutines", name);
2450 address start = __ pc();
2451
2452 __ enter(); // required for proper stackwalking of RuntimeStub frame
2453
2454 // bump this on entry, not on exit:
2455 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);
2456
2457 __ mov(bits, from);
2458 __ orptr(bits, to);
2459 __ orptr(bits, size);
2460
2461 __ testb(bits, BytesPerLong-1);
2462 __ jccb(Assembler::zero, L_long_aligned);
2463
2464 __ testb(bits, BytesPerInt-1);
2465 __ jccb(Assembler::zero, L_int_aligned);
2466
2467 __ testb(bits, BytesPerShort-1);
2468 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));
2469
2470 __ BIND(L_short_aligned);
2471 __ shrptr(size, LogBytesPerShort); // size => short_count
2472 __ jump(RuntimeAddress(short_copy_entry));
2473
2474 __ BIND(L_int_aligned);
2475 __ shrptr(size, LogBytesPerInt); // size => int_count
2476 __ jump(RuntimeAddress(int_copy_entry));
2477
2478 __ BIND(L_long_aligned);
2479 __ shrptr(size, LogBytesPerLong); // size => qword_count
2480 __ jump(RuntimeAddress(long_copy_entry));
2481
2482 return start;
2483 }
2484
2485 // Perform range checks on the proposed arraycopy.
2486 // Kills temp, but nothing else.
2487 // Also, clean the sign bits of src_pos and dst_pos.
2488 void arraycopy_range_checks(Register src, // source array oop (c_rarg0)
2489 Register src_pos, // source position (c_rarg1)
2490 Register dst, // destination array oop (c_rarg2)
2491 Register dst_pos, // destination position (c_rarg3)
2492 Register length,
2493 Register temp,
2494 Label& L_failed) {
2495 BLOCK_COMMENT("arraycopy_range_checks:");
2496
2497 // if (src_pos + length > arrayOop(src)->length()) FAIL;
2498 __ movl(temp, length);
2499 __ addl(temp, src_pos); // src_pos + length
2500 __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
2501 __ jcc(Assembler::above, L_failed);
2502
2503 // if (dst_pos + length > arrayOop(dst)->length()) FAIL;
2504 __ movl(temp, length);
2505 __ addl(temp, dst_pos); // dst_pos + length
2506 __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
2507 __ jcc(Assembler::above, L_failed);
2508
2509 // Have to clean up the high 32 bits of 'src_pos' and 'dst_pos'.
2510 // Move with sign extension can be used since they are positive.
2511 __ movslq(src_pos, src_pos); 2512 __ movslq(dst_pos, dst_pos); 2513 2514 BLOCK_COMMENT("arraycopy_range_checks done"); 2515 } 2516 2517 // 2518 // Generate generic array copy stubs 2519 // 2520 // Input: 2521 // c_rarg0 - src oop 2522 // c_rarg1 - src_pos (32-bits) 2523 // c_rarg2 - dst oop 2524 // c_rarg3 - dst_pos (32-bits) 2525 // not Win64 2526 // c_rarg4 - element count (32-bits) 2527 // Win64 2528 // rsp+40 - element count (32-bits) 2529 // 2530 // Output: 2531 // rax == 0 - success 2532 // rax == -1^K - failure, where K is partial transfer count 2533 // 2534 address generate_generic_copy(const char *name, 2535 address byte_copy_entry, address short_copy_entry, 2536 address int_copy_entry, address oop_copy_entry, 2537 address long_copy_entry, address checkcast_copy_entry) { 2538 2539 Label L_failed, L_failed_0, L_objArray; 2540 Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs; 2541 2542 // Input registers 2543 const Register src = c_rarg0; // source array oop 2544 const Register src_pos = c_rarg1; // source position 2545 const Register dst = c_rarg2; // destination array oop 2546 const Register dst_pos = c_rarg3; // destination position 2547 #ifndef _WIN64 2548 const Register length = c_rarg4; 2549 #else 2550 const Address length(rsp, 6 * wordSize); // elements count is on stack on Win64 2551 #endif 2552 2553 { int modulus = CodeEntryAlignment; 2554 int target = modulus - 5; // 5 = sizeof jmp(L_failed) 2555 int advance = target - (__ offset() % modulus); 2556 if (advance < 0) advance += modulus; 2557 if (advance > 0) __ nop(advance); 2558 } 2559 StubCodeMark mark(this, "StubRoutines", name); 2560 2561 // Short-hop target to L_failed. Makes for denser prologue code. 2562 __ BIND(L_failed_0); 2563 __ jmp(L_failed); 2564 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed"); 2565 2566 __ align(CodeEntryAlignment); 2567 address start = __ pc(); 2568 2569 __ enter(); // required for proper stackwalking of RuntimeStub frame 2570 2571 // bump this on entry, not on exit: 2572 inc_counter_np(SharedRuntime::_generic_array_copy_ctr); 2573 2574 //----------------------------------------------------------------------- 2575 // Assembler stub will be used for this call to arraycopy 2576 // if the following conditions are met: 2577 // 2578 // (1) src and dst must not be null. 2579 // (2) src_pos must not be negative. 2580 // (3) dst_pos must not be negative. 2581 // (4) length must not be negative. 2582 // (5) src klass and dst klass should be the same and not NULL. 2583 // (6) src and dst should be arrays. 2584 // (7) src_pos + length must not exceed length of src. 2585 // (8) dst_pos + length must not exceed length of dst. 2586 // 2587 2588 // if (src == NULL) return -1; 2589 __ testptr(src, src); // src oop 2590 size_t j1off = __ offset(); 2591 __ jccb(Assembler::zero, L_failed_0); 2592 2593 // if (src_pos < 0) return -1; 2594 __ testl(src_pos, src_pos); // src_pos (32-bits) 2595 __ jccb(Assembler::negative, L_failed_0); 2596 2597 // if (dst == NULL) return -1; 2598 __ testptr(dst, dst); // dst oop 2599 __ jccb(Assembler::zero, L_failed_0); 2600 2601 // if (dst_pos < 0) return -1; 2602 __ testl(dst_pos, dst_pos); // dst_pos (32-bits) 2603 size_t j4off = __ offset(); 2604 __ jccb(Assembler::negative, L_failed_0); 2605 2606 // The first four tests are very dense code, 2607 // but not quite dense enough to put four 2608 // jumps in a 16-byte instruction fetch buffer. 
2609 // That's good, because some branch predictors
2610 // do not like jumps so close together.
2611 // Make sure of this.
2612 guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");
2613
2614 // registers used as temp
2615 const Register r11_length = r11; // elements count to copy
2616 const Register r10_src_klass = r10; // array klass
2617
2618 // if (length < 0) return -1;
2619 __ movl(r11_length, length); // length (elements count, 32-bit value)
2620 __ testl(r11_length, r11_length);
2621 __ jccb(Assembler::negative, L_failed_0);
2622
2623 __ load_klass(r10_src_klass, src);
2624 #ifdef ASSERT
2625 // assert(src->klass() != NULL);
2626 {
2627 BLOCK_COMMENT("assert klasses not null {");
2628 Label L1, L2;
2629 __ testptr(r10_src_klass, r10_src_klass);
2630 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL
2631 __ bind(L1);
2632 __ stop("broken null klass");
2633 __ bind(L2);
2634 __ load_klass(rax, dst);
2635 __ cmpq(rax, 0);
2636 __ jcc(Assembler::equal, L1); // this would be broken also
2637 BLOCK_COMMENT("} assert klasses not null done");
2638 }
2639 #endif
2640
2641 // Load layout helper (32-bits)
2642 //
2643 // |array_tag| | header_size | element_type | |log2_element_size|
2644 // 32 30 24 16 8 2 0
2645 //
2646 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2647 //
2648
2649 const int lh_offset = in_bytes(Klass::layout_helper_offset());
2650
2651 // Handle objArrays completely differently...
2652 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2653 __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh);
2654 __ jcc(Assembler::equal, L_objArray);
2655
2656 // if (src->klass() != dst->klass()) return -1;
2657 __ load_klass(rax, dst);
2658 __ cmpq(r10_src_klass, rax);
2659 __ jcc(Assembler::notEqual, L_failed);
2660
2661 const Register rax_lh = rax; // layout helper
2662 __ movl(rax_lh, Address(r10_src_klass, lh_offset));
2663
2664 // if (!src->is_Array()) return -1;
2665 __ cmpl(rax_lh, Klass::_lh_neutral_value);
2666 __ jcc(Assembler::greaterEqual, L_failed);
2667
2668 // At this point, it is known to be a typeArray (array_tag 0x3).
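// (sketch) The layout-helper decoding done below computes, in C terms:
//   int hsize = (lh >> Klass::_lh_header_size_shift) & Klass::_lh_header_size_mask;
//   int l2es  = lh & Klass::_lh_log2_element_size_mask;   // log2(element size)
//   src_addr  = src + hsize + ((size_t)src_pos << l2es);
//   dst_addr  = dst + hsize + ((size_t)dst_pos << l2es);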
2669 #ifdef ASSERT 2670 { 2671 BLOCK_COMMENT("assert primitive array {"); 2672 Label L; 2673 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift)); 2674 __ jcc(Assembler::greaterEqual, L); 2675 __ stop("must be a primitive array"); 2676 __ bind(L); 2677 BLOCK_COMMENT("} assert primitive array done"); 2678 } 2679 #endif 2680 2681 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2682 r10, L_failed); 2683 2684 // TypeArrayKlass 2685 // 2686 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 2687 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 2688 // 2689 2690 const Register r10_offset = r10; // array offset 2691 const Register rax_elsize = rax_lh; // element size 2692 2693 __ movl(r10_offset, rax_lh); 2694 __ shrl(r10_offset, Klass::_lh_header_size_shift); 2695 __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset 2696 __ addptr(src, r10_offset); // src array offset 2697 __ addptr(dst, r10_offset); // dst array offset 2698 BLOCK_COMMENT("choose copy loop based on element size"); 2699 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize 2700 2701 // next registers should be set before the jump to corresponding stub 2702 const Register from = c_rarg0; // source array address 2703 const Register to = c_rarg1; // destination array address 2704 const Register count = c_rarg2; // elements count 2705 2706 // 'from', 'to', 'count' registers should be set in such order 2707 // since they are the same as 'src', 'src_pos', 'dst'. 2708 2709 __ BIND(L_copy_bytes); 2710 __ cmpl(rax_elsize, 0); 2711 __ jccb(Assembler::notEqual, L_copy_shorts); 2712 __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr 2713 __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr 2714 __ movl2ptr(count, r11_length); // length 2715 __ jump(RuntimeAddress(byte_copy_entry)); 2716 2717 __ BIND(L_copy_shorts); 2718 __ cmpl(rax_elsize, LogBytesPerShort); 2719 __ jccb(Assembler::notEqual, L_copy_ints); 2720 __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr 2721 __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr 2722 __ movl2ptr(count, r11_length); // length 2723 __ jump(RuntimeAddress(short_copy_entry)); 2724 2725 __ BIND(L_copy_ints); 2726 __ cmpl(rax_elsize, LogBytesPerInt); 2727 __ jccb(Assembler::notEqual, L_copy_longs); 2728 __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr 2729 __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr 2730 __ movl2ptr(count, r11_length); // length 2731 __ jump(RuntimeAddress(int_copy_entry)); 2732 2733 __ BIND(L_copy_longs); 2734 #ifdef ASSERT 2735 { 2736 BLOCK_COMMENT("assert long copy {"); 2737 Label L; 2738 __ cmpl(rax_elsize, LogBytesPerLong); 2739 __ jcc(Assembler::equal, L); 2740 __ stop("must be long copy, but elsize is wrong"); 2741 __ bind(L); 2742 BLOCK_COMMENT("} assert long copy done"); 2743 } 2744 #endif 2745 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr 2746 __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr 2747 __ movl2ptr(count, r11_length); // length 2748 __ jump(RuntimeAddress(long_copy_entry)); 2749 2750 // ObjArrayKlass 2751 __ BIND(L_objArray); 2752 // live at this point: r10_src_klass, r11_length, src[_pos], dst[_pos] 2753 2754 Label L_plain_copy, L_checkcast_copy; 2755 // test array classes for subtyping 2756 __ load_klass(rax, dst); 2757 __ cmpq(r10_src_klass, rax); // usual case is exact equality 2758 __ 
jcc(Assembler::notEqual, L_checkcast_copy); 2759 2760 // Identically typed arrays can be copied without element-wise checks. 2761 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2762 r10, L_failed); 2763 2764 __ lea(from, Address(src, src_pos, TIMES_OOP, 2765 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr 2766 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2767 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr 2768 __ movl2ptr(count, r11_length); // length 2769 __ BIND(L_plain_copy); 2770 __ jump(RuntimeAddress(oop_copy_entry)); 2771 2772 __ BIND(L_checkcast_copy); 2773 // live at this point: r10_src_klass, r11_length, rax (dst_klass) 2774 { 2775 // Before looking at dst.length, make sure dst is also an objArray. 2776 __ cmpl(Address(rax, lh_offset), objArray_lh); 2777 __ jcc(Assembler::notEqual, L_failed); 2778 2779 // It is safe to examine both src.length and dst.length. 2780 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2781 rax, L_failed); 2782 2783 const Register r11_dst_klass = r11; 2784 __ load_klass(r11_dst_klass, dst); // reload 2785 2786 // Marshal the base address arguments now, freeing registers. 2787 __ lea(from, Address(src, src_pos, TIMES_OOP, 2788 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2789 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2790 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2791 __ movl(count, length); // length (reloaded) 2792 Register sco_temp = c_rarg3; // this register is free now 2793 assert_different_registers(from, to, count, sco_temp, 2794 r11_dst_klass, r10_src_klass); 2795 assert_clean_int(count, sco_temp); 2796 2797 // Generate the type check. 2798 const int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2799 __ movl(sco_temp, Address(r11_dst_klass, sco_offset)); 2800 assert_clean_int(sco_temp, rax); 2801 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy); 2802 2803 // Fetch destination element klass from the ObjArrayKlass header. 2804 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 2805 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset)); 2806 __ movl( sco_temp, Address(r11_dst_klass, sco_offset)); 2807 assert_clean_int(sco_temp, rax); 2808 2809 // the checkcast_copy loop needs two extra arguments: 2810 assert(c_rarg3 == sco_temp, "#3 already in place"); 2811 // Set up arguments for checkcast_copy_entry. 
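// (sketch) The jump below enters checkcast_copy past its own argument setup, so
// the effective register contract at checkcast_copy_entry is:
//   rdi = from, rsi = to, rdx = length, rcx = ckoff (super_check_offset),
//   r8 = ckval (super_klass)
// setup_arg_regs(4) fills the first four from c_rarg0..c_rarg3; r8 is loaded next.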
2812 setup_arg_regs(4); 2813 __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris 2814 __ jump(RuntimeAddress(checkcast_copy_entry)); 2815 } 2816 2817 __ BIND(L_failed); 2818 __ xorptr(rax, rax); 2819 __ notptr(rax); // return -1 2820 __ leave(); // required for proper stackwalking of RuntimeStub frame 2821 __ ret(0); 2822 2823 return start; 2824 } 2825 2826 void generate_arraycopy_stubs() { 2827 address entry; 2828 address entry_jbyte_arraycopy; 2829 address entry_jshort_arraycopy; 2830 address entry_jint_arraycopy; 2831 address entry_oop_arraycopy; 2832 address entry_jlong_arraycopy; 2833 address entry_checkcast_arraycopy; 2834 2835 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, 2836 "jbyte_disjoint_arraycopy"); 2837 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy, 2838 "jbyte_arraycopy"); 2839 2840 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, 2841 "jshort_disjoint_arraycopy"); 2842 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy, 2843 "jshort_arraycopy"); 2844 2845 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry, 2846 "jint_disjoint_arraycopy"); 2847 StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry, 2848 &entry_jint_arraycopy, "jint_arraycopy"); 2849 2850 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry, 2851 "jlong_disjoint_arraycopy"); 2852 StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry, 2853 &entry_jlong_arraycopy, "jlong_arraycopy"); 2854 2855 2856 if (UseCompressedOops) { 2857 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry, 2858 "oop_disjoint_arraycopy"); 2859 StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry, 2860 &entry_oop_arraycopy, "oop_arraycopy"); 2861 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry, 2862 "oop_disjoint_arraycopy_uninit", 2863 /*dest_uninitialized*/true); 2864 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry, 2865 NULL, "oop_arraycopy_uninit", 2866 /*dest_uninitialized*/true); 2867 } else { 2868 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry, 2869 "oop_disjoint_arraycopy"); 2870 StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry, 2871 &entry_oop_arraycopy, "oop_arraycopy"); 2872 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry, 2873 "oop_disjoint_arraycopy_uninit", 2874 /*dest_uninitialized*/true); 2875 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry, 2876 NULL, "oop_arraycopy_uninit", 2877 /*dest_uninitialized*/true); 2878 } 2879 2880 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 2881 StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, 2882 /*dest_uninitialized*/true); 2883 2884 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", 2885 entry_jbyte_arraycopy, 2886 entry_jshort_arraycopy, 2887 entry_jint_arraycopy, 2888 entry_jlong_arraycopy); 2889 StubRoutines::_generic_arraycopy = 
generate_generic_copy("generic_arraycopy",
2890 entry_jbyte_arraycopy,
2891 entry_jshort_arraycopy,
2892 entry_jint_arraycopy,
2893 entry_oop_arraycopy,
2894 entry_jlong_arraycopy,
2895 entry_checkcast_arraycopy);
2896
2897 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
2898 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
2899 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
2900 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
2901 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
2902 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
2903
2904 // We don't generate specialized code for HeapWord-aligned source
2905 // arrays, so just use the code we've already generated
2906 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy;
2907 StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy;
2908
2909 StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy;
2910 StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy;
2911
2912 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy;
2913 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy;
2914
2915 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy;
2916 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy;
2917
2918 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy;
2919 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy;
2920
2921 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit;
2922 StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit;
2923 }
2924
2925 // AES intrinsic stubs
2926 enum {AESBlockSize = 16};
2927
2928 address generate_key_shuffle_mask() {
2929 __ align(16);
2930 StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask");
2931 address start = __ pc();
2932 __ emit_data64( 0x0405060700010203, relocInfo::none );
2933 __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none );
2934 return start;
2935 }
2936
2937 address generate_counter_shuffle_mask() {
2938 __ align(16);
2939 StubCodeMark mark(this, "StubRoutines", "counter_shuffle_mask");
2940 address start = __ pc();
2941 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
2942 __ emit_data64(0x0001020304050607, relocInfo::none);
2943 return start;
2944 }
2945
2946 // Utility routine for loading a 128-bit key word in little-endian format;
2947 // can optionally specify that the shuffle mask is already in an xmm register
2948 void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
2949 __ movdqu(xmmdst, Address(key, offset));
2950 if (xmm_shuf_mask != NULL) {
2951 __ pshufb(xmmdst, xmm_shuf_mask);
2952 } else {
2953 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
2954 }
2955 }
2956
2957 // Utility routine for incrementing a 128-bit counter (the iv in CTR mode)
2958 void inc_counter(Register reg, XMMRegister xmmdst, int inc_delta, Label& next_block) {
2959 __ pextrq(reg, xmmdst, 0x0);
2960 __ addq(reg, inc_delta);
2961 __ pinsrq(xmmdst, reg, 0x0);
2962 __ jcc(Assembler::carryClear, next_block); // jump if no carry
2963 __ pextrq(reg, xmmdst, 0x01); // Carry
2964 __ addq(reg, 0x01);
2965 __
pinsrq(xmmdst, reg, 0x01); //Carry end 2966 __ BIND(next_block); // next instruction 2967 } 2968 2969 // Arguments: 2970 // 2971 // Inputs: 2972 // c_rarg0 - source byte array address 2973 // c_rarg1 - destination byte array address 2974 // c_rarg2 - K (key) in little endian int array 2975 // 2976 address generate_aescrypt_encryptBlock() { 2977 assert(UseAES, "need AES instructions and misaligned SSE support"); 2978 __ align(CodeEntryAlignment); 2979 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); 2980 Label L_doLast; 2981 address start = __ pc(); 2982 2983 const Register from = c_rarg0; // source array address 2984 const Register to = c_rarg1; // destination array address 2985 const Register key = c_rarg2; // key array address 2986 const Register keylen = rax; 2987 2988 const XMMRegister xmm_result = xmm0; 2989 const XMMRegister xmm_key_shuf_mask = xmm1; 2990 // On win64 xmm6-xmm15 must be preserved so don't use them. 2991 const XMMRegister xmm_temp1 = xmm2; 2992 const XMMRegister xmm_temp2 = xmm3; 2993 const XMMRegister xmm_temp3 = xmm4; 2994 const XMMRegister xmm_temp4 = xmm5; 2995 2996 __ enter(); // required for proper stackwalking of RuntimeStub frame 2997 2998 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 2999 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3000 3001 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3002 __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input 3003 3004 // For encryption, the java expanded key ordering is just what we need 3005 // we don't know if the key is aligned, hence not using load-execute form 3006 3007 load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask); 3008 __ pxor(xmm_result, xmm_temp1); 3009 3010 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3011 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3012 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3013 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3014 3015 __ aesenc(xmm_result, xmm_temp1); 3016 __ aesenc(xmm_result, xmm_temp2); 3017 __ aesenc(xmm_result, xmm_temp3); 3018 __ aesenc(xmm_result, xmm_temp4); 3019 3020 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3021 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3022 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3023 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3024 3025 __ aesenc(xmm_result, xmm_temp1); 3026 __ aesenc(xmm_result, xmm_temp2); 3027 __ aesenc(xmm_result, xmm_temp3); 3028 __ aesenc(xmm_result, xmm_temp4); 3029 3030 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3031 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3032 3033 __ cmpl(keylen, 44); 3034 __ jccb(Assembler::equal, L_doLast); 3035 3036 __ aesenc(xmm_result, xmm_temp1); 3037 __ aesenc(xmm_result, xmm_temp2); 3038 3039 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3040 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3041 3042 __ cmpl(keylen, 52); 3043 __ jccb(Assembler::equal, L_doLast); 3044 3045 __ aesenc(xmm_result, xmm_temp1); 3046 __ aesenc(xmm_result, xmm_temp2); 3047 3048 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3049 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3050 3051 __ BIND(L_doLast); 3052 __ aesenc(xmm_result, xmm_temp1); 3053 __ aesenclast(xmm_result, xmm_temp2); 3054 __ movdqu(Address(to, 0), xmm_result); // store the result 3055 __ xorptr(rax, rax); // return 0 3056 __ leave(); // required for proper stackwalking of RuntimeStub frame 3057 __ 
ret(0); 3058 3059 return start; 3060 } 3061 3062 3063 // Arguments: 3064 // 3065 // Inputs: 3066 // c_rarg0 - source byte array address 3067 // c_rarg1 - destination byte array address 3068 // c_rarg2 - K (key) in little endian int array 3069 // 3070 address generate_aescrypt_decryptBlock() { 3071 assert(UseAES, "need AES instructions and misaligned SSE support"); 3072 __ align(CodeEntryAlignment); 3073 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); 3074 Label L_doLast; 3075 address start = __ pc(); 3076 3077 const Register from = c_rarg0; // source array address 3078 const Register to = c_rarg1; // destination array address 3079 const Register key = c_rarg2; // key array address 3080 const Register keylen = rax; 3081 3082 const XMMRegister xmm_result = xmm0; 3083 const XMMRegister xmm_key_shuf_mask = xmm1; 3084 // On win64 xmm6-xmm15 must be preserved so don't use them. 3085 const XMMRegister xmm_temp1 = xmm2; 3086 const XMMRegister xmm_temp2 = xmm3; 3087 const XMMRegister xmm_temp3 = xmm4; 3088 const XMMRegister xmm_temp4 = xmm5; 3089 3090 __ enter(); // required for proper stackwalking of RuntimeStub frame 3091 3092 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3093 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3094 3095 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3096 __ movdqu(xmm_result, Address(from, 0)); 3097 3098 // for decryption java expanded key ordering is rotated one position from what we want 3099 // so we start from 0x10 here and hit 0x00 last 3100 // we don't know if the key is aligned, hence not using load-execute form 3101 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3102 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3103 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3104 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3105 3106 __ pxor (xmm_result, xmm_temp1); 3107 __ aesdec(xmm_result, xmm_temp2); 3108 __ aesdec(xmm_result, xmm_temp3); 3109 __ aesdec(xmm_result, xmm_temp4); 3110 3111 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3112 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3113 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3114 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3115 3116 __ aesdec(xmm_result, xmm_temp1); 3117 __ aesdec(xmm_result, xmm_temp2); 3118 __ aesdec(xmm_result, xmm_temp3); 3119 __ aesdec(xmm_result, xmm_temp4); 3120 3121 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3122 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3123 load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask); 3124 3125 __ cmpl(keylen, 44); 3126 __ jccb(Assembler::equal, L_doLast); 3127 3128 __ aesdec(xmm_result, xmm_temp1); 3129 __ aesdec(xmm_result, xmm_temp2); 3130 3131 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3132 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3133 3134 __ cmpl(keylen, 52); 3135 __ jccb(Assembler::equal, L_doLast); 3136 3137 __ aesdec(xmm_result, xmm_temp1); 3138 __ aesdec(xmm_result, xmm_temp2); 3139 3140 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3141 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3142 3143 __ BIND(L_doLast); 3144 __ aesdec(xmm_result, xmm_temp1); 3145 __ aesdec(xmm_result, xmm_temp2); 3146 3147 // for decryption the aesdeclast operation is always on key+0x00 3148 __ aesdeclast(xmm_result, xmm_temp3); 3149 __ movdqu(Address(to, 0), xmm_result); // store the result 3150 __ xorptr(rax, rax); // return 0 3151 __ leave(); // 
required for proper stackwalking of RuntimeStub frame 3152 __ ret(0); 3153 3154 return start; 3155 } 3156 3157 3158 // Arguments: 3159 // 3160 // Inputs: 3161 // c_rarg0 - source byte array address 3162 // c_rarg1 - destination byte array address 3163 // c_rarg2 - K (key) in little endian int array 3164 // c_rarg3 - r vector byte array address 3165 // c_rarg4 - input length 3166 // 3167 // Output: 3168 // rax - input length 3169 // 3170 address generate_cipherBlockChaining_encryptAESCrypt() { 3171 assert(UseAES, "need AES instructions and misaligned SSE support"); 3172 __ align(CodeEntryAlignment); 3173 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); 3174 address start = __ pc(); 3175 3176 Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256; 3177 const Register from = c_rarg0; // source array address 3178 const Register to = c_rarg1; // destination array address 3179 const Register key = c_rarg2; // key array address 3180 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3181 // and left with the results of the last encryption block 3182 #ifndef _WIN64 3183 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3184 #else 3185 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3186 const Register len_reg = r11; // pick the volatile windows register 3187 #endif 3188 const Register pos = rax; 3189 3190 // xmm register assignments for the loops below 3191 const XMMRegister xmm_result = xmm0; 3192 const XMMRegister xmm_temp = xmm1; 3193 // keys 0-10 preloaded into xmm2-xmm12 3194 const int XMM_REG_NUM_KEY_FIRST = 2; 3195 const int XMM_REG_NUM_KEY_LAST = 15; 3196 const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3197 const XMMRegister xmm_key10 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10); 3198 const XMMRegister xmm_key11 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11); 3199 const XMMRegister xmm_key12 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12); 3200 const XMMRegister xmm_key13 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13); 3201 3202 __ enter(); // required for proper stackwalking of RuntimeStub frame 3203 3204 #ifdef _WIN64 3205 // on win64, fill len_reg from stack position 3206 __ movl(len_reg, len_mem); 3207 #else 3208 __ push(len_reg); // Save 3209 #endif 3210 3211 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 3212 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3213 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0 3214 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) { 3215 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3216 offset += 0x10; 3217 } 3218 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 3219 3220 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3221 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3222 __ cmpl(rax, 44); 3223 __ jcc(Assembler::notEqual, L_key_192_256); 3224 3225 // 128 bit code follows here 3226 __ movptr(pos, 0); 3227 __ align(OptoLoopAlignment); 3228 3229 __ BIND(L_loopTop_128); 3230 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3231 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3232 __ pxor (xmm_result, xmm_key0); // do the 
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) {
      __ aesenc(xmm_result, as_XMMRegister(rnum));
    }
    __ aesenclast(xmm_result, xmm_key10);
    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);  // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_loopTop_128);

    __ BIND(L_exit);
    __ movdqu(Address(rvec, 0), xmm_result);  // final value of r stored in rvec of CipherBlockChaining object

#ifdef _WIN64
    __ movl(rax, len_mem);
#else
    __ pop(rax); // return length
#endif
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    __ BIND(L_key_192_256);
    // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
    load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask);
    __ cmpl(rax, 52);
    __ jcc(Assembler::notEqual, L_key_256);

    // 192-bit code follows here (could be changed to use more xmm registers)
    __ movptr(pos, 0);
    __ align(OptoLoopAlignment);

    __ BIND(L_loopTop_192);
    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));  // get next 16 bytes of input
    __ pxor  (xmm_result, xmm_temp);  // xor with the current r vector
    __ pxor  (xmm_result, xmm_key0);  // do the aes rounds
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) {
      __ aesenc(xmm_result, as_XMMRegister(rnum));
    }
    __ aesenclast(xmm_result, xmm_key12);
    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);  // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_loopTop_192);
    __ jmp(L_exit);

    __ BIND(L_key_256);
    // 256-bit code follows here (could be changed to use more xmm registers)
    load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask);
    __ movptr(pos, 0);
    __ align(OptoLoopAlignment);

    __ BIND(L_loopTop_256);
    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));  // get next 16 bytes of input
    __ pxor  (xmm_result, xmm_temp);  // xor with the current r vector
    __ pxor  (xmm_result, xmm_key0);  // do the aes rounds
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) {
      __ aesenc(xmm_result, as_XMMRegister(rnum));
    }
    load_key(xmm_temp, key, 0xe0);
    __ aesenclast(xmm_result, xmm_temp);
    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);  // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_loopTop_256);
    __ jmp(L_exit);

    return start;
  }

  // Safefetch stubs.
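  // SafeFetch is how the VM probes memory that may be unmapped: the load at
  // *fault_pc is registered as a safe-fetch site, and if it faults, the signal
  // handler resumes execution at *continuation_pc with the error value already
  // in the result register. A sketch of the resulting contract (illustration
  // only; can_read() stands in for the fault-and-resume path):
  //
  //   int SafeFetch32(int* adr, int errValue) {
  //     return can_read(adr) ? *adr : errValue;
  //   }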
  void generate_safefetch(const char* name, int size, address* entry,
                          address* fault_pc, address* continuation_pc) {
    // safefetch signatures:
    //   int      SafeFetch32(int*      adr, int      errValue);
    //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
    //
    // arguments:
    //   c_rarg0 = adr
    //   c_rarg1 = errValue
    //
    // result:
    //   rax = *adr or errValue

    StubCodeMark mark(this, "StubRoutines", name);

    // Entry point, pc or function descriptor.
    *entry = __ pc();

    // Load *adr into c_rarg1, may fault.
    *fault_pc = __ pc();
    switch (size) {
      case 4:
        // int32_t
        __ movl(c_rarg1, Address(c_rarg0, 0));
        break;
      case 8:
        // int64_t
        __ movq(c_rarg1, Address(c_rarg0, 0));
        break;
      default:
        ShouldNotReachHere();
    }

    // return errValue or *adr
    *continuation_pc = __ pc();
    __ movq(rax, c_rarg1);
    __ ret(0);
  }

  // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time
  // to hide instruction latency
  //
  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - r vector byte array address
  //   c_rarg4   - input length
  //
  // Output:
  //   rax       - input length
  //
  address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
    address start = __ pc();

    const Register from    = c_rarg0;  // source array address
    const Register to      = c_rarg1;  // destination array address
    const Register key     = c_rarg2;  // key array address
    const Register rvec    = c_rarg3;  // r byte array initialized from initvector array address
                                       // and left with the results of the last encryption block
#ifndef _WIN64
    const Register len_reg = c_rarg4;  // src len (must be multiple of blocksize 16)
#else
    const Address len_mem(rbp, 6 * wordSize);  // length is on stack on Win64
    const Register len_reg = r11;      // pick the volatile windows register
#endif
    const Register pos     = rax;

    const int PARALLEL_FACTOR = 4;
    const int ROUNDS[3] = { 10, 12, 14 };  // aes rounds for key128, key192, key256

    Label L_exit;
    Label L_singleBlock_loopTopHead[3];   // 128, 192, 256
    Label L_singleBlock_loopTopHead2[3];  // 128, 192, 256
    Label L_singleBlock_loopTop[3];       // 128, 192, 256
    Label L_multiBlock_loopTopHead[3];    // 128, 192, 256
    Label L_multiBlock_loopTop[3];        // 128, 192, 256

    // keys 0-10 preloaded into xmm5-xmm15
    const int XMM_REG_NUM_KEY_FIRST = 5;
    const int XMM_REG_NUM_KEY_LAST  = 15;
    const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
    const XMMRegister xmm_key_last  = as_XMMRegister(XMM_REG_NUM_KEY_LAST);

    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef _WIN64
    // on win64, fill len_reg from stack position
    __ movl(len_reg, len_mem);
#else
    __ push(len_reg); // Save
#endif
    __ push(rbx);
    // the java expanded key ordering is rotated one position from what we want
    // so we start from 0x10 here and hit 0x00 last
    const XMMRegister xmm_key_shuf_mask = xmm1;  // used temporarily to swap key bytes up front
    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    // load up xmm regs 5 thru 14 with keys 0x10 - 0xa0; the rotated key 0x00 goes into xmm15 last
    for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) {
      load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
      offset += 0x10;
    }
    load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask);

    const XMMRegister xmm_prev_block_cipher = xmm1;  // holds cipher of previous block

    // registers holding the four results in the parallelized loop
    const XMMRegister xmm_result0 = xmm0;
    const XMMRegister xmm_result1 = xmm2;
    const XMMRegister xmm_result2 = xmm3;
    const XMMRegister xmm_result3 = xmm4;

    __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00));  // initialize with initial rvec

    __ xorptr(pos, pos);

    // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256))
    __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
    __ cmpl(rbx, 52);
    __ jcc(Assembler::equal, L_multiBlock_loopTopHead[1]);
    __ cmpl(rbx, 60);
    __ jcc(Assembler::equal, L_multiBlock_loopTopHead[2]);

#define DoFour(opc, src_reg)       \
  __ opc(xmm_result0, src_reg);    \
  __ opc(xmm_result1, src_reg);    \
  __ opc(xmm_result2, src_reg);    \
  __ opc(xmm_result3, src_reg);    \

    for (int k = 0; k < 3; ++k) {
      __ BIND(L_multiBlock_loopTopHead[k]);
      if (k != 0) {
        __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize);  // see if at least 4 blocks left
        __ jcc(Assembler::less, L_singleBlock_loopTopHead2[k]);
      }
      if (k == 1) {
        __ subptr(rsp, 6 * wordSize);
        __ movdqu(Address(rsp, 0), xmm15);  // save last_key from xmm15
        load_key(xmm15, key, 0xb0);         // 0xb0; 192-bit key goes up to 0xc0
        __ movdqu(Address(rsp, 2 * wordSize), xmm15);
        load_key(xmm1, key, 0xc0);          // 0xc0;
        __ movdqu(Address(rsp, 4 * wordSize), xmm1);
      } else if (k == 2) {
        __ subptr(rsp, 10 * wordSize);
        __ movdqu(Address(rsp, 0), xmm15);  // save last_key from xmm15
        load_key(xmm15, key, 0xd0);         // 0xd0; 256-bit key goes up to 0xe0
        __ movdqu(Address(rsp, 6 * wordSize), xmm15);
        load_key(xmm1, key, 0xe0);          // 0xe0;
        __ movdqu(Address(rsp, 8 * wordSize), xmm1);
        load_key(xmm15, key, 0xb0);         // 0xb0;
        __ movdqu(Address(rsp, 2 * wordSize), xmm15);
        load_key(xmm1, key, 0xc0);          // 0xc0;
        __ movdqu(Address(rsp, 4 * wordSize), xmm1);
      }
      __ align(OptoLoopAlignment);
      __ BIND(L_multiBlock_loopTop[k]);
      __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize);  // see if at least 4 blocks left
      __ jcc(Assembler::less, L_singleBlock_loopTopHead[k]);

      if (k != 0) {
        __ movdqu(xmm15, Address(rsp, 2 * wordSize));
        __ movdqu(xmm1, Address(rsp, 4 * wordSize));
      }

      __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize));  // get next 4 blocks into xmm_result registers
      __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize));

      DoFour(pxor, xmm_key_first);
      if (k == 0) {
        for (int rnum = 1; rnum < ROUNDS[k]; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
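        // the final aesdeclast uses xmm_key_last, loaded above from key offset
        // 0x00 (the rotated position of the last round key)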
        DoFour(aesdeclast, xmm_key_last);
      } else if (k == 1) {
        for (int rnum = 1; rnum <= ROUNDS[k] - 2; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        __ movdqu(xmm_key_last, Address(rsp, 0));  // xmm15 needs to be loaded again.
        DoFour(aesdec, xmm1);  // key : 0xc0
        __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00));  // xmm1 needs to be loaded again
        DoFour(aesdeclast, xmm_key_last);
      } else if (k == 2) {
        for (int rnum = 1; rnum <= ROUNDS[k] - 4; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        DoFour(aesdec, xmm1);  // key : 0xc0
        __ movdqu(xmm15, Address(rsp, 6 * wordSize));
        __ movdqu(xmm1, Address(rsp, 8 * wordSize));
        DoFour(aesdec, xmm15);  // key : 0xd0
        __ movdqu(xmm_key_last, Address(rsp, 0));  // xmm15 needs to be loaded again.
        DoFour(aesdec, xmm1);   // key : 0xe0
        __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00));  // xmm1 needs to be loaded again
        DoFour(aesdeclast, xmm_key_last);
      }

      // for each result, xor with the r vector of the previous cipher block
      __ pxor(xmm_result0, xmm_prev_block_cipher);
      __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ pxor(xmm_result1, xmm_prev_block_cipher);
      __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ pxor(xmm_result2, xmm_prev_block_cipher);
      __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ pxor(xmm_result3, xmm_prev_block_cipher);
      __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3 * AESBlockSize));  // this will carry over to the next set of blocks
      if (k != 0) {
        __ movdqu(Address(rvec, 0x00), xmm_prev_block_cipher);
      }

      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);  // store 4 results into the next 64 bytes of output
      __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1);
      __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2);
      __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3);

      __ addptr(pos, PARALLEL_FACTOR * AESBlockSize);
      __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize);
      __ jmp(L_multiBlock_loopTop[k]);

      // registers used in the non-parallelized loops
      // xmm register assignments for the loops below
      const XMMRegister xmm_result = xmm0;
      const XMMRegister xmm_prev_block_cipher_save = xmm2;
      const XMMRegister xmm_key11 = xmm3;
      const XMMRegister xmm_key12 = xmm4;
      const XMMRegister key_tmp = xmm4;

      __ BIND(L_singleBlock_loopTopHead[k]);
      if (k == 1) {
        __ addptr(rsp, 6 * wordSize);
      } else if (k == 2) {
        __ addptr(rsp, 10 * wordSize);
      }
      __ cmpptr(len_reg, 0);  // any blocks left?
      __ jcc(Assembler::equal, L_exit);
      __ BIND(L_singleBlock_loopTopHead2[k]);
      if (k == 1) {
        load_key(xmm_key11, key, 0xb0);  // 0xb0; 192-bit key goes up to 0xc0
        load_key(xmm_key12, key, 0xc0);  // 0xc0;
      }
      if (k == 2) {
        load_key(xmm_key11, key, 0xb0);  // 0xb0; 256-bit key goes up to 0xe0
      }
      __ align(OptoLoopAlignment);
      __ BIND(L_singleBlock_loopTop[k]);
      __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));  // get next 16 bytes of cipher input
      __ movdqa(xmm_prev_block_cipher_save, xmm_result);  // save for next r vector
      __ pxor(xmm_result, xmm_key_first);  // do the aes dec rounds
      for (int rnum = 1; rnum <= 9 ; rnum++) {
        __ aesdec(xmm_result, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
      }
      if (k == 1) {
        __ aesdec(xmm_result, xmm_key11);
        __ aesdec(xmm_result, xmm_key12);
      }
      if (k == 2) {
        __ aesdec(xmm_result, xmm_key11);
        load_key(key_tmp, key, 0xc0);
        __ aesdec(xmm_result, key_tmp);
        load_key(key_tmp, key, 0xd0);
        __ aesdec(xmm_result, key_tmp);
        load_key(key_tmp, key, 0xe0);
        __ aesdec(xmm_result, key_tmp);
      }

      __ aesdeclast(xmm_result, xmm_key_last);  // xmm15 always came from key+0
      __ pxor(xmm_result, xmm_prev_block_cipher);  // xor with the current r vector
      __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);  // store into the next 16 bytes of output
      // no need to store r to memory until we exit
      __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save);  // set up next r vector with cipher input from this block
      __ addptr(pos, AESBlockSize);
      __ subptr(len_reg, AESBlockSize);
      __ jcc(Assembler::notEqual, L_singleBlock_loopTop[k]);
      if (k != 2) {
        __ jmp(L_exit);
      }
    } // for 128/192/256

    __ BIND(L_exit);
    __ movdqu(Address(rvec, 0), xmm_prev_block_cipher);  // final value of r stored in rvec of CipherBlockChaining object
    __ pop(rbx);
#ifdef _WIN64
    __ movl(rax, len_mem);
#else
    __ pop(rax); // return length
#endif
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  address generate_upper_word_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "upper_word_mask");
    address start = __ pc();
    __ emit_data64(0x0000000000000000, relocInfo::none);
    __ emit_data64(0xFFFFFFFF00000000, relocInfo::none);
    return start;
  }

  address generate_shuffle_byte_flip_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }

  // ofs and limit are used for multi-block byte arrays.
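  // When multi_block is true the stub loops over successive 64-byte blocks,
  // advancing ofs until it reaches limit, and returns the updated ofs (in rax)
  // so the Java caller can pick up where the stub stopped.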
  // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
  address generate_sha1_implCompress(bool multi_block, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf   = c_rarg0;
    Register state = c_rarg1;
    Register ofs   = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister abcd = xmm0;
    const XMMRegister e0 = xmm1;
    const XMMRegister e1 = xmm2;
    const XMMRegister msg0 = xmm3;

    const XMMRegister msg1 = xmm4;
    const XMMRegister msg2 = xmm5;
    const XMMRegister msg3 = xmm6;
    const XMMRegister shuf_mask = xmm7;

    __ enter();

    __ subptr(rsp, 4 * wordSize);

    __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask,
                 buf, state, ofs, limit, rsp, multi_block);

    __ addptr(rsp, 4 * wordSize);

    __ leave();
    __ ret(0);
    return start;
  }

  address generate_pshuffle_byte_flip_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask");
    address start = __ pc();
    __ emit_data64(0x0405060700010203, relocInfo::none);
    __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);

    if (VM_Version::supports_avx2()) {
      __ emit_data64(0x0405060700010203, relocInfo::none);  // second copy
      __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
      // _SHUF_00BA
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      // _SHUF_DC00
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
    }

    return start;
  }

  // Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
  address generate_pshuffle_byte_flip_mask_sha512() {
    __ align(32);
    StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask_sha512");
    address start = __ pc();
    if (VM_Version::supports_avx2()) {
      __ emit_data64(0x0001020304050607, relocInfo::none);  // PSHUFFLE_BYTE_FLIP_MASK
      __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
      __ emit_data64(0x1011121314151617, relocInfo::none);
      __ emit_data64(0x18191a1b1c1d1e1f, relocInfo::none);
      __ emit_data64(0x0000000000000000, relocInfo::none);  // MASK_YMM_LO
      __ emit_data64(0x0000000000000000, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
    }

    return start;
  }

  // ofs and limit are used for multi-block byte arrays.
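  // Dispatches to the SHA-extensions implementation when the CPU supports it,
  // otherwise to the AVX2 implementation (see the two branches below).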
  // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
  address generate_sha256_implCompress(bool multi_block, const char *name) {
    assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), "");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf   = c_rarg0;
    Register state = c_rarg1;
    Register ofs   = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister msg = xmm0;
    const XMMRegister state0 = xmm1;
    const XMMRegister state1 = xmm2;
    const XMMRegister msgtmp0 = xmm3;

    const XMMRegister msgtmp1 = xmm4;
    const XMMRegister msgtmp2 = xmm5;
    const XMMRegister msgtmp3 = xmm6;
    const XMMRegister msgtmp4 = xmm7;

    const XMMRegister shuf_mask = xmm8;

    __ enter();

    __ subptr(rsp, 4 * wordSize);

    if (VM_Version::supports_sha()) {
      __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
                     buf, state, ofs, limit, rsp, multi_block, shuf_mask);
    } else if (VM_Version::supports_avx2()) {
      __ sha256_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
                     buf, state, ofs, limit, rsp, multi_block, shuf_mask);
    }
    __ addptr(rsp, 4 * wordSize);
    __ vzeroupper();
    __ leave();
    __ ret(0);
    return start;
  }

  address generate_sha512_implCompress(bool multi_block, const char *name) {
    assert(VM_Version::supports_avx2(), "");
    assert(VM_Version::supports_bmi2(), "");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf   = c_rarg0;
    Register state = c_rarg1;
    Register ofs   = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister msg = xmm0;
    const XMMRegister state0 = xmm1;
    const XMMRegister state1 = xmm2;
    const XMMRegister msgtmp0 = xmm3;
    const XMMRegister msgtmp1 = xmm4;
    const XMMRegister msgtmp2 = xmm5;
    const XMMRegister msgtmp3 = xmm6;
    const XMMRegister msgtmp4 = xmm7;

    const XMMRegister shuf_mask = xmm8;

    __ enter();

    __ sha512_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
                   buf, state, ofs, limit, rsp, multi_block, shuf_mask);

    __ vzeroupper();
    __ leave();
    __ ret(0);
    return start;
  }

  // This is a version of CTR/AES crypt which does 6 blocks in a loop at a time
  // to hide instruction latency
  //
  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - counter vector byte array address
  //   Linux
  //     c_rarg4            - input length
  //     c_rarg5            - saved encryptedCounter start
  //     rbp + 6 * wordSize - saved used length
  //   Windows
  //     rbp + 6 * wordSize - input length
  //     rbp + 7 * wordSize - saved encryptedCounter start
  //     rbp + 8 * wordSize - saved used length
  //
  // Output:
  //   rax       - input length
  //
  address generate_counterMode_AESCrypt_Parallel() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt");
    address start = __ pc();
    const Register from = c_rarg0;  // source array address
    const Register to = c_rarg1;    // destination array address
    const Register key = c_rarg2;      // key array address
    const Register counter = c_rarg3;  // counter byte array initialized from counter array address
                                       // and updated with the incremented counter in the end
#ifndef _WIN64
    const Register len_reg = c_rarg4;
    const Register saved_encCounter_start = c_rarg5;
    const Register used_addr = r10;
    const Address  used_mem(rbp, 2 * wordSize);
    const Register used = r11;
#else
    const Address len_mem(rbp, 6 * wordSize);               // input length is on stack on Win64
    const Address saved_encCounter_mem(rbp, 7 * wordSize);  // saved encrypted counter is on stack on Win64
    const Address used_mem(rbp, 8 * wordSize);              // used length is on stack on Win64
    const Register len_reg = r10;  // pick the first volatile windows register
    const Register saved_encCounter_start = r11;
    const Register used_addr = r13;
    const Register used = r14;
#endif
    const Register pos = rax;

    const int PARALLEL_FACTOR = 6;
    const XMMRegister xmm_counter_shuf_mask = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;  // used temporarily to swap key bytes up front
    const XMMRegister xmm_curr_counter = xmm2;

    const XMMRegister xmm_key_tmp0 = xmm3;
    const XMMRegister xmm_key_tmp1 = xmm4;

    // registers holding the six results in the parallelized loop
    const XMMRegister xmm_result0 = xmm5;
    const XMMRegister xmm_result1 = xmm6;
    const XMMRegister xmm_result2 = xmm7;
    const XMMRegister xmm_result3 = xmm8;
    const XMMRegister xmm_result4 = xmm9;
    const XMMRegister xmm_result5 = xmm10;

    const XMMRegister xmm_from0 = xmm11;
    const XMMRegister xmm_from1 = xmm12;
    const XMMRegister xmm_from2 = xmm13;
    const XMMRegister xmm_from3 = xmm14;  // the last one is xmm14. we have to preserve it on WIN64.
    const XMMRegister xmm_from4 = xmm3;   // reuse xmm3 and xmm4; xmm_key_tmp0/1 are no longer needed once the input text is being loaded
    const XMMRegister xmm_from5 = xmm4;

    // for key_128, key_192, key_256
    const int rounds[3] = {10, 12, 14};
    Label L_exit_preLoop, L_preLoop_start;
    Label L_multiBlock_loopTop[3];
    Label L_singleBlockLoopTop[3];
    Label L__incCounter[3][6];      // for 6 blocks
    Label L__incCounter_single[3];  // for single block, key128, key192, key256
    Label L_processTail_insr[3], L_processTail_4_insr[3], L_processTail_2_insr[3], L_processTail_1_insr[3], L_processTail_exit_insr[3];
    Label L_processTail_4_extr[3], L_processTail_2_extr[3], L_processTail_1_extr[3], L_processTail_exit_extr[3];

    Label L_exit;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef _WIN64
    // allocate spill slots for r13, r14
    enum {
      saved_r13_offset,
      saved_r14_offset
    };
    __ subptr(rsp, 2 * wordSize);
    __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
    __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);

    // on win64, fill len_reg from stack position
    __ movl(len_reg, len_mem);
    __ movptr(saved_encCounter_start, saved_encCounter_mem);
    __ movptr(used_addr, used_mem);
    __ movl(used, Address(used_addr, 0));
#else
    __ push(len_reg); // Save
    __ movptr(used_addr, used_mem);
    __ movl(used, Address(used_addr, 0));
#endif

    __ push(rbx); // Save RBX
    __ movdqu(xmm_curr_counter, Address(counter, 0x00));  // initialize counter with initial counter
    __ movdqu(xmm_counter_shuf_mask, ExternalAddress(StubRoutines::x86::counter_shuffle_mask_addr()), pos);  // pos as scratch
    __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask);   // counter is shuffled
    __ movptr(pos, 0);

    // Use the partially used encrypted counter from the last invocation
    __ BIND(L_preLoop_start);
    __ cmpptr(used, 16);
    __ jcc(Assembler::aboveEqual, L_exit_preLoop);
    __ cmpptr(len_reg, 0);
    __ jcc(Assembler::lessEqual, L_exit_preLoop);
    __ movb(rbx, Address(saved_encCounter_start, used));
    __ xorb(rbx, Address(from, pos));
    __ movb(Address(to, pos), rbx);
    __ addptr(pos, 1);
    __ addptr(used, 1);
    __ subptr(len_reg, 1);

    __ jmp(L_preLoop_start);

    __ BIND(L_exit_preLoop);
    __ movl(Address(used_addr, 0), used);

    // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()), rbx);  // rbx as scratch
    __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
    __ cmpl(rbx, 52);
    __ jcc(Assembler::equal, L_multiBlock_loopTop[1]);
    __ cmpl(rbx, 60);
    __ jcc(Assembler::equal, L_multiBlock_loopTop[2]);

#define CTR_DoSix(opc, src_reg)    \
    __ opc(xmm_result0, src_reg);  \
    __ opc(xmm_result1, src_reg);  \
    __ opc(xmm_result2, src_reg);  \
    __ opc(xmm_result3, src_reg);  \
    __ opc(xmm_result4, src_reg);  \
    __ opc(xmm_result5, src_reg);

    // k == 0 :  generate code for key_128
    // k == 1 :  generate code for key_192
    // k == 2 :  generate code for key_256
    for (int k = 0; k < 3; ++k) {
      // multi blocks starts here
      __ align(OptoLoopAlignment);
      __ BIND(L_multiBlock_loopTop[k]);
      __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize);  // see if at least PARALLEL_FACTOR blocks left
      __ jcc(Assembler::less, L_singleBlockLoopTop[k]);
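      // CTR keystream blocks are independent of one another (block i is just
      // E_K(counter + i)), so six encryptions are kept in flight below to hide
      // the multi-cycle aesenc latency; the input text is only XORed in at the end.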
      load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);

      // load, then increase counters
      CTR_DoSix(movdqa, xmm_curr_counter);
      inc_counter(rbx, xmm_result1, 0x01, L__incCounter[k][0]);
      inc_counter(rbx, xmm_result2, 0x02, L__incCounter[k][1]);
      inc_counter(rbx, xmm_result3, 0x03, L__incCounter[k][2]);
      inc_counter(rbx, xmm_result4, 0x04, L__incCounter[k][3]);
      inc_counter(rbx, xmm_result5, 0x05, L__incCounter[k][4]);
      inc_counter(rbx, xmm_curr_counter, 0x06, L__incCounter[k][5]);
      CTR_DoSix(pshufb, xmm_counter_shuf_mask);  // after the increase, shuffle the counters back for PXOR
      CTR_DoSix(pxor, xmm_key_tmp0);             // PXOR with Round 0 key

      // load two ROUND_KEYs at a time
      for (int i = 1; i < rounds[k]; ) {
        load_key(xmm_key_tmp1, key, (0x10 * i), xmm_key_shuf_mask);
        load_key(xmm_key_tmp0, key, (0x10 * (i+1)), xmm_key_shuf_mask);
        CTR_DoSix(aesenc, xmm_key_tmp1);
        i++;
        if (i != rounds[k]) {
          CTR_DoSix(aesenc, xmm_key_tmp0);
        } else {
          CTR_DoSix(aesenclast, xmm_key_tmp0);
        }
        i++;
      }

      // get next PARALLEL_FACTOR blocks into xmm_result registers
      __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ movdqu(xmm_from1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ movdqu(xmm_from2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ movdqu(xmm_from3, Address(from, pos, Address::times_1, 3 * AESBlockSize));
      __ movdqu(xmm_from4, Address(from, pos, Address::times_1, 4 * AESBlockSize));
      __ movdqu(xmm_from5, Address(from, pos, Address::times_1, 5 * AESBlockSize));

      __ pxor(xmm_result0, xmm_from0);
      __ pxor(xmm_result1, xmm_from1);
      __ pxor(xmm_result2, xmm_from2);
      __ pxor(xmm_result3, xmm_from3);
      __ pxor(xmm_result4, xmm_from4);
      __ pxor(xmm_result5, xmm_from5);

      // store 6 results into the next 96 bytes of output
      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
      __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1);
      __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2);
      __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3);
      __ movdqu(Address(to, pos, Address::times_1, 4 * AESBlockSize), xmm_result4);
      __ movdqu(Address(to, pos, Address::times_1, 5 * AESBlockSize), xmm_result5);

      __ addptr(pos, PARALLEL_FACTOR * AESBlockSize);     // increase the length of crypt text
      __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // decrease the remaining length
      __ jmp(L_multiBlock_loopTop[k]);

      // singleBlock starts here
      __ align(OptoLoopAlignment);
      __ BIND(L_singleBlockLoopTop[k]);
      __ cmpptr(len_reg, 0);
      __ jcc(Assembler::lessEqual, L_exit);
      load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);
      __ movdqa(xmm_result0, xmm_curr_counter);
      inc_counter(rbx, xmm_curr_counter, 0x01, L__incCounter_single[k]);
      __ pshufb(xmm_result0, xmm_counter_shuf_mask);
      __ pxor(xmm_result0, xmm_key_tmp0);
      for (int i = 1; i < rounds[k]; i++) {
        load_key(xmm_key_tmp0, key, (0x10 * i), xmm_key_shuf_mask);
        __ aesenc(xmm_result0, xmm_key_tmp0);
      }
      load_key(xmm_key_tmp0, key, (rounds[k] * 0x10), xmm_key_shuf_mask);
      __ aesenclast(xmm_result0, xmm_key_tmp0);
      __ cmpptr(len_reg, AESBlockSize);
      __ jcc(Assembler::less, L_processTail_insr[k]);
      __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ pxor(xmm_result0, xmm_from0);
      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
      __ addptr(pos, AESBlockSize);
      __ subptr(len_reg, AESBlockSize);
      __ jmp(L_singleBlockLoopTop[k]);
      __ BIND(L_processTail_insr[k]);  // Process the tail part of the input array
      __ addptr(pos, len_reg);         // 1. Insert bytes from src array into xmm_from0 register
      __ testptr(len_reg, 8);
      __ jcc(Assembler::zero, L_processTail_4_insr[k]);
      __ subptr(pos, 8);
      __ pinsrq(xmm_from0, Address(from, pos), 0);
      __ BIND(L_processTail_4_insr[k]);
      __ testptr(len_reg, 4);
      __ jcc(Assembler::zero, L_processTail_2_insr[k]);
      __ subptr(pos, 4);
      __ pslldq(xmm_from0, 4);
      __ pinsrd(xmm_from0, Address(from, pos), 0);
      __ BIND(L_processTail_2_insr[k]);
      __ testptr(len_reg, 2);
      __ jcc(Assembler::zero, L_processTail_1_insr[k]);
      __ subptr(pos, 2);
      __ pslldq(xmm_from0, 2);
      __ pinsrw(xmm_from0, Address(from, pos), 0);
      __ BIND(L_processTail_1_insr[k]);
      __ testptr(len_reg, 1);
      __ jcc(Assembler::zero, L_processTail_exit_insr[k]);
      __ subptr(pos, 1);
      __ pslldq(xmm_from0, 1);
      __ pinsrb(xmm_from0, Address(from, pos), 0);
      __ BIND(L_processTail_exit_insr[k]);

      __ movdqu(Address(saved_encCounter_start, 0), xmm_result0);  // 2. Perform pxor of the encrypted counter and plaintext bytes.
      __ pxor(xmm_result0, xmm_from0);                             //    The encrypted counter is also saved for the next invocation.

      __ testptr(len_reg, 8);
      __ jcc(Assembler::zero, L_processTail_4_extr[k]);  // 3. Extract bytes from xmm_result0 into the dest. array
      __ pextrq(Address(to, pos), xmm_result0, 0);
      __ psrldq(xmm_result0, 8);
      __ addptr(pos, 8);
      __ BIND(L_processTail_4_extr[k]);
      __ testptr(len_reg, 4);
      __ jcc(Assembler::zero, L_processTail_2_extr[k]);
      __ pextrd(Address(to, pos), xmm_result0, 0);
      __ psrldq(xmm_result0, 4);
      __ addptr(pos, 4);
      __ BIND(L_processTail_2_extr[k]);
      __ testptr(len_reg, 2);
      __ jcc(Assembler::zero, L_processTail_1_extr[k]);
      __ pextrw(Address(to, pos), xmm_result0, 0);
      __ psrldq(xmm_result0, 2);
      __ addptr(pos, 2);
      __ BIND(L_processTail_1_extr[k]);
      __ testptr(len_reg, 1);
      __ jcc(Assembler::zero, L_processTail_exit_extr[k]);
      __ pextrb(Address(to, pos), xmm_result0, 0);

      __ BIND(L_processTail_exit_extr[k]);
      __ movl(Address(used_addr, 0), len_reg);
      __ jmp(L_exit);

    }

    __ BIND(L_exit);
    __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask);  // counter is shuffled back.
    __ movdqu(Address(counter, 0), xmm_curr_counter);    // save counter back
    __ pop(rbx); // pop the saved RBX.
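    // on Win64, also restore the r13/r14 spilled in the prologue and release the two spill slots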
#ifdef _WIN64
    __ movl(rax, len_mem);
    __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
    __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
    __ addptr(rsp, 2 * wordSize);
#else
    __ pop(rax); // return 'len'
#endif
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  // Apply one AES decryption round with the round key in xmm_reg to the eight
  // 512-bit ciphertext groups held in xmm1-xmm8.
  void roundDec(XMMRegister xmm_reg) {
    __ vaesdec(xmm1, xmm1, xmm_reg, Assembler::AVX_512bit);
    __ vaesdec(xmm2, xmm2, xmm_reg, Assembler::AVX_512bit);
    __ vaesdec(xmm3, xmm3, xmm_reg, Assembler::AVX_512bit);
    __ vaesdec(xmm4, xmm4, xmm_reg, Assembler::AVX_512bit);
    __ vaesdec(xmm5, xmm5, xmm_reg, Assembler::AVX_512bit);
    __ vaesdec(xmm6, xmm6, xmm_reg, Assembler::AVX_512bit);
    __ vaesdec(xmm7, xmm7, xmm_reg, Assembler::AVX_512bit);
    __ vaesdec(xmm8, xmm8, xmm_reg, Assembler::AVX_512bit);
  }

  // Same as roundDec, but for the final round (vaesdeclast).
  void roundDeclast(XMMRegister xmm_reg) {
    __ vaesdeclast(xmm1, xmm1, xmm_reg, Assembler::AVX_512bit);
    __ vaesdeclast(xmm2, xmm2, xmm_reg, Assembler::AVX_512bit);
    __ vaesdeclast(xmm3, xmm3, xmm_reg, Assembler::AVX_512bit);
    __ vaesdeclast(xmm4, xmm4, xmm_reg, Assembler::AVX_512bit);
    __ vaesdeclast(xmm5, xmm5, xmm_reg, Assembler::AVX_512bit);
    __ vaesdeclast(xmm6, xmm6, xmm_reg, Assembler::AVX_512bit);
    __ vaesdeclast(xmm7, xmm7, xmm_reg, Assembler::AVX_512bit);
    __ vaesdeclast(xmm8, xmm8, xmm_reg, Assembler::AVX_512bit);
  }

  // Load a 128-bit round key, byte-swap it, and broadcast it to all four
  // 128-bit lanes of the destination ZMM register.
  void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask = NULL) {
    __ movdqu(xmmdst, Address(key, offset));
    if (xmm_shuf_mask != NULL) {
      __ pshufb(xmmdst, xmm_shuf_mask);
    } else {
      __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    }
    __ evshufi64x2(xmmdst, xmmdst, xmmdst, 0x0, Assembler::AVX_512bit);
  }

  address generate_cipherBlockChaining_decryptVectorAESCrypt() {
    assert(VM_Version::supports_vaes(), "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
    address start = __ pc();

    const Register from    = c_rarg0;  // source array address
    const Register to      = c_rarg1;  // destination array address
    const Register key     = c_rarg2;  // key array address
    const Register rvec    = c_rarg3;  // r byte array initialized from initvector array address
                                       // and left with the results of the last encryption block
#ifndef _WIN64
    const Register len_reg = c_rarg4;  // src len (must be multiple of blocksize 16)
#else
    const Address len_mem(rbp, 6 * wordSize);  // length is on stack on Win64
    const Register len_reg = r11;      // pick the volatile windows register
#endif

    Label Loop, Loop1, L_128, L_256, L_192, KEY_192, KEY_256, Loop2, Lcbc_dec_rem_loop,
          Lcbc_dec_rem_last, Lcbc_dec_ret, Lcbc_dec_rem, Lcbc_exit;

    __ enter();

#ifdef _WIN64
    // on win64, fill len_reg from stack position
    __ movl(len_reg, len_mem);
#else
    __ push(len_reg); // Save
#endif
    __ push(rbx);
    __ vzeroupper();

    // Temporary variable declaration for swapping key bytes
    const XMMRegister xmm_key_shuf_mask = xmm1;
    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));

    // Calculate the number of rounds from the expanded key length in ints: 44 for 10 rounds, 52 for 12 rounds, 60 for 14 rounds
    const Register rounds = rbx;
    __ movl(rounds, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    const XMMRegister IV = xmm0;
    // Load IV and broadcast value to 512-bits
    __ evbroadcasti64x2(IV, Address(rvec, 0), Assembler::AVX_512bit);

    // Temporary variables for storing round keys
    const XMMRegister RK0 = xmm30;
    const XMMRegister RK1 = xmm9;
    const XMMRegister RK2 = xmm18;
    const XMMRegister RK3 = xmm19;
    const XMMRegister RK4 = xmm20;
    const XMMRegister RK5 = xmm21;
    const XMMRegister RK6 = xmm22;
    const XMMRegister RK7 = xmm23;
    const XMMRegister RK8 = xmm24;
    const XMMRegister RK9 = xmm25;
    const XMMRegister RK10 = xmm26;

    // Load and shuffle key
    // the java expanded key ordering is rotated one position from what we want
    // so we start from 1*16 here and hit 0*16 last
    ev_load_key(RK1, key, 1 * 16, xmm_key_shuf_mask);
    ev_load_key(RK2, key, 2 * 16, xmm_key_shuf_mask);
    ev_load_key(RK3, key, 3 * 16, xmm_key_shuf_mask);
    ev_load_key(RK4, key, 4 * 16, xmm_key_shuf_mask);
    ev_load_key(RK5, key, 5 * 16, xmm_key_shuf_mask);
    ev_load_key(RK6, key, 6 * 16, xmm_key_shuf_mask);
    ev_load_key(RK7, key, 7 * 16, xmm_key_shuf_mask);
    ev_load_key(RK8, key, 8 * 16, xmm_key_shuf_mask);
    ev_load_key(RK9, key, 9 * 16, xmm_key_shuf_mask);
    ev_load_key(RK10, key, 10 * 16, xmm_key_shuf_mask);
    ev_load_key(RK0, key, 0 * 16, xmm_key_shuf_mask);

    // Variables for storing source cipher text
    const XMMRegister S0 = xmm10;
    const XMMRegister S1 = xmm11;
    const XMMRegister S2 = xmm12;
    const XMMRegister S3 = xmm13;
    const XMMRegister S4 = xmm14;
    const XMMRegister S5 = xmm15;
    const XMMRegister S6 = xmm16;
    const XMMRegister S7 = xmm17;

    // Variables for storing decrypted text
    const XMMRegister B0 = xmm1;
    const XMMRegister B1 = xmm2;
    const XMMRegister B2 = xmm3;
    const XMMRegister B3 = xmm4;
    const XMMRegister B4 = xmm5;
    const XMMRegister B5 = xmm6;
    const XMMRegister B6 = xmm7;
    const XMMRegister B7 = xmm8;

    __ cmpl(rounds, 44);
    __ jcc(Assembler::greater, KEY_192);
    __ jmp(Loop);

    __ BIND(KEY_192);
    const XMMRegister RK11 = xmm27;
    const XMMRegister RK12 = xmm28;
    ev_load_key(RK11, key, 11 * 16, xmm_key_shuf_mask);
    ev_load_key(RK12, key, 12 * 16, xmm_key_shuf_mask);

    __ cmpl(rounds, 52);
    __ jcc(Assembler::greater, KEY_256);
    __ jmp(Loop);

    __ BIND(KEY_256);
    const XMMRegister RK13 = xmm29;
    const XMMRegister RK14 = xmm31;
    ev_load_key(RK13, key, 13 * 16, xmm_key_shuf_mask);
    ev_load_key(RK14, key, 14 * 16, xmm_key_shuf_mask);

    __ BIND(Loop);
    __ cmpl(len_reg, 512);
    __ jcc(Assembler::below, Lcbc_dec_rem);
    __ BIND(Loop1);
    __ subl(len_reg, 512);
    __ evmovdquq(S0, Address(from, 0 * 64), Assembler::AVX_512bit);
    __ evmovdquq(S1, Address(from, 1 * 64), Assembler::AVX_512bit);
    __ evmovdquq(S2, Address(from, 2 * 64), Assembler::AVX_512bit);
    __ evmovdquq(S3, Address(from, 3 * 64), Assembler::AVX_512bit);
    __ evmovdquq(S4, Address(from, 4 * 64), Assembler::AVX_512bit);
    __ evmovdquq(S5, Address(from, 5 * 64), Assembler::AVX_512bit);
    __ evmovdquq(S6, Address(from, 6 * 64), Assembler::AVX_512bit);
    __ evmovdquq(S7, Address(from, 7 * 64), Assembler::AVX_512bit);
    __ leaq(from, Address(from, 8 * 64));
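
    // CBC decryption parallelizes cleanly: P_i = D_K(C_i) xor C_{i-1} depends
    // only on ciphertext, so all 32 blocks (8 ZMM registers of 4 blocks each)
    // are kept in flight. The evalignq sequence below realigns the ciphertext
    // stream by one 128-bit block, leaving in IV and S0..S6 the predecessor of
    // every block being decrypted; Loop2 then XORs those into the round results.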
    __ evpxorq(B0, S0, RK1, Assembler::AVX_512bit);
    __ evpxorq(B1, S1, RK1, Assembler::AVX_512bit);
    __ evpxorq(B2, S2, RK1, Assembler::AVX_512bit);
    __ evpxorq(B3, S3, RK1, Assembler::AVX_512bit);
    __ evpxorq(B4, S4, RK1, Assembler::AVX_512bit);
    __ evpxorq(B5, S5, RK1, Assembler::AVX_512bit);
    __ evpxorq(B6, S6, RK1, Assembler::AVX_512bit);
    __ evpxorq(B7, S7, RK1, Assembler::AVX_512bit);

    __ evalignq(IV, S0, IV, 0x06);
    __ evalignq(S0, S1, S0, 0x06);
    __ evalignq(S1, S2, S1, 0x06);
    __ evalignq(S2, S3, S2, 0x06);
    __ evalignq(S3, S4, S3, 0x06);
    __ evalignq(S4, S5, S4, 0x06);
    __ evalignq(S5, S6, S5, 0x06);
    __ evalignq(S6, S7, S6, 0x06);

    roundDec(RK2);
    roundDec(RK3);
    roundDec(RK4);
    roundDec(RK5);
    roundDec(RK6);
    roundDec(RK7);
    roundDec(RK8);
    roundDec(RK9);
    roundDec(RK10);

    __ cmpl(rounds, 44);
    __ jcc(Assembler::belowEqual, L_128);
    roundDec(RK11);
    roundDec(RK12);

    __ cmpl(rounds, 52);
    __ jcc(Assembler::belowEqual, L_192);
    roundDec(RK13);
    roundDec(RK14);

    __ BIND(L_256);
    roundDeclast(RK0);
    __ jmp(Loop2);

    __ BIND(L_128);
    roundDeclast(RK0);
    __ jmp(Loop2);

    __ BIND(L_192);
    roundDeclast(RK0);

    __ BIND(Loop2);
    __ evpxorq(B0, B0, IV, Assembler::AVX_512bit);
    __ evpxorq(B1, B1, S0, Assembler::AVX_512bit);
    __ evpxorq(B2, B2, S1, Assembler::AVX_512bit);
    __ evpxorq(B3, B3, S2, Assembler::AVX_512bit);
    __ evpxorq(B4, B4, S3, Assembler::AVX_512bit);
    __ evpxorq(B5, B5, S4, Assembler::AVX_512bit);
    __ evpxorq(B6, B6, S5, Assembler::AVX_512bit);
    __ evpxorq(B7, B7, S6, Assembler::AVX_512bit);
    __ evmovdquq(IV, S7, Assembler::AVX_512bit);

    __ evmovdquq(Address(to, 0 * 64), B0, Assembler::AVX_512bit);
    __ evmovdquq(Address(to, 1 * 64), B1, Assembler::AVX_512bit);
    __ evmovdquq(Address(to, 2 * 64), B2, Assembler::AVX_512bit);
    __ evmovdquq(Address(to, 3 * 64), B3, Assembler::AVX_512bit);
    __ evmovdquq(Address(to, 4 * 64), B4, Assembler::AVX_512bit);
    __ evmovdquq(Address(to, 5 * 64), B5, Assembler::AVX_512bit);
    __ evmovdquq(Address(to, 6 * 64), B6, Assembler::AVX_512bit);
    __ evmovdquq(Address(to, 7 * 64), B7, Assembler::AVX_512bit);
    __ leaq(to, Address(to, 8 * 64));
    __ jmp(Loop);

    __ BIND(Lcbc_dec_rem);
    __ evshufi64x2(IV, IV, IV, 0x03, Assembler::AVX_512bit);

    __ BIND(Lcbc_dec_rem_loop);
    __ subl(len_reg, 16);
    __ jcc(Assembler::carrySet, Lcbc_dec_ret);

    __ movdqu(S0, Address(from, 0));
    __ evpxorq(B0, S0, RK1, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK2, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK3, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK4, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK5, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK6, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK7, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK8, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK9, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK10, Assembler::AVX_512bit);
    __ cmpl(rounds, 44);
    __ jcc(Assembler::belowEqual, Lcbc_dec_rem_last);

    __ vaesdec(B0, B0, RK11, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK12, Assembler::AVX_512bit);
    __ cmpl(rounds, 52);
    __ jcc(Assembler::belowEqual, Lcbc_dec_rem_last);

    __ vaesdec(B0, B0, RK13, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK14, Assembler::AVX_512bit);

    __ BIND(Lcbc_dec_rem_last);
    __ vaesdeclast(B0, B0, RK0, Assembler::AVX_512bit);

    __ evpxorq(B0, B0, IV, Assembler::AVX_512bit);
    __ evmovdquq(IV, S0, Assembler::AVX_512bit);
    __ movdqu(Address(to, 0), B0);
    __ leaq(from, Address(from, 16));
    __ leaq(to, Address(to, 16));
    __ jmp(Lcbc_dec_rem_loop);

    __ BIND(Lcbc_dec_ret);
    __ movdqu(Address(rvec, 0), IV);

    // Zero out the round keys
    __ evpxorq(RK0, RK0, RK0, Assembler::AVX_512bit);
    __ evpxorq(RK1, RK1, RK1, Assembler::AVX_512bit);
    __ evpxorq(RK2, RK2, RK2, Assembler::AVX_512bit);
    __ evpxorq(RK3, RK3, RK3, Assembler::AVX_512bit);
    __ evpxorq(RK4, RK4, RK4, Assembler::AVX_512bit);
    __ evpxorq(RK5, RK5, RK5, Assembler::AVX_512bit);
    __ evpxorq(RK6, RK6, RK6, Assembler::AVX_512bit);
    __ evpxorq(RK7, RK7, RK7, Assembler::AVX_512bit);
    __ evpxorq(RK8, RK8, RK8, Assembler::AVX_512bit);
    __ evpxorq(RK9, RK9, RK9, Assembler::AVX_512bit);
    __ evpxorq(RK10, RK10, RK10, Assembler::AVX_512bit);
    __ cmpl(rounds, 44);
    __ jcc(Assembler::belowEqual, Lcbc_exit);
    __ evpxorq(RK11, RK11, RK11, Assembler::AVX_512bit);
    __ evpxorq(RK12, RK12, RK12, Assembler::AVX_512bit);
    __ cmpl(rounds, 52);
    __ jcc(Assembler::belowEqual, Lcbc_exit);
    __ evpxorq(RK13, RK13, RK13, Assembler::AVX_512bit);
    __ evpxorq(RK14, RK14, RK14, Assembler::AVX_512bit);

    __ BIND(Lcbc_exit);
    __ pop(rbx);
#ifdef _WIN64
    __ movl(rax, len_mem);
#else
    __ pop(rax); // return length
#endif
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  // byte swap x86 long
  address generate_ghash_long_swap_mask() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask");
    address start = __ pc();
    __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none);
    __ emit_data64(0x0706050403020100, relocInfo::none);
    return start;
  }

  // byte swap x86 byte array
  address generate_ghash_byte_swap_mask() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }

  /* Single and multi-block ghash operations */
  address generate_ghash_processBlocks() {
    __ align(CodeEntryAlignment);
    Label L_ghash_loop, L_exit;
    StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
    address start = __ pc();

    const Register state   = c_rarg0;
    const Register subkeyH = c_rarg1;
    const Register data    = c_rarg2;
    const Register blocks  = c_rarg3;

    const XMMRegister xmm_temp0 = xmm0;
    const XMMRegister xmm_temp1 = xmm1;
    const XMMRegister xmm_temp2 = xmm2;
    const XMMRegister xmm_temp3 = xmm3;
    const XMMRegister xmm_temp4 = xmm4;
    const XMMRegister xmm_temp5 = xmm5;
    const XMMRegister xmm_temp6 = xmm6;
    const XMMRegister xmm_temp7 = xmm7;
    const XMMRegister xmm_temp8 = xmm8;
    const XMMRegister xmm_temp9 = xmm9;
    const XMMRegister xmm_temp10 = xmm10;

    __ enter();

    __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));

    __ movdqu(xmm_temp0, Address(state, 0));
    __ pshufb(xmm_temp0, xmm_temp10);

    __ BIND(L_ghash_loop);
    __ movdqu(xmm_temp2, Address(data, 0));
    __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr()));

    __ movdqu(xmm_temp1, Address(subkeyH, 0));
    __ pshufb(xmm_temp1, xmm_temp10);

    __ pxor(xmm_temp0, xmm_temp2);

    //
    // Multiply with the hash key
    //
    __ movdqu(xmm_temp3, xmm_temp0);
    __ pclmulqdq(xmm_temp3, xmm_temp1, 0);      // xmm3 holds a0*b0
    __ movdqu(xmm_temp4, xmm_temp0);
    __ pclmulqdq(xmm_temp4, xmm_temp1, 16);     // xmm4 holds a0*b1

    __ movdqu(xmm_temp5, xmm_temp0);
    __ pclmulqdq(xmm_temp5, xmm_temp1, 1);      // xmm5 holds a1*b0
    __ movdqu(xmm_temp6, xmm_temp0);
    __ pclmulqdq(xmm_temp6, xmm_temp1, 17);     // xmm6 holds a1*b1

    __ pxor(xmm_temp4, xmm_temp5);      // xmm4 holds a0*b1 + a1*b0

    __ movdqu(xmm_temp5, xmm_temp4);    // move the contents of xmm4 to xmm5
    __ psrldq(xmm_temp4, 8);    // shift xmm4 64 bits to the right
    __ pslldq(xmm_temp5, 8);    // shift xmm5 64 bits to the left
    __ pxor(xmm_temp3, xmm_temp5);
    __ pxor(xmm_temp6, xmm_temp4);      // Register pair <xmm6:xmm3> holds the result
                                        // of the carry-less multiplication of
                                        // xmm0 by xmm1.

    // We shift the result of the multiplication by one bit position
    // to the left to compensate for the fact that the bits are reversed.
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp8, xmm_temp6);
    __ pslld(xmm_temp3, 1);
    __ pslld(xmm_temp6, 1);
    __ psrld(xmm_temp7, 31);
    __ psrld(xmm_temp8, 31);
    __ movdqu(xmm_temp9, xmm_temp7);
    __ pslldq(xmm_temp8, 4);
    __ pslldq(xmm_temp7, 4);
    __ psrldq(xmm_temp9, 12);
    __ por(xmm_temp3, xmm_temp7);
    __ por(xmm_temp6, xmm_temp8);
    __ por(xmm_temp6, xmm_temp9);

    //
    // First phase of the reduction
    //
    // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts
    // independently.
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp8, xmm_temp3);
    __ movdqu(xmm_temp9, xmm_temp3);
    __ pslld(xmm_temp7, 31);    // packed left shift by 31
    __ pslld(xmm_temp8, 30);    // packed left shift by 30
    __ pslld(xmm_temp9, 25);    // packed left shift by 25
    __ pxor(xmm_temp7, xmm_temp8);      // xor the shifted versions
    __ pxor(xmm_temp7, xmm_temp9);
    __ movdqu(xmm_temp8, xmm_temp7);
    __ pslldq(xmm_temp7, 12);
    __ psrldq(xmm_temp8, 4);
    __ pxor(xmm_temp3, xmm_temp7);      // first phase of the reduction complete

    //
    // Second phase of the reduction
    //
    // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these
    // shift operations.
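    // (Together, the two reduction phases fold the 256-bit carry-less product
    // back into 128 bits modulo the GHASH polynomial x^128 + x^7 + x^2 + x + 1.
    // The shift counts 1, 2 and 7 below, and their 32-bit complements 31, 30
    // and 25 above, come from the polynomial's low-order terms x, x^2 and x^7.)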
    __ movdqu(xmm_temp2, xmm_temp3);
    __ movdqu(xmm_temp4, xmm_temp3);
    __ movdqu(xmm_temp5, xmm_temp3);
    __ psrld(xmm_temp2, 1);     // packed right shift by 1
    __ psrld(xmm_temp4, 2);     // packed right shift by 2
    __ psrld(xmm_temp5, 7);     // packed right shift by 7
    __ pxor(xmm_temp2, xmm_temp4);      // xor the shifted versions
    __ pxor(xmm_temp2, xmm_temp5);
    __ pxor(xmm_temp2, xmm_temp8);
    __ pxor(xmm_temp3, xmm_temp2);
    __ pxor(xmm_temp6, xmm_temp3);      // the result is in xmm6

    __ decrement(blocks);
    __ jcc(Assembler::zero, L_exit);
    __ movdqu(xmm_temp0, xmm_temp6);
    __ addptr(data, 16);
    __ jmp(L_ghash_loop);

    __ BIND(L_exit);
    __ pshufb(xmm_temp6, xmm_temp10);           // Byte swap 16-byte result
    __ movdqu(Address(state, 0), xmm_temp6);    // store the result
    __ leave();
    __ ret(0);
    return start;
  }

  // Base64 character set
  address base64_charset_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "base64_charset");
    address start = __ pc();
    __ emit_data64(0x0000004200000041, relocInfo::none);
    __ emit_data64(0x0000004400000043, relocInfo::none);
    __ emit_data64(0x0000004600000045, relocInfo::none);
    __ emit_data64(0x0000004800000047, relocInfo::none);
    __ emit_data64(0x0000004a00000049, relocInfo::none);
    __ emit_data64(0x0000004c0000004b, relocInfo::none);
    __ emit_data64(0x0000004e0000004d, relocInfo::none);
    __ emit_data64(0x000000500000004f, relocInfo::none);
    __ emit_data64(0x0000005200000051, relocInfo::none);
    __ emit_data64(0x0000005400000053, relocInfo::none);
    __ emit_data64(0x0000005600000055, relocInfo::none);
    __ emit_data64(0x0000005800000057, relocInfo::none);
    __ emit_data64(0x0000005a00000059, relocInfo::none);
    __ emit_data64(0x0000006200000061, relocInfo::none);
    __ emit_data64(0x0000006400000063, relocInfo::none);
    __ emit_data64(0x0000006600000065, relocInfo::none);
    __ emit_data64(0x0000006800000067, relocInfo::none);
    __ emit_data64(0x0000006a00000069, relocInfo::none);
    __ emit_data64(0x0000006c0000006b, relocInfo::none);
    __ emit_data64(0x0000006e0000006d, relocInfo::none);
    __ emit_data64(0x000000700000006f, relocInfo::none);
    __ emit_data64(0x0000007200000071, relocInfo::none);
    __ emit_data64(0x0000007400000073, relocInfo::none);
    __ emit_data64(0x0000007600000075, relocInfo::none);
    __ emit_data64(0x0000007800000077, relocInfo::none);
    __ emit_data64(0x0000007a00000079, relocInfo::none);
    __ emit_data64(0x0000003100000030, relocInfo::none);
    __ emit_data64(0x0000003300000032, relocInfo::none);
    __ emit_data64(0x0000003500000034, relocInfo::none);
    __ emit_data64(0x0000003700000036, relocInfo::none);
    __ emit_data64(0x0000003900000038, relocInfo::none);
    __ emit_data64(0x0000002f0000002b, relocInfo::none);
    return start;
  }

  // Base64 URL character set
  address base64url_charset_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "base64url_charset");
    address start = __ pc();
    __ emit_data64(0x0000004200000041, relocInfo::none);
    __ emit_data64(0x0000004400000043, relocInfo::none);
    __ emit_data64(0x0000004600000045, relocInfo::none);
    __ emit_data64(0x0000004800000047, relocInfo::none);
    __ emit_data64(0x0000004a00000049, relocInfo::none);
    __ emit_data64(0x0000004c0000004b, relocInfo::none);
    __ emit_data64(0x0000004e0000004d, relocInfo::none);
    __ emit_data64(0x000000500000004f, relocInfo::none);
    __ emit_data64(0x0000005200000051, relocInfo::none);
    __ emit_data64(0x0000005400000053, relocInfo::none);
    __ emit_data64(0x0000005600000055, relocInfo::none);
    __ emit_data64(0x0000005800000057, relocInfo::none);
    __ emit_data64(0x0000005a00000059, relocInfo::none);
    __ emit_data64(0x0000006200000061, relocInfo::none);
    __ emit_data64(0x0000006400000063, relocInfo::none);
    __ emit_data64(0x0000006600000065, relocInfo::none);
    __ emit_data64(0x0000006800000067, relocInfo::none);
    __ emit_data64(0x0000006a00000069, relocInfo::none);
    __ emit_data64(0x0000006c0000006b, relocInfo::none);
    __ emit_data64(0x0000006e0000006d, relocInfo::none);
    __ emit_data64(0x000000700000006f, relocInfo::none);
    __ emit_data64(0x0000007200000071, relocInfo::none);
    __ emit_data64(0x0000007400000073, relocInfo::none);
    __ emit_data64(0x0000007600000075, relocInfo::none);
    __ emit_data64(0x0000007800000077, relocInfo::none);
    __ emit_data64(0x0000007a00000079, relocInfo::none);
    __ emit_data64(0x0000003100000030, relocInfo::none);
    __ emit_data64(0x0000003300000032, relocInfo::none);
    __ emit_data64(0x0000003500000034, relocInfo::none);
    __ emit_data64(0x0000003700000036, relocInfo::none);
    __ emit_data64(0x0000003900000038, relocInfo::none);
    __ emit_data64(0x0000005f0000002d, relocInfo::none);

    return start;
  }

  address base64_bswap_mask_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "bswap_mask_base64");
    address start = __ pc();
    __ emit_data64(0x0504038002010080, relocInfo::none);
    __ emit_data64(0x0b0a098008070680, relocInfo::none);
    __ emit_data64(0x0908078006050480, relocInfo::none);
    __ emit_data64(0x0f0e0d800c0b0a80, relocInfo::none);
    __ emit_data64(0x0605048003020180, relocInfo::none);
    __ emit_data64(0x0c0b0a8009080780, relocInfo::none);
    __ emit_data64(0x0504038002010080, relocInfo::none);
    __ emit_data64(0x0b0a098008070680, relocInfo::none);

    return start;
  }

  address base64_right_shift_mask_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "right_shift_mask");
    address start = __ pc();
    __ emit_data64(0x0006000400020000, relocInfo::none);
    __ emit_data64(0x0006000400020000, relocInfo::none);
    __ emit_data64(0x0006000400020000, relocInfo::none);
    __ emit_data64(0x0006000400020000, relocInfo::none);
    __ emit_data64(0x0006000400020000, relocInfo::none);
    __ emit_data64(0x0006000400020000, relocInfo::none);
    __ emit_data64(0x0006000400020000, relocInfo::none);
    __ emit_data64(0x0006000400020000, relocInfo::none);

    return start;
  }

  address base64_left_shift_mask_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "left_shift_mask");
    address start = __ pc();
    __ emit_data64(0x0000000200040000, relocInfo::none);
    __ emit_data64(0x0000000200040000, relocInfo::none);
    __ emit_data64(0x0000000200040000, relocInfo::none);
    __ emit_data64(0x0000000200040000, relocInfo::none);
    __ emit_data64(0x0000000200040000, relocInfo::none);
    __ emit_data64(0x0000000200040000, relocInfo::none);
    __ emit_data64(0x0000000200040000, relocInfo::none);
    __ emit_data64(0x0000000200040000, relocInfo::none);

    return start;
  }
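
  // The shift tables above and the AND mask below implement the standard
  // 3-byte to 4-sextet split. For input bytes A, B, C the sextets are:
  //   s0 = A >> 2
  //   s1 = ((A & 0x03) << 4) | (B >> 4)
  //   s2 = ((B & 0x0f) << 2) | (C >> 6)
  //   s3 = C & 0x3f
  // After each byte is zero-extended to a word, the variable right/left shifts
  // (per-qword patterns {0,2,4,6} and {0,4,2,0}) position the high and low
  // pieces of each sextet so that generate_base64_encodeBlock below can combine
  // them with a shift/AND/OR sequence.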
  address base64_and_mask_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "and_mask");
    address start = __ pc();
    __ emit_data64(0x3f003f003f000000, relocInfo::none);
    __ emit_data64(0x3f003f003f000000, relocInfo::none);
    __ emit_data64(0x3f003f003f000000, relocInfo::none);
    __ emit_data64(0x3f003f003f000000, relocInfo::none);
    __ emit_data64(0x3f003f003f000000, relocInfo::none);
    __ emit_data64(0x3f003f003f000000, relocInfo::none);
    __ emit_data64(0x3f003f003f000000, relocInfo::none);
    __ emit_data64(0x3f003f003f000000, relocInfo::none);
    return start;
  }

  address base64_gather_mask_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "gather_mask");
    address start = __ pc();
    __ emit_data64(0xffffffffffffffff, relocInfo::none);
    return start;
  }

  // Code for generating Base64 encoding.
  // Intrinsic function prototype in Base64.java:
  // private void encodeBlock(byte[] src, int sp, int sl, byte[] dst, int dp, boolean isURL) {
  address generate_base64_encodeBlock() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "implEncode");
    address start = __ pc();
    __ enter();

    // Save callee-saved registers before using them
    __ push(r12);
    __ push(r13);
    __ push(r14);
    __ push(r15);

    // arguments
    const Register source       = c_rarg0; // Source Array
    const Register start_offset = c_rarg1; // start offset
    const Register end_offset   = c_rarg2; // end offset
    const Register dest         = c_rarg3; // destination array

#ifndef _WIN64
    const Register dp    = c_rarg4; // Position for writing to dest array
    const Register isURL = c_rarg5; // Base64 or URL character set
#else
    const Address dp_mem(rbp, 6 * wordSize);    // dp is passed on the stack on Win64
    const Address isURL_mem(rbp, 7 * wordSize);
    const Register isURL = r10;     // pick the volatile windows register
    const Register dp = r12;
    __ movl(dp, dp_mem);
    __ movl(isURL, isURL_mem);
#endif

    const Register length = r14;
    Label L_process80, L_process32, L_process3, L_exit, L_processdata;

    // calculate length from offsets
    __ movl(length, end_offset);
    __ subl(length, start_offset);
    __ cmpl(length, 0);
    __ jcc(Assembler::lessEqual, L_exit);

    __ lea(r11, ExternalAddress(StubRoutines::x86::base64_charset_addr()));
    // check whether the base64 charset (isURL = 0) or the base64 URL charset (isURL = 1) needs to be loaded
    __ cmpl(isURL, 0);
    __ jcc(Assembler::equal, L_processdata);
    __ lea(r11, ExternalAddress(StubRoutines::x86::base64url_charset_addr()));

    // load masks required for encoding data
    __ BIND(L_processdata);
    __ movdqu(xmm16, ExternalAddress(StubRoutines::x86::base64_gather_mask_addr()));
    // Set 64 bits of K register.
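    // (Comparing xmm16 with itself makes every byte lane compare equal, so k3
    //  becomes an all-ones opmask. It is copied into k2 before each gather
    //  below because evpgatherdd consumes the bits of its mask register.)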
    __ evpcmpeqb(k3, xmm16, xmm16, Assembler::AVX_512bit);
    __ evmovdquq(xmm12, ExternalAddress(StubRoutines::x86::base64_bswap_mask_addr()), Assembler::AVX_256bit, r13);
    __ evmovdquq(xmm13, ExternalAddress(StubRoutines::x86::base64_right_shift_mask_addr()), Assembler::AVX_512bit, r13);
    __ evmovdquq(xmm14, ExternalAddress(StubRoutines::x86::base64_left_shift_mask_addr()), Assembler::AVX_512bit, r13);
    __ evmovdquq(xmm15, ExternalAddress(StubRoutines::x86::base64_and_mask_addr()), Assembler::AVX_512bit, r13);

    // Vector Base64 implementation, producing 96 bytes of encoded data
    __ BIND(L_process80);
    __ cmpl(length, 80);
    __ jcc(Assembler::below, L_process32);
    __ evmovdquq(xmm0, Address(source, start_offset, Address::times_1, 0), Assembler::AVX_256bit);
    __ evmovdquq(xmm1, Address(source, start_offset, Address::times_1, 24), Assembler::AVX_256bit);
    __ evmovdquq(xmm2, Address(source, start_offset, Address::times_1, 48), Assembler::AVX_256bit);

    // permute the input data so that the source bytes are contiguous
    __ vpermq(xmm3, xmm0, 148, Assembler::AVX_256bit);
    __ vpermq(xmm4, xmm1, 148, Assembler::AVX_256bit);
    __ vpermq(xmm5, xmm2, 148, Assembler::AVX_256bit);

    // shuffle the input to group 3 bytes of data, appending 0 as the 4th byte;
    // this lets us deal with 12 bytes at a time in a 128-bit register
    __ vpshufb(xmm3, xmm3, xmm12, Assembler::AVX_256bit);
    __ vpshufb(xmm4, xmm4, xmm12, Assembler::AVX_256bit);
    __ vpshufb(xmm5, xmm5, xmm12, Assembler::AVX_256bit);

    // convert bytes to words; each 128-bit lane now holds 6 bytes for processing
    __ vpmovzxbw(xmm3, xmm3, Assembler::AVX_512bit);
    __ vpmovzxbw(xmm4, xmm4, Assembler::AVX_512bit);
    __ vpmovzxbw(xmm5, xmm5, Assembler::AVX_512bit);

    // Extract bits in the pattern 6, 4+2, 2+4, 6 to convert three 8-bit bytes
    // into four 6-bit values
    __ evpsrlvw(xmm0, xmm3, xmm13, Assembler::AVX_512bit);
    __ evpsrlvw(xmm1, xmm4, xmm13, Assembler::AVX_512bit);
    __ evpsrlvw(xmm2, xmm5, xmm13, Assembler::AVX_512bit);

    __ evpsllvw(xmm3, xmm3, xmm14, Assembler::AVX_512bit);
    __ evpsllvw(xmm4, xmm4, xmm14, Assembler::AVX_512bit);
    __ evpsllvw(xmm5, xmm5, xmm14, Assembler::AVX_512bit);

    __ vpsrlq(xmm0, xmm0, 8, Assembler::AVX_512bit);
    __ vpsrlq(xmm1, xmm1, 8, Assembler::AVX_512bit);
    __ vpsrlq(xmm2, xmm2, 8, Assembler::AVX_512bit);

    __ vpsllq(xmm3, xmm3, 8, Assembler::AVX_512bit);
    __ vpsllq(xmm4, xmm4, 8, Assembler::AVX_512bit);
    __ vpsllq(xmm5, xmm5, 8, Assembler::AVX_512bit);

    __ vpandq(xmm3, xmm3, xmm15, Assembler::AVX_512bit);
    __ vpandq(xmm4, xmm4, xmm15, Assembler::AVX_512bit);
    __ vpandq(xmm5, xmm5, xmm15, Assembler::AVX_512bit);

    // Get the final 4*6 bits base64 encoding
    __ vporq(xmm3, xmm3, xmm0, Assembler::AVX_512bit);
    __ vporq(xmm4, xmm4, xmm1, Assembler::AVX_512bit);
    __ vporq(xmm5, xmm5, xmm2, Assembler::AVX_512bit);

    // Shift
    __ vpsrlq(xmm3, xmm3, 8, Assembler::AVX_512bit);
    __ vpsrlq(xmm4, xmm4, 8, Assembler::AVX_512bit);
    __ vpsrlq(xmm5, xmm5, 8, Assembler::AVX_512bit);

    // look up each 6-bit value in the base64 character set to fetch the encoding;
    // we convert words to dwords because gather instructions need dword indices for the lookup
    __ vextracti64x4(xmm6, xmm3, 0);
    __ vpmovzxwd(xmm0, xmm6, Assembler::AVX_512bit);
    __ vextracti64x4(xmm6, xmm3, 1);
4810 __ vpmovzxwd(xmm1, xmm6, Assembler::AVX_512bit); 4811 4812 __ vextracti64x4(xmm6, xmm4, 0); 4813 __ vpmovzxwd(xmm2, xmm6, Assembler::AVX_512bit); 4814 __ vextracti64x4(xmm6, xmm4, 1); 4815 __ vpmovzxwd(xmm3, xmm6, Assembler::AVX_512bit); 4816 4817 __ vextracti64x4(xmm4, xmm5, 0); 4818 __ vpmovzxwd(xmm6, xmm4, Assembler::AVX_512bit); 4819 4820 __ vextracti64x4(xmm4, xmm5, 1); 4821 __ vpmovzxwd(xmm7, xmm4, Assembler::AVX_512bit); 4822 4823 __ kmovql(k2, k3); 4824 __ evpgatherdd(xmm4, k2, Address(r11, xmm0, Address::times_4, 0), Assembler::AVX_512bit); 4825 __ kmovql(k2, k3); 4826 __ evpgatherdd(xmm5, k2, Address(r11, xmm1, Address::times_4, 0), Assembler::AVX_512bit); 4827 __ kmovql(k2, k3); 4828 __ evpgatherdd(xmm8, k2, Address(r11, xmm2, Address::times_4, 0), Assembler::AVX_512bit); 4829 __ kmovql(k2, k3); 4830 __ evpgatherdd(xmm9, k2, Address(r11, xmm3, Address::times_4, 0), Assembler::AVX_512bit); 4831 __ kmovql(k2, k3); 4832 __ evpgatherdd(xmm10, k2, Address(r11, xmm6, Address::times_4, 0), Assembler::AVX_512bit); 4833 __ kmovql(k2, k3); 4834 __ evpgatherdd(xmm11, k2, Address(r11, xmm7, Address::times_4, 0), Assembler::AVX_512bit); 4835 4836 //Down convert dword to byte. Final output is 16*6 = 96 bytes long 4837 __ evpmovdb(Address(dest, dp, Address::times_1, 0), xmm4, Assembler::AVX_512bit); 4838 __ evpmovdb(Address(dest, dp, Address::times_1, 16), xmm5, Assembler::AVX_512bit); 4839 __ evpmovdb(Address(dest, dp, Address::times_1, 32), xmm8, Assembler::AVX_512bit); 4840 __ evpmovdb(Address(dest, dp, Address::times_1, 48), xmm9, Assembler::AVX_512bit); 4841 __ evpmovdb(Address(dest, dp, Address::times_1, 64), xmm10, Assembler::AVX_512bit); 4842 __ evpmovdb(Address(dest, dp, Address::times_1, 80), xmm11, Assembler::AVX_512bit); 4843 4844 __ addq(dest, 96); 4845 __ addq(source, 72); 4846 __ subq(length, 72); 4847 __ jmp(L_process80); 4848 4849 // Vector Base64 implementation generating 32 bytes of encoded data 4850 __ BIND(L_process32); 4851 __ cmpl(length, 32); 4852 __ jcc(Assembler::below, L_process3); 4853 __ evmovdquq(xmm0, Address(source, start_offset), Assembler::AVX_256bit); 4854 __ vpermq(xmm0, xmm0, 148, Assembler::AVX_256bit); 4855 __ vpshufb(xmm6, xmm0, xmm12, Assembler::AVX_256bit); 4856 __ vpmovzxbw(xmm6, xmm6, Assembler::AVX_512bit); 4857 __ evpsrlvw(xmm2, xmm6, xmm13, Assembler::AVX_512bit); 4858 __ evpsllvw(xmm3, xmm6, xmm14, Assembler::AVX_512bit); 4859 4860 __ vpsrlq(xmm2, xmm2, 8, Assembler::AVX_512bit); 4861 __ vpsllq(xmm3, xmm3, 8, Assembler::AVX_512bit); 4862 __ vpandq(xmm3, xmm3, xmm15, Assembler::AVX_512bit); 4863 __ vporq(xmm1, xmm2, xmm3, Assembler::AVX_512bit); 4864 __ vpsrlq(xmm1, xmm1, 8, Assembler::AVX_512bit); 4865 __ vextracti64x4(xmm9, xmm1, 0); 4866 __ vpmovzxwd(xmm6, xmm9, Assembler::AVX_512bit); 4867 __ vextracti64x4(xmm9, xmm1, 1); 4868 __ vpmovzxwd(xmm5, xmm9, Assembler::AVX_512bit); 4869 __ kmovql(k2, k3); 4870 __ evpgatherdd(xmm8, k2, Address(r11, xmm6, Address::times_4, 0), Assembler::AVX_512bit); 4871 __ kmovql(k2, k3); 4872 __ evpgatherdd(xmm10, k2, Address(r11, xmm5, Address::times_4, 0), Assembler::AVX_512bit); 4873 __ evpmovdb(Address(dest, dp, Address::times_1, 0), xmm8, Assembler::AVX_512bit); 4874 __ evpmovdb(Address(dest, dp, Address::times_1, 16), xmm10, Assembler::AVX_512bit); 4875 __ subq(length, 24); 4876 __ addq(dest, 32); 4877 __ addq(source, 24); 4878 __ jmp(L_process32); 4879 4880 // Scalar data processing takes 3 bytes at a time and produces 4 bytes of encoded data 4881 /* This code corresponds to the scalar version of the 
following snippet in Base64.java:
    ** int bits = (src[sp0++] & 0xff) << 16 | (src[sp0++] & 0xff) << 8 | (src[sp0++] & 0xff);
    ** dst[dp0++] = (byte)base64[(bits >>> 18) & 0x3f];
    ** dst[dp0++] = (byte)base64[(bits >>> 12) & 0x3f];
    ** dst[dp0++] = (byte)base64[(bits >>> 6) & 0x3f];
    ** dst[dp0++] = (byte)base64[bits & 0x3f]; */
    __ BIND(L_process3);
    __ cmpl(length, 3);
    __ jcc(Assembler::below, L_exit);
    // Read 1 byte at a time
    __ movzbl(rax, Address(source, start_offset));
    __ shll(rax, 0x10);
    __ movl(r15, rax);
    __ movzbl(rax, Address(source, start_offset, Address::times_1, 1));
    __ shll(rax, 0x8);
    __ movzwl(rax, rax);
    __ orl(r15, rax);
    __ movzbl(rax, Address(source, start_offset, Address::times_1, 2));
    __ orl(rax, r15);
    // Save the 3 bytes read in r15
    __ movl(r15, rax);
    __ shrl(rax, 0x12);
    __ andl(rax, 0x3f);
    // rax contains the index, r11 contains the base64 lookup table
    __ movb(rax, Address(r11, rax, Address::times_4));
    // Write the encoded byte to the destination
    __ movb(Address(dest, dp, Address::times_1, 0), rax);
    __ movl(rax, r15);
    __ shrl(rax, 0xc);
    __ andl(rax, 0x3f);
    __ movb(rax, Address(r11, rax, Address::times_4));
    __ movb(Address(dest, dp, Address::times_1, 1), rax);
    __ movl(rax, r15);
    __ shrl(rax, 0x6);
    __ andl(rax, 0x3f);
    __ movb(rax, Address(r11, rax, Address::times_4));
    __ movb(Address(dest, dp, Address::times_1, 2), rax);
    __ movl(rax, r15);
    __ andl(rax, 0x3f);
    __ movb(rax, Address(r11, rax, Address::times_4));
    __ movb(Address(dest, dp, Address::times_1, 3), rax);
    __ subl(length, 3);
    __ addq(dest, 4);
    __ addq(source, 3);
    __ jmp(L_process3);
    __ BIND(L_exit);
    __ pop(r15);
    __ pop(r14);
    __ pop(r13);
    __ pop(r12);
    __ leave();
    __ ret(0);
    return start;
  }

  /**
   * Arguments:
   *
   * Inputs:
   *   c_rarg0   - int crc
   *   c_rarg1   - byte* buf
   *   c_rarg2   - int length
   *
   * Output:
   *   rax       - int crc result
   */
  address generate_updateBytesCRC32() {
    assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
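    // For reference, kernel_crc32 produces the standard bit-reflected CRC-32
    // used by java.util.zip.CRC32 (and zlib). A minimal scalar sketch, assuming
    // the usual reflected polynomial 0xEDB88320 (illustrative only; the stub
    // itself uses a table/CLMUL-folded algorithm):
    //
    //   uint32_t crc32_ref(uint32_t crc, const uint8_t* buf, size_t len) {
    //     crc = ~crc;
    //     while (len--) {
    //       crc ^= *buf++;
    //       for (int i = 0; i < 8; i++)
    //         crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1u)));
    //     }
    //     return ~crc;
    //   }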
    // rscratch1: r10
    const Register crc   = c_rarg0; // crc
    const Register buf   = c_rarg1; // source java byte array address
    const Register len   = c_rarg2; // length
    const Register table = c_rarg3; // crc_table address (reuse register)
    const Register tmp   = r11;
    assert_different_registers(crc, buf, len, table, tmp, rax);

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ kernel_crc32(crc, buf, len, table, tmp);

    __ movl(rax, crc);
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  /**
   * Arguments:
   *
   * Inputs:
   *   c_rarg0   - int crc
   *   c_rarg1   - byte* buf
   *   c_rarg2   - long length
   *   c_rarg3   - table_start - optional (present only when doing a library_call,
   *               not used by x86 algorithm)
   *
   * Output:
   *   rax       - int crc result
   */
  address generate_updateBytesCRC32C(bool is_pclmulqdq_supported) {
    assert(UseCRC32CIntrinsics, "need SSE4_2");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C");
    address start = __ pc();
    // reg.arg    int#0   int#1   int#2   int#3   int#4   int#5   float regs
    // Windows    RCX     RDX     R8      R9      none    none    XMM0..XMM3
    // Lin / Sol  RDI     RSI     RDX     RCX     R8      R9      XMM0..XMM7
    const Register crc = c_rarg0; // crc
    const Register buf = c_rarg1; // source java byte array address
    const Register len = c_rarg2; // length
    const Register a = rax;
    const Register j = r9;
    const Register k = r10;
    const Register l = r11;
#ifdef _WIN64
    const Register y = rdi;
    const Register z = rsi;
#else
    const Register y = rcx;
    const Register z = r8;
#endif
    assert_different_registers(crc, buf, len, a, j, k, l, y, z);

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame
#ifdef _WIN64
    __ push(y);
    __ push(z);
#endif
    __ crc32c_ipl_alg2_alt2(crc, buf, len,
                            a, j, k,
                            l, y, z,
                            c_farg0, c_farg1, c_farg2,
                            is_pclmulqdq_supported);
    __ movl(rax, crc);
#ifdef _WIN64
    __ pop(z);
    __ pop(y);
#endif
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  /**
   * Arguments:
   *
   * Input:
   *   c_rarg0   - x address
   *   c_rarg1   - x length
   *   c_rarg2   - y address
   *   c_rarg3   - y length
   *  not Win64:
   *   c_rarg4   - z address
   *   c_rarg5   - z length
   *  Win64:
   *   rsp+40    - z address
   *   rsp+48    - z length
   */
  address generate_multiplyToLen() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "multiplyToLen");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register x    = rdi;
    const Register xlen = rax;
    const Register y    = rsi;
    const Register ylen = rcx;
    const Register z    = r8;
    const Register zlen = r11;

    // The next registers will be saved on the stack in multiply_to_len().
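    // multiply_to_len() performs the same schoolbook long multiplication as
    // the Java fallback in BigInteger.multiplyToLen. A sketch of the intent,
    // not of the generated code (assumes z is zero-initialized and
    // LONG_MASK == 0xffffffffL):
    //
    //   for (int i = xlen - 1; i >= 0; i--) {
    //     long carry = 0;
    //     for (int j = ylen - 1, k = ylen + i; j >= 0; j--, k--) {
    //       long product = (y[j] & LONG_MASK) * (x[i] & LONG_MASK)
    //                    + (z[k] & LONG_MASK) + carry;
    //       z[k] = (int)product;
    //       carry = product >>> 32;
    //     }
    //     z[i] = (int)carry;
    //   }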
    const Register tmp1 = r12;
    const Register tmp2 = r13;
    const Register tmp3 = r14;
    const Register tmp4 = r15;
    const Register tmp5 = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifndef _WIN64
    __ movptr(zlen, r9); // Save r9 in r11 - zlen
#endif
    setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx
                       // ylen => rcx, z => r8, zlen => r11
                       // r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
    // last 2 arguments (#4, #5) are on stack on Win64
    __ movptr(z, Address(rsp, 6 * wordSize));
    __ movptr(zlen, Address(rsp, 7 * wordSize));
#endif

    __ movptr(xlen, rsi);
    __ movptr(y, rdx);
    __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5);

    restore_arg_regs();

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  /**
   * Arguments:
   *
   * Input:
   *   c_rarg0   - obja     address
   *   c_rarg1   - objb     address
   *   c_rarg2   - length   length
   *   c_rarg3   - scale    log2_array_indxscale
   *
   * Output:
   *   rax       - int >= mismatched index, < 0 bitwise complement of tail
   */
  address generate_vectorizedMismatch() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "vectorizedMismatch");
    address start = __ pc();

    BLOCK_COMMENT("Entry:");
    __ enter();

#ifdef _WIN64  // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register scale = c_rarg0;  // rcx, will exchange with r9
    const Register objb = c_rarg1;   // rdx
    const Register length = c_rarg2; // r8
    const Register obja = c_rarg3;   // r9
    __ xchgq(obja, scale); // now obja and scale contain the correct contents

    const Register tmp1 = r10;
    const Register tmp2 = r11;
#endif
#ifndef _WIN64 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register obja = c_rarg0;   // U:rdi
    const Register objb = c_rarg1;   // U:rsi
    const Register length = c_rarg2; // U:rdx
    const Register scale = c_rarg3;  // U:rcx
    const Register tmp1 = r8;
    const Register tmp2 = r9;
#endif
    const Register result = rax; // return value
    const XMMRegister vec0 = xmm0;
    const XMMRegister vec1 = xmm1;
    const XMMRegister vec2 = xmm2;

    __ vectorized_mismatch(obja, objb, length, scale, result, tmp1, tmp2, vec0, vec1, vec2);

    __ vzeroupper();
    __ leave();
    __ ret(0);

    return start;
  }

  /**
   * Arguments:
   *
   * Input:
   *   c_rarg0   - x address
   *   c_rarg1   - x length
   *   c_rarg2   - z address
   *   c_rarg3   - z length
   */
  address generate_squareToLen() {

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "squareToLen");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...)
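    // square_to_len() exploits the symmetry of squaring: each cross product
    // x[i]*x[j] (i != j) occurs twice in the square, so the stub sums the
    // cross products once, doubles that partial result, and then adds in the
    // x[i]*x[i] diagonal terms - mirroring the Java fallback
    // BigInteger.squareToLen.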
5170 const Register x = rdi; 5171 const Register len = rsi; 5172 const Register z = r8; 5173 const Register zlen = rcx; 5174 5175 const Register tmp1 = r12; 5176 const Register tmp2 = r13; 5177 const Register tmp3 = r14; 5178 const Register tmp4 = r15; 5179 const Register tmp5 = rbx; 5180 5181 BLOCK_COMMENT("Entry:"); 5182 __ enter(); // required for proper stackwalking of RuntimeStub frame 5183 5184 setup_arg_regs(4); // x => rdi, len => rsi, z => rdx 5185 // zlen => rcx 5186 // r9 and r10 may be used to save non-volatile registers 5187 __ movptr(r8, rdx); 5188 __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 5189 5190 restore_arg_regs(); 5191 5192 __ leave(); // required for proper stackwalking of RuntimeStub frame 5193 __ ret(0); 5194 5195 return start; 5196 } 5197 5198 address generate_method_entry_barrier() { 5199 __ align(CodeEntryAlignment); 5200 StubCodeMark mark(this, "StubRoutines", "nmethod_entry_barrier"); 5201 5202 Label deoptimize_label; 5203 5204 address start = __ pc(); 5205 5206 __ push(-1); // cookie, this is used for writing the new rsp when deoptimizing 5207 5208 BLOCK_COMMENT("Entry:"); 5209 __ enter(); // save rbp 5210 5211 // save c_rarg0, because we want to use that value. 5212 // We could do without it but then we depend on the number of slots used by pusha 5213 __ push(c_rarg0); 5214 5215 __ lea(c_rarg0, Address(rsp, wordSize * 3)); // 1 for cookie, 1 for rbp, 1 for c_rarg0 - this should be the return address 5216 5217 __ pusha(); 5218 5219 // The method may have floats as arguments, and we must spill them before calling 5220 // the VM runtime. 5221 assert(Argument::n_float_register_parameters_j == 8, "Assumption"); 5222 const int xmm_size = wordSize * 2; 5223 const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j; 5224 __ subptr(rsp, xmm_spill_size); 5225 __ movdqu(Address(rsp, xmm_size * 7), xmm7); 5226 __ movdqu(Address(rsp, xmm_size * 6), xmm6); 5227 __ movdqu(Address(rsp, xmm_size * 5), xmm5); 5228 __ movdqu(Address(rsp, xmm_size * 4), xmm4); 5229 __ movdqu(Address(rsp, xmm_size * 3), xmm3); 5230 __ movdqu(Address(rsp, xmm_size * 2), xmm2); 5231 __ movdqu(Address(rsp, xmm_size * 1), xmm1); 5232 __ movdqu(Address(rsp, xmm_size * 0), xmm0); 5233 5234 __ call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(address*)>(BarrierSetNMethod::nmethod_stub_entry_barrier)), 1); 5235 5236 __ movdqu(xmm0, Address(rsp, xmm_size * 0)); 5237 __ movdqu(xmm1, Address(rsp, xmm_size * 1)); 5238 __ movdqu(xmm2, Address(rsp, xmm_size * 2)); 5239 __ movdqu(xmm3, Address(rsp, xmm_size * 3)); 5240 __ movdqu(xmm4, Address(rsp, xmm_size * 4)); 5241 __ movdqu(xmm5, Address(rsp, xmm_size * 5)); 5242 __ movdqu(xmm6, Address(rsp, xmm_size * 6)); 5243 __ movdqu(xmm7, Address(rsp, xmm_size * 7)); 5244 __ addptr(rsp, xmm_spill_size); 5245 5246 __ cmpl(rax, 1); // 1 means deoptimize 5247 __ jcc(Assembler::equal, deoptimize_label); 5248 5249 __ popa(); 5250 __ pop(c_rarg0); 5251 5252 __ leave(); 5253 5254 __ addptr(rsp, 1 * wordSize); // cookie 5255 __ ret(0); 5256 5257 5258 __ BIND(deoptimize_label); 5259 5260 __ popa(); 5261 __ pop(c_rarg0); 5262 5263 __ leave(); 5264 5265 // this can be taken out, but is good for verification purposes. 
getting a SIGSEGV 5266 // here while still having a correct stack is valuable 5267 __ testptr(rsp, Address(rsp, 0)); 5268 5269 __ movptr(rsp, Address(rsp, 0)); // new rsp was written in the barrier 5270 __ jmp(Address(rsp, -1 * wordSize)); // jmp target should be callers verified_entry_point 5271 5272 return start; 5273 } 5274 5275 /** 5276 * Arguments: 5277 * 5278 * Input: 5279 * c_rarg0 - out address 5280 * c_rarg1 - in address 5281 * c_rarg2 - offset 5282 * c_rarg3 - len 5283 * not Win64 5284 * c_rarg4 - k 5285 * Win64 5286 * rsp+40 - k 5287 */ 5288 address generate_mulAdd() { 5289 __ align(CodeEntryAlignment); 5290 StubCodeMark mark(this, "StubRoutines", "mulAdd"); 5291 5292 address start = __ pc(); 5293 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 5294 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 5295 const Register out = rdi; 5296 const Register in = rsi; 5297 const Register offset = r11; 5298 const Register len = rcx; 5299 const Register k = r8; 5300 5301 // Next registers will be saved on stack in mul_add(). 5302 const Register tmp1 = r12; 5303 const Register tmp2 = r13; 5304 const Register tmp3 = r14; 5305 const Register tmp4 = r15; 5306 const Register tmp5 = rbx; 5307 5308 BLOCK_COMMENT("Entry:"); 5309 __ enter(); // required for proper stackwalking of RuntimeStub frame 5310 5311 setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx 5312 // len => rcx, k => r8 5313 // r9 and r10 may be used to save non-volatile registers 5314 #ifdef _WIN64 5315 // last argument is on stack on Win64 5316 __ movl(k, Address(rsp, 6 * wordSize)); 5317 #endif 5318 __ movptr(r11, rdx); // move offset in rdx to offset(r11) 5319 __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 5320 5321 restore_arg_regs(); 5322 5323 __ leave(); // required for proper stackwalking of RuntimeStub frame 5324 __ ret(0); 5325 5326 return start; 5327 } 5328 5329 address generate_libmExp() { 5330 StubCodeMark mark(this, "StubRoutines", "libmExp"); 5331 5332 address start = __ pc(); 5333 5334 const XMMRegister x0 = xmm0; 5335 const XMMRegister x1 = xmm1; 5336 const XMMRegister x2 = xmm2; 5337 const XMMRegister x3 = xmm3; 5338 5339 const XMMRegister x4 = xmm4; 5340 const XMMRegister x5 = xmm5; 5341 const XMMRegister x6 = xmm6; 5342 const XMMRegister x7 = xmm7; 5343 5344 const Register tmp = r11; 5345 5346 BLOCK_COMMENT("Entry:"); 5347 __ enter(); // required for proper stackwalking of RuntimeStub frame 5348 5349 __ fast_exp(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 5350 5351 __ leave(); // required for proper stackwalking of RuntimeStub frame 5352 __ ret(0); 5353 5354 return start; 5355 5356 } 5357 5358 address generate_libmLog() { 5359 StubCodeMark mark(this, "StubRoutines", "libmLog"); 5360 5361 address start = __ pc(); 5362 5363 const XMMRegister x0 = xmm0; 5364 const XMMRegister x1 = xmm1; 5365 const XMMRegister x2 = xmm2; 5366 const XMMRegister x3 = xmm3; 5367 5368 const XMMRegister x4 = xmm4; 5369 const XMMRegister x5 = xmm5; 5370 const XMMRegister x6 = xmm6; 5371 const XMMRegister x7 = xmm7; 5372 5373 const Register tmp1 = r11; 5374 const Register tmp2 = r8; 5375 5376 BLOCK_COMMENT("Entry:"); 5377 __ enter(); // required for proper stackwalking of RuntimeStub frame 5378 5379 __ fast_log(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2); 5380 5381 __ leave(); // required for proper stackwalking of RuntimeStub frame 5382 __ ret(0); 5383 5384 return start; 5385 5386 } 5387 5388 address generate_libmLog10() { 5389 StubCodeMark mark(this, "StubRoutines", 
"libmLog10"); 5390 5391 address start = __ pc(); 5392 5393 const XMMRegister x0 = xmm0; 5394 const XMMRegister x1 = xmm1; 5395 const XMMRegister x2 = xmm2; 5396 const XMMRegister x3 = xmm3; 5397 5398 const XMMRegister x4 = xmm4; 5399 const XMMRegister x5 = xmm5; 5400 const XMMRegister x6 = xmm6; 5401 const XMMRegister x7 = xmm7; 5402 5403 const Register tmp = r11; 5404 5405 BLOCK_COMMENT("Entry:"); 5406 __ enter(); // required for proper stackwalking of RuntimeStub frame 5407 5408 __ fast_log10(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 5409 5410 __ leave(); // required for proper stackwalking of RuntimeStub frame 5411 __ ret(0); 5412 5413 return start; 5414 5415 } 5416 5417 address generate_libmPow() { 5418 StubCodeMark mark(this, "StubRoutines", "libmPow"); 5419 5420 address start = __ pc(); 5421 5422 const XMMRegister x0 = xmm0; 5423 const XMMRegister x1 = xmm1; 5424 const XMMRegister x2 = xmm2; 5425 const XMMRegister x3 = xmm3; 5426 5427 const XMMRegister x4 = xmm4; 5428 const XMMRegister x5 = xmm5; 5429 const XMMRegister x6 = xmm6; 5430 const XMMRegister x7 = xmm7; 5431 5432 const Register tmp1 = r8; 5433 const Register tmp2 = r9; 5434 const Register tmp3 = r10; 5435 const Register tmp4 = r11; 5436 5437 BLOCK_COMMENT("Entry:"); 5438 __ enter(); // required for proper stackwalking of RuntimeStub frame 5439 5440 __ fast_pow(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 5441 5442 __ leave(); // required for proper stackwalking of RuntimeStub frame 5443 __ ret(0); 5444 5445 return start; 5446 5447 } 5448 5449 address generate_libmSin() { 5450 StubCodeMark mark(this, "StubRoutines", "libmSin"); 5451 5452 address start = __ pc(); 5453 5454 const XMMRegister x0 = xmm0; 5455 const XMMRegister x1 = xmm1; 5456 const XMMRegister x2 = xmm2; 5457 const XMMRegister x3 = xmm3; 5458 5459 const XMMRegister x4 = xmm4; 5460 const XMMRegister x5 = xmm5; 5461 const XMMRegister x6 = xmm6; 5462 const XMMRegister x7 = xmm7; 5463 5464 const Register tmp1 = r8; 5465 const Register tmp2 = r9; 5466 const Register tmp3 = r10; 5467 const Register tmp4 = r11; 5468 5469 BLOCK_COMMENT("Entry:"); 5470 __ enter(); // required for proper stackwalking of RuntimeStub frame 5471 5472 #ifdef _WIN64 5473 __ push(rsi); 5474 __ push(rdi); 5475 #endif 5476 __ fast_sin(x0, x1, x2, x3, x4, x5, x6, x7, rax, rbx, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 5477 5478 #ifdef _WIN64 5479 __ pop(rdi); 5480 __ pop(rsi); 5481 #endif 5482 5483 __ leave(); // required for proper stackwalking of RuntimeStub frame 5484 __ ret(0); 5485 5486 return start; 5487 5488 } 5489 5490 address generate_libmCos() { 5491 StubCodeMark mark(this, "StubRoutines", "libmCos"); 5492 5493 address start = __ pc(); 5494 5495 const XMMRegister x0 = xmm0; 5496 const XMMRegister x1 = xmm1; 5497 const XMMRegister x2 = xmm2; 5498 const XMMRegister x3 = xmm3; 5499 5500 const XMMRegister x4 = xmm4; 5501 const XMMRegister x5 = xmm5; 5502 const XMMRegister x6 = xmm6; 5503 const XMMRegister x7 = xmm7; 5504 5505 const Register tmp1 = r8; 5506 const Register tmp2 = r9; 5507 const Register tmp3 = r10; 5508 const Register tmp4 = r11; 5509 5510 BLOCK_COMMENT("Entry:"); 5511 __ enter(); // required for proper stackwalking of RuntimeStub frame 5512 5513 #ifdef _WIN64 5514 __ push(rsi); 5515 __ push(rdi); 5516 #endif 5517 __ fast_cos(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 5518 5519 #ifdef _WIN64 5520 __ pop(rdi); 5521 __ pop(rsi); 5522 #endif 5523 5524 __ leave(); // required for proper stackwalking of RuntimeStub frame 
5525 __ ret(0); 5526 5527 return start; 5528 5529 } 5530 5531 address generate_libmTan() { 5532 StubCodeMark mark(this, "StubRoutines", "libmTan"); 5533 5534 address start = __ pc(); 5535 5536 const XMMRegister x0 = xmm0; 5537 const XMMRegister x1 = xmm1; 5538 const XMMRegister x2 = xmm2; 5539 const XMMRegister x3 = xmm3; 5540 5541 const XMMRegister x4 = xmm4; 5542 const XMMRegister x5 = xmm5; 5543 const XMMRegister x6 = xmm6; 5544 const XMMRegister x7 = xmm7; 5545 5546 const Register tmp1 = r8; 5547 const Register tmp2 = r9; 5548 const Register tmp3 = r10; 5549 const Register tmp4 = r11; 5550 5551 BLOCK_COMMENT("Entry:"); 5552 __ enter(); // required for proper stackwalking of RuntimeStub frame 5553 5554 #ifdef _WIN64 5555 __ push(rsi); 5556 __ push(rdi); 5557 #endif 5558 __ fast_tan(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 5559 5560 #ifdef _WIN64 5561 __ pop(rdi); 5562 __ pop(rsi); 5563 #endif 5564 5565 __ leave(); // required for proper stackwalking of RuntimeStub frame 5566 __ ret(0); 5567 5568 return start; 5569 5570 } 5571 5572 #undef __ 5573 #define __ masm-> 5574 5575 // Continuation point for throwing of implicit exceptions that are 5576 // not handled in the current activation. Fabricates an exception 5577 // oop and initiates normal exception dispatching in this 5578 // frame. Since we need to preserve callee-saved values (currently 5579 // only for C2, but done for C1 as well) we need a callee-saved oop 5580 // map and therefore have to make these stubs into RuntimeStubs 5581 // rather than BufferBlobs. If the compiler needs all registers to 5582 // be preserved between the fault point and the exception handler 5583 // then it must assume responsibility for that in 5584 // AbstractCompiler::continuation_for_implicit_null_exception or 5585 // continuation_for_implicit_division_by_zero_exception. All other 5586 // implicit exceptions (e.g., NullPointerException or 5587 // AbstractMethodError on entry) are either at call sites or 5588 // otherwise assume that stack unwinding will be initiated, so 5589 // caller saved registers were assumed volatile in the compiler. 5590 address generate_throw_exception(const char* name, 5591 address runtime_entry, 5592 Register arg1 = noreg, 5593 Register arg2 = noreg) { 5594 // Information about frame layout at time of blocking runtime call. 5595 // Note that we only have to preserve callee-saved registers since 5596 // the compilers are responsible for supplying a continuation point 5597 // if they expect all registers to be preserved. 
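    // The layout below is counted in 32-bit slots: rbp and the return address
    // each take two slots, and frame::arg_reg_save_area_bytes accounts for the
    // register-argument home area the Win64 ABI requires the caller to reserve
    // (it is zero on other platforms).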
    enum layout {
      rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
      rbp_off2,
      return_off,
      return_off2,
      framesize // inclusive of return address
    };

    int insts_size = 512;
    int locs_size  = 64;

    CodeBuffer code(name, insts_size, locs_size);
    OopMapSet* oop_maps  = new OopMapSet();
    MacroAssembler* masm = new MacroAssembler(&code);

    address start = __ pc();

    // This is an inlined and slightly modified version of call_VM
    // which has the ability to fetch the return PC out of
    // thread-local storage and also sets up last_Java_sp slightly
    // differently than the real call_VM

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    assert(is_even(framesize/2), "sp not 16-byte aligned");

    // return address and rbp are already in place
    __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog

    int frame_complete = __ pc() - start;

    // Set up last_Java_sp and last_Java_fp
    address the_pc = __ pc();
    __ set_last_Java_frame(rsp, rbp, the_pc);
    __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack

    // Call runtime
    if (arg1 != noreg) {
      assert(arg2 != c_rarg1, "clobbered");
      __ movptr(c_rarg1, arg1);
    }
    if (arg2 != noreg) {
      __ movptr(c_rarg2, arg2);
    }
    __ movptr(c_rarg0, r15_thread);
    BLOCK_COMMENT("call runtime_entry");
    __ call(RuntimeAddress(runtime_entry));

    // Generate oop map
    OopMap* map = new OopMap(framesize, 0);

    oop_maps->add_gc_map(the_pc - start, map);

    __ reset_last_Java_frame(true);

    __ leave(); // required for proper stackwalking of RuntimeStub frame

    // check for pending exceptions
#ifdef ASSERT
    Label L;
    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()),
              (int32_t) NULL_WORD);
    __ jcc(Assembler::notEqual, L);
    __ should_not_reach_here();
    __ bind(L);
#endif // ASSERT
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));


    // codeBlob framesize is in words (not VMRegImpl::slot_size)
    RuntimeStub* stub =
      RuntimeStub::new_runtime_stub(name,
                                    &code,
                                    frame_complete,
                                    (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                    oop_maps, false);
    return stub->entry_point();
  }

  void create_control_words() {
    // Round to nearest, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_std   = 0x027F;
    // Round to zero, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
    // Round to nearest, 24-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_24    = 0x007F;
    // Round to nearest, 64-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_64    = 0x037F;
    // Round to nearest, 64-bit mode, exceptions masked
    StubRoutines::_mxcsr_std           = 0x1F80;
    // Note: the following two constants are 80-bit values;
    // their layout is critical for correct loading by the FPU.
    // Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias1[0] = 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias1[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias1[2] = 0x03ff;
    // Un-Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias2[0] = 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias2[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias2[2] = 0x7bff;
  }

  // Initialization
  void generate_initial() {
    // Generates all stubs and initializes the entry points

    // These platform-specific settings are needed by generate_call_stub()
    create_control_words();

    // Entry points that exist on all platforms. Note: This is code
    // that could be shared among different platforms - however the
    // benefit seems to be smaller than the disadvantage of having a
    // much more complicated generator structure. See also comment in
    // stubRoutines.hpp.

    StubRoutines::_forward_exception_entry = generate_forward_exception();

    StubRoutines::_call_stub_entry =
      generate_call_stub(StubRoutines::_call_stub_return_address);

    // is referenced by megamorphic call
    StubRoutines::_catch_exception_entry = generate_catch_exception();

    // atomic calls
    StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
    StubRoutines::_atomic_xchg_long_entry    = generate_atomic_xchg_long();
    StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
    StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
    StubRoutines::_atomic_add_entry          = generate_atomic_add();
    StubRoutines::_atomic_add_long_entry     = generate_atomic_add_long();
    StubRoutines::_fence_entry               = generate_orderaccess_fence();

    // platform dependent
    StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
    StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();

    StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();

    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_StackOverflowError));
    StubRoutines::_throw_delayed_StackOverflowError_entry =
      generate_throw_exception("delayed StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_delayed_StackOverflowError));
    if (UseCRC32Intrinsics) {
      // set the table address before generating the stubs that use it
      StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
    }

    if (UseCRC32CIntrinsics) {
      bool supports_clmul = VM_Version::supports_clmul();
      StubRoutines::x86::generate_CRC32C_table(supports_clmul);
      StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
      StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
    }
    if (VM_Version::supports_sse2() && UseLibmIntrinsic && InlineIntrinsics) {
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::x86::_ONEHALF_adr = (address)StubRoutines::x86::_ONEHALF;
        StubRoutines::x86::_P_2_adr = (address)StubRoutines::x86::_P_2;
        StubRoutines::x86::_SC_4_adr = (address)StubRoutines::x86::_SC_4;
        StubRoutines::x86::_Ctable_adr = (address)StubRoutines::x86::_Ctable;
        StubRoutines::x86::_SC_2_adr = (address)StubRoutines::x86::_SC_2;
        StubRoutines::x86::_SC_3_adr = (address)StubRoutines::x86::_SC_3;
        StubRoutines::x86::_SC_1_adr = (address)StubRoutines::x86::_SC_1;
        StubRoutines::x86::_PI_INV_TABLE_adr = (address)StubRoutines::x86::_PI_INV_TABLE;
        StubRoutines::x86::_PI_4_adr = (address)StubRoutines::x86::_PI_4;
        StubRoutines::x86::_PI32INV_adr = (address)StubRoutines::x86::_PI32INV;
        StubRoutines::x86::_SIGN_MASK_adr = (address)StubRoutines::x86::_SIGN_MASK;
        StubRoutines::x86::_P_1_adr = (address)StubRoutines::x86::_P_1;
        StubRoutines::x86::_P_3_adr = (address)StubRoutines::x86::_P_3;
        StubRoutines::x86::_NEG_ZERO_adr = (address)StubRoutines::x86::_NEG_ZERO;
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dexp)) {
        StubRoutines::_dexp = generate_libmExp();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
        StubRoutines::_dlog = generate_libmLog();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) {
        StubRoutines::_dlog10 = generate_libmLog10();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) {
        StubRoutines::_dpow = generate_libmPow();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
        StubRoutines::_dsin = generate_libmSin();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
        StubRoutines::_dcos = generate_libmCos();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::_dtan = generate_libmTan();
      }
    }
  }

  void generate_all() {
    // Generates all stubs and initializes the entry points

    // These entry points require SharedInfo::stack0 to be set up in
    // non-core builds and need to be relocatable, so they each
    // fabricate a RuntimeStub internally.
5809 StubRoutines::_throw_AbstractMethodError_entry = 5810 generate_throw_exception("AbstractMethodError throw_exception", 5811 CAST_FROM_FN_PTR(address, 5812 SharedRuntime:: 5813 throw_AbstractMethodError)); 5814 5815 StubRoutines::_throw_IncompatibleClassChangeError_entry = 5816 generate_throw_exception("IncompatibleClassChangeError throw_exception", 5817 CAST_FROM_FN_PTR(address, 5818 SharedRuntime:: 5819 throw_IncompatibleClassChangeError)); 5820 5821 StubRoutines::_throw_NullPointerException_at_call_entry = 5822 generate_throw_exception("NullPointerException at call throw_exception", 5823 CAST_FROM_FN_PTR(address, 5824 SharedRuntime:: 5825 throw_NullPointerException_at_call)); 5826 5827 // entry points that are platform specific 5828 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup(); 5829 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup(); 5830 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup(); 5831 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup(); 5832 5833 StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF); 5834 StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000); 5835 StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF); 5836 StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000); 5837 5838 // support for verify_oop (must happen after universe_init) 5839 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); 5840 5841 // arraycopy stubs used by compilers 5842 generate_arraycopy_stubs(); 5843 5844 // don't bother generating these AES intrinsic stubs unless global flag is set 5845 if (UseAESIntrinsics) { 5846 StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others 5847 StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock(); 5848 StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock(); 5849 StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt(); 5850 if (VM_Version::supports_vaes() && VM_Version::supports_avx512vl() && VM_Version::supports_avx512dq() ) { 5851 StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptVectorAESCrypt(); 5852 } else { 5853 StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel(); 5854 } 5855 } 5856 if (UseAESCTRIntrinsics){ 5857 StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask(); 5858 StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel(); 5859 } 5860 5861 if (UseSHA1Intrinsics) { 5862 StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask(); 5863 StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask(); 5864 StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress"); 5865 StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB"); 5866 } 5867 if (UseSHA256Intrinsics) { 5868 StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256; 5869 char* dst = (char*)StubRoutines::x86::_k256_W; 5870 char* src = (char*)StubRoutines::x86::_k256; 5871 for (int ii = 0; ii < 16; ++ii) { 5872 memcpy(dst + 32 * ii, src + 16 * ii, 16); 5873 memcpy(dst + 32 * ii + 16, src + 16 * ii, 16); 5874 } 5875 StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W; 5876 StubRoutines::x86::_pshuffle_byte_flip_mask_addr = 
generate_pshuffle_byte_flip_mask(); 5877 StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress"); 5878 StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB"); 5879 } 5880 if (UseSHA512Intrinsics) { 5881 StubRoutines::x86::_k512_W_addr = (address)StubRoutines::x86::_k512_W; 5882 StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = generate_pshuffle_byte_flip_mask_sha512(); 5883 StubRoutines::_sha512_implCompress = generate_sha512_implCompress(false, "sha512_implCompress"); 5884 StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB"); 5885 } 5886 5887 // Generate GHASH intrinsics code 5888 if (UseGHASHIntrinsics) { 5889 StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask(); 5890 StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask(); 5891 StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks(); 5892 } 5893 5894 if (UseBASE64Intrinsics) { 5895 StubRoutines::x86::_and_mask = base64_and_mask_addr(); 5896 StubRoutines::x86::_bswap_mask = base64_bswap_mask_addr(); 5897 StubRoutines::x86::_base64_charset = base64_charset_addr(); 5898 StubRoutines::x86::_url_charset = base64url_charset_addr(); 5899 StubRoutines::x86::_gather_mask = base64_gather_mask_addr(); 5900 StubRoutines::x86::_left_shift_mask = base64_left_shift_mask_addr(); 5901 StubRoutines::x86::_right_shift_mask = base64_right_shift_mask_addr(); 5902 StubRoutines::_base64_encodeBlock = generate_base64_encodeBlock(); 5903 } 5904 5905 // Safefetch stubs. 5906 generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry, 5907 &StubRoutines::_safefetch32_fault_pc, 5908 &StubRoutines::_safefetch32_continuation_pc); 5909 generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry, 5910 &StubRoutines::_safefetchN_fault_pc, 5911 &StubRoutines::_safefetchN_continuation_pc); 5912 5913 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); 5914 if (bs_nm != NULL) { 5915 StubRoutines::x86::_method_entry_barrier = generate_method_entry_barrier(); 5916 } 5917 #ifdef COMPILER2 5918 if (UseMultiplyToLenIntrinsic) { 5919 StubRoutines::_multiplyToLen = generate_multiplyToLen(); 5920 } 5921 if (UseSquareToLenIntrinsic) { 5922 StubRoutines::_squareToLen = generate_squareToLen(); 5923 } 5924 if (UseMulAddIntrinsic) { 5925 StubRoutines::_mulAdd = generate_mulAdd(); 5926 } 5927 #ifndef _WINDOWS 5928 if (UseMontgomeryMultiplyIntrinsic) { 5929 StubRoutines::_montgomeryMultiply 5930 = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply); 5931 } 5932 if (UseMontgomerySquareIntrinsic) { 5933 StubRoutines::_montgomerySquare 5934 = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square); 5935 } 5936 #endif // WINDOWS 5937 #endif // COMPILER2 5938 5939 if (UseVectorizedMismatchIntrinsic) { 5940 StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch(); 5941 } 5942 } 5943 5944 public: 5945 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { 5946 if (all) { 5947 generate_all(); 5948 } else { 5949 generate_initial(); 5950 } 5951 } 5952 }; // end class declaration 5953 5954 void StubGenerator_generate(CodeBuffer* code, bool all) { 5955 StubGenerator g(code, all); 5956 }
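
// StubGenerator_generate() is invoked twice during VM startup (see
// stubRoutines.cpp): first with all == false to produce the initial stubs the
// interpreter depends on, and again with all == true once universe
// initialization has completed.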