/*
 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/zThreadLocalData.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions

// Stub Code definitions

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    // This can destroy rscratch1 if counter is far from the code cache
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter) \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif

  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    c_rarg4:   (interpreter) entry point              address
  //    c_rarg5:   parameters                             intptr_t*
  //    16(rbp): parameter size (in words)                int
  //    24(rbp): thread                                   Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -12 [ argument word 1      ]
  // -11 [ saved r15            ] <--- rsp_after_call
  // -10 [ saved r14            ]
  //  -9 [ saved r13            ]
  //  -8 [ saved r12            ]
  //  -7 [ saved rbx            ]
  //  -6 [ call wrapper         ]
  //  -5 [ result               ]
  //  -4 [ result type          ]
  //  -3 [ method               ]
  //  -2 [ entry point          ]
  //  -1 [ parameters           ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ parameter size       ]
  //   3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    48(rbp): (interpreter) entry point                address
  //    56(rbp): parameters                               intptr_t*
  //    64(rbp): parameter size (in words)                int
  //    72(rbp): thread                                   Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -60 [ argument word 1      ]
  // -59 [ saved xmm31          ] <--- rsp_after_call
  //     [ saved xmm16-xmm30    ] (EVEX enabled, else the space is blank)
  // -27 [ saved xmm15          ]
  //     [ saved xmm7-xmm14     ]
  //  -9 [ saved xmm6           ] (each xmm register takes 2 slots)
  //  -7 [ saved r15            ]
  //  -6 [ saved r14            ]
  //  -5 [ saved r13            ]
  //  -4 [ saved r12            ]
  //  -3 [ saved rdi            ]
  //  -2 [ saved rsi            ]
  //  -1 [ saved rbx            ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ call wrapper         ]
  //   3 [ result               ]
  //   4 [ result type          ]
  //   5 [ method               ]
  //   6 [ entry point          ]
  //   7 [ parameters           ]
  //   8 [ parameter size       ]
  //   9 [ thread               ]
  //
  //    Windows reserves the caller's stack space for arguments 1-4.
  //    We spill c_rarg0-c_rarg3 to this space.

  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    xmm_save_first     = 6,  // save from xmm6
    xmm_save_last      = 31, // to xmm31
    xmm_save_base      = -9,
    rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -59
    r15_off            = -7,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            = 0,
    retaddr_off        = 1,
    call_wrapper_off   = 2,
    result_off         = 3,
    result_type_off    = 4,
    method_off         = 5,
    entry_point_off    = 6,
    parameters_off     = 7,
    parameter_size_off = 8,
    thread_off         = 9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            = 0,
    retaddr_off        = 1,
    parameter_size_off = 2,
    thread_off         = 3
#endif
  };

#ifdef _WIN64
  Address xmm_save(int reg) {
    assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
    return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
  }
#endif

  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();
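
    // For orientation, a rough sketch of the C-level signature this stub
    // implements (cf. the CallStub typedef in stubRoutines.hpp; the
    // parameter names here are illustrative only):
    //
    //   void call_stub(address call_wrapper_address,
    //                  intptr_t* result, BasicType result_type,
    //                  Method* method, address entry_point,
    //                  intptr_t* parameters, int parameter_size,
    //                  Thread* thread);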
    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,   c_rarg5); // parameters
    __ movptr(entry_point,  c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);

#ifdef _WIN64
    int last_reg = 15;
    if (UseAVX > 2) {
      last_reg = 31;
    }
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vextractf32x4(xmm_save(i), as_XMMRegister(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(xmm_save(i), as_XMMRegister(i));
      }
    }

    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);       // parameter pointer
    __ movl(c_rarg1, c_rarg3);            // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0));  // get parameter
    __ addptr(c_rarg2, wordSize);         // advance to next parameter
    __ decrementl(c_rarg1);               // decrement counter
    __ push(rax);                         // pass parameter
    __ jcc(Assembler::notZero, loop);
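
    // At this point all Java arguments are on the stack; a sketch of the
    // layout produced by the loop above (count == parameter_size):
    //
    //   [rsp + 0]             parameters[count-1]   (pushed last)
    //    ...
    //   [rsp + 8*(count-1)]   parameters[0]         (pushed first)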
    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);          // get Method*
    __ movptr(c_rarg1, entry_point); // get entry_point
    __ mov(r13, rsp);                // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::call_stub: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::call_stub: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L3);
    }
#endif

    // restore regs belonging to calling function
#ifdef _WIN64
    // emit the restores for xmm regs
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vinsertf32x4(as_XMMRegister(i), as_XMMRegister(i), xmm_save(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(as_XMMRegister(i), xmm_save(i));
      }
    }
#endif
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ vzeroupper();
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }

  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to set up the
  // rsp.
  //
  // rax: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    address start = __ pc();

    // same as in generate_call_stub():
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
    const Address thread        (rbp, thread_off         * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::catch_exception: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::catch_exception: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L3);
    }
#endif

    // set pending exception
    __ verify_oop(rax);

    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
    __ lea(rscratch1, ExternalAddress((address)__FILE__));
    __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
    __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL,
           "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }

  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.
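    //
    // In outline, what the code below does (a sketch, not the emitted
    // instructions):
    //
    //   rbx = exception_handler_for_return_address(thread, *rsp);
    //   rdx = *rsp; rsp += 8;          // throwing pc = popped return address
    //   rax = thread->pending_exception();
    //   thread->set_pending_exception(NULL);
    //   goto *rbx;                     // rax: exception, rdx: throwing pc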
#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }

  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::xchg_long(jlong exchange_value, volatile jlong* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for int8_t atomic::atomic_cmpxchg(int8_t exchange_value, volatile int8_t* dest,
  //                                           int8_t compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
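  //
  // Note on the cmpxchg stubs: LOCK CMPXCHG compares rax with *dest and
  // stores the new value on equality; either way rax ends up holding the
  // original *dest, which is exactly the return value the contract above
  // requires, so no fixup is needed after the instruction.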
  address generate_atomic_cmpxchg_byte() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
    address start = __ pc();

    __ movsbq(rax, c_rarg2);
    __ lock();
    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for int64_t atomic::atomic_cmpxchg(int64_t exchange_value,
  //                                            volatile int64_t* dest,
  //                                            int64_t compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);
    __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jint atomic::add(jint add_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);
    __ addl(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
    __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
    __ addptr(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess).  This is used as part of debugging
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp  (rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // caller's fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_sp()
  //
  // This routine is used to find the previous stack pointer for the
  // caller.
  address generate_get_previous_sp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_sp");
    address start = __ pc();

    __ movptr(rax, rsp);
    __ addptr(rax, 8); // return address is at the top of the stack.
    __ ret(0);

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize); // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK); // Only check control and mask bits
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICalls");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }

  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
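
  // The f2i/f2l/d2i/d2l fixup stubs below share one job: CVTTSS2SI and
  // CVTTSD2SI return the "integer indefinite" value (min_jint/min_jlong)
  // for NaN and out-of-range inputs, so compiled code branches here on
  // that result to materialize the Java-mandated answer instead (a summary
  // of the code that follows): 0 for NaN, min for negative overflow, max
  // for positive overflow.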
  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
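
  // In d2i above and d2l below, the neg/shr/or sequence folds "low 32 bits
  // of the double are non-zero" into bit 0 of the sign-cleared high word,
  // so a single signed compare against 0x7ff00000 separates NaN (strictly
  // greater: exponent all ones with a non-zero mantissa) from infinities
  // and finite overflow (a reading of the code, stated here for
  // orientation).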
  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64( mask, relocInfo::none );
    __ emit_data64( mask, relocInfo::none );

    return start;
  }

  address generate_vector_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);

    return start;
  }

  address generate_vector_byte_perm_mask(const char *stub_name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64(0x0000000000000001, relocInfo::none);
    __ emit_data64(0x0000000000000003, relocInfo::none);
    __ emit_data64(0x0000000000000005, relocInfo::none);
    __ emit_data64(0x0000000000000007, relocInfo::none);
    __ emit_data64(0x0000000000000000, relocInfo::none);
    __ emit_data64(0x0000000000000002, relocInfo::none);
    __ emit_data64(0x0000000000000004, relocInfo::none);
    __ emit_data64(0x0000000000000006, relocInfo::none);

    return start;
  }

  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,
      saved_r10     = 8 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr = 16 * wordSize,
      error_msg   = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit); // if obj is NULL it is OK

#if INCLUDE_ZGC
    if (UseZGC) {
      // Check if metadata bits indicate a bad oop
      __ testptr(rax, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
      __ jcc(Assembler::notZero, error);
    }
#endif

    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // set r12 to heapbase for load_klass()
    __ reinit_heapbase();

    // make sure klass is 'reasonable', i.e., non-NULL
    __ load_klass(rax, rax); // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // restore c_rarg3
    __ pop(c_rarg2);                               // restore c_rarg2
    __ pop(r12);                                   // restore r12
    __ popf();                                     // restore flags
    __ ret(4 * wordSize);                          // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // get saved c_rarg3 back
    __ pop(c_rarg2);                               // get saved c_rarg2 back
    __ pop(r12);                                   // get saved r12 back
    __ popf();                                     // get saved flags off stack --
                                                   // will be ignored

    __ pusha();                                    // push registers
                                                   // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //     [tos +  0] 16 saved registers
    //     [tos + 16] return address
    //   * [tos + 17] error message (char*)
    //   * [tos + 18] object to verify (oop)
    //   * [tos + 19] saved rax - saved by caller and bashed
    //   * [tos + 20] saved r10 (rscratch1) - saved by caller
    //   * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ mov(rsp, r12);                               // restore rsp
    __ popa();                                      // pop registers (includes r12)
    __ ret(4 * wordSize);                           // pop caller saved stuff

    return start;
  }

  //
  // Verify that a register contains a clean 32-bit positive value
  // (high 32 bits are 0) so it can be used in 64-bit shifts.
  //
  //  Input:
  //    Rint  -  32-bit value
  //    Rtmp  -  scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#ifdef ASSERT
    Label L;
    assert_different_registers(Rtmp, Rint);
    __ movslq(Rtmp, Rint);
    __ cmpq(Rtmp, Rint);
    __ jcc(Assembler::equal, L);
    __ stop("high 32-bits of int value are not 0");
    __ bind(L);
#endif
  }

  //  Generate overlap test for array copy stubs
  //
  //  Input:
  //     c_rarg0 - from
  //     c_rarg1 - to
  //     c_rarg2 - element count
  //
  //  Output:
  //     rax   - &from[element count - 1]
  //
  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(NULL, &L_no_overlap, sf);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
    const Register from     = c_rarg0;
    const Register to       = c_rarg1;
    const Register count    = c_rarg2;
    const Register end_from = rax;

    __ cmpptr(to, from);
    __ lea(end_from, Address(from, count, sf, 0));
    if (NOLp == NULL) {
      ExternalAddress no_overlap(no_overlap_target);
      __ jump_cc(Assembler::belowEqual, no_overlap);
      __ cmpptr(to, end_from);
      __ jump_cc(Assembler::aboveEqual, no_overlap);
    } else {
      __ jcc(Assembler::belowEqual, (*NOLp));
      __ cmpptr(to, end_from);
      __ jcc(Assembler::aboveEqual, (*NOLp));
    }
  }

  // Shuffle first three arg regs on Windows into Linux/Solaris locations.
  //
  // Outputs:
  //    rdi - rcx
  //    rsi - rdx
  //    rdx - r8
  //    rcx - r9
  //
  // Registers r9 and r10 are used on Windows to save rdi and rsi, which
  // are non-volatile there.  r9 and r10 should not be used by the caller.
  //
  DEBUG_ONLY(bool regs_in_thread;)

  void setup_arg_regs(int nargs = 3) {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
    assert(nargs == 3 || nargs == 4, "else fix");
#ifdef _WIN64
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    if (nargs >= 4)
      __ mov(rax, r9);  // r9 is also saved_rdi
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
    if (nargs >= 4)
      __ mov(rcx, rax); // c_rarg3 (via rax)
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
    DEBUG_ONLY(regs_in_thread = false;)
  }

  void restore_arg_regs() {
    assert(!regs_in_thread, "wrong call to restore_arg_regs");
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
#ifdef _WIN64
    __ movptr(rdi, saved_rdi);
    __ movptr(rsi, saved_rsi);
#endif
  }

  // This is used in places where r10 is a scratch register, and can
  // be adapted if r9 is needed also.
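  //
  // Unlike setup_arg_regs() above, this variant parks rdi/rsi in
  // JavaThread fields rather than in r9/r10, so the saved values survive
  // even if those scratch registers are clobbered between setup and
  // restore (an explanatory note; the Windows-only save slots are the
  // windows_saved_rdi/rsi fields used below).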
  void setup_arg_regs_using_thread() {
    const Register saved_r15 = r9;
#ifdef _WIN64
    __ mov(saved_r15, r15);  // r15 is callee saved and needs to be restored
    __ get_thread(r15_thread);
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    __ movptr(Address(r15_thread, in_bytes(JavaThread::windows_saved_rdi_offset())), rdi);
    __ movptr(Address(r15_thread, in_bytes(JavaThread::windows_saved_rsi_offset())), rsi);

    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
    DEBUG_ONLY(regs_in_thread = true;)
  }

  void restore_arg_regs_using_thread() {
    assert(regs_in_thread, "wrong call to restore_arg_regs");
    const Register saved_r15 = r9;
#ifdef _WIN64
    __ get_thread(r15_thread);
    __ movptr(rsi, Address(r15_thread, in_bytes(JavaThread::windows_saved_rsi_offset())));
    __ movptr(rdi, Address(r15_thread, in_bytes(JavaThread::windows_saved_rdi_offset())));
    __ mov(r15, saved_r15);  // r15 is callee saved and needs to be restored
#endif
  }

  // Copy big chunks forward
  //
  // Inputs:
  //   end_from       - source array's end address
  //   end_to         - destination array's end address
  //   qword_count    - 64-bit element count, negative
  //   to             - scratch
  //   L_copy_bytes   - entry label
  //   L_copy_8_bytes - exit label
  //
  void copy_bytes_forward(Register end_from, Register end_to,
                          Register qword_count, Register to,
                          Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      // Copy 64 bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit);
        __ evmovdqul(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
        __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
        __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 8);
      __ jcc(Assembler::lessEqual, L_loop);
      __ subptr(qword_count, 4);  // sub(8) and add(4)
      __ jccb(Assembler::greater, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
      }
      __ addptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32 bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
      __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
      __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
      __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
      __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);

      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 4);
      __ jcc(Assembler::lessEqual, L_loop);
    }
    __ subptr(qword_count, 4);
    __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
  }

  // Copy big chunks backward
  //
  // Inputs:
  //   from           - source array's address
  //   dest           - destination array's address
  //   qword_count    - 64-bit element count
  //   to             - scratch
  //   L_copy_bytes   - entry label
  //   L_copy_8_bytes - exit label
  //
  void copy_bytes_backward(Register from, Register dest,
                           Register qword_count, Register to,
                           Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      // Copy 64 bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 0), Assembler::AVX_512bit);
        __ evmovdqul(Address(dest, qword_count, Address::times_8, 0), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
        __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
        __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
        __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
        __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
        __ movdqu(xmm3, Address(from, qword_count, Address::times_8, 0));
        __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 8);
      __ jcc(Assembler::greaterEqual, L_loop);

      __ addptr(qword_count, 4);  // add(8) and sub(4)
      __ jccb(Assembler::less, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
        __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
      }
      __ subptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32 bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(from, qword_count, Address::times_8, 24));
      __ movq(Address(dest, qword_count, Address::times_8, 24), to);
      __ movq(to, Address(from, qword_count, Address::times_8, 16));
      __ movq(Address(dest, qword_count, Address::times_8, 16), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  8));
      __ movq(Address(dest, qword_count, Address::times_8,  8), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  0));
      __ movq(Address(dest, qword_count, Address::times_8,  0), to);

      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 4);
      __ jcc(Assembler::greaterEqual, L_loop);
    }
    __ addptr(qword_count, 4);
    __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
  }


  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_byte_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_byte_copy().
  //
  address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    Label L_copy_byte, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
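
    // Note: the entry published below skips the enter() and assert above;
    // stubs that jump here (e.g. the conjoint copy after its overlap test)
    // have already built an equivalent frame, so the code that follows can
    // assume one either way.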
    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count); // make the count negative
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jccb(Assembler::zero, L_copy_byte);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ addptr(end_from, 2);
    __ addptr(end_to, 2);

    // Check for and copy trailing byte
    __ BIND(L_copy_byte);
    __ testl(byte_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movb(rax, Address(end_from, 8));
    __ movb(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
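  // The conjoint version below first runs array_overlap_test(); if the
  // regions cannot overlap destructively it tail-jumps to the disjoint
  // (forward-copying) stub above, otherwise it copies from high to low
  // addresses so destination bytes are never written before the
  // corresponding source bytes have been read.
  //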
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from high to low addresses.

    // Check for and copy trailing byte
    __ testl(byte_count, 1);
    __ jcc(Assembler::zero, L_copy_2_bytes);
    __ movb(rax, Address(from, byte_count, Address::times_1, -1));
    __ movb(Address(to, byte_count, Address::times_1, -1), rax);
    __ decrement(byte_count); // Adjust for possible trailing word

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jcc(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, byte_count, Address::times_1, -2));
    __ movw(Address(to, byte_count, Address::times_1, -2), rax);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_short_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_short_copy().
  //
  address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
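
    // Index idiom shared by these forward copy loops (a sketch of what the
    // three instructions below set up):
    //
    //   end_from = from + 8*qword_count - 8;   // last qword of source
    //   end_to   = to   + 8*qword_count - 8;   // last qword of destination
    //   qword_count = -qword_count;            // negative index counts up
    //
    // so end_from[8*qword_count + 8] starts at the first qword and the
    // loop terminates when the incremented index reaches zero.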
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Original 'dest' is trashed, so we can't use it as a
    // base register for a possible trailing word copy

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }

  address generate_fill(BasicType t, bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    BLOCK_COMMENT("Entry:");

    const Register to    = c_rarg0;  // destination array address
    const Register value = c_rarg1;  // value
    const Register count = c_rarg2;  // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ generate_fill(t, aligned, to, value, count, rax, xmm0);

    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
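  // Trailing-element bookkeeping for the conjoint short copy below (a
  // sketch): after shrptr(count, 2), qword_count = word_count >> 2 whole
  // 8-byte units remain for the bulk loop; word_count & 1 flags one
  // trailing short and word_count & 2 flags a trailing dword (two shorts),
  // each handled up front since the bulk copy then runs high to low.
  //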
1773 // 1774 address generate_conjoint_short_copy(bool aligned, address nooverlap_target, 1775 address *entry, const char *name) { 1776 __ align(CodeEntryAlignment); 1777 StubCodeMark mark(this, "StubRoutines", name); 1778 address start = __ pc(); 1779 1780 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes; 1781 const Register from = rdi; // source array address 1782 const Register to = rsi; // destination array address 1783 const Register count = rdx; // elements count 1784 const Register word_count = rcx; 1785 const Register qword_count = count; 1786 1787 __ enter(); // required for proper stackwalking of RuntimeStub frame 1788 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1789 1790 if (entry != NULL) { 1791 *entry = __ pc(); 1792 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1793 BLOCK_COMMENT("Entry:"); 1794 } 1795 1796 array_overlap_test(nooverlap_target, Address::times_2); 1797 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1798 // r9 and r10 may be used to save non-volatile registers 1799 1800 // 'from', 'to' and 'count' are now valid 1801 __ movptr(word_count, count); 1802 __ shrptr(count, 2); // count => qword_count 1803 1804 // Copy from high to low addresses. Use 'to' as scratch. 1805 1806 // Check for and copy trailing word 1807 __ testl(word_count, 1); 1808 __ jccb(Assembler::zero, L_copy_4_bytes); 1809 __ movw(rax, Address(from, word_count, Address::times_2, -2)); 1810 __ movw(Address(to, word_count, Address::times_2, -2), rax); 1811 1812 // Check for and copy trailing dword 1813 __ BIND(L_copy_4_bytes); 1814 __ testl(word_count, 2); 1815 __ jcc(Assembler::zero, L_copy_bytes); 1816 __ movl(rax, Address(from, qword_count, Address::times_8)); 1817 __ movl(Address(to, qword_count, Address::times_8), rax); 1818 __ jmp(L_copy_bytes); 1819 1820 // Copy trailing qwords 1821 __ BIND(L_copy_8_bytes); 1822 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1823 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1824 __ decrement(qword_count); 1825 __ jcc(Assembler::notZero, L_copy_8_bytes); 1826 1827 restore_arg_regs(); 1828 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1829 __ xorptr(rax, rax); // return 0 1830 __ vzeroupper(); 1831 __ leave(); // required for proper stackwalking of RuntimeStub frame 1832 __ ret(0); 1833 1834 // Copy in multi-byte chunks 1835 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1836 1837 restore_arg_regs(); 1838 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1839 __ xorptr(rax, rax); // return 0 1840 __ vzeroupper(); 1841 __ leave(); // required for proper stackwalking of RuntimeStub frame 1842 __ ret(0); 1843 1844 return start; 1845 } 1846 1847 // Arguments: 1848 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1849 // ignored 1850 // is_oop - true => oop array, so generate store check code 1851 // name - stub name string 1852 // 1853 // Inputs: 1854 // c_rarg0 - source array address 1855 // c_rarg1 - destination array address 1856 // c_rarg2 - element count, treated as ssize_t, can be zero 1857 // 1858 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 1859 // the hardware handle it. The two dwords within qwords that span 1860 // cache line boundaries will still be loaded and stored atomically.
1861 // 1862 // Side Effects: 1863 // disjoint_int_copy_entry is set to the no-overlap entry point 1864 // used by generate_conjoint_int_oop_copy(). 1865 // 1866 address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry, 1867 const char *name, bool dest_uninitialized = false) { 1868 __ align(CodeEntryAlignment); 1869 StubCodeMark mark(this, "StubRoutines", name); 1870 address start = __ pc(); 1871 1872 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit; 1873 const Register from = rdi; // source array address 1874 const Register to = rsi; // destination array address 1875 const Register count = rdx; // elements count 1876 const Register dword_count = rcx; 1877 const Register qword_count = count; 1878 const Register end_from = from; // source array end address 1879 const Register end_to = to; // destination array end address 1880 // End pointers are inclusive, and if count is not zero they point 1881 // to the last unit copied: end_to[0] := end_from[0] 1882 1883 __ enter(); // required for proper stackwalking of RuntimeStub frame 1884 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1885 1886 if (entry != NULL) { 1887 *entry = __ pc(); 1888 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1889 BLOCK_COMMENT("Entry:"); 1890 } 1891 1892 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx 1893 // r9 is used to save r15_thread 1894 1895 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT; 1896 if (dest_uninitialized) { 1897 decorators |= IS_DEST_UNINITIALIZED; 1898 } 1899 if (aligned) { 1900 decorators |= ARRAYCOPY_ALIGNED; 1901 } 1902 1903 BasicType type = is_oop ? T_OBJECT : T_INT; 1904 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 1905 bs->arraycopy_prologue(_masm, decorators, type, from, to, count); 1906 1907 // 'from', 'to' and 'count' are now valid 1908 __ movptr(dword_count, count); 1909 __ shrptr(count, 1); // count => qword_count 1910 1911 // Copy from low to high addresses. Use 'to' as scratch. 
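// Note: for oop arrays the barrier set brackets the raw copy:
//   bs->arraycopy_prologue(...)   (e.g. G1's SATB pre-barrier work,
//                                  skipped when IS_DEST_UNINITIALIZED is set)
//   ... raw element copy ...
//   bs->arraycopy_epilogue(...)   (e.g. card marking / post-barrier over
//                                  the destination range)
// Exactly what is emitted is up to the active BarrierSetAssembler.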
1912 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1913 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1914 __ negptr(qword_count); 1915 __ jmp(L_copy_bytes); 1916 1917 // Copy trailing qwords 1918 __ BIND(L_copy_8_bytes); 1919 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1920 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1921 __ increment(qword_count); 1922 __ jcc(Assembler::notZero, L_copy_8_bytes); 1923 1924 // Check for and copy trailing dword 1925 __ BIND(L_copy_4_bytes); 1926 __ testl(dword_count, 1); // Only byte test since the value is 0 or 1 1927 __ jccb(Assembler::zero, L_exit); 1928 __ movl(rax, Address(end_from, 8)); 1929 __ movl(Address(end_to, 8), rax); 1930 1931 __ BIND(L_exit); 1932 bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count); 1933 restore_arg_regs_using_thread(); 1934 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 1935 __ vzeroupper(); 1936 __ xorptr(rax, rax); // return 0 1937 __ leave(); // required for proper stackwalking of RuntimeStub frame 1938 __ ret(0); 1939 1940 // Copy in multi-byte chunks 1941 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1942 __ jmp(L_copy_4_bytes); 1943 1944 return start; 1945 } 1946 1947 // Arguments: 1948 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1949 // ignored 1950 // is_oop - true => oop array, so generate store check code 1951 // name - stub name string 1952 // 1953 // Inputs: 1954 // c_rarg0 - source array address 1955 // c_rarg1 - destination array address 1956 // c_rarg2 - element count, treated as ssize_t, can be zero 1957 // 1958 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 1959 // the hardware handle it. The two dwords within qwords that span 1960 // cache line boundaries will still be loaded and stored atomically. 1961 // 1962 address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target, 1963 address *entry, const char *name, 1964 bool dest_uninitialized = false) { 1965 __ align(CodeEntryAlignment); 1966 StubCodeMark mark(this, "StubRoutines", name); 1967 address start = __ pc(); 1968 1969 Label L_copy_bytes, L_copy_8_bytes, L_exit; 1970 const Register from = rdi; // source array address 1971 const Register to = rsi; // destination array address 1972 const Register count = rdx; // elements count 1973 const Register dword_count = rcx; 1974 const Register qword_count = count; 1975 1976 __ enter(); // required for proper stackwalking of RuntimeStub frame 1977 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1978 1979 if (entry != NULL) { 1980 *entry = __ pc(); 1981 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1982 BLOCK_COMMENT("Entry:"); 1983 } 1984 1985 array_overlap_test(nooverlap_target, Address::times_4); 1986 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx 1987 // r9 is used to save r15_thread 1988 1989 DecoratorSet decorators = IN_HEAP | IS_ARRAY; 1990 if (dest_uninitialized) { 1991 decorators |= IS_DEST_UNINITIALIZED; 1992 } 1993 if (aligned) { 1994 decorators |= ARRAYCOPY_ALIGNED; 1995 } 1996 1997 BasicType type = is_oop ?
T_OBJECT : T_INT; 1998 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 1999 // no registers are destroyed by this call 2000 bs->arraycopy_prologue(_masm, decorators, type, from, to, count); 2001 2002 assert_clean_int(count, rax); // Make sure 'count' is clean int. 2003 // 'from', 'to' and 'count' are now valid 2004 __ movptr(dword_count, count); 2005 __ shrptr(count, 1); // count => qword_count 2006 2007 // Copy from high to low addresses. Use 'to' as scratch. 2008 2009 // Check for and copy trailing dword 2010 __ testl(dword_count, 1); 2011 __ jcc(Assembler::zero, L_copy_bytes); 2012 __ movl(rax, Address(from, dword_count, Address::times_4, -4)); 2013 __ movl(Address(to, dword_count, Address::times_4, -4), rax); 2014 __ jmp(L_copy_bytes); 2015 2016 // Copy trailing qwords 2017 __ BIND(L_copy_8_bytes); 2018 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2019 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2020 __ decrement(qword_count); 2021 __ jcc(Assembler::notZero, L_copy_8_bytes); 2022 2023 if (is_oop) { 2024 __ jmp(L_exit); 2025 } 2026 restore_arg_regs_using_thread(); 2027 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2028 __ xorptr(rax, rax); // return 0 2029 __ vzeroupper(); 2030 __ leave(); // required for proper stackwalking of RuntimeStub frame 2031 __ ret(0); 2032 2033 // Copy in multi-byte chunks 2034 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2035 2036 __ BIND(L_exit); 2037 bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count); 2038 restore_arg_regs_using_thread(); 2039 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2040 __ xorptr(rax, rax); // return 0 2041 __ vzeroupper(); 2042 __ leave(); // required for proper stackwalking of RuntimeStub frame 2043 __ ret(0); 2044 2045 return start; 2046 } 2047 2048 // Arguments: 2049 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2050 // ignored 2051 // is_oop - true => oop array, so generate store check code 2052 // name - stub name string 2053 // 2054 // Inputs: 2055 // c_rarg0 - source array address 2056 // c_rarg1 - destination array address 2057 // c_rarg2 - element count, treated as ssize_t, can be zero 2058 // 2059 // Side Effects: 2060 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the 2061 // no-overlap entry point used by generate_conjoint_long_oop_copy(). 2062 // 2063 address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry, 2064 const char *name, bool dest_uninitialized = false) { 2065 __ align(CodeEntryAlignment); 2066 StubCodeMark mark(this, "StubRoutines", name); 2067 address start = __ pc(); 2068 2069 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2070 const Register from = rdi; // source array address 2071 const Register to = rsi; // destination array address 2072 const Register qword_count = rdx; // elements count 2073 const Register end_from = from; // source array end address 2074 const Register end_to = rcx; // destination array end address 2075 const Register saved_count = r11; 2076 // End pointers are inclusive, and if count is not zero they point 2077 // to the last unit copied: end_to[0] := end_from[0] 2078 2079 __ enter(); // required for proper stackwalking of RuntimeStub frame 2080 // Save no-overlap entry point for generate_conjoint_long_oop_copy() 2081 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
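// The address published through 'entry' below is a secondary entry point:
// generate_conjoint_long_oop_copy() branches here once its overlap test
// proves a forward copy is safe, and the unsafe/generic dispatch stubs
// jump here with the arguments already in c_rarg0..c_rarg2 (hence the
// 64-bit count caveat below).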
2082 2083 if (entry != NULL) { 2084 *entry = __ pc(); 2085 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2086 BLOCK_COMMENT("Entry:"); 2087 } 2088 2089 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx 2090 // r9 is used to save r15_thread 2091 // 'from', 'to' and 'qword_count' are now valid 2092 2093 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT; 2094 if (dest_uninitialized) { 2095 decorators |= IS_DEST_UNINITIALIZED; 2096 } 2097 if (aligned) { 2098 decorators |= ARRAYCOPY_ALIGNED; 2099 } 2100 2101 BasicType type = is_oop ? T_OBJECT : T_LONG; 2102 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2103 bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count); 2104 2105 // Copy from low to high addresses. Use 'to' as scratch. 2106 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 2107 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 2108 __ negptr(qword_count); 2109 __ jmp(L_copy_bytes); 2110 2111 // Copy trailing qwords 2112 __ BIND(L_copy_8_bytes); 2113 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 2114 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 2115 __ increment(qword_count); 2116 __ jcc(Assembler::notZero, L_copy_8_bytes); 2117 2118 if (is_oop) { 2119 __ jmp(L_exit); 2120 } else { 2121 restore_arg_regs_using_thread(); 2122 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2123 __ xorptr(rax, rax); // return 0 2124 __ vzeroupper(); 2125 __ leave(); // required for proper stackwalking of RuntimeStub frame 2126 __ ret(0); 2127 } 2128 2129 // Copy in multi-byte chunks 2130 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2131 2132 __ BIND(L_exit); 2133 bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count); 2134 restore_arg_regs_using_thread(); 2135 if (is_oop) { 2136 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2137 } else { 2138 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2139 } 2140 __ vzeroupper(); 2141 __ xorptr(rax, rax); // return 0 2142 __ leave(); // required for proper stackwalking of RuntimeStub frame 2143 __ ret(0); 2144 2145 return start; 2146 } 2147 2148 // Arguments: 2149 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2150 // ignored 2151 // is_oop - true => oop array, so generate store check code 2152 // name - stub name string 2153 // 2154 // Inputs: 2155 // c_rarg0 - source array address 2156 // c_rarg1 - destination array address 2157 // c_rarg2 - element count, treated as ssize_t, can be zero 2158 // 2159 address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, 2160 address nooverlap_target, address *entry, 2161 const char *name, bool dest_uninitialized = false) { 2162 __ align(CodeEntryAlignment); 2163 StubCodeMark mark(this, "StubRoutines", name); 2164 address start = __ pc(); 2165 2166 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2167 const Register from = rdi; // source array address 2168 const Register to = rsi; // destination array address 2169 const Register qword_count = rdx; // elements count 2170 const Register saved_count = rcx; 2171 2172 __ enter(); // required for proper stackwalking of RuntimeStub frame 2173 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
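// How the disjoint/conjoint pair fits together (sketch):
//   disjoint stub:   fwd_entry: copy forward, low -> high
//   conjoint stub:   if (src and dst do not overlap) goto fwd_entry;
//                    copy backward, high -> low
// 'nooverlap_target' passed in here is the disjoint stub's fwd_entry,
// and array_overlap_test() below emits the dispatch.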
2174 2175 if (entry != NULL) { 2176 *entry = __ pc(); 2177 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2178 BLOCK_COMMENT("Entry:"); 2179 } 2180 2181 array_overlap_test(nooverlap_target, Address::times_8); 2182 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx 2183 // r9 is used to save r15_thread 2184 // 'from', 'to' and 'qword_count' are now valid 2185 2186 DecoratorSet decorators = IN_HEAP | IS_ARRAY; 2187 if (dest_uninitialized) { 2188 decorators |= IS_DEST_UNINITIALIZED; 2189 } 2190 if (aligned) { 2191 decorators |= ARRAYCOPY_ALIGNED; 2192 } 2193 2194 BasicType type = is_oop ? T_OBJECT : T_LONG; 2195 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2196 bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count); 2197 2198 __ jmp(L_copy_bytes); 2199 2200 // Copy trailing qwords 2201 __ BIND(L_copy_8_bytes); 2202 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2203 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2204 __ decrement(qword_count); 2205 __ jcc(Assembler::notZero, L_copy_8_bytes); 2206 2207 if (is_oop) { 2208 __ jmp(L_exit); 2209 } else { 2210 restore_arg_regs_using_thread(); 2211 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2212 __ xorptr(rax, rax); // return 0 2213 __ vzeroupper(); 2214 __ leave(); // required for proper stackwalking of RuntimeStub frame 2215 __ ret(0); 2216 } 2217 2218 // Copy in multi-byte chunks 2219 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2220 2221 __ BIND(L_exit); 2222 bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count); 2223 restore_arg_regs_using_thread(); 2224 if (is_oop) { 2225 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2226 } else { 2227 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2228 } 2229 __ vzeroupper(); 2230 __ xorptr(rax, rax); // return 0 2231 __ leave(); // required for proper stackwalking of RuntimeStub frame 2232 __ ret(0); 2233 2234 return start; 2235 } 2236 2237 2238 // Helper for generating a dynamic type check. 2239 // Smashes no registers. 2240 void generate_type_check(Register sub_klass, 2241 Register super_check_offset, 2242 Register super_klass, 2243 Label& L_success) { 2244 assert_different_registers(sub_klass, super_check_offset, super_klass); 2245 2246 BLOCK_COMMENT("type_check:"); 2247 2248 Label L_miss; 2249 2250 __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL, 2251 super_check_offset); 2252 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL); 2253 2254 // Fall through on failure!
2255 __ BIND(L_miss); 2256 } 2257 2258 // 2259 // Generate checkcasting array copy stub 2260 // 2261 // Input: 2262 // c_rarg0 - source array address 2263 // c_rarg1 - destination array address 2264 // c_rarg2 - element count, treated as ssize_t, can be zero 2265 // c_rarg3 - size_t ckoff (super_check_offset) 2266 // not Win64 2267 // c_rarg4 - oop ckval (super_klass) 2268 // Win64 2269 // rsp+40 - oop ckval (super_klass) 2270 // 2271 // Output: 2272 // rax == 0 - success 2273 // rax == -1^K - failure, where K is partial transfer count 2274 // 2275 address generate_checkcast_copy(const char *name, address *entry, 2276 bool dest_uninitialized = false) { 2277 2278 Label L_load_element, L_store_element, L_do_card_marks, L_done; 2279 2280 // Input registers (after setup_arg_regs) 2281 const Register from = rdi; // source array address 2282 const Register to = rsi; // destination array address 2283 const Register length = rdx; // elements count 2284 const Register ckoff = rcx; // super_check_offset 2285 const Register ckval = r8; // super_klass 2286 2287 // Registers used as temps (r13, r14 are save-on-entry) 2288 const Register end_from = from; // source array end address 2289 const Register end_to = r13; // destination array end address 2290 const Register count = rdx; // -(count_remaining) 2291 const Register r14_length = r14; // saved copy of length 2292 // End pointers are inclusive, and if length is not zero they point 2293 // to the last unit copied: end_to[0] := end_from[0] 2294 2295 const Register rax_oop = rax; // actual oop copied 2296 const Register r11_klass = r11; // oop._klass 2297 2298 //--------------------------------------------------------------- 2299 // Assembler stub will be used for this call to arraycopy 2300 // if the two arrays are subtypes of Object[] but the 2301 // destination array type is not equal to or a supertype 2302 // of the source type. Each element must be separately 2303 // checked. 2304 2305 __ align(CodeEntryAlignment); 2306 StubCodeMark mark(this, "StubRoutines", name); 2307 address start = __ pc(); 2308 2309 __ enter(); // required for proper stackwalking of RuntimeStub frame 2310 2311 #ifdef ASSERT 2312 // caller guarantees that the arrays really are different 2313 // otherwise, we would have to make conjoint checks 2314 { Label L; 2315 array_overlap_test(L, TIMES_OOP); 2316 __ stop("checkcast_copy within a single array"); 2317 __ bind(L); 2318 } 2319 #endif //ASSERT 2320 2321 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx 2322 // ckoff => rcx, ckval => r8 2323 // r9 and r10 may be used to save non-volatile registers 2324 #ifdef _WIN64 2325 // last argument (#4) is on stack on Win64 2326 __ movptr(ckval, Address(rsp, 6 * wordSize)); 2327 #endif 2328 2329 // Caller of this entry point must set up the argument registers. 
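// (Sketch: the one such caller in this file is the checkcast path of
// generate_generic_copy(), which arrives with from/to/length already in
// rdi/rsi/rdx, ckoff in rcx and ckval in r8, i.e. past the Win64
// stack-argument fixup above.)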
2330 if (entry != NULL) { 2331 *entry = __ pc(); 2332 BLOCK_COMMENT("Entry:"); 2333 } 2334 2335 // allocate spill slots for r13, r14 2336 enum { 2337 saved_r13_offset, 2338 saved_r14_offset, 2339 saved_r10_offset, 2340 saved_rbp_offset 2341 }; 2342 __ subptr(rsp, saved_rbp_offset * wordSize); 2343 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13); 2344 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14); 2345 __ movptr(Address(rsp, saved_r10_offset * wordSize), r10); 2346 2347 #ifdef ASSERT 2348 Label L2; 2349 __ get_thread(r14); 2350 __ cmpptr(r15_thread, r14); 2351 __ jcc(Assembler::equal, L2); 2352 __ stop("StubRoutines::checkcast_copy: r15_thread is modified by call"); 2353 __ bind(L2); 2354 #endif // ASSERT 2355 2356 // check that int operands are properly extended to size_t 2357 assert_clean_int(length, rax); 2358 assert_clean_int(ckoff, rax); 2359 2360 #ifdef ASSERT 2361 BLOCK_COMMENT("assert consistent ckoff/ckval"); 2362 // The ckoff and ckval must be mutually consistent, 2363 // even though caller generates both. 2364 { Label L; 2365 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2366 __ cmpl(ckoff, Address(ckval, sco_offset)); 2367 __ jcc(Assembler::equal, L); 2368 __ stop("super_check_offset inconsistent"); 2369 __ bind(L); 2370 } 2371 #endif //ASSERT 2372 2373 // Loop-invariant addresses. They are exclusive end pointers. 2374 Address end_from_addr(from, length, TIMES_OOP, 0); 2375 Address end_to_addr(to, length, TIMES_OOP, 0); 2376 // Loop-variant addresses. They assume post-incremented count < 0. 2377 Address from_element_addr(end_from, count, TIMES_OOP, 0); 2378 Address to_element_addr(end_to, count, TIMES_OOP, 0); 2379 2380 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST; 2381 if (dest_uninitialized) { 2382 decorators |= IS_DEST_UNINITIALIZED; 2383 } 2384 2385 BasicType type = T_OBJECT; 2386 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2387 bs->arraycopy_prologue(_masm, decorators, type, from, to, count); 2388 2389 // Copy from low to high addresses, indexed from the end of each array. 2390 __ lea(end_from, end_from_addr); 2391 __ lea(end_to, end_to_addr); 2392 __ movptr(r14_length, length); // save a copy of the length 2393 assert(length == count, ""); // else fix next line: 2394 __ negptr(count); // negate and test the length 2395 __ jcc(Assembler::notZero, L_load_element); 2396 2397 // Empty array: Nothing to do. 2398 __ xorptr(rax, rax); // return 0 on (trivial) success 2399 __ jmp(L_done); 2400 2401 // ======== begin loop ======== 2402 // (Loop is rotated; its entry is L_load_element.) 2403 // Loop control: 2404 // for (count = -count; count != 0; count++) 2405 // Base pointers src, dst are biased by 8*(count-1), to the last element. 2406 __ align(OptoLoopAlignment); 2407 2408 __ BIND(L_store_element); 2409 __ store_heap_oop(to_element_addr, rax_oop, noreg, noreg, AS_RAW); // store the oop 2410 __ increment(count); // increment the count toward zero 2411 __ jcc(Assembler::zero, L_do_card_marks); 2412 2413 // ======== loop entry is here ======== 2414 __ BIND(L_load_element); 2415 __ load_heap_oop(rax_oop, from_element_addr, noreg, noreg, AS_RAW); // load the oop 2416 __ testptr(rax_oop, rax_oop); 2417 __ jcc(Assembler::zero, L_store_element); 2418 2419 __ load_klass(r11_klass, rax_oop); // query the object klass 2420 generate_type_check(r11_klass, ckoff, ckval, L_store_element); 2421 // ======== end loop ======== 2422 2423 // It was a real error; we must depend on the caller to finish the job.
2424 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops. 2425 // Emit GC store barriers for the oops we have copied (r14 + rdx), 2426 // and report their number to the caller. 2427 assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1); 2428 Label L_post_barrier; 2429 __ addptr(r14_length, count); // K = (original - remaining) oops 2430 __ movptr(rax, r14_length); // save the value 2431 __ notptr(rax); // report (-1^K) to caller (does not affect flags) 2432 __ jccb(Assembler::notZero, L_post_barrier); 2433 __ jmp(L_done); // K == 0, nothing was copied, skip post barrier 2434 2435 // Come here on success only. 2436 __ BIND(L_do_card_marks); 2437 __ xorptr(rax, rax); // return 0 on success 2438 2439 __ BIND(L_post_barrier); 2440 bs->arraycopy_epilogue(_masm, decorators, type, from, to, r14_length); 2441 2442 // Common exit point (success or failure). 2443 __ BIND(L_done); 2444 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 2445 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 2446 __ movptr(r10, Address(rsp, saved_r10_offset * wordSize)); 2447 restore_arg_regs(); 2448 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free 2449 __ leave(); // required for proper stackwalking of RuntimeStub frame 2450 __ ret(0); 2451 2452 return start; 2453 } 2454 2455 // 2456 // Generate 'unsafe' array copy stub 2457 // Though just as safe as the other stubs, it takes an unscaled 2458 // size_t argument instead of an element count. 2459 // 2460 // Input: 2461 // c_rarg0 - source array address 2462 // c_rarg1 - destination array address 2463 // c_rarg2 - byte count, treated as ssize_t, can be zero 2464 // 2465 // Examines the alignment of the operands and dispatches 2466 // to a long, int, short, or byte copy loop. 
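// Dispatch sketch (illustrative):
//   bits = (uintptr_t)from | (uintptr_t)to | size;
//   (bits & 7) == 0  -> long copy   with count = size >> 3
//   (bits & 3) == 0  -> int copy    with count = size >> 2
//   (bits & 1) == 0  -> short copy  with count = size >> 1
//   otherwise        -> byte copy   with count = size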
2467 // 2468 address generate_unsafe_copy(const char *name, 2469 address byte_copy_entry, address short_copy_entry, 2470 address int_copy_entry, address long_copy_entry) { 2471 2472 Label L_long_aligned, L_int_aligned, L_short_aligned; 2473 2474 // Input registers (before setup_arg_regs) 2475 const Register from = c_rarg0; // source array address 2476 const Register to = c_rarg1; // destination array address 2477 const Register size = c_rarg2; // byte count (size_t) 2478 2479 // Register used as a temp 2480 const Register bits = rax; // test copy of low bits 2481 2482 __ align(CodeEntryAlignment); 2483 StubCodeMark mark(this, "StubRoutines", name); 2484 address start = __ pc(); 2485 2486 __ enter(); // required for proper stackwalking of RuntimeStub frame 2487 2488 // bump this on entry, not on exit: 2489 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr); 2490 2491 __ mov(bits, from); 2492 __ orptr(bits, to); 2493 __ orptr(bits, size); 2494 2495 __ testb(bits, BytesPerLong-1); 2496 __ jccb(Assembler::zero, L_long_aligned); 2497 2498 __ testb(bits, BytesPerInt-1); 2499 __ jccb(Assembler::zero, L_int_aligned); 2500 2501 __ testb(bits, BytesPerShort-1); 2502 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry)); 2503 2504 __ BIND(L_short_aligned); 2505 __ shrptr(size, LogBytesPerShort); // size => short_count 2506 __ jump(RuntimeAddress(short_copy_entry)); 2507 2508 __ BIND(L_int_aligned); 2509 __ shrptr(size, LogBytesPerInt); // size => int_count 2510 __ jump(RuntimeAddress(int_copy_entry)); 2511 2512 __ BIND(L_long_aligned); 2513 __ shrptr(size, LogBytesPerLong); // size => qword_count 2514 __ jump(RuntimeAddress(long_copy_entry)); 2515 2516 return start; 2517 } 2518 2519 // Perform range checks on the proposed arraycopy. 2520 // Kills temp, but nothing else. 2521 // Also, clean the sign bits of src_pos and dst_pos. 2522 void arraycopy_range_checks(Register src, // source array oop (c_rarg0) 2523 Register src_pos, // source position (c_rarg1) 2524 Register dst, // destination array oop (c_rarg2) 2525 Register dst_pos, // destination position (c_rarg3) 2526 Register length, 2527 Register temp, 2528 Label& L_failed) { 2529 BLOCK_COMMENT("arraycopy_range_checks:"); 2530 2531 // if (src_pos + length > arrayOop(src)->length()) FAIL; 2532 __ movl(temp, length); 2533 __ addl(temp, src_pos); // src_pos + length 2534 __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes())); 2535 __ jcc(Assembler::above, L_failed); 2536 2537 // if (dst_pos + length > arrayOop(dst)->length()) FAIL; 2538 __ movl(temp, length); 2539 __ addl(temp, dst_pos); // dst_pos + length 2540 __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes())); 2541 __ jcc(Assembler::above, L_failed); 2542 2543 // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'. 2544 // Move with sign extension can be used since they are positive.
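// (Illustrative: a jint position such as 0x7fffffff widens to
// 0x000000007fffffff; the sign bit is never set here because negative
// positions were rejected by the caller before this helper runs.)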
2545 __ movslq(src_pos, src_pos); 2546 __ movslq(dst_pos, dst_pos); 2547 2548 BLOCK_COMMENT("arraycopy_range_checks done"); 2549 } 2550 2551 // 2552 // Generate generic array copy stubs 2553 // 2554 // Input: 2555 // c_rarg0 - src oop 2556 // c_rarg1 - src_pos (32-bits) 2557 // c_rarg2 - dst oop 2558 // c_rarg3 - dst_pos (32-bits) 2559 // not Win64 2560 // c_rarg4 - element count (32-bits) 2561 // Win64 2562 // rsp+40 - element count (32-bits) 2563 // 2564 // Output: 2565 // rax == 0 - success 2566 // rax == -1^K - failure, where K is partial transfer count 2567 // 2568 address generate_generic_copy(const char *name, 2569 address byte_copy_entry, address short_copy_entry, 2570 address int_copy_entry, address oop_copy_entry, 2571 address long_copy_entry, address checkcast_copy_entry) { 2572 2573 Label L_failed, L_failed_0, L_objArray; 2574 Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs; 2575 2576 // Input registers 2577 const Register src = c_rarg0; // source array oop 2578 const Register src_pos = c_rarg1; // source position 2579 const Register dst = c_rarg2; // destination array oop 2580 const Register dst_pos = c_rarg3; // destination position 2581 #ifndef _WIN64 2582 const Register length = c_rarg4; 2583 #else 2584 const Address length(rsp, 6 * wordSize); // elements count is on stack on Win64 2585 #endif 2586 2587 { int modulus = CodeEntryAlignment; 2588 int target = modulus - 5; // 5 = sizeof jmp(L_failed) 2589 int advance = target - (__ offset() % modulus); 2590 if (advance < 0) advance += modulus; 2591 if (advance > 0) __ nop(advance); 2592 } 2593 StubCodeMark mark(this, "StubRoutines", name); 2594 2595 // Short-hop target to L_failed. Makes for denser prologue code. 2596 __ BIND(L_failed_0); 2597 __ jmp(L_failed); 2598 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed"); 2599 2600 __ align(CodeEntryAlignment); 2601 address start = __ pc(); 2602 2603 __ enter(); // required for proper stackwalking of RuntimeStub frame 2604 2605 // bump this on entry, not on exit: 2606 inc_counter_np(SharedRuntime::_generic_array_copy_ctr); 2607 2608 //----------------------------------------------------------------------- 2609 // Assembler stub will be used for this call to arraycopy 2610 // if the following conditions are met: 2611 // 2612 // (1) src and dst must not be null. 2613 // (2) src_pos must not be negative. 2614 // (3) dst_pos must not be negative. 2615 // (4) length must not be negative. 2616 // (5) src klass and dst klass should be the same and not NULL. 2617 // (6) src and dst should be arrays. 2618 // (7) src_pos + length must not exceed length of src. 2619 // (8) dst_pos + length must not exceed length of dst. 2620 // 2621 2622 // if (src == NULL) return -1; 2623 __ testptr(src, src); // src oop 2624 size_t j1off = __ offset(); 2625 __ jccb(Assembler::zero, L_failed_0); 2626 2627 // if (src_pos < 0) return -1; 2628 __ testl(src_pos, src_pos); // src_pos (32-bits) 2629 __ jccb(Assembler::negative, L_failed_0); 2630 2631 // if (dst == NULL) return -1; 2632 __ testptr(dst, dst); // dst oop 2633 __ jccb(Assembler::zero, L_failed_0); 2634 2635 // if (dst_pos < 0) return -1; 2636 __ testl(dst_pos, dst_pos); // dst_pos (32-bits) 2637 size_t j4off = __ offset(); 2638 __ jccb(Assembler::negative, L_failed_0); 2639 2640 // The first four tests are very dense code, 2641 // but not quite dense enough to put four 2642 // jumps in a 16-byte instruction fetch buffer. 
2643 // That's good, because some branch predictors 2644 // do not like jumps so close together. 2645 // Make sure of this. 2646 guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps"); 2647 2648 // registers used as temp 2649 const Register r11_length = r11; // elements count to copy 2650 const Register r10_src_klass = r10; // array klass 2651 2652 // if (length < 0) return -1; 2653 __ movl(r11_length, length); // length (elements count, 32-bits value) 2654 __ testl(r11_length, r11_length); 2655 __ jccb(Assembler::negative, L_failed_0); 2656 2657 __ load_klass(r10_src_klass, src); 2658 #ifdef ASSERT 2659 // assert(src->klass() != NULL); 2660 { 2661 BLOCK_COMMENT("assert klasses not null {"); 2662 Label L1, L2; 2663 __ testptr(r10_src_klass, r10_src_klass); 2664 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL 2665 __ bind(L1); 2666 __ stop("broken null klass"); 2667 __ bind(L2); 2668 __ load_klass(rax, dst); 2669 __ cmpq(rax, 0); 2670 __ jcc(Assembler::equal, L1); // this would be broken also 2671 BLOCK_COMMENT("} assert klasses not null done"); 2672 } 2673 #endif 2674 2675 // Load layout helper (32-bits) 2676 // 2677 // |array_tag| | header_size | element_type | |log2_element_size| 2678 // 32 30 24 16 8 2 0 2679 // 2680 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 2681 // 2682 2683 const int lh_offset = in_bytes(Klass::layout_helper_offset()); 2684 2685 // Handle objArrays completely differently... 2686 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2687 __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh); 2688 __ jcc(Assembler::equal, L_objArray); 2689 2690 // if (src->klass() != dst->klass()) return -1; 2691 __ load_klass(rax, dst); 2692 __ cmpq(r10_src_klass, rax); 2693 __ jcc(Assembler::notEqual, L_failed); 2694 2695 const Register rax_lh = rax; // layout helper 2696 __ movl(rax_lh, Address(r10_src_klass, lh_offset)); 2697 2698 // if (!src->is_Array()) return -1; 2699 __ cmpl(rax_lh, Klass::_lh_neutral_value); 2700 __ jcc(Assembler::greaterEqual, L_failed); 2701 2702 // At this point, it is known to be a typeArray (array_tag 0x3).
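// Worked example of the decoding below (values assume a typical 64-bit
// build with compressed class pointers): for int[],
//   lh = (0x3 << Klass::_lh_array_tag_shift)
//      | (16  << Klass::_lh_header_size_shift)
//      | (T_INT << Klass::_lh_element_type_shift) | 2
// i.e. a 16-byte header and log2(element size) == 2. The 0x3 tag sets
// the sign bit, which is what the _lh_neutral_value compare above used
// to filter out non-arrays.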
2703 #ifdef ASSERT 2704 { 2705 BLOCK_COMMENT("assert primitive array {"); 2706 Label L; 2707 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift)); 2708 __ jcc(Assembler::greaterEqual, L); 2709 __ stop("must be a primitive array"); 2710 __ bind(L); 2711 BLOCK_COMMENT("} assert primitive array done"); 2712 } 2713 #endif 2714 2715 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2716 r10, L_failed); 2717 2718 // TypeArrayKlass 2719 // 2720 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 2721 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 2722 // 2723 2724 const Register r10_offset = r10; // array offset 2725 const Register rax_elsize = rax_lh; // element size 2726 2727 __ movl(r10_offset, rax_lh); 2728 __ shrl(r10_offset, Klass::_lh_header_size_shift); 2729 __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset 2730 __ addptr(src, r10_offset); // src array offset 2731 __ addptr(dst, r10_offset); // dst array offset 2732 BLOCK_COMMENT("choose copy loop based on element size"); 2733 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize 2734 2735 // the following registers must be set before jumping to a copy stub 2736 const Register from = c_rarg0; // source array address 2737 const Register to = c_rarg1; // destination array address 2738 const Register count = c_rarg2; // elements count 2739 2740 // 'from', 'to' and 'count' must be written in exactly this order, 2741 // since they alias 'src', 'src_pos' and 'dst' respectively. 2742 2743 __ BIND(L_copy_bytes); 2744 __ cmpl(rax_elsize, 0); 2745 __ jccb(Assembler::notEqual, L_copy_shorts); 2746 __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr 2747 __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr 2748 __ movl2ptr(count, r11_length); // length 2749 __ jump(RuntimeAddress(byte_copy_entry)); 2750 2751 __ BIND(L_copy_shorts); 2752 __ cmpl(rax_elsize, LogBytesPerShort); 2753 __ jccb(Assembler::notEqual, L_copy_ints); 2754 __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr 2755 __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr 2756 __ movl2ptr(count, r11_length); // length 2757 __ jump(RuntimeAddress(short_copy_entry)); 2758 2759 __ BIND(L_copy_ints); 2760 __ cmpl(rax_elsize, LogBytesPerInt); 2761 __ jccb(Assembler::notEqual, L_copy_longs); 2762 __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr 2763 __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr 2764 __ movl2ptr(count, r11_length); // length 2765 __ jump(RuntimeAddress(int_copy_entry)); 2766 2767 __ BIND(L_copy_longs); 2768 #ifdef ASSERT 2769 { 2770 BLOCK_COMMENT("assert long copy {"); 2771 Label L; 2772 __ cmpl(rax_elsize, LogBytesPerLong); 2773 __ jcc(Assembler::equal, L); 2774 __ stop("must be long copy, but elsize is wrong"); 2775 __ bind(L); 2776 BLOCK_COMMENT("} assert long copy done"); 2777 } 2778 #endif 2779 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr 2780 __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr 2781 __ movl2ptr(count, r11_length); // length 2782 __ jump(RuntimeAddress(long_copy_entry)); 2783 2784 // ObjArrayKlass 2785 __ BIND(L_objArray); 2786 // live at this point: r10_src_klass, r11_length, src[_pos], dst[_pos] 2787 2788 Label L_plain_copy, L_checkcast_copy; 2789 // test array classes for subtyping 2790 __ load_klass(rax, dst); 2791 __ cmpq(r10_src_klass, rax); // usual case is exact equality 2792 __
jcc(Assembler::notEqual, L_checkcast_copy); 2793 2794 // Identically typed arrays can be copied without element-wise checks. 2795 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2796 r10, L_failed); 2797 2798 __ lea(from, Address(src, src_pos, TIMES_OOP, 2799 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr 2800 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2801 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr 2802 __ movl2ptr(count, r11_length); // length 2803 __ BIND(L_plain_copy); 2804 __ jump(RuntimeAddress(oop_copy_entry)); 2805 2806 __ BIND(L_checkcast_copy); 2807 // live at this point: r10_src_klass, r11_length, rax (dst_klass) 2808 { 2809 // Before looking at dst.length, make sure dst is also an objArray. 2810 __ cmpl(Address(rax, lh_offset), objArray_lh); 2811 __ jcc(Assembler::notEqual, L_failed); 2812 2813 // It is safe to examine both src.length and dst.length. 2814 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2815 rax, L_failed); 2816 2817 const Register r11_dst_klass = r11; 2818 __ load_klass(r11_dst_klass, dst); // reload 2819 2820 // Marshal the base address arguments now, freeing registers. 2821 __ lea(from, Address(src, src_pos, TIMES_OOP, 2822 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2823 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2824 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2825 __ movl(count, length); // length (reloaded) 2826 Register sco_temp = c_rarg3; // this register is free now 2827 assert_different_registers(from, to, count, sco_temp, 2828 r11_dst_klass, r10_src_klass); 2829 assert_clean_int(count, sco_temp); 2830 2831 // Generate the type check. 2832 const int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2833 __ movl(sco_temp, Address(r11_dst_klass, sco_offset)); 2834 assert_clean_int(sco_temp, rax); 2835 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy); 2836 2837 // Fetch destination element klass from the ObjArrayKlass header. 2838 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 2839 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset)); 2840 __ movl( sco_temp, Address(r11_dst_klass, sco_offset)); 2841 assert_clean_int(sco_temp, rax); 2842 2843 // the checkcast_copy loop needs two extra arguments: 2844 assert(c_rarg3 == sco_temp, "#3 already in place"); 2845 // Set up arguments for checkcast_copy_entry. 
2846 setup_arg_regs(4); 2847 __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris 2848 __ jump(RuntimeAddress(checkcast_copy_entry)); 2849 } 2850 2851 __ BIND(L_failed); 2852 __ xorptr(rax, rax); 2853 __ notptr(rax); // return -1 2854 __ leave(); // required for proper stackwalking of RuntimeStub frame 2855 __ ret(0); 2856 2857 return start; 2858 } 2859 2860 void generate_arraycopy_stubs() { 2861 address entry; 2862 address entry_jbyte_arraycopy; 2863 address entry_jshort_arraycopy; 2864 address entry_jint_arraycopy; 2865 address entry_oop_arraycopy; 2866 address entry_jlong_arraycopy; 2867 address entry_checkcast_arraycopy; 2868 2869 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, 2870 "jbyte_disjoint_arraycopy"); 2871 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy, 2872 "jbyte_arraycopy"); 2873 2874 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, 2875 "jshort_disjoint_arraycopy"); 2876 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy, 2877 "jshort_arraycopy"); 2878 2879 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry, 2880 "jint_disjoint_arraycopy"); 2881 StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry, 2882 &entry_jint_arraycopy, "jint_arraycopy"); 2883 2884 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry, 2885 "jlong_disjoint_arraycopy"); 2886 StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry, 2887 &entry_jlong_arraycopy, "jlong_arraycopy"); 2888 2889 2890 if (UseCompressedOops) { 2891 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry, 2892 "oop_disjoint_arraycopy"); 2893 StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry, 2894 &entry_oop_arraycopy, "oop_arraycopy"); 2895 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry, 2896 "oop_disjoint_arraycopy_uninit", 2897 /*dest_uninitialized*/true); 2898 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry, 2899 NULL, "oop_arraycopy_uninit", 2900 /*dest_uninitialized*/true); 2901 } else { 2902 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry, 2903 "oop_disjoint_arraycopy"); 2904 StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry, 2905 &entry_oop_arraycopy, "oop_arraycopy"); 2906 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry, 2907 "oop_disjoint_arraycopy_uninit", 2908 /*dest_uninitialized*/true); 2909 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry, 2910 NULL, "oop_arraycopy_uninit", 2911 /*dest_uninitialized*/true); 2912 } 2913 2914 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 2915 StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, 2916 /*dest_uninitialized*/true); 2917 2918 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", 2919 entry_jbyte_arraycopy, 2920 entry_jshort_arraycopy, 2921 entry_jint_arraycopy, 2922 entry_jlong_arraycopy); 2923 StubRoutines::_generic_arraycopy = 
generate_generic_copy("generic_arraycopy", 2924 entry_jbyte_arraycopy, 2925 entry_jshort_arraycopy, 2926 entry_jint_arraycopy, 2927 entry_oop_arraycopy, 2928 entry_jlong_arraycopy, 2929 entry_checkcast_arraycopy); 2930 2931 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 2932 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 2933 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 2934 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); 2935 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 2936 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 2937 2938 // We don't generate specialized code for HeapWord-aligned source 2939 // arrays, so just use the code we've already generated 2940 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy; 2941 StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy; 2942 2943 StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy; 2944 StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy; 2945 2946 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy; 2947 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; 2948 2949 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy; 2950 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; 2951 2952 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; 2953 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; 2954 2955 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit; 2956 StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit; 2957 } 2958 2959 // AES intrinsic stubs 2960 enum { AESBlockSize = 16 }; 2961 2962 address generate_key_shuffle_mask() { 2963 __ align(16); 2964 StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask"); 2965 address start = __ pc(); 2966 __ emit_data64( 0x0405060700010203, relocInfo::none ); 2967 __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none ); 2968 return start; 2969 } 2970 2971 address generate_counter_shuffle_mask() { 2972 __ align(16); 2973 StubCodeMark mark(this, "StubRoutines", "counter_shuffle_mask"); 2974 address start = __ pc(); 2975 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 2976 __ emit_data64(0x0001020304050607, relocInfo::none); 2977 return start; 2978 } 2979 2980 // Utility routine for loading a 128-bit key word in little endian format; 2981 // can optionally specify that the shuffle mask is already in an xmm register 2982 void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 2983 __ movdqu(xmmdst, Address(key, offset)); 2984 if (xmm_shuf_mask != NULL) { 2985 __ pshufb(xmmdst, xmm_shuf_mask); 2986 } else { 2987 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 2988 } 2989 } 2990 2991 // Utility routine for increasing the 128-bit counter (iv in CTR mode) 2992 void inc_counter(Register reg, XMMRegister xmmdst, int inc_delta, Label& next_block) { 2993 __ pextrq(reg, xmmdst, 0x0); 2994 __ addq(reg, inc_delta); 2995 __ pinsrq(xmmdst, reg, 0x0); 2996 __ jcc(Assembler::carryClear, next_block); // jump if no carry 2997 __ pextrq(reg, xmmdst, 0x01); // Carry 2998 __ addq(reg, 0x01); 2999 __
pinsrq(xmmdst, reg, 0x01); //Carry end 3000 __ BIND(next_block); // next instruction 3001 } 3002 3003 // Arguments: 3004 // 3005 // Inputs: 3006 // c_rarg0 - source byte array address 3007 // c_rarg1 - destination byte array address 3008 // c_rarg2 - K (key) in little endian int array 3009 // 3010 address generate_aescrypt_encryptBlock() { 3011 assert(UseAES, "need AES instructions and misaligned SSE support"); 3012 __ align(CodeEntryAlignment); 3013 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); 3014 Label L_doLast; 3015 address start = __ pc(); 3016 3017 const Register from = c_rarg0; // source array address 3018 const Register to = c_rarg1; // destination array address 3019 const Register key = c_rarg2; // key array address 3020 const Register keylen = rax; 3021 3022 const XMMRegister xmm_result = xmm0; 3023 const XMMRegister xmm_key_shuf_mask = xmm1; 3024 // On win64 xmm6-xmm15 must be preserved so don't use them. 3025 const XMMRegister xmm_temp1 = xmm2; 3026 const XMMRegister xmm_temp2 = xmm3; 3027 const XMMRegister xmm_temp3 = xmm4; 3028 const XMMRegister xmm_temp4 = xmm5; 3029 3030 __ enter(); // required for proper stackwalking of RuntimeStub frame 3031 3032 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3033 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3034 3035 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3036 __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input 3037 3038 // For encryption, the java expanded key ordering is just what we need 3039 // we don't know if the key is aligned, hence not using load-execute form 3040 3041 load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask); 3042 __ pxor(xmm_result, xmm_temp1); 3043 3044 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3045 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3046 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3047 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3048 3049 __ aesenc(xmm_result, xmm_temp1); 3050 __ aesenc(xmm_result, xmm_temp2); 3051 __ aesenc(xmm_result, xmm_temp3); 3052 __ aesenc(xmm_result, xmm_temp4); 3053 3054 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3055 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3056 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3057 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3058 3059 __ aesenc(xmm_result, xmm_temp1); 3060 __ aesenc(xmm_result, xmm_temp2); 3061 __ aesenc(xmm_result, xmm_temp3); 3062 __ aesenc(xmm_result, xmm_temp4); 3063 3064 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3065 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3066 3067 __ cmpl(keylen, 44); 3068 __ jccb(Assembler::equal, L_doLast); 3069 3070 __ aesenc(xmm_result, xmm_temp1); 3071 __ aesenc(xmm_result, xmm_temp2); 3072 3073 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3074 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3075 3076 __ cmpl(keylen, 52); 3077 __ jccb(Assembler::equal, L_doLast); 3078 3079 __ aesenc(xmm_result, xmm_temp1); 3080 __ aesenc(xmm_result, xmm_temp2); 3081 3082 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3083 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3084 3085 __ BIND(L_doLast); 3086 __ aesenc(xmm_result, xmm_temp1); 3087 __ aesenclast(xmm_result, xmm_temp2); 3088 __ movdqu(Address(to, 0), xmm_result); // store the result 3089 __ xorptr(rax, rax); // return 0 3090 __ leave(); // required for proper stackwalking of RuntimeStub frame 3091 __ 
ret(0); 3092 3093 return start; 3094 } 3095 3096 3097 // Arguments: 3098 // 3099 // Inputs: 3100 // c_rarg0 - source byte array address 3101 // c_rarg1 - destination byte array address 3102 // c_rarg2 - K (key) in little endian int array 3103 // 3104 address generate_aescrypt_decryptBlock() { 3105 assert(UseAES, "need AES instructions and misaligned SSE support"); 3106 __ align(CodeEntryAlignment); 3107 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); 3108 Label L_doLast; 3109 address start = __ pc(); 3110 3111 const Register from = c_rarg0; // source array address 3112 const Register to = c_rarg1; // destination array address 3113 const Register key = c_rarg2; // key array address 3114 const Register keylen = rax; 3115 3116 const XMMRegister xmm_result = xmm0; 3117 const XMMRegister xmm_key_shuf_mask = xmm1; 3118 // On win64 xmm6-xmm15 must be preserved so don't use them. 3119 const XMMRegister xmm_temp1 = xmm2; 3120 const XMMRegister xmm_temp2 = xmm3; 3121 const XMMRegister xmm_temp3 = xmm4; 3122 const XMMRegister xmm_temp4 = xmm5; 3123 3124 __ enter(); // required for proper stackwalking of RuntimeStub frame 3125 3126 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3127 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3128 3129 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3130 __ movdqu(xmm_result, Address(from, 0)); 3131 3132 // for decryption java expanded key ordering is rotated one position from what we want 3133 // so we start from 0x10 here and hit 0x00 last 3134 // we don't know if the key is aligned, hence not using load-execute form 3135 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3136 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3137 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3138 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3139 3140 __ pxor (xmm_result, xmm_temp1); 3141 __ aesdec(xmm_result, xmm_temp2); 3142 __ aesdec(xmm_result, xmm_temp3); 3143 __ aesdec(xmm_result, xmm_temp4); 3144 3145 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3146 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3147 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3148 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3149 3150 __ aesdec(xmm_result, xmm_temp1); 3151 __ aesdec(xmm_result, xmm_temp2); 3152 __ aesdec(xmm_result, xmm_temp3); 3153 __ aesdec(xmm_result, xmm_temp4); 3154 3155 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3156 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3157 load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask); 3158 3159 __ cmpl(keylen, 44); 3160 __ jccb(Assembler::equal, L_doLast); 3161 3162 __ aesdec(xmm_result, xmm_temp1); 3163 __ aesdec(xmm_result, xmm_temp2); 3164 3165 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3166 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3167 3168 __ cmpl(keylen, 52); 3169 __ jccb(Assembler::equal, L_doLast); 3170 3171 __ aesdec(xmm_result, xmm_temp1); 3172 __ aesdec(xmm_result, xmm_temp2); 3173 3174 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3175 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3176 3177 __ BIND(L_doLast); 3178 __ aesdec(xmm_result, xmm_temp1); 3179 __ aesdec(xmm_result, xmm_temp2); 3180 3181 // for decryption the aesdeclast operation is always on key+0x00 3182 __ aesdeclast(xmm_result, xmm_temp3); 3183 __ movdqu(Address(to, 0), xmm_result); // store the result 3184 __ xorptr(rax, rax); // return 0 3185 __ leave(); // 
required for proper stackwalking of RuntimeStub frame 3186 __ ret(0); 3187 3188 return start; 3189 } 3190 3191 3192 // Arguments: 3193 // 3194 // Inputs: 3195 // c_rarg0 - source byte array address 3196 // c_rarg1 - destination byte array address 3197 // c_rarg2 - K (key) in little endian int array 3198 // c_rarg3 - r vector byte array address 3199 // c_rarg4 - input length 3200 // 3201 // Output: 3202 // rax - input length 3203 // 3204 address generate_cipherBlockChaining_encryptAESCrypt() { 3205 assert(UseAES, "need AES instructions and misaligned SSE support"); 3206 __ align(CodeEntryAlignment); 3207 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); 3208 address start = __ pc(); 3209 3210 Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256; 3211 const Register from = c_rarg0; // source array address 3212 const Register to = c_rarg1; // destination array address 3213 const Register key = c_rarg2; // key array address 3214 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3215 // and left with the results of the last encryption block 3216 #ifndef _WIN64 3217 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3218 #else 3219 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3220 const Register len_reg = r11; // pick a volatile Windows register 3221 #endif 3222 const Register pos = rax; 3223 3224 // xmm register assignments for the loops below 3225 const XMMRegister xmm_result = xmm0; 3226 const XMMRegister xmm_temp = xmm1; 3227 // keys 0-10 preloaded into xmm2-xmm12 3228 const int XMM_REG_NUM_KEY_FIRST = 2; 3229 const int XMM_REG_NUM_KEY_LAST = 15; 3230 const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3231 const XMMRegister xmm_key10 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10); 3232 const XMMRegister xmm_key11 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11); 3233 const XMMRegister xmm_key12 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12); 3234 const XMMRegister xmm_key13 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13); 3235 3236 __ enter(); // required for proper stackwalking of RuntimeStub frame 3237 3238 #ifdef _WIN64 3239 // on win64, fill len_reg from stack position 3240 __ movl(len_reg, len_mem); 3241 #else 3242 __ push(len_reg); // Save 3243 #endif 3244 3245 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 3246 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3247 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0 3248 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) { 3249 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3250 offset += 0x10; 3251 } 3252 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 3253 3254 // now split into different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3255 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3256 __ cmpl(rax, 44); 3257 __ jcc(Assembler::notEqual, L_key_192_256); 3258 3259 // 128 bit code follows here 3260 __ movptr(pos, 0); 3261 __ align(OptoLoopAlignment); 3262 3263 __ BIND(L_loopTop_128); 3264 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3265 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3266 __ pxor (xmm_result, xmm_key0); // do the
    // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256))
    __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
    __ cmpl(rax, 44);
    __ jcc(Assembler::notEqual, L_key_192_256);

    // 128 bit code follows here
    __ movptr(pos, 0);
    __ align(OptoLoopAlignment);

    __ BIND(L_loopTop_128);
    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));  // get next 16 bytes of input
    __ pxor  (xmm_result, xmm_temp);  // xor with the current r vector
    __ pxor  (xmm_result, xmm_key0);  // do the aes rounds
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) {
      __ aesenc(xmm_result, as_XMMRegister(rnum));
    }
    __ aesenclast(xmm_result, xmm_key10);
    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);  // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_loopTop_128);

    __ BIND(L_exit);
    __ movdqu(Address(rvec, 0), xmm_result);  // final value of r stored in rvec of CipherBlockChaining object

#ifdef _WIN64
    __ movl(rax, len_mem);
#else
    __ pop(rax);  // return length
#endif
    __ leave();   // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    __ BIND(L_key_192_256);
    // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
    load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask);
    __ cmpl(rax, 52);
    __ jcc(Assembler::notEqual, L_key_256);

    // 192-bit code follows here (could be changed to use more xmm registers)
    __ movptr(pos, 0);
    __ align(OptoLoopAlignment);

    __ BIND(L_loopTop_192);
    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));  // get next 16 bytes of input
    __ pxor  (xmm_result, xmm_temp);  // xor with the current r vector
    __ pxor  (xmm_result, xmm_key0);  // do the aes rounds
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) {
      __ aesenc(xmm_result, as_XMMRegister(rnum));
    }
    __ aesenclast(xmm_result, xmm_key12);
    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);  // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_loopTop_192);
    __ jmp(L_exit);

    __ BIND(L_key_256);
    // 256-bit code follows here (could be changed to use more xmm registers)
    load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask);
    __ movptr(pos, 0);
    __ align(OptoLoopAlignment);

    __ BIND(L_loopTop_256);
    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));  // get next 16 bytes of input
    __ pxor  (xmm_result, xmm_temp);  // xor with the current r vector
    __ pxor  (xmm_result, xmm_key0);  // do the aes rounds
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) {
      __ aesenc(xmm_result, as_XMMRegister(rnum));
    }
    load_key(xmm_temp, key, 0xe0);
    __ aesenclast(xmm_result, xmm_temp);
    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);  // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_loopTop_256);
    __ jmp(L_exit);

    return start;
  }
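  // CBC encryption is inherently serial: C[i] = E_K(P[i] ^ C[i-1]) makes each
  // block's input depend on the previous ciphertext, so the stub above can
  // only process one block per loop iteration (contrast the 4-block decrypt
  // stub below, where all inputs are known up front). A minimal scalar model,
  // assuming a hypothetical encrypt_block() helper:
  //
  //   unsigned char chain[16];                      // starts as the IV (rvec)
  //   for (int i = 0; i < nblocks; i++) {
  //     for (int j = 0; j < 16; j++) chain[j] ^= pt[16*i + j];
  //     encrypt_block(chain, key);                  // chain := E_K(chain)
  //     for (int j = 0; j < 16; j++) ct[16*i + j] = chain[j];
  //   }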
  // Safefetch stubs.
  void generate_safefetch(const char* name, int size, address* entry,
                          address* fault_pc, address* continuation_pc) {
    // safefetch signatures:
    //   int      SafeFetch32(int*      adr, int      errValue);
    //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
    //
    // arguments:
    //   c_rarg0 = adr
    //   c_rarg1 = errValue
    //
    // result:
    //   rax = *adr or errValue

    StubCodeMark mark(this, "StubRoutines", name);

    // Entry point, pc or function descriptor.
    *entry = __ pc();

    // Load *adr into c_rarg1, may fault.
    *fault_pc = __ pc();
    switch (size) {
      case 4:
        // int32_t
        __ movl(c_rarg1, Address(c_rarg0, 0));
        break;
      case 8:
        // int64_t
        __ movq(c_rarg1, Address(c_rarg0, 0));
        break;
      default:
        ShouldNotReachHere();
    }

    // return errValue or *adr
    *continuation_pc = __ pc();
    __ movq(rax, c_rarg1);
    __ ret(0);
  }
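  // The safefetch stubs carry no explicit error handling: if the load at
  // *fault_pc faults, the VM's signal handler is expected to recognize the
  // faulting pc and resume execution at *continuation_pc, where c_rarg1
  // still holds errValue. The net effect, as a C-level sketch:
  //
  //   int SafeFetch32(int* adr, int errValue) {
  //     int v = errValue;
  //     v = *adr;   // may fault; on a fault the handler skips this update
  //     return v;   // *adr on success, errValue if the load faulted
  //   }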
  // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time
  // to hide instruction latency
  //
  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - r vector byte array address
  //   c_rarg4   - input length
  //
  // Output:
  //   rax       - input length
  //
  address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
    address start = __ pc();

    const Register from    = c_rarg0;  // source array address
    const Register to      = c_rarg1;  // destination array address
    const Register key     = c_rarg2;  // key array address
    const Register rvec    = c_rarg3;  // r byte array initialized from initvector array address
                                       // and left with the results of the last encryption block
#ifndef _WIN64
    const Register len_reg = c_rarg4;  // src len (must be multiple of blocksize 16)
#else
    const Address  len_mem(rbp, 6 * wordSize);  // length is on stack on Win64
    const Register len_reg = r11;      // pick the volatile windows register
#endif
    const Register pos     = rax;

    const int PARALLEL_FACTOR = 4;
    const int ROUNDS[3] = { 10, 12, 14 };  // aes rounds for key128, key192, key256

    Label L_exit;
    Label L_singleBlock_loopTopHead[3];   // 128, 192, 256
    Label L_singleBlock_loopTopHead2[3];  // 128, 192, 256
    Label L_singleBlock_loopTop[3];       // 128, 192, 256
    Label L_multiBlock_loopTopHead[3];    // 128, 192, 256
    Label L_multiBlock_loopTop[3];        // 128, 192, 256

    // keys 0-10 preloaded into xmm5-xmm15
    const int XMM_REG_NUM_KEY_FIRST = 5;
    const int XMM_REG_NUM_KEY_LAST  = 15;
    const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
    const XMMRegister xmm_key_last  = as_XMMRegister(XMM_REG_NUM_KEY_LAST);

    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef _WIN64
    // on win64, fill len_reg from stack position
    __ movl(len_reg, len_mem);
#else
    __ push(len_reg); // Save
#endif
    __ push(rbx);
    // the java expanded key ordering is rotated one position from what we want
    // so we start from 0x10 here and hit 0x00 last
    const XMMRegister xmm_key_shuf_mask = xmm1;  // used temporarily to swap key bytes up front
    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00
    for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) {
      load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
      offset += 0x10;
    }
    load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask);

    const XMMRegister xmm_prev_block_cipher = xmm1;  // holds cipher of previous block

    // registers holding the four results in the parallelized loop
    const XMMRegister xmm_result0 = xmm0;
    const XMMRegister xmm_result1 = xmm2;
    const XMMRegister xmm_result2 = xmm3;
    const XMMRegister xmm_result3 = xmm4;

    __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00));  // initialize with initial rvec

    __ xorptr(pos, pos);

    // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256))
    __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
    __ cmpl(rbx, 52);
    __ jcc(Assembler::equal, L_multiBlock_loopTopHead[1]);
    __ cmpl(rbx, 60);
    __ jcc(Assembler::equal, L_multiBlock_loopTopHead[2]);

#define DoFour(opc, src_reg)        \
    __ opc(xmm_result0, src_reg);   \
    __ opc(xmm_result1, src_reg);   \
    __ opc(xmm_result2, src_reg);   \
    __ opc(xmm_result3, src_reg);

    for (int k = 0; k < 3; ++k) {
      __ BIND(L_multiBlock_loopTopHead[k]);
      if (k != 0) {
        __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize);  // see if at least 4 blocks left
        __ jcc(Assembler::less, L_singleBlock_loopTopHead2[k]);
      }
      if (k == 1) {
        __ subptr(rsp, 6 * wordSize);
        __ movdqu(Address(rsp, 0), xmm15);  // save last_key from xmm15
        load_key(xmm15, key, 0xb0);         // 0xb0; 192-bit key goes up to 0xc0
        __ movdqu(Address(rsp, 2 * wordSize), xmm15);
        load_key(xmm1, key, 0xc0);          // 0xc0;
        __ movdqu(Address(rsp, 4 * wordSize), xmm1);
      } else if (k == 2) {
        __ subptr(rsp, 10 * wordSize);
        __ movdqu(Address(rsp, 0), xmm15);  // save last_key from xmm15
        load_key(xmm15, key, 0xd0);         // 0xd0; 256-bit key goes up to 0xe0
        __ movdqu(Address(rsp, 6 * wordSize), xmm15);
        load_key(xmm1, key, 0xe0);          // 0xe0;
        __ movdqu(Address(rsp, 8 * wordSize), xmm1);
        load_key(xmm15, key, 0xb0);         // 0xb0;
        __ movdqu(Address(rsp, 2 * wordSize), xmm15);
        load_key(xmm1, key, 0xc0);          // 0xc0;
        __ movdqu(Address(rsp, 4 * wordSize), xmm1);
      }
      __ align(OptoLoopAlignment);
      __ BIND(L_multiBlock_loopTop[k]);
      __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize);  // see if at least 4 blocks left
      __ jcc(Assembler::less, L_singleBlock_loopTopHead[k]);

      if (k != 0) {
        __ movdqu(xmm15, Address(rsp, 2 * wordSize));
        __ movdqu(xmm1, Address(rsp, 4 * wordSize));
      }

      __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize));  // get next 4 blocks into xmmresult registers
      __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize));

      DoFour(pxor, xmm_key_first);
      if (k == 0) {
        for (int rnum = 1; rnum < ROUNDS[k]; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        DoFour(aesdeclast, xmm_key_last);
      } else if (k == 1) {
        for (int rnum = 1; rnum <= ROUNDS[k]-2; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        __ movdqu(xmm_key_last, Address(rsp, 0));  // xmm15 needs to be loaded again.
        DoFour(aesdec, xmm1);  // key : 0xc0
        __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00));  // xmm1 needs to be loaded again
        DoFour(aesdeclast, xmm_key_last);
      } else if (k == 2) {
        for (int rnum = 1; rnum <= ROUNDS[k] - 4; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        DoFour(aesdec, xmm1);  // key : 0xc0
        __ movdqu(xmm15, Address(rsp, 6 * wordSize));
        __ movdqu(xmm1, Address(rsp, 8 * wordSize));
        DoFour(aesdec, xmm15);  // key : 0xd0
        __ movdqu(xmm_key_last, Address(rsp, 0));  // xmm15 needs to be loaded again.
        DoFour(aesdec, xmm1);  // key : 0xe0
        __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00));  // xmm1 needs to be loaded again
        DoFour(aesdeclast, xmm_key_last);
      }

      // for each result, xor with the r vector of previous cipher block
      __ pxor(xmm_result0, xmm_prev_block_cipher);
      __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ pxor(xmm_result1, xmm_prev_block_cipher);
      __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ pxor(xmm_result2, xmm_prev_block_cipher);
      __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ pxor(xmm_result3, xmm_prev_block_cipher);
      __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3 * AESBlockSize));  // this will carry over to next set of blocks
      if (k != 0) {
        __ movdqu(Address(rvec, 0x00), xmm_prev_block_cipher);
      }

      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);  // store 4 results into the next 64 bytes of output
      __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1);
      __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2);
      __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3);

      __ addptr(pos, PARALLEL_FACTOR * AESBlockSize);
      __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize);
      __ jmp(L_multiBlock_loopTop[k]);

      // registers used in the non-parallelized loops
      // xmm register assignments for the loops below
      const XMMRegister xmm_result = xmm0;
      const XMMRegister xmm_prev_block_cipher_save = xmm2;
      const XMMRegister xmm_key11 = xmm3;
      const XMMRegister xmm_key12 = xmm4;
      const XMMRegister key_tmp   = xmm4;

      __ BIND(L_singleBlock_loopTopHead[k]);
      if (k == 1) {
        __ addptr(rsp, 6 * wordSize);
      } else if (k == 2) {
        __ addptr(rsp, 10 * wordSize);
      }
      __ cmpptr(len_reg, 0);  // any blocks left?
      __ jcc(Assembler::equal, L_exit);
      __ BIND(L_singleBlock_loopTopHead2[k]);
      if (k == 1) {
        load_key(xmm_key11, key, 0xb0);  // 0xb0; 192-bit key goes up to 0xc0
        load_key(xmm_key12, key, 0xc0);  // 0xc0; 192-bit key goes up to 0xc0
      }
      if (k == 2) {
        load_key(xmm_key11, key, 0xb0);  // 0xb0; 256-bit key goes up to 0xe0
      }
      __ align(OptoLoopAlignment);
      __ BIND(L_singleBlock_loopTop[k]);
      __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));  // get next 16 bytes of cipher input
      __ movdqa(xmm_prev_block_cipher_save, xmm_result);  // save for next r vector
      __ pxor(xmm_result, xmm_key_first);  // do the aes dec rounds
      for (int rnum = 1; rnum <= 9; rnum++) {
        __ aesdec(xmm_result, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
      }
      if (k == 1) {
        __ aesdec(xmm_result, xmm_key11);
        __ aesdec(xmm_result, xmm_key12);
      }
      if (k == 2) {
        __ aesdec(xmm_result, xmm_key11);
        load_key(key_tmp, key, 0xc0);
        __ aesdec(xmm_result, key_tmp);
        load_key(key_tmp, key, 0xd0);
        __ aesdec(xmm_result, key_tmp);
        load_key(key_tmp, key, 0xe0);
        __ aesdec(xmm_result, key_tmp);
      }

      __ aesdeclast(xmm_result, xmm_key_last);     // xmm15 always came from key+0
      __ pxor(xmm_result, xmm_prev_block_cipher);  // xor with the current r vector
      __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);  // store into the next 16 bytes of output
      // no need to store r to memory until we exit
      __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save);  // set up next r vector with cipher input from this block
      __ addptr(pos, AESBlockSize);
      __ subptr(len_reg, AESBlockSize);
      __ jcc(Assembler::notEqual, L_singleBlock_loopTop[k]);
      if (k != 2) {
        __ jmp(L_exit);
      }
    } // for 128/192/256

    __ BIND(L_exit);
    __ movdqu(Address(rvec, 0), xmm_prev_block_cipher);  // final value of r stored in rvec of CipherBlockChaining object
    __ pop(rbx);
#ifdef _WIN64
    __ movl(rax, len_mem);
#else
    __ pop(rax);  // return length
#endif
    __ leave();   // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }
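  // Unlike encryption, CBC decryption parallelizes cleanly: P[i] = D_K(C[i]) ^ C[i-1]
  // consumes only ciphertext blocks, all of which are available up front. The
  // 4-block loop above exploits exactly that: four aesdec chains run
  // independently and the previous-ciphertext XOR is applied at the end.
  // A minimal scalar model, assuming a hypothetical decrypt_block() helper:
  //
  //   for (int i = 0; i < nblocks; i++) {    // iterations are independent
  //     unsigned char tmp[16];
  //     decrypt_block(ct + 16*i, tmp, key);  // tmp := D_K(C[i])
  //     const unsigned char* prev = (i == 0) ? iv : ct + 16*(i-1);
  //     for (int j = 0; j < 16; j++) pt[16*i + j] = tmp[j] ^ prev[j];
  //   }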
  address generate_upper_word_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "upper_word_mask");
    address start = __ pc();
    __ emit_data64(0x0000000000000000, relocInfo::none);
    __ emit_data64(0xFFFFFFFF00000000, relocInfo::none);
    return start;
  }

  address generate_shuffle_byte_flip_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }

  // ofs and limit are used for multi-block byte array.
  // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
  address generate_sha1_implCompress(bool multi_block, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf   = c_rarg0;
    Register state = c_rarg1;
    Register ofs   = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister abcd = xmm0;
    const XMMRegister e0   = xmm1;
    const XMMRegister e1   = xmm2;
    const XMMRegister msg0 = xmm3;

    const XMMRegister msg1 = xmm4;
    const XMMRegister msg2 = xmm5;
    const XMMRegister msg3 = xmm6;
    const XMMRegister shuf_mask = xmm7;

    __ enter();

    __ subptr(rsp, 4 * wordSize);

    __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask,
                 buf, state, ofs, limit, rsp, multi_block);

    __ addptr(rsp, 4 * wordSize);

    __ leave();
    __ ret(0);
    return start;
  }

  address generate_pshuffle_byte_flip_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask");
    address start = __ pc();
    __ emit_data64(0x0405060700010203, relocInfo::none);
    __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);

    if (VM_Version::supports_avx2()) {
      __ emit_data64(0x0405060700010203, relocInfo::none);  // second copy
      __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
      // _SHUF_00BA
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      // _SHUF_DC00
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
    }

    return start;
  }

  // Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
  address generate_pshuffle_byte_flip_mask_sha512() {
    __ align(32);
    StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask_sha512");
    address start = __ pc();
    if (VM_Version::supports_avx2()) {
      __ emit_data64(0x0001020304050607, relocInfo::none);  // PSHUFFLE_BYTE_FLIP_MASK
      __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
      __ emit_data64(0x1011121314151617, relocInfo::none);
      __ emit_data64(0x18191a1b1c1d1e1f, relocInfo::none);
      __ emit_data64(0x0000000000000000, relocInfo::none);  // MASK_YMM_LO
      __ emit_data64(0x0000000000000000, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
    }

    return start;
  }
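  // The byte-flip masks above exist because SHA-1 and SHA-256 are specified
  // on big-endian 32-bit words (SHA-512 on big-endian 64-bit words), while
  // x86 loads are little-endian; one pshufb per vector performs the swap.
  // Scalar equivalent for a single 32-bit message word:
  //
  //   unsigned int load_be32(const unsigned char* p) {
  //     return ((unsigned int)p[0] << 24) | ((unsigned int)p[1] << 16) |
  //            ((unsigned int)p[2] <<  8) |  (unsigned int)p[3];
  //   }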
  // ofs and limit are used for multi-block byte array.
  // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
  address generate_sha256_implCompress(bool multi_block, const char *name) {
    assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), "");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf   = c_rarg0;
    Register state = c_rarg1;
    Register ofs   = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister msg     = xmm0;
    const XMMRegister state0  = xmm1;
    const XMMRegister state1  = xmm2;
    const XMMRegister msgtmp0 = xmm3;

    const XMMRegister msgtmp1 = xmm4;
    const XMMRegister msgtmp2 = xmm5;
    const XMMRegister msgtmp3 = xmm6;
    const XMMRegister msgtmp4 = xmm7;

    const XMMRegister shuf_mask = xmm8;

    __ enter();

    __ subptr(rsp, 4 * wordSize);

    if (VM_Version::supports_sha()) {
      __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
                     buf, state, ofs, limit, rsp, multi_block, shuf_mask);
    } else if (VM_Version::supports_avx2()) {
      __ sha256_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
                     buf, state, ofs, limit, rsp, multi_block, shuf_mask);
    }
    __ addptr(rsp, 4 * wordSize);
    __ vzeroupper();
    __ leave();
    __ ret(0);
    return start;
  }

  address generate_sha512_implCompress(bool multi_block, const char *name) {
    assert(VM_Version::supports_avx2(), "");
    assert(VM_Version::supports_bmi2(), "");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf   = c_rarg0;
    Register state = c_rarg1;
    Register ofs   = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister msg     = xmm0;
    const XMMRegister state0  = xmm1;
    const XMMRegister state1  = xmm2;
    const XMMRegister msgtmp0 = xmm3;
    const XMMRegister msgtmp1 = xmm4;
    const XMMRegister msgtmp2 = xmm5;
    const XMMRegister msgtmp3 = xmm6;
    const XMMRegister msgtmp4 = xmm7;

    const XMMRegister shuf_mask = xmm8;

    __ enter();

    __ sha512_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
                   buf, state, ofs, limit, rsp, multi_block, shuf_mask);

    __ vzeroupper();
    __ leave();
    __ ret(0);
    return start;
  }
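  // For the multi_block variants, ofs and limit implement the loop of
  // DigestBase.implCompressMultiBlock on the stub side: keep compressing
  // fixed-size blocks and advancing ofs while ofs <= limit, then return the
  // final ofs in rax. A sketch of the contract (assuming block_size is
  // 64 bytes for SHA-1/SHA-256 and 128 bytes for SHA-512):
  //
  //   int implCompressMultiBlock(const unsigned char* b, int ofs, int limit) {
  //     for (; ofs <= limit; ofs += block_size) {
  //       implCompress(b + ofs);   // one compression over one block
  //     }
  //     return ofs;                // picked up from rax by the caller
  //   }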
  // This is a version of CTR/AES crypt which does 6 blocks in a loop at a time
  // to hide instruction latency
  //
  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - counter vector byte array address
  //   Linux
  //     c_rarg4            - input length
  //     c_rarg5            - saved encryptedCounter start
  //     rbp + 6 * wordSize - saved used length
  //   Windows
  //     rbp + 6 * wordSize - input length
  //     rbp + 7 * wordSize - saved encryptedCounter start
  //     rbp + 8 * wordSize - saved used length
  //
  // Output:
  //   rax       - input length
  //
  address generate_counterMode_AESCrypt_Parallel() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt");
    address start = __ pc();
    const Register from    = c_rarg0;  // source array address
    const Register to      = c_rarg1;  // destination array address
    const Register key     = c_rarg2;  // key array address
    const Register counter = c_rarg3;  // counter byte array initialized from counter array address
                                       // and updated with the incremented counter in the end
#ifndef _WIN64
    const Register len_reg = c_rarg4;
    const Register saved_encCounter_start = c_rarg5;
    const Register used_addr = r10;
    const Address  used_mem(rbp, 2 * wordSize);
    const Register used = r11;
#else
    const Address len_mem(rbp, 6 * wordSize);               // length is on stack on Win64
    const Address saved_encCounter_mem(rbp, 7 * wordSize);  // saved encrypted counter is on stack on Win64
    const Address used_mem(rbp, 8 * wordSize);              // used length is on stack on Win64
    const Register len_reg = r10;      // pick the first volatile windows register
    const Register saved_encCounter_start = r11;
    const Register used_addr = r13;
    const Register used = r14;
#endif
    const Register pos = rax;

    const int PARALLEL_FACTOR = 6;
    const XMMRegister xmm_counter_shuf_mask = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;  // used temporarily to swap key bytes up front
    const XMMRegister xmm_curr_counter = xmm2;

    const XMMRegister xmm_key_tmp0 = xmm3;
    const XMMRegister xmm_key_tmp1 = xmm4;

    // registers holding the six results in the parallelized loop
    const XMMRegister xmm_result0 = xmm5;
    const XMMRegister xmm_result1 = xmm6;
    const XMMRegister xmm_result2 = xmm7;
    const XMMRegister xmm_result3 = xmm8;
    const XMMRegister xmm_result4 = xmm9;
    const XMMRegister xmm_result5 = xmm10;

    const XMMRegister xmm_from0 = xmm11;
    const XMMRegister xmm_from1 = xmm12;
    const XMMRegister xmm_from2 = xmm13;
    const XMMRegister xmm_from3 = xmm14;  // the last one is xmm14. we have to preserve it on WIN64.
    const XMMRegister xmm_from4 = xmm3;   // reuse xmm3~4, because xmm_key_tmp0~1 are no longer needed once the input text is loaded
    const XMMRegister xmm_from5 = xmm4;

    // for key_128, key_192, key_256
    const int rounds[3] = {10, 12, 14};
    Label L_exit_preLoop, L_preLoop_start;
    Label L_multiBlock_loopTop[3];
    Label L_singleBlockLoopTop[3];
    Label L__incCounter[3][6];      // for 6 blocks
    Label L__incCounter_single[3];  // for single block, key128, key192, key256
    Label L_processTail_insr[3], L_processTail_4_insr[3], L_processTail_2_insr[3], L_processTail_1_insr[3], L_processTail_exit_insr[3];
    Label L_processTail_4_extr[3], L_processTail_2_extr[3], L_processTail_1_extr[3], L_processTail_exit_extr[3];

    Label L_exit;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef _WIN64
    // allocate spill slots for r13, r14
    enum {
      saved_r13_offset,
      saved_r14_offset
    };
    __ subptr(rsp, 2 * wordSize);
    __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
    __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);

    // on win64, fill len_reg from stack position
    __ movl(len_reg, len_mem);
    __ movptr(saved_encCounter_start, saved_encCounter_mem);
    __ movptr(used_addr, used_mem);
    __ movl(used, Address(used_addr, 0));
#else
    __ push(len_reg); // Save
    __ movptr(used_addr, used_mem);
    __ movl(used, Address(used_addr, 0));
#endif

    __ push(rbx); // Save RBX
    __ movdqu(xmm_curr_counter, Address(counter, 0x00));  // initialize counter with initial counter
    __ movdqu(xmm_counter_shuf_mask, ExternalAddress(StubRoutines::x86::counter_shuffle_mask_addr()), pos);  // pos as scratch
    __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask);   // counter is shuffled
    __ movptr(pos, 0);

    // Use the partially used encrypted counter from the last invocation
    __ BIND(L_preLoop_start);
    __ cmpptr(used, 16);
    __ jcc(Assembler::aboveEqual, L_exit_preLoop);
    __ cmpptr(len_reg, 0);
    __ jcc(Assembler::lessEqual, L_exit_preLoop);
    __ movb(rbx, Address(saved_encCounter_start, used));
    __ xorb(rbx, Address(from, pos));
    __ movb(Address(to, pos), rbx);
    __ addptr(pos, 1);
    __ addptr(used, 1);
    __ subptr(len_reg, 1);

    __ jmp(L_preLoop_start);

    __ BIND(L_exit_preLoop);
    __ movl(Address(used_addr, 0), used);
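    // The pre-loop above drains keystream bytes left over from the previous
    // invocation: 'used' counts how many bytes of the last encrypted counter
    // block were already consumed, and saved_encCounter holds that block.
    // Scalar model of what was just emitted:
    //
    //   while (used < 16 && len > 0) {
    //     *out++ = *in++ ^ saved_encCounter[used++];  // finish the old block
    //     len--;
    //   }
    //   *used_addr = used;                            // persists across calls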
    // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()), rbx);  // rbx as scratch
    __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
    __ cmpl(rbx, 52);
    __ jcc(Assembler::equal, L_multiBlock_loopTop[1]);
    __ cmpl(rbx, 60);
    __ jcc(Assembler::equal, L_multiBlock_loopTop[2]);

#define CTR_DoSix(opc, src_reg)     \
    __ opc(xmm_result0, src_reg);   \
    __ opc(xmm_result1, src_reg);   \
    __ opc(xmm_result2, src_reg);   \
    __ opc(xmm_result3, src_reg);   \
    __ opc(xmm_result4, src_reg);   \
    __ opc(xmm_result5, src_reg);

    // k == 0 :  generate code for key_128
    // k == 1 :  generate code for key_192
    // k == 2 :  generate code for key_256
    for (int k = 0; k < 3; ++k) {
      // multi blocks starts here
      __ align(OptoLoopAlignment);
      __ BIND(L_multiBlock_loopTop[k]);
      __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize);  // see if at least PARALLEL_FACTOR blocks left
      __ jcc(Assembler::less, L_singleBlockLoopTop[k]);
      load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);

      // load, then increase counters
      CTR_DoSix(movdqa, xmm_curr_counter);
      inc_counter(rbx, xmm_result1, 0x01, L__incCounter[k][0]);
      inc_counter(rbx, xmm_result2, 0x02, L__incCounter[k][1]);
      inc_counter(rbx, xmm_result3, 0x03, L__incCounter[k][2]);
      inc_counter(rbx, xmm_result4, 0x04, L__incCounter[k][3]);
      inc_counter(rbx, xmm_result5, 0x05, L__incCounter[k][4]);
      inc_counter(rbx, xmm_curr_counter, 0x06, L__incCounter[k][5]);
      CTR_DoSix(pshufb, xmm_counter_shuf_mask);  // after being incremented, counters are shuffled back for PXOR
      CTR_DoSix(pxor, xmm_key_tmp0);             // PXOR with Round 0 key

      // load two ROUND_KEYs at a time
      for (int i = 1; i < rounds[k]; ) {
        load_key(xmm_key_tmp1, key, (0x10 * i), xmm_key_shuf_mask);
        load_key(xmm_key_tmp0, key, (0x10 * (i+1)), xmm_key_shuf_mask);
        CTR_DoSix(aesenc, xmm_key_tmp1);
        i++;
        if (i != rounds[k]) {
          CTR_DoSix(aesenc, xmm_key_tmp0);
        } else {
          CTR_DoSix(aesenclast, xmm_key_tmp0);
        }
        i++;
      }

      // get next PARALLEL_FACTOR blocks into xmm_from registers
      __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ movdqu(xmm_from1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ movdqu(xmm_from2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ movdqu(xmm_from3, Address(from, pos, Address::times_1, 3 * AESBlockSize));
      __ movdqu(xmm_from4, Address(from, pos, Address::times_1, 4 * AESBlockSize));
      __ movdqu(xmm_from5, Address(from, pos, Address::times_1, 5 * AESBlockSize));

      __ pxor(xmm_result0, xmm_from0);
      __ pxor(xmm_result1, xmm_from1);
      __ pxor(xmm_result2, xmm_from2);
      __ pxor(xmm_result3, xmm_from3);
      __ pxor(xmm_result4, xmm_from4);
      __ pxor(xmm_result5, xmm_from5);

      // store 6 results into the next 96 bytes of output
      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
      __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1);
      __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2);
      __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3);
      __ movdqu(Address(to, pos, Address::times_1, 4 * AESBlockSize), xmm_result4);
      __ movdqu(Address(to, pos, Address::times_1, 5 * AESBlockSize), xmm_result5);

      __ addptr(pos, PARALLEL_FACTOR * AESBlockSize);      // advance the position in the crypt text
      __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize);  // decrease the remaining length
      __ jmp(L_multiBlock_loopTop[k]);

      // singleBlock starts here
      __ align(OptoLoopAlignment);
      __ BIND(L_singleBlockLoopTop[k]);
      __ cmpptr(len_reg, 0);
      __ jcc(Assembler::lessEqual, L_exit);
      load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);
      __ movdqa(xmm_result0, xmm_curr_counter);
      inc_counter(rbx, xmm_curr_counter, 0x01, L__incCounter_single[k]);
      __ pshufb(xmm_result0, xmm_counter_shuf_mask);
      __ pxor(xmm_result0, xmm_key_tmp0);
      for (int i = 1; i < rounds[k]; i++) {
        load_key(xmm_key_tmp0, key, (0x10 * i), xmm_key_shuf_mask);
        __ aesenc(xmm_result0, xmm_key_tmp0);
      }
      load_key(xmm_key_tmp0, key, (rounds[k] * 0x10), xmm_key_shuf_mask);
      __ aesenclast(xmm_result0, xmm_key_tmp0);
      __ cmpptr(len_reg, AESBlockSize);
      __ jcc(Assembler::less, L_processTail_insr[k]);
      __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ pxor(xmm_result0, xmm_from0);
      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
      __ addptr(pos, AESBlockSize);
      __ subptr(len_reg, AESBlockSize);
      __ jmp(L_singleBlockLoopTop[k]);

      __ BIND(L_processTail_insr[k]);  // Process the tail part of the input array
      __ addptr(pos, len_reg);         // 1. Insert bytes from src array into xmm_from0 register
      __ testptr(len_reg, 8);
      __ jcc(Assembler::zero, L_processTail_4_insr[k]);
      __ subptr(pos, 8);
      __ pinsrq(xmm_from0, Address(from, pos), 0);
      __ BIND(L_processTail_4_insr[k]);
      __ testptr(len_reg, 4);
      __ jcc(Assembler::zero, L_processTail_2_insr[k]);
      __ subptr(pos, 4);
      __ pslldq(xmm_from0, 4);
      __ pinsrd(xmm_from0, Address(from, pos), 0);
      __ BIND(L_processTail_2_insr[k]);
      __ testptr(len_reg, 2);
      __ jcc(Assembler::zero, L_processTail_1_insr[k]);
      __ subptr(pos, 2);
      __ pslldq(xmm_from0, 2);
      __ pinsrw(xmm_from0, Address(from, pos), 0);
      __ BIND(L_processTail_1_insr[k]);
      __ testptr(len_reg, 1);
      __ jcc(Assembler::zero, L_processTail_exit_insr[k]);
      __ subptr(pos, 1);
      __ pslldq(xmm_from0, 1);
      __ pinsrb(xmm_from0, Address(from, pos), 0);
      __ BIND(L_processTail_exit_insr[k]);

      __ movdqu(Address(saved_encCounter_start, 0), xmm_result0);  // 2. Perform pxor of the encrypted counter and plaintext bytes.
      __ pxor(xmm_result0, xmm_from0);                             //    Also the encrypted counter is saved for the next invocation.

      __ testptr(len_reg, 8);
      __ jcc(Assembler::zero, L_processTail_4_extr[k]);  // 3. Extract bytes from xmm_result0 into the dest. array
      __ pextrq(Address(to, pos), xmm_result0, 0);
      __ psrldq(xmm_result0, 8);
      __ addptr(pos, 8);
      __ BIND(L_processTail_4_extr[k]);
      __ testptr(len_reg, 4);
      __ jcc(Assembler::zero, L_processTail_2_extr[k]);
      __ pextrd(Address(to, pos), xmm_result0, 0);
      __ psrldq(xmm_result0, 4);
      __ addptr(pos, 4);
      __ BIND(L_processTail_2_extr[k]);
      __ testptr(len_reg, 2);
      __ jcc(Assembler::zero, L_processTail_1_extr[k]);
      __ pextrw(Address(to, pos), xmm_result0, 0);
      __ psrldq(xmm_result0, 2);
      __ addptr(pos, 2);
      __ BIND(L_processTail_1_extr[k]);
      __ testptr(len_reg, 1);
      __ jcc(Assembler::zero, L_processTail_exit_extr[k]);
      __ pextrb(Address(to, pos), xmm_result0, 0);

      __ BIND(L_processTail_exit_extr[k]);
      __ movl(Address(used_addr, 0), len_reg);
      __ jmp(L_exit);
    }

    __ BIND(L_exit);
    __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask);  // counter is shuffled back.
    __ movdqu(Address(counter, 0), xmm_curr_counter);    // save counter back
    __ pop(rbx); // pop the saved RBX.
#ifdef _WIN64
    __ movl(rax, len_mem);
    __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
    __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
    __ addptr(rsp, 2 * wordSize);
#else
    __ pop(rax); // return 'len'
#endif
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }
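  // CTR mode turns AES into a stream cipher: the stub above encrypts
  // successive big-endian counter values (hence the counter shuffle mask)
  // and XORs the keystream with the input, so encryption and decryption are
  // the same operation and all blocks are independent - which is what
  // permits the 6-way unroll. Scalar model, assuming hypothetical
  // encrypt_block() and increment_be128() helpers:
  //
  //   while (len > 0) {
  //     unsigned char ks[16];
  //     encrypt_block(counter, ks, key);  // ks := E_K(counter)
  //     increment_be128(counter);         // counter treated as big-endian
  //     int n = len < 16 ? len : 16;
  //     for (int j = 0; j < n; j++) out[j] = in[j] ^ ks[j];
  //     in += n; out += n; len -= n;
  //   }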
  void roundDec(XMMRegister xmm_reg) {
    __ vaesdec(xmm1, xmm1, xmm_reg, Assembler::AVX_512bit);
    __ vaesdec(xmm2, xmm2, xmm_reg, Assembler::AVX_512bit);
    __ vaesdec(xmm3, xmm3, xmm_reg, Assembler::AVX_512bit);
    __ vaesdec(xmm4, xmm4, xmm_reg, Assembler::AVX_512bit);
    __ vaesdec(xmm5, xmm5, xmm_reg, Assembler::AVX_512bit);
    __ vaesdec(xmm6, xmm6, xmm_reg, Assembler::AVX_512bit);
    __ vaesdec(xmm7, xmm7, xmm_reg, Assembler::AVX_512bit);
    __ vaesdec(xmm8, xmm8, xmm_reg, Assembler::AVX_512bit);
  }

  void roundDeclast(XMMRegister xmm_reg) {
    __ vaesdeclast(xmm1, xmm1, xmm_reg, Assembler::AVX_512bit);
    __ vaesdeclast(xmm2, xmm2, xmm_reg, Assembler::AVX_512bit);
    __ vaesdeclast(xmm3, xmm3, xmm_reg, Assembler::AVX_512bit);
    __ vaesdeclast(xmm4, xmm4, xmm_reg, Assembler::AVX_512bit);
    __ vaesdeclast(xmm5, xmm5, xmm_reg, Assembler::AVX_512bit);
    __ vaesdeclast(xmm6, xmm6, xmm_reg, Assembler::AVX_512bit);
    __ vaesdeclast(xmm7, xmm7, xmm_reg, Assembler::AVX_512bit);
    __ vaesdeclast(xmm8, xmm8, xmm_reg, Assembler::AVX_512bit);
  }

  void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask = NULL) {
    __ movdqu(xmmdst, Address(key, offset));
    if (xmm_shuf_mask != NULL) {
      __ pshufb(xmmdst, xmm_shuf_mask);
    } else {
      __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    }
    __ evshufi64x2(xmmdst, xmmdst, xmmdst, 0x0, Assembler::AVX_512bit);
  }

  address generate_cipherBlockChaining_decryptVectorAESCrypt() {
    assert(VM_Version::supports_vaes(), "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
    address start = __ pc();

    const Register from = c_rarg0;  // source array address
    const Register to   = c_rarg1;  // destination array address
    const Register key  = c_rarg2;  // key array address
    const Register rvec = c_rarg3;  // r byte array initialized from initvector array address
                                    // and left with the results of the last encryption block
#ifndef _WIN64
    const Register len_reg = c_rarg4;  // src len (must be multiple of blocksize 16)
#else
    const Address len_mem(rbp, 6 * wordSize);  // length is on stack on Win64
    const Register len_reg = r11;      // pick the volatile windows register
#endif

    Label Loop, Loop1, L_128, L_256, L_192, KEY_192, KEY_256, Loop2, Lcbc_dec_rem_loop,
          Lcbc_dec_rem_last, Lcbc_dec_ret, Lcbc_dec_rem, Lcbc_exit;

    __ enter();

#ifdef _WIN64
    // on win64, fill len_reg from stack position
    __ movl(len_reg, len_mem);
#else
    __ push(len_reg); // Save
#endif
    __ push(rbx);
    __ vzeroupper();

    // Temporary variable declaration for swapping key bytes
    const XMMRegister xmm_key_shuf_mask = xmm1;
    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));

    // Calculate number of rounds from key size: 44 for 10-rounds, 52 for 12-rounds, 60 for 14-rounds
    const Register rounds = rbx;
    __ movl(rounds, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    const XMMRegister IV = xmm0;
    // Load IV and broadcast value to 512-bits
    __ evbroadcasti64x2(IV, Address(rvec, 0), Assembler::AVX_512bit);

    // Temporary variables for storing round keys
    const XMMRegister RK0 = xmm30;
    const XMMRegister RK1 = xmm9;
    const XMMRegister RK2 = xmm18;
    const XMMRegister RK3 = xmm19;
    const XMMRegister RK4 = xmm20;
    const XMMRegister RK5 = xmm21;
    const XMMRegister RK6 = xmm22;
    const XMMRegister RK7 = xmm23;
    const XMMRegister RK8 = xmm24;
    const XMMRegister RK9 = xmm25;
    const XMMRegister RK10 = xmm26;

    // Load and shuffle key
    // the java expanded key ordering is rotated one position from what we want
    // so we start from 1*16 here and hit 0*16 last
    ev_load_key(RK1, key, 1 * 16, xmm_key_shuf_mask);
    ev_load_key(RK2, key, 2 * 16, xmm_key_shuf_mask);
    ev_load_key(RK3, key, 3 * 16, xmm_key_shuf_mask);
    ev_load_key(RK4, key, 4 * 16, xmm_key_shuf_mask);
    ev_load_key(RK5, key, 5 * 16, xmm_key_shuf_mask);
    ev_load_key(RK6, key, 6 * 16, xmm_key_shuf_mask);
    ev_load_key(RK7, key, 7 * 16, xmm_key_shuf_mask);
    ev_load_key(RK8, key, 8 * 16, xmm_key_shuf_mask);
    ev_load_key(RK9, key, 9 * 16, xmm_key_shuf_mask);
    ev_load_key(RK10, key, 10 * 16, xmm_key_shuf_mask);
    ev_load_key(RK0, key, 0 * 16, xmm_key_shuf_mask);

    // Variables for storing source cipher text
    const XMMRegister S0 = xmm10;
    const XMMRegister S1 = xmm11;
    const XMMRegister S2 = xmm12;
    const XMMRegister S3 = xmm13;
    const XMMRegister S4 = xmm14;
    const XMMRegister S5 = xmm15;
    const XMMRegister S6 = xmm16;
    const XMMRegister S7 = xmm17;

    // Variables for storing decrypted text
    const XMMRegister B0 = xmm1;
    const XMMRegister B1 = xmm2;
    const XMMRegister B2 = xmm3;
    const XMMRegister B3 = xmm4;
    const XMMRegister B4 = xmm5;
    const XMMRegister B5 = xmm6;
    const XMMRegister B6 = xmm7;
    const XMMRegister B7 = xmm8;

    __ cmpl(rounds, 44);
    __ jcc(Assembler::greater, KEY_192);
    __ jmp(Loop);

    __ BIND(KEY_192);
    const XMMRegister RK11 = xmm27;
    const XMMRegister RK12 = xmm28;
    ev_load_key(RK11, key, 11 * 16, xmm_key_shuf_mask);
    ev_load_key(RK12, key, 12 * 16, xmm_key_shuf_mask);

    __ cmpl(rounds, 52);
    __ jcc(Assembler::greater, KEY_256);
    __ jmp(Loop);

    __ BIND(KEY_256);
    const XMMRegister RK13 = xmm29;
    const XMMRegister RK14 = xmm31;
    ev_load_key(RK13, key, 13 * 16, xmm_key_shuf_mask);
    ev_load_key(RK14, key, 14 * 16, xmm_key_shuf_mask);

    __ BIND(Loop);
    __ cmpl(len_reg, 512);
    __ jcc(Assembler::below, Lcbc_dec_rem);
    __ BIND(Loop1);
    __ subl(len_reg, 512);
    __ evmovdquq(S0, Address(from, 0 * 64), Assembler::AVX_512bit);
    __ evmovdquq(S1, Address(from, 1 * 64), Assembler::AVX_512bit);
    __ evmovdquq(S2, Address(from, 2 * 64), Assembler::AVX_512bit);
    __ evmovdquq(S3, Address(from, 3 * 64), Assembler::AVX_512bit);
    __ evmovdquq(S4, Address(from, 4 * 64), Assembler::AVX_512bit);
    __ evmovdquq(S5, Address(from, 5 * 64), Assembler::AVX_512bit);
    __ evmovdquq(S6, Address(from, 6 * 64), Assembler::AVX_512bit);
    __ evmovdquq(S7, Address(from, 7 * 64), Assembler::AVX_512bit);
    __ leaq(from, Address(from, 8 * 64));

    __ evpxorq(B0, S0, RK1, Assembler::AVX_512bit);
    __ evpxorq(B1, S1, RK1, Assembler::AVX_512bit);
    __ evpxorq(B2, S2, RK1, Assembler::AVX_512bit);
    __ evpxorq(B3, S3, RK1, Assembler::AVX_512bit);
    __ evpxorq(B4, S4, RK1, Assembler::AVX_512bit);
    __ evpxorq(B5, S5, RK1, Assembler::AVX_512bit);
    __ evpxorq(B6, S6, RK1, Assembler::AVX_512bit);
    __ evpxorq(B7, S7, RK1, Assembler::AVX_512bit);

    __ evalignq(IV, S0, IV, 0x06);
    __ evalignq(S0, S1, S0, 0x06);
    __ evalignq(S1, S2, S1, 0x06);
    __ evalignq(S2, S3, S2, 0x06);
    __ evalignq(S3, S4, S3, 0x06);
    __ evalignq(S4, S5, S4, 0x06);
    __ evalignq(S5, S6, S5, 0x06);
    __ evalignq(S6, S7, S6, 0x06);

    roundDec(RK2);
    roundDec(RK3);
    roundDec(RK4);
    roundDec(RK5);
    roundDec(RK6);
    roundDec(RK7);
    roundDec(RK8);
    roundDec(RK9);
    roundDec(RK10);

    __ cmpl(rounds, 44);
    __ jcc(Assembler::belowEqual, L_128);
    roundDec(RK11);
    roundDec(RK12);

    __ cmpl(rounds, 52);
    __ jcc(Assembler::belowEqual, L_192);
    roundDec(RK13);
    roundDec(RK14);

    __ BIND(L_256);
    roundDeclast(RK0);
    __ jmp(Loop2);

    __ BIND(L_128);
    roundDeclast(RK0);
    __ jmp(Loop2);

    __ BIND(L_192);
    roundDeclast(RK0);

    __ BIND(Loop2);
    __ evpxorq(B0, B0, IV, Assembler::AVX_512bit);
    __ evpxorq(B1, B1, S0, Assembler::AVX_512bit);
    __ evpxorq(B2, B2, S1, Assembler::AVX_512bit);
    __ evpxorq(B3, B3, S2, Assembler::AVX_512bit);
    __ evpxorq(B4, B4, S3, Assembler::AVX_512bit);
    __ evpxorq(B5, B5, S4, Assembler::AVX_512bit);
    __ evpxorq(B6, B6, S5, Assembler::AVX_512bit);
    __ evpxorq(B7, B7, S6, Assembler::AVX_512bit);
    __ evmovdquq(IV, S7, Assembler::AVX_512bit);

    __ evmovdquq(Address(to, 0 * 64), B0, Assembler::AVX_512bit);
    __ evmovdquq(Address(to, 1 * 64), B1, Assembler::AVX_512bit);
    __ evmovdquq(Address(to, 2 * 64), B2, Assembler::AVX_512bit);
    __ evmovdquq(Address(to, 3 * 64), B3, Assembler::AVX_512bit);
    __ evmovdquq(Address(to, 4 * 64), B4, Assembler::AVX_512bit);
    __ evmovdquq(Address(to, 5 * 64), B5, Assembler::AVX_512bit);
    __ evmovdquq(Address(to, 6 * 64), B6, Assembler::AVX_512bit);
    __ evmovdquq(Address(to, 7 * 64), B7, Assembler::AVX_512bit);
    __ leaq(to, Address(to, 8 * 64));
    __ jmp(Loop);

    __ BIND(Lcbc_dec_rem);
    __ evshufi64x2(IV, IV, IV, 0x03, Assembler::AVX_512bit);

    __ BIND(Lcbc_dec_rem_loop);
    __ subl(len_reg, 16);
    __ jcc(Assembler::carrySet, Lcbc_dec_ret);

    __ movdqu(S0, Address(from, 0));
    __ evpxorq(B0, S0, RK1, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK2, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK3, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK4, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK5, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK6, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK7, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK8, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK9, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK10, Assembler::AVX_512bit);
    __ cmpl(rounds, 44);
    __ jcc(Assembler::belowEqual, Lcbc_dec_rem_last);

    __ vaesdec(B0, B0, RK11, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK12, Assembler::AVX_512bit);
    __ cmpl(rounds, 52);
    __ jcc(Assembler::belowEqual, Lcbc_dec_rem_last);

    __ vaesdec(B0, B0, RK13, Assembler::AVX_512bit);
    __ vaesdec(B0, B0, RK14, Assembler::AVX_512bit);

    __ BIND(Lcbc_dec_rem_last);
    __ vaesdeclast(B0, B0, RK0, Assembler::AVX_512bit);

    __ evpxorq(B0, B0, IV, Assembler::AVX_512bit);
    __ evmovdquq(IV, S0, Assembler::AVX_512bit);
    __ movdqu(Address(to, 0), B0);
    __ leaq(from, Address(from, 16));
    __ leaq(to, Address(to, 16));
    __ jmp(Lcbc_dec_rem_loop);

    __ BIND(Lcbc_dec_ret);
    __ movdqu(Address(rvec, 0), IV);

    // Zero out the round keys
    __ evpxorq(RK0, RK0, RK0, Assembler::AVX_512bit);
    __ evpxorq(RK1, RK1, RK1, Assembler::AVX_512bit);
    __ evpxorq(RK2, RK2, RK2, Assembler::AVX_512bit);
    __ evpxorq(RK3, RK3, RK3, Assembler::AVX_512bit);
    __ evpxorq(RK4, RK4, RK4, Assembler::AVX_512bit);
    __ evpxorq(RK5, RK5, RK5, Assembler::AVX_512bit);
    __ evpxorq(RK6, RK6, RK6, Assembler::AVX_512bit);
    __ evpxorq(RK7, RK7, RK7, Assembler::AVX_512bit);
    __ evpxorq(RK8, RK8, RK8, Assembler::AVX_512bit);
    __ evpxorq(RK9, RK9, RK9, Assembler::AVX_512bit);
    __ evpxorq(RK10, RK10, RK10, Assembler::AVX_512bit);
    __ cmpl(rounds, 44);
    __ jcc(Assembler::belowEqual, Lcbc_exit);
    __ evpxorq(RK11, RK11, RK11, Assembler::AVX_512bit);
    __ evpxorq(RK12, RK12, RK12, Assembler::AVX_512bit);
    __ cmpl(rounds, 52);
    __ jcc(Assembler::belowEqual, Lcbc_exit);
    __ evpxorq(RK13, RK13, RK13, Assembler::AVX_512bit);
    __ evpxorq(RK14, RK14, RK14, Assembler::AVX_512bit);

    __ BIND(Lcbc_exit);
    __ pop(rbx);
#ifdef _WIN64
    __ movl(rax, len_mem);
#else
    __ pop(rax); // return length
#endif
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }
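  // How the evalignq chaining in the 512-bit loop above works: VALIGNQ with
  // immediate 0x06 concatenates {src1:src2} and shifts right by six quadwords
  // (three 128-bit blocks), i.e. per quadword lane j = 0..7:
  //
  //   dst.q[j] = (j < 2) ? src2.q[j + 6] : src1.q[j - 2];
  //
  // Applied pairwise to registers holding [C0 C1 C2 C3], [C4 C5 C6 C7], ...
  // this produces the shifted-by-one-block vectors [Cprev C0 C1 C2],
  // [C3 C4 C5 C6], ... that the CBC XOR in Loop2 needs, with IV supplying
  // the chaining block carried over from the previous 512-byte batch.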
  // Polynomial x^128+x^127+x^126+x^121+1
  address ghash_polynomial_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "_ghash_poly_addr");
    address start = __ pc();
    __ emit_data64(0x0000000000000001, relocInfo::none);
    __ emit_data64(0xc200000000000000, relocInfo::none);
    return start;
  }

  address ghash_shufflemask_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "_ghash_shuffmask_addr");
    address start = __ pc();
    __ emit_data64(0x0f0f0f0f0f0f0f0f, relocInfo::none);
    __ emit_data64(0x0f0f0f0f0f0f0f0f, relocInfo::none);
    return start;
  }

  // Ghash single and multi block operations using AVX instructions
  address generate_avx_ghash_processBlocks() {
    __ align(CodeEntryAlignment);

    StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
    address start = __ pc();

    // arguments
    const Register state  = c_rarg0;
    const Register htbl   = c_rarg1;
    const Register data   = c_rarg2;
    const Register blocks = c_rarg3;
    __ enter();
    // Save state before entering routine
    __ avx_ghash(state, htbl, data, blocks);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  // byte swap x86 long
  address generate_ghash_long_swap_mask() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask");
    address start = __ pc();
    __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none);
    __ emit_data64(0x0706050403020100, relocInfo::none);
    return start;
  }

  // byte swap x86 byte array
  address generate_ghash_byte_swap_mask() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }

  /* Single and multi-block ghash operations */
  address generate_ghash_processBlocks() {
    __ align(CodeEntryAlignment);
    Label L_ghash_loop, L_exit;
    StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
    address start = __ pc();

    const Register state   = c_rarg0;
    const Register subkeyH = c_rarg1;
    const Register data    = c_rarg2;
    const Register blocks  = c_rarg3;

    const XMMRegister xmm_temp0 = xmm0;
    const XMMRegister xmm_temp1 = xmm1;
    const XMMRegister xmm_temp2 = xmm2;
    const XMMRegister xmm_temp3 = xmm3;
    const XMMRegister xmm_temp4 = xmm4;
    const XMMRegister xmm_temp5 = xmm5;
    const XMMRegister xmm_temp6 = xmm6;
    const XMMRegister xmm_temp7 = xmm7;
    const XMMRegister xmm_temp8 = xmm8;
    const XMMRegister xmm_temp9 = xmm9;
    const XMMRegister xmm_temp10 = xmm10;

    __ enter();

    __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));

    __ movdqu(xmm_temp0, Address(state, 0));
    __ pshufb(xmm_temp0, xmm_temp10);


    __ BIND(L_ghash_loop);
    __ movdqu(xmm_temp2, Address(data, 0));
    __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr()));

    __ movdqu(xmm_temp1, Address(subkeyH, 0));
    __ pshufb(xmm_temp1, xmm_temp10);

    __ pxor(xmm_temp0, xmm_temp2);

    //
    // Multiply with the hash key
    //
    __ movdqu(xmm_temp3, xmm_temp0);
    __ pclmulqdq(xmm_temp3, xmm_temp1, 0);   // xmm3 holds a0*b0
    __ movdqu(xmm_temp4, xmm_temp0);
    __ pclmulqdq(xmm_temp4, xmm_temp1, 16);  // xmm4 holds a0*b1

    __ movdqu(xmm_temp5, xmm_temp0);
    __ pclmulqdq(xmm_temp5, xmm_temp1, 1);   // xmm5 holds a1*b0
    __ movdqu(xmm_temp6, xmm_temp0);
    __ pclmulqdq(xmm_temp6, xmm_temp1, 17);  // xmm6 holds a1*b1

    __ pxor(xmm_temp4, xmm_temp5);           // xmm4 holds a0*b1 + a1*b0

    __ movdqu(xmm_temp5, xmm_temp4);         // move the contents of xmm4 to xmm5
    __ psrldq(xmm_temp4, 8);                 // shift xmm4 by 64 bits to the right
    __ pslldq(xmm_temp5, 8);                 // shift xmm5 by 64 bits to the left
    __ pxor(xmm_temp3, xmm_temp5);
    __ pxor(xmm_temp6, xmm_temp4);           // Register pair <xmm6:xmm3> holds the result
                                             // of the carry-less multiplication of
                                             // xmm0 by xmm1.

    // We shift the result of the multiplication by one bit position
    // to the left to compensate for the fact that the bits are reversed.
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp8, xmm_temp6);
    __ pslld(xmm_temp3, 1);
    __ pslld(xmm_temp6, 1);
    __ psrld(xmm_temp7, 31);
    __ psrld(xmm_temp8, 31);
    __ movdqu(xmm_temp9, xmm_temp7);
    __ pslldq(xmm_temp8, 4);
    __ pslldq(xmm_temp7, 4);
    __ psrldq(xmm_temp9, 12);
    __ por(xmm_temp3, xmm_temp7);
    __ por(xmm_temp6, xmm_temp8);
    __ por(xmm_temp6, xmm_temp9);

    //
    // First phase of the reduction
    //
    // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts
    // independently.
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp8, xmm_temp3);
    __ movdqu(xmm_temp9, xmm_temp3);
    __ pslld(xmm_temp7, 31);        // packed left shift by 31
    __ pslld(xmm_temp8, 30);        // packed left shift by 30
    __ pslld(xmm_temp9, 25);        // packed left shift by 25
    __ pxor(xmm_temp7, xmm_temp8);  // xor the shifted versions
    __ pxor(xmm_temp7, xmm_temp9);
    __ movdqu(xmm_temp8, xmm_temp7);
    __ pslldq(xmm_temp7, 12);
    __ psrldq(xmm_temp8, 4);
    __ pxor(xmm_temp3, xmm_temp7);  // first phase of the reduction complete

    //
    // Second phase of the reduction
    //
    // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these
    // shift operations.
    __ movdqu(xmm_temp2, xmm_temp3);
    __ movdqu(xmm_temp4, xmm_temp3);
    __ movdqu(xmm_temp5, xmm_temp3);
    __ psrld(xmm_temp2, 1);         // packed right shift by 1
    __ psrld(xmm_temp4, 2);         // packed right shift by 2
    __ psrld(xmm_temp5, 7);         // packed right shift by 7
    __ pxor(xmm_temp2, xmm_temp4);  // xor the shifted versions
    __ pxor(xmm_temp2, xmm_temp5);
    __ pxor(xmm_temp2, xmm_temp8);
    __ pxor(xmm_temp3, xmm_temp2);
    __ pxor(xmm_temp6, xmm_temp3);  // the result is in xmm6

    __ decrement(blocks);
    __ jcc(Assembler::zero, L_exit);
    __ movdqu(xmm_temp0, xmm_temp6);
    __ addptr(data, 16);
    __ jmp(L_ghash_loop);

    __ BIND(L_exit);
    __ pshufb(xmm_temp6, xmm_temp10);         // Byte swap 16-byte result
    __ movdqu(Address(state, 0), xmm_temp6);  // store the result
    __ leave();
    __ ret(0);
    return start;
  }
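  // The four pclmulqdq calls above form the schoolbook 128x128 carry-less
  // product (a0*b0, a0*b1, a1*b0, a1*b1) before the two-phase reduction.
  // Scalar analogue of a single pclmulqdq, i.e. a 64x64 -> 128-bit multiply
  // over GF(2) where addition is XOR (a sketch only):
  //
  //   void clmul64(unsigned long long a, unsigned long long b,
  //                unsigned long long* lo, unsigned long long* hi) {
  //     *lo = 0; *hi = 0;
  //     for (int i = 0; i < 64; i++) {
  //       if ((b >> i) & 1) {
  //         *lo ^= a << i;
  //         if (i != 0) *hi ^= a >> (64 - i);
  //       }
  //     }
  //   }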
  // base64 character set
  address base64_charset_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "base64_charset");
    address start = __ pc();
    __ emit_data64(0x0000004200000041, relocInfo::none);
    __ emit_data64(0x0000004400000043, relocInfo::none);
    __ emit_data64(0x0000004600000045, relocInfo::none);
    __ emit_data64(0x0000004800000047, relocInfo::none);
    __ emit_data64(0x0000004a00000049, relocInfo::none);
    __ emit_data64(0x0000004c0000004b, relocInfo::none);
    __ emit_data64(0x0000004e0000004d, relocInfo::none);
    __ emit_data64(0x000000500000004f, relocInfo::none);
    __ emit_data64(0x0000005200000051, relocInfo::none);
    __ emit_data64(0x0000005400000053, relocInfo::none);
    __ emit_data64(0x0000005600000055, relocInfo::none);
    __ emit_data64(0x0000005800000057, relocInfo::none);
    __ emit_data64(0x0000005a00000059, relocInfo::none);
    __ emit_data64(0x0000006200000061, relocInfo::none);
    __ emit_data64(0x0000006400000063, relocInfo::none);
    __ emit_data64(0x0000006600000065, relocInfo::none);
    __ emit_data64(0x0000006800000067, relocInfo::none);
    __ emit_data64(0x0000006a00000069, relocInfo::none);
    __ emit_data64(0x0000006c0000006b, relocInfo::none);
    __ emit_data64(0x0000006e0000006d, relocInfo::none);
    __ emit_data64(0x000000700000006f, relocInfo::none);
    __ emit_data64(0x0000007200000071, relocInfo::none);
    __ emit_data64(0x0000007400000073, relocInfo::none);
    __ emit_data64(0x0000007600000075, relocInfo::none);
    __ emit_data64(0x0000007800000077, relocInfo::none);
    __ emit_data64(0x0000007a00000079, relocInfo::none);
    __ emit_data64(0x0000003100000030, relocInfo::none);
    __ emit_data64(0x0000003300000032, relocInfo::none);
    __ emit_data64(0x0000003500000034, relocInfo::none);
    __ emit_data64(0x0000003700000036, relocInfo::none);
    __ emit_data64(0x0000003900000038, relocInfo::none);
    __ emit_data64(0x0000002f0000002b, relocInfo::none);
    return start;
  }

  // base64 url character set
  address base64url_charset_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "base64url_charset");
    address start = __ pc();
    __ emit_data64(0x0000004200000041, relocInfo::none);
    __ emit_data64(0x0000004400000043, relocInfo::none);
    __ emit_data64(0x0000004600000045, relocInfo::none);
    __ emit_data64(0x0000004800000047, relocInfo::none);
    __ emit_data64(0x0000004a00000049, relocInfo::none);
    __ emit_data64(0x0000004c0000004b, relocInfo::none);
    __ emit_data64(0x0000004e0000004d, relocInfo::none);
    __ emit_data64(0x000000500000004f, relocInfo::none);
    __ emit_data64(0x0000005200000051, relocInfo::none);
    __ emit_data64(0x0000005400000053, relocInfo::none);
    __ emit_data64(0x0000005600000055, relocInfo::none);
    __ emit_data64(0x0000005800000057, relocInfo::none);
    __ emit_data64(0x0000005a00000059, relocInfo::none);
    __ emit_data64(0x0000006200000061, relocInfo::none);
    __ emit_data64(0x0000006400000063, relocInfo::none);
    __ emit_data64(0x0000006600000065, relocInfo::none);
    __ emit_data64(0x0000006800000067, relocInfo::none);
    __ emit_data64(0x0000006a00000069, relocInfo::none);
    __ emit_data64(0x0000006c0000006b, relocInfo::none);
    __ emit_data64(0x0000006e0000006d, relocInfo::none);
    __ emit_data64(0x000000700000006f, relocInfo::none);
    __ emit_data64(0x0000007200000071, relocInfo::none);
    __ emit_data64(0x0000007400000073, relocInfo::none);
    __ emit_data64(0x0000007600000075, relocInfo::none);
    __ emit_data64(0x0000007800000077, relocInfo::none);
    __ emit_data64(0x0000007a00000079, relocInfo::none);
    __ emit_data64(0x0000003100000030, relocInfo::none);
    __ emit_data64(0x0000003300000032, relocInfo::none);
    __ emit_data64(0x0000003500000034, relocInfo::none);
    __ emit_data64(0x0000003700000036, relocInfo::none);
    __ emit_data64(0x0000003900000038, relocInfo::none);
    __ emit_data64(0x0000005f0000002d, relocInfo::none);

    return start;
  }

  address base64_bswap_mask_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "bswap_mask_base64");
    address start = __ pc();
    __ emit_data64(0x0504038002010080, relocInfo::none);
    __ emit_data64(0x0b0a098008070680, relocInfo::none);
    __ emit_data64(0x0908078006050480, relocInfo::none);
    __ emit_data64(0x0f0e0d800c0b0a80, relocInfo::none);
    __ emit_data64(0x0605048003020180, relocInfo::none);
    __ emit_data64(0x0c0b0a8009080780, relocInfo::none);
    __ emit_data64(0x0504038002010080, relocInfo::none);
    __ emit_data64(0x0b0a098008070680, relocInfo::none);

    return start;
  }
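  // The shift and AND masks that follow support the vectorized 3-byte ->
  // 4-symbol split: each 24-bit group is broken into four 6-bit indices
  // (the 0x0002/0x0004/0x0006 patterns look like per-16-bit-lane shift
  // counts and 0x3f is the index mask). The scalar ground truth for one
  // group, with 'table' standing in for the charsets above (which the stub
  // stores as one 32-bit int per symbol for gathering):
  //
  //   void encode3(const unsigned char* in, unsigned char* out,
  //                const unsigned char* table) {
  //     unsigned int bits = (in[0] << 16) | (in[1] << 8) | in[2];
  //     out[0] = table[(bits >> 18) & 0x3f];
  //     out[1] = table[(bits >> 12) & 0x3f];
  //     out[2] = table[(bits >>  6) & 0x3f];
  //     out[3] = table[ bits        & 0x3f];
  //   }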
__ emit_data64(0x0006000400020000, relocInfo::none);
4722
4723 return start;
4724 }
4725
4726 address base64_left_shift_mask_addr() {
4727 __ align(CodeEntryAlignment);
4728 StubCodeMark mark(this, "StubRoutines", "left_shift_mask");
4729 address start = __ pc();
4730 __ emit_data64(0x0000000200040000, relocInfo::none);
4731 __ emit_data64(0x0000000200040000, relocInfo::none);
4732 __ emit_data64(0x0000000200040000, relocInfo::none);
4733 __ emit_data64(0x0000000200040000, relocInfo::none);
4734 __ emit_data64(0x0000000200040000, relocInfo::none);
4735 __ emit_data64(0x0000000200040000, relocInfo::none);
4736 __ emit_data64(0x0000000200040000, relocInfo::none);
4737 __ emit_data64(0x0000000200040000, relocInfo::none);
4738
4739 return start;
4740 }
4741
4742 address base64_and_mask_addr() {
4743 __ align(CodeEntryAlignment);
4744 StubCodeMark mark(this, "StubRoutines", "and_mask");
4745 address start = __ pc();
4746 __ emit_data64(0x3f003f003f000000, relocInfo::none);
4747 __ emit_data64(0x3f003f003f000000, relocInfo::none);
4748 __ emit_data64(0x3f003f003f000000, relocInfo::none);
4749 __ emit_data64(0x3f003f003f000000, relocInfo::none);
4750 __ emit_data64(0x3f003f003f000000, relocInfo::none);
4751 __ emit_data64(0x3f003f003f000000, relocInfo::none);
4752 __ emit_data64(0x3f003f003f000000, relocInfo::none);
4753 __ emit_data64(0x3f003f003f000000, relocInfo::none);
4754 return start;
4755 }
4756
4757 address base64_gather_mask_addr() {
4758 __ align(CodeEntryAlignment);
4759 StubCodeMark mark(this, "StubRoutines", "gather_mask");
4760 address start = __ pc();
4761 __ emit_data64(0xffffffffffffffff, relocInfo::none);
4762 return start;
4763 }
4764
4765 // Code for generating Base64 encoding.
4766 // Intrinsic function prototype in Base64.java:
4767 // private void encodeBlock(byte[] src, int sp, int sl, byte[] dst, int dp, boolean isURL) {
4768 address generate_base64_encodeBlock() {
4769 __ align(CodeEntryAlignment);
4770 StubCodeMark mark(this, "StubRoutines", "implEncode");
4771 address start = __ pc();
4772 __ enter();
4773
4774 // Save callee-saved registers before using them
4775 __ push(r12);
4776 __ push(r13);
4777 __ push(r14);
4778 __ push(r15);
4779
4780 // arguments
4781 const Register source = c_rarg0; // Source Array
4782 const Register start_offset = c_rarg1; // start offset
4783 const Register end_offset = c_rarg2; // end offset
4784 const Register dest = c_rarg3; // destination array
4785
4786 #ifndef _WIN64
4787 const Register dp = c_rarg4; // Position for writing to dest array
4788 const Register isURL = c_rarg5; // Base64 or URL character set
4789 #else
4790 const Address dp_mem(rbp, 6 * wordSize); // dp is passed on the stack on Win64
4791 const Address isURL_mem(rbp, 7 * wordSize);
4792 const Register isURL = r10; // pick the volatile windows register
4793 const Register dp = r12;
4794 __ movl(dp, dp_mem);
4795 __ movl(isURL, isURL_mem);
4796 #endif
4797
4798 const Register length = r14;
4799 Label L_process80, L_process32, L_process3, L_exit, L_processdata;
4800
4801 // calculate length from offsets
4802 __ movl(length, end_offset);
4803 __ subl(length, start_offset);
4804 __ cmpl(length, 0);
4805 __ jcc(Assembler::lessEqual, L_exit);
4806
4807 __ lea(r11, ExternalAddress(StubRoutines::x86::base64_charset_addr()));
4808 // check if base64 charset(isURL=0) or base64 url charset(isURL=1) needs to be loaded
4809 __ cmpl(isURL, 0);
4810 __ jcc(Assembler::equal, L_processdata);
4811 __ lea(r11, ExternalAddress(StubRoutines::x86::base64url_charset_addr()));
4812
4813 // load masks required for encoding data
4814 __ BIND(L_processdata);
4815 __ movdqu(xmm16, ExternalAddress(StubRoutines::x86::base64_gather_mask_addr()));
4816 // Set 64 bits of K register.
4817 __ evpcmpeqb(k3, xmm16, xmm16, Assembler::AVX_512bit);
4818 __ evmovdquq(xmm12, ExternalAddress(StubRoutines::x86::base64_bswap_mask_addr()), Assembler::AVX_256bit, r13);
4819 __ evmovdquq(xmm13, ExternalAddress(StubRoutines::x86::base64_right_shift_mask_addr()), Assembler::AVX_512bit, r13);
4820 __ evmovdquq(xmm14, ExternalAddress(StubRoutines::x86::base64_left_shift_mask_addr()), Assembler::AVX_512bit, r13);
4821 __ evmovdquq(xmm15, ExternalAddress(StubRoutines::x86::base64_and_mask_addr()), Assembler::AVX_512bit, r13);
4822
4823 // Vector Base64 implementation, producing 96 bytes of encoded data
4824 __ BIND(L_process80);
4825 __ cmpl(length, 80);
4826 __ jcc(Assembler::below, L_process32);
4827 __ evmovdquq(xmm0, Address(source, start_offset, Address::times_1, 0), Assembler::AVX_256bit);
4828 __ evmovdquq(xmm1, Address(source, start_offset, Address::times_1, 24), Assembler::AVX_256bit);
4829 __ evmovdquq(xmm2, Address(source, start_offset, Address::times_1, 48), Assembler::AVX_256bit);
4830
4831 // permute the input data so that each 128-bit lane sees a contiguous run of source bytes
4832 __ vpermq(xmm3, xmm0, 148, Assembler::AVX_256bit);
4833 __ vpermq(xmm4, xmm1, 148, Assembler::AVX_256bit);
4834 __ vpermq(xmm5, xmm2, 148, Assembler::AVX_256bit);
4835
4836 // shuffle the input to group 3 bytes of data and add 0 as the 4th byte;
4837 // this lets us handle 12 bytes at a time in each 128-bit register
4838 __ vpshufb(xmm3, xmm3, xmm12, Assembler::AVX_256bit);
4839 __ vpshufb(xmm4, xmm4, xmm12, Assembler::AVX_256bit);
4840 __ vpshufb(xmm5, xmm5, xmm12, Assembler::AVX_256bit);
4841
4842 // convert bytes to words; each 128-bit lane now holds 6 bytes to process
4843 __ vpmovzxbw(xmm3, xmm3, Assembler::AVX_512bit);
4844 __ vpmovzxbw(xmm4, xmm4, Assembler::AVX_512bit);
4845 __ vpmovzxbw(xmm5, xmm5, Assembler::AVX_512bit);
4846
4847 // Extract bits in the pattern 6, 4+2, 2+4, 6 to turn three 8-bit bytes into four 6-bit values
4848 __ evpsrlvw(xmm0, xmm3, xmm13, Assembler::AVX_512bit);
4849 __ evpsrlvw(xmm1, xmm4, xmm13, Assembler::AVX_512bit);
4850 __ evpsrlvw(xmm2, xmm5, xmm13, Assembler::AVX_512bit);
4851
4852 __ evpsllvw(xmm3, xmm3, xmm14, Assembler::AVX_512bit);
4853 __ evpsllvw(xmm4, xmm4, xmm14, Assembler::AVX_512bit);
4854 __ evpsllvw(xmm5, xmm5, xmm14, Assembler::AVX_512bit);
4855
4856 __ vpsrlq(xmm0, xmm0, 8, Assembler::AVX_512bit);
4857 __ vpsrlq(xmm1, xmm1, 8, Assembler::AVX_512bit);
4858 __ vpsrlq(xmm2, xmm2, 8, Assembler::AVX_512bit);
4859
4860 __ vpsllq(xmm3, xmm3, 8, Assembler::AVX_512bit);
4861 __ vpsllq(xmm4, xmm4, 8, Assembler::AVX_512bit);
4862 __ vpsllq(xmm5, xmm5, 8, Assembler::AVX_512bit);
4863
4864 __ vpandq(xmm3, xmm3, xmm15, Assembler::AVX_512bit);
4865 __ vpandq(xmm4, xmm4, xmm15, Assembler::AVX_512bit);
4866 __ vpandq(xmm5, xmm5, xmm15, Assembler::AVX_512bit);
4867
4868 // Get the final 4*6 bits base64 encoding
4869 __ vporq(xmm3, xmm3, xmm0, Assembler::AVX_512bit);
4870 __ vporq(xmm4, xmm4, xmm1, Assembler::AVX_512bit);
4871 __ vporq(xmm5, xmm5, xmm2, Assembler::AVX_512bit);
4872
4873 // Shift
4874 __ vpsrlq(xmm3, xmm3, 8, Assembler::AVX_512bit);
4875 __ vpsrlq(xmm4, xmm4, 8, Assembler::AVX_512bit);
4876 __ vpsrlq(xmm5, xmm5, 8, Assembler::AVX_512bit);
4877
4878 // look up each 6-bit index in the base64 character set to fetch its encoding;
4879 // words are widened to dwords because gather instructions need dword indices
4880 __ vextracti64x4(xmm6, xmm3, 0);
4881 __ vpmovzxwd(xmm0, xmm6, Assembler::AVX_512bit);
4882 __ vextracti64x4(xmm6, xmm3, 1);
4883 __ vpmovzxwd(xmm1, xmm6, Assembler::AVX_512bit);
4884
4885 __ vextracti64x4(xmm6, xmm4, 0);
4886 __ vpmovzxwd(xmm2, xmm6, Assembler::AVX_512bit);
4887 __ vextracti64x4(xmm6, xmm4, 1);
4888 __ vpmovzxwd(xmm3, xmm6, Assembler::AVX_512bit);
4889
4890 __ vextracti64x4(xmm4, xmm5, 0);
4891 __ vpmovzxwd(xmm6, xmm4, Assembler::AVX_512bit);
4892
4893 __ vextracti64x4(xmm4, xmm5, 1);
4894 __ vpmovzxwd(xmm7, xmm4, Assembler::AVX_512bit);
4895
4896 __ kmovql(k2, k3);
4897 __ evpgatherdd(xmm4, k2, Address(r11, xmm0, Address::times_4, 0), Assembler::AVX_512bit);
4898 __ kmovql(k2, k3);
4899 __ evpgatherdd(xmm5, k2, Address(r11, xmm1, Address::times_4, 0), Assembler::AVX_512bit);
4900 __ kmovql(k2, k3);
4901 __ evpgatherdd(xmm8, k2, Address(r11, xmm2, Address::times_4, 0), Assembler::AVX_512bit);
4902 __ kmovql(k2, k3);
4903 __ evpgatherdd(xmm9, k2, Address(r11, xmm3, Address::times_4, 0), Assembler::AVX_512bit);
4904 __ kmovql(k2, k3);
4905 __ evpgatherdd(xmm10, k2, Address(r11, xmm6, Address::times_4, 0), Assembler::AVX_512bit);
4906 __ kmovql(k2, k3);
4907 __ evpgatherdd(xmm11, k2, Address(r11, xmm7, Address::times_4, 0), Assembler::AVX_512bit);
4908
4909 // down-convert dwords to bytes; the final output is 16*6 = 96 bytes long
4910 __ evpmovdb(Address(dest, dp, Address::times_1, 0), xmm4, Assembler::AVX_512bit);
4911 __ evpmovdb(Address(dest, dp, Address::times_1, 16), xmm5, Assembler::AVX_512bit);
4912 __ evpmovdb(Address(dest, dp, Address::times_1, 32), xmm8, Assembler::AVX_512bit);
4913 __ evpmovdb(Address(dest, dp, Address::times_1, 48), xmm9, Assembler::AVX_512bit);
4914 __ evpmovdb(Address(dest, dp, Address::times_1, 64), xmm10, Assembler::AVX_512bit);
4915 __ evpmovdb(Address(dest, dp, Address::times_1, 80), xmm11, Assembler::AVX_512bit);
4916
4917 __ addq(dest, 96);
4918 __ addq(source, 72);
4919 __ subq(length, 72);
4920 __ jmp(L_process80);
4921
4922 // Vector Base64 implementation generating 32 bytes of encoded data
4923 __ BIND(L_process32);
4924 __ cmpl(length, 32);
4925 __ jcc(Assembler::below, L_process3);
4926 __ evmovdquq(xmm0, Address(source, start_offset), Assembler::AVX_256bit);
4927 __ vpermq(xmm0, xmm0, 148, Assembler::AVX_256bit);
4928 __ vpshufb(xmm6, xmm0, xmm12, Assembler::AVX_256bit);
4929 __ vpmovzxbw(xmm6, xmm6, Assembler::AVX_512bit);
4930 __ evpsrlvw(xmm2, xmm6, xmm13, Assembler::AVX_512bit);
4931 __ evpsllvw(xmm3, xmm6, xmm14, Assembler::AVX_512bit);
4932
4933 __ vpsrlq(xmm2, xmm2, 8, Assembler::AVX_512bit);
4934 __ vpsllq(xmm3, xmm3, 8, Assembler::AVX_512bit);
4935 __ vpandq(xmm3, xmm3, xmm15, Assembler::AVX_512bit);
4936 __ vporq(xmm1, xmm2, xmm3, Assembler::AVX_512bit);
4937 __ vpsrlq(xmm1, xmm1, 8, Assembler::AVX_512bit);
4938 __ vextracti64x4(xmm9, xmm1, 0);
4939 __ vpmovzxwd(xmm6, xmm9, Assembler::AVX_512bit);
4940 __ vextracti64x4(xmm9, xmm1, 1);
4941 __ vpmovzxwd(xmm5, xmm9, Assembler::AVX_512bit);
4942 __ kmovql(k2, k3);
4943 __ evpgatherdd(xmm8, k2, Address(r11, xmm6, Address::times_4, 0), Assembler::AVX_512bit);
4944 __ kmovql(k2, k3);
4945 __ evpgatherdd(xmm10, k2, Address(r11, xmm5, Address::times_4, 0), Assembler::AVX_512bit);
4946 __ evpmovdb(Address(dest, dp, Address::times_1, 0), xmm8, Assembler::AVX_512bit);
4947 __ evpmovdb(Address(dest, dp, Address::times_1, 16), xmm10, Assembler::AVX_512bit);
4948 __ subq(length, 24);
4949 __ addq(dest, 32);
4950 __ addq(source, 24);
4951 __ jmp(L_process32);
4952
4953 // Scalar data processing takes 3 bytes at a time and produces 4 bytes of encoded data
4954 /* This code corresponds to the scalar version of the following snippet in Base64.java
4955 ** int bits = (src[sp0++] & 0xff) << 16 | (src[sp0++] & 0xff) << 8 | (src[sp0++] & 0xff);
4956 ** dst[dp0++] = (byte)base64[(bits >>> 18) & 0x3f];
4957 ** dst[dp0++] = (byte)base64[(bits >>> 12) & 0x3f];
4958 ** dst[dp0++] = (byte)base64[(bits >>> 6) & 0x3f];
4959 ** dst[dp0++] = (byte)base64[bits & 0x3f];*/
4960 __ BIND(L_process3);
4961 __ cmpl(length, 3);
4962 __ jcc(Assembler::below, L_exit);
4963 // Read 1 byte at a time
4964 __ movzbl(rax, Address(source, start_offset));
4965 __ shll(rax, 0x10);
4966 __ movl(r15, rax);
4967 __ movzbl(rax, Address(source, start_offset, Address::times_1, 1));
4968 __ shll(rax, 0x8);
4969 __ movzwl(rax, rax);
4970 __ orl(r15, rax);
4971 __ movzbl(rax, Address(source, start_offset, Address::times_1, 2));
4972 __ orl(rax, r15);
4973 // Save 3 bytes read in r15
4974 __ movl(r15, rax);
4975 __ shrl(rax, 0x12);
4976 __ andl(rax, 0x3f);
4977 // rax contains the index, r11 contains base64 lookup table
4978 __ movb(rax, Address(r11, rax, Address::times_4));
4979 // Write the encoded byte to destination
4980 __ movb(Address(dest, dp, Address::times_1, 0), rax);
4981 __ movl(rax, r15);
4982 __ shrl(rax, 0xc);
4983 __ andl(rax, 0x3f);
4984 __ movb(rax, Address(r11, rax, Address::times_4));
4985 __ movb(Address(dest, dp, Address::times_1, 1), rax);
4986 __ movl(rax, r15);
4987 __ shrl(rax, 0x6);
4988 __ andl(rax, 0x3f);
4989 __ movb(rax, Address(r11, rax, Address::times_4));
4990 __ movb(Address(dest, dp, Address::times_1, 2), rax);
4991 __ movl(rax, r15);
4992 __ andl(rax, 0x3f);
4993 __ movb(rax, Address(r11, rax, Address::times_4));
4994 __ movb(Address(dest, dp, Address::times_1, 3), rax);
4995 __ subl(length, 3);
4996 __ addq(dest, 4);
4997 __ addq(source, 3);
4998 __ jmp(L_process3);
4999 __ BIND(L_exit);
5000 __ pop(r15);
5001 __ pop(r14);
5002 __ pop(r13);
5003 __ pop(r12);
5004 __ leave();
5005 __ ret(0);
5006 return start;
5007 }
5008
5009 /**
5010 * Arguments:
5011 *
5012 * Inputs:
5013 * c_rarg0 - int crc
5014 * c_rarg1 - byte* buf
5015 * c_rarg2 - int length
5016 *
5017 * Output:
5018 * rax - int crc result
5019 */
5020 address generate_updateBytesCRC32() {
5021 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");
5022
5023 __ align(CodeEntryAlignment);
5024 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");
5025
5026 address start = __ pc();
5027 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
5028 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
5029 // rscratch1: r10
5030 const Register crc = c_rarg0; // crc
5031 const Register buf = c_rarg1; // source java byte array address
5032 const Register len = c_rarg2; // length
5033 const Register table = c_rarg3; // crc_table address (reuse register)
5034 const Register tmp = r11;
5035 assert_different_registers(crc, buf, len, table, tmp, rax);
5036
5037 BLOCK_COMMENT("Entry:");
5038 __ enter(); // required for proper stackwalking of RuntimeStub frame
5039
5040 __ kernel_crc32(crc, buf, len, table, tmp);
5041
5042 __ movl(rax, crc);
5043 __ vzeroupper();
5044 __ leave(); // required for proper stackwalking of RuntimeStub frame
5045 __ ret(0);
5046
5047 return start;
5048 }
5049
5050 /**
5051 * Arguments:
5052 *
5053 * Inputs:
5054 * c_rarg0 - int crc
5055 * c_rarg1 - byte* buf
5056 * c_rarg2 - long length
5057 * c_rarg3 - table_start - optional (present only when doing a library_call,
5058 * not used by x86 algorithm)
5059 *
5060 * Output:
5061 * rax - int crc result
5062 */
5063 address generate_updateBytesCRC32C(bool is_pclmulqdq_supported) {
5064 assert(UseCRC32CIntrinsics, "need SSE4_2");
5065 __ align(CodeEntryAlignment);
5066 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C");
5067 address start = __ pc();
5068 // reg.arg   int#0  int#1  int#2  int#3  int#4  int#5  float regs
5069 // Windows   RCX    RDX    R8     R9     none   none   XMM0..XMM3
5070 // Lin / Sol RDI    RSI    RDX    RCX    R8     R9     XMM0..XMM7
5071 const Register crc = c_rarg0; // crc
5072 const Register buf = c_rarg1; // source java byte array address
5073 const Register len = c_rarg2; // length
5074 const Register a = rax;
5075 const Register j = r9;
5076 const Register k = r10;
5077 const Register l = r11;
5078 #ifdef _WIN64
5079 const Register y = rdi;
5080 const Register z = rsi;
5081 #else
5082 const Register y = rcx;
5083 const Register z = r8;
5084 #endif
5085 assert_different_registers(crc, buf, len, a, j, k, l, y, z);
5086
5087 BLOCK_COMMENT("Entry:");
5088 __ enter(); // required for proper stackwalking of RuntimeStub frame
5089 #ifdef _WIN64
5090 __ push(y);
5091 __ push(z);
5092 #endif
5093 __ crc32c_ipl_alg2_alt2(crc, buf, len,
5094 a, j, k,
5095 l, y, z,
5096 c_farg0, c_farg1, c_farg2,
5097 is_pclmulqdq_supported);
5098 __ movl(rax, crc);
5099 #ifdef _WIN64
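// Restore rdi/rsi: they are callee-saved in the Windows ABI and were pushed above.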
5100 __ pop(z);
5101 __ pop(y);
5102 #endif
5103 __ vzeroupper();
5104 __ leave(); // required for proper stackwalking of RuntimeStub frame
5105 __ ret(0);
5106
5107 return start;
5108 }
5109
5110 /**
5111 * Arguments:
5112 *
5113 * Input:
5114 * c_rarg0 - x address
5115 * c_rarg1 - x length
5116 * c_rarg2 - y address
5117 * c_rarg3 - y length
5118 * not Win64
5119 * c_rarg4 - z address
5120 * c_rarg5 - z length
5121 * Win64
5122 * rsp+40 - z address
5123 * rsp+48 - z length
5124 */
5125 address generate_multiplyToLen() {
5126 __ align(CodeEntryAlignment);
5127 StubCodeMark mark(this, "StubRoutines", "multiplyToLen");
5128
5129 address start = __ pc();
5130 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
5131 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
5132 const Register x = rdi;
5133 const Register xlen = rax;
5134 const Register y = rsi;
5135 const Register ylen = rcx;
5136 const Register z = r8;
5137 const Register zlen = r11;
5138
5139 // Next registers will be saved on stack in multiply_to_len().
5140 const Register tmp1 = r12;
5141 const Register tmp2 = r13;
5142 const Register tmp3 = r14;
5143 const Register tmp4 = r15;
5144 const Register tmp5 = rbx;
5145
5146 BLOCK_COMMENT("Entry:");
5147 __ enter(); // required for proper stackwalking of RuntimeStub frame
5148
5149 #ifndef _WIN64
5150 __ movptr(zlen, r9); // Save r9 in r11 - zlen
5151 #endif
5152 setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx
5153 // ylen => rcx, z => r8, zlen => r11
5154 // r9 and r10 may be used to save non-volatile registers
5155 #ifdef _WIN64
5156 // last 2 arguments (#4, #5) are on stack on Win64
5157 __ movptr(z, Address(rsp, 6 * wordSize));
5158 __ movptr(zlen, Address(rsp, 7 * wordSize));
5159 #endif
5160
5161 __ movptr(xlen, rsi);
5162 __ movptr(y, rdx);
5163 __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5);
5164
5165 restore_arg_regs();
5166
5167 __ leave(); // required for proper stackwalking of RuntimeStub frame
5168 __ ret(0);
5169
5170 return start;
5171 }
5172
5173 /**
5174 * Arguments:
5175 *
5176 * Input:
5177 * c_rarg0 - obja address
5178 * c_rarg1 - objb address
5179 * c_rarg2 - length length
5180 * c_rarg3 - scale log2_array_indxscale
5181 *
5182 * Output:
5183 * rax - int; >= 0: index of a mismatch, < 0: bitwise complement of tail
5184 */
5185 address generate_vectorizedMismatch() {
5186 __ align(CodeEntryAlignment);
5187 StubCodeMark mark(this, "StubRoutines", "vectorizedMismatch");
5188 address start = __ pc();
5189
5190 BLOCK_COMMENT("Entry:");
5191 __ enter();
5192
5193 #ifdef _WIN64 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
5194 const Register scale = c_rarg0; // rcx, will exchange with r9
5195 const Register objb = c_rarg1; // rdx
5196 const Register length = c_rarg2; // r8
5197 const Register obja = c_rarg3; // r9
5198 __ xchgq(obja, scale); // now obja and scale contain the correct contents
5199
5200 const Register tmp1 = r10;
5201 const Register tmp2 = r11;
5202 #endif
5203 #ifndef _WIN64 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
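// On Unix the four incoming arguments already sit in the registers that
// vectorized_mismatch() expects, so no shuffling (cf. the xchgq above for Win64) is needed.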
5204 const Register obja = c_rarg0; // U:rdi
5205 const Register objb = c_rarg1; // U:rsi
5206 const Register length = c_rarg2; // U:rdx
5207 const Register scale = c_rarg3; // U:rcx
5208 const Register tmp1 = r8;
5209 const Register tmp2 = r9;
5210 #endif
5211 const Register result = rax; // return value
5212 const XMMRegister vec0 = xmm0;
5213 const XMMRegister vec1 = xmm1;
5214 const XMMRegister vec2 = xmm2;
5215
5216 __ vectorized_mismatch(obja, objb, length, scale, result, tmp1, tmp2, vec0, vec1, vec2);
5217
5218 __ vzeroupper();
5219 __ leave();
5220 __ ret(0);
5221
5222 return start;
5223 }
5224
5225 /**
5226 * Arguments:
5227 *
5228 * Input:
5229 * c_rarg0 - x address
5230 * c_rarg1 - x length
5231 * c_rarg2 - z address
5232 * c_rarg3 - z length
5233 *
5234 */
5235 address generate_squareToLen() {
5236
5237 __ align(CodeEntryAlignment);
5238 StubCodeMark mark(this, "StubRoutines", "squareToLen");
5239
5240 address start = __ pc();
5241 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
5242 // Unix: rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...)
5243 const Register x = rdi;
5244 const Register len = rsi;
5245 const Register z = r8;
5246 const Register zlen = rcx;
5247
5248 const Register tmp1 = r12;
5249 const Register tmp2 = r13;
5250 const Register tmp3 = r14;
5251 const Register tmp4 = r15;
5252 const Register tmp5 = rbx;
5253
5254 BLOCK_COMMENT("Entry:");
5255 __ enter(); // required for proper stackwalking of RuntimeStub frame
5256
5257 setup_arg_regs(4); // x => rdi, len => rsi, z => rdx
5258 // zlen => rcx
5259 // r9 and r10 may be used to save non-volatile registers
5260 __ movptr(r8, rdx);
5261 __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax);
5262
5263 restore_arg_regs();
5264
5265 __ leave(); // required for proper stackwalking of RuntimeStub frame
5266 __ ret(0);
5267
5268 return start;
5269 }
5270
5271 address generate_method_entry_barrier() {
5272 __ align(CodeEntryAlignment);
5273 StubCodeMark mark(this, "StubRoutines", "nmethod_entry_barrier");
5274
5275 Label deoptimize_label;
5276
5277 address start = __ pc();
5278
5279 __ push(-1); // cookie, this is used for writing the new rsp when deoptimizing
5280
5281 BLOCK_COMMENT("Entry:");
5282 __ enter(); // save rbp
5283
5284 // save c_rarg0, because we want to use that value.
5285 // We could do without it but then we depend on the number of slots used by pusha
5286 __ push(c_rarg0);
5287
5288 __ lea(c_rarg0, Address(rsp, wordSize * 3)); // 1 for cookie, 1 for rbp, 1 for c_rarg0 - this should be the return address
5289
5290 __ pusha();
5291
5292 // The method may have floats as arguments, and we must spill them before calling
5293 // the VM runtime.
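// Layout note: each of the 8 Java float argument registers gets a 16-byte slot
// below rsp, xmm0 at rsp + 0 up through xmm7 at rsp + 112.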
5294 assert(Argument::n_float_register_parameters_j == 8, "Assumption");
5295 const int xmm_size = wordSize * 2;
5296 const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j;
5297 __ subptr(rsp, xmm_spill_size);
5298 __ movdqu(Address(rsp, xmm_size * 7), xmm7);
5299 __ movdqu(Address(rsp, xmm_size * 6), xmm6);
5300 __ movdqu(Address(rsp, xmm_size * 5), xmm5);
5301 __ movdqu(Address(rsp, xmm_size * 4), xmm4);
5302 __ movdqu(Address(rsp, xmm_size * 3), xmm3);
5303 __ movdqu(Address(rsp, xmm_size * 2), xmm2);
5304 __ movdqu(Address(rsp, xmm_size * 1), xmm1);
5305 __ movdqu(Address(rsp, xmm_size * 0), xmm0);
5306
5307 __ call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(address*)>(BarrierSetNMethod::nmethod_stub_entry_barrier)), 1);
5308
5309 __ movdqu(xmm0, Address(rsp, xmm_size * 0));
5310 __ movdqu(xmm1, Address(rsp, xmm_size * 1));
5311 __ movdqu(xmm2, Address(rsp, xmm_size * 2));
5312 __ movdqu(xmm3, Address(rsp, xmm_size * 3));
5313 __ movdqu(xmm4, Address(rsp, xmm_size * 4));
5314 __ movdqu(xmm5, Address(rsp, xmm_size * 5));
5315 __ movdqu(xmm6, Address(rsp, xmm_size * 6));
5316 __ movdqu(xmm7, Address(rsp, xmm_size * 7));
5317 __ addptr(rsp, xmm_spill_size);
5318
5319 __ cmpl(rax, 1); // 1 means deoptimize
5320 __ jcc(Assembler::equal, deoptimize_label);
5321
5322 __ popa();
5323 __ pop(c_rarg0);
5324
5325 __ leave();
5326
5327 __ addptr(rsp, 1 * wordSize); // cookie
5328 __ ret(0);
5329
5330
5331 __ BIND(deoptimize_label);
5332
5333 __ popa();
5334 __ pop(c_rarg0);
5335
5336 __ leave();
5337
5338 // This can be taken out, but is good for verification purposes. Getting a SIGSEGV
5339 // here while still having a correct stack is valuable.
5340 __ testptr(rsp, Address(rsp, 0));
5341
5342 __ movptr(rsp, Address(rsp, 0)); // new rsp was written in the barrier
5343 __ jmp(Address(rsp, -1 * wordSize)); // jmp target should be the caller's verified_entry_point
5344
5345 return start;
5346 }
5347
5348 /**
5349 * Arguments:
5350 *
5351 * Input:
5352 * c_rarg0 - out address
5353 * c_rarg1 - in address
5354 * c_rarg2 - offset
5355 * c_rarg3 - len
5356 * not Win64
5357 * c_rarg4 - k
5358 * Win64
5359 * rsp+40 - k
5360 */
5361 address generate_mulAdd() {
5362 __ align(CodeEntryAlignment);
5363 StubCodeMark mark(this, "StubRoutines", "mulAdd");
5364
5365 address start = __ pc();
5366 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
5367 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
5368 const Register out = rdi;
5369 const Register in = rsi;
5370 const Register offset = r11;
5371 const Register len = rcx;
5372 const Register k = r8;
5373
5374 // Next registers will be saved on stack in mul_add().
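// For reference, mul_add() computes the equivalent of the following
// java.math.BigInteger snippet (a sketch for orientation only):
//   long kLong = k & LONG_MASK;
//   long carry = 0;
//   offset = out.length - offset - 1;
//   for (int j = len - 1; j >= 0; j--) {
//     long product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry;
//     out[offset--] = (int)product;
//     carry = product >>> 32;
//   }
//   return (int)carry;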
5375 const Register tmp1 = r12; 5376 const Register tmp2 = r13; 5377 const Register tmp3 = r14; 5378 const Register tmp4 = r15; 5379 const Register tmp5 = rbx; 5380 5381 BLOCK_COMMENT("Entry:"); 5382 __ enter(); // required for proper stackwalking of RuntimeStub frame 5383 5384 setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx 5385 // len => rcx, k => r8 5386 // r9 and r10 may be used to save non-volatile registers 5387 #ifdef _WIN64 5388 // last argument is on stack on Win64 5389 __ movl(k, Address(rsp, 6 * wordSize)); 5390 #endif 5391 __ movptr(r11, rdx); // move offset in rdx to offset(r11) 5392 __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 5393 5394 restore_arg_regs(); 5395 5396 __ leave(); // required for proper stackwalking of RuntimeStub frame 5397 __ ret(0); 5398 5399 return start; 5400 } 5401 5402 address generate_libmExp() { 5403 StubCodeMark mark(this, "StubRoutines", "libmExp"); 5404 5405 address start = __ pc(); 5406 5407 const XMMRegister x0 = xmm0; 5408 const XMMRegister x1 = xmm1; 5409 const XMMRegister x2 = xmm2; 5410 const XMMRegister x3 = xmm3; 5411 5412 const XMMRegister x4 = xmm4; 5413 const XMMRegister x5 = xmm5; 5414 const XMMRegister x6 = xmm6; 5415 const XMMRegister x7 = xmm7; 5416 5417 const Register tmp = r11; 5418 5419 BLOCK_COMMENT("Entry:"); 5420 __ enter(); // required for proper stackwalking of RuntimeStub frame 5421 5422 __ fast_exp(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 5423 5424 __ leave(); // required for proper stackwalking of RuntimeStub frame 5425 __ ret(0); 5426 5427 return start; 5428 5429 } 5430 5431 address generate_libmLog() { 5432 StubCodeMark mark(this, "StubRoutines", "libmLog"); 5433 5434 address start = __ pc(); 5435 5436 const XMMRegister x0 = xmm0; 5437 const XMMRegister x1 = xmm1; 5438 const XMMRegister x2 = xmm2; 5439 const XMMRegister x3 = xmm3; 5440 5441 const XMMRegister x4 = xmm4; 5442 const XMMRegister x5 = xmm5; 5443 const XMMRegister x6 = xmm6; 5444 const XMMRegister x7 = xmm7; 5445 5446 const Register tmp1 = r11; 5447 const Register tmp2 = r8; 5448 5449 BLOCK_COMMENT("Entry:"); 5450 __ enter(); // required for proper stackwalking of RuntimeStub frame 5451 5452 __ fast_log(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2); 5453 5454 __ leave(); // required for proper stackwalking of RuntimeStub frame 5455 __ ret(0); 5456 5457 return start; 5458 5459 } 5460 5461 address generate_libmLog10() { 5462 StubCodeMark mark(this, "StubRoutines", "libmLog10"); 5463 5464 address start = __ pc(); 5465 5466 const XMMRegister x0 = xmm0; 5467 const XMMRegister x1 = xmm1; 5468 const XMMRegister x2 = xmm2; 5469 const XMMRegister x3 = xmm3; 5470 5471 const XMMRegister x4 = xmm4; 5472 const XMMRegister x5 = xmm5; 5473 const XMMRegister x6 = xmm6; 5474 const XMMRegister x7 = xmm7; 5475 5476 const Register tmp = r11; 5477 5478 BLOCK_COMMENT("Entry:"); 5479 __ enter(); // required for proper stackwalking of RuntimeStub frame 5480 5481 __ fast_log10(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 5482 5483 __ leave(); // required for proper stackwalking of RuntimeStub frame 5484 __ ret(0); 5485 5486 return start; 5487 5488 } 5489 5490 address generate_libmPow() { 5491 StubCodeMark mark(this, "StubRoutines", "libmPow"); 5492 5493 address start = __ pc(); 5494 5495 const XMMRegister x0 = xmm0; 5496 const XMMRegister x1 = xmm1; 5497 const XMMRegister x2 = xmm2; 5498 const XMMRegister x3 = xmm3; 5499 5500 const XMMRegister x4 = xmm4; 5501 const XMMRegister x5 = xmm5; 5502 const XMMRegister 
x6 = xmm6; 5503 const XMMRegister x7 = xmm7; 5504 5505 const Register tmp1 = r8; 5506 const Register tmp2 = r9; 5507 const Register tmp3 = r10; 5508 const Register tmp4 = r11; 5509 5510 BLOCK_COMMENT("Entry:"); 5511 __ enter(); // required for proper stackwalking of RuntimeStub frame 5512 5513 __ fast_pow(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 5514 5515 __ leave(); // required for proper stackwalking of RuntimeStub frame 5516 __ ret(0); 5517 5518 return start; 5519 5520 } 5521 5522 address generate_libmSin() { 5523 StubCodeMark mark(this, "StubRoutines", "libmSin"); 5524 5525 address start = __ pc(); 5526 5527 const XMMRegister x0 = xmm0; 5528 const XMMRegister x1 = xmm1; 5529 const XMMRegister x2 = xmm2; 5530 const XMMRegister x3 = xmm3; 5531 5532 const XMMRegister x4 = xmm4; 5533 const XMMRegister x5 = xmm5; 5534 const XMMRegister x6 = xmm6; 5535 const XMMRegister x7 = xmm7; 5536 5537 const Register tmp1 = r8; 5538 const Register tmp2 = r9; 5539 const Register tmp3 = r10; 5540 const Register tmp4 = r11; 5541 5542 BLOCK_COMMENT("Entry:"); 5543 __ enter(); // required for proper stackwalking of RuntimeStub frame 5544 5545 #ifdef _WIN64 5546 __ push(rsi); 5547 __ push(rdi); 5548 #endif 5549 __ fast_sin(x0, x1, x2, x3, x4, x5, x6, x7, rax, rbx, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 5550 5551 #ifdef _WIN64 5552 __ pop(rdi); 5553 __ pop(rsi); 5554 #endif 5555 5556 __ leave(); // required for proper stackwalking of RuntimeStub frame 5557 __ ret(0); 5558 5559 return start; 5560 5561 } 5562 5563 address generate_libmCos() { 5564 StubCodeMark mark(this, "StubRoutines", "libmCos"); 5565 5566 address start = __ pc(); 5567 5568 const XMMRegister x0 = xmm0; 5569 const XMMRegister x1 = xmm1; 5570 const XMMRegister x2 = xmm2; 5571 const XMMRegister x3 = xmm3; 5572 5573 const XMMRegister x4 = xmm4; 5574 const XMMRegister x5 = xmm5; 5575 const XMMRegister x6 = xmm6; 5576 const XMMRegister x7 = xmm7; 5577 5578 const Register tmp1 = r8; 5579 const Register tmp2 = r9; 5580 const Register tmp3 = r10; 5581 const Register tmp4 = r11; 5582 5583 BLOCK_COMMENT("Entry:"); 5584 __ enter(); // required for proper stackwalking of RuntimeStub frame 5585 5586 #ifdef _WIN64 5587 __ push(rsi); 5588 __ push(rdi); 5589 #endif 5590 __ fast_cos(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 5591 5592 #ifdef _WIN64 5593 __ pop(rdi); 5594 __ pop(rsi); 5595 #endif 5596 5597 __ leave(); // required for proper stackwalking of RuntimeStub frame 5598 __ ret(0); 5599 5600 return start; 5601 5602 } 5603 5604 address generate_libmTan() { 5605 StubCodeMark mark(this, "StubRoutines", "libmTan"); 5606 5607 address start = __ pc(); 5608 5609 const XMMRegister x0 = xmm0; 5610 const XMMRegister x1 = xmm1; 5611 const XMMRegister x2 = xmm2; 5612 const XMMRegister x3 = xmm3; 5613 5614 const XMMRegister x4 = xmm4; 5615 const XMMRegister x5 = xmm5; 5616 const XMMRegister x6 = xmm6; 5617 const XMMRegister x7 = xmm7; 5618 5619 const Register tmp1 = r8; 5620 const Register tmp2 = r9; 5621 const Register tmp3 = r10; 5622 const Register tmp4 = r11; 5623 5624 BLOCK_COMMENT("Entry:"); 5625 __ enter(); // required for proper stackwalking of RuntimeStub frame 5626 5627 #ifdef _WIN64 5628 __ push(rsi); 5629 __ push(rdi); 5630 #endif 5631 __ fast_tan(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 5632 5633 #ifdef _WIN64 5634 __ pop(rdi); 5635 __ pop(rsi); 5636 #endif 5637 5638 __ leave(); // required for proper stackwalking of RuntimeStub frame 5639 __ ret(0); 5640 5641 return 
start; 5642 5643 } 5644 5645 #undef __ 5646 #define __ masm-> 5647 5648 // Continuation point for throwing of implicit exceptions that are 5649 // not handled in the current activation. Fabricates an exception 5650 // oop and initiates normal exception dispatching in this 5651 // frame. Since we need to preserve callee-saved values (currently 5652 // only for C2, but done for C1 as well) we need a callee-saved oop 5653 // map and therefore have to make these stubs into RuntimeStubs 5654 // rather than BufferBlobs. If the compiler needs all registers to 5655 // be preserved between the fault point and the exception handler 5656 // then it must assume responsibility for that in 5657 // AbstractCompiler::continuation_for_implicit_null_exception or 5658 // continuation_for_implicit_division_by_zero_exception. All other 5659 // implicit exceptions (e.g., NullPointerException or 5660 // AbstractMethodError on entry) are either at call sites or 5661 // otherwise assume that stack unwinding will be initiated, so 5662 // caller saved registers were assumed volatile in the compiler. 5663 address generate_throw_exception(const char* name, 5664 address runtime_entry, 5665 Register arg1 = noreg, 5666 Register arg2 = noreg) { 5667 // Information about frame layout at time of blocking runtime call. 5668 // Note that we only have to preserve callee-saved registers since 5669 // the compilers are responsible for supplying a continuation point 5670 // if they expect all registers to be preserved. 5671 enum layout { 5672 rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt, 5673 rbp_off2, 5674 return_off, 5675 return_off2, 5676 framesize // inclusive of return address 5677 }; 5678 5679 int insts_size = 512; 5680 int locs_size = 64; 5681 5682 CodeBuffer code(name, insts_size, locs_size); 5683 OopMapSet* oop_maps = new OopMapSet(); 5684 MacroAssembler* masm = new MacroAssembler(&code); 5685 5686 address start = __ pc(); 5687 5688 // This is an inlined and slightly modified version of call_VM 5689 // which has the ability to fetch the return PC out of 5690 // thread-local storage and also sets up last_Java_sp slightly 5691 // differently than the real call_VM 5692 5693 __ enter(); // required for proper stackwalking of RuntimeStub frame 5694 5695 assert(is_even(framesize/2), "sp not 16-byte aligned"); 5696 5697 // return address and rbp are already in place 5698 __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog 5699 5700 int frame_complete = __ pc() - start; 5701 5702 // Set up last_Java_sp and last_Java_fp 5703 address the_pc = __ pc(); 5704 __ set_last_Java_frame(rsp, rbp, the_pc); 5705 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack 5706 5707 // Call runtime 5708 if (arg1 != noreg) { 5709 assert(arg2 != c_rarg1, "clobbered"); 5710 __ movptr(c_rarg1, arg1); 5711 } 5712 if (arg2 != noreg) { 5713 __ movptr(c_rarg2, arg2); 5714 } 5715 __ movptr(c_rarg0, r15_thread); 5716 BLOCK_COMMENT("call runtime_entry"); 5717 __ call(RuntimeAddress(runtime_entry)); 5718 5719 // Generate oop map 5720 OopMap* map = new OopMap(framesize, 0); 5721 5722 oop_maps->add_gc_map(the_pc - start, map); 5723 5724 __ reset_last_Java_frame(true); 5725 5726 __ leave(); // required for proper stackwalking of RuntimeStub frame 5727 5728 // check for pending exceptions 5729 #ifdef ASSERT 5730 Label L; 5731 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), 5732 (int32_t) NULL_WORD); 5733 __ jcc(Assembler::notEqual, L); 5734 __ should_not_reach_here(); 5735 __ bind(L); 5736 #endif // ASSERT 5737 __ 
jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
5738
5739
5740 // codeBlob framesize is in words (not VMRegImpl::slot_size)
5741 RuntimeStub* stub =
5742 RuntimeStub::new_runtime_stub(name,
5743 &code,
5744 frame_complete,
5745 (framesize >> (LogBytesPerWord - LogBytesPerInt)),
5746 oop_maps, false);
5747 return stub->entry_point();
5748 }
5749
5750 void create_control_words() {
5751 // Round to nearest, 53-bit mode, exceptions masked
5752 StubRoutines::_fpu_cntrl_wrd_std = 0x027F;
5753 // Round to zero, 53-bit mode, exceptions masked
5754 StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
5755 // Round to nearest, 24-bit mode, exceptions masked
5756 StubRoutines::_fpu_cntrl_wrd_24 = 0x007F;
5757 // Round to nearest, all exceptions masked
5758 StubRoutines::_mxcsr_std = 0x1F80;
5759 // Note: the following two constants are 80-bit values;
5760 // their layout is critical for correct loading by the FPU.
5761 // Bias for strict fp multiply/divide
5762 StubRoutines::_fpu_subnormal_bias1[0]= 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
5763 StubRoutines::_fpu_subnormal_bias1[1]= 0x80000000;
5764 StubRoutines::_fpu_subnormal_bias1[2]= 0x03ff;
5765 // Un-bias for strict fp multiply/divide
5766 StubRoutines::_fpu_subnormal_bias2[0]= 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
5767 StubRoutines::_fpu_subnormal_bias2[1]= 0x80000000;
5768 StubRoutines::_fpu_subnormal_bias2[2]= 0x7bff;
5769 }
5770
5771 // Initialization
5772 void generate_initial() {
5773 // Generates all stubs and initializes the entry points
5774
5775 // These platform-specific settings are needed by generate_call_stub()
5776 create_control_words();
5777
5778 // Entry points that exist on all platforms. Note: this is code
5779 // that could be shared among different platforms - however the
5780 // benefit seems to be smaller than the disadvantage of having a
5781 // much more complicated generator structure. See also the comment in
5782 // stubRoutines.hpp.
5783
5784 StubRoutines::_forward_exception_entry = generate_forward_exception();
5785
5786 StubRoutines::_call_stub_entry =
5787 generate_call_stub(StubRoutines::_call_stub_return_address);
5788
5789 // is referenced by megamorphic call
5790 StubRoutines::_catch_exception_entry = generate_catch_exception();
5791
5792 // atomic calls
5793 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
5794 StubRoutines::_atomic_xchg_long_entry = generate_atomic_xchg_long();
5795 StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
5796 StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
5797 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
5798 StubRoutines::_atomic_add_entry = generate_atomic_add();
5799 StubRoutines::_atomic_add_long_entry = generate_atomic_add_long();
5800 StubRoutines::_fence_entry = generate_orderaccess_fence();
5801
5802 // platform dependent
5803 StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
5804 StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();
5805
5806 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
5807
5808 // Build this early so it's available for the interpreter.
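// (Stack-bang overflow checks in interpreted and compiled code dispatch to these stubs.)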
5809 StubRoutines::_throw_StackOverflowError_entry =
5810 generate_throw_exception("StackOverflowError throw_exception",
5811 CAST_FROM_FN_PTR(address,
5812 SharedRuntime::
5813 throw_StackOverflowError));
5814 StubRoutines::_throw_delayed_StackOverflowError_entry =
5815 generate_throw_exception("delayed StackOverflowError throw_exception",
5816 CAST_FROM_FN_PTR(address,
5817 SharedRuntime::
5818 throw_delayed_StackOverflowError));
5819 if (UseCRC32Intrinsics) {
5820 // set the table address before generating the stubs that use it
5821 StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
5822 StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
5823 }
5824
5825 if (UseCRC32CIntrinsics) {
5826 bool supports_clmul = VM_Version::supports_clmul();
5827 StubRoutines::x86::generate_CRC32C_table(supports_clmul);
5828 StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
5829 StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
5830 }
5831 if (VM_Version::supports_sse2() && UseLibmIntrinsic && InlineIntrinsics) {
5832 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) ||
5833 vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos) ||
5834 vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
5835 StubRoutines::x86::_ONEHALF_adr = (address)StubRoutines::x86::_ONEHALF;
5836 StubRoutines::x86::_P_2_adr = (address)StubRoutines::x86::_P_2;
5837 StubRoutines::x86::_SC_4_adr = (address)StubRoutines::x86::_SC_4;
5838 StubRoutines::x86::_Ctable_adr = (address)StubRoutines::x86::_Ctable;
5839 StubRoutines::x86::_SC_2_adr = (address)StubRoutines::x86::_SC_2;
5840 StubRoutines::x86::_SC_3_adr = (address)StubRoutines::x86::_SC_3;
5841 StubRoutines::x86::_SC_1_adr = (address)StubRoutines::x86::_SC_1;
5842 StubRoutines::x86::_PI_INV_TABLE_adr = (address)StubRoutines::x86::_PI_INV_TABLE;
5843 StubRoutines::x86::_PI_4_adr = (address)StubRoutines::x86::_PI_4;
5844 StubRoutines::x86::_PI32INV_adr = (address)StubRoutines::x86::_PI32INV;
5845 StubRoutines::x86::_SIGN_MASK_adr = (address)StubRoutines::x86::_SIGN_MASK;
5846 StubRoutines::x86::_P_1_adr = (address)StubRoutines::x86::_P_1;
5847 StubRoutines::x86::_P_3_adr = (address)StubRoutines::x86::_P_3;
5848 StubRoutines::x86::_NEG_ZERO_adr = (address)StubRoutines::x86::_NEG_ZERO;
5849 }
5850 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dexp)) {
5851 StubRoutines::_dexp = generate_libmExp();
5852 }
5853 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
5854 StubRoutines::_dlog = generate_libmLog();
5855 }
5856 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) {
5857 StubRoutines::_dlog10 = generate_libmLog10();
5858 }
5859 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) {
5860 StubRoutines::_dpow = generate_libmPow();
5861 }
5862 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
5863 StubRoutines::_dsin = generate_libmSin();
5864 }
5865 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
5866 StubRoutines::_dcos = generate_libmCos();
5867 }
5868 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
5869 StubRoutines::_dtan = generate_libmTan();
5870 }
5871 }
5872 }
5873
5874 void generate_all() {
5875 // Generates all stubs and initializes the entry points
5876
5877 // These entry points require SharedInfo::stack0 to be set up in
5878 // non-core builds and need to be relocatable, so they each
5879 // fabricate a RuntimeStub internally.
5880 StubRoutines::_throw_AbstractMethodError_entry = 5881 generate_throw_exception("AbstractMethodError throw_exception", 5882 CAST_FROM_FN_PTR(address, 5883 SharedRuntime:: 5884 throw_AbstractMethodError)); 5885 5886 StubRoutines::_throw_IncompatibleClassChangeError_entry = 5887 generate_throw_exception("IncompatibleClassChangeError throw_exception", 5888 CAST_FROM_FN_PTR(address, 5889 SharedRuntime:: 5890 throw_IncompatibleClassChangeError)); 5891 5892 StubRoutines::_throw_NullPointerException_at_call_entry = 5893 generate_throw_exception("NullPointerException at call throw_exception", 5894 CAST_FROM_FN_PTR(address, 5895 SharedRuntime:: 5896 throw_NullPointerException_at_call)); 5897 5898 // entry points that are platform specific 5899 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup(); 5900 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup(); 5901 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup(); 5902 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup(); 5903 5904 StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF); 5905 StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000); 5906 StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF); 5907 StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000); 5908 StubRoutines::x86::_vector_float_sign_mask = generate_vector_mask("vector_float_sign_mask", 0x7FFFFFFF7FFFFFFF); 5909 StubRoutines::x86::_vector_float_sign_flip = generate_vector_mask("vector_float_sign_flip", 0x8000000080000000); 5910 StubRoutines::x86::_vector_double_sign_mask = generate_vector_mask("vector_double_sign_mask", 0x7FFFFFFFFFFFFFFF); 5911 StubRoutines::x86::_vector_double_sign_flip = generate_vector_mask("vector_double_sign_flip", 0x8000000000000000); 5912 StubRoutines::x86::_vector_short_to_byte_mask = generate_vector_mask("vector_short_to_byte_mask", 0x00ff00ff00ff00ff); 5913 StubRoutines::x86::_vector_byte_perm_mask = generate_vector_byte_perm_mask("vector_byte_perm_mask"); 5914 StubRoutines::x86::_vector_long_sign_mask = generate_vector_mask("vector_long_sign_mask", 0x8000000000000000); 5915 5916 // support for verify_oop (must happen after universe_init) 5917 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); 5918 5919 // arraycopy stubs used by compilers 5920 generate_arraycopy_stubs(); 5921 5922 // don't bother generating these AES intrinsic stubs unless global flag is set 5923 if (UseAESIntrinsics) { 5924 StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others 5925 StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock(); 5926 StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock(); 5927 StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt(); 5928 if (VM_Version::supports_vaes() && VM_Version::supports_avx512vl() && VM_Version::supports_avx512dq() ) { 5929 StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptVectorAESCrypt(); 5930 } else { 5931 StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel(); 5932 } 5933 } 5934 if (UseAESCTRIntrinsics){ 5935 StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask(); 5936 StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel(); 5937 } 5938 5939 if (UseSHA1Intrinsics) { 5940 
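// Generate the masks first; the SHA-1 compress stubs below reference them.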
StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask(); 5941 StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask(); 5942 StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress"); 5943 StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB"); 5944 } 5945 if (UseSHA256Intrinsics) { 5946 StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256; 5947 char* dst = (char*)StubRoutines::x86::_k256_W; 5948 char* src = (char*)StubRoutines::x86::_k256; 5949 for (int ii = 0; ii < 16; ++ii) { 5950 memcpy(dst + 32 * ii, src + 16 * ii, 16); 5951 memcpy(dst + 32 * ii + 16, src + 16 * ii, 16); 5952 } 5953 StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W; 5954 StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask(); 5955 StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress"); 5956 StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB"); 5957 } 5958 if (UseSHA512Intrinsics) { 5959 StubRoutines::x86::_k512_W_addr = (address)StubRoutines::x86::_k512_W; 5960 StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = generate_pshuffle_byte_flip_mask_sha512(); 5961 StubRoutines::_sha512_implCompress = generate_sha512_implCompress(false, "sha512_implCompress"); 5962 StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB"); 5963 } 5964 5965 // Generate GHASH intrinsics code 5966 if (UseGHASHIntrinsics) { 5967 StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask(); 5968 StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask(); 5969 if (VM_Version::supports_avx()) { 5970 StubRoutines::x86::_ghash_shuffmask_addr = ghash_shufflemask_addr(); 5971 StubRoutines::x86::_ghash_poly_addr = ghash_polynomial_addr(); 5972 StubRoutines::_ghash_processBlocks = generate_avx_ghash_processBlocks(); 5973 } else { 5974 StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks(); 5975 } 5976 } 5977 5978 if (UseBASE64Intrinsics) { 5979 StubRoutines::x86::_and_mask = base64_and_mask_addr(); 5980 StubRoutines::x86::_bswap_mask = base64_bswap_mask_addr(); 5981 StubRoutines::x86::_base64_charset = base64_charset_addr(); 5982 StubRoutines::x86::_url_charset = base64url_charset_addr(); 5983 StubRoutines::x86::_gather_mask = base64_gather_mask_addr(); 5984 StubRoutines::x86::_left_shift_mask = base64_left_shift_mask_addr(); 5985 StubRoutines::x86::_right_shift_mask = base64_right_shift_mask_addr(); 5986 StubRoutines::_base64_encodeBlock = generate_base64_encodeBlock(); 5987 } 5988 5989 // Safefetch stubs. 
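// A SafeFetch stub is a single load that is allowed to fault: if the load at the
// registered fault PC traps, the signal handler resumes at the continuation PC and
// the caller-supplied error value is returned instead of the loaded one.
// Conceptually: SafeFetch32(adr, errValue) == (*adr, or errValue if the load faults).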
5990 generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry, 5991 &StubRoutines::_safefetch32_fault_pc, 5992 &StubRoutines::_safefetch32_continuation_pc); 5993 generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry, 5994 &StubRoutines::_safefetchN_fault_pc, 5995 &StubRoutines::_safefetchN_continuation_pc); 5996 5997 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); 5998 if (bs_nm != NULL) { 5999 StubRoutines::x86::_method_entry_barrier = generate_method_entry_barrier(); 6000 } 6001 #ifdef COMPILER2 6002 if (UseMultiplyToLenIntrinsic) { 6003 StubRoutines::_multiplyToLen = generate_multiplyToLen(); 6004 } 6005 if (UseSquareToLenIntrinsic) { 6006 StubRoutines::_squareToLen = generate_squareToLen(); 6007 } 6008 if (UseMulAddIntrinsic) { 6009 StubRoutines::_mulAdd = generate_mulAdd(); 6010 } 6011 #ifndef _WINDOWS 6012 if (UseMontgomeryMultiplyIntrinsic) { 6013 StubRoutines::_montgomeryMultiply 6014 = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply); 6015 } 6016 if (UseMontgomerySquareIntrinsic) { 6017 StubRoutines::_montgomerySquare 6018 = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square); 6019 } 6020 #endif // WINDOWS 6021 #endif // COMPILER2 6022 6023 if (UseVectorizedMismatchIntrinsic) { 6024 StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch(); 6025 } 6026 } 6027 6028 public: 6029 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { 6030 if (all) { 6031 generate_all(); 6032 } else { 6033 generate_initial(); 6034 } 6035 } 6036 }; // end class declaration 6037 6038 void StubGenerator_generate(CodeBuffer* code, bool all) { 6039 StubGenerator g(code, all); 6040 }