/*
 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/zThreadLocalData.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions

// Stub Code definitions

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    // This can destroy rscratch1 if counter is far from the code cache
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter) \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif

  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    c_rarg4:   (interpreter) entry point              address
  //    c_rarg5:   parameters                             intptr_t*
  //    16(rbp):   parameter size (in words)              int
  //    24(rbp):   thread                                 Thread*
  //
  //      [ return_from_Java     ] <--- rsp
  //      [ argument word n      ]
  //      ...
  //  -12 [ argument word 1      ]
  //  -11 [ saved r15            ] <--- rsp_after_call
  //  -10 [ saved r14            ]
  //   -9 [ saved r13            ]
  //   -8 [ saved r12            ]
  //   -7 [ saved rbx            ]
  //   -6 [ call wrapper         ]
  //   -5 [ result               ]
  //   -4 [ result type          ]
  //   -3 [ method               ]
  //   -2 [ entry point          ]
  //   -1 [ parameters           ]
  //    0 [ saved rbp            ] <--- rbp
  //    1 [ return address       ]
  //    2 [ parameter size       ]
  //    3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    48(rbp):   (interpreter) entry point              address
  //    56(rbp):   parameters                             intptr_t*
  //    64(rbp):   parameter size (in words)              int
  //    72(rbp):   thread                                 Thread*
  //
  //      [ return_from_Java     ] <--- rsp
  //      [ argument word n      ]
  //      ...
  //  -60 [ argument word 1      ]
  //  -59 [ saved xmm31          ] <--- rsp_after_call
  //      [ saved xmm16-xmm30    ] (EVEX enabled, else the space is blank)
  //  -27 [ saved xmm15          ]
  //      [ saved xmm7-xmm14     ]
  //   -9 [ saved xmm6           ] (each xmm register takes 2 slots)
  //   -7 [ saved r15            ]
  //   -6 [ saved r14            ]
  //   -5 [ saved r13            ]
  //   -4 [ saved r12            ]
  //   -3 [ saved rdi            ]
  //   -2 [ saved rsi            ]
  //   -1 [ saved rbx            ]
  //    0 [ saved rbp            ] <--- rbp
  //    1 [ return address       ]
  //    2 [ call wrapper         ]
  //    3 [ result               ]
  //    4 [ result type          ]
  //    5 [ method               ]
  //    6 [ entry point          ]
  //    7 [ parameters           ]
  //    8 [ parameter size       ]
  //    9 [ thread               ]
  //
  //    Windows reserves the caller's stack space for arguments 1-4.
  //    We spill c_rarg0-c_rarg3 to this space.

  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    xmm_save_first     = 6,  // save from xmm6
    xmm_save_last      = 31, // to xmm31
    xmm_save_base      = -9,
    rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -59
    r15_off            = -7,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            = 0,
    retaddr_off        = 1,
    call_wrapper_off   = 2,
    result_off         = 3,
    result_type_off    = 4,
    method_off         = 5,
    entry_point_off    = 6,
    parameters_off     = 7,
    parameter_size_off = 8,
    thread_off         = 9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            = 0,
    retaddr_off        = 1,
    parameter_size_off = 2,
    thread_off         = 3
#endif
  };

#ifdef _WIN64
  Address xmm_save(int reg) {
    assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
    return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
  }
#endif

  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,   c_rarg5); // parameters
    __ movptr(entry_point,  c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);

#ifdef _WIN64
    int last_reg = 15;
    if (UseAVX > 2) {
      last_reg = 31;
    }
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vextractf32x4(xmm_save(i), as_XMMRegister(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(xmm_save(i), as_XMMRegister(i));
      }
    }

    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);      // parameter pointer
    __ movl(c_rarg1, c_rarg3);           // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0)); // get parameter
    __ addptr(c_rarg2, wordSize);        // advance to next parameter
    __ decrementl(c_rarg1);              // decrement counter
    __ push(rax);                        // pass parameter
    __ jcc(Assembler::notZero, loop);
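
    // Note (illustrative recap of the loop above): the parameter array is
    // walked from index 0 upward and each word is pushed in turn, so
    // parameters[0] ends up deepest on the stack and the last parameter
    // sits on top when the Java entry point is called.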
    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);          // get Method*
    __ movptr(c_rarg1, entry_point); // get entry_point
    __ mov(r13, rsp);                // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::call_stub: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::call_stub: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L3);
    }
#endif

    // restore regs belonging to calling function
#ifdef _WIN64
    // emit the restores for xmm regs
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vinsertf32x4(as_XMMRegister(i), as_XMMRegister(i), xmm_save(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(as_XMMRegister(i), xmm_save(i));
      }
    }
#endif
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ vzeroupper();
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }
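
  // For orientation (a sketch; see stubRoutines.hpp for the authoritative
  // declaration): the generated stub is invoked from C++ through a function
  // pointer of roughly this shape, matching the argument list documented
  // above plus the thread:
  //
  //   typedef void (*CallStub)(address   link,            // call wrapper
  //                            intptr_t* result,
  //                            BasicType result_type,
  //                            Method*   method,
  //                            address   entry_point,
  //                            intptr_t* parameters,
  //                            int       size_of_parameters,
  //                            TRAPS);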

  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to setup the
  // rsp.
  //
  // rax: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    address start = __ pc();

    // same as in generate_call_stub():
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
    const Address thread        (rbp, thread_off         * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::catch_exception: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::catch_exception: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L3);
    }
#endif

    // set pending exception
    __ verify_oop(rax);

    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
    __ lea(rscratch1, ExternalAddress((address)__FILE__));
    __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
    __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL,
           "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }

  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }

  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0);              // Copy to eax, we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::xchg_long(jlong exchange_value, volatile jlong* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0);              // Copy to eax, we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }
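
  // Illustrative C equivalent of the LOCK CMPXCHG sequence above (a sketch,
  // not the authoritative contract): the old value always ends up in rax,
  // which on success happens to equal compare_value.
  //
  //   jint atomic_cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
  //     jint old = *dest;
  //     if (old == compare_value) {
  //       *dest = exchange_value;
  //     }
  //     return old;
  //   }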

  // Support for int8_t atomic::atomic_cmpxchg(int8_t exchange_value, volatile int8_t* dest,
  //                                           int8_t compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_byte() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
    address start = __ pc();

    __ movsbq(rax, c_rarg2);
    __ lock();
    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for int64_t atomic::atomic_cmpxchg(int64_t exchange_value,
  //                                            volatile int64_t* dest,
  //                                            int64_t compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);
    __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jint atomic::add(jint add_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);
    __ addl(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax, we need a return value anyhow
    __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
    __ addptr(rax, c_rarg0);
    __ ret(0);

    return start;
  }
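
  // Why the extra add (an explanatory note): LOCK XADD stores the old value
  // of *dest back into the source register (c_rarg0) while writing the sum
  // to memory. Since rax still holds add_value, rax + c_rarg0 reconstructs
  // the new value *dest += add_value that the contract above promises.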

  // Support for intptr_t OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess). This is used as part of debugging
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp(rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // caller's fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_sp()
  //
  // This routine is used to find the previous stack pointer for the
  // caller.
  address generate_get_previous_sp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_sp");
    address start = __ pc();

    __ movptr(rax, rsp);
    __ addptr(rax, 8); // return address is at the top of the stack.
    __ ret(0);

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize); // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK); // Only check control and mask bits
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }

  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1);      // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
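
  // How the fixup decides (an explanatory note): with rax = 0x7f800000 (the
  // float exponent mask) and c_rarg1 = bits & 0x7fffffff, the comparison
  // rax - c_rarg1 is negative exactly when the input is a NaN, which per
  // Java semantics of (int)f must convert to 0 (c_rarg3 was pre-zeroed).
  // Otherwise the input overflowed the hardware conversion, so its sign bit
  // alone selects min_jint for negative inputs and max_jint for positive
  // ones.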

  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1);      // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testptr(c_rarg0, c_rarg0);   // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
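
  // The double variant needs a two-word NaN test (an explanatory note):
  // c_rarg3 = (-lo | lo) >> 31 collapses the low word into a single
  // "low bits present" flag, which is OR'ed into the sign-cleared high word
  // in c_rarg1. The composite exceeds 0x7ff00000 exactly when the input is
  // a NaN, so the cmpl/jcc(negative, L) pair stores the pre-zeroed result
  // for NaNs and falls through to the min/max selection only for genuine
  // overflows (including the infinities).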

  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0);     // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64( mask, relocInfo::none );
    __ emit_data64( mask, relocInfo::none );

    return start;
  }

  address generate_vector_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);

    return start;
  }

  address generate_vector_byte_perm_mask(const char *stub_name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64(0x0000000000000001, relocInfo::none);
    __ emit_data64(0x0000000000000003, relocInfo::none);
    __ emit_data64(0x0000000000000005, relocInfo::none);
    __ emit_data64(0x0000000000000007, relocInfo::none);
    __ emit_data64(0x0000000000000000, relocInfo::none);
    __ emit_data64(0x0000000000000002, relocInfo::none);
    __ emit_data64(0x0000000000000004, relocInfo::none);
    __ emit_data64(0x0000000000000006, relocInfo::none);

    return start;
  }
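
  // Note (an assumption about the consumers, for orientation): these three
  // generators emit data into the code cache, not instructions.
  // generate_fp_mask lays down a 16-byte (128-bit) literal and
  // generate_vector_mask a 64-byte (512-bit) one, replicated from the same
  // 64-bit pattern; typical users are sign-mask constants for float
  // abs/negate style bitwise ops and vector mask operands, which need such
  // aligned in-code-cache constants.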

  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,
      saved_r10     = 8 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr   = 16 * wordSize,
      error_msg     = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit); // if obj is NULL it is OK

#if INCLUDE_ZGC
    if (UseZGC) {
      // Check if metadata bits indicate a bad oop
      __ testptr(rax, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
      __ jcc(Assembler::notZero, error);
    }
#endif

    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // set r12 to heapbase for load_klass()
    __ reinit_heapbase();

    // make sure klass is 'reasonable', which is not zero.
    __ load_klass(rax, rax); // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // restore c_rarg3
    __ pop(c_rarg2);                               // restore c_rarg2
    __ pop(r12);                                   // restore r12
    __ popf();                                     // restore flags
    __ ret(4 * wordSize);                          // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // get saved c_rarg3 back
    __ pop(c_rarg2);                               // get saved c_rarg2 back
    __ pop(r12);                                   // get saved r12 back
    __ popf();                                     // get saved flags off stack --
                                                   // will be ignored

    __ pusha();                                    // push registers
                                                   // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //     [tos +  0] 16 saved registers
    //     [tos + 16] return address
    //   * [tos + 17] error message (char*)
    //   * [tos + 18] object to verify (oop)
    //   * [tos + 19] saved rax - saved by caller and bashed
    //   * [tos + 20] saved r10 (rscratch1) - saved by caller
    //   * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ mov(rsp, r12);                               // restore rsp
    __ popa();                                      // pop registers (includes r12)
    __ ret(4 * wordSize);                           // pop caller saved stuff

    return start;
  }
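
  // Calling protocol recap (derived from the layout comments above): the
  // caller - MacroAssembler's verify_oop support code - has already pushed
  // r10, rax, the oop to check and the error-message pointer before calling
  // here, which is why both exits return with ret(4 * wordSize) to pop those
  // four starred words.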

  //
  // Verify that a register contains clean 32-bits positive value
  // (high 32-bits are 0) so it could be used in 64-bits shifts.
  //
  //  Input:
  //    Rint  -  32-bits value
  //    Rtmp  -  scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#ifdef ASSERT
    Label L;
    assert_different_registers(Rtmp, Rint);
    __ movslq(Rtmp, Rint);
    __ cmpq(Rtmp, Rint);
    __ jcc(Assembler::equal, L);
    __ stop("high 32-bits of int value are not 0");
    __ bind(L);
#endif
  }

  //  Generate overlap test for array copy stubs
  //
  //  Input:
  //     c_rarg0 - from
  //     c_rarg1 - to
  //     c_rarg2 - element count
  //
  //  Output:
  //     rax   - &from[element count - 1]
  //
  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(NULL, &L_no_overlap, sf);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
    const Register from     = c_rarg0;
    const Register to       = c_rarg1;
    const Register count    = c_rarg2;
    const Register end_from = rax;

    __ cmpptr(to, from);
    __ lea(end_from, Address(from, count, sf, 0));
    if (NOLp == NULL) {
      ExternalAddress no_overlap(no_overlap_target);
      __ jump_cc(Assembler::belowEqual, no_overlap);
      __ cmpptr(to, end_from);
      __ jump_cc(Assembler::aboveEqual, no_overlap);
    } else {
      __ jcc(Assembler::belowEqual, (*NOLp));
      __ cmpptr(to, end_from);
      __ jcc(Assembler::aboveEqual, (*NOLp));
    }
  }

  // Shuffle first three arg regs on Windows into Linux/Solaris locations.
  //
  // Outputs:
  //    rdi - rcx
  //    rsi - rdx
  //    rdx - r8
  //    rcx - r9
  //
  // Registers r9 and r10 are used to save rdi and rsi, which are
  // non-volatile on Windows. r9 and r10 should not be used by the caller.
  //
  DEBUG_ONLY(bool regs_in_thread;)

  void setup_arg_regs(int nargs = 3) {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
    assert(nargs == 3 || nargs == 4, "else fix");
#ifdef _WIN64
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    if (nargs >= 4)
      __ mov(rax, r9); // r9 is also saved_rdi
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
    if (nargs >= 4)
      __ mov(rcx, rax); // c_rarg3 (via rax)
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
    DEBUG_ONLY(regs_in_thread = false;)
  }

  void restore_arg_regs() {
    assert(!regs_in_thread, "wrong call to restore_arg_regs");
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
#ifdef _WIN64
    __ movptr(rdi, saved_rdi);
    __ movptr(rsi, saved_rsi);
#endif
  }
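
  // Usage pattern (a recap of the convention above): a copy stub brackets
  // its body with setup_arg_regs()/restore_arg_regs() so the same
  // rdi/rsi/rdx register assignments work on both Windows and System V
  // targets; the DEBUG_ONLY flag regs_in_thread asserts that the matching
  // restore variant is used.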

  // This is used in places where r10 is a scratch register, and can
  // be adapted if r9 is needed also.
  void setup_arg_regs_using_thread() {
    const Register saved_r15 = r9;
#ifdef _WIN64
    __ mov(saved_r15, r15); // r15 is callee saved and needs to be restored
    __ get_thread(r15_thread);
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    __ movptr(Address(r15_thread, in_bytes(JavaThread::windows_saved_rdi_offset())), rdi);
    __ movptr(Address(r15_thread, in_bytes(JavaThread::windows_saved_rsi_offset())), rsi);

    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
    DEBUG_ONLY(regs_in_thread = true;)
  }

  void restore_arg_regs_using_thread() {
    assert(regs_in_thread, "wrong call to restore_arg_regs");
    const Register saved_r15 = r9;
#ifdef _WIN64
    __ get_thread(r15_thread);
    __ movptr(rsi, Address(r15_thread, in_bytes(JavaThread::windows_saved_rsi_offset())));
    __ movptr(rdi, Address(r15_thread, in_bytes(JavaThread::windows_saved_rdi_offset())));
    __ mov(r15, saved_r15); // r15 is callee saved and needs to be restored
#endif
  }

  // Copy big chunks forward
  //
  // Inputs:
  //   end_from       - source array's end address
  //   end_to         - destination array's end address
  //   qword_count    - 64-bit element count, negative
  //   to             - scratch
  //   L_copy_bytes   - entry label
  //   L_copy_8_bytes - exit label
  //
  void copy_bytes_forward(Register end_from, Register end_to,
                          Register qword_count, Register to,
                          Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      // Copy 64-bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit);
        __ evmovdqul(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
        __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
        __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 8);
      __ jcc(Assembler::lessEqual, L_loop);
      __ subptr(qword_count, 4); // sub(8) and add(4)
      __ jccb(Assembler::greater, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
      }
      __ addptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32-bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
      __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
      __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
      __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
      __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);

      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 4);
      __ jcc(Assembler::lessEqual, L_loop);
    }
    __ subptr(qword_count, 4);
    __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
  }

  // Copy big chunks backward
  //
  // Inputs:
  //   from           - source array's address
  //   dest           - destination array's address
  //   qword_count    - 64-bit element count
  //   to             - scratch
  //   L_copy_bytes   - entry label
  //   L_copy_8_bytes - exit label
  //
  void copy_bytes_backward(Register from, Register dest,
                           Register qword_count, Register to,
                           Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      // Copy 64-bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 0), Assembler::AVX_512bit);
        __ evmovdqul(Address(dest, qword_count, Address::times_8, 0), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
        __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
        __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
        __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
        __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
        __ movdqu(xmm3, Address(from, qword_count, Address::times_8, 0));
        __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 8);
      __ jcc(Assembler::greaterEqual, L_loop);

      __ addptr(qword_count, 4); // add(8) and sub(4)
      __ jccb(Assembler::less, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
        __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
      }
      __ subptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32-bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(from, qword_count, Address::times_8, 24));
      __ movq(Address(dest, qword_count, Address::times_8, 24), to);
      __ movq(to, Address(from, qword_count, Address::times_8, 16));
      __ movq(Address(dest, qword_count, Address::times_8, 16), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  8));
      __ movq(Address(dest, qword_count, Address::times_8,  8), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  0));
      __ movq(Address(dest, qword_count, Address::times_8,  0), to);

      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 4);
      __ jcc(Assembler::greaterEqual, L_loop);
    }
    __ addptr(qword_count, 4);
    __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
  }
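
  // Loop protocol (an explanatory note): callers jump to L_copy_bytes, not
  // to the top of these routines - the DEBUG_ONLY stop() guards against
  // that. copy_bytes_forward runs a negative qword_count up toward zero so
  // that (end_from, qword_count, times_8) addressing walks the arrays
  // forward; copy_bytes_backward counts a positive qword_count down. Any
  // remaining qwords are handled one at a time at the caller's
  // L_copy_8_bytes label.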

  // Arguments:
  //    aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //              ignored
  //    name    - stub name string
  //
  // Inputs:
  //    c_rarg0   - source array address
  //    c_rarg1   - destination array address
  //    c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_byte_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_byte_copy().
  //
  address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    Label L_copy_byte, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !aligned, true);
      // 'from', 'to' and 'count' are now valid
      __ movptr(byte_count, count);
      __ shrptr(count, 3); // count => qword_count

      // Copy from low to high addresses.  Use 'to' as scratch.
      __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
      __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
      __ negptr(qword_count); // make the count negative
      __ jmp(L_copy_bytes);

      // Copy trailing qwords
      __ BIND(L_copy_8_bytes);
      __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
      __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
      __ increment(qword_count);
      __ jcc(Assembler::notZero, L_copy_8_bytes);

      // Check for and copy trailing dword
      __ BIND(L_copy_4_bytes);
      __ testl(byte_count, 4);
      __ jccb(Assembler::zero, L_copy_2_bytes);
      __ movl(rax, Address(end_from, 8));
      __ movl(Address(end_to, 8), rax);

      __ addptr(end_from, 4);
      __ addptr(end_to, 4);

      // Check for and copy trailing word
      __ BIND(L_copy_2_bytes);
      __ testl(byte_count, 2);
      __ jccb(Assembler::zero, L_copy_byte);
      __ movw(rax, Address(end_from, 8));
      __ movw(Address(end_to, 8), rax);

      __ addptr(end_from, 2);
      __ addptr(end_to, 2);

      // Check for and copy trailing byte
      __ BIND(L_copy_byte);
      __ testl(byte_count, 1);
      __ jccb(Assembler::zero, L_exit);
      __ movb(rax, Address(end_from, 8));
      __ movb(Address(end_to, 8), rax);
    }
    __ BIND(L_exit);
    address ucme_exit_pc = __ pc();
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    {
      UnsafeCopyMemoryMark ucmm(this, !aligned, false, ucme_exit_pc);
      // Copy in multi-bytes chunks
      copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
      __ jmp(L_copy_4_bytes);
    }
    return start;
  }
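
  // About UnsafeCopyMemoryMark (an explanatory note): the marked PC ranges
  // are registered so that a page fault raised inside an Unsafe.copyMemory
  // copy does not crash the VM; the signal handler looks the faulting PC up
  // and resumes execution at the recorded exit PC (ucme_exit_pc above)
  // instead. The mark is only armed when !aligned, i.e. for the entry used
  // by Unsafe rather than by regular arraycopy.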

  // Arguments:
  //    aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //              ignored
  //    name    - stub name string
  //
  // Inputs:
  //    c_rarg0   - source array address
  //    c_rarg1   - destination array address
  //    c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !aligned, true);
      // 'from', 'to' and 'count' are now valid
      __ movptr(byte_count, count);
      __ shrptr(count, 3); // count => qword_count

      // Copy from high to low addresses.

      // Check for and copy trailing byte
      __ testl(byte_count, 1);
      __ jcc(Assembler::zero, L_copy_2_bytes);
      __ movb(rax, Address(from, byte_count, Address::times_1, -1));
      __ movb(Address(to, byte_count, Address::times_1, -1), rax);
      __ decrement(byte_count); // Adjust for possible trailing word

      // Check for and copy trailing word
      __ BIND(L_copy_2_bytes);
      __ testl(byte_count, 2);
      __ jcc(Assembler::zero, L_copy_4_bytes);
      __ movw(rax, Address(from, byte_count, Address::times_1, -2));
      __ movw(Address(to, byte_count, Address::times_1, -2), rax);

      // Check for and copy trailing dword
      __ BIND(L_copy_4_bytes);
      __ testl(byte_count, 4);
      __ jcc(Assembler::zero, L_copy_bytes);
      __ movl(rax, Address(from, qword_count, Address::times_8));
      __ movl(Address(to, qword_count, Address::times_8), rax);
      __ jmp(L_copy_bytes);

      // Copy trailing qwords
      __ BIND(L_copy_8_bytes);
      __ movq(rax, Address(from, qword_count, Address::times_8, -8));
      __ movq(Address(to, qword_count, Address::times_8, -8), rax);
      __ decrement(qword_count);
      __ jcc(Assembler::notZero, L_copy_8_bytes);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !aligned, true);
      // Copy in multi-bytes chunks
      copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //    aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //              ignored
  //    name    - stub name string
  //
  // Inputs:
  //    c_rarg0   - source array address
  //    c_rarg1   - destination array address
  //    c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_short_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_short_copy().
  //
  address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    {
      // UnsafeCopyMemory page error: continue after ucm
      UnsafeCopyMemoryMark ucmm(this, !aligned, true);
      // 'from', 'to' and 'count' are now valid
      __ movptr(word_count, count);
      __ shrptr(count, 2); // count => qword_count

      // Copy from low to high addresses.  Use 'to' as scratch.
      __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
      __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
      __ negptr(qword_count);
      __ jmp(L_copy_bytes);

      // Copy trailing qwords
      __ BIND(L_copy_8_bytes);
      __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
      __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
      __ increment(qword_count);
      __ jcc(Assembler::notZero, L_copy_8_bytes);

      // Original 'dest' is trashed, so we can't use it as a
      // base register for a possible trailing word copy

      // Check for and copy trailing dword
      __ BIND(L_copy_4_bytes);
      __ testl(word_count, 2);
      __ jccb(Assembler::zero, L_copy_2_bytes);
      __ movl(rax, Address(end_from, 8));
      __ movl(Address(end_to, 8), rax);

      __ addptr(end_from, 4);
      __ addptr(end_to, 4);

      // Check for and copy trailing word
      __ BIND(L_copy_2_bytes);
      __ testl(word_count, 1);
      __ jccb(Assembler::zero, L_exit);
      __ movw(rax, Address(end_from, 8));
      __ movw(Address(end_to, 8), rax);
    }
    __ BIND(L_exit);
    address ucme_exit_pc = __ pc();
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    {
      UnsafeCopyMemoryMark ucmm(this, !aligned, false, ucme_exit_pc);
      // Copy in multi-bytes chunks
      copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
      __ jmp(L_copy_4_bytes);
    }

    return start;
  }

  address generate_fill(BasicType t, bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    BLOCK_COMMENT("Entry:");

    const Register to    = c_rarg0; // destination array address
    const Register value = c_rarg1; // value
    const Register count = c_rarg2; // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ generate_fill(t, aligned, to, value, count, rax, xmm0);

    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }
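
  // Usage sketch (assumes the usual StubRoutines wiring done elsewhere in
  // this file): generate_fill is instantiated once per element width, e.g.
  //
  //   StubRoutines::_jbyte_fill  = generate_fill(T_BYTE,  false, "jbyte_fill");
  //   StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
  //   StubRoutines::_jint_fill   = generate_fill(T_INT,   false, "jint_fill");
  //
  // with the heavy lifting done by the MacroAssembler generate_fill helper.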
1792 // 1793 address generate_conjoint_short_copy(bool aligned, address nooverlap_target, 1794 address *entry, const char *name) { 1795 __ align(CodeEntryAlignment); 1796 StubCodeMark mark(this, "StubRoutines", name); 1797 address start = __ pc(); 1798 1799 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes; 1800 const Register from = rdi; // source array address 1801 const Register to = rsi; // destination array address 1802 const Register count = rdx; // elements count 1803 const Register word_count = rcx; 1804 const Register qword_count = count; 1805 1806 __ enter(); // required for proper stackwalking of RuntimeStub frame 1807 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1808 1809 if (entry != NULL) { 1810 *entry = __ pc(); 1811 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1812 BLOCK_COMMENT("Entry:"); 1813 } 1814 1815 array_overlap_test(nooverlap_target, Address::times_2); 1816 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1817 // r9 and r10 may be used to save non-volatile registers 1818 1819 { 1820 // UnsafeCopyMemory page error: continue after ucm 1821 UnsafeCopyMemoryMark ucmm(this, !aligned, true); 1822 // 'from', 'to' and 'count' are now valid 1823 __ movptr(word_count, count); 1824 __ shrptr(count, 2); // count => qword_count 1825 1826 // Copy from high to low addresses. Use 'to' as scratch. 1827 1828 // Check for and copy trailing word 1829 __ testl(word_count, 1); 1830 __ jccb(Assembler::zero, L_copy_4_bytes); 1831 __ movw(rax, Address(from, word_count, Address::times_2, -2)); 1832 __ movw(Address(to, word_count, Address::times_2, -2), rax); 1833 1834 // Check for and copy trailing dword 1835 __ BIND(L_copy_4_bytes); 1836 __ testl(word_count, 2); 1837 __ jcc(Assembler::zero, L_copy_bytes); 1838 __ movl(rax, Address(from, qword_count, Address::times_8)); 1839 __ movl(Address(to, qword_count, Address::times_8), rax); 1840 __ jmp(L_copy_bytes); 1841 1842 // Copy trailing qwords 1843 __ BIND(L_copy_8_bytes); 1844 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1845 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1846 __ decrement(qword_count); 1847 __ jcc(Assembler::notZero, L_copy_8_bytes); 1848 } 1849 restore_arg_regs(); 1850 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1851 __ xorptr(rax, rax); // return 0 1852 __ vzeroupper(); 1853 __ leave(); // required for proper stackwalking of RuntimeStub frame 1854 __ ret(0); 1855 1856 { 1857 // UnsafeCopyMemory page error: continue after ucm 1858 UnsafeCopyMemoryMark ucmm(this, !aligned, true); 1859 // Copy in multi-bytes chunks 1860 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1861 } 1862 restore_arg_regs(); 1863 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1864 __ xorptr(rax, rax); // return 0 1865 __ vzeroupper(); 1866 __ leave(); // required for proper stackwalking of RuntimeStub frame 1867 __ ret(0); 1868 1869 return start; 1870 } 1871 1872 // Arguments: 1873 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1874 // ignored 1875 // is_oop - true => oop array, so generate store check code 1876 // name - stub name string 1877 // 1878 // Inputs: 1879 // c_rarg0 - source array address 1880 // c_rarg1 - destination array address 1881 // c_rarg2 - element count, treated as ssize_t, can be zero 1882 // 1883 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 1884 // the 
hardware handle it. The two dwords within qwords that span 1885 // cache line boundaries will still be loaded and stored atomically. 1886 // 1887 // Side Effects: 1888 // disjoint_int_copy_entry is set to the no-overlap entry point 1889 // used by generate_conjoint_int_oop_copy(). 1890 // 1891 address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry, 1892 const char *name, bool dest_uninitialized = false) { 1893 __ align(CodeEntryAlignment); 1894 StubCodeMark mark(this, "StubRoutines", name); 1895 address start = __ pc(); 1896 1897 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit; 1898 const Register from = rdi; // source array address 1899 const Register to = rsi; // destination array address 1900 const Register count = rdx; // elements count 1901 const Register dword_count = rcx; 1902 const Register qword_count = count; 1903 const Register end_from = from; // source array end address 1904 const Register end_to = to; // destination array end address 1905 // End pointers are inclusive, and if count is not zero they point 1906 // to the last unit copied: end_to[0] := end_from[0] 1907 1908 __ enter(); // required for proper stackwalking of RuntimeStub frame 1909 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1910 1911 if (entry != NULL) { 1912 *entry = __ pc(); 1913 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1914 BLOCK_COMMENT("Entry:"); 1915 } 1916 1917 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx 1918 // r9 is used to save r15_thread 1919 1920 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT; 1921 if (dest_uninitialized) { 1922 decorators |= IS_DEST_UNINITIALIZED; 1923 } 1924 if (aligned) { 1925 decorators |= ARRAYCOPY_ALIGNED; 1926 } 1927 1928 BasicType type = is_oop ? T_OBJECT : T_INT; 1929 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 1930 bs->arraycopy_prologue(_masm, decorators, type, from, to, count); 1931 1932 { 1933 // UnsafeCopyMemory page error: continue after ucm 1934 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 1935 // 'from', 'to' and 'count' are now valid 1936 __ movptr(dword_count, count); 1937 __ shrptr(count, 1); // count => qword_count 1938 1939 // Copy from low to high addresses. Use 'to' as scratch.
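// (After the shift above, 'count' holds the qword count: n ints form
// n >> 1 qwords, plus one trailing dword handled at L_copy_4_bytes when
// dword_count is odd.)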
1940 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1941 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1942 __ negptr(qword_count); 1943 __ jmp(L_copy_bytes); 1944 1945 // Copy trailing qwords 1946 __ BIND(L_copy_8_bytes); 1947 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1948 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1949 __ increment(qword_count); 1950 __ jcc(Assembler::notZero, L_copy_8_bytes); 1951 1952 // Check for and copy trailing dword 1953 __ BIND(L_copy_4_bytes); 1954 __ testl(dword_count, 1); // Only byte test since the value is 0 or 1 1955 __ jccb(Assembler::zero, L_exit); 1956 __ movl(rax, Address(end_from, 8)); 1957 __ movl(Address(end_to, 8), rax); 1958 } 1959 __ BIND(L_exit); 1960 address ucme_exit_pc = __ pc(); 1961 bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count); 1962 restore_arg_regs_using_thread(); 1963 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 1964 __ vzeroupper(); 1965 __ xorptr(rax, rax); // return 0 1966 __ leave(); // required for proper stackwalking of RuntimeStub frame 1967 __ ret(0); 1968 1969 { 1970 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, false, ucme_exit_pc); 1971 // Copy in multi-byte chunks 1972 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1973 __ jmp(L_copy_4_bytes); 1974 } 1975 1976 return start; 1977 } 1978 1979 // Arguments: 1980 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1981 // ignored 1982 // is_oop - true => oop array, so generate store check code 1983 // name - stub name string 1984 // 1985 // Inputs: 1986 // c_rarg0 - source array address 1987 // c_rarg1 - destination array address 1988 // c_rarg2 - element count, treated as ssize_t, can be zero 1989 // 1990 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 1991 // the hardware handle it. The two dwords within qwords that span 1992 // cache line boundaries will still be loaded and stored atomically. 1993 // 1994 address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target, 1995 address *entry, const char *name, 1996 bool dest_uninitialized = false) { 1997 __ align(CodeEntryAlignment); 1998 StubCodeMark mark(this, "StubRoutines", name); 1999 address start = __ pc(); 2000 2001 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2002 const Register from = rdi; // source array address 2003 const Register to = rsi; // destination array address 2004 const Register count = rdx; // elements count 2005 const Register dword_count = rcx; 2006 const Register qword_count = count; 2007 2008 __ enter(); // required for proper stackwalking of RuntimeStub frame 2009 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2010 2011 if (entry != NULL) { 2012 *entry = __ pc(); 2013 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2014 BLOCK_COMMENT("Entry:"); 2015 } 2016 2017 array_overlap_test(nooverlap_target, Address::times_4); 2018 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx 2019 // r9 is used to save r15_thread 2020 2021 DecoratorSet decorators = IN_HEAP | IS_ARRAY; 2022 if (dest_uninitialized) { 2023 decorators |= IS_DEST_UNINITIALIZED; 2024 } 2025 if (aligned) { 2026 decorators |= ARRAYCOPY_ALIGNED; 2027 } 2028 2029 BasicType type = is_oop ?
T_OBJECT : T_INT; 2030 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2031 // no registers are destroyed by this call 2032 bs->arraycopy_prologue(_masm, decorators, type, from, to, count); 2033 2034 assert_clean_int(count, rax); // Make sure 'count' is clean int. 2035 { 2036 // UnsafeCopyMemory page error: continue after ucm 2037 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2038 // 'from', 'to' and 'count' are now valid 2039 __ movptr(dword_count, count); 2040 __ shrptr(count, 1); // count => qword_count 2041 2042 // Copy from high to low addresses. Use 'to' as scratch. 2043 2044 // Check for and copy trailing dword 2045 __ testl(dword_count, 1); 2046 __ jcc(Assembler::zero, L_copy_bytes); 2047 __ movl(rax, Address(from, dword_count, Address::times_4, -4)); 2048 __ movl(Address(to, dword_count, Address::times_4, -4), rax); 2049 __ jmp(L_copy_bytes); 2050 2051 // Copy trailing qwords 2052 __ BIND(L_copy_8_bytes); 2053 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2054 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2055 __ decrement(qword_count); 2056 __ jcc(Assembler::notZero, L_copy_8_bytes); 2057 } 2058 if (is_oop) { 2059 __ jmp(L_exit); 2060 } 2061 restore_arg_regs_using_thread(); 2062 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2063 __ xorptr(rax, rax); // return 0 2064 __ vzeroupper(); 2065 __ leave(); // required for proper stackwalking of RuntimeStub frame 2066 __ ret(0); 2067 2068 { 2069 // UnsafeCopyMemory page error: continue after ucm 2070 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2071 // Copy in multi-bytes chunks 2072 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2073 } 2074 2075 __ BIND(L_exit); 2076 bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count); 2077 restore_arg_regs_using_thread(); 2078 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2079 __ xorptr(rax, rax); // return 0 2080 __ vzeroupper(); 2081 __ leave(); // required for proper stackwalking of RuntimeStub frame 2082 __ ret(0); 2083 2084 return start; 2085 } 2086 2087 // Arguments: 2088 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2089 // ignored 2090 // is_oop - true => oop array, so generate store check code 2091 // name - stub name string 2092 // 2093 // Inputs: 2094 // c_rarg0 - source array address 2095 // c_rarg1 - destination array address 2096 // c_rarg2 - element count, treated as ssize_t, can be zero 2097 // 2098 // Side Effects: 2099 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the 2100 // no-overlap entry point used by generate_conjoint_long_oop_copy(). 
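// (Which of the two entries is set depends on is_oop; whether the oop
// entry points are backed by this long flavor or by the int flavor above
// is decided by UseCompressedOops in generate_arraycopy_stubs().)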
2101 // 2102 address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry, 2103 const char *name, bool dest_uninitialized = false) { 2104 __ align(CodeEntryAlignment); 2105 StubCodeMark mark(this, "StubRoutines", name); 2106 address start = __ pc(); 2107 2108 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2109 const Register from = rdi; // source array address 2110 const Register to = rsi; // destination array address 2111 const Register qword_count = rdx; // elements count 2112 const Register end_from = from; // source array end address 2113 const Register end_to = rcx; // destination array end address 2114 const Register saved_count = r11; 2115 // End pointers are inclusive, and if count is not zero they point 2116 // to the last unit copied: end_to[0] := end_from[0] 2117 2118 __ enter(); // required for proper stackwalking of RuntimeStub frame 2119 // Save no-overlap entry point for generate_conjoint_long_oop_copy() 2120 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2121 2122 if (entry != NULL) { 2123 *entry = __ pc(); 2124 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2125 BLOCK_COMMENT("Entry:"); 2126 } 2127 2128 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx 2129 // r9 is used to save r15_thread 2130 // 'from', 'to' and 'qword_count' are now valid 2131 2132 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT; 2133 if (dest_uninitialized) { 2134 decorators |= IS_DEST_UNINITIALIZED; 2135 } 2136 if (aligned) { 2137 decorators |= ARRAYCOPY_ALIGNED; 2138 } 2139 2140 BasicType type = is_oop ? T_OBJECT : T_LONG; 2141 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2142 bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count); 2143 { 2144 // UnsafeCopyMemory page error: continue after ucm 2145 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2146 2147 // Copy from low to high addresses. Use 'to' as scratch. 
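// (Unlike the byte/short/int flavors there is no sub-qword tail to copy
// here: long and oop elements are already qword-sized, so qword_count is
// the element count itself.)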
2148 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 2149 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 2150 __ negptr(qword_count); 2151 __ jmp(L_copy_bytes); 2152 2153 // Copy trailing qwords 2154 __ BIND(L_copy_8_bytes); 2155 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 2156 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 2157 __ increment(qword_count); 2158 __ jcc(Assembler::notZero, L_copy_8_bytes); 2159 } 2160 if (is_oop) { 2161 __ jmp(L_exit); 2162 } else { 2163 restore_arg_regs_using_thread(); 2164 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2165 __ xorptr(rax, rax); // return 0 2166 __ vzeroupper(); 2167 __ leave(); // required for proper stackwalking of RuntimeStub frame 2168 __ ret(0); 2169 } 2170 2171 { 2172 // UnsafeCopyMemory page error: continue after ucm 2173 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2174 // Copy in multi-bytes chunks 2175 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2176 } 2177 2178 __ BIND(L_exit); 2179 bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count); 2180 restore_arg_regs_using_thread(); 2181 if (is_oop) { 2182 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2183 } else { 2184 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2185 } 2186 __ vzeroupper(); 2187 __ xorptr(rax, rax); // return 0 2188 __ leave(); // required for proper stackwalking of RuntimeStub frame 2189 __ ret(0); 2190 2191 return start; 2192 } 2193 2194 // Arguments: 2195 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2196 // ignored 2197 // is_oop - true => oop array, so generate store check code 2198 // name - stub name string 2199 // 2200 // Inputs: 2201 // c_rarg0 - source array address 2202 // c_rarg1 - destination array address 2203 // c_rarg2 - element count, treated as ssize_t, can be zero 2204 // 2205 address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, 2206 address nooverlap_target, address *entry, 2207 const char *name, bool dest_uninitialized = false) { 2208 __ align(CodeEntryAlignment); 2209 StubCodeMark mark(this, "StubRoutines", name); 2210 address start = __ pc(); 2211 2212 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2213 const Register from = rdi; // source array address 2214 const Register to = rsi; // destination array address 2215 const Register qword_count = rdx; // elements count 2216 const Register saved_count = rcx; 2217 2218 __ enter(); // required for proper stackwalking of RuntimeStub frame 2219 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2220 2221 if (entry != NULL) { 2222 *entry = __ pc(); 2223 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2224 BLOCK_COMMENT("Entry:"); 2225 } 2226 2227 array_overlap_test(nooverlap_target, Address::times_8); 2228 setup_arg_regs_using_thread(); // from => rdi, to => rsi, count => rdx 2229 // r9 is used to save r15_thread 2230 // 'from', 'to' and 'qword_count' are now valid 2231 2232 DecoratorSet decorators = IN_HEAP | IS_ARRAY; 2233 if (dest_uninitialized) { 2234 decorators |= IS_DEST_UNINITIALIZED; 2235 } 2236 if (aligned) { 2237 decorators |= ARRAYCOPY_ALIGNED; 2238 } 2239 2240 BasicType type = is_oop ? 
T_OBJECT : T_LONG; 2241 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2242 bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count); 2243 { 2244 // UnsafeCopyMemory page error: continue after ucm 2245 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2246 2247 __ jmp(L_copy_bytes); 2248 2249 // Copy trailing qwords 2250 __ BIND(L_copy_8_bytes); 2251 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2252 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2253 __ decrement(qword_count); 2254 __ jcc(Assembler::notZero, L_copy_8_bytes); 2255 } 2256 if (is_oop) { 2257 __ jmp(L_exit); 2258 } else { 2259 restore_arg_regs_using_thread(); 2260 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2261 __ xorptr(rax, rax); // return 0 2262 __ vzeroupper(); 2263 __ leave(); // required for proper stackwalking of RuntimeStub frame 2264 __ ret(0); 2265 } 2266 { 2267 // UnsafeCopyMemory page error: continue after ucm 2268 UnsafeCopyMemoryMark ucmm(this, !is_oop && !aligned, true); 2269 2270 // Copy in multi-bytes chunks 2271 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2272 } 2273 __ BIND(L_exit); 2274 bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count); 2275 restore_arg_regs_using_thread(); 2276 if (is_oop) { 2277 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2278 } else { 2279 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2280 } 2281 __ vzeroupper(); 2282 __ xorptr(rax, rax); // return 0 2283 __ leave(); // required for proper stackwalking of RuntimeStub frame 2284 __ ret(0); 2285 2286 return start; 2287 } 2288 2289 2290 // Helper for generating a dynamic type check. 2291 // Smashes no registers. 2292 void generate_type_check(Register sub_klass, 2293 Register super_check_offset, 2294 Register super_klass, 2295 Label& L_success) { 2296 assert_different_registers(sub_klass, super_check_offset, super_klass); 2297 2298 BLOCK_COMMENT("type_check:"); 2299 2300 Label L_miss; 2301 2302 __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL, 2303 super_check_offset); 2304 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL); 2305 2306 // Fall through on failure! 
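// (Control reaches L_miss only after both the fast and the slow subtype
// checks above failed to prove sub_klass is a subtype of super_klass;
// callers treat fall-through as "not a subtype".)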
2307 __ BIND(L_miss); 2308 } 2309 2310 // 2311 // Generate checkcasting array copy stub 2312 // 2313 // Input: 2314 // c_rarg0 - source array address 2315 // c_rarg1 - destination array address 2316 // c_rarg2 - element count, treated as ssize_t, can be zero 2317 // c_rarg3 - size_t ckoff (super_check_offset) 2318 // not Win64 2319 // c_rarg4 - oop ckval (super_klass) 2320 // Win64 2321 // rsp+40 - oop ckval (super_klass) 2322 // 2323 // Output: 2324 // rax == 0 - success 2325 // rax == -1^K - failure, where K is partial transfer count 2326 // 2327 address generate_checkcast_copy(const char *name, address *entry, 2328 bool dest_uninitialized = false) { 2329 2330 Label L_load_element, L_store_element, L_do_card_marks, L_done; 2331 2332 // Input registers (after setup_arg_regs) 2333 const Register from = rdi; // source array address 2334 const Register to = rsi; // destination array address 2335 const Register length = rdx; // elements count 2336 const Register ckoff = rcx; // super_check_offset 2337 const Register ckval = r8; // super_klass 2338 2339 // Registers used as temps (r13, r14 are save-on-entry) 2340 const Register end_from = from; // source array end address 2341 const Register end_to = r13; // destination array end address 2342 const Register count = rdx; // -(count_remaining) 2343 const Register r14_length = r14; // saved copy of length 2344 // End pointers are inclusive, and if length is not zero they point 2345 // to the last unit copied: end_to[0] := end_from[0] 2346 2347 const Register rax_oop = rax; // actual oop copied 2348 const Register r11_klass = r11; // oop._klass 2349 2350 //--------------------------------------------------------------- 2351 // Assembler stub will be used for this call to arraycopy 2352 // if the two arrays are subtypes of Object[] but the 2353 // destination array type is not equal to or a supertype 2354 // of the source type. Each element must be separately 2355 // checked. 2356 2357 __ align(CodeEntryAlignment); 2358 StubCodeMark mark(this, "StubRoutines", name); 2359 address start = __ pc(); 2360 2361 __ enter(); // required for proper stackwalking of RuntimeStub frame 2362 2363 #ifdef ASSERT 2364 // caller guarantees that the arrays really are different 2365 // otherwise, we would have to make conjoint checks 2366 { Label L; 2367 array_overlap_test(L, TIMES_OOP); 2368 __ stop("checkcast_copy within a single array"); 2369 __ bind(L); 2370 } 2371 #endif //ASSERT 2372 2373 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx 2374 // ckoff => rcx, ckval => r8 2375 // r9 and r10 may be used to save non-volatile registers 2376 #ifdef _WIN64 2377 // last argument (#4) is on stack on Win64 2378 __ movptr(ckval, Address(rsp, 6 * wordSize)); 2379 #endif 2380 2381 // Caller of this entry point must set up the argument registers. 
2382 if (entry != NULL) { 2383 *entry = __ pc(); 2384 BLOCK_COMMENT("Entry:"); 2385 } 2386 2387 // allocate spill slots for r13, r14 2388 enum { 2389 saved_r13_offset, 2390 saved_r14_offset, 2391 saved_r10_offset, 2392 saved_rbp_offset 2393 }; 2394 __ subptr(rsp, saved_rbp_offset * wordSize); 2395 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13); 2396 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14); 2397 __ movptr(Address(rsp, saved_r10_offset * wordSize), r10); 2398 2399 #ifdef ASSERT 2400 Label L2; 2401 __ get_thread(r14); 2402 __ cmpptr(r15_thread, r14); 2403 __ jcc(Assembler::equal, L2); 2404 __ stop("StubRoutines::call_stub: r15_thread is modified by call"); 2405 __ bind(L2); 2406 #endif // ASSERT 2407 2408 // check that int operands are properly extended to size_t 2409 assert_clean_int(length, rax); 2410 assert_clean_int(ckoff, rax); 2411 2412 #ifdef ASSERT 2413 BLOCK_COMMENT("assert consistent ckoff/ckval"); 2414 // The ckoff and ckval must be mutually consistent, 2415 // even though caller generates both. 2416 { Label L; 2417 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2418 __ cmpl(ckoff, Address(ckval, sco_offset)); 2419 __ jcc(Assembler::equal, L); 2420 __ stop("super_check_offset inconsistent"); 2421 __ bind(L); 2422 } 2423 #endif //ASSERT 2424 2425 // Loop-invariant addresses. They are exclusive end pointers. 2426 Address end_from_addr(from, length, TIMES_OOP, 0); 2427 Address end_to_addr(to, length, TIMES_OOP, 0); 2428 // Loop-variant addresses. They assume post-incremented count < 0. 2429 Address from_element_addr(end_from, count, TIMES_OOP, 0); 2430 Address to_element_addr(end_to, count, TIMES_OOP, 0); 2431 2432 DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_CHECKCAST | ARRAYCOPY_DISJOINT; 2433 if (dest_uninitialized) { 2434 decorators |= IS_DEST_UNINITIALIZED; 2435 } 2436 2437 BasicType type = T_OBJECT; 2438 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2439 bs->arraycopy_prologue(_masm, decorators, type, from, to, count); 2440 2441 // Copy from low to high addresses, indexed from the end of each array. 2442 __ lea(end_from, end_from_addr); 2443 __ lea(end_to, end_to_addr); 2444 __ movptr(r14_length, length); // save a copy of the length 2445 assert(length == count, ""); // else fix next line: 2446 __ negptr(count); // negate and test the length 2447 __ jcc(Assembler::notZero, L_load_element); 2448 2449 // Empty array: Nothing to do. 2450 __ xorptr(rax, rax); // return 0 on (trivial) success 2451 __ jmp(L_done); 2452 2453 // ======== begin loop ======== 2454 // (Loop is rotated; its entry is L_load_element.) 2455 // Loop control: 2456 // for (count = -count; count != 0; count++) 2457 // Base pointers src, dst are biased by 8*(count-1),to last element. 
2458 __ align(OptoLoopAlignment); 2459 2460 __ BIND(L_store_element); 2461 __ store_heap_oop(to_element_addr, rax_oop, noreg, noreg, AS_RAW); // store the oop 2462 __ increment(count); // increment the count toward zero 2463 __ jcc(Assembler::zero, L_do_card_marks); 2464 2465 // ======== loop entry is here ======== 2466 __ BIND(L_load_element); 2467 __ load_heap_oop(rax_oop, from_element_addr, noreg, noreg, AS_RAW); // load the oop 2468 __ testptr(rax_oop, rax_oop); 2469 __ jcc(Assembler::zero, L_store_element); 2470 2471 __ load_klass(r11_klass, rax_oop);// query the object klass 2472 generate_type_check(r11_klass, ckoff, ckval, L_store_element); 2473 // ======== end loop ======== 2474 2475 // It was a real error; we must depend on the caller to finish the job. 2476 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops. 2477 // Emit GC store barriers for the oops we have copied (r14 + rdx), 2478 // and report their number to the caller. 2479 assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1); 2480 Label L_post_barrier; 2481 __ addptr(r14_length, count); // K = (original - remaining) oops 2482 __ movptr(rax, r14_length); // save the value 2483 __ notptr(rax); // report (-1^K) to caller (does not affect flags) 2484 __ jccb(Assembler::notZero, L_post_barrier); 2485 __ jmp(L_done); // K == 0, nothing was copied, skip post barrier 2486 2487 // Come here on success only. 2488 __ BIND(L_do_card_marks); 2489 __ xorptr(rax, rax); // return 0 on success 2490 2491 __ BIND(L_post_barrier); 2492 bs->arraycopy_epilogue(_masm, decorators, type, from, to, r14_length); 2493 2494 // Common exit point (success or failure). 2495 __ BIND(L_done); 2496 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 2497 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 2498 __ movptr(r10, Address(rsp, saved_r10_offset * wordSize)); 2499 restore_arg_regs(); 2500 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free 2501 __ leave(); // required for proper stackwalking of RuntimeStub frame 2502 __ ret(0); 2503 2504 return start; 2505 } 2506 2507 // 2508 // Generate 'unsafe' array copy stub 2509 // Though just as safe as the other stubs, it takes an unscaled 2510 // size_t argument instead of an element count. 2511 // 2512 // Input: 2513 // c_rarg0 - source array address 2514 // c_rarg1 - destination array address 2515 // c_rarg2 - byte count, treated as ssize_t, can be zero 2516 // 2517 // Examines the alignment of the operands and dispatches 2518 // to a long, int, short, or byte copy loop. 
2519 // 2520 address generate_unsafe_copy(const char *name, 2521 address byte_copy_entry, address short_copy_entry, 2522 address int_copy_entry, address long_copy_entry) { 2523 2524 Label L_long_aligned, L_int_aligned, L_short_aligned; 2525 2526 // Input registers (before setup_arg_regs) 2527 const Register from = c_rarg0; // source array address 2528 const Register to = c_rarg1; // destination array address 2529 const Register size = c_rarg2; // byte count (size_t) 2530 2531 // Register used as a temp 2532 const Register bits = rax; // test copy of low bits 2533 2534 __ align(CodeEntryAlignment); 2535 StubCodeMark mark(this, "StubRoutines", name); 2536 address start = __ pc(); 2537 2538 __ enter(); // required for proper stackwalking of RuntimeStub frame 2539 2540 // bump this on entry, not on exit: 2541 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr); 2542 2543 __ mov(bits, from); 2544 __ orptr(bits, to); 2545 __ orptr(bits, size); 2546 2547 __ testb(bits, BytesPerLong-1); 2548 __ jccb(Assembler::zero, L_long_aligned); 2549 2550 __ testb(bits, BytesPerInt-1); 2551 __ jccb(Assembler::zero, L_int_aligned); 2552 2553 __ testb(bits, BytesPerShort-1); 2554 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry)); 2555 2556 __ BIND(L_short_aligned); 2557 __ shrptr(size, LogBytesPerShort); // size => short_count 2558 __ jump(RuntimeAddress(short_copy_entry)); 2559 2560 __ BIND(L_int_aligned); 2561 __ shrptr(size, LogBytesPerInt); // size => int_count 2562 __ jump(RuntimeAddress(int_copy_entry)); 2563 2564 __ BIND(L_long_aligned); 2565 __ shrptr(size, LogBytesPerLong); // size => qword_count 2566 __ jump(RuntimeAddress(long_copy_entry)); 2567 2568 return start; 2569 } 2570 2571 // Perform range checks on the proposed arraycopy. 2572 // Kills temp, but nothing else. 2573 // Also, clean the sign bits of src_pos and dst_pos. 2574 void arraycopy_range_checks(Register src, // source array oop (c_rarg0) 2575 Register src_pos, // source position (c_rarg1) 2576 Register dst, // destination array oo (c_rarg2) 2577 Register dst_pos, // destination position (c_rarg3) 2578 Register length, 2579 Register temp, 2580 Label& L_failed) { 2581 BLOCK_COMMENT("arraycopy_range_checks:"); 2582 2583 // if (src_pos + length > arrayOop(src)->length()) FAIL; 2584 __ movl(temp, length); 2585 __ addl(temp, src_pos); // src_pos + length 2586 __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes())); 2587 __ jcc(Assembler::above, L_failed); 2588 2589 // if (dst_pos + length > arrayOop(dst)->length()) FAIL; 2590 __ movl(temp, length); 2591 __ addl(temp, dst_pos); // dst_pos + length 2592 __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes())); 2593 __ jcc(Assembler::above, L_failed); 2594 2595 // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'. 2596 // Move with sign extension can be used since they are positive. 
2597 __ movslq(src_pos, src_pos); 2598 __ movslq(dst_pos, dst_pos); 2599 2600 BLOCK_COMMENT("arraycopy_range_checks done"); 2601 } 2602 2603 // 2604 // Generate generic array copy stubs 2605 // 2606 // Input: 2607 // c_rarg0 - src oop 2608 // c_rarg1 - src_pos (32-bits) 2609 // c_rarg2 - dst oop 2610 // c_rarg3 - dst_pos (32-bits) 2611 // not Win64 2612 // c_rarg4 - element count (32-bits) 2613 // Win64 2614 // rsp+40 - element count (32-bits) 2615 // 2616 // Output: 2617 // rax == 0 - success 2618 // rax == -1^K - failure, where K is partial transfer count 2619 // 2620 address generate_generic_copy(const char *name, 2621 address byte_copy_entry, address short_copy_entry, 2622 address int_copy_entry, address oop_copy_entry, 2623 address long_copy_entry, address checkcast_copy_entry) { 2624 2625 Label L_failed, L_failed_0, L_objArray; 2626 Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs; 2627 2628 // Input registers 2629 const Register src = c_rarg0; // source array oop 2630 const Register src_pos = c_rarg1; // source position 2631 const Register dst = c_rarg2; // destination array oop 2632 const Register dst_pos = c_rarg3; // destination position 2633 #ifndef _WIN64 2634 const Register length = c_rarg4; 2635 #else 2636 const Address length(rsp, 6 * wordSize); // elements count is on stack on Win64 2637 #endif 2638 2639 { int modulus = CodeEntryAlignment; 2640 int target = modulus - 5; // 5 = sizeof jmp(L_failed) 2641 int advance = target - (__ offset() % modulus); 2642 if (advance < 0) advance += modulus; 2643 if (advance > 0) __ nop(advance); 2644 } 2645 StubCodeMark mark(this, "StubRoutines", name); 2646 2647 // Short-hop target to L_failed. Makes for denser prologue code. 2648 __ BIND(L_failed_0); 2649 __ jmp(L_failed); 2650 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed"); 2651 2652 __ align(CodeEntryAlignment); 2653 address start = __ pc(); 2654 2655 __ enter(); // required for proper stackwalking of RuntimeStub frame 2656 2657 // bump this on entry, not on exit: 2658 inc_counter_np(SharedRuntime::_generic_array_copy_ctr); 2659 2660 //----------------------------------------------------------------------- 2661 // Assembler stub will be used for this call to arraycopy 2662 // if the following conditions are met: 2663 // 2664 // (1) src and dst must not be null. 2665 // (2) src_pos must not be negative. 2666 // (3) dst_pos must not be negative. 2667 // (4) length must not be negative. 2668 // (5) src klass and dst klass should be the same and not NULL. 2669 // (6) src and dst should be arrays. 2670 // (7) src_pos + length must not exceed length of src. 2671 // (8) dst_pos + length must not exceed length of dst. 2672 // 2673 2674 // if (src == NULL) return -1; 2675 __ testptr(src, src); // src oop 2676 size_t j1off = __ offset(); 2677 __ jccb(Assembler::zero, L_failed_0); 2678 2679 // if (src_pos < 0) return -1; 2680 __ testl(src_pos, src_pos); // src_pos (32-bits) 2681 __ jccb(Assembler::negative, L_failed_0); 2682 2683 // if (dst == NULL) return -1; 2684 __ testptr(dst, dst); // dst oop 2685 __ jccb(Assembler::zero, L_failed_0); 2686 2687 // if (dst_pos < 0) return -1; 2688 __ testl(dst_pos, dst_pos); // dst_pos (32-bits) 2689 size_t j4off = __ offset(); 2690 __ jccb(Assembler::negative, L_failed_0); 2691 2692 // The first four tests are very dense code, 2693 // but not quite dense enough to put four 2694 // jumps in a 16-byte instruction fetch buffer. 
2695 // That's good, because some branch predicters 2696 // do not like jumps so close together. 2697 // Make sure of this. 2698 guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps"); 2699 2700 // registers used as temp 2701 const Register r11_length = r11; // elements count to copy 2702 const Register r10_src_klass = r10; // array klass 2703 2704 // if (length < 0) return -1; 2705 __ movl(r11_length, length); // length (elements count, 32-bits value) 2706 __ testl(r11_length, r11_length); 2707 __ jccb(Assembler::negative, L_failed_0); 2708 2709 __ load_klass(r10_src_klass, src); 2710 #ifdef ASSERT 2711 // assert(src->klass() != NULL); 2712 { 2713 BLOCK_COMMENT("assert klasses not null {"); 2714 Label L1, L2; 2715 __ testptr(r10_src_klass, r10_src_klass); 2716 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL 2717 __ bind(L1); 2718 __ stop("broken null klass"); 2719 __ bind(L2); 2720 __ load_klass(rax, dst); 2721 __ cmpq(rax, 0); 2722 __ jcc(Assembler::equal, L1); // this would be broken also 2723 BLOCK_COMMENT("} assert klasses not null done"); 2724 } 2725 #endif 2726 2727 // Load layout helper (32-bits) 2728 // 2729 // |array_tag| | header_size | element_type | |log2_element_size| 2730 // 32 30 24 16 8 2 0 2731 // 2732 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 2733 // 2734 2735 const int lh_offset = in_bytes(Klass::layout_helper_offset()); 2736 2737 // Handle objArrays completely differently... 2738 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2739 __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh); 2740 __ jcc(Assembler::equal, L_objArray); 2741 2742 // if (src->klass() != dst->klass()) return -1; 2743 __ load_klass(rax, dst); 2744 __ cmpq(r10_src_klass, rax); 2745 __ jcc(Assembler::notEqual, L_failed); 2746 2747 const Register rax_lh = rax; // layout helper 2748 __ movl(rax_lh, Address(r10_src_klass, lh_offset)); 2749 2750 // if (!src->is_Array()) return -1; 2751 __ cmpl(rax_lh, Klass::_lh_neutral_value); 2752 __ jcc(Assembler::greaterEqual, L_failed); 2753 2754 // At this point, it is known to be a typeArray (array_tag 0x3). 
2755 #ifdef ASSERT 2756 { 2757 BLOCK_COMMENT("assert primitive array {"); 2758 Label L; 2759 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift)); 2760 __ jcc(Assembler::greaterEqual, L); 2761 __ stop("must be a primitive array"); 2762 __ bind(L); 2763 BLOCK_COMMENT("} assert primitive array done"); 2764 } 2765 #endif 2766 2767 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2768 r10, L_failed); 2769 2770 // TypeArrayKlass 2771 // 2772 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 2773 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 2774 // 2775 2776 const Register r10_offset = r10; // array offset 2777 const Register rax_elsize = rax_lh; // element size 2778 2779 __ movl(r10_offset, rax_lh); 2780 __ shrl(r10_offset, Klass::_lh_header_size_shift); 2781 __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset 2782 __ addptr(src, r10_offset); // src array offset 2783 __ addptr(dst, r10_offset); // dst array offset 2784 BLOCK_COMMENT("choose copy loop based on element size"); 2785 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize 2786 2787 // next registers should be set before the jump to corresponding stub 2788 const Register from = c_rarg0; // source array address 2789 const Register to = c_rarg1; // destination array address 2790 const Register count = c_rarg2; // elements count 2791 2792 // 'from', 'to', 'count' registers should be set in such order 2793 // since they are the same as 'src', 'src_pos', 'dst'. 2794 2795 __ BIND(L_copy_bytes); 2796 __ cmpl(rax_elsize, 0); 2797 __ jccb(Assembler::notEqual, L_copy_shorts); 2798 __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr 2799 __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr 2800 __ movl2ptr(count, r11_length); // length 2801 __ jump(RuntimeAddress(byte_copy_entry)); 2802 2803 __ BIND(L_copy_shorts); 2804 __ cmpl(rax_elsize, LogBytesPerShort); 2805 __ jccb(Assembler::notEqual, L_copy_ints); 2806 __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr 2807 __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr 2808 __ movl2ptr(count, r11_length); // length 2809 __ jump(RuntimeAddress(short_copy_entry)); 2810 2811 __ BIND(L_copy_ints); 2812 __ cmpl(rax_elsize, LogBytesPerInt); 2813 __ jccb(Assembler::notEqual, L_copy_longs); 2814 __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr 2815 __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr 2816 __ movl2ptr(count, r11_length); // length 2817 __ jump(RuntimeAddress(int_copy_entry)); 2818 2819 __ BIND(L_copy_longs); 2820 #ifdef ASSERT 2821 { 2822 BLOCK_COMMENT("assert long copy {"); 2823 Label L; 2824 __ cmpl(rax_elsize, LogBytesPerLong); 2825 __ jcc(Assembler::equal, L); 2826 __ stop("must be long copy, but elsize is wrong"); 2827 __ bind(L); 2828 BLOCK_COMMENT("} assert long copy done"); 2829 } 2830 #endif 2831 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr 2832 __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr 2833 __ movl2ptr(count, r11_length); // length 2834 __ jump(RuntimeAddress(long_copy_entry)); 2835 2836 // ObjArrayKlass 2837 __ BIND(L_objArray); 2838 // live at this point: r10_src_klass, r11_length, src[_pos], dst[_pos] 2839 2840 Label L_plain_copy, L_checkcast_copy; 2841 // test array classes for subtyping 2842 __ load_klass(rax, dst); 2843 __ cmpq(r10_src_klass, rax); // usual case is exact equality 2844 __ 
jcc(Assembler::notEqual, L_checkcast_copy); 2845 2846 // Identically typed arrays can be copied without element-wise checks. 2847 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2848 r10, L_failed); 2849 2850 __ lea(from, Address(src, src_pos, TIMES_OOP, 2851 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr 2852 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2853 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr 2854 __ movl2ptr(count, r11_length); // length 2855 __ BIND(L_plain_copy); 2856 __ jump(RuntimeAddress(oop_copy_entry)); 2857 2858 __ BIND(L_checkcast_copy); 2859 // live at this point: r10_src_klass, r11_length, rax (dst_klass) 2860 { 2861 // Before looking at dst.length, make sure dst is also an objArray. 2862 __ cmpl(Address(rax, lh_offset), objArray_lh); 2863 __ jcc(Assembler::notEqual, L_failed); 2864 2865 // It is safe to examine both src.length and dst.length. 2866 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2867 rax, L_failed); 2868 2869 const Register r11_dst_klass = r11; 2870 __ load_klass(r11_dst_klass, dst); // reload 2871 2872 // Marshal the base address arguments now, freeing registers. 2873 __ lea(from, Address(src, src_pos, TIMES_OOP, 2874 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2875 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2876 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2877 __ movl(count, length); // length (reloaded) 2878 Register sco_temp = c_rarg3; // this register is free now 2879 assert_different_registers(from, to, count, sco_temp, 2880 r11_dst_klass, r10_src_klass); 2881 assert_clean_int(count, sco_temp); 2882 2883 // Generate the type check. 2884 const int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2885 __ movl(sco_temp, Address(r11_dst_klass, sco_offset)); 2886 assert_clean_int(sco_temp, rax); 2887 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy); 2888 2889 // Fetch destination element klass from the ObjArrayKlass header. 2890 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 2891 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset)); 2892 __ movl( sco_temp, Address(r11_dst_klass, sco_offset)); 2893 assert_clean_int(sco_temp, rax); 2894 2895 // the checkcast_copy loop needs two extra arguments: 2896 assert(c_rarg3 == sco_temp, "#3 already in place"); 2897 // Set up arguments for checkcast_copy_entry. 
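// (checkcast_copy_entry expects from/to/length in rdi/rsi/rdx, ckoff in
// rcx and ckval in r8; see the register comments at the top of
// generate_checkcast_copy(). setup_arg_regs(4) plus the movptr below
// produce exactly that layout.)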
2898 setup_arg_regs(4); 2899 __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris 2900 __ jump(RuntimeAddress(checkcast_copy_entry)); 2901 } 2902 2903 __ BIND(L_failed); 2904 __ xorptr(rax, rax); 2905 __ notptr(rax); // return -1 2906 __ leave(); // required for proper stackwalking of RuntimeStub frame 2907 __ ret(0); 2908 2909 return start; 2910 } 2911 2912 address generate_data_cache_writeback() { 2913 const Register src = c_rarg0; // source address 2914 2915 __ align(CodeEntryAlignment); 2916 2917 StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback"); 2918 2919 address start = __ pc(); 2920 __ enter(); 2921 __ cache_wb(Address(src, 0)); 2922 __ leave(); 2923 __ ret(0); 2924 2925 return start; 2926 } 2927 2928 address generate_data_cache_writeback_sync() { 2929 const Register is_pre = c_rarg0; // pre or post sync 2930 2931 __ align(CodeEntryAlignment); 2932 2933 StubCodeMark mark(this, "StubRoutines", "_data_cache_writeback_sync"); 2934 2935 // pre wbsync is a no-op 2936 // post wbsync translates to an sfence 2937 2938 Label skip; 2939 address start = __ pc(); 2940 __ enter(); 2941 __ cmpl(is_pre, 0); 2942 __ jcc(Assembler::notEqual, skip); 2943 __ cache_wbsync(false); 2944 __ bind(skip); 2945 __ leave(); 2946 __ ret(0); 2947 2948 return start; 2949 } 2950 2951 void generate_arraycopy_stubs() { 2952 address entry; 2953 address entry_jbyte_arraycopy; 2954 address entry_jshort_arraycopy; 2955 address entry_jint_arraycopy; 2956 address entry_oop_arraycopy; 2957 address entry_jlong_arraycopy; 2958 address entry_checkcast_arraycopy; 2959 2960 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, 2961 "jbyte_disjoint_arraycopy"); 2962 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy, 2963 "jbyte_arraycopy"); 2964 2965 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, 2966 "jshort_disjoint_arraycopy"); 2967 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy, 2968 "jshort_arraycopy"); 2969 2970 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry, 2971 "jint_disjoint_arraycopy"); 2972 StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry, 2973 &entry_jint_arraycopy, "jint_arraycopy"); 2974 2975 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry, 2976 "jlong_disjoint_arraycopy"); 2977 StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry, 2978 &entry_jlong_arraycopy, "jlong_arraycopy"); 2979 2980 2981 if (UseCompressedOops) { 2982 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry, 2983 "oop_disjoint_arraycopy"); 2984 StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry, 2985 &entry_oop_arraycopy, "oop_arraycopy"); 2986 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry, 2987 "oop_disjoint_arraycopy_uninit", 2988 /*dest_uninitialized*/true); 2989 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry, 2990 NULL, "oop_arraycopy_uninit", 2991 /*dest_uninitialized*/true); 2992 } else { 2993 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry, 2994 "oop_disjoint_arraycopy"); 2995 StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry, 
2996 &entry_oop_arraycopy, "oop_arraycopy"); 2997 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry, 2998 "oop_disjoint_arraycopy_uninit", 2999 /*dest_uninitialized*/true); 3000 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry, 3001 NULL, "oop_arraycopy_uninit", 3002 /*dest_uninitialized*/true); 3003 } 3004 3005 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 3006 StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, 3007 /*dest_uninitialized*/true); 3008 3009 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", 3010 entry_jbyte_arraycopy, 3011 entry_jshort_arraycopy, 3012 entry_jint_arraycopy, 3013 entry_jlong_arraycopy); 3014 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy", 3015 entry_jbyte_arraycopy, 3016 entry_jshort_arraycopy, 3017 entry_jint_arraycopy, 3018 entry_oop_arraycopy, 3019 entry_jlong_arraycopy, 3020 entry_checkcast_arraycopy); 3021 3022 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 3023 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 3024 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 3025 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); 3026 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 3027 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 3028 3029 // We don't generate specialized code for HeapWord-aligned source 3030 // arrays, so just use the code we've already generated 3031 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy; 3032 StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy; 3033 3034 StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy; 3035 StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy; 3036 3037 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy; 3038 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; 3039 3040 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy; 3041 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; 3042 3043 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; 3044 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; 3045 3046 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit; 3047 StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit; 3048 } 3049 3050 // AES intrinsic stubs 3051 enum {AESBlockSize = 16}; 3052 3053 address generate_key_shuffle_mask() { 3054 __ align(16); 3055 StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask"); 3056 address start = __ pc(); 3057 __ emit_data64( 0x0405060700010203, relocInfo::none ); 3058 __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none ); 3059 return start; 3060 } 3061 3062 address generate_counter_shuffle_mask() { 3063 __ align(16); 3064 StubCodeMark mark(this, "StubRoutines", "counter_shuffle_mask"); 3065 address start = __ pc(); 3066 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 3067 __ emit_data64(0x0001020304050607, relocInfo::none); 3068 return start; 3069 } 3070 3071 // Utility 
routine for loading a 128-bit key word in little endian format; 3072 // callers can optionally specify that the shuffle mask is already in an XMM register 3073 void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 3074 __ movdqu(xmmdst, Address(key, offset)); 3075 if (xmm_shuf_mask != NULL) { 3076 __ pshufb(xmmdst, xmm_shuf_mask); 3077 } else { 3078 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3079 } 3080 } 3081 3082 // Utility routine to increment a 128-bit counter (the IV in CTR mode) 3083 void inc_counter(Register reg, XMMRegister xmmdst, int inc_delta, Label& next_block) { 3084 __ pextrq(reg, xmmdst, 0x0); 3085 __ addq(reg, inc_delta); 3086 __ pinsrq(xmmdst, reg, 0x0); 3087 __ jcc(Assembler::carryClear, next_block); // jump if no carry 3088 __ pextrq(reg, xmmdst, 0x01); // Carry 3089 __ addq(reg, 0x01); 3090 __ pinsrq(xmmdst, reg, 0x01); // Carry end 3091 __ BIND(next_block); // next instruction 3092 } 3093 3094 // Arguments: 3095 // 3096 // Inputs: 3097 // c_rarg0 - source byte array address 3098 // c_rarg1 - destination byte array address 3099 // c_rarg2 - K (key) in little endian int array 3100 // 3101 address generate_aescrypt_encryptBlock() { 3102 assert(UseAES, "need AES instructions and misaligned SSE support"); 3103 __ align(CodeEntryAlignment); 3104 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); 3105 Label L_doLast; 3106 address start = __ pc(); 3107 3108 const Register from = c_rarg0; // source array address 3109 const Register to = c_rarg1; // destination array address 3110 const Register key = c_rarg2; // key array address 3111 const Register keylen = rax; 3112 3113 const XMMRegister xmm_result = xmm0; 3114 const XMMRegister xmm_key_shuf_mask = xmm1; 3115 // On win64 xmm6-xmm15 must be preserved so don't use them.
3116 const XMMRegister xmm_temp1 = xmm2; 3117 const XMMRegister xmm_temp2 = xmm3; 3118 const XMMRegister xmm_temp3 = xmm4; 3119 const XMMRegister xmm_temp4 = xmm5; 3120 3121 __ enter(); // required for proper stackwalking of RuntimeStub frame 3122 3123 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3124 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3125 3126 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3127 __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input 3128 3129 // For encryption, the java expanded key ordering is just what we need 3130 // we don't know if the key is aligned, hence not using load-execute form 3131 3132 load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask); 3133 __ pxor(xmm_result, xmm_temp1); 3134 3135 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3136 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3137 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3138 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3139 3140 __ aesenc(xmm_result, xmm_temp1); 3141 __ aesenc(xmm_result, xmm_temp2); 3142 __ aesenc(xmm_result, xmm_temp3); 3143 __ aesenc(xmm_result, xmm_temp4); 3144 3145 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3146 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3147 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3148 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3149 3150 __ aesenc(xmm_result, xmm_temp1); 3151 __ aesenc(xmm_result, xmm_temp2); 3152 __ aesenc(xmm_result, xmm_temp3); 3153 __ aesenc(xmm_result, xmm_temp4); 3154 3155 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3156 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3157 3158 __ cmpl(keylen, 44); 3159 __ jccb(Assembler::equal, L_doLast); 3160 3161 __ aesenc(xmm_result, xmm_temp1); 3162 __ aesenc(xmm_result, xmm_temp2); 3163 3164 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3165 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3166 3167 __ cmpl(keylen, 52); 3168 __ jccb(Assembler::equal, L_doLast); 3169 3170 __ aesenc(xmm_result, xmm_temp1); 3171 __ aesenc(xmm_result, xmm_temp2); 3172 3173 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3174 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3175 3176 __ BIND(L_doLast); 3177 __ aesenc(xmm_result, xmm_temp1); 3178 __ aesenclast(xmm_result, xmm_temp2); 3179 __ movdqu(Address(to, 0), xmm_result); // store the result 3180 __ xorptr(rax, rax); // return 0 3181 __ leave(); // required for proper stackwalking of RuntimeStub frame 3182 __ ret(0); 3183 3184 return start; 3185 } 3186 3187 3188 // Arguments: 3189 // 3190 // Inputs: 3191 // c_rarg0 - source byte array address 3192 // c_rarg1 - destination byte array address 3193 // c_rarg2 - K (key) in little endian int array 3194 // 3195 address generate_aescrypt_decryptBlock() { 3196 assert(UseAES, "need AES instructions and misaligned SSE support"); 3197 __ align(CodeEntryAlignment); 3198 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); 3199 Label L_doLast; 3200 address start = __ pc(); 3201 3202 const Register from = c_rarg0; // source array address 3203 const Register to = c_rarg1; // destination array address 3204 const Register key = c_rarg2; // key array address 3205 const Register keylen = rax; 3206 3207 const XMMRegister xmm_result = xmm0; 3208 const XMMRegister xmm_key_shuf_mask = xmm1; 3209 // On win64 xmm6-xmm15 must be preserved so don't use them. 
3210 const XMMRegister xmm_temp1 = xmm2; 3211 const XMMRegister xmm_temp2 = xmm3; 3212 const XMMRegister xmm_temp3 = xmm4; 3213 const XMMRegister xmm_temp4 = xmm5; 3214 3215 __ enter(); // required for proper stackwalking of RuntimeStub frame 3216 3217 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3218 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3219 3220 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3221 __ movdqu(xmm_result, Address(from, 0)); 3222 3223 // for decryption java expanded key ordering is rotated one position from what we want 3224 // so we start from 0x10 here and hit 0x00 last 3225 // we don't know if the key is aligned, hence not using load-execute form 3226 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3227 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3228 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3229 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3230 3231 __ pxor (xmm_result, xmm_temp1); 3232 __ aesdec(xmm_result, xmm_temp2); 3233 __ aesdec(xmm_result, xmm_temp3); 3234 __ aesdec(xmm_result, xmm_temp4); 3235 3236 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3237 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3238 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3239 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3240 3241 __ aesdec(xmm_result, xmm_temp1); 3242 __ aesdec(xmm_result, xmm_temp2); 3243 __ aesdec(xmm_result, xmm_temp3); 3244 __ aesdec(xmm_result, xmm_temp4); 3245 3246 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3247 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3248 load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask); 3249 3250 __ cmpl(keylen, 44); 3251 __ jccb(Assembler::equal, L_doLast); 3252 3253 __ aesdec(xmm_result, xmm_temp1); 3254 __ aesdec(xmm_result, xmm_temp2); 3255 3256 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3257 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3258 3259 __ cmpl(keylen, 52); 3260 __ jccb(Assembler::equal, L_doLast); 3261 3262 __ aesdec(xmm_result, xmm_temp1); 3263 __ aesdec(xmm_result, xmm_temp2); 3264 3265 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3266 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3267 3268 __ BIND(L_doLast); 3269 __ aesdec(xmm_result, xmm_temp1); 3270 __ aesdec(xmm_result, xmm_temp2); 3271 3272 // for decryption the aesdeclast operation is always on key+0x00 3273 __ aesdeclast(xmm_result, xmm_temp3); 3274 __ movdqu(Address(to, 0), xmm_result); // store the result 3275 __ xorptr(rax, rax); // return 0 3276 __ leave(); // required for proper stackwalking of RuntimeStub frame 3277 __ ret(0); 3278 3279 return start; 3280 } 3281 3282 3283 // Arguments: 3284 // 3285 // Inputs: 3286 // c_rarg0 - source byte array address 3287 // c_rarg1 - destination byte array address 3288 // c_rarg2 - K (key) in little endian int array 3289 // c_rarg3 - r vector byte array address 3290 // c_rarg4 - input length 3291 // 3292 // Output: 3293 // rax - input length 3294 // 3295 address generate_cipherBlockChaining_encryptAESCrypt() { 3296 assert(UseAES, "need AES instructions and misaligned SSE support"); 3297 __ align(CodeEntryAlignment); 3298 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); 3299 address start = __ pc(); 3300 3301 Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256; 3302 const Register from = c_rarg0; // source array address 3303 const Register to = c_rarg1; 
// destination array address 3304 const Register key = c_rarg2; // key array address 3305 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3306 // and left with the results of the last encryption block 3307 #ifndef _WIN64 3308 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3309 #else 3310 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3311 const Register len_reg = r11; // pick the volatile windows register 3312 #endif 3313 const Register pos = rax; 3314 3315 // xmm register assignments for the loops below 3316 const XMMRegister xmm_result = xmm0; 3317 const XMMRegister xmm_temp = xmm1; 3318 // keys 0-10 preloaded into xmm2-xmm12 3319 const int XMM_REG_NUM_KEY_FIRST = 2; 3320 const int XMM_REG_NUM_KEY_LAST = 15; 3321 const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3322 const XMMRegister xmm_key10 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10); 3323 const XMMRegister xmm_key11 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11); 3324 const XMMRegister xmm_key12 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12); 3325 const XMMRegister xmm_key13 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13); 3326 3327 __ enter(); // required for proper stackwalking of RuntimeStub frame 3328 3329 #ifdef _WIN64 3330 // on win64, fill len_reg from stack position 3331 __ movl(len_reg, len_mem); 3332 #else 3333 __ push(len_reg); // Save 3334 #endif 3335 3336 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 3337 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3338 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0 3339 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) { 3340 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3341 offset += 0x10; 3342 } 3343 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 3344 3345 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3346 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3347 __ cmpl(rax, 44); 3348 __ jcc(Assembler::notEqual, L_key_192_256); 3349 3350 // 128 bit code follows here 3351 __ movptr(pos, 0); 3352 __ align(OptoLoopAlignment); 3353 3354 __ BIND(L_loopTop_128); 3355 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3356 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3357 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3358 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) { 3359 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3360 } 3361 __ aesenclast(xmm_result, xmm_key10); 3362 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3363 // no need to store r to memory until we exit 3364 __ addptr(pos, AESBlockSize); 3365 __ subptr(len_reg, AESBlockSize); 3366 __ jcc(Assembler::notEqual, L_loopTop_128); 3367 3368 __ BIND(L_exit); 3369 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object 3370 3371 #ifdef _WIN64 3372 __ movl(rax, len_mem); 3373 #else 3374 __ pop(rax); // return length 3375 #endif 3376 __ leave(); // required for proper stackwalking of RuntimeStub frame 3377 __ ret(0); 3378 3379 __ BIND(L_key_192_256); 3380 // here rax = len in ints of AESCrypt.KLE 
array (52=192, or 60=256)
    load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask);
    __ cmpl(rax, 52);
    __ jcc(Assembler::notEqual, L_key_256);

    // 192-bit code follows here (could be changed to use more xmm registers)
    __ movptr(pos, 0);
    __ align(OptoLoopAlignment);

    __ BIND(L_loopTop_192);
    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
    __ pxor (xmm_result, xmm_temp);   // xor with the current r vector
    __ pxor (xmm_result, xmm_key0);   // do the aes rounds
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) {
      __ aesenc(xmm_result, as_XMMRegister(rnum));
    }
    __ aesenclast(xmm_result, xmm_key12);
    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);   // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_loopTop_192);
    __ jmp(L_exit);

    __ BIND(L_key_256);
    // 256-bit code follows here (could be changed to use more xmm registers)
    load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask);
    __ movptr(pos, 0);
    __ align(OptoLoopAlignment);

    __ BIND(L_loopTop_256);
    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
    __ pxor (xmm_result, xmm_temp);   // xor with the current r vector
    __ pxor (xmm_result, xmm_key0);   // do the aes rounds
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) {
      __ aesenc(xmm_result, as_XMMRegister(rnum));
    }
    load_key(xmm_temp, key, 0xe0);
    __ aesenclast(xmm_result, xmm_temp);
    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);   // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_loopTop_256);
    __ jmp(L_exit);

    return start;
  }

  // Safefetch stubs.
  void generate_safefetch(const char* name, int size, address* entry,
                          address* fault_pc, address* continuation_pc) {
    // safefetch signatures:
    //   int      SafeFetch32(int*      adr, int      errValue);
    //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
    //
    // arguments:
    //   c_rarg0 = adr
    //   c_rarg1 = errValue
    //
    // result:
    //   rax = *adr or errValue

    StubCodeMark mark(this, "StubRoutines", name);

    // Entry point, pc or function descriptor.
    *entry = __ pc();

    // Load *adr into c_rarg1, may fault.
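    // Fault protocol (a sketch of the contract, not of the handler itself):
    // if the load below faults, the platform signal handler recognizes
    // *fault_pc and resumes execution at *continuation_pc with c_rarg1 still
    // holding errValue, so the stub returns errValue instead of crashing.
    // Conceptual C equivalent, assuming a hypothetical handler:
    //
    //   int SafeFetch32(int* adr, int errValue) {
    //     int v = errValue;
    //     v = *adr;     // may fault; on a fault the handler skips this load
    //     return v;     // rax = *adr on success, errValue on fault
    //   }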
3450 *fault_pc = __ pc(); 3451 switch (size) { 3452 case 4: 3453 // int32_t 3454 __ movl(c_rarg1, Address(c_rarg0, 0)); 3455 break; 3456 case 8: 3457 // int64_t 3458 __ movq(c_rarg1, Address(c_rarg0, 0)); 3459 break; 3460 default: 3461 ShouldNotReachHere(); 3462 } 3463 3464 // return errValue or *adr 3465 *continuation_pc = __ pc(); 3466 __ movq(rax, c_rarg1); 3467 __ ret(0); 3468 } 3469 3470 // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time 3471 // to hide instruction latency 3472 // 3473 // Arguments: 3474 // 3475 // Inputs: 3476 // c_rarg0 - source byte array address 3477 // c_rarg1 - destination byte array address 3478 // c_rarg2 - K (key) in little endian int array 3479 // c_rarg3 - r vector byte array address 3480 // c_rarg4 - input length 3481 // 3482 // Output: 3483 // rax - input length 3484 // 3485 address generate_cipherBlockChaining_decryptAESCrypt_Parallel() { 3486 assert(UseAES, "need AES instructions and misaligned SSE support"); 3487 __ align(CodeEntryAlignment); 3488 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 3489 address start = __ pc(); 3490 3491 const Register from = c_rarg0; // source array address 3492 const Register to = c_rarg1; // destination array address 3493 const Register key = c_rarg2; // key array address 3494 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3495 // and left with the results of the last encryption block 3496 #ifndef _WIN64 3497 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3498 #else 3499 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3500 const Register len_reg = r11; // pick the volatile windows register 3501 #endif 3502 const Register pos = rax; 3503 3504 const int PARALLEL_FACTOR = 4; 3505 const int ROUNDS[3] = { 10, 12, 14 }; // aes rounds for key128, key192, key256 3506 3507 Label L_exit; 3508 Label L_singleBlock_loopTopHead[3]; // 128, 192, 256 3509 Label L_singleBlock_loopTopHead2[3]; // 128, 192, 256 3510 Label L_singleBlock_loopTop[3]; // 128, 192, 256 3511 Label L_multiBlock_loopTopHead[3]; // 128, 192, 256 3512 Label L_multiBlock_loopTop[3]; // 128, 192, 256 3513 3514 // keys 0-10 preloaded into xmm5-xmm15 3515 const int XMM_REG_NUM_KEY_FIRST = 5; 3516 const int XMM_REG_NUM_KEY_LAST = 15; 3517 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3518 const XMMRegister xmm_key_last = as_XMMRegister(XMM_REG_NUM_KEY_LAST); 3519 3520 __ enter(); // required for proper stackwalking of RuntimeStub frame 3521 3522 #ifdef _WIN64 3523 // on win64, fill len_reg from stack position 3524 __ movl(len_reg, len_mem); 3525 #else 3526 __ push(len_reg); // Save 3527 #endif 3528 __ push(rbx); 3529 // the java expanded key ordering is rotated one position from what we want 3530 // so we start from 0x10 here and hit 0x00 last 3531 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 3532 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3533 // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00 3534 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) { 3535 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3536 offset += 0x10; 3537 } 3538 load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask); 3539 3540 const XMMRegister xmm_prev_block_cipher = xmm1; // holds cipher of previous block 3541 3542 // registers holding the four results in 
the parallelized loop
    const XMMRegister xmm_result0 = xmm0;
    const XMMRegister xmm_result1 = xmm2;
    const XMMRegister xmm_result2 = xmm3;
    const XMMRegister xmm_result3 = xmm4;

    __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00));   // initialize with initial rvec

    __ xorptr(pos, pos);

    // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (44=128, 52=192, 60=256))
    __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
    __ cmpl(rbx, 52);
    __ jcc(Assembler::equal, L_multiBlock_loopTopHead[1]);
    __ cmpl(rbx, 60);
    __ jcc(Assembler::equal, L_multiBlock_loopTopHead[2]);

#define DoFour(opc, src_reg)           \
    __ opc(xmm_result0, src_reg);      \
    __ opc(xmm_result1, src_reg);      \
    __ opc(xmm_result2, src_reg);      \
    __ opc(xmm_result3, src_reg);

    for (int k = 0; k < 3; ++k) {
      __ BIND(L_multiBlock_loopTopHead[k]);
      if (k != 0) {
        __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize);   // see if at least 4 blocks left
        __ jcc(Assembler::less, L_singleBlock_loopTopHead2[k]);
      }
      if (k == 1) {
        __ subptr(rsp, 6 * wordSize);
        __ movdqu(Address(rsp, 0), xmm15);   // save last_key from xmm15
        load_key(xmm15, key, 0xb0);          // 0xb0; 192-bit key goes up to 0xc0
        __ movdqu(Address(rsp, 2 * wordSize), xmm15);
        load_key(xmm1, key, 0xc0);           // 0xc0;
        __ movdqu(Address(rsp, 4 * wordSize), xmm1);
      } else if (k == 2) {
        __ subptr(rsp, 10 * wordSize);
        __ movdqu(Address(rsp, 0), xmm15);   // save last_key from xmm15
        load_key(xmm15, key, 0xd0);          // 0xd0; 256-bit key goes up to 0xe0
        __ movdqu(Address(rsp, 6 * wordSize), xmm15);
        load_key(xmm1, key, 0xe0);           // 0xe0;
        __ movdqu(Address(rsp, 8 * wordSize), xmm1);
        load_key(xmm15, key, 0xb0);          // 0xb0;
        __ movdqu(Address(rsp, 2 * wordSize), xmm15);
        load_key(xmm1, key, 0xc0);           // 0xc0;
        __ movdqu(Address(rsp, 4 * wordSize), xmm1);
      }
      __ align(OptoLoopAlignment);
      __ BIND(L_multiBlock_loopTop[k]);
      __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize);   // see if at least 4 blocks left
      __ jcc(Assembler::less, L_singleBlock_loopTopHead[k]);

      if (k != 0) {
        __ movdqu(xmm15, Address(rsp, 2 * wordSize));
        __ movdqu(xmm1, Address(rsp, 4 * wordSize));
      }

      __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize));   // get next 4 blocks into xmmresult registers
      __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize));

      DoFour(pxor, xmm_key_first);
      if (k == 0) {
        for (int rnum = 1; rnum < ROUNDS[k]; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        DoFour(aesdeclast, xmm_key_last);
      } else if (k == 1) {
        for (int rnum = 1; rnum <= ROUNDS[k]-2; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        __ movdqu(xmm_key_last, Address(rsp, 0));   // xmm15 needs to be loaded again.
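        // Only xmm0-xmm15 are usable here, so the 192/256-bit key schedules
        // do not fit in registers: the extra round keys were spilled to the
        // stack above and are reloaded into xmm15/xmm1 on each iteration,
        // trading a few movdqu loads for 4-way aesdec pipelining.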
3616 DoFour(aesdec, xmm1); // key : 0xc0 3617 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again 3618 DoFour(aesdeclast, xmm_key_last); 3619 } else if (k == 2) { 3620 for (int rnum = 1; rnum <= ROUNDS[k] - 4; rnum++) { 3621 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3622 } 3623 DoFour(aesdec, xmm1); // key : 0xc0 3624 __ movdqu(xmm15, Address(rsp, 6 * wordSize)); 3625 __ movdqu(xmm1, Address(rsp, 8 * wordSize)); 3626 DoFour(aesdec, xmm15); // key : 0xd0 3627 __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again. 3628 DoFour(aesdec, xmm1); // key : 0xe0 3629 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again 3630 DoFour(aesdeclast, xmm_key_last); 3631 } 3632 3633 // for each result, xor with the r vector of previous cipher block 3634 __ pxor(xmm_result0, xmm_prev_block_cipher); 3635 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 3636 __ pxor(xmm_result1, xmm_prev_block_cipher); 3637 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 3638 __ pxor(xmm_result2, xmm_prev_block_cipher); 3639 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 3640 __ pxor(xmm_result3, xmm_prev_block_cipher); 3641 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3 * AESBlockSize)); // this will carry over to next set of blocks 3642 if (k != 0) { 3643 __ movdqu(Address(rvec, 0x00), xmm_prev_block_cipher); 3644 } 3645 3646 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output 3647 __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1); 3648 __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2); 3649 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3); 3650 3651 __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); 3652 __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); 3653 __ jmp(L_multiBlock_loopTop[k]); 3654 3655 // registers used in the non-parallelized loops 3656 // xmm register assignments for the loops below 3657 const XMMRegister xmm_result = xmm0; 3658 const XMMRegister xmm_prev_block_cipher_save = xmm2; 3659 const XMMRegister xmm_key11 = xmm3; 3660 const XMMRegister xmm_key12 = xmm4; 3661 const XMMRegister key_tmp = xmm4; 3662 3663 __ BIND(L_singleBlock_loopTopHead[k]); 3664 if (k == 1) { 3665 __ addptr(rsp, 6 * wordSize); 3666 } else if (k == 2) { 3667 __ addptr(rsp, 10 * wordSize); 3668 } 3669 __ cmpptr(len_reg, 0); // any blocks left?? 
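      // The single-block loop below applies the CBC recurrence one block at
      // a time (illustrative): P[i] = AES_Decrypt(C[i]) ^ C[i-1], C[-1] = IV,
      // which is why each iteration saves the raw ciphertext
      // (xmm_prev_block_cipher_save) before decrypting it in place.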
      __ jcc(Assembler::equal, L_exit);
      __ BIND(L_singleBlock_loopTopHead2[k]);
      if (k == 1) {
        load_key(xmm_key11, key, 0xb0);   // 0xb0; 192-bit key goes up to 0xc0
        load_key(xmm_key12, key, 0xc0);   // 0xc0; 192-bit key goes up to 0xc0
      }
      if (k == 2) {
        load_key(xmm_key11, key, 0xb0);   // 0xb0; 256-bit key goes up to 0xe0
      }
      __ align(OptoLoopAlignment);
      __ BIND(L_singleBlock_loopTop[k]);
      __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of cipher input
      __ movdqa(xmm_prev_block_cipher_save, xmm_result);   // save for next r vector
      __ pxor(xmm_result, xmm_key_first);   // do the aes dec rounds
      for (int rnum = 1; rnum <= 9; rnum++) {
        __ aesdec(xmm_result, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
      }
      if (k == 1) {
        __ aesdec(xmm_result, xmm_key11);
        __ aesdec(xmm_result, xmm_key12);
      }
      if (k == 2) {
        __ aesdec(xmm_result, xmm_key11);
        load_key(key_tmp, key, 0xc0);
        __ aesdec(xmm_result, key_tmp);
        load_key(key_tmp, key, 0xd0);
        __ aesdec(xmm_result, key_tmp);
        load_key(key_tmp, key, 0xe0);
        __ aesdec(xmm_result, key_tmp);
      }

      __ aesdeclast(xmm_result, xmm_key_last);   // xmm15 always came from key+0
      __ pxor(xmm_result, xmm_prev_block_cipher);   // xor with the current r vector
      __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);   // store into the next 16 bytes of output
      // no need to store r to memory until we exit
      __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save);   // set up next r vector with cipher input from this block
      __ addptr(pos, AESBlockSize);
      __ subptr(len_reg, AESBlockSize);
      __ jcc(Assembler::notEqual, L_singleBlock_loopTop[k]);
      if (k != 2) {
        __ jmp(L_exit);
      }
    } // for 128/192/256

    __ BIND(L_exit);
    __ movdqu(Address(rvec, 0), xmm_prev_block_cipher);   // final value of r stored in rvec of CipherBlockChaining object
    __ pop(rbx);
#ifdef _WIN64
    __ movl(rax, len_mem);
#else
    __ pop(rax);   // return length
#endif
    __ leave();   // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  address generate_electronicCodeBook_encryptAESCrypt() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "electronicCodeBook_encryptAESCrypt");
    address start = __ pc();
    const Register from = c_rarg0;   // source array address
    const Register to = c_rarg1;     // destination array address
    const Register key = c_rarg2;    // key array address
    const Register len = c_rarg3;    // src len (must be multiple of blocksize 16)
    __ enter();   // required for proper stackwalking of RuntimeStub frame
    __ aesecb_encrypt(from, to, key, len);
    __ leave();   // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  address generate_electronicCodeBook_decryptAESCrypt() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "electronicCodeBook_decryptAESCrypt");
    address start = __ pc();
    const Register from = c_rarg0;   // source array address
    const Register to = c_rarg1;     // destination array address
    const Register key = c_rarg2;    // key array address
    const Register len = c_rarg3;    // src len (must be multiple of blocksize 16)
    __ enter();   // required for proper stackwalking of RuntimeStub frame
    __ aesecb_decrypt(from, to, key, len);
    __ leave();   // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  address generate_upper_word_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "upper_word_mask");
    address start = __ pc();
    __ emit_data64(0x0000000000000000, relocInfo::none);
    __ emit_data64(0xFFFFFFFF00000000, relocInfo::none);
    return start;
  }

  address generate_shuffle_byte_flip_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }

  // ofs and limit are used for multi-block byte array.
  // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
  address generate_sha1_implCompress(bool multi_block, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf = c_rarg0;
    Register state = c_rarg1;
    Register ofs = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister abcd = xmm0;
    const XMMRegister e0 = xmm1;
    const XMMRegister e1 = xmm2;
    const XMMRegister msg0 = xmm3;

    const XMMRegister msg1 = xmm4;
    const XMMRegister msg2 = xmm5;
    const XMMRegister msg3 = xmm6;
    const XMMRegister shuf_mask = xmm7;

    __ enter();

    __ subptr(rsp, 4 * wordSize);

    __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask,
                 buf, state, ofs, limit, rsp, multi_block);

    __ addptr(rsp, 4 * wordSize);

    __ leave();
    __ ret(0);
    return start;
  }

  address generate_pshuffle_byte_flip_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask");
    address start = __ pc();
    __ emit_data64(0x0405060700010203, relocInfo::none);
    __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);

    if (VM_Version::supports_avx2()) {
      __ emit_data64(0x0405060700010203, relocInfo::none);   // second copy
      __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
      // _SHUF_00BA
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      // _SHUF_DC00
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
    }

    return start;
  }

  // Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
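  // pshufb semantics, as a scalar sketch: for each destination byte i,
  //   dst[i] = (mask[i] & 0x80) ? 0 : src[mask[i] & 0x0f];
  // so the mask below flips the byte order inside each 64-bit lane, turning
  // little-endian loads into the big-endian words SHA-512 expects.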
  address generate_pshuffle_byte_flip_mask_sha512() {
    __ align(32);
    StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask_sha512");
    address start = __ pc();
    if (VM_Version::supports_avx2()) {
      __ emit_data64(0x0001020304050607, relocInfo::none);   // PSHUFFLE_BYTE_FLIP_MASK
      __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
      __ emit_data64(0x1011121314151617, relocInfo::none);
      __ emit_data64(0x18191a1b1c1d1e1f, relocInfo::none);
      __ emit_data64(0x0000000000000000, relocInfo::none);   // MASK_YMM_LO
      __ emit_data64(0x0000000000000000, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
    }

    return start;
  }

  // ofs and limit are used for multi-block byte array.
  // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
  address generate_sha256_implCompress(bool multi_block, const char *name) {
    assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), "");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf = c_rarg0;
    Register state = c_rarg1;
    Register ofs = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister msg = xmm0;
    const XMMRegister state0 = xmm1;
    const XMMRegister state1 = xmm2;
    const XMMRegister msgtmp0 = xmm3;

    const XMMRegister msgtmp1 = xmm4;
    const XMMRegister msgtmp2 = xmm5;
    const XMMRegister msgtmp3 = xmm6;
    const XMMRegister msgtmp4 = xmm7;

    const XMMRegister shuf_mask = xmm8;

    __ enter();

    __ subptr(rsp, 4 * wordSize);

    if (VM_Version::supports_sha()) {
      __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
                     buf, state, ofs, limit, rsp, multi_block, shuf_mask);
    } else if (VM_Version::supports_avx2()) {
      __ sha256_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
                     buf, state, ofs, limit, rsp, multi_block, shuf_mask);
    }
    __ addptr(rsp, 4 * wordSize);
    __ vzeroupper();
    __ leave();
    __ ret(0);
    return start;
  }

  address generate_sha512_implCompress(bool multi_block, const char *name) {
    assert(VM_Version::supports_avx2(), "");
    assert(VM_Version::supports_bmi2(), "");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf = c_rarg0;
    Register state = c_rarg1;
    Register ofs = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister msg = xmm0;
    const XMMRegister state0 = xmm1;
    const XMMRegister state1 = xmm2;
    const XMMRegister msgtmp0 = xmm3;
    const XMMRegister msgtmp1 = xmm4;
    const XMMRegister msgtmp2 = xmm5;
    const XMMRegister msgtmp3 = xmm6;
    const XMMRegister msgtmp4 = xmm7;

    const XMMRegister shuf_mask = xmm8;

    __ enter();

    __ sha512_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
                   buf, state, ofs, limit, rsp, multi_block, shuf_mask);

    __ vzeroupper();
    __ leave();
    __ ret(0);
    return start;
  }

  // This is a version of CTR/AES crypt which does 6 blocks in a loop at a time
  // to hide instruction latency
  //
  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - counter vector byte array address
  //   Linux
  //     c_rarg4            - input length
  //     c_rarg5            - saved encryptedCounter start
  //     rbp + 6 * wordSize - saved used length
  //   Windows
  //     rbp + 6 * wordSize - input length
  //     rbp + 7 * wordSize - saved encryptedCounter start
  //     rbp + 8 * wordSize - saved used length
  //
  // Output:
  //   rax - input length
  //
  address generate_counterMode_AESCrypt_Parallel() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt");
    address start = __ pc();
    const Register from = c_rarg0;      // source array address
    const Register to = c_rarg1;        // destination array address
    const Register key = c_rarg2;       // key array address
    const Register counter = c_rarg3;   // counter byte array initialized from counter array address
                                        // and updated with the incremented counter in the end
#ifndef _WIN64
    const Register len_reg = c_rarg4;
    const Register saved_encCounter_start = c_rarg5;
    const Register used_addr = r10;
    const Address used_mem(rbp, 2 * wordSize);
    const Register used = r11;
#else
    const Address len_mem(rbp, 6 * wordSize);                // length is on stack on Win64
    const Address saved_encCounter_mem(rbp, 7 * wordSize);   // saved encryptedCounter is on stack on Win64
    const Address used_mem(rbp, 8 * wordSize);               // used length is on stack on Win64
    const Register len_reg = r10;   // pick the first volatile windows register
    const Register saved_encCounter_start = r11;
    const Register used_addr = r13;
    const Register used = r14;
#endif
    const Register pos = rax;

    const int PARALLEL_FACTOR = 6;
    const XMMRegister xmm_counter_shuf_mask = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;   // used temporarily to swap key bytes up front
    const XMMRegister xmm_curr_counter = xmm2;

    const XMMRegister xmm_key_tmp0 = xmm3;
    const XMMRegister xmm_key_tmp1 = xmm4;

    // registers holding the six results in the parallelized loop
    const XMMRegister xmm_result0 = xmm5;
    const XMMRegister xmm_result1 = xmm6;
    const XMMRegister xmm_result2 = xmm7;
    const XMMRegister xmm_result3 = xmm8;
    const XMMRegister xmm_result4 = xmm9;
    const XMMRegister xmm_result5 = xmm10;

    const XMMRegister xmm_from0 = xmm11;
    const XMMRegister xmm_from1 = xmm12;
    const XMMRegister xmm_from2 = xmm13;
    const XMMRegister xmm_from3 = xmm14;   // the last one is xmm14. we have to preserve it on WIN64.
    const XMMRegister xmm_from4 = xmm3;    // reuse xmm3~4. Because xmm_key_tmp0~1 are useless when loading input text
    const XMMRegister xmm_from5 = xmm4;

    // for key_128, key_192, key_256
    const int rounds[3] = {10, 12, 14};
    Label L_exit_preLoop, L_preLoop_start;
    Label L_multiBlock_loopTop[3];
    Label L_singleBlockLoopTop[3];
    Label L__incCounter[3][6];        // for 6 blocks
    Label L__incCounter_single[3];    // for single block, key128, key192, key256
    Label L_processTail_insr[3], L_processTail_4_insr[3], L_processTail_2_insr[3], L_processTail_1_insr[3], L_processTail_exit_insr[3];
    Label L_processTail_4_extr[3], L_processTail_2_extr[3], L_processTail_1_extr[3], L_processTail_exit_extr[3];

    Label L_exit;

    __ enter();   // required for proper stackwalking of RuntimeStub frame

#ifdef _WIN64
    // allocate spill slots for r13, r14
    enum {
      saved_r13_offset,
      saved_r14_offset
    };
    __ subptr(rsp, 2 * wordSize);
    __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
    __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);

    // on win64, fill len_reg from stack position
    __ movl(len_reg, len_mem);
    __ movptr(saved_encCounter_start, saved_encCounter_mem);
    __ movptr(used_addr, used_mem);
    __ movl(used, Address(used_addr, 0));
#else
    __ push(len_reg);   // Save
    __ movptr(used_addr, used_mem);
    __ movl(used, Address(used_addr, 0));
#endif

    __ push(rbx);   // Save RBX
    __ movdqu(xmm_curr_counter, Address(counter, 0x00));   // initialize counter with initial counter
    __ movdqu(xmm_counter_shuf_mask, ExternalAddress(StubRoutines::x86::counter_shuffle_mask_addr()), pos);   // pos as scratch
    __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask);    // counter is shuffled
    __ movptr(pos, 0);

    // Use the partially used encrypted counter from last invocation
    __ BIND(L_preLoop_start);
    __ cmpptr(used, 16);
    __ jcc(Assembler::aboveEqual, L_exit_preLoop);
    __ cmpptr(len_reg, 0);
    __ jcc(Assembler::lessEqual, L_exit_preLoop);
    __ movb(rbx, Address(saved_encCounter_start, used));
    __ xorb(rbx, Address(from, pos));
    __ movb(Address(to, pos), rbx);
    __ addptr(pos, 1);
    __ addptr(used, 1);
    __ subptr(len_reg, 1);

    __ jmp(L_preLoop_start);

    __ BIND(L_exit_preLoop);
    __ movl(Address(used_addr, 0), used);

    // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()), rbx);   // rbx as scratch
    __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
    __ cmpl(rbx, 52);
    __ jcc(Assembler::equal, L_multiBlock_loopTop[1]);
    __ cmpl(rbx, 60);
    __ jcc(Assembler::equal, L_multiBlock_loopTop[2]);

#define CTR_DoSix(opc, src_reg)      \
    __ opc(xmm_result0, src_reg);    \
    __ opc(xmm_result1, src_reg);    \
    __ opc(xmm_result2, src_reg);    \
    __ opc(xmm_result3, src_reg);    \
    __ opc(xmm_result4, src_reg);    \
    __ opc(xmm_result5, src_reg);

    // k == 0 : generate code for key_128
    // k == 1 : generate code for key_192
    // k == 2 : generate code for key_256
    for (int k = 0; k < 3; ++k) {
      // multi-block loop starts here
      __ align(OptoLoopAlignment);
      __ BIND(L_multiBlock_loopTop[k]);
      __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize);   // see if at least PARALLEL_FACTOR blocks left
      __ jcc(Assembler::less, L_singleBlockLoopTop[k]);
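      // CTR keystream sketch: out[i] = in[i] ^ AES_Encrypt(K, counter + i).
      // Because every block is independent, six aesenc chains run
      // interleaved below, which hides the multi-cycle aesenc latency that
      // a single serial chain would expose.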
      load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);

      // load, then increase counters
      CTR_DoSix(movdqa, xmm_curr_counter);
      inc_counter(rbx, xmm_result1, 0x01, L__incCounter[k][0]);
      inc_counter(rbx, xmm_result2, 0x02, L__incCounter[k][1]);
      inc_counter(rbx, xmm_result3, 0x03, L__incCounter[k][2]);
      inc_counter(rbx, xmm_result4, 0x04, L__incCounter[k][3]);
      inc_counter(rbx, xmm_result5, 0x05, L__incCounter[k][4]);
      inc_counter(rbx, xmm_curr_counter, 0x06, L__incCounter[k][5]);
      CTR_DoSix(pshufb, xmm_counter_shuf_mask);   // after the increment, shuffle the counters back for PXOR
      CTR_DoSix(pxor, xmm_key_tmp0);              // PXOR with Round 0 key

      // load two ROUND_KEYs at a time
      for (int i = 1; i < rounds[k]; ) {
        load_key(xmm_key_tmp1, key, (0x10 * i), xmm_key_shuf_mask);
        load_key(xmm_key_tmp0, key, (0x10 * (i+1)), xmm_key_shuf_mask);
        CTR_DoSix(aesenc, xmm_key_tmp1);
        i++;
        if (i != rounds[k]) {
          CTR_DoSix(aesenc, xmm_key_tmp0);
        } else {
          CTR_DoSix(aesenclast, xmm_key_tmp0);
        }
        i++;
      }

      // get next PARALLEL_FACTOR blocks of input into the xmm_from registers
      __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ movdqu(xmm_from1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ movdqu(xmm_from2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ movdqu(xmm_from3, Address(from, pos, Address::times_1, 3 * AESBlockSize));
      __ movdqu(xmm_from4, Address(from, pos, Address::times_1, 4 * AESBlockSize));
      __ movdqu(xmm_from5, Address(from, pos, Address::times_1, 5 * AESBlockSize));

      __ pxor(xmm_result0, xmm_from0);
      __ pxor(xmm_result1, xmm_from1);
      __ pxor(xmm_result2, xmm_from2);
      __ pxor(xmm_result3, xmm_from3);
      __ pxor(xmm_result4, xmm_from4);
      __ pxor(xmm_result5, xmm_from5);

      // store 6 results into the next 96 bytes of output
      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
      __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1);
      __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2);
      __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3);
      __ movdqu(Address(to, pos, Address::times_1, 4 * AESBlockSize), xmm_result4);
      __ movdqu(Address(to, pos, Address::times_1, 5 * AESBlockSize), xmm_result5);

      __ addptr(pos, PARALLEL_FACTOR * AESBlockSize);     // advance the position in the crypt text
      __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // decrease the remaining length
      __ jmp(L_multiBlock_loopTop[k]);

      // singleBlock starts here
      __ align(OptoLoopAlignment);
      __ BIND(L_singleBlockLoopTop[k]);
      __ cmpptr(len_reg, 0);
      __ jcc(Assembler::lessEqual, L_exit);
      load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);
      __ movdqa(xmm_result0, xmm_curr_counter);
      inc_counter(rbx, xmm_curr_counter, 0x01, L__incCounter_single[k]);
      __ pshufb(xmm_result0, xmm_counter_shuf_mask);
      __ pxor(xmm_result0, xmm_key_tmp0);
      for (int i = 1; i < rounds[k]; i++) {
        load_key(xmm_key_tmp0, key, (0x10 * i), xmm_key_shuf_mask);
        __ aesenc(xmm_result0, xmm_key_tmp0);
      }
      load_key(xmm_key_tmp0, key, (rounds[k] * 0x10), xmm_key_shuf_mask);
      __ aesenclast(xmm_result0, xmm_key_tmp0);
      __ cmpptr(len_reg, AESBlockSize);
      __ jcc(Assembler::less, L_processTail_insr[k]);
      __ movdqu(xmm_from0, Address(from, pos,
Address::times_1, 0 * AESBlockSize)); 4161 __ pxor(xmm_result0, xmm_from0); 4162 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); 4163 __ addptr(pos, AESBlockSize); 4164 __ subptr(len_reg, AESBlockSize); 4165 __ jmp(L_singleBlockLoopTop[k]); 4166 __ BIND(L_processTail_insr[k]); // Process the tail part of the input array 4167 __ addptr(pos, len_reg); // 1. Insert bytes from src array into xmm_from0 register 4168 __ testptr(len_reg, 8); 4169 __ jcc(Assembler::zero, L_processTail_4_insr[k]); 4170 __ subptr(pos,8); 4171 __ pinsrq(xmm_from0, Address(from, pos), 0); 4172 __ BIND(L_processTail_4_insr[k]); 4173 __ testptr(len_reg, 4); 4174 __ jcc(Assembler::zero, L_processTail_2_insr[k]); 4175 __ subptr(pos,4); 4176 __ pslldq(xmm_from0, 4); 4177 __ pinsrd(xmm_from0, Address(from, pos), 0); 4178 __ BIND(L_processTail_2_insr[k]); 4179 __ testptr(len_reg, 2); 4180 __ jcc(Assembler::zero, L_processTail_1_insr[k]); 4181 __ subptr(pos, 2); 4182 __ pslldq(xmm_from0, 2); 4183 __ pinsrw(xmm_from0, Address(from, pos), 0); 4184 __ BIND(L_processTail_1_insr[k]); 4185 __ testptr(len_reg, 1); 4186 __ jcc(Assembler::zero, L_processTail_exit_insr[k]); 4187 __ subptr(pos, 1); 4188 __ pslldq(xmm_from0, 1); 4189 __ pinsrb(xmm_from0, Address(from, pos), 0); 4190 __ BIND(L_processTail_exit_insr[k]); 4191 4192 __ movdqu(Address(saved_encCounter_start, 0), xmm_result0); // 2. Perform pxor of the encrypted counter and plaintext Bytes. 4193 __ pxor(xmm_result0, xmm_from0); // Also the encrypted counter is saved for next invocation. 4194 4195 __ testptr(len_reg, 8); 4196 __ jcc(Assembler::zero, L_processTail_4_extr[k]); // 3. Extract bytes from xmm_result0 into the dest. array 4197 __ pextrq(Address(to, pos), xmm_result0, 0); 4198 __ psrldq(xmm_result0, 8); 4199 __ addptr(pos, 8); 4200 __ BIND(L_processTail_4_extr[k]); 4201 __ testptr(len_reg, 4); 4202 __ jcc(Assembler::zero, L_processTail_2_extr[k]); 4203 __ pextrd(Address(to, pos), xmm_result0, 0); 4204 __ psrldq(xmm_result0, 4); 4205 __ addptr(pos, 4); 4206 __ BIND(L_processTail_2_extr[k]); 4207 __ testptr(len_reg, 2); 4208 __ jcc(Assembler::zero, L_processTail_1_extr[k]); 4209 __ pextrw(Address(to, pos), xmm_result0, 0); 4210 __ psrldq(xmm_result0, 2); 4211 __ addptr(pos, 2); 4212 __ BIND(L_processTail_1_extr[k]); 4213 __ testptr(len_reg, 1); 4214 __ jcc(Assembler::zero, L_processTail_exit_extr[k]); 4215 __ pextrb(Address(to, pos), xmm_result0, 0); 4216 4217 __ BIND(L_processTail_exit_extr[k]); 4218 __ movl(Address(used_addr, 0), len_reg); 4219 __ jmp(L_exit); 4220 4221 } 4222 4223 __ BIND(L_exit); 4224 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); //counter is shuffled back. 4225 __ movdqu(Address(counter, 0), xmm_curr_counter); //save counter back 4226 __ pop(rbx); // pop the saved RBX. 
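    // rax must return the original input length: on Linux it was pushed in
    // the prologue and is popped into rax below; on Win64 it is re-read from
    // the caller's stack slot (len_mem).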
4227 #ifdef _WIN64 4228 __ movl(rax, len_mem); 4229 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 4230 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 4231 __ addptr(rsp, 2 * wordSize); 4232 #else 4233 __ pop(rax); // return 'len' 4234 #endif 4235 __ leave(); // required for proper stackwalking of RuntimeStub frame 4236 __ ret(0); 4237 return start; 4238 } 4239 4240 void roundDec(XMMRegister xmm_reg) { 4241 __ vaesdec(xmm1, xmm1, xmm_reg, Assembler::AVX_512bit); 4242 __ vaesdec(xmm2, xmm2, xmm_reg, Assembler::AVX_512bit); 4243 __ vaesdec(xmm3, xmm3, xmm_reg, Assembler::AVX_512bit); 4244 __ vaesdec(xmm4, xmm4, xmm_reg, Assembler::AVX_512bit); 4245 __ vaesdec(xmm5, xmm5, xmm_reg, Assembler::AVX_512bit); 4246 __ vaesdec(xmm6, xmm6, xmm_reg, Assembler::AVX_512bit); 4247 __ vaesdec(xmm7, xmm7, xmm_reg, Assembler::AVX_512bit); 4248 __ vaesdec(xmm8, xmm8, xmm_reg, Assembler::AVX_512bit); 4249 } 4250 4251 void roundDeclast(XMMRegister xmm_reg) { 4252 __ vaesdeclast(xmm1, xmm1, xmm_reg, Assembler::AVX_512bit); 4253 __ vaesdeclast(xmm2, xmm2, xmm_reg, Assembler::AVX_512bit); 4254 __ vaesdeclast(xmm3, xmm3, xmm_reg, Assembler::AVX_512bit); 4255 __ vaesdeclast(xmm4, xmm4, xmm_reg, Assembler::AVX_512bit); 4256 __ vaesdeclast(xmm5, xmm5, xmm_reg, Assembler::AVX_512bit); 4257 __ vaesdeclast(xmm6, xmm6, xmm_reg, Assembler::AVX_512bit); 4258 __ vaesdeclast(xmm7, xmm7, xmm_reg, Assembler::AVX_512bit); 4259 __ vaesdeclast(xmm8, xmm8, xmm_reg, Assembler::AVX_512bit); 4260 } 4261 4262 void ev_load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask = NULL) { 4263 __ movdqu(xmmdst, Address(key, offset)); 4264 if (xmm_shuf_mask != NULL) { 4265 __ pshufb(xmmdst, xmm_shuf_mask); 4266 } else { 4267 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 4268 } 4269 __ evshufi64x2(xmmdst, xmmdst, xmmdst, 0x0, Assembler::AVX_512bit); 4270 4271 } 4272 4273 address generate_cipherBlockChaining_decryptVectorAESCrypt() { 4274 assert(VM_Version::supports_vaes(), "need AES instructions and misaligned SSE support"); 4275 __ align(CodeEntryAlignment); 4276 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 4277 address start = __ pc(); 4278 4279 const Register from = c_rarg0; // source array address 4280 const Register to = c_rarg1; // destination array address 4281 const Register key = c_rarg2; // key array address 4282 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 4283 // and left with the results of the last encryption block 4284 #ifndef _WIN64 4285 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 4286 #else 4287 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 4288 const Register len_reg = r11; // pick the volatile windows register 4289 #endif 4290 4291 Label Loop, Loop1, L_128, L_256, L_192, KEY_192, KEY_256, Loop2, Lcbc_dec_rem_loop, 4292 Lcbc_dec_rem_last, Lcbc_dec_ret, Lcbc_dec_rem, Lcbc_exit; 4293 4294 __ enter(); 4295 4296 #ifdef _WIN64 4297 // on win64, fill len_reg from stack position 4298 __ movl(len_reg, len_mem); 4299 #else 4300 __ push(len_reg); // Save 4301 #endif 4302 __ push(rbx); 4303 __ vzeroupper(); 4304 4305 // Temporary variable declaration for swapping key bytes 4306 const XMMRegister xmm_key_shuf_mask = xmm1; 4307 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 4308 4309 // Calculate number of rounds from key size: 44 for 10-rounds, 52 for 12-rounds, 60 for 
14-rounds 4310 const Register rounds = rbx; 4311 __ movl(rounds, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 4312 4313 const XMMRegister IV = xmm0; 4314 // Load IV and broadcast value to 512-bits 4315 __ evbroadcasti64x2(IV, Address(rvec, 0), Assembler::AVX_512bit); 4316 4317 // Temporary variables for storing round keys 4318 const XMMRegister RK0 = xmm30; 4319 const XMMRegister RK1 = xmm9; 4320 const XMMRegister RK2 = xmm18; 4321 const XMMRegister RK3 = xmm19; 4322 const XMMRegister RK4 = xmm20; 4323 const XMMRegister RK5 = xmm21; 4324 const XMMRegister RK6 = xmm22; 4325 const XMMRegister RK7 = xmm23; 4326 const XMMRegister RK8 = xmm24; 4327 const XMMRegister RK9 = xmm25; 4328 const XMMRegister RK10 = xmm26; 4329 4330 // Load and shuffle key 4331 // the java expanded key ordering is rotated one position from what we want 4332 // so we start from 1*16 here and hit 0*16 last 4333 ev_load_key(RK1, key, 1 * 16, xmm_key_shuf_mask); 4334 ev_load_key(RK2, key, 2 * 16, xmm_key_shuf_mask); 4335 ev_load_key(RK3, key, 3 * 16, xmm_key_shuf_mask); 4336 ev_load_key(RK4, key, 4 * 16, xmm_key_shuf_mask); 4337 ev_load_key(RK5, key, 5 * 16, xmm_key_shuf_mask); 4338 ev_load_key(RK6, key, 6 * 16, xmm_key_shuf_mask); 4339 ev_load_key(RK7, key, 7 * 16, xmm_key_shuf_mask); 4340 ev_load_key(RK8, key, 8 * 16, xmm_key_shuf_mask); 4341 ev_load_key(RK9, key, 9 * 16, xmm_key_shuf_mask); 4342 ev_load_key(RK10, key, 10 * 16, xmm_key_shuf_mask); 4343 ev_load_key(RK0, key, 0*16, xmm_key_shuf_mask); 4344 4345 // Variables for storing source cipher text 4346 const XMMRegister S0 = xmm10; 4347 const XMMRegister S1 = xmm11; 4348 const XMMRegister S2 = xmm12; 4349 const XMMRegister S3 = xmm13; 4350 const XMMRegister S4 = xmm14; 4351 const XMMRegister S5 = xmm15; 4352 const XMMRegister S6 = xmm16; 4353 const XMMRegister S7 = xmm17; 4354 4355 // Variables for storing decrypted text 4356 const XMMRegister B0 = xmm1; 4357 const XMMRegister B1 = xmm2; 4358 const XMMRegister B2 = xmm3; 4359 const XMMRegister B3 = xmm4; 4360 const XMMRegister B4 = xmm5; 4361 const XMMRegister B5 = xmm6; 4362 const XMMRegister B6 = xmm7; 4363 const XMMRegister B7 = xmm8; 4364 4365 __ cmpl(rounds, 44); 4366 __ jcc(Assembler::greater, KEY_192); 4367 __ jmp(Loop); 4368 4369 __ BIND(KEY_192); 4370 const XMMRegister RK11 = xmm27; 4371 const XMMRegister RK12 = xmm28; 4372 ev_load_key(RK11, key, 11*16, xmm_key_shuf_mask); 4373 ev_load_key(RK12, key, 12*16, xmm_key_shuf_mask); 4374 4375 __ cmpl(rounds, 52); 4376 __ jcc(Assembler::greater, KEY_256); 4377 __ jmp(Loop); 4378 4379 __ BIND(KEY_256); 4380 const XMMRegister RK13 = xmm29; 4381 const XMMRegister RK14 = xmm31; 4382 ev_load_key(RK13, key, 13*16, xmm_key_shuf_mask); 4383 ev_load_key(RK14, key, 14*16, xmm_key_shuf_mask); 4384 4385 __ BIND(Loop); 4386 __ cmpl(len_reg, 512); 4387 __ jcc(Assembler::below, Lcbc_dec_rem); 4388 __ BIND(Loop1); 4389 __ subl(len_reg, 512); 4390 __ evmovdquq(S0, Address(from, 0 * 64), Assembler::AVX_512bit); 4391 __ evmovdquq(S1, Address(from, 1 * 64), Assembler::AVX_512bit); 4392 __ evmovdquq(S2, Address(from, 2 * 64), Assembler::AVX_512bit); 4393 __ evmovdquq(S3, Address(from, 3 * 64), Assembler::AVX_512bit); 4394 __ evmovdquq(S4, Address(from, 4 * 64), Assembler::AVX_512bit); 4395 __ evmovdquq(S5, Address(from, 5 * 64), Assembler::AVX_512bit); 4396 __ evmovdquq(S6, Address(from, 6 * 64), Assembler::AVX_512bit); 4397 __ evmovdquq(S7, Address(from, 7 * 64), Assembler::AVX_512bit); 4398 __ leaq(from, Address(from, 8 * 64)); 
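    // Each 512-bit register above holds four 16-byte cipher blocks, so one
    // iteration decrypts 8 * 4 = 32 blocks (512 bytes).  The evalignq steps
    // below shift the concatenated source registers by six qwords, handing
    // every block lane its predecessor cipher block; a single evpxorq per
    // register then applies the CBC chaining P[i] = D(C[i]) ^ C[i-1] after
    // the AES rounds.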
4399 4400 __ evpxorq(B0, S0, RK1, Assembler::AVX_512bit); 4401 __ evpxorq(B1, S1, RK1, Assembler::AVX_512bit); 4402 __ evpxorq(B2, S2, RK1, Assembler::AVX_512bit); 4403 __ evpxorq(B3, S3, RK1, Assembler::AVX_512bit); 4404 __ evpxorq(B4, S4, RK1, Assembler::AVX_512bit); 4405 __ evpxorq(B5, S5, RK1, Assembler::AVX_512bit); 4406 __ evpxorq(B6, S6, RK1, Assembler::AVX_512bit); 4407 __ evpxorq(B7, S7, RK1, Assembler::AVX_512bit); 4408 4409 __ evalignq(IV, S0, IV, 0x06); 4410 __ evalignq(S0, S1, S0, 0x06); 4411 __ evalignq(S1, S2, S1, 0x06); 4412 __ evalignq(S2, S3, S2, 0x06); 4413 __ evalignq(S3, S4, S3, 0x06); 4414 __ evalignq(S4, S5, S4, 0x06); 4415 __ evalignq(S5, S6, S5, 0x06); 4416 __ evalignq(S6, S7, S6, 0x06); 4417 4418 roundDec(RK2); 4419 roundDec(RK3); 4420 roundDec(RK4); 4421 roundDec(RK5); 4422 roundDec(RK6); 4423 roundDec(RK7); 4424 roundDec(RK8); 4425 roundDec(RK9); 4426 roundDec(RK10); 4427 4428 __ cmpl(rounds, 44); 4429 __ jcc(Assembler::belowEqual, L_128); 4430 roundDec(RK11); 4431 roundDec(RK12); 4432 4433 __ cmpl(rounds, 52); 4434 __ jcc(Assembler::belowEqual, L_192); 4435 roundDec(RK13); 4436 roundDec(RK14); 4437 4438 __ BIND(L_256); 4439 roundDeclast(RK0); 4440 __ jmp(Loop2); 4441 4442 __ BIND(L_128); 4443 roundDeclast(RK0); 4444 __ jmp(Loop2); 4445 4446 __ BIND(L_192); 4447 roundDeclast(RK0); 4448 4449 __ BIND(Loop2); 4450 __ evpxorq(B0, B0, IV, Assembler::AVX_512bit); 4451 __ evpxorq(B1, B1, S0, Assembler::AVX_512bit); 4452 __ evpxorq(B2, B2, S1, Assembler::AVX_512bit); 4453 __ evpxorq(B3, B3, S2, Assembler::AVX_512bit); 4454 __ evpxorq(B4, B4, S3, Assembler::AVX_512bit); 4455 __ evpxorq(B5, B5, S4, Assembler::AVX_512bit); 4456 __ evpxorq(B6, B6, S5, Assembler::AVX_512bit); 4457 __ evpxorq(B7, B7, S6, Assembler::AVX_512bit); 4458 __ evmovdquq(IV, S7, Assembler::AVX_512bit); 4459 4460 __ evmovdquq(Address(to, 0 * 64), B0, Assembler::AVX_512bit); 4461 __ evmovdquq(Address(to, 1 * 64), B1, Assembler::AVX_512bit); 4462 __ evmovdquq(Address(to, 2 * 64), B2, Assembler::AVX_512bit); 4463 __ evmovdquq(Address(to, 3 * 64), B3, Assembler::AVX_512bit); 4464 __ evmovdquq(Address(to, 4 * 64), B4, Assembler::AVX_512bit); 4465 __ evmovdquq(Address(to, 5 * 64), B5, Assembler::AVX_512bit); 4466 __ evmovdquq(Address(to, 6 * 64), B6, Assembler::AVX_512bit); 4467 __ evmovdquq(Address(to, 7 * 64), B7, Assembler::AVX_512bit); 4468 __ leaq(to, Address(to, 8 * 64)); 4469 __ jmp(Loop); 4470 4471 __ BIND(Lcbc_dec_rem); 4472 __ evshufi64x2(IV, IV, IV, 0x03, Assembler::AVX_512bit); 4473 4474 __ BIND(Lcbc_dec_rem_loop); 4475 __ subl(len_reg, 16); 4476 __ jcc(Assembler::carrySet, Lcbc_dec_ret); 4477 4478 __ movdqu(S0, Address(from, 0)); 4479 __ evpxorq(B0, S0, RK1, Assembler::AVX_512bit); 4480 __ vaesdec(B0, B0, RK2, Assembler::AVX_512bit); 4481 __ vaesdec(B0, B0, RK3, Assembler::AVX_512bit); 4482 __ vaesdec(B0, B0, RK4, Assembler::AVX_512bit); 4483 __ vaesdec(B0, B0, RK5, Assembler::AVX_512bit); 4484 __ vaesdec(B0, B0, RK6, Assembler::AVX_512bit); 4485 __ vaesdec(B0, B0, RK7, Assembler::AVX_512bit); 4486 __ vaesdec(B0, B0, RK8, Assembler::AVX_512bit); 4487 __ vaesdec(B0, B0, RK9, Assembler::AVX_512bit); 4488 __ vaesdec(B0, B0, RK10, Assembler::AVX_512bit); 4489 __ cmpl(rounds, 44); 4490 __ jcc(Assembler::belowEqual, Lcbc_dec_rem_last); 4491 4492 __ vaesdec(B0, B0, RK11, Assembler::AVX_512bit); 4493 __ vaesdec(B0, B0, RK12, Assembler::AVX_512bit); 4494 __ cmpl(rounds, 52); 4495 __ jcc(Assembler::belowEqual, Lcbc_dec_rem_last); 4496 4497 __ vaesdec(B0, B0, RK13, Assembler::AVX_512bit); 4498 __ 
vaesdec(B0, B0, RK14, Assembler::AVX_512bit); 4499 4500 __ BIND(Lcbc_dec_rem_last); 4501 __ vaesdeclast(B0, B0, RK0, Assembler::AVX_512bit); 4502 4503 __ evpxorq(B0, B0, IV, Assembler::AVX_512bit); 4504 __ evmovdquq(IV, S0, Assembler::AVX_512bit); 4505 __ movdqu(Address(to, 0), B0); 4506 __ leaq(from, Address(from, 16)); 4507 __ leaq(to, Address(to, 16)); 4508 __ jmp(Lcbc_dec_rem_loop); 4509 4510 __ BIND(Lcbc_dec_ret); 4511 __ movdqu(Address(rvec, 0), IV); 4512 4513 // Zero out the round keys 4514 __ evpxorq(RK0, RK0, RK0, Assembler::AVX_512bit); 4515 __ evpxorq(RK1, RK1, RK1, Assembler::AVX_512bit); 4516 __ evpxorq(RK2, RK2, RK2, Assembler::AVX_512bit); 4517 __ evpxorq(RK3, RK3, RK3, Assembler::AVX_512bit); 4518 __ evpxorq(RK4, RK4, RK4, Assembler::AVX_512bit); 4519 __ evpxorq(RK5, RK5, RK5, Assembler::AVX_512bit); 4520 __ evpxorq(RK6, RK6, RK6, Assembler::AVX_512bit); 4521 __ evpxorq(RK7, RK7, RK7, Assembler::AVX_512bit); 4522 __ evpxorq(RK8, RK8, RK8, Assembler::AVX_512bit); 4523 __ evpxorq(RK9, RK9, RK9, Assembler::AVX_512bit); 4524 __ evpxorq(RK10, RK10, RK10, Assembler::AVX_512bit); 4525 __ cmpl(rounds, 44); 4526 __ jcc(Assembler::belowEqual, Lcbc_exit); 4527 __ evpxorq(RK11, RK11, RK11, Assembler::AVX_512bit); 4528 __ evpxorq(RK12, RK12, RK12, Assembler::AVX_512bit); 4529 __ cmpl(rounds, 52); 4530 __ jcc(Assembler::belowEqual, Lcbc_exit); 4531 __ evpxorq(RK13, RK13, RK13, Assembler::AVX_512bit); 4532 __ evpxorq(RK14, RK14, RK14, Assembler::AVX_512bit); 4533 4534 __ BIND(Lcbc_exit); 4535 __ pop(rbx); 4536 #ifdef _WIN64 4537 __ movl(rax, len_mem); 4538 #else 4539 __ pop(rax); // return length 4540 #endif 4541 __ leave(); // required for proper stackwalking of RuntimeStub frame 4542 __ ret(0); 4543 return start; 4544 } 4545 4546 // Polynomial x^128+x^127+x^126+x^121+1 4547 address ghash_polynomial_addr() { 4548 __ align(CodeEntryAlignment); 4549 StubCodeMark mark(this, "StubRoutines", "_ghash_poly_addr"); 4550 address start = __ pc(); 4551 __ emit_data64(0x0000000000000001, relocInfo::none); 4552 __ emit_data64(0xc200000000000000, relocInfo::none); 4553 return start; 4554 } 4555 4556 address ghash_shufflemask_addr() { 4557 __ align(CodeEntryAlignment); 4558 StubCodeMark mark(this, "StubRoutines", "_ghash_shuffmask_addr"); 4559 address start = __ pc(); 4560 __ emit_data64(0x0f0f0f0f0f0f0f0f, relocInfo::none); 4561 __ emit_data64(0x0f0f0f0f0f0f0f0f, relocInfo::none); 4562 return start; 4563 } 4564 4565 // Ghash single and multi block operations using AVX instructions 4566 address generate_avx_ghash_processBlocks() { 4567 __ align(CodeEntryAlignment); 4568 4569 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 4570 address start = __ pc(); 4571 4572 // arguments 4573 const Register state = c_rarg0; 4574 const Register htbl = c_rarg1; 4575 const Register data = c_rarg2; 4576 const Register blocks = c_rarg3; 4577 __ enter(); 4578 // Save state before entering routine 4579 __ avx_ghash(state, htbl, data, blocks); 4580 __ leave(); // required for proper stackwalking of RuntimeStub frame 4581 __ ret(0); 4582 return start; 4583 } 4584 4585 // byte swap x86 long 4586 address generate_ghash_long_swap_mask() { 4587 __ align(CodeEntryAlignment); 4588 StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask"); 4589 address start = __ pc(); 4590 __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none ); 4591 __ emit_data64(0x0706050403020100, relocInfo::none ); 4592 return start; 4593 } 4594 4595 // byte swap x86 byte array 4596 address generate_ghash_byte_swap_mask() { 4597 __ 
align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }

  /* Single and multi-block ghash operations */
  address generate_ghash_processBlocks() {
    __ align(CodeEntryAlignment);
    Label L_ghash_loop, L_exit;
    StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
    address start = __ pc();

    const Register state = c_rarg0;
    const Register subkeyH = c_rarg1;
    const Register data = c_rarg2;
    const Register blocks = c_rarg3;

    const XMMRegister xmm_temp0 = xmm0;
    const XMMRegister xmm_temp1 = xmm1;
    const XMMRegister xmm_temp2 = xmm2;
    const XMMRegister xmm_temp3 = xmm3;
    const XMMRegister xmm_temp4 = xmm4;
    const XMMRegister xmm_temp5 = xmm5;
    const XMMRegister xmm_temp6 = xmm6;
    const XMMRegister xmm_temp7 = xmm7;
    const XMMRegister xmm_temp8 = xmm8;
    const XMMRegister xmm_temp9 = xmm9;
    const XMMRegister xmm_temp10 = xmm10;

    __ enter();

    __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));

    __ movdqu(xmm_temp0, Address(state, 0));
    __ pshufb(xmm_temp0, xmm_temp10);


    __ BIND(L_ghash_loop);
    __ movdqu(xmm_temp2, Address(data, 0));
    __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr()));

    __ movdqu(xmm_temp1, Address(subkeyH, 0));
    __ pshufb(xmm_temp1, xmm_temp10);

    __ pxor(xmm_temp0, xmm_temp2);

    //
    // Multiply with the hash key
    //
    __ movdqu(xmm_temp3, xmm_temp0);
    __ pclmulqdq(xmm_temp3, xmm_temp1, 0);    // xmm3 holds a0*b0
    __ movdqu(xmm_temp4, xmm_temp0);
    __ pclmulqdq(xmm_temp4, xmm_temp1, 16);   // xmm4 holds a0*b1

    __ movdqu(xmm_temp5, xmm_temp0);
    __ pclmulqdq(xmm_temp5, xmm_temp1, 1);    // xmm5 holds a1*b0
    __ movdqu(xmm_temp6, xmm_temp0);
    __ pclmulqdq(xmm_temp6, xmm_temp1, 17);   // xmm6 holds a1*b1

    __ pxor(xmm_temp4, xmm_temp5);    // xmm4 holds a0*b1 + a1*b0

    __ movdqu(xmm_temp5, xmm_temp4);  // move the contents of xmm4 to xmm5
    __ psrldq(xmm_temp4, 8);          // shift xmm4 right by 64 bits
    __ pslldq(xmm_temp5, 8);          // shift xmm5 left by 64 bits
    __ pxor(xmm_temp3, xmm_temp5);
    __ pxor(xmm_temp6, xmm_temp4);    // Register pair <xmm6:xmm3> holds the result
                                      // of the carry-less multiplication of
                                      // xmm0 by xmm1.

    // We shift the result of the multiplication by one bit position
    // to the left to cope with the fact that the bits are reversed.
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp8, xmm_temp6);
    __ pslld(xmm_temp3, 1);
    __ pslld(xmm_temp6, 1);
    __ psrld(xmm_temp7, 31);
    __ psrld(xmm_temp8, 31);
    __ movdqu(xmm_temp9, xmm_temp7);
    __ pslldq(xmm_temp8, 4);
    __ pslldq(xmm_temp7, 4);
    __ psrldq(xmm_temp9, 12);
    __ por(xmm_temp3, xmm_temp7);
    __ por(xmm_temp6, xmm_temp8);
    __ por(xmm_temp6, xmm_temp9);

    //
    // First phase of the reduction
    //
    // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts
    // independently.
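    // (The 256-bit product <xmm6:xmm3> is reduced modulo the GHASH
    //  polynomial x^128 + x^127 + x^126 + x^121 + 1; the 31-, 30- and
    //  25-bit shifts below are the reflected-bit-order counterparts of its
    //  x^127, x^126 and x^121 terms.)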
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp8, xmm_temp3);
    __ movdqu(xmm_temp9, xmm_temp3);
    __ pslld(xmm_temp7, 31);    // packed left shift by 31 (a right shift in the reflected bit order)
    __ pslld(xmm_temp8, 30);    // packed left shift by 30
    __ pslld(xmm_temp9, 25);    // packed left shift by 25
    __ pxor(xmm_temp7, xmm_temp8);    // xor the shifted versions
    __ pxor(xmm_temp7, xmm_temp9);
    __ movdqu(xmm_temp8, xmm_temp7);
    __ pslldq(xmm_temp7, 12);
    __ psrldq(xmm_temp8, 4);
    __ pxor(xmm_temp3, xmm_temp7);    // first phase of the reduction complete

    //
    // Second phase of the reduction
    //
    // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these
    // shift operations.
    __ movdqu(xmm_temp2, xmm_temp3);
    __ movdqu(xmm_temp4, xmm_temp3);
    __ movdqu(xmm_temp5, xmm_temp3);
    __ psrld(xmm_temp2, 1);     // packed right shift by 1 (a left shift in the reflected bit order)
    __ psrld(xmm_temp4, 2);     // packed right shift by 2
    __ psrld(xmm_temp5, 7);     // packed right shift by 7
    __ pxor(xmm_temp2, xmm_temp4);    // xor the shifted versions
    __ pxor(xmm_temp2, xmm_temp5);
    __ pxor(xmm_temp2, xmm_temp8);
    __ pxor(xmm_temp3, xmm_temp2);
    __ pxor(xmm_temp6, xmm_temp3);    // the result is in xmm6

    __ decrement(blocks);
    __ jcc(Assembler::zero, L_exit);
    __ movdqu(xmm_temp0, xmm_temp6);
    __ addptr(data, 16);
    __ jmp(L_ghash_loop);

    __ BIND(L_exit);
    __ pshufb(xmm_temp6, xmm_temp10);          // Byte swap 16-byte result
    __ movdqu(Address(state, 0), xmm_temp6);   // store the result
    __ leave();
    __ ret(0);
    return start;
  }

  // base64 character set
  address base64_charset_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "base64_charset");
    address start = __ pc();
    __ emit_data64(0x0000004200000041, relocInfo::none);
    __ emit_data64(0x0000004400000043, relocInfo::none);
    __ emit_data64(0x0000004600000045, relocInfo::none);
    __ emit_data64(0x0000004800000047, relocInfo::none);
    __ emit_data64(0x0000004a00000049, relocInfo::none);
    __ emit_data64(0x0000004c0000004b, relocInfo::none);
    __ emit_data64(0x0000004e0000004d, relocInfo::none);
    __ emit_data64(0x000000500000004f, relocInfo::none);
    __ emit_data64(0x0000005200000051, relocInfo::none);
    __ emit_data64(0x0000005400000053, relocInfo::none);
    __ emit_data64(0x0000005600000055, relocInfo::none);
    __ emit_data64(0x0000005800000057, relocInfo::none);
    __ emit_data64(0x0000005a00000059, relocInfo::none);
    __ emit_data64(0x0000006200000061, relocInfo::none);
    __ emit_data64(0x0000006400000063, relocInfo::none);
    __ emit_data64(0x0000006600000065, relocInfo::none);
    __ emit_data64(0x0000006800000067, relocInfo::none);
    __ emit_data64(0x0000006a00000069, relocInfo::none);
    __ emit_data64(0x0000006c0000006b, relocInfo::none);
    __ emit_data64(0x0000006e0000006d, relocInfo::none);
    __ emit_data64(0x000000700000006f, relocInfo::none);
    __ emit_data64(0x0000007200000071, relocInfo::none);
    __ emit_data64(0x0000007400000073, relocInfo::none);
    __ emit_data64(0x0000007600000075, relocInfo::none);
    __ emit_data64(0x0000007800000077, relocInfo::none);
    __ emit_data64(0x0000007a00000079, relocInfo::none);
    __ emit_data64(0x0000003100000030, relocInfo::none);
    __ emit_data64(0x0000003300000032, relocInfo::none);
    __ emit_data64(0x0000003500000034, relocInfo::none);
    __

  // base64 character set
  address base64_charset_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "base64_charset");
    address start = __ pc();
    __ emit_data64(0x0000004200000041, relocInfo::none);
    __ emit_data64(0x0000004400000043, relocInfo::none);
    __ emit_data64(0x0000004600000045, relocInfo::none);
    __ emit_data64(0x0000004800000047, relocInfo::none);
    __ emit_data64(0x0000004a00000049, relocInfo::none);
    __ emit_data64(0x0000004c0000004b, relocInfo::none);
    __ emit_data64(0x0000004e0000004d, relocInfo::none);
    __ emit_data64(0x000000500000004f, relocInfo::none);
    __ emit_data64(0x0000005200000051, relocInfo::none);
    __ emit_data64(0x0000005400000053, relocInfo::none);
    __ emit_data64(0x0000005600000055, relocInfo::none);
    __ emit_data64(0x0000005800000057, relocInfo::none);
    __ emit_data64(0x0000005a00000059, relocInfo::none);
    __ emit_data64(0x0000006200000061, relocInfo::none);
    __ emit_data64(0x0000006400000063, relocInfo::none);
    __ emit_data64(0x0000006600000065, relocInfo::none);
    __ emit_data64(0x0000006800000067, relocInfo::none);
    __ emit_data64(0x0000006a00000069, relocInfo::none);
    __ emit_data64(0x0000006c0000006b, relocInfo::none);
    __ emit_data64(0x0000006e0000006d, relocInfo::none);
    __ emit_data64(0x000000700000006f, relocInfo::none);
    __ emit_data64(0x0000007200000071, relocInfo::none);
    __ emit_data64(0x0000007400000073, relocInfo::none);
    __ emit_data64(0x0000007600000075, relocInfo::none);
    __ emit_data64(0x0000007800000077, relocInfo::none);
    __ emit_data64(0x0000007a00000079, relocInfo::none);
    __ emit_data64(0x0000003100000030, relocInfo::none);
    __ emit_data64(0x0000003300000032, relocInfo::none);
    __ emit_data64(0x0000003500000034, relocInfo::none);
    __ emit_data64(0x0000003700000036, relocInfo::none);
    __ emit_data64(0x0000003900000038, relocInfo::none);
    __ emit_data64(0x0000002f0000002b, relocInfo::none);
    return start;
  }
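
  // Each table entry above is an ASCII code zero-extended to 32 bits, so
  // that the vector encoder below can translate dword indices into output
  // characters directly with evpgatherdd.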

  // base64 url character set
  address base64url_charset_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "base64url_charset");
    address start = __ pc();
    __ emit_data64(0x0000004200000041, relocInfo::none);
    __ emit_data64(0x0000004400000043, relocInfo::none);
    __ emit_data64(0x0000004600000045, relocInfo::none);
    __ emit_data64(0x0000004800000047, relocInfo::none);
    __ emit_data64(0x0000004a00000049, relocInfo::none);
    __ emit_data64(0x0000004c0000004b, relocInfo::none);
    __ emit_data64(0x0000004e0000004d, relocInfo::none);
    __ emit_data64(0x000000500000004f, relocInfo::none);
    __ emit_data64(0x0000005200000051, relocInfo::none);
    __ emit_data64(0x0000005400000053, relocInfo::none);
    __ emit_data64(0x0000005600000055, relocInfo::none);
    __ emit_data64(0x0000005800000057, relocInfo::none);
    __ emit_data64(0x0000005a00000059, relocInfo::none);
    __ emit_data64(0x0000006200000061, relocInfo::none);
    __ emit_data64(0x0000006400000063, relocInfo::none);
    __ emit_data64(0x0000006600000065, relocInfo::none);
    __ emit_data64(0x0000006800000067, relocInfo::none);
    __ emit_data64(0x0000006a00000069, relocInfo::none);
    __ emit_data64(0x0000006c0000006b, relocInfo::none);
    __ emit_data64(0x0000006e0000006d, relocInfo::none);
    __ emit_data64(0x000000700000006f, relocInfo::none);
    __ emit_data64(0x0000007200000071, relocInfo::none);
    __ emit_data64(0x0000007400000073, relocInfo::none);
    __ emit_data64(0x0000007600000075, relocInfo::none);
    __ emit_data64(0x0000007800000077, relocInfo::none);
    __ emit_data64(0x0000007a00000079, relocInfo::none);
    __ emit_data64(0x0000003100000030, relocInfo::none);
    __ emit_data64(0x0000003300000032, relocInfo::none);
    __ emit_data64(0x0000003500000034, relocInfo::none);
    __ emit_data64(0x0000003700000036, relocInfo::none);
    __ emit_data64(0x0000003900000038, relocInfo::none);
    __ emit_data64(0x0000005f0000002d, relocInfo::none);

    return start;
  }
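
  // The URL-safe alphabet (RFC 4648, section 5) is identical except for the
  // final two entries, where '+' and '/' are replaced by '-' and '_'.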

  address base64_bswap_mask_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "bswap_mask_base64");
    address start = __ pc();
    __ emit_data64(0x0504038002010080, relocInfo::none);
    __ emit_data64(0x0b0a098008070680, relocInfo::none);
    __ emit_data64(0x0908078006050480, relocInfo::none);
    __ emit_data64(0x0f0e0d800c0b0a80, relocInfo::none);
    __ emit_data64(0x0605048003020180, relocInfo::none);
    __ emit_data64(0x0c0b0a8009080780, relocInfo::none);
    __ emit_data64(0x0504038002010080, relocInfo::none);
    __ emit_data64(0x0b0a098008070680, relocInfo::none);

    return start;
  }

  address base64_right_shift_mask_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "right_shift_mask");
    address start = __ pc();
    __ emit_data64(0x0006000400020000, relocInfo::none);
    __ emit_data64(0x0006000400020000, relocInfo::none);
    __ emit_data64(0x0006000400020000, relocInfo::none);
    __ emit_data64(0x0006000400020000, relocInfo::none);
    __ emit_data64(0x0006000400020000, relocInfo::none);
    __ emit_data64(0x0006000400020000, relocInfo::none);
    __ emit_data64(0x0006000400020000, relocInfo::none);
    __ emit_data64(0x0006000400020000, relocInfo::none);

    return start;
  }

  address base64_left_shift_mask_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "left_shift_mask");
    address start = __ pc();
    __ emit_data64(0x0000000200040000, relocInfo::none);
    __ emit_data64(0x0000000200040000, relocInfo::none);
    __ emit_data64(0x0000000200040000, relocInfo::none);
    __ emit_data64(0x0000000200040000, relocInfo::none);
    __ emit_data64(0x0000000200040000, relocInfo::none);
    __ emit_data64(0x0000000200040000, relocInfo::none);
    __ emit_data64(0x0000000200040000, relocInfo::none);
    __ emit_data64(0x0000000200040000, relocInfo::none);

    return start;
  }

  address base64_and_mask_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "and_mask");
    address start = __ pc();
    __ emit_data64(0x3f003f003f000000, relocInfo::none);
    __ emit_data64(0x3f003f003f000000, relocInfo::none);
    __ emit_data64(0x3f003f003f000000, relocInfo::none);
    __ emit_data64(0x3f003f003f000000, relocInfo::none);
    __ emit_data64(0x3f003f003f000000, relocInfo::none);
    __ emit_data64(0x3f003f003f000000, relocInfo::none);
    __ emit_data64(0x3f003f003f000000, relocInfo::none);
    __ emit_data64(0x3f003f003f000000, relocInfo::none);
    return start;
  }

  address base64_gather_mask_addr() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "gather_mask");
    address start = __ pc();
    __ emit_data64(0xffffffffffffffff, relocInfo::none);
    return start;
  }
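
  // How the masks above are used: the encoder implements the standard
  // base64 split of three bytes into four 6-bit symbols,
  //   s0 =   b0 >> 2
  //   s1 = ((b0 & 0x03) << 4) | (b1 >> 4)
  //   s2 = ((b1 & 0x0f) << 2) | (b2 >> 6)
  //   s3 =   b2 & 0x3f
  // with per-word variable shifts (the right/left shift masks) and the
  // 0x3f-patterned and-mask isolating the 6-bit indices in parallel.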

  // Code for generating Base64 encoding.
  // Intrinsic function prototype in Base64.java:
  // private void encodeBlock(byte[] src, int sp, int sl, byte[] dst, int dp, boolean isURL)
  address generate_base64_encodeBlock() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "implEncode");
    address start = __ pc();
    __ enter();

    // Save callee-saved registers before using them
    __ push(r12);
    __ push(r13);
    __ push(r14);
    __ push(r15);

    // arguments
    const Register source       = c_rarg0; // Source Array
    const Register start_offset = c_rarg1; // start offset
    const Register end_offset   = c_rarg2; // end offset
    const Register dest         = c_rarg3; // destination array

#ifndef _WIN64
    const Register dp    = c_rarg4; // Position for writing to dest array
    const Register isURL = c_rarg5; // Base64 or URL character set
#else
    const Address dp_mem(rbp, 6 * wordSize);    // length is on stack on Win64
    const Address isURL_mem(rbp, 7 * wordSize);
    const Register isURL = r10;     // pick the volatile windows register
    const Register dp = r12;
    __ movl(dp, dp_mem);
    __ movl(isURL, isURL_mem);
#endif

    const Register length = r14;
    Label L_process80, L_process32, L_process3, L_exit, L_processdata;

    // calculate length from offsets
    __ movl(length, end_offset);
    __ subl(length, start_offset);
    __ cmpl(length, 0);
    __ jcc(Assembler::lessEqual, L_exit);

    __ lea(r11, ExternalAddress(StubRoutines::x86::base64_charset_addr()));
    // check if the base64 charset (isURL = 0) or the base64 url charset (isURL = 1) needs to be loaded
    __ cmpl(isURL, 0);
    __ jcc(Assembler::equal, L_processdata);
    __ lea(r11, ExternalAddress(StubRoutines::x86::base64url_charset_addr()));

    // load masks required for encoding data
    __ BIND(L_processdata);
    __ movdqu(xmm16, ExternalAddress(StubRoutines::x86::base64_gather_mask_addr()));
    // Set 64 bits of K register.
    __ evpcmpeqb(k3, xmm16, xmm16, Assembler::AVX_512bit);
    __ evmovdquq(xmm12, ExternalAddress(StubRoutines::x86::base64_bswap_mask_addr()), Assembler::AVX_256bit, r13);
    __ evmovdquq(xmm13, ExternalAddress(StubRoutines::x86::base64_right_shift_mask_addr()), Assembler::AVX_512bit, r13);
    __ evmovdquq(xmm14, ExternalAddress(StubRoutines::x86::base64_left_shift_mask_addr()), Assembler::AVX_512bit, r13);
    __ evmovdquq(xmm15, ExternalAddress(StubRoutines::x86::base64_and_mask_addr()), Assembler::AVX_512bit, r13);

    // Vector Base64 implementation, producing 96 bytes of encoded data
    __ BIND(L_process80);
    __ cmpl(length, 80);
    __ jcc(Assembler::below, L_process32);
    __ evmovdquq(xmm0, Address(source, start_offset, Address::times_1, 0), Assembler::AVX_256bit);
    __ evmovdquq(xmm1, Address(source, start_offset, Address::times_1, 24), Assembler::AVX_256bit);
    __ evmovdquq(xmm2, Address(source, start_offset, Address::times_1, 48), Assembler::AVX_256bit);

    // permute the input data in such a manner that we have continuity of the source
    __ vpermq(xmm3, xmm0, 148, Assembler::AVX_256bit);
    __ vpermq(xmm4, xmm1, 148, Assembler::AVX_256bit);
    __ vpermq(xmm5, xmm2, 148, Assembler::AVX_256bit);

    // shuffle the input to group 3 bytes of data, adding 0 as the 4th byte.
    // we can deal with 12 bytes at a time in a 128 bit register
    __ vpshufb(xmm3, xmm3, xmm12, Assembler::AVX_256bit);
    __ vpshufb(xmm4, xmm4, xmm12, Assembler::AVX_256bit);
    __ vpshufb(xmm5, xmm5, xmm12, Assembler::AVX_256bit);

    // convert byte to word. Each 128 bit register will have 6 bytes for processing
    __ vpmovzxbw(xmm3, xmm3, Assembler::AVX_512bit);
    __ vpmovzxbw(xmm4, xmm4, Assembler::AVX_512bit);
    __ vpmovzxbw(xmm5, xmm5, Assembler::AVX_512bit);

    // Extract bits in the pattern 6, 4+2, 2+4, 6 to convert three 8-bit numbers to four 6-bit numbers
    __ evpsrlvw(xmm0, xmm3, xmm13, Assembler::AVX_512bit);
    __ evpsrlvw(xmm1, xmm4, xmm13, Assembler::AVX_512bit);
    __ evpsrlvw(xmm2, xmm5, xmm13, Assembler::AVX_512bit);

    __ evpsllvw(xmm3, xmm3, xmm14, Assembler::AVX_512bit);
    __ evpsllvw(xmm4, xmm4, xmm14, Assembler::AVX_512bit);
    __ evpsllvw(xmm5, xmm5, xmm14, Assembler::AVX_512bit);

    __ vpsrlq(xmm0, xmm0, 8, Assembler::AVX_512bit);
    __ vpsrlq(xmm1, xmm1, 8, Assembler::AVX_512bit);
    __ vpsrlq(xmm2, xmm2, 8, Assembler::AVX_512bit);

    __ vpsllq(xmm3, xmm3, 8, Assembler::AVX_512bit);
    __ vpsllq(xmm4, xmm4, 8, Assembler::AVX_512bit);
    __ vpsllq(xmm5, xmm5, 8, Assembler::AVX_512bit);

    __ vpandq(xmm3, xmm3, xmm15, Assembler::AVX_512bit);
    __ vpandq(xmm4, xmm4, xmm15, Assembler::AVX_512bit);
    __ vpandq(xmm5, xmm5, xmm15, Assembler::AVX_512bit);

    // Get the final 4*6 bits base64 encoding
    __ vporq(xmm3, xmm3, xmm0, Assembler::AVX_512bit);
    __ vporq(xmm4, xmm4, xmm1, Assembler::AVX_512bit);
    __ vporq(xmm5, xmm5, xmm2, Assembler::AVX_512bit);

    // Shift
    __ vpsrlq(xmm3, xmm3, 8, Assembler::AVX_512bit);
    __ vpsrlq(xmm4, xmm4, 8, Assembler::AVX_512bit);
    __ vpsrlq(xmm5, xmm5, 8, Assembler::AVX_512bit);

    // look up 6 bits in the base64 character set to fetch the encoding
    // we are converting word to dword as gather instructions need dword indices for looking up encoding
    __ vextracti64x4(xmm6, xmm3, 0);
    __ vpmovzxwd(xmm0, xmm6, Assembler::AVX_512bit);
    __ vextracti64x4(xmm6, xmm3, 1);
    __ vpmovzxwd(xmm1, xmm6, Assembler::AVX_512bit);

    __ vextracti64x4(xmm6, xmm4, 0);
    __ vpmovzxwd(xmm2, xmm6, Assembler::AVX_512bit);
    __ vextracti64x4(xmm6, xmm4, 1);
    __ vpmovzxwd(xmm3, xmm6, Assembler::AVX_512bit);

    __ vextracti64x4(xmm4, xmm5, 0);
    __ vpmovzxwd(xmm6, xmm4, Assembler::AVX_512bit);

    __ vextracti64x4(xmm4, xmm5, 1);
    __ vpmovzxwd(xmm7, xmm4, Assembler::AVX_512bit);

    __ kmovql(k2, k3);
    __ evpgatherdd(xmm4, k2, Address(r11, xmm0, Address::times_4, 0), Assembler::AVX_512bit);
    __ kmovql(k2, k3);
    __ evpgatherdd(xmm5, k2, Address(r11, xmm1, Address::times_4, 0), Assembler::AVX_512bit);
    __ kmovql(k2, k3);
    __ evpgatherdd(xmm8, k2, Address(r11, xmm2, Address::times_4, 0), Assembler::AVX_512bit);
    __ kmovql(k2, k3);
    __ evpgatherdd(xmm9, k2, Address(r11, xmm3, Address::times_4, 0), Assembler::AVX_512bit);
    __ kmovql(k2, k3);
    __ evpgatherdd(xmm10, k2, Address(r11, xmm6, Address::times_4, 0), Assembler::AVX_512bit);
    __ kmovql(k2, k3);
    __ evpgatherdd(xmm11, k2, Address(r11, xmm7, Address::times_4, 0), Assembler::AVX_512bit);

    // Down convert dword to byte. Final output is 16*6 = 96 bytes long
    __ evpmovdb(Address(dest, dp, Address::times_1, 0),  xmm4,  Assembler::AVX_512bit);
    __ evpmovdb(Address(dest, dp, Address::times_1, 16), xmm5,  Assembler::AVX_512bit);
    __ evpmovdb(Address(dest, dp, Address::times_1, 32), xmm8,  Assembler::AVX_512bit);
    __ evpmovdb(Address(dest, dp, Address::times_1, 48), xmm9,  Assembler::AVX_512bit);
    __ evpmovdb(Address(dest, dp, Address::times_1, 64), xmm10, Assembler::AVX_512bit);
    __ evpmovdb(Address(dest, dp, Address::times_1, 80), xmm11, Assembler::AVX_512bit);

    __ addq(dest, 96);
    __ addq(source, 72);
    __ subq(length, 72);
    __ jmp(L_process80);
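
    // Loop sizing: the loop above consumes 72 input bytes per iteration and
    // emits 96 encoded bytes; the loop below consumes 24 and emits 32; the
    // scalar tail handles the remaining input 3 bytes (4 output bytes) at a time.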

    // Vector Base64 implementation generating 32 bytes of encoded data
    __ BIND(L_process32);
    __ cmpl(length, 32);
    __ jcc(Assembler::below, L_process3);
    __ evmovdquq(xmm0, Address(source, start_offset), Assembler::AVX_256bit);
    __ vpermq(xmm0, xmm0, 148, Assembler::AVX_256bit);
    __ vpshufb(xmm6, xmm0, xmm12, Assembler::AVX_256bit);
    __ vpmovzxbw(xmm6, xmm6, Assembler::AVX_512bit);
    __ evpsrlvw(xmm2, xmm6, xmm13, Assembler::AVX_512bit);
    __ evpsllvw(xmm3, xmm6, xmm14, Assembler::AVX_512bit);

    __ vpsrlq(xmm2, xmm2, 8, Assembler::AVX_512bit);
    __ vpsllq(xmm3, xmm3, 8, Assembler::AVX_512bit);
    __ vpandq(xmm3, xmm3, xmm15, Assembler::AVX_512bit);
    __ vporq(xmm1, xmm2, xmm3, Assembler::AVX_512bit);
    __ vpsrlq(xmm1, xmm1, 8, Assembler::AVX_512bit);
    __ vextracti64x4(xmm9, xmm1, 0);
    __ vpmovzxwd(xmm6, xmm9, Assembler::AVX_512bit);
    __ vextracti64x4(xmm9, xmm1, 1);
    __ vpmovzxwd(xmm5, xmm9, Assembler::AVX_512bit);
    __ kmovql(k2, k3);
    __ evpgatherdd(xmm8, k2, Address(r11, xmm6, Address::times_4, 0), Assembler::AVX_512bit);
    __ kmovql(k2, k3);
    __ evpgatherdd(xmm10, k2, Address(r11, xmm5, Address::times_4, 0), Assembler::AVX_512bit);
    __ evpmovdb(Address(dest, dp, Address::times_1, 0),  xmm8,  Assembler::AVX_512bit);
    __ evpmovdb(Address(dest, dp, Address::times_1, 16), xmm10, Assembler::AVX_512bit);
    __ subq(length, 24);
    __ addq(dest, 32);
    __ addq(source, 24);
    __ jmp(L_process32);

    // Scalar data processing takes 3 bytes at a time and produces 4 bytes of encoded data
    /* This code corresponds to the scalar version of the following snippet in Base64.java
    ** int bits = (src[sp0++] & 0xff) << 16 | (src[sp0++] & 0xff) << 8 | (src[sp0++] & 0xff);
    ** dst[dp0++] = (byte)base64[(bits >>> 18) & 0x3f];
    ** dst[dp0++] = (byte)base64[(bits >>> 12) & 0x3f];
    ** dst[dp0++] = (byte)base64[(bits >>> 6)  & 0x3f];
    ** dst[dp0++] = (byte)base64[bits & 0x3f]; */
    __ BIND(L_process3);
    __ cmpl(length, 3);
    __ jcc(Assembler::below, L_exit);
    // Read 1 byte at a time
    __ movzbl(rax, Address(source, start_offset));
    __ shll(rax, 0x10);
    __ movl(r15, rax);
    __ movzbl(rax, Address(source, start_offset, Address::times_1, 1));
    __ shll(rax, 0x8);
    __ movzwl(rax, rax);
    __ orl(r15, rax);
    __ movzbl(rax, Address(source, start_offset, Address::times_1, 2));
    __ orl(rax, r15);
    // Save 3 bytes read in r15
    __ movl(r15, rax);
    __ shrl(rax, 0x12);
    __ andl(rax, 0x3f);
    // rax contains the index, r11 contains the base64 lookup table
    __ movb(rax, Address(r11, rax, Address::times_4));
    // Write the encoded byte to destination
    __ movb(Address(dest, dp, Address::times_1, 0), rax);
    __ movl(rax, r15);
    __ shrl(rax, 0xc);
    __ andl(rax, 0x3f);
    __ movb(rax, Address(r11, rax, Address::times_4));
    __ movb(Address(dest, dp, Address::times_1, 1), rax);
    __ movl(rax, r15);
    __ shrl(rax, 0x6);
    __ andl(rax, 0x3f);
    __ movb(rax, Address(r11, rax, Address::times_4));
    __ movb(Address(dest, dp, Address::times_1, 2), rax);
    __ movl(rax, r15);
    __ andl(rax, 0x3f);
    __ movb(rax, Address(r11, rax, Address::times_4));
    __ movb(Address(dest, dp, Address::times_1, 3), rax);
    __ subl(length, 3);
    __ addq(dest, 4);
    __ addq(source, 3);
    __ jmp(L_process3);

    __ BIND(L_exit);
    __ pop(r15);
    __ pop(r14);
    __ pop(r13);
    __ pop(r12);
    __ leave();
    __ ret(0);
    return start;
  }

  /**
   *  Arguments:
   *
   *  Inputs:
   *    c_rarg0   - int crc
   *    c_rarg1   - byte* buf
   *    c_rarg2   - int length
   *
   *  Output:
   *    rax       - int crc result
   */
  address generate_updateBytesCRC32() {
    assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    // rscratch1: r10
    const Register crc   = c_rarg0;  // crc
    const Register buf   = c_rarg1;  // source java byte array address
    const Register len   = c_rarg2;  // length
    const Register table = c_rarg3;  // crc_table address (reuse register)
    const Register tmp   = r11;
    assert_different_registers(crc, buf, len, table, tmp, rax);

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ kernel_crc32(crc, buf, len, table, tmp);

    __ movl(rax, crc);
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  /**
   *  Arguments:
   *
   *  Inputs:
   *    c_rarg0   - int crc
   *    c_rarg1   - byte* buf
   *    c_rarg2   - long length
   *    c_rarg3   - table_start - optional (present only when doing a library_call,
   *                not used by x86 algorithm)
   *
   *  Output:
   *    rax       - int crc result
   */
  address generate_updateBytesCRC32C(bool is_pclmulqdq_supported) {
    assert(UseCRC32CIntrinsics, "need SSE4_2");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C");
    address start = __ pc();
    // reg.arg   int#0  int#1  int#2  int#3  int#4  int#5  float regs
    // Windows   RCX    RDX    R8     R9     none   none   XMM0..XMM3
    // Lin / Sol RDI    RSI    RDX    RCX    R8     R9     XMM0..XMM7
    const Register crc = c_rarg0;  // crc
    const Register buf = c_rarg1;  // source java byte array address
    const Register len = c_rarg2;  // length
    const Register a = rax;
    const Register j = r9;
    const Register k = r10;
    const Register l = r11;
#ifdef _WIN64
    const Register y = rdi;
    const Register z = rsi;
#else
    const Register y = rcx;
    const Register z = r8;
#endif
    assert_different_registers(crc, buf, len, a, j, k, l, y, z);

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame
#ifdef _WIN64
    __ push(y);
    __ push(z);
#endif
    __ crc32c_ipl_alg2_alt2(crc, buf, len,
                            a, j, k,
                            l, y, z,
                            c_farg0, c_farg1, c_farg2,
                            is_pclmulqdq_supported);
    __ movl(rax, crc);
#ifdef _WIN64
    __ pop(z);
    __ pop(y);
#endif
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
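
  // For reference, a bitwise model of what the two CRC stubs compute over
  // the buffer (names illustrative; complement handling at entry and exit
  // elided). CRC32 uses the reflected polynomial 0xEDB88320, CRC32C uses
  // 0x82F63B78; the stubs merely compute the same function many bytes at a
  // time with CLMUL/CRC32 instructions:
  //
  //   while (len--) {
  //     crc ^= *buf++;
  //     for (int i = 0; i < 8; i++)
  //       crc = (crc >> 1) ^ (poly & (0 - (crc & 1)));
  //   }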

  /**
   *  Arguments:
   *
   *  Input:
   *    c_rarg0   - x address
   *    c_rarg1   - x length
   *    c_rarg2   - y address
   *    c_rarg3   - y length
   *   not Win64
   *    c_rarg4   - z address
   *    c_rarg5   - z length
   *   Win64
   *    rsp+40    - z address
   *    rsp+48    - z length
   */
  address generate_multiplyToLen() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "multiplyToLen");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register x     = rdi;
    const Register xlen  = rax;
    const Register y     = rsi;
    const Register ylen  = rcx;
    const Register z     = r8;
    const Register zlen  = r11;

    // Next registers will be saved on stack in multiply_to_len().
    const Register tmp1  = r12;
    const Register tmp2  = r13;
    const Register tmp3  = r14;
    const Register tmp4  = r15;
    const Register tmp5  = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifndef _WIN64
    __ movptr(zlen, r9); // Save r9 in r11 - zlen
#endif
    setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx
                       // ylen => rcx, z => r8, zlen => r11
                       // r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
    // last 2 arguments (#4, #5) are on stack on Win64
    __ movptr(z, Address(rsp, 6 * wordSize));
    __ movptr(zlen, Address(rsp, 7 * wordSize));
#endif

    __ movptr(xlen, rsi);
    __ movptr(y, rdx);
    __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5);

    restore_arg_regs();

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
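
  // multiplyToLen backs BigInteger's multiplyToLen intrinsic: it forms the
  // full schoolbook product z = x * y over arrays of 32-bit limbs stored
  // most-significant limb first, writing xlen + ylen result limbs to z.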

  /**
   *  Arguments:
   *
   *  Input:
   *    c_rarg0   - obja     address
   *    c_rarg1   - objb     address
   *    c_rarg2   - length   length
   *    c_rarg3   - scale    log2_array_indxscale
   *
   *  Output:
   *    rax       - int >= mismatched index, < 0 bitwise complement of tail
   */
  address generate_vectorizedMismatch() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "vectorizedMismatch");
    address start = __ pc();

    BLOCK_COMMENT("Entry:");
    __ enter();

#ifdef _WIN64  // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register scale = c_rarg0;  // rcx, will exchange with r9
    const Register objb = c_rarg1;   // rdx
    const Register length = c_rarg2; // r8
    const Register obja = c_rarg3;   // r9
    __ xchgq(obja, scale);  // now obja and scale contain the correct contents

    const Register tmp1 = r10;
    const Register tmp2 = r11;
#endif
#ifndef _WIN64 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register obja = c_rarg0;   // U:rdi
    const Register objb = c_rarg1;   // U:rsi
    const Register length = c_rarg2; // U:rdx
    const Register scale = c_rarg3;  // U:rcx
    const Register tmp1 = r8;
    const Register tmp2 = r9;
#endif
    const Register result = rax; // return value
    const XMMRegister vec0 = xmm0;
    const XMMRegister vec1 = xmm1;
    const XMMRegister vec2 = xmm2;

    __ vectorized_mismatch(obja, objb, length, scale, result, tmp1, tmp2, vec0, vec1, vec2);

    __ vzeroupper();
    __ leave();
    __ ret(0);

    return start;
  }
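
  // vectorizedMismatch backs ArraysSupport.vectorizedMismatch: rax reports
  // the index of the first mismatching element when one is found, otherwise
  // the bitwise complement (a negative value) of the number of tail
  // elements the vectorized comparison did not examine.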

  /**
   *  Arguments:
   *
   *  Input:
   *    c_rarg0   - x address
   *    c_rarg1   - x length
   *    c_rarg2   - z address
   *    c_rarg3   - z length
   */
  address generate_squareToLen() {

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "squareToLen");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...)
    const Register x      = rdi;
    const Register len    = rsi;
    const Register z      = r8;
    const Register zlen   = rcx;

    const Register tmp1   = r12;
    const Register tmp2   = r13;
    const Register tmp3   = r14;
    const Register tmp4   = r15;
    const Register tmp5   = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    setup_arg_regs(4); // x => rdi, len => rsi, z => rdx
                       // zlen => rcx
                       // r9 and r10 may be used to save non-volatile registers
    __ movptr(r8, rdx);
    __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax);

    restore_arg_regs();

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
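
  // squareToLen backs BigInteger's squareToLen intrinsic: z = x * x over
  // 32-bit limbs, producing 2 * len result limbs. Squaring is handled
  // separately from multiplyToLen because roughly half of the cross
  // products can be computed once and doubled.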

  address generate_method_entry_barrier() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "nmethod_entry_barrier");

    Label deoptimize_label;

    address start = __ pc();

    __ push(-1); // cookie, this is used for writing the new rsp when deoptimizing

    BLOCK_COMMENT("Entry:");
    __ enter(); // save rbp

    // save c_rarg0, because we want to use that value.
    // We could do without it but then we depend on the number of slots used by pusha
    __ push(c_rarg0);

    __ lea(c_rarg0, Address(rsp, wordSize * 3)); // 1 for cookie, 1 for rbp, 1 for c_rarg0 - this should be the return address

    __ pusha();

    // The method may have floats as arguments, and we must spill them before calling
    // the VM runtime.
    assert(Argument::n_float_register_parameters_j == 8, "Assumption");
    const int xmm_size = wordSize * 2;
    const int xmm_spill_size = xmm_size * Argument::n_float_register_parameters_j;
    __ subptr(rsp, xmm_spill_size);
    __ movdqu(Address(rsp, xmm_size * 7), xmm7);
    __ movdqu(Address(rsp, xmm_size * 6), xmm6);
    __ movdqu(Address(rsp, xmm_size * 5), xmm5);
    __ movdqu(Address(rsp, xmm_size * 4), xmm4);
    __ movdqu(Address(rsp, xmm_size * 3), xmm3);
    __ movdqu(Address(rsp, xmm_size * 2), xmm2);
    __ movdqu(Address(rsp, xmm_size * 1), xmm1);
    __ movdqu(Address(rsp, xmm_size * 0), xmm0);

    __ call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(address*)>(BarrierSetNMethod::nmethod_stub_entry_barrier)), 1);

    __ movdqu(xmm0, Address(rsp, xmm_size * 0));
    __ movdqu(xmm1, Address(rsp, xmm_size * 1));
    __ movdqu(xmm2, Address(rsp, xmm_size * 2));
    __ movdqu(xmm3, Address(rsp, xmm_size * 3));
    __ movdqu(xmm4, Address(rsp, xmm_size * 4));
    __ movdqu(xmm5, Address(rsp, xmm_size * 5));
    __ movdqu(xmm6, Address(rsp, xmm_size * 6));
    __ movdqu(xmm7, Address(rsp, xmm_size * 7));
    __ addptr(rsp, xmm_spill_size);

    __ cmpl(rax, 1); // 1 means deoptimize
    __ jcc(Assembler::equal, deoptimize_label);

    __ popa();
    __ pop(c_rarg0);

    __ leave();

    __ addptr(rsp, 1 * wordSize); // cookie
    __ ret(0);

    __ BIND(deoptimize_label);

    __ popa();
    __ pop(c_rarg0);

    __ leave();

    // this can be taken out, but is good for verification purposes. getting a SIGSEGV
    // here while still having a correct stack is valuable
    __ testptr(rsp, Address(rsp, 0));

    __ movptr(rsp, Address(rsp, 0)); // new rsp was written in the barrier
    __ jmp(Address(rsp, -1 * wordSize)); // jmp target should be the caller's verified_entry_point

    return start;
  }
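
  // A sketch of the frame the barrier stub builds before calling into the
  // runtime (grows downward; derived from the pushes above):
  //   [ return address  ]  <- what c_rarg0 is pointed at
  //   [ cookie (-1)     ]     rewritten with the new rsp if we deoptimize
  //   [ saved rbp       ]
  //   [ saved c_rarg0   ]
  //   [ pusha registers ]
  //   [ xmm0-xmm7 spill ]  <- rsp at the call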

  /**
   *  Arguments:
   *
   *  Input:
   *    c_rarg0   - out address
   *    c_rarg1   - in address
   *    c_rarg2   - offset
   *    c_rarg3   - len
   *   not Win64
   *    c_rarg4   - k
   *   Win64
   *    rsp+40    - k
   */
  address generate_mulAdd() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "mulAdd");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register out     = rdi;
    const Register in      = rsi;
    const Register offset  = r11;
    const Register len     = rcx;
    const Register k       = r8;

    // Next registers will be saved on stack in mul_add().
    const Register tmp1  = r12;
    const Register tmp2  = r13;
    const Register tmp3  = r14;
    const Register tmp4  = r15;
    const Register tmp5  = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx
                       // len => rcx, k => r8
                       // r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
    // last argument is on stack on Win64
    __ movl(k, Address(rsp, 6 * wordSize));
#endif
    __ movptr(r11, rdx);  // move offset in rdx to offset(r11)
    __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax);

    restore_arg_regs();

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
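
  // mulAdd backs BigInteger's mulAdd intrinsic, the inner step of its
  // multiply and square loops: it multiplies a len-limb slice of in by the
  // 32-bit value k, adds the product into out at the given offset, and
  // returns the final carry in rax.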

  address generate_libmExp() {
    StubCodeMark mark(this, "StubRoutines", "libmExp");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ fast_exp(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp);

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmLog() {
    StubCodeMark mark(this, "StubRoutines", "libmLog");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r11;
    const Register tmp2 = r8;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ fast_log(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2);

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmLog10() {
    StubCodeMark mark(this, "StubRoutines", "libmLog10");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ fast_log10(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp);

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmPow() {
    StubCodeMark mark(this, "StubRoutines", "libmPow");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r8;
    const Register tmp2 = r9;
    const Register tmp3 = r10;
    const Register tmp4 = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ fast_pow(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4);

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmSin() {
    StubCodeMark mark(this, "StubRoutines", "libmSin");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r8;
    const Register tmp2 = r9;
    const Register tmp3 = r10;
    const Register tmp4 = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef _WIN64
    __ push(rsi);
    __ push(rdi);
#endif
    __ fast_sin(x0, x1, x2, x3, x4, x5, x6, x7, rax, rbx, rcx, rdx, tmp1, tmp2, tmp3, tmp4);

#ifdef _WIN64
    __ pop(rdi);
    __ pop(rsi);
#endif

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmCos() {
    StubCodeMark mark(this, "StubRoutines", "libmCos");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r8;
    const Register tmp2 = r9;
    const Register tmp3 = r10;
    const Register tmp4 = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef _WIN64
    __ push(rsi);
    __ push(rdi);
#endif
    __ fast_cos(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4);

#ifdef _WIN64
    __ pop(rdi);
    __ pop(rsi);
#endif

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmTan() {
    StubCodeMark mark(this, "StubRoutines", "libmTan");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r8;
    const Register tmp2 = r9;
    const Register tmp3 = r10;
    const Register tmp4 = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef _WIN64
    __ push(rsi);
    __ push(rdi);
#endif
    __ fast_tan(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4);

#ifdef _WIN64
    __ pop(rdi);
    __ pop(rsi);
#endif

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
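
  // All seven libm stubs above share the same shape: the double argument
  // arrives in xmm0 and the result is returned in xmm0, while the fast_*
  // routines use the remaining xmm and integer registers as scratch.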

#undef __
#define __ masm->

  // Continuation point for throwing of implicit exceptions that are
  // not handled in the current activation. Fabricates an exception
  // oop and initiates normal exception dispatching in this
  // frame. Since we need to preserve callee-saved values (currently
  // only for C2, but done for C1 as well) we need a callee-saved oop
  // map and therefore have to make these stubs into RuntimeStubs
  // rather than BufferBlobs. If the compiler needs all registers to
  // be preserved between the fault point and the exception handler
  // then it must assume responsibility for that in
  // AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other
  // implicit exceptions (e.g., NullPointerException or
  // AbstractMethodError on entry) are either at call sites or
  // otherwise assume that stack unwinding will be initiated, so
  // caller saved registers were assumed volatile in the compiler.
  address generate_throw_exception(const char* name,
                                   address runtime_entry,
                                   Register arg1 = noreg,
                                   Register arg2 = noreg) {
    // Information about frame layout at time of blocking runtime call.
    // Note that we only have to preserve callee-saved registers since
    // the compilers are responsible for supplying a continuation point
    // if they expect all registers to be preserved.
    enum layout {
      rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
      rbp_off2,
      return_off,
      return_off2,
      framesize // inclusive of return address
    };

    int insts_size = 512;
    int locs_size  = 64;

    CodeBuffer code(name, insts_size, locs_size);
    OopMapSet* oop_maps  = new OopMapSet();
    MacroAssembler* masm = new MacroAssembler(&code);

    address start = __ pc();

    // This is an inlined and slightly modified version of call_VM
    // which has the ability to fetch the return PC out of
    // thread-local storage and also sets up last_Java_sp slightly
    // differently than the real call_VM

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    assert(is_even(framesize/2), "sp not 16-byte aligned");

    // return address and rbp are already in place
    __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog

    int frame_complete = __ pc() - start;

    // Set up last_Java_sp and last_Java_fp
    address the_pc = __ pc();
    __ set_last_Java_frame(rsp, rbp, the_pc);
    __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack

    // Call runtime
    if (arg1 != noreg) {
      assert(arg2 != c_rarg1, "clobbered");
      __ movptr(c_rarg1, arg1);
    }
    if (arg2 != noreg) {
      __ movptr(c_rarg2, arg2);
    }
    __ movptr(c_rarg0, r15_thread);
    BLOCK_COMMENT("call runtime_entry");
    __ call(RuntimeAddress(runtime_entry));

    // Generate oop map
    OopMap* map = new OopMap(framesize, 0);

    oop_maps->add_gc_map(the_pc - start, map);

    __ reset_last_Java_frame(true);

    __ leave(); // required for proper stackwalking of RuntimeStub frame

    // check for pending exceptions
#ifdef ASSERT
    Label L;
    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()),
              (int32_t) NULL_WORD);
    __ jcc(Assembler::notEqual, L);
    __ should_not_reach_here();
    __ bind(L);
#endif // ASSERT
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

    // codeBlob framesize is in words (not VMRegImpl::slot_size)
    RuntimeStub* stub =
      RuntimeStub::new_runtime_stub(name,
                                    &code,
                                    frame_complete,
                                    (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                    oop_maps, false);
    return stub->entry_point();
  }

  void create_control_words() {
    // Round to nearest, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_std   = 0x027F;
    // Round to zero, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
    // Round to nearest, 24-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_24    = 0x007F;
    // Round to nearest, exceptions masked
    StubRoutines::_mxcsr_std           = 0x1F80;

    // Note: the following two constants are 80-bit values
    //       layout is critical for correct loading by FPU.
    // Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias1[0] = 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias1[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias1[2] = 0x03ff;
    // Un-Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias2[0] = 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias2[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias2[2] = 0x7bff;
  }
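
  // Decoding _mxcsr_std above: 0x1F80 sets all six SSE exception mask bits
  // (bits 7-12), leaves rounding control (bits 13-14) at 00 = round to
  // nearest, and leaves flush-to-zero off; this is the ABI-default MXCSR
  // value the VM expects.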

  // Initialization
  void generate_initial() {
    // Generates all stubs and initializes the entry points

    // These platform-specific settings are needed by generate_call_stub()
    create_control_words();

    // entry points that exist in all platforms. Note: This is code
    // that could be shared among different platforms - however the
    // benefit seems to be smaller than the disadvantage of having a
    // much more complicated generator structure. See also comment in
    // stubRoutines.hpp.

    StubRoutines::_forward_exception_entry = generate_forward_exception();

    StubRoutines::_call_stub_entry =
      generate_call_stub(StubRoutines::_call_stub_return_address);

    // is referenced by megamorphic call
    StubRoutines::_catch_exception_entry = generate_catch_exception();

    // atomic calls
    StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
    StubRoutines::_atomic_xchg_long_entry    = generate_atomic_xchg_long();
    StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
    StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
    StubRoutines::_atomic_add_entry          = generate_atomic_add();
    StubRoutines::_atomic_add_long_entry     = generate_atomic_add_long();
    StubRoutines::_fence_entry               = generate_orderaccess_fence();

    // platform dependent
    StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
    StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();

    StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();

    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_StackOverflowError));
    StubRoutines::_throw_delayed_StackOverflowError_entry =
      generate_throw_exception("delayed StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_delayed_StackOverflowError));
    if (UseCRC32Intrinsics) {
      // set table address before stub generation, which uses it
      StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
    }

    if (UseCRC32CIntrinsics) {
      bool supports_clmul = VM_Version::supports_clmul();
      StubRoutines::x86::generate_CRC32C_table(supports_clmul);
      StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
      StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
    }
    if (VM_Version::supports_sse2() && UseLibmIntrinsic && InlineIntrinsics) {
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::x86::_ONEHALF_adr = (address)StubRoutines::x86::_ONEHALF;
        StubRoutines::x86::_P_2_adr = (address)StubRoutines::x86::_P_2;
        StubRoutines::x86::_SC_4_adr = (address)StubRoutines::x86::_SC_4;
        StubRoutines::x86::_Ctable_adr = (address)StubRoutines::x86::_Ctable;
        StubRoutines::x86::_SC_2_adr = (address)StubRoutines::x86::_SC_2;
        StubRoutines::x86::_SC_3_adr = (address)StubRoutines::x86::_SC_3;
        StubRoutines::x86::_SC_1_adr = (address)StubRoutines::x86::_SC_1;
        StubRoutines::x86::_PI_INV_TABLE_adr = (address)StubRoutines::x86::_PI_INV_TABLE;
        StubRoutines::x86::_PI_4_adr = (address)StubRoutines::x86::_PI_4;
        StubRoutines::x86::_PI32INV_adr = (address)StubRoutines::x86::_PI32INV;
        StubRoutines::x86::_SIGN_MASK_adr = (address)StubRoutines::x86::_SIGN_MASK;
        StubRoutines::x86::_P_1_adr = (address)StubRoutines::x86::_P_1;
        StubRoutines::x86::_P_3_adr = (address)StubRoutines::x86::_P_3;
        StubRoutines::x86::_NEG_ZERO_adr = (address)StubRoutines::x86::_NEG_ZERO;
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dexp)) {
        StubRoutines::_dexp = generate_libmExp();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
        StubRoutines::_dlog = generate_libmLog();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) {
        StubRoutines::_dlog10 = generate_libmLog10();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) {
        StubRoutines::_dpow = generate_libmPow();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
        StubRoutines::_dsin = generate_libmSin();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
        StubRoutines::_dcos = generate_libmCos();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::_dtan = generate_libmTan();
      }
    }
  }

  void generate_all() {
    // Generates all stubs and initializes the entry points

    // These entry points require SharedInfo::stack0 to be set up in
    // non-core builds and need to be relocatable, so they each
    // fabricate a RuntimeStub internally.
    StubRoutines::_throw_AbstractMethodError_entry =
      generate_throw_exception("AbstractMethodError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_AbstractMethodError));

    StubRoutines::_throw_IncompatibleClassChangeError_entry =
      generate_throw_exception("IncompatibleClassChangeError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_IncompatibleClassChangeError));

    StubRoutines::_throw_NullPointerException_at_call_entry =
      generate_throw_exception("NullPointerException at call throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_NullPointerException_at_call));

    // entry points that are platform specific
    StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
    StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
    StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
    StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();

    StubRoutines::x86::_float_sign_mask  = generate_fp_mask("float_sign_mask",  0x7FFFFFFF7FFFFFFF);
    StubRoutines::x86::_float_sign_flip  = generate_fp_mask("float_sign_flip",  0x8000000080000000);
    StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
    StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);
    StubRoutines::x86::_vector_float_sign_mask = generate_vector_mask("vector_float_sign_mask", 0x7FFFFFFF7FFFFFFF);
    StubRoutines::x86::_vector_float_sign_flip = generate_vector_mask("vector_float_sign_flip", 0x8000000080000000);
    StubRoutines::x86::_vector_double_sign_mask = generate_vector_mask("vector_double_sign_mask", 0x7FFFFFFFFFFFFFFF);
    StubRoutines::x86::_vector_double_sign_flip = generate_vector_mask("vector_double_sign_flip", 0x8000000000000000);
    StubRoutines::x86::_vector_short_to_byte_mask = generate_vector_mask("vector_short_to_byte_mask", 0x00ff00ff00ff00ff);
    StubRoutines::x86::_vector_byte_perm_mask = generate_vector_byte_perm_mask("vector_byte_perm_mask");
    StubRoutines::x86::_vector_long_sign_mask = generate_vector_mask("vector_long_sign_mask", 0x8000000000000000);

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

    // data cache line writeback
    StubRoutines::_data_cache_writeback = generate_data_cache_writeback();
    StubRoutines::_data_cache_writeback_sync = generate_data_cache_writeback_sync();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    // don't bother generating these AES intrinsic stubs unless global flag is set
    if (UseAESIntrinsics) {
      StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others
      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
      if (VM_Version::supports_vaes() && VM_Version::supports_avx512vl() && VM_Version::supports_avx512dq()) {
        StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptVectorAESCrypt();
        StubRoutines::_electronicCodeBook_encryptAESCrypt = generate_electronicCodeBook_encryptAESCrypt();
        StubRoutines::_electronicCodeBook_decryptAESCrypt = generate_electronicCodeBook_decryptAESCrypt();
      } else {
        StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
      }
    }
    if (UseAESCTRIntrinsics) {
      StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask();
      StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
    }

    if (UseSHA1Intrinsics) {
      StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
      StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
      StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress");
      StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB");
    }
    if (UseSHA256Intrinsics) {
      StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
      char* dst = (char*)StubRoutines::x86::_k256_W;
      char* src = (char*)StubRoutines::x86::_k256;
      for (int ii = 0; ii < 16; ++ii) {
        memcpy(dst + 32 * ii,      src + 16 * ii, 16);
        memcpy(dst + 32 * ii + 16, src + 16 * ii, 16);
      }
      StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask();
      StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress");
      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB");
    }
    if (UseSHA512Intrinsics) {
      StubRoutines::x86::_k512_W_addr = (address)StubRoutines::x86::_k512_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = generate_pshuffle_byte_flip_mask_sha512();
      StubRoutines::_sha512_implCompress = generate_sha512_implCompress(false, "sha512_implCompress");
      StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB");
    }

    // Generate GHASH intrinsics code
    if (UseGHASHIntrinsics) {
      StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();
      StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask();
      if (VM_Version::supports_avx()) {
        StubRoutines::x86::_ghash_shuffmask_addr = ghash_shufflemask_addr();
        StubRoutines::x86::_ghash_poly_addr = ghash_polynomial_addr();
        StubRoutines::_ghash_processBlocks = generate_avx_ghash_processBlocks();
      } else {
        StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
      }
    }

    if (UseBASE64Intrinsics) {
      StubRoutines::x86::_and_mask = base64_and_mask_addr();
      StubRoutines::x86::_bswap_mask = base64_bswap_mask_addr();
      StubRoutines::x86::_base64_charset = base64_charset_addr();
      StubRoutines::x86::_url_charset = base64url_charset_addr();
      StubRoutines::x86::_gather_mask = base64_gather_mask_addr();
      StubRoutines::x86::_left_shift_mask = base64_left_shift_mask_addr();
      StubRoutines::x86::_right_shift_mask = base64_right_shift_mask_addr();
      StubRoutines::_base64_encodeBlock = generate_base64_encodeBlock();
    }

    // Safefetch stubs.
    generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
                                                       &StubRoutines::_safefetch32_fault_pc,
                                                       &StubRoutines::_safefetch32_continuation_pc);
    generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                                                       &StubRoutines::_safefetchN_fault_pc,
                                                       &StubRoutines::_safefetchN_continuation_pc);

    BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
    if (bs_nm != NULL) {
      StubRoutines::x86::_method_entry_barrier = generate_method_entry_barrier();
    }
#ifdef COMPILER2
    if (UseMultiplyToLenIntrinsic) {
      StubRoutines::_multiplyToLen = generate_multiplyToLen();
    }
    if (UseSquareToLenIntrinsic) {
      StubRoutines::_squareToLen = generate_squareToLen();
    }
    if (UseMulAddIntrinsic) {
      StubRoutines::_mulAdd = generate_mulAdd();
    }
#ifndef _WINDOWS
    if (UseMontgomeryMultiplyIntrinsic) {
      StubRoutines::_montgomeryMultiply
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
    }
    if (UseMontgomerySquareIntrinsic) {
      StubRoutines::_montgomerySquare
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
    }
#endif // _WINDOWS
#endif // COMPILER2

    if (UseVectorizedMismatchIntrinsic) {
      StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch();
    }
  }

 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
}; // end class declaration

#define UCM_TABLE_MAX_ENTRIES 16
void StubGenerator_generate(CodeBuffer* code, bool all) {
  if (UnsafeCopyMemory::_table == NULL) {
    UnsafeCopyMemory::create_table(UCM_TABLE_MAX_ENTRIES);
  }
  StubGenerator g(code, all);
}