/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_ZGC
#include "gc/z/zThreadLocalData.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions

// Stub Code definitions

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    // This can destroy rscratch1 if counter is far from the code cache
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter) \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif

  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    c_rarg4:   (interpreter) entry point              address
  //    c_rarg5:   parameters                             intptr_t*
  //    16(rbp): parameter size (in words)                int
  //    24(rbp): thread                                   Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -12 [ argument word 1      ]
  // -11 [ saved r15            ] <--- rsp_after_call
  // -10 [ saved r14            ]
  //  -9 [ saved r13            ]
  //  -8 [ saved r12            ]
  //  -7 [ saved rbx            ]
  //  -6 [ call wrapper         ]
  //  -5 [ result               ]
  //  -4 [ result type          ]
  //  -3 [ method               ]
  //  -2 [ entry point          ]
  //  -1 [ parameters           ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ parameter size       ]
  //   3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    48(rbp): (interpreter) entry point                address
  //    56(rbp): parameters                               intptr_t*
  //    64(rbp): parameter size (in words)                int
  //    72(rbp): thread                                   Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -60 [ argument word 1      ]
  // -59 [ saved xmm31          ] <--- rsp_after_call
  //     [ saved xmm16-xmm30    ] (EVEX enabled, else the space is blank)
  // -27 [ saved xmm15          ]
  //     [ saved xmm7-xmm14     ]
  //  -9 [ saved xmm6           ] (each xmm register takes 2 slots)
  //  -7 [ saved r15            ]
  //  -6 [ saved r14            ]
  //  -5 [ saved r13            ]
  //  -4 [ saved r12            ]
  //  -3 [ saved rdi            ]
  //  -2 [ saved rsi            ]
  //  -1 [ saved rbx            ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ call wrapper         ]
  //   3 [ result               ]
  //   4 [ result type          ]
  //   5 [ method               ]
  //   6 [ entry point          ]
  //   7 [ parameters           ]
  //   8 [ parameter size       ]
  //   9 [ thread               ]
  //
  //    Windows reserves the caller's stack space for arguments 1-4.
  //    We spill c_rarg0-c_rarg3 to this space.

  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    xmm_save_first     = 6,  // save from xmm6
    xmm_save_last      = 31, // to xmm31
    xmm_save_base      = -9,
    rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -59
    r15_off            = -7,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    call_wrapper_off   =  2,
    result_off         =  3,
    result_type_off    =  4,
    method_off         =  5,
    entry_point_off    =  6,
    parameters_off     =  7,
    parameter_size_off =  8,
    thread_off         =  9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    parameter_size_off =  2,
    thread_off         =  3
#endif
  };

#ifdef _WIN64
  Address xmm_save(int reg) {
    assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
    return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
  }
#endif

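  // For orientation (a sketch, not a redeclaration): the layouts above back
  // the function pointer through which JavaCalls::call_helper enters Java,
  // declared in stubRoutines.hpp roughly as
  //
  //   typedef void (*CallStub)(address   link,              // call wrapper
  //                            intptr_t* result,
  //                            BasicType result_type,
  //                            Method*   method,
  //                            address   entry_point,       // interpreter entry
  //                            intptr_t* parameters,
  //                            int       size_of_parameters,
  //                            TRAPS);                      // current thread
  //
  // On Linux the first six arguments arrive in c_rarg0-c_rarg5 and the last
  // two on the stack; on Windows only c_rarg0-c_rarg3 exist, so everything
  // from entry_point onward arrives on the stack, as the offsets above show.
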
  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,   c_rarg5); // parameters
    __ movptr(entry_point,  c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);
    if (UseAVX > 2) {
      __ movl(rbx, 0xffff);
      __ kmovwl(k1, rbx);
    }
#ifdef _WIN64
    int last_reg = 15;
    if (UseAVX > 2) {
      last_reg = 31;
    }
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vextractf32x4(xmm_save(i), as_XMMRegister(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(xmm_save(i), as_XMMRegister(i));
      }
    }

    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);      // parameter pointer
    __ movl(c_rarg1, c_rarg3);           // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0)); // get parameter
    __ addptr(c_rarg2, wordSize);        // advance to next parameter
    __ decrementl(c_rarg1);              // decrement counter
    __ push(rax);                        // pass parameter
    __ jcc(Assembler::notZero, loop);

    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);              // get Method*
    __ movptr(c_rarg1, entry_point);     // get entry_point
    __ mov(r13, rsp);                    // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::call_stub: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::call_stub: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L3);
    }
#endif

    // restore regs belonging to calling function
#ifdef _WIN64
    // emit the restores for xmm regs
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vinsertf32x4(as_XMMRegister(i), as_XMMRegister(i), xmm_save(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(as_XMMRegister(i), xmm_save(i));
      }
    }
#endif
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ vzeroupper();
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }

  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to set up the
  // rsp.
  //
  // rax: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    address start = __ pc();

    // same as in generate_call_stub():
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
    const Address thread        (rbp, thread_off         * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::catch_exception: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::catch_exception: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L3);
    }
#endif

    // set pending exception
    __ verify_oop(rax);

    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
    __ lea(rscratch1, ExternalAddress((address)__FILE__));
    __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
    __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL,
           "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }

  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }

  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::xchg_long(jlong exchange_value, volatile jlong* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

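  // What the cmpxchg stubs above and below compute, as a plain C sketch: the
  // hardware `lock cmpxchg` leaves the previous memory value in rax, which is
  // why each stub seeds rax with compare_value and simply returns it (the
  // lock prefix is skipped on uniprocessors, where it is unnecessary):
  //
  //   jint atomic_cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
  //     jint old = *dest;                        // one atomic step on x86
  //     if (old == compare_value) *dest = exchange_value;
  //     return old;                              // == compare_value on success
  //   }
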
  // Support for int8_t atomic::atomic_cmpxchg(int8_t exchange_value, volatile int8_t* dest,
  //                                           int8_t compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_byte() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
    address start = __ pc();

    __ movsbq(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for int64_t atomic::atomic_cmpxchg(int64_t exchange_value,
  //                                            volatile int64_t* dest,
  //                                            int64_t compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jint atomic::add(jint add_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    if ( os::is_MP() ) __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);
    __ addl(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
    if ( os::is_MP() ) __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
    __ addptr(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess). This is used as part of debugging
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp(rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // caller's fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_sp()
  //
  // This routine is used to find the previous stack pointer for the
  // caller.
  address generate_get_previous_sp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_sp");
    address start = __ pc();

    __ movptr(rax, rsp);
    __ addptr(rax, 8); // return address is at the top of the stack
    __ ret(0);

    return start;
  }

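  // Background for the MXCSR checks in this file: the low six MXCSR bits are
  // the sticky SSE exception flags, which change as a side effect of normal
  // arithmetic; everything above them (DAZ, the six exception mask bits,
  // rounding control, FTZ) is configuration the VM expects to stay fixed.
  // MXCSR_MASK == 0xFFC0 therefore discards the flag bits before comparing
  // against the standard value (typically 0x1F80: all exceptions masked,
  // round to nearest). For example:
  //
  //   observed = 0x1F81;                 // standard setup + IE flag set
  //   (observed & MXCSR_MASK) == 0x1F80  // still counts as unchanged
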
  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize);      // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);      // Only check control and mask bits
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }

  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1);      // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

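  // The f2i/f2l/d2i/d2l fixup stubs share one idea: cvttss2si/cvttsd2si
  // produce the "integer indefinite" value when the input is NaN or out of
  // range, and compiled code then calls a fixup stub to apply Java semantics.
  // A C sketch of the f2i case above, operating on the raw float bits:
  //
  //   jint f2i_fixup(jint bits) {
  //     if ((bits & 0x7fffffff) > 0x7f800000) return 0;  // NaN -> 0
  //     return bits < 0 ? min_jint : max_jint;           // saturate on overflow
  //   }
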
  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1);      // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testptr(c_rarg0, c_rarg0);   // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0);     // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64( mask, relocInfo::none );
    __ emit_data64( mask, relocInfo::none );

    return start;
  }

  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,
      saved_r10     = 8 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr   = 16 * wordSize,
      error_msg     = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit); // if obj is NULL it is OK

#if INCLUDE_ZGC
    if (UseZGC) {
      // Check if metadata bits indicate a bad oop
      __ testptr(rax, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
      __ jcc(Assembler::notZero, error);
    }
#endif

    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // set r12 to heapbase for load_klass()
    __ reinit_heapbase();

    // make sure klass is 'reasonable', which is not zero
    __ load_klass(rax, rax);  // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // restore c_rarg3
    __ pop(c_rarg2);                               // restore c_rarg2
    __ pop(r12);                                   // restore r12
    __ popf();                                     // restore flags
    __ ret(4 * wordSize);                          // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // get saved c_rarg3 back
    __ pop(c_rarg2);                               // get saved c_rarg2 back
    __ pop(r12);                                   // get saved r12 back
    __ popf();                                     // get saved flags off stack --
                                                   // will be ignored

    __ pusha();                                    // push registers
                                                   // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //     [tos +  0] 16 saved registers
    //     [tos + 16] return address
    //   * [tos + 17] error message (char*)
    //   * [tos + 18] object to verify (oop)
    //   * [tos + 19] saved rax - saved by caller and bashed
    //   * [tos + 20] saved r10 (rscratch1) - saved by caller
    //   * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ mov(rsp, r12);                               // restore rsp
    __ popa();                                      // pop registers (includes r12)
    __ ret(4 * wordSize);                           // pop caller saved stuff

    return start;
  }

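  // Call protocol implied by the stack layout above (MacroAssembler::verify_oop
  // is the expected caller; `verify_oop_stub` below is just an illustrative
  // name for the address this generator returns): four words are pushed before
  // the call, and the trailing `ret(4 * wordSize)` pops them again:
  //
  //   push(r10);   // saved r10 (rscratch1), reloaded by the stub before returning
  //   push(rax);   // saved rax, likewise reloaded
  //   push(oop);   // object to verify
  //   push(msg);   // error message (char*)
  //   call(verify_oop_stub);
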
  //
  // Verify that a register contains a clean 32-bit positive value
  // (high 32 bits are 0) so it can be used in 64-bit shifts.
  //
  //  Input:
  //    Rint  -  32-bit value
  //    Rtmp  -  scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#ifdef ASSERT
    Label L;
    assert_different_registers(Rtmp, Rint);
    __ movslq(Rtmp, Rint);
    __ cmpq(Rtmp, Rint);
    __ jcc(Assembler::equal, L);
    __ stop("high 32-bits of int value are not 0");
    __ bind(L);
#endif
  }

  // Generate overlap test for array copy stubs
  //
  //  Input:
  //     c_rarg0 - from
  //     c_rarg1 - to
  //     c_rarg2 - element count
  //
  //  Output:
  //     rax   - &from[element count] (first address past the source data)
  //
  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(NULL, &L_no_overlap, sf);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
    const Register from     = c_rarg0;
    const Register to       = c_rarg1;
    const Register count    = c_rarg2;
    const Register end_from = rax;

    __ cmpptr(to, from);
    __ lea(end_from, Address(from, count, sf, 0));
    if (NOLp == NULL) {
      ExternalAddress no_overlap(no_overlap_target);
      __ jump_cc(Assembler::belowEqual, no_overlap);
      __ cmpptr(to, end_from);
      __ jump_cc(Assembler::aboveEqual, no_overlap);
    } else {
      __ jcc(Assembler::belowEqual, (*NOLp));
      __ cmpptr(to, end_from);
      __ jcc(Assembler::aboveEqual, (*NOLp));
    }
  }

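  // In C terms, the test above takes the no-overlap (forward copy) exit when
  //
  //   (uintptr_t)to <= (uintptr_t)from ||                       // dest at or below src
  //   (uintptr_t)to >= (uintptr_t)from + count * elem_size      // dest past the src end
  //
  // and otherwise falls through into the conjoint (backward) copy code, which
  // is the safe direction when dest overlaps src from above.
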
1173 // 1174 void setup_arg_regs(int nargs = 3) { 1175 const Register saved_rdi = r9; 1176 const Register saved_rsi = r10; 1177 assert(nargs == 3 || nargs == 4, "else fix"); 1178 #ifdef _WIN64 1179 assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9, 1180 "unexpected argument registers"); 1181 if (nargs >= 4) 1182 __ mov(rax, r9); // r9 is also saved_rdi 1183 __ movptr(saved_rdi, rdi); 1184 __ movptr(saved_rsi, rsi); 1185 __ mov(rdi, rcx); // c_rarg0 1186 __ mov(rsi, rdx); // c_rarg1 1187 __ mov(rdx, r8); // c_rarg2 1188 if (nargs >= 4) 1189 __ mov(rcx, rax); // c_rarg3 (via rax) 1190 #else 1191 assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx, 1192 "unexpected argument registers"); 1193 #endif 1194 } 1195 1196 void restore_arg_regs() { 1197 const Register saved_rdi = r9; 1198 const Register saved_rsi = r10; 1199 #ifdef _WIN64 1200 __ movptr(rdi, saved_rdi); 1201 __ movptr(rsi, saved_rsi); 1202 #endif 1203 } 1204 1205 1206 // Copy big chunks forward 1207 // 1208 // Inputs: 1209 // end_from - source arrays end address 1210 // end_to - destination array end address 1211 // qword_count - 64-bits element count, negative 1212 // to - scratch 1213 // L_copy_bytes - entry label 1214 // L_copy_8_bytes - exit label 1215 // 1216 void copy_bytes_forward(Register end_from, Register end_to, 1217 Register qword_count, Register to, 1218 Label& L_copy_bytes, Label& L_copy_8_bytes) { 1219 DEBUG_ONLY(__ stop("enter at entry label, not here")); 1220 Label L_loop; 1221 __ align(OptoLoopAlignment); 1222 if (UseUnalignedLoadStores) { 1223 Label L_end; 1224 if (UseAVX > 2) { 1225 __ movl(to, 0xffff); 1226 __ kmovwl(k1, to); 1227 } 1228 // Copy 64-bytes per iteration 1229 __ BIND(L_loop); 1230 if (UseAVX > 2) { 1231 __ evmovdqul(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit); 1232 __ evmovdqul(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit); 1233 } else if (UseAVX == 2) { 1234 __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56)); 1235 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0); 1236 __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24)); 1237 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1); 1238 } else { 1239 __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56)); 1240 __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0); 1241 __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40)); 1242 __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1); 1243 __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24)); 1244 __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2); 1245 __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8)); 1246 __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3); 1247 } 1248 __ BIND(L_copy_bytes); 1249 __ addptr(qword_count, 8); 1250 __ jcc(Assembler::lessEqual, L_loop); 1251 __ subptr(qword_count, 4); // sub(8) and add(4) 1252 __ jccb(Assembler::greater, L_end); 1253 // Copy trailing 32 bytes 1254 if (UseAVX >= 2) { 1255 __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24)); 1256 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0); 1257 } else { 1258 __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24)); 1259 __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0); 1260 __ movdqu(xmm1, Address(end_from, 
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
      }
      __ addptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32 bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
      __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
      __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
      __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
      __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);

      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 4);
      __ jcc(Assembler::lessEqual, L_loop);
    }
    __ subptr(qword_count, 4);
    __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
  }

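  // copy_bytes_forward relies on a classic negative-index idiom: callers
  // point end_from/end_to at the last qword of each array and pass
  // qword_count negated, so one add serves as both induction step and loop
  // test. The overall scheme, as a C sketch:
  //
  //   jlong* end_from = from + count - 1;   // last qword of source
  //   jlong* end_to   = to   + count - 1;   // last qword of destination
  //   for (jlong i = -count; i != 0; i++)   // i rises toward zero
  //     end_to[i + 1] = end_from[i + 1];
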
  // Copy big chunks backward
  //
  // Inputs:
  //   from           - source array address
  //   dest           - destination array address
  //   qword_count    - 64-bit element count
  //   to             - scratch
  //   L_copy_bytes   - entry label
  //   L_copy_8_bytes - exit label
  //
  void copy_bytes_backward(Register from, Register dest,
                           Register qword_count, Register to,
                           Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      if (UseAVX > 2) {
        __ movl(to, 0xffff);
        __ kmovwl(k1, to);
      }
      // Copy 64 bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 0), Assembler::AVX_512bit);
        __ evmovdqul(Address(dest, qword_count, Address::times_8, 0), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
        __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
        __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
        __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
        __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
        __ movdqu(xmm3, Address(from, qword_count, Address::times_8, 0));
        __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 8);
      __ jcc(Assembler::greaterEqual, L_loop);

      __ addptr(qword_count, 4);  // add(8) and sub(4)
      __ jccb(Assembler::less, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
        __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
      }
      __ subptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32 bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(from, qword_count, Address::times_8, 24));
      __ movq(Address(dest, qword_count, Address::times_8, 24), to);
      __ movq(to, Address(from, qword_count, Address::times_8, 16));
      __ movq(Address(dest, qword_count, Address::times_8, 16), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  8));
      __ movq(Address(dest, qword_count, Address::times_8,  8), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  0));
      __ movq(Address(dest, qword_count, Address::times_8,  0), to);

      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 4);
      __ jcc(Assembler::greaterEqual, L_loop);
    }
    __ addptr(qword_count, 4);
    __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
  }


  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_byte_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_byte_copy().
  //
  address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    Label L_copy_byte, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count); // make the count negative
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jccb(Assembler::zero, L_copy_byte);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ addptr(end_from, 2);
    __ addptr(end_to, 2);

    // Check for and copy trailing byte
    __ BIND(L_copy_byte);
    __ testl(byte_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movb(rax, Address(end_from, 8));
    __ movb(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-bytes chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }

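  // Tail handling in the byte copy above, in C terms: after the bulk qword
  // copy, the low three bits of byte_count select the leftover pieces (the
  // end pointers still sit on the last whole qword copied, so offset 8 is the
  // first uncopied byte):
  //
  //   if (byte_count & 4) { copy 4 bytes; advance both end pointers by 4; }
  //   if (byte_count & 2) { copy 2 bytes; advance both end pointers by 2; }
  //   if (byte_count & 1) { copy 1 byte; }
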
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from high to low addresses.

    // Check for and copy trailing byte
    __ testl(byte_count, 1);
    __ jcc(Assembler::zero, L_copy_2_bytes);
    __ movb(rax, Address(from, byte_count, Address::times_1, -1));
    __ movb(Address(to, byte_count, Address::times_1, -1), rax);
    __ decrement(byte_count); // Adjust for possible trailing word

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jcc(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, byte_count, Address::times_1, -2));
    __ movw(Address(to, byte_count, Address::times_1, -2), rax);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-bytes chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_short_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_short_copy().
  //
  address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Original 'dest' is trashed, so we can't use it as a
    // base register for a possible trailing word copy

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-bytes chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }

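  // Element-count bookkeeping used by the jshort copy above: four 2-byte
  // elements fit in one qword, so
  //
  //   qword_count = count >> 2;   // bulk qwords, moved by copy_bytes_forward
  //   word_count & 2              // a trailing dword (two jshorts) remains?
  //   word_count & 1              // a trailing jshort remains?
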
  address generate_fill(BasicType t, bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    BLOCK_COMMENT("Entry:");

    const Register to    = c_rarg0;  // destination array address
    const Register value = c_rarg1;  // value
    const Register count = c_rarg2;  // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ generate_fill(t, aligned, to, value, count, rax, xmm0);

    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

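  // Semantically, the stub produced above is just (a sketch; the real loop is
  // vectorized inside MacroAssembler::generate_fill, and `t` selects the
  // element width):
  //
  //   void fill(jint* to, jint value, size_t count) {
  //     for (size_t i = 0; i < count; i++) to[i] = value;
  //   }
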

    // Check for and copy trailing word
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, word_count, Address::times_2, -2));
    __ movw(Address(to, word_count, Address::times_2, -2), rax);

    // Check for and copy trailing dword
  __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  // Side Effects:
  //   disjoint_int_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_int_oop_copy().
  //
  address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry,
                                         const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
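    // Note (descriptive only, an assumption about assert_clean_int()
    // defined earlier in this file): a "clean int" means the upper half
    // of the 64-bit register is a sign-extension of its low 32 bits, so
    // the count can safely feed the 64-bit address arithmetic below.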

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY | ARRAYCOPY_DISJOINT;
    if (dest_uninitialized) {
      decorators |= AS_DEST_NOT_INITIALIZED;
    }
    if (aligned) {
      decorators |= ARRAYCOPY_ALIGNED;
    }

    BasicType type = is_oop ? T_OBJECT : T_INT;
    BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->arraycopy_prologue(_masm, decorators, type, from, to, count);

    // 'from', 'to' and 'count' are now valid
    __ movptr(dword_count, count);
    __ shrptr(count, 1); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
  __ BIND(L_copy_4_bytes);
    __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
    __ jccb(Assembler::zero, L_exit);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

  __ BIND(L_exit);
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ vzeroupper();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
                                         address *entry, const char *name,
                                         bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_4);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY;
    if (dest_uninitialized) {
      decorators |= AS_DEST_NOT_INITIALIZED;
    }
    if (aligned) {
      decorators |= ARRAYCOPY_ALIGNED;
    }

    BasicType type = is_oop ? T_OBJECT : T_INT;
    BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
    // no registers are destroyed by this call
    bs->arraycopy_prologue(_masm, decorators, type, from, to, count);

    assert_clean_int(count, rax); // Make sure 'count' is clean int.
    // 'from', 'to' and 'count' are now valid
    __ movptr(dword_count, count);
    __ shrptr(count, 1); // count => qword_count

    // Copy from high to low addresses.  Use 'to' as scratch.
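
    // Illustrative note (not generated code): assuming dword_count == 5,
    // the odd trailing dword (element 4) is copied first at offset
    // (dword_count - 1) * 4, then qword_count == 2 qwords (elements 0..3)
    // are copied backward, highest address first.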

    // Check for and copy trailing dword
    __ testl(dword_count, 1);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, dword_count, Address::times_4, -4));
    __ movl(Address(to, dword_count, Address::times_4, -4), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

  __ BIND(L_exit);
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // Side Effects:
  //   disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
  //   no-overlap entry point used by generate_conjoint_long_oop_copy().
  //
  address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry,
                                          const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register qword_count = rdx;  // elements count
    const Register end_from    = from; // source array end address
    const Register end_to      = rcx;  // destination array end address
    const Register saved_count = r11;
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    // Save no-overlap entry point for generate_conjoint_long_oop_copy()
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
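    // Note (descriptive only): elements here are 8 bytes wide, so the
    // element count in rdx is already the qword count; unlike the short
    // and int stubs above, no shrptr() is needed after setup_arg_regs().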

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
    // 'from', 'to' and 'qword_count' are now valid

    DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY | ARRAYCOPY_DISJOINT;
    if (dest_uninitialized) {
      decorators |= AS_DEST_NOT_INITIALIZED;
    }
    if (aligned) {
      decorators |= ARRAYCOPY_ALIGNED;
    }

    BasicType type = is_oop ? T_OBJECT : T_LONG;
    BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count);

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    } else {
      restore_arg_regs();
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
      __ xorptr(rax, rax); // return 0
      __ vzeroupper();
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

  __ BIND(L_exit);
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count);
    restore_arg_regs();
    if (is_oop) {
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
    }
    __ vzeroupper();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  address generate_conjoint_long_oop_copy(bool aligned, bool is_oop,
                                          address nooverlap_target, address *entry,
                                          const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register qword_count = rdx;  // elements count
    const Register saved_count = rcx;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_8);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
    // 'from', 'to' and 'qword_count' are now valid

    // This is a conjoint (potentially overlapping) copy, so the
    // ARRAYCOPY_DISJOINT decorator does not apply here.
    DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY;
    if (dest_uninitialized) {
      decorators |= AS_DEST_NOT_INITIALIZED;
    }
    if (aligned) {
      decorators |= ARRAYCOPY_ALIGNED;
    }

    BasicType type = is_oop ? T_OBJECT : T_LONG;
    BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count);

    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    } else {
      restore_arg_regs();
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
      __ xorptr(rax, rax); // return 0
      __ vzeroupper();
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

  __ BIND(L_exit);
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count);
    restore_arg_regs();
    if (is_oop) {
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
    }
    __ vzeroupper();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }


  // Helper for generating a dynamic type check.
  // Smashes no registers.
  void generate_type_check(Register sub_klass,
                           Register super_check_offset,
                           Register super_klass,
                           Label& L_success) {
    assert_different_registers(sub_klass, super_check_offset, super_klass);

    BLOCK_COMMENT("type_check:");

    Label L_miss;

    __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL,
                                     super_check_offset);
    __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL);

    // Fall through on failure!
  __ BIND(L_miss);
  }
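
  // Rough sketch of what the two MacroAssembler helpers check, in C-like
  // pseudocode (illustrative only; names are simplified):
  //
  //   if (sub_klass == super_klass)                          goto success;
  //   if (*(sub_klass + super_check_offset) == super_klass)  goto success;
  //   // slow path: linear scan of sub_klass's secondary supers
  //   if (secondary_supers(sub_klass) contains super_klass)  goto success;
  //   goto miss;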

  //
  //  Generate checkcasting array copy stub
  //
  //  Input:
  //    c_rarg0   - source array address
  //    c_rarg1   - destination array address
  //    c_rarg2   - element count, treated as ssize_t, can be zero
  //    c_rarg3   - size_t ckoff (super_check_offset)
  //        not Win64
  //    c_rarg4   - oop ckval (super_klass)
  //        Win64
  //    rsp+40    - oop ckval (super_klass)
  //
  //  Output:
  //    rax ==  0  -  success
  //    rax == -1^K - failure, where K is partial transfer count
  //
  address generate_checkcast_copy(const char *name, address *entry,
                                  bool dest_uninitialized = false) {

    Label L_load_element, L_store_element, L_do_card_marks, L_done;

    // Input registers (after setup_arg_regs)
    const Register from        = rdi;   // source array address
    const Register to          = rsi;   // destination array address
    const Register length      = rdx;   // elements count
    const Register ckoff       = rcx;   // super_check_offset
    const Register ckval       = r8;    // super_klass

    // Registers used as temps (r13, r14 are save-on-entry)
    const Register end_from    = from;  // source array end address
    const Register end_to      = r13;   // destination array end address
    const Register count       = rdx;   // -(count_remaining)
    const Register r14_length  = r14;   // saved copy of length
    // End pointers are inclusive, and if length is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    const Register rax_oop    = rax;    // actual oop copied
    const Register r11_klass  = r11;    // oop._klass

    //---------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the two arrays are subtypes of Object[] but the
    // destination array type is not equal to or a supertype
    // of the source type.  Each element must be separately
    // checked.

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef ASSERT
    // caller guarantees that the arrays really are different
    // otherwise, we would have to make conjoint checks
    { Label L;
      array_overlap_test(L, TIMES_OOP);
      __ stop("checkcast_copy within a single array");
      __ bind(L);
    }
#endif //ASSERT

    setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
                       // ckoff => rcx, ckval => r8
                       // r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
    // last argument (#4) is on stack on Win64
    __ movptr(ckval, Address(rsp, 6 * wordSize));
#endif

    // Caller of this entry point must set up the argument registers.
    if (entry != NULL) {
      *entry = __ pc();
      BLOCK_COMMENT("Entry:");
    }

    // allocate spill slots for r13, r14
    enum {
      saved_r13_offset,
      saved_r14_offset,
      saved_rbp_offset
    };
    __ subptr(rsp, saved_rbp_offset * wordSize);
    __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
    __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);

    // check that int operands are properly extended to size_t
    assert_clean_int(length, rax);
    assert_clean_int(ckoff, rax);

#ifdef ASSERT
    BLOCK_COMMENT("assert consistent ckoff/ckval");
    // The ckoff and ckval must be mutually consistent,
    // even though caller generates both.
    { Label L;
      int sco_offset = in_bytes(Klass::super_check_offset_offset());
      __ cmpl(ckoff, Address(ckval, sco_offset));
      __ jcc(Assembler::equal, L);
      __ stop("super_check_offset inconsistent");
      __ bind(L);
    }
#endif //ASSERT

    // Loop-invariant addresses.  They are exclusive end pointers.
    Address end_from_addr(from, length, TIMES_OOP, 0);
    Address   end_to_addr(to,   length, TIMES_OOP, 0);
    // Loop-variant addresses.  They assume post-incremented count < 0.
    Address from_element_addr(end_from, count, TIMES_OOP, 0);
    Address   to_element_addr(end_to,   count, TIMES_OOP, 0);

    DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY | ARRAYCOPY_CHECKCAST;
    if (dest_uninitialized) {
      decorators |= AS_DEST_NOT_INITIALIZED;
    }

    BasicType type = T_OBJECT;
    BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->arraycopy_prologue(_masm, decorators, type, from, to, count);

    // Copy from low to high addresses, indexed from the end of each array.
    __ lea(end_from, end_from_addr);
    __ lea(end_to,   end_to_addr);
    __ movptr(r14_length, length);   // save a copy of the length
    assert(length == count, "");     // else fix next line:
    __ negptr(count);                // negate and test the length
    __ jcc(Assembler::notZero, L_load_element);

    // Empty array:  Nothing to do.
    __ xorptr(rax, rax);             // return 0 on (trivial) success
    __ jmp(L_done);

    // ======== begin loop ========
    // (Loop is rotated; its entry is L_load_element.)
    // Loop control:
    //   for (count = -count; count != 0; count++)
    // Base pointers src, dst are biased by 8*(count-1), to last element.
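    // Illustrative note (not generated code): with length == 2 and
    // uncompressed oops, end_from == from + 16; the loop then visits
    //   count == -2 -> end_from + (-2)*8 == from + 0   (element 0)
    //   count == -1 -> end_from + (-1)*8 == from + 8   (element 1)
    // and exits when the post-incremented count reaches zero.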
    __ align(OptoLoopAlignment);

  __ BIND(L_store_element);
    __ store_heap_oop(to_element_addr, rax_oop, noreg, noreg, AS_RAW);  // store the oop
    __ increment(count);               // increment the count toward zero
    __ jcc(Assembler::zero, L_do_card_marks);

    // ======== loop entry is here ========
  __ BIND(L_load_element);
    __ load_heap_oop(rax_oop, from_element_addr, noreg, noreg, AS_RAW); // load the oop
    __ testptr(rax_oop, rax_oop);
    __ jcc(Assembler::zero, L_store_element);

    __ load_klass(r11_klass, rax_oop); // query the object klass
    generate_type_check(r11_klass, ckoff, ckval, L_store_element);
    // ======== end loop ========

    // It was a real error; we must depend on the caller to finish the job.
    // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
    // Emit GC store barriers for the oops we have copied (r14 + rdx),
    // and report their number to the caller.
    assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1);
    Label L_post_barrier;
    __ addptr(r14_length, count);     // K = (original - remaining) oops
    __ movptr(rax, r14_length);       // save the value
    __ notptr(rax);                   // report (-1^K) to caller (does not affect flags)
    __ jccb(Assembler::notZero, L_post_barrier);
    __ jmp(L_done); // K == 0, nothing was copied, skip post barrier

    // Come here on success only.
  __ BIND(L_do_card_marks);
    __ xorptr(rax, rax); // return 0 on success

  __ BIND(L_post_barrier);
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, r14_length);

    // Common exit point (success or failure).
  __ BIND(L_done);
    __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
    __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  //
  //  Generate 'unsafe' array copy stub
  //  Though just as safe as the other stubs, it takes an unscaled
  //  size_t argument instead of an element count.
  //
  //  Input:
  //    c_rarg0   - source array address
  //    c_rarg1   - destination array address
  //    c_rarg2   - byte count, treated as ssize_t, can be zero
  //
  // Examines the alignment of the operands and dispatches
  // to a long, int, short, or byte copy loop.
  //
  address generate_unsafe_copy(const char *name,
                               address byte_copy_entry, address short_copy_entry,
                               address int_copy_entry, address long_copy_entry) {

    Label L_long_aligned, L_int_aligned, L_short_aligned;

    // Input registers (before setup_arg_regs)
    const Register from        = c_rarg0;  // source array address
    const Register to          = c_rarg1;  // destination array address
    const Register size        = c_rarg2;  // byte count (size_t)

    // Register used as a temp
    const Register bits        = rax;      // test copy of low bits

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);

    __ mov(bits, from);
    __ orptr(bits, to);
    __ orptr(bits, size);

    __ testb(bits, BytesPerLong-1);
    __ jccb(Assembler::zero, L_long_aligned);

    __ testb(bits, BytesPerInt-1);
    __ jccb(Assembler::zero, L_int_aligned);

    __ testb(bits, BytesPerShort-1);
    __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));

  __ BIND(L_short_aligned);
    __ shrptr(size, LogBytesPerShort); // size => short_count
    __ jump(RuntimeAddress(short_copy_entry));

  __ BIND(L_int_aligned);
    __ shrptr(size, LogBytesPerInt); // size => int_count
    __ jump(RuntimeAddress(int_copy_entry));

  __ BIND(L_long_aligned);
    __ shrptr(size, LogBytesPerLong); // size => qword_count
    __ jump(RuntimeAddress(long_copy_entry));

    return start;
  }
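
  // Illustrative note (not generated code): or'ing the two addresses and
  // the size pools their misalignment.  Assuming from == 0x1000,
  // to == 0x2008 and size == 0x18, bits == 0x3018 and (bits & 7) == 0,
  // so all three are 8-byte aligned and the long copy loop is chosen,
  // copying size >> 3 == 3 qwords.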

  // Perform range checks on the proposed arraycopy.
  // Kills temp, but nothing else.
  // Also, clean the sign bits of src_pos and dst_pos.
  void arraycopy_range_checks(Register src,     // source array oop (c_rarg0)
                              Register src_pos, // source position (c_rarg1)
                              Register dst,     // destination array oop (c_rarg2)
                              Register dst_pos, // destination position (c_rarg3)
                              Register length,
                              Register temp,
                              Label& L_failed) {
    BLOCK_COMMENT("arraycopy_range_checks:");

    //  if (src_pos + length > arrayOop(src)->length())  FAIL;
    __ movl(temp, length);
    __ addl(temp, src_pos);             // src_pos + length
    __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    //  if (dst_pos + length > arrayOop(dst)->length())  FAIL;
    __ movl(temp, length);
    __ addl(temp, dst_pos);             // dst_pos + length
    __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
    // Move with sign extension can be used since they are positive.
    __ movslq(src_pos, src_pos);
    __ movslq(dst_pos, dst_pos);

    BLOCK_COMMENT("arraycopy_range_checks done");
  }

  //
  //  Generate generic array copy stubs
  //
  //  Input:
  //    c_rarg0    -  src oop
  //    c_rarg1    -  src_pos (32-bits)
  //    c_rarg2    -  dst oop
  //    c_rarg3    -  dst_pos (32-bits)
  // not Win64
  //    c_rarg4    -  element count (32-bits)
  // Win64
  //    rsp+40     -  element count (32-bits)
  //
  //  Output:
  //    rax ==  0  -  success
  //    rax == -1^K - failure, where K is partial transfer count
  //
  address generate_generic_copy(const char *name,
                                address byte_copy_entry, address short_copy_entry,
                                address int_copy_entry, address oop_copy_entry,
                                address long_copy_entry, address checkcast_copy_entry) {

    Label L_failed, L_failed_0, L_objArray;
    Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;

    // Input registers
    const Register src      = c_rarg0;  // source array oop
    const Register src_pos  = c_rarg1;  // source position
    const Register dst      = c_rarg2;  // destination array oop
    const Register dst_pos  = c_rarg3;  // destination position
#ifndef _WIN64
    const Register length   = c_rarg4;
#else
    const Address  length(rsp, 6 * wordSize); // elements count is on stack on Win64
#endif

    { int modulus = CodeEntryAlignment;
      int target  = modulus - 5; // 5 = sizeof jmp(L_failed)
      int advance = target - (__ offset() % modulus);
      if (advance < 0)  advance += modulus;
      if (advance > 0)  __ nop(advance);
    }
    StubCodeMark mark(this, "StubRoutines", name);

    // Short-hop target to L_failed.  Makes for denser prologue code.
  __ BIND(L_failed_0);
    __ jmp(L_failed);
    assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");

    __ align(CodeEntryAlignment);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_generic_array_copy_ctr);

    //-----------------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the following conditions are met:
    //
    // (1) src and dst must not be null.
    // (2) src_pos must not be negative.
    // (3) dst_pos must not be negative.
    // (4) length  must not be negative.
    // (5) src klass and dst klass should be the same and not NULL.
    // (6) src and dst should be arrays.
    // (7) src_pos + length must not exceed length of src.
    // (8) dst_pos + length must not exceed length of dst.
    //

    //  if (src == NULL) return -1;
    __ testptr(src, src);         // src oop
    size_t j1off = __ offset();
    __ jccb(Assembler::zero, L_failed_0);

    //  if (src_pos < 0) return -1;
    __ testl(src_pos, src_pos); // src_pos (32-bits)
    __ jccb(Assembler::negative, L_failed_0);

    //  if (dst == NULL) return -1;
    __ testptr(dst, dst);         // dst oop
    __ jccb(Assembler::zero, L_failed_0);

    //  if (dst_pos < 0) return -1;
    __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
    size_t j4off = __ offset();
    __ jccb(Assembler::negative, L_failed_0);

    // The first four tests are very dense code,
    // but not quite dense enough to put four
    // jumps in a 16-byte instruction fetch buffer.
    // That's good, because some branch predictors
    // do not like jumps so close together.
    // Make sure of this.
    guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");

    // registers used as temp
    const Register r11_length    = r11; // elements count to copy
    const Register r10_src_klass = r10; // array klass

    //  if (length < 0) return -1;
    __ movl(r11_length, length);        // length (elements count, 32-bits value)
    __ testl(r11_length, r11_length);
    __ jccb(Assembler::negative, L_failed_0);

    __ load_klass(r10_src_klass, src);
#ifdef ASSERT
    //  assert(src->klass() != NULL);
    {
      BLOCK_COMMENT("assert klasses not null {");
      Label L1, L2;
      __ testptr(r10_src_klass, r10_src_klass);
      __ jcc(Assembler::notZero, L2);   // it is broken if klass is NULL
      __ bind(L1);
      __ stop("broken null klass");
      __ bind(L2);
      __ load_klass(rax, dst);
      __ cmpq(rax, 0);
      __ jcc(Assembler::equal, L1);     // this would be broken also
      BLOCK_COMMENT("} assert klasses not null done");
    }
#endif

    // Load layout helper (32-bits)
    //
    //  |array_tag|     | header_size | element_type |     |log2_element_size|
    // 32        30    24            16              8     2                 0
    //
    //   array_tag: typeArray = 0x3, objArray = 0x2, non-array =  0x0
    //

    const int lh_offset = in_bytes(Klass::layout_helper_offset());

    // Handle objArrays completely differently...
    const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
    __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh);
    __ jcc(Assembler::equal, L_objArray);

    //  if (src->klass() != dst->klass()) return -1;
    __ load_klass(rax, dst);
    __ cmpq(r10_src_klass, rax);
    __ jcc(Assembler::notEqual, L_failed);

    const Register rax_lh = rax;  // layout helper
    __ movl(rax_lh, Address(r10_src_klass, lh_offset));

    //  if (!src->is_Array()) return -1;
    __ cmpl(rax_lh, Klass::_lh_neutral_value);
    __ jcc(Assembler::greaterEqual, L_failed);

    // At this point, it is known to be a typeArray (array_tag 0x3).
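    // Illustrative note (not generated code): per the field layout above,
    // a typeArray's layout helper packs roughly as
    //   (0x3 << 30) | (header_size << 16) | (element_type << 8) | log2_element_size
    // e.g. for an int[]: tag 0x3, the T_INT element type, and
    // log2_element_size == 2; the exact header_size is platform-dependent.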
#ifdef ASSERT
    {
      BLOCK_COMMENT("assert primitive array {");
      Label L;
      __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
      __ jcc(Assembler::greaterEqual, L);
      __ stop("must be a primitive array");
      __ bind(L);
      BLOCK_COMMENT("} assert primitive array done");
    }
#endif

    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                           r10, L_failed);

    // TypeArrayKlass
    //
    // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
    // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
    //

    const Register r10_offset = r10;    // array offset
    const Register rax_elsize = rax_lh; // element size

    __ movl(r10_offset, rax_lh);
    __ shrl(r10_offset, Klass::_lh_header_size_shift);
    __ andptr(r10_offset, Klass::_lh_header_size_mask);   // array_offset
    __ addptr(src, r10_offset);           // src array offset
    __ addptr(dst, r10_offset);           // dst array offset
    BLOCK_COMMENT("choose copy loop based on element size");
    __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize

    // next registers should be set before the jump to corresponding stub
    const Register from     = c_rarg0;  // source array address
    const Register to       = c_rarg1;  // destination array address
    const Register count    = c_rarg2;  // elements count

    // 'from', 'to', 'count' registers should be set in such order
    // since they are the same as 'src', 'src_pos', 'dst'.

  __ BIND(L_copy_bytes);
    __ cmpl(rax_elsize, 0);
    __ jccb(Assembler::notEqual, L_copy_shorts);
    __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_1, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(byte_copy_entry));

  __ BIND(L_copy_shorts);
    __ cmpl(rax_elsize, LogBytesPerShort);
    __ jccb(Assembler::notEqual, L_copy_ints);
    __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_2, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(short_copy_entry));

  __ BIND(L_copy_ints);
    __ cmpl(rax_elsize, LogBytesPerInt);
    __ jccb(Assembler::notEqual, L_copy_longs);
    __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_4, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(int_copy_entry));

  __ BIND(L_copy_longs);
#ifdef ASSERT
    {
      BLOCK_COMMENT("assert long copy {");
      Label L;
      __ cmpl(rax_elsize, LogBytesPerLong);
      __ jcc(Assembler::equal, L);
      __ stop("must be long copy, but elsize is wrong");
      __ bind(L);
      BLOCK_COMMENT("} assert long copy done");
    }
#endif
    __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_8, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(long_copy_entry));

    // ObjArrayKlass
  __ BIND(L_objArray);
    // live at this point:  r10_src_klass, r11_length, src[_pos], dst[_pos]

    Label L_plain_copy, L_checkcast_copy;
    //  test array classes for subtyping
    __ load_klass(rax, dst);
    __ cmpq(r10_src_klass, rax); // usual case is exact equality
    __ jcc(Assembler::notEqual, L_checkcast_copy);

    // Identically typed arrays can be copied without element-wise checks.
    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                           r10, L_failed);

    __ lea(from, Address(src, src_pos, TIMES_OOP,
                 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
    __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
                 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
    __ movl2ptr(count, r11_length); // length
  __ BIND(L_plain_copy);
    __ jump(RuntimeAddress(oop_copy_entry));

  __ BIND(L_checkcast_copy);
    // live at this point:  r10_src_klass, r11_length, rax (dst_klass)
    {
      // Before looking at dst.length, make sure dst is also an objArray.
      __ cmpl(Address(rax, lh_offset), objArray_lh);
      __ jcc(Assembler::notEqual, L_failed);

      // It is safe to examine both src.length and dst.length.
      arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                             rax, L_failed);

      const Register r11_dst_klass = r11;
      __ load_klass(r11_dst_klass, dst); // reload

      // Marshal the base address arguments now, freeing registers.
      __ lea(from, Address(src, src_pos, TIMES_OOP,
                   arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
                   arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ movl(count, length);           // length (reloaded)
      Register sco_temp = c_rarg3;      // this register is free now
      assert_different_registers(from, to, count, sco_temp,
                                 r11_dst_klass, r10_src_klass);
      assert_clean_int(count, sco_temp);

      // Generate the type check.
      const int sco_offset = in_bytes(Klass::super_check_offset_offset());
      __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
      assert_clean_int(sco_temp, rax);
      generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);

      // Fetch destination element klass from the ObjArrayKlass header.
      int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
      __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
      __ movl(  sco_temp,      Address(r11_dst_klass, sco_offset));
      assert_clean_int(sco_temp, rax);

      // the checkcast_copy loop needs two extra arguments:
      assert(c_rarg3 == sco_temp, "#3 already in place");
      // Set up arguments for checkcast_copy_entry.
      setup_arg_regs(4);
      __ movptr(r8, r11_dst_klass);  // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris
      __ jump(RuntimeAddress(checkcast_copy_entry));
    }

  __ BIND(L_failed);
    __ xorptr(rax, rax);
    __ notptr(rax); // return -1
    __ leave();   // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  void generate_arraycopy_stubs() {
    address entry;
    address entry_jbyte_arraycopy;
    address entry_jshort_arraycopy;
    address entry_jint_arraycopy;
    address entry_oop_arraycopy;
    address entry_jlong_arraycopy;
    address entry_checkcast_arraycopy;

    StubRoutines::_jbyte_disjoint_arraycopy  = generate_disjoint_byte_copy(false, &entry,
                                                                           "jbyte_disjoint_arraycopy");
    StubRoutines::_jbyte_arraycopy           = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy,
                                                                           "jbyte_arraycopy");

    StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry,
                                                                            "jshort_disjoint_arraycopy");
    StubRoutines::_jshort_arraycopy          = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy,
                                                                            "jshort_arraycopy");

    StubRoutines::_jint_disjoint_arraycopy   = generate_disjoint_int_oop_copy(false, false, &entry,
                                                                              "jint_disjoint_arraycopy");
    StubRoutines::_jint_arraycopy            = generate_conjoint_int_oop_copy(false, false, entry,
                                                                              &entry_jint_arraycopy, "jint_arraycopy");

    StubRoutines::_jlong_disjoint_arraycopy  = generate_disjoint_long_oop_copy(false, false, &entry,
                                                                               "jlong_disjoint_arraycopy");
    StubRoutines::_jlong_arraycopy           = generate_conjoint_long_oop_copy(false, false, entry,
                                                                               &entry_jlong_arraycopy, "jlong_arraycopy");


    if (UseCompressedOops) {
      StubRoutines::_oop_disjoint_arraycopy  = generate_disjoint_int_oop_copy(false, true, &entry,
                                                                              "oop_disjoint_arraycopy");
      StubRoutines::_oop_arraycopy           = generate_conjoint_int_oop_copy(false, true, entry,
                                                                              &entry_oop_arraycopy, "oop_arraycopy");
      StubRoutines::_oop_disjoint_arraycopy_uninit  = generate_disjoint_int_oop_copy(false, true, &entry,
                                                                                     "oop_disjoint_arraycopy_uninit",
                                                                                     /*dest_uninitialized*/true);
      StubRoutines::_oop_arraycopy_uninit           = generate_conjoint_int_oop_copy(false, true, entry,
                                                                                     NULL, "oop_arraycopy_uninit",
                                                                                     /*dest_uninitialized*/true);
    } else {
      StubRoutines::_oop_disjoint_arraycopy  = generate_disjoint_long_oop_copy(false, true, &entry,
                                                                               "oop_disjoint_arraycopy");
      StubRoutines::_oop_arraycopy           = generate_conjoint_long_oop_copy(false, true, entry,
                                                                               &entry_oop_arraycopy, "oop_arraycopy");
      StubRoutines::_oop_disjoint_arraycopy_uninit  = generate_disjoint_long_oop_copy(false, true, &entry,
                                                                                      "oop_disjoint_arraycopy_uninit",
                                                                                      /*dest_uninitialized*/true);
      StubRoutines::_oop_arraycopy_uninit           = generate_conjoint_long_oop_copy(false, true, entry,
                                                                                      NULL, "oop_arraycopy_uninit",
                                                                                      /*dest_uninitialized*/true);
    }

    StubRoutines::_checkcast_arraycopy        = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
    StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
                                                                        /*dest_uninitialized*/true);

    StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy("unsafe_arraycopy",
                                                              entry_jbyte_arraycopy,
                                                              entry_jshort_arraycopy,
                                                              entry_jint_arraycopy,
                                                              entry_jlong_arraycopy);
    StubRoutines::_generic_arraycopy   = generate_generic_copy("generic_arraycopy",
                                                               entry_jbyte_arraycopy,
                                                               entry_jshort_arraycopy,
                                                               entry_jint_arraycopy,
                                                               entry_oop_arraycopy,
                                                               entry_jlong_arraycopy,
                                                               entry_checkcast_arraycopy);

    StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
    StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
    StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
    StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
    StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
    StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");

    // We don't generate specialized code for HeapWord-aligned source
    // arrays, so just use the code we've already generated
    StubRoutines::_arrayof_jbyte_disjoint_arraycopy  = StubRoutines::_jbyte_disjoint_arraycopy;
    StubRoutines::_arrayof_jbyte_arraycopy           = StubRoutines::_jbyte_arraycopy;

    StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy;
    StubRoutines::_arrayof_jshort_arraycopy          = StubRoutines::_jshort_arraycopy;

    StubRoutines::_arrayof_jint_disjoint_arraycopy   = StubRoutines::_jint_disjoint_arraycopy;
    StubRoutines::_arrayof_jint_arraycopy            = StubRoutines::_jint_arraycopy;

    StubRoutines::_arrayof_jlong_disjoint_arraycopy  = StubRoutines::_jlong_disjoint_arraycopy;
    StubRoutines::_arrayof_jlong_arraycopy           = StubRoutines::_jlong_arraycopy;

    StubRoutines::_arrayof_oop_disjoint_arraycopy    = StubRoutines::_oop_disjoint_arraycopy;
    StubRoutines::_arrayof_oop_arraycopy             = StubRoutines::_oop_arraycopy;

    StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit;
    StubRoutines::_arrayof_oop_arraycopy_uninit          = StubRoutines::_oop_arraycopy_uninit;
  }

  // AES intrinsic stubs
  enum {AESBlockSize = 16};

  address generate_key_shuffle_mask() {
    __ align(16);
    StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask");
    address start = __ pc();
    __ emit_data64( 0x0405060700010203, relocInfo::none );
    __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none );
    return start;
  }

  address generate_counter_shuffle_mask() {
    __ align(16);
    StubCodeMark mark(this, "StubRoutines", "counter_shuffle_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }

  // Utility routine for loading a 128-bit key word in little endian format;
  // can optionally specify that the shuffle mask is already in an xmm register
  void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
    __ movdqu(xmmdst, Address(key, offset));
    if (xmm_shuf_mask != NULL) {
      __ pshufb(xmmdst, xmm_shuf_mask);
    } else {
      __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    }
  }

  // Utility routine to increase the 128-bit counter (iv in CTR mode)
  void inc_counter(Register reg, XMMRegister xmmdst, int inc_delta, Label& next_block) {
    __ pextrq(reg, xmmdst, 0x0);
    __ addq(reg, inc_delta);
    __ pinsrq(xmmdst, reg, 0x0);
    __ jcc(Assembler::carryClear, next_block); // jump if no carry
    __ pextrq(reg, xmmdst, 0x01); // Carry
    __ addq(reg, 0x01);
    __ pinsrq(xmmdst, reg, 0x01); // Carry end
  __ BIND(next_block);            // next instruction
  }
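
  // Note (descriptive only): this works because pinsrq does not modify
  // RFLAGS, so the carry produced by the addq above survives to the jcc.
  // E.g. if the low qword is 0xFFFFFFFFFFFFFFFF and inc_delta is 1, the
  // add wraps to 0 with CF set, and the high qword is then incremented.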

  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //
  address generate_aescrypt_encryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
    Label L_doLast;
    address start = __ pc();

    const Register from        = c_rarg0;  // source array address
    const Register to          = c_rarg1;  // destination array address
    const Register key         = c_rarg2;  // key array address
    const Register keylen      = rax;

    const XMMRegister xmm_result = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;
    // On win64 xmm6-xmm15 must be preserved so don't use them.
    const XMMRegister xmm_temp1  = xmm2;
    const XMMRegister xmm_temp2  = xmm3;
    const XMMRegister xmm_temp3  = xmm4;
    const XMMRegister xmm_temp4  = xmm5;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode.
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    __ movdqu(xmm_result, Address(from, 0));  // get 16 bytes of input

    // For encryption, the Java expanded key ordering is just what we need;
    // we don't know if the key is aligned, hence not using load-execute form

    load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask);
    __ pxor(xmm_result, xmm_temp1);

    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);
    __ aesenc(xmm_result, xmm_temp3);
    __ aesenc(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);
    __ aesenc(xmm_result, xmm_temp3);
    __ aesenc(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);

    __ cmpl(keylen, 44);
    __ jccb(Assembler::equal, L_doLast);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);

    __ cmpl(keylen, 52);
    __ jccb(Assembler::equal, L_doLast);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);

  __ BIND(L_doLast);
    __ aesenc(xmm_result, xmm_temp1);
    __ aesenclast(xmm_result, xmm_temp2);
    __ movdqu(Address(to, 0), xmm_result);        // store the result
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
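
  // Illustrative note (not generated code): the keylen dispatch matches
  // the AES round counts.  An expanded AES-128 key is 11 round keys of
  // 4 ints each (44 ints): eight aesenc rounds are emitted
  // unconditionally, L_doLast adds aesenc + aesenclast, and each larger
  // key size (52, 60 ints) adds two more aesenc rounds before the final
  // pair, giving 10, 12, or 14 rounds in total.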


  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //
  address generate_aescrypt_decryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
    Label L_doLast;
    address start = __ pc();

    const Register from        = c_rarg0;  // source array address
    const Register to          = c_rarg1;  // destination array address
    const Register key         = c_rarg2;  // key array address
    const Register keylen      = rax;

    const XMMRegister xmm_result = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;
    // On win64 xmm6-xmm15 must be preserved so don't use them.
    const XMMRegister xmm_temp1  = xmm2;
    const XMMRegister xmm_temp2  = xmm3;
    const XMMRegister xmm_temp3  = xmm4;
    const XMMRegister xmm_temp4  = xmm5;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode.
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    __ movdqu(xmm_result, Address(from, 0));

    // for decryption the Java expanded key ordering is rotated one position from what we want,
    // so we start from 0x10 here and hit 0x00 last;
    // we don't know if the key is aligned, hence not using load-execute form
    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);

    __ pxor  (xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);
    __ aesdec(xmm_result, xmm_temp3);
    __ aesdec(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);
    __ aesdec(xmm_result, xmm_temp3);
    __ aesdec(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask);

    __ cmpl(keylen, 44);
    __ jccb(Assembler::equal, L_doLast);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);

    __ cmpl(keylen, 52);
    __ jccb(Assembler::equal, L_doLast);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);

  __ BIND(L_doLast);
    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    // for decryption the aesdeclast operation is always on key+0x00
    __ aesdeclast(xmm_result, xmm_temp3);
    __ movdqu(Address(to, 0), xmm_result);  // store the result
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
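
  // Illustrative note (not generated code): decryption applies the round
  // keys in the reverse order of encryption, and the expanded key handed
  // down from Java is laid out so this stub can still walk forward from
  // offset 0x10 upward; e.g. a 60-int (AES-256) key yields 13 aesdec
  // rounds followed by the aesdeclast on the key at offset 0x00.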


  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - r vector byte array address
  //   c_rarg4   - input length
  //
  // Output:
  //   rax       - input length
  //
  address generate_cipherBlockChaining_encryptAESCrypt() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
    address start = __ pc();

    Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256;
    const Register from        = c_rarg0;  // source array address
    const Register to          = c_rarg1;  // destination array address
    const Register key         = c_rarg2;  // key array address
    const Register rvec        = c_rarg3;  // r byte array initialized from initvector array address
                                           // and left with the results of the last encryption block
#ifndef _WIN64
    const Register len_reg     = c_rarg4;  // src len (must be multiple of blocksize 16)
#else
    const Address  len_mem(rbp, 6 * wordSize);  // length is on stack on Win64
    const Register len_reg     = r11;      // pick the volatile windows register
#endif
    const Register pos         = rax;

    // xmm register assignments for the loops below
    const XMMRegister xmm_result = xmm0;
    const XMMRegister xmm_temp   = xmm1;
    // keys 0-10 preloaded into xmm2-xmm12
    const int XMM_REG_NUM_KEY_FIRST = 2;
    const int XMM_REG_NUM_KEY_LAST  = 15;
    const XMMRegister xmm_key0   = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
    const XMMRegister xmm_key10  = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10);
    const XMMRegister xmm_key11  = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11);
    const XMMRegister xmm_key12  = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12);
    const XMMRegister xmm_key13  = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13);
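
    // Illustrative note (not generated code): classic CBC chaining, where
    // xmm_result carries the previous ciphertext block between iterations:
    //   C[0] = E_K(P[0] ^ IV)        // IV loaded from rvec
    //   C[i] = E_K(P[i] ^ C[i-1])
    // so the loops below never spill the chain value to memory until exit.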
3185 if (VM_Version::supports_avx512vlbw()) { 3186 __ movl(rax, 0xffff); 3187 __ kmovql(k1, rax); 3188 } 3189 3190 #ifdef _WIN64 3191 // on win64, fill len_reg from stack position 3192 __ movl(len_reg, len_mem); 3193 #else 3194 __ push(len_reg); // Save 3195 #endif 3196 3197 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 3198 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3199 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0 3200 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) { 3201 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3202 offset += 0x10; 3203 } 3204 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 3205 3206 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3207 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3208 __ cmpl(rax, 44); 3209 __ jcc(Assembler::notEqual, L_key_192_256); 3210 3211 // 128 bit code follows here 3212 __ movptr(pos, 0); 3213 __ align(OptoLoopAlignment); 3214 3215 __ BIND(L_loopTop_128); 3216 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3217 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3218 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3219 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) { 3220 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3221 } 3222 __ aesenclast(xmm_result, xmm_key10); 3223 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3224 // no need to store r to memory until we exit 3225 __ addptr(pos, AESBlockSize); 3226 __ subptr(len_reg, AESBlockSize); 3227 __ jcc(Assembler::notEqual, L_loopTop_128); 3228 3229 __ BIND(L_exit); 3230 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object 3231 3232 #ifdef _WIN64 3233 __ movl(rax, len_mem); 3234 #else 3235 __ pop(rax); // return length 3236 #endif 3237 __ leave(); // required for proper stackwalking of RuntimeStub frame 3238 __ ret(0); 3239 3240 __ BIND(L_key_192_256); 3241 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 3242 load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask); 3243 load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask); 3244 __ cmpl(rax, 52); 3245 __ jcc(Assembler::notEqual, L_key_256); 3246 3247 // 192-bit code follows here (could be changed to use more xmm registers) 3248 __ movptr(pos, 0); 3249 __ align(OptoLoopAlignment); 3250 3251 __ BIND(L_loopTop_192); 3252 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3253 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3254 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3255 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) { 3256 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3257 } 3258 __ aesenclast(xmm_result, xmm_key12); 3259 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3260 // no need to store r to memory until we exit 3261 __ addptr(pos, AESBlockSize); 3262 __ subptr(len_reg, AESBlockSize); 3263 __ jcc(Assembler::notEqual, L_loopTop_192); 3264 __ jmp(L_exit); 3265 3266 __ BIND(L_key_256); 3267 // 256-bit code follows here (could be 
// changed to use more xmm registers)
    load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask);
    __ movptr(pos, 0);
    __ align(OptoLoopAlignment);

    __ BIND(L_loopTop_256);
    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
    __ pxor  (xmm_result, xmm_temp);               // xor with the current r vector
    __ pxor  (xmm_result, xmm_key0);               // do the aes rounds
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) {
      __ aesenc(xmm_result, as_XMMRegister(rnum));
    }
    load_key(xmm_temp, key, 0xe0);
    __ aesenclast(xmm_result, xmm_temp);
    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);   // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_loopTop_256);
    __ jmp(L_exit);

    return start;
  }

  // Safefetch stubs.
  void generate_safefetch(const char* name, int size, address* entry,
                          address* fault_pc, address* continuation_pc) {
    // safefetch signatures:
    //   int      SafeFetch32(int*      adr, int      errValue);
    //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
    //
    // arguments:
    //   c_rarg0 = adr
    //   c_rarg1 = errValue
    //
    // result:
    //   rax = *adr or errValue

    StubCodeMark mark(this, "StubRoutines", name);

    // Entry point, pc or function descriptor.
    *entry = __ pc();

    // Load *adr into c_rarg1, may fault.
    *fault_pc = __ pc();
    switch (size) {
      case 4:
        // int32_t
        __ movl(c_rarg1, Address(c_rarg0, 0));
        break;
      case 8:
        // int64_t
        __ movq(c_rarg1, Address(c_rarg0, 0));
        break;
      default:
        ShouldNotReachHere();
    }

    // return errValue or *adr
    *continuation_pc = __ pc();
    __ movq(rax, c_rarg1);
    __ ret(0);
  }

  // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time
  // to hide instruction latency
  //
  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - r vector byte array address
  //   c_rarg4   - input length
  //
  // Output:
  //   rax       - input length
  //
  address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
    address start = __ pc();

    const Register from    = c_rarg0;  // source array address
    const Register to      = c_rarg1;  // destination array address
    const Register key     = c_rarg2;  // key array address
    const Register rvec    = c_rarg3;  // r byte array initialized from initvector array address
                                       // and left with the results of the last encryption block
#ifndef _WIN64
    const Register len_reg = c_rarg4;  // src len (must be multiple of blocksize 16)
#else
    const Address  len_mem(rbp, 6 * wordSize);  // length is on stack on Win64
    const Register len_reg = r11;      // pick the volatile windows register
#endif
    const Register pos     = rax;

    const int PARALLEL_FACTOR = 4;
    const int ROUNDS[3] = { 10, 12, 14 }; // aes rounds for key128, key192, key256

    Label L_exit;
    Label
L_singleBlock_loopTopHead[3]; // 128, 192, 256 3370 Label L_singleBlock_loopTopHead2[3]; // 128, 192, 256 3371 Label L_singleBlock_loopTop[3]; // 128, 192, 256 3372 Label L_multiBlock_loopTopHead[3]; // 128, 192, 256 3373 Label L_multiBlock_loopTop[3]; // 128, 192, 256 3374 3375 // keys 0-10 preloaded into xmm5-xmm15 3376 const int XMM_REG_NUM_KEY_FIRST = 5; 3377 const int XMM_REG_NUM_KEY_LAST = 15; 3378 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3379 const XMMRegister xmm_key_last = as_XMMRegister(XMM_REG_NUM_KEY_LAST); 3380 3381 __ enter(); // required for proper stackwalking of RuntimeStub frame 3382 3383 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3384 // context for the registers used, where all instructions below are using 128-bit mode 3385 // On EVEX without VL and BW, these instructions will all be AVX. 3386 if (VM_Version::supports_avx512vlbw()) { 3387 __ movl(rax, 0xffff); 3388 __ kmovql(k1, rax); 3389 } 3390 3391 #ifdef _WIN64 3392 // on win64, fill len_reg from stack position 3393 __ movl(len_reg, len_mem); 3394 #else 3395 __ push(len_reg); // Save 3396 #endif 3397 __ push(rbx); 3398 // the java expanded key ordering is rotated one position from what we want 3399 // so we start from 0x10 here and hit 0x00 last 3400 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 3401 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3402 // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00 3403 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) { 3404 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3405 offset += 0x10; 3406 } 3407 load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask); 3408 3409 const XMMRegister xmm_prev_block_cipher = xmm1; // holds cipher of previous block 3410 3411 // registers holding the four results in the parallelized loop 3412 const XMMRegister xmm_result0 = xmm0; 3413 const XMMRegister xmm_result1 = xmm2; 3414 const XMMRegister xmm_result2 = xmm3; 3415 const XMMRegister xmm_result3 = xmm4; 3416 3417 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec 3418 3419 __ xorptr(pos, pos); 3420 3421 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3422 __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3423 __ cmpl(rbx, 52); 3424 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[1]); 3425 __ cmpl(rbx, 60); 3426 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[2]); 3427 3428 #define DoFour(opc, src_reg) \ 3429 __ opc(xmm_result0, src_reg); \ 3430 __ opc(xmm_result1, src_reg); \ 3431 __ opc(xmm_result2, src_reg); \ 3432 __ opc(xmm_result3, src_reg); \ 3433 3434 for (int k = 0; k < 3; ++k) { 3435 __ BIND(L_multiBlock_loopTopHead[k]); 3436 if (k != 0) { 3437 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left 3438 __ jcc(Assembler::less, L_singleBlock_loopTopHead2[k]); 3439 } 3440 if (k == 1) { 3441 __ subptr(rsp, 6 * wordSize); 3442 __ movdqu(Address(rsp, 0), xmm15); //save last_key from xmm15 3443 load_key(xmm15, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0 3444 __ movdqu(Address(rsp, 2 * wordSize), xmm15); 3445 load_key(xmm1, key, 0xc0); // 0xc0; 3446 __ movdqu(Address(rsp, 4 * wordSize), xmm1); 3447 } else if (k == 2) { 3448 __ subptr(rsp, 10 * wordSize); 3449 __ 
movdqu(Address(rsp, 0), xmm15); // save last_key from xmm15
        load_key(xmm15, key, 0xd0); // 0xd0; 256-bit key goes up to 0xe0
        __ movdqu(Address(rsp, 6 * wordSize), xmm15);
        load_key(xmm1, key, 0xe0);  // 0xe0;
        __ movdqu(Address(rsp, 8 * wordSize), xmm1);
        load_key(xmm15, key, 0xb0); // 0xb0;
        __ movdqu(Address(rsp, 2 * wordSize), xmm15);
        load_key(xmm1, key, 0xc0);  // 0xc0;
        __ movdqu(Address(rsp, 4 * wordSize), xmm1);
      }
      __ align(OptoLoopAlignment);
      __ BIND(L_multiBlock_loopTop[k]);
      __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left
      __ jcc(Assembler::less, L_singleBlock_loopTopHead[k]);

      if (k != 0) {
        __ movdqu(xmm15, Address(rsp, 2 * wordSize));
        __ movdqu(xmm1, Address(rsp, 4 * wordSize));
      }

      __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); // get next 4 blocks into xmmresult registers
      __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize));

      DoFour(pxor, xmm_key_first);
      if (k == 0) {
        for (int rnum = 1; rnum < ROUNDS[k]; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        DoFour(aesdeclast, xmm_key_last);
      } else if (k == 1) {
        for (int rnum = 1; rnum <= ROUNDS[k] - 2; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again.
        DoFour(aesdec, xmm1);  // key : 0xc0
        __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again
        DoFour(aesdeclast, xmm_key_last);
      } else if (k == 2) {
        for (int rnum = 1; rnum <= ROUNDS[k] - 4; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        DoFour(aesdec, xmm1);  // key : 0xc0
        __ movdqu(xmm15, Address(rsp, 6 * wordSize));
        __ movdqu(xmm1, Address(rsp, 8 * wordSize));
        DoFour(aesdec, xmm15);  // key : 0xd0
        __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again.
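        // Note on the reloads around here: xmm5-xmm15 can only hold the eleven
        // round keys shared by every key size, so for 192/256-bit keys the
        // extra round keys (0xb0-0xe0) live in the stack slots filled above and
        // are juggled through xmm1/xmm15 in mid-round; xmm15 is reloaded from
        // (rsp + 0) just before it is needed again as xmm_key_last (key + 0x00).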
3497 DoFour(aesdec, xmm1); // key : 0xe0 3498 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again 3499 DoFour(aesdeclast, xmm_key_last); 3500 } 3501 3502 // for each result, xor with the r vector of previous cipher block 3503 __ pxor(xmm_result0, xmm_prev_block_cipher); 3504 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 3505 __ pxor(xmm_result1, xmm_prev_block_cipher); 3506 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 3507 __ pxor(xmm_result2, xmm_prev_block_cipher); 3508 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 3509 __ pxor(xmm_result3, xmm_prev_block_cipher); 3510 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3 * AESBlockSize)); // this will carry over to next set of blocks 3511 if (k != 0) { 3512 __ movdqu(Address(rvec, 0x00), xmm_prev_block_cipher); 3513 } 3514 3515 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output 3516 __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1); 3517 __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2); 3518 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3); 3519 3520 __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); 3521 __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); 3522 __ jmp(L_multiBlock_loopTop[k]); 3523 3524 // registers used in the non-parallelized loops 3525 // xmm register assignments for the loops below 3526 const XMMRegister xmm_result = xmm0; 3527 const XMMRegister xmm_prev_block_cipher_save = xmm2; 3528 const XMMRegister xmm_key11 = xmm3; 3529 const XMMRegister xmm_key12 = xmm4; 3530 const XMMRegister key_tmp = xmm4; 3531 3532 __ BIND(L_singleBlock_loopTopHead[k]); 3533 if (k == 1) { 3534 __ addptr(rsp, 6 * wordSize); 3535 } else if (k == 2) { 3536 __ addptr(rsp, 10 * wordSize); 3537 } 3538 __ cmpptr(len_reg, 0); // any blocks left?? 
__ jcc(Assembler::equal, L_exit);
      __ BIND(L_singleBlock_loopTopHead2[k]);
      if (k == 1) {
        load_key(xmm_key11, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0
        load_key(xmm_key12, key, 0xc0); // 0xc0; 192-bit key goes up to 0xc0
      }
      if (k == 2) {
        load_key(xmm_key11, key, 0xb0); // 0xb0; 256-bit key goes up to 0xe0
      }
      __ align(OptoLoopAlignment);
      __ BIND(L_singleBlock_loopTop[k]);
      __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
      __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector
      __ pxor(xmm_result, xmm_key_first); // do the aes dec rounds
      for (int rnum = 1; rnum <= 9 ; rnum++) {
        __ aesdec(xmm_result, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
      }
      if (k == 1) {
        __ aesdec(xmm_result, xmm_key11);
        __ aesdec(xmm_result, xmm_key12);
      }
      if (k == 2) {
        __ aesdec(xmm_result, xmm_key11);
        load_key(key_tmp, key, 0xc0);
        __ aesdec(xmm_result, key_tmp);
        load_key(key_tmp, key, 0xd0);
        __ aesdec(xmm_result, key_tmp);
        load_key(key_tmp, key, 0xe0);
        __ aesdec(xmm_result, key_tmp);
      }

      __ aesdeclast(xmm_result, xmm_key_last); // xmm15 always came from key+0
      __ pxor(xmm_result, xmm_prev_block_cipher); // xor with the current r vector
      __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
      // no need to store r to memory until we exit
      __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block
      __ addptr(pos, AESBlockSize);
      __ subptr(len_reg, AESBlockSize);
      __ jcc(Assembler::notEqual, L_singleBlock_loopTop[k]);
      if (k != 2) {
        __ jmp(L_exit);
      }
    } // for 128/192/256

    __ BIND(L_exit);
    __ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object
    __ pop(rbx);
#ifdef _WIN64
    __ movl(rax, len_mem);
#else
    __ pop(rax); // return length
#endif
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  address generate_upper_word_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "upper_word_mask");
    address start = __ pc();
    __ emit_data64(0x0000000000000000, relocInfo::none);
    __ emit_data64(0xFFFFFFFF00000000, relocInfo::none);
    return start;
  }

  address generate_shuffle_byte_flip_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }

  // ofs and limit are used for multi-block byte array.
  // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
  address generate_sha1_implCompress(bool multi_block, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf   = c_rarg0;
    Register state = c_rarg1;
    Register ofs   = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister abcd      = xmm0;
    const XMMRegister e0        = xmm1;
    const XMMRegister e1        = xmm2;
    const XMMRegister msg0      = xmm3;

    const XMMRegister msg1      = xmm4;
    const XMMRegister msg2      = xmm5;
    const XMMRegister msg3      = xmm6;
    const XMMRegister shuf_mask = xmm7;

    __ enter();

    __ subptr(rsp, 4 * wordSize);

    __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask,
                 buf, state, ofs, limit, rsp, multi_block);

    __ addptr(rsp, 4 * wordSize);

    __ leave();
    __ ret(0);
    return start;
  }

  address generate_pshuffle_byte_flip_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask");
    address start = __ pc();
    __ emit_data64(0x0405060700010203, relocInfo::none);
    __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);

    if (VM_Version::supports_avx2()) {
      __ emit_data64(0x0405060700010203, relocInfo::none); // second copy
      __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
      // _SHUF_00BA
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      // _SHUF_DC00
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
    }

    return start;
  }

  // Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
  address generate_pshuffle_byte_flip_mask_sha512() {
    __ align(32);
    StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask_sha512");
    address start = __ pc();
    if (VM_Version::supports_avx2()) {
      __ emit_data64(0x0001020304050607, relocInfo::none); // PSHUFFLE_BYTE_FLIP_MASK
      __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
      __ emit_data64(0x1011121314151617, relocInfo::none);
      __ emit_data64(0x18191a1b1c1d1e1f, relocInfo::none);
      __ emit_data64(0x0000000000000000, relocInfo::none); // MASK_YMM_LO
      __ emit_data64(0x0000000000000000, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
    }

    return start;
  }

  // ofs and limit are used for multi-block byte array.
3695 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit) 3696 address generate_sha256_implCompress(bool multi_block, const char *name) { 3697 assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), ""); 3698 __ align(CodeEntryAlignment); 3699 StubCodeMark mark(this, "StubRoutines", name); 3700 address start = __ pc(); 3701 3702 Register buf = c_rarg0; 3703 Register state = c_rarg1; 3704 Register ofs = c_rarg2; 3705 Register limit = c_rarg3; 3706 3707 const XMMRegister msg = xmm0; 3708 const XMMRegister state0 = xmm1; 3709 const XMMRegister state1 = xmm2; 3710 const XMMRegister msgtmp0 = xmm3; 3711 3712 const XMMRegister msgtmp1 = xmm4; 3713 const XMMRegister msgtmp2 = xmm5; 3714 const XMMRegister msgtmp3 = xmm6; 3715 const XMMRegister msgtmp4 = xmm7; 3716 3717 const XMMRegister shuf_mask = xmm8; 3718 3719 __ enter(); 3720 3721 __ subptr(rsp, 4 * wordSize); 3722 3723 if (VM_Version::supports_sha()) { 3724 __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3725 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3726 } else if (VM_Version::supports_avx2()) { 3727 __ sha256_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3728 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3729 } 3730 __ addptr(rsp, 4 * wordSize); 3731 __ vzeroupper(); 3732 __ leave(); 3733 __ ret(0); 3734 return start; 3735 } 3736 3737 address generate_sha512_implCompress(bool multi_block, const char *name) { 3738 assert(VM_Version::supports_avx2(), ""); 3739 assert(VM_Version::supports_bmi2(), ""); 3740 __ align(CodeEntryAlignment); 3741 StubCodeMark mark(this, "StubRoutines", name); 3742 address start = __ pc(); 3743 3744 Register buf = c_rarg0; 3745 Register state = c_rarg1; 3746 Register ofs = c_rarg2; 3747 Register limit = c_rarg3; 3748 3749 const XMMRegister msg = xmm0; 3750 const XMMRegister state0 = xmm1; 3751 const XMMRegister state1 = xmm2; 3752 const XMMRegister msgtmp0 = xmm3; 3753 const XMMRegister msgtmp1 = xmm4; 3754 const XMMRegister msgtmp2 = xmm5; 3755 const XMMRegister msgtmp3 = xmm6; 3756 const XMMRegister msgtmp4 = xmm7; 3757 3758 const XMMRegister shuf_mask = xmm8; 3759 3760 __ enter(); 3761 3762 __ sha512_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3763 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3764 3765 __ vzeroupper(); 3766 __ leave(); 3767 __ ret(0); 3768 return start; 3769 } 3770 3771 // This is a version of CTR/AES crypt which does 6 blocks in a loop at a time 3772 // to hide instruction latency 3773 // 3774 // Arguments: 3775 // 3776 // Inputs: 3777 // c_rarg0 - source byte array address 3778 // c_rarg1 - destination byte array address 3779 // c_rarg2 - K (key) in little endian int array 3780 // c_rarg3 - counter vector byte array address 3781 // Linux 3782 // c_rarg4 - input length 3783 // c_rarg5 - saved encryptedCounter start 3784 // rbp + 6 * wordSize - saved used length 3785 // Windows 3786 // rbp + 6 * wordSize - input length 3787 // rbp + 7 * wordSize - saved encryptedCounter start 3788 // rbp + 8 * wordSize - saved used length 3789 // 3790 // Output: 3791 // rax - input length 3792 // 3793 address generate_counterMode_AESCrypt_Parallel() { 3794 assert(UseAES, "need AES instructions and misaligned SSE support"); 3795 __ align(CodeEntryAlignment); 3796 StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt"); 3797 address start = __ pc(); 3798 const Register from = c_rarg0; // source array address 3799 const Register to = c_rarg1; 
// destination array address
    const Register key      = c_rarg2; // key array address
    const Register counter  = c_rarg3; // counter byte array initialized from counter array address
                                       // and updated with the incremented counter in the end
#ifndef _WIN64
    const Register len_reg  = c_rarg4;
    const Register saved_encCounter_start = c_rarg5;
    const Register used_addr = r10;
    const Address  used_mem(rbp, 2 * wordSize);
    const Register used      = r11;
#else
    const Address len_mem(rbp, 6 * wordSize);              // length is on stack on Win64
    const Address saved_encCounter_mem(rbp, 7 * wordSize); // saved encrypted counter is on stack on Win64
    const Address used_mem(rbp, 8 * wordSize);             // used length is on stack on Win64
    const Register len_reg = r10;      // pick the first volatile windows register
    const Register saved_encCounter_start = r11;
    const Register used_addr = r13;
    const Register used = r14;
#endif
    const Register pos = rax;

    const int PARALLEL_FACTOR = 6;
    const XMMRegister xmm_counter_shuf_mask = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front
    const XMMRegister xmm_curr_counter = xmm2;

    const XMMRegister xmm_key_tmp0 = xmm3;
    const XMMRegister xmm_key_tmp1 = xmm4;

    // registers holding the six results in the parallelized loop
    const XMMRegister xmm_result0 = xmm5;
    const XMMRegister xmm_result1 = xmm6;
    const XMMRegister xmm_result2 = xmm7;
    const XMMRegister xmm_result3 = xmm8;
    const XMMRegister xmm_result4 = xmm9;
    const XMMRegister xmm_result5 = xmm10;

    const XMMRegister xmm_from0 = xmm11;
    const XMMRegister xmm_from1 = xmm12;
    const XMMRegister xmm_from2 = xmm13;
    const XMMRegister xmm_from3 = xmm14; // the last one is xmm14. we have to preserve it on WIN64.
    const XMMRegister xmm_from4 = xmm3;  // reuse xmm3~4. Because xmm_key_tmp0~1 are useless when loading input text
    const XMMRegister xmm_from5 = xmm4;

    // for key_128, key_192, key_256
    const int rounds[3] = {10, 12, 14};
    Label L_exit_preLoop, L_preLoop_start;
    Label L_multiBlock_loopTop[3];
    Label L_singleBlockLoopTop[3];
    Label L__incCounter[3][6]; // for 6 blocks
    Label L__incCounter_single[3]; // for single block, key128, key192, key256
    Label L_processTail_insr[3], L_processTail_4_insr[3], L_processTail_2_insr[3], L_processTail_1_insr[3], L_processTail_exit_insr[3];
    Label L_processTail_extr[3], L_processTail_4_extr[3], L_processTail_2_extr[3], L_processTail_1_extr[3], L_processTail_exit_extr[3];

    Label L_exit;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
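    // Rough shape of what follows, as a hedged C sketch (names illustrative,
    // not HotSpot helpers): the counter block is kept byte-shuffled in
    // xmm_curr_counter so it can be incremented with integer ops, and each
    // chunk of keystream is XORed into the data:
    //
    //   while (len >= 6 * 16) {                 // parallel path, 6 blocks in flight
    //     for (int b = 0; b < 6; b++) ks[b] = aes_encrypt(counter + b, key);
    //     counter += 6;
    //     for (int b = 0; b < 6; b++) out16(b) = in16(b) ^ ks[b];
    //     len -= 6 * 16;
    //   }
    //   // ... then one block at a time, with a sub-block tail path at the end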
    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

#ifdef _WIN64
    // allocate spill slots for r13, r14
    enum {
      saved_r13_offset,
      saved_r14_offset
    };
    __ subptr(rsp, 2 * wordSize);
    __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
    __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);

    // on win64, fill len_reg from stack position
    __ movl(len_reg, len_mem);
    __ movptr(saved_encCounter_start, saved_encCounter_mem);
    __ movptr(used_addr, used_mem);
    __ movl(used, Address(used_addr, 0));
#else
    __ push(len_reg); // Save
    __ movptr(used_addr, used_mem);
    __ movl(used, Address(used_addr, 0));
#endif

    __ push(rbx); // Save RBX
    __ movdqu(xmm_curr_counter, Address(counter, 0x00)); // initialize counter with initial counter
    __ movdqu(xmm_counter_shuf_mask, ExternalAddress(StubRoutines::x86::counter_shuffle_mask_addr()), pos); // pos as scratch
    __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); // counter is shuffled
    __ movptr(pos, 0);

    // Use the partially used encrypted counter from last invocation
    __ BIND(L_preLoop_start);
    __ cmpptr(used, 16);
    __ jcc(Assembler::aboveEqual, L_exit_preLoop);
    __ cmpptr(len_reg, 0);
    __ jcc(Assembler::lessEqual, L_exit_preLoop);
    __ movb(rbx, Address(saved_encCounter_start, used));
    __ xorb(rbx, Address(from, pos));
    __ movb(Address(to, pos), rbx);
    __ addptr(pos, 1);
    __ addptr(used, 1);
    __ subptr(len_reg, 1);

    __ jmp(L_preLoop_start);

    __ BIND(L_exit_preLoop);
    __ movl(Address(used_addr, 0), used);

    // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()), rbx); // rbx as scratch
    __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
    __ cmpl(rbx, 52);
    __ jcc(Assembler::equal, L_multiBlock_loopTop[1]);
    __ cmpl(rbx, 60);
    __ jcc(Assembler::equal, L_multiBlock_loopTop[2]);

#define CTR_DoSix(opc, src_reg)                \
    __ opc(xmm_result0, src_reg);              \
    __ opc(xmm_result1, src_reg);              \
    __ opc(xmm_result2, src_reg);              \
    __ opc(xmm_result3, src_reg);              \
    __ opc(xmm_result4, src_reg);              \
    __ opc(xmm_result5, src_reg);

    // k == 0 :  generate code for key_128
    // k == 1 :  generate code for key_192
    // k == 2 :  generate code for key_256
    for (int k = 0; k < 3; ++k) {
      // multi blocks starts here
      __ align(OptoLoopAlignment);
      __ BIND(L_multiBlock_loopTop[k]);
      __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least PARALLEL_FACTOR blocks left
      __ jcc(Assembler::less, L_singleBlockLoopTop[k]);
      load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);

      // load, then increase counters
      CTR_DoSix(movdqa, xmm_curr_counter);
      inc_counter(rbx, xmm_result1, 0x01, L__incCounter[k][0]);
      inc_counter(rbx, xmm_result2, 0x02, L__incCounter[k][1]);
      inc_counter(rbx, xmm_result3, 0x03, L__incCounter[k][2]);
      inc_counter(rbx, xmm_result4, 0x04, L__incCounter[k][3]);
      inc_counter(rbx, xmm_result5, 0x05, L__incCounter[k][4]);
      inc_counter(rbx, xmm_curr_counter, 0x06, L__incCounter[k][5]);
      CTR_DoSix(pshufb, xmm_counter_shuf_mask); // after increased, shuffled counters back for PXOR
      CTR_DoSix(pxor, xmm_key_tmp0);            // PXOR with round 0 key
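      // The round loop below keeps two round keys in flight (xmm_key_tmp0/1)
      // so the key loads overlap the six dependent aesenc chains; in scalar
      // form each of the six lanes computes (hedged sketch, illustrative names):
      //
      //   for (int r = 1; r < nrounds; r++) lane = aes_enc_round(lane, rk[r]);
      //   lane = aes_enc_last(lane, rk[nrounds]);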
      // load two ROUND_KEYs at a time
      for (int i = 1; i < rounds[k]; ) {
        load_key(xmm_key_tmp1, key, (0x10 * i), xmm_key_shuf_mask);
        load_key(xmm_key_tmp0, key, (0x10 * (i+1)), xmm_key_shuf_mask);
        CTR_DoSix(aesenc, xmm_key_tmp1);
        i++;
        if (i != rounds[k]) {
          CTR_DoSix(aesenc, xmm_key_tmp0);
        } else {
          CTR_DoSix(aesenclast, xmm_key_tmp0);
        }
        i++;
      }

      // get next PARALLEL_FACTOR blocks into xmm_result registers
      __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ movdqu(xmm_from1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ movdqu(xmm_from2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ movdqu(xmm_from3, Address(from, pos, Address::times_1, 3 * AESBlockSize));
      __ movdqu(xmm_from4, Address(from, pos, Address::times_1, 4 * AESBlockSize));
      __ movdqu(xmm_from5, Address(from, pos, Address::times_1, 5 * AESBlockSize));

      __ pxor(xmm_result0, xmm_from0);
      __ pxor(xmm_result1, xmm_from1);
      __ pxor(xmm_result2, xmm_from2);
      __ pxor(xmm_result3, xmm_from3);
      __ pxor(xmm_result4, xmm_from4);
      __ pxor(xmm_result5, xmm_from5);

      // store 6 results into the next 96 bytes of output
      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
      __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1);
      __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2);
      __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3);
      __ movdqu(Address(to, pos, Address::times_1, 4 * AESBlockSize), xmm_result4);
      __ movdqu(Address(to, pos, Address::times_1, 5 * AESBlockSize), xmm_result5);

      __ addptr(pos, PARALLEL_FACTOR * AESBlockSize);     // advance the position in the crypt text
      __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // decrease the remaining length
      __ jmp(L_multiBlock_loopTop[k]);

      // singleBlock starts here
      __ align(OptoLoopAlignment);
      __ BIND(L_singleBlockLoopTop[k]);
      __ cmpptr(len_reg, 0);
      __ jcc(Assembler::lessEqual, L_exit);
      load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);
      __ movdqa(xmm_result0, xmm_curr_counter);
      inc_counter(rbx, xmm_curr_counter, 0x01, L__incCounter_single[k]);
      __ pshufb(xmm_result0, xmm_counter_shuf_mask);
      __ pxor(xmm_result0, xmm_key_tmp0);
      for (int i = 1; i < rounds[k]; i++) {
        load_key(xmm_key_tmp0, key, (0x10 * i), xmm_key_shuf_mask);
        __ aesenc(xmm_result0, xmm_key_tmp0);
      }
      load_key(xmm_key_tmp0, key, (rounds[k] * 0x10), xmm_key_shuf_mask);
      __ aesenclast(xmm_result0, xmm_key_tmp0);
      __ cmpptr(len_reg, AESBlockSize);
      __ jcc(Assembler::less, L_processTail_insr[k]);
      __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ pxor(xmm_result0, xmm_from0);
      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
      __ addptr(pos, AESBlockSize);
      __ subptr(len_reg, AESBlockSize);
      __ jmp(L_singleBlockLoopTop[k]);
      __ BIND(L_processTail_insr[k]); // Process the tail part of the input array
      __ addptr(pos, len_reg);        // 1.
Insert bytes from src array into xmm_from0 register 4015 __ testptr(len_reg, 8); 4016 __ jcc(Assembler::zero, L_processTail_4_insr[k]); 4017 __ subptr(pos,8); 4018 __ pinsrq(xmm_from0, Address(from, pos), 0); 4019 __ BIND(L_processTail_4_insr[k]); 4020 __ testptr(len_reg, 4); 4021 __ jcc(Assembler::zero, L_processTail_2_insr[k]); 4022 __ subptr(pos,4); 4023 __ pslldq(xmm_from0, 4); 4024 __ pinsrd(xmm_from0, Address(from, pos), 0); 4025 __ BIND(L_processTail_2_insr[k]); 4026 __ testptr(len_reg, 2); 4027 __ jcc(Assembler::zero, L_processTail_1_insr[k]); 4028 __ subptr(pos, 2); 4029 __ pslldq(xmm_from0, 2); 4030 __ pinsrw(xmm_from0, Address(from, pos), 0); 4031 __ BIND(L_processTail_1_insr[k]); 4032 __ testptr(len_reg, 1); 4033 __ jcc(Assembler::zero, L_processTail_exit_insr[k]); 4034 __ subptr(pos, 1); 4035 __ pslldq(xmm_from0, 1); 4036 __ pinsrb(xmm_from0, Address(from, pos), 0); 4037 __ BIND(L_processTail_exit_insr[k]); 4038 4039 __ movdqu(Address(saved_encCounter_start, 0), xmm_result0); // 2. Perform pxor of the encrypted counter and plaintext Bytes. 4040 __ pxor(xmm_result0, xmm_from0); // Also the encrypted counter is saved for next invocation. 4041 4042 __ testptr(len_reg, 8); 4043 __ jcc(Assembler::zero, L_processTail_4_extr[k]); // 3. Extract bytes from xmm_result0 into the dest. array 4044 __ pextrq(Address(to, pos), xmm_result0, 0); 4045 __ psrldq(xmm_result0, 8); 4046 __ addptr(pos, 8); 4047 __ BIND(L_processTail_4_extr[k]); 4048 __ testptr(len_reg, 4); 4049 __ jcc(Assembler::zero, L_processTail_2_extr[k]); 4050 __ pextrd(Address(to, pos), xmm_result0, 0); 4051 __ psrldq(xmm_result0, 4); 4052 __ addptr(pos, 4); 4053 __ BIND(L_processTail_2_extr[k]); 4054 __ testptr(len_reg, 2); 4055 __ jcc(Assembler::zero, L_processTail_1_extr[k]); 4056 __ pextrw(Address(to, pos), xmm_result0, 0); 4057 __ psrldq(xmm_result0, 2); 4058 __ addptr(pos, 2); 4059 __ BIND(L_processTail_1_extr[k]); 4060 __ testptr(len_reg, 1); 4061 __ jcc(Assembler::zero, L_processTail_exit_extr[k]); 4062 __ pextrb(Address(to, pos), xmm_result0, 0); 4063 4064 __ BIND(L_processTail_exit_extr[k]); 4065 __ movl(Address(used_addr, 0), len_reg); 4066 __ jmp(L_exit); 4067 4068 } 4069 4070 __ BIND(L_exit); 4071 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); //counter is shuffled back. 4072 __ movdqu(Address(counter, 0), xmm_curr_counter); //save counter back 4073 __ pop(rbx); // pop the saved RBX. 
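    // The tail paths above assemble a partial block without ever touching
    // memory past the arrays: the 8/4/2/1-byte pinsr loads gather the last
    // (len % 16) input bytes into xmm_from0, the encrypted counter is saved
    // whole to saved_encCounter_start, and the matching pextr stores scatter
    // the XORed bytes back; 'used' then records how much of that keystream
    // block is already consumed for the next invocation.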
4074 #ifdef _WIN64 4075 __ movl(rax, len_mem); 4076 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 4077 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 4078 __ addptr(rsp, 2 * wordSize); 4079 #else 4080 __ pop(rax); // return 'len' 4081 #endif 4082 __ leave(); // required for proper stackwalking of RuntimeStub frame 4083 __ ret(0); 4084 return start; 4085 } 4086 4087 // byte swap x86 long 4088 address generate_ghash_long_swap_mask() { 4089 __ align(CodeEntryAlignment); 4090 StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask"); 4091 address start = __ pc(); 4092 __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none ); 4093 __ emit_data64(0x0706050403020100, relocInfo::none ); 4094 return start; 4095 } 4096 4097 // byte swap x86 byte array 4098 address generate_ghash_byte_swap_mask() { 4099 __ align(CodeEntryAlignment); 4100 StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask"); 4101 address start = __ pc(); 4102 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none ); 4103 __ emit_data64(0x0001020304050607, relocInfo::none ); 4104 return start; 4105 } 4106 4107 /* Single and multi-block ghash operations */ 4108 address generate_ghash_processBlocks() { 4109 __ align(CodeEntryAlignment); 4110 Label L_ghash_loop, L_exit; 4111 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 4112 address start = __ pc(); 4113 4114 const Register state = c_rarg0; 4115 const Register subkeyH = c_rarg1; 4116 const Register data = c_rarg2; 4117 const Register blocks = c_rarg3; 4118 4119 const XMMRegister xmm_temp0 = xmm0; 4120 const XMMRegister xmm_temp1 = xmm1; 4121 const XMMRegister xmm_temp2 = xmm2; 4122 const XMMRegister xmm_temp3 = xmm3; 4123 const XMMRegister xmm_temp4 = xmm4; 4124 const XMMRegister xmm_temp5 = xmm5; 4125 const XMMRegister xmm_temp6 = xmm6; 4126 const XMMRegister xmm_temp7 = xmm7; 4127 const XMMRegister xmm_temp8 = xmm8; 4128 const XMMRegister xmm_temp9 = xmm9; 4129 const XMMRegister xmm_temp10 = xmm10; 4130 4131 __ enter(); 4132 4133 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 4134 // context for the registers used, where all instructions below are using 128-bit mode 4135 // On EVEX without VL and BW, these instructions will all be AVX. 
if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

    __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));

    __ movdqu(xmm_temp0, Address(state, 0));
    __ pshufb(xmm_temp0, xmm_temp10);


    __ BIND(L_ghash_loop);
    __ movdqu(xmm_temp2, Address(data, 0));
    __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr()));

    __ movdqu(xmm_temp1, Address(subkeyH, 0));
    __ pshufb(xmm_temp1, xmm_temp10);

    __ pxor(xmm_temp0, xmm_temp2);

    //
    // Multiply with the hash key
    //
    __ movdqu(xmm_temp3, xmm_temp0);
    __ pclmulqdq(xmm_temp3, xmm_temp1, 0);      // xmm3 holds a0*b0
    __ movdqu(xmm_temp4, xmm_temp0);
    __ pclmulqdq(xmm_temp4, xmm_temp1, 16);     // xmm4 holds a0*b1

    __ movdqu(xmm_temp5, xmm_temp0);
    __ pclmulqdq(xmm_temp5, xmm_temp1, 1);      // xmm5 holds a1*b0
    __ movdqu(xmm_temp6, xmm_temp0);
    __ pclmulqdq(xmm_temp6, xmm_temp1, 17);     // xmm6 holds a1*b1

    __ pxor(xmm_temp4, xmm_temp5);      // xmm4 holds a0*b1 + a1*b0

    __ movdqu(xmm_temp5, xmm_temp4);    // move the contents of xmm4 to xmm5
    __ psrldq(xmm_temp4, 8);    // shift xmm4 by 64 bits to the right
    __ pslldq(xmm_temp5, 8);    // shift xmm5 by 64 bits to the left
    __ pxor(xmm_temp3, xmm_temp5);
    __ pxor(xmm_temp6, xmm_temp4);      // Register pair <xmm6:xmm3> holds the result
                                        // of the carry-less multiplication of
                                        // xmm0 by xmm1.

    // We shift the result of the multiplication by one bit position
    // to the left to compensate for the fact that the bits are reversed.
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp8, xmm_temp6);
    __ pslld(xmm_temp3, 1);
    __ pslld(xmm_temp6, 1);
    __ psrld(xmm_temp7, 31);
    __ psrld(xmm_temp8, 31);
    __ movdqu(xmm_temp9, xmm_temp7);
    __ pslldq(xmm_temp8, 4);
    __ pslldq(xmm_temp7, 4);
    __ psrldq(xmm_temp9, 12);
    __ por(xmm_temp3, xmm_temp7);
    __ por(xmm_temp6, xmm_temp8);
    __ por(xmm_temp6, xmm_temp9);

    //
    // First phase of the reduction
    //
    // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts
    // independently.
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp8, xmm_temp3);
    __ movdqu(xmm_temp9, xmm_temp3);
    __ pslld(xmm_temp7, 31);    // packed left shift shifting << 31
    __ pslld(xmm_temp8, 30);    // packed left shift shifting << 30
    __ pslld(xmm_temp9, 25);    // packed left shift shifting << 25
    __ pxor(xmm_temp7, xmm_temp8);      // xor the shifted versions
    __ pxor(xmm_temp7, xmm_temp9);
    __ movdqu(xmm_temp8, xmm_temp7);
    __ pslldq(xmm_temp7, 12);
    __ psrldq(xmm_temp8, 4);
    __ pxor(xmm_temp3, xmm_temp7);      // first phase of the reduction complete

    //
    // Second phase of the reduction
    //
    // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these
    // shift operations.
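    // (For reference: GHASH works in GF(2^128) modulo the polynomial
    // x^128 + x^7 + x^2 + x + 1, with bits stored in reflected order. The two
    // shift-and-xor phases in this routine are the standard reduction of the
    // 256-bit carry-less product described in Intel's CLMUL white paper; the
    // <<31/<<30/<<25 and >>1/>>2/>>7 shift amounts come directly from the
    // x^7 + x^2 + x terms of that polynomial.)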
__ movdqu(xmm_temp2, xmm_temp3);
    __ movdqu(xmm_temp4, xmm_temp3);
    __ movdqu(xmm_temp5, xmm_temp3);
    __ psrld(xmm_temp2, 1);     // packed right shift shifting >> 1
    __ psrld(xmm_temp4, 2);     // packed right shift shifting >> 2
    __ psrld(xmm_temp5, 7);     // packed right shift shifting >> 7
    __ pxor(xmm_temp2, xmm_temp4);      // xor the shifted versions
    __ pxor(xmm_temp2, xmm_temp5);
    __ pxor(xmm_temp2, xmm_temp8);
    __ pxor(xmm_temp3, xmm_temp2);
    __ pxor(xmm_temp6, xmm_temp3);      // the result is in xmm6

    __ decrement(blocks);
    __ jcc(Assembler::zero, L_exit);
    __ movdqu(xmm_temp0, xmm_temp6);
    __ addptr(data, 16);
    __ jmp(L_ghash_loop);

    __ BIND(L_exit);
    __ pshufb(xmm_temp6, xmm_temp10);          // Byte swap 16-byte result
    __ movdqu(Address(state, 0), xmm_temp6);   // store the result
    __ leave();
    __ ret(0);
    return start;
  }

  /**
   *  Arguments:
   *
   * Inputs:
   *   c_rarg0   - int crc
   *   c_rarg1   - byte* buf
   *   c_rarg2   - int length
   *
   * Output:
   *   rax   - int crc result
   */
  address generate_updateBytesCRC32() {
    assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    // rscratch1: r10
    const Register crc   = c_rarg0;  // crc
    const Register buf   = c_rarg1;  // source java byte array address
    const Register len   = c_rarg2;  // length
    const Register table = c_rarg3;  // crc_table address (reuse register)
    const Register tmp   = r11;
    assert_different_registers(crc, buf, len, table, tmp, rax);

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ kernel_crc32(crc, buf, len, table, tmp);

    __ movl(rax, crc);
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  /**
   *  Arguments:
   *
   * Inputs:
   *   c_rarg0   - int crc
   *   c_rarg1   - byte* buf
   *   c_rarg2   - long length
   *   c_rarg3   - table_start - optional (present only when doing a library_call,
   *               not used by x86 algorithm)
   *
   * Output:
   *   rax   - int crc result
   */
  address generate_updateBytesCRC32C(bool is_pclmulqdq_supported) {
    assert(UseCRC32CIntrinsics, "need SSE4_2");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C");
    address start = __ pc();
    // reg.arg    int#0    int#1    int#2    int#3    int#4    int#5    float regs
    // Windows    RCX      RDX      R8       R9       none     none     XMM0..XMM3
    // Lin / Sol  RDI      RSI      RDX      RCX      R8       R9       XMM0..XMM7
    const Register crc = c_rarg0;  // crc
    const Register buf = c_rarg1;  // source java byte array address
    const Register len = c_rarg2;  // length
    const Register a = rax;
    const Register j = r9;
    const Register k = r10;
    const Register l = r11;
#ifdef _WIN64
    const Register y = rdi;
    const Register z = rsi;
#else
    const Register y = rcx;
    const Register z = r8;
#endif
    assert_different_registers(crc, buf, len, a, j, k, l, y, z);

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame
#ifdef _WIN64
    __ push(y);
    __ push(z);
#endif
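    // A hedged scalar sketch of what the helper below computes (illustrative
    // only; the actual stub uses a folded pclmulqdq/crc32-instruction scheme):
    //
    //   uint32_t c = crc;
    //   while (len-- > 0) {
    //     c ^= (uint8_t)*buf++;
    //     for (int i = 0; i < 8; i++)
    //       c = (c >> 1) ^ (0x82F63B78u & (uint32_t)(-(int32_t)(c & 1)));  // reflected CRC-32C poly
    //   }
    //   return c;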
    __ crc32c_ipl_alg2_alt2(crc, buf, len,
                            a, j, k,
                            l, y, z,
                            c_farg0, c_farg1, c_farg2,
                            is_pclmulqdq_supported);
    __ movl(rax, crc);
#ifdef _WIN64
    __ pop(z);
    __ pop(y);
#endif
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  /**
   *  Arguments:
   *
   *  Input:
   *    c_rarg0   - x address
   *    c_rarg1   - x length
   *    c_rarg2   - y address
   *    c_rarg3   - y length
   *   not Win64
   *    c_rarg4   - z address
   *    c_rarg5   - z length
   *   Win64
   *    rsp+40    - z address
   *    rsp+48    - z length
   */
  address generate_multiplyToLen() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "multiplyToLen");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register x     = rdi;
    const Register xlen  = rax;
    const Register y     = rsi;
    const Register ylen  = rcx;
    const Register z     = r8;
    const Register zlen  = r11;

    // Next registers will be saved on stack in multiply_to_len().
    const Register tmp1  = r12;
    const Register tmp2  = r13;
    const Register tmp3  = r14;
    const Register tmp4  = r15;
    const Register tmp5  = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifndef _WIN64
    __ movptr(zlen, r9); // Save r9 in r11 - zlen
#endif
    setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx
                       // ylen => rcx, z => r8, zlen => r11
                       // r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
    // last 2 arguments (#4, #5) are on stack on Win64
    __ movptr(z, Address(rsp, 6 * wordSize));
    __ movptr(zlen, Address(rsp, 7 * wordSize));
#endif

    __ movptr(xlen, rsi);
    __ movptr(y, rdx);
    __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5);

    restore_arg_regs();

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  /**
   *  Arguments:
   *
   *  Input:
   *    c_rarg0   - obja     address
   *    c_rarg1   - objb     address
   *    c_rarg3   - length   length
   *    c_rarg4   - scale    log2_array_indxscale
   *
   *  Output:
   *    rax   - int >= 0: mismatched index, < 0: bitwise complement of the tail
   */
  address generate_vectorizedMismatch() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "vectorizedMismatch");
    address start = __ pc();

    BLOCK_COMMENT("Entry:");
    __ enter();

#ifdef _WIN64  // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register scale = c_rarg0;  // rcx, will exchange with r9
    const Register objb = c_rarg1;   // rdx
    const Register length = c_rarg2; // r8
    const Register obja = c_rarg3;   // r9
    __ xchgq(obja, scale);  // now obja and scale contain the correct contents

    const Register tmp1 = r10;
    const Register tmp2 = r11;
#endif
#ifndef _WIN64 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register obja = c_rarg0;   // U:rdi
    const Register objb = c_rarg1;   // U:rsi
    const Register length = c_rarg2; // U:rdx
    const Register scale = c_rarg3;  // U:rcx
    const Register tmp1 = r8;
    const Register tmp2 = r9;
#endif
    const Register result = rax; // return value
    const XMMRegister vec0 = xmm0;
    const XMMRegister vec1 = xmm1;
    const XMMRegister vec2 = xmm2;

    __ vectorized_mismatch(obja, objb, length, scale, result, tmp1, tmp2, vec0, vec1, vec2);

    __ vzeroupper();
    __ leave();
    __ ret(0);

    return start;
  }

  /**
   *  Arguments:
   *
   *  Input:
   *    c_rarg0   - x address
   *    c_rarg1   - x length
   *    c_rarg2   - z address
   *    c_rarg3   - z length
   *
   */
  address generate_squareToLen() {

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "squareToLen");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...)
    const Register x      = rdi;
    const Register len    = rsi;
    const Register z      = r8;
    const Register zlen   = rcx;

    const Register tmp1   = r12;
    const Register tmp2   = r13;
    const Register tmp3   = r14;
    const Register tmp4   = r15;
    const Register tmp5   = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    setup_arg_regs(4); // x => rdi, len => rsi, z => rdx
                       // zlen => rcx
                       // r9 and r10 may be used to save non-volatile registers
    __ movptr(r8, rdx);
    __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax);

    restore_arg_regs();

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  /**
   * Arguments:
   *
   * Input:
   *   c_rarg0   - out address
   *   c_rarg1   - in address
   *   c_rarg2   - offset
   *   c_rarg3   - len
   *  not Win64
   *   c_rarg4   - k
   *  Win64
   *   rsp+40    - k
   */
  address generate_mulAdd() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "mulAdd");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register out     = rdi;
    const Register in      = rsi;
    const Register offset  = r11;
    const Register len     = rcx;
    const Register k       = r8;

    // Next registers will be saved on stack in mul_add().
4533 const Register tmp1 = r12; 4534 const Register tmp2 = r13; 4535 const Register tmp3 = r14; 4536 const Register tmp4 = r15; 4537 const Register tmp5 = rbx; 4538 4539 BLOCK_COMMENT("Entry:"); 4540 __ enter(); // required for proper stackwalking of RuntimeStub frame 4541 4542 setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx 4543 // len => rcx, k => r8 4544 // r9 and r10 may be used to save non-volatile registers 4545 #ifdef _WIN64 4546 // last argument is on stack on Win64 4547 __ movl(k, Address(rsp, 6 * wordSize)); 4548 #endif 4549 __ movptr(r11, rdx); // move offset in rdx to offset(r11) 4550 __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 4551 4552 restore_arg_regs(); 4553 4554 __ leave(); // required for proper stackwalking of RuntimeStub frame 4555 __ ret(0); 4556 4557 return start; 4558 } 4559 4560 address generate_libmExp() { 4561 StubCodeMark mark(this, "StubRoutines", "libmExp"); 4562 4563 address start = __ pc(); 4564 4565 const XMMRegister x0 = xmm0; 4566 const XMMRegister x1 = xmm1; 4567 const XMMRegister x2 = xmm2; 4568 const XMMRegister x3 = xmm3; 4569 4570 const XMMRegister x4 = xmm4; 4571 const XMMRegister x5 = xmm5; 4572 const XMMRegister x6 = xmm6; 4573 const XMMRegister x7 = xmm7; 4574 4575 const Register tmp = r11; 4576 4577 BLOCK_COMMENT("Entry:"); 4578 __ enter(); // required for proper stackwalking of RuntimeStub frame 4579 4580 __ fast_exp(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 4581 4582 __ leave(); // required for proper stackwalking of RuntimeStub frame 4583 __ ret(0); 4584 4585 return start; 4586 4587 } 4588 4589 address generate_libmLog() { 4590 StubCodeMark mark(this, "StubRoutines", "libmLog"); 4591 4592 address start = __ pc(); 4593 4594 const XMMRegister x0 = xmm0; 4595 const XMMRegister x1 = xmm1; 4596 const XMMRegister x2 = xmm2; 4597 const XMMRegister x3 = xmm3; 4598 4599 const XMMRegister x4 = xmm4; 4600 const XMMRegister x5 = xmm5; 4601 const XMMRegister x6 = xmm6; 4602 const XMMRegister x7 = xmm7; 4603 4604 const Register tmp1 = r11; 4605 const Register tmp2 = r8; 4606 4607 BLOCK_COMMENT("Entry:"); 4608 __ enter(); // required for proper stackwalking of RuntimeStub frame 4609 4610 __ fast_log(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2); 4611 4612 __ leave(); // required for proper stackwalking of RuntimeStub frame 4613 __ ret(0); 4614 4615 return start; 4616 4617 } 4618 4619 address generate_libmLog10() { 4620 StubCodeMark mark(this, "StubRoutines", "libmLog10"); 4621 4622 address start = __ pc(); 4623 4624 const XMMRegister x0 = xmm0; 4625 const XMMRegister x1 = xmm1; 4626 const XMMRegister x2 = xmm2; 4627 const XMMRegister x3 = xmm3; 4628 4629 const XMMRegister x4 = xmm4; 4630 const XMMRegister x5 = xmm5; 4631 const XMMRegister x6 = xmm6; 4632 const XMMRegister x7 = xmm7; 4633 4634 const Register tmp = r11; 4635 4636 BLOCK_COMMENT("Entry:"); 4637 __ enter(); // required for proper stackwalking of RuntimeStub frame 4638 4639 __ fast_log10(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 4640 4641 __ leave(); // required for proper stackwalking of RuntimeStub frame 4642 __ ret(0); 4643 4644 return start; 4645 4646 } 4647 4648 address generate_libmPow() { 4649 StubCodeMark mark(this, "StubRoutines", "libmPow"); 4650 4651 address start = __ pc(); 4652 4653 const XMMRegister x0 = xmm0; 4654 const XMMRegister x1 = xmm1; 4655 const XMMRegister x2 = xmm2; 4656 const XMMRegister x3 = xmm3; 4657 4658 const XMMRegister x4 = xmm4; 4659 const XMMRegister x5 = xmm5; 4660 const XMMRegister 
x6 = xmm6; 4661 const XMMRegister x7 = xmm7; 4662 4663 const Register tmp1 = r8; 4664 const Register tmp2 = r9; 4665 const Register tmp3 = r10; 4666 const Register tmp4 = r11; 4667 4668 BLOCK_COMMENT("Entry:"); 4669 __ enter(); // required for proper stackwalking of RuntimeStub frame 4670 4671 __ fast_pow(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4672 4673 __ leave(); // required for proper stackwalking of RuntimeStub frame 4674 __ ret(0); 4675 4676 return start; 4677 4678 } 4679 4680 address generate_libmSin() { 4681 StubCodeMark mark(this, "StubRoutines", "libmSin"); 4682 4683 address start = __ pc(); 4684 4685 const XMMRegister x0 = xmm0; 4686 const XMMRegister x1 = xmm1; 4687 const XMMRegister x2 = xmm2; 4688 const XMMRegister x3 = xmm3; 4689 4690 const XMMRegister x4 = xmm4; 4691 const XMMRegister x5 = xmm5; 4692 const XMMRegister x6 = xmm6; 4693 const XMMRegister x7 = xmm7; 4694 4695 const Register tmp1 = r8; 4696 const Register tmp2 = r9; 4697 const Register tmp3 = r10; 4698 const Register tmp4 = r11; 4699 4700 BLOCK_COMMENT("Entry:"); 4701 __ enter(); // required for proper stackwalking of RuntimeStub frame 4702 4703 #ifdef _WIN64 4704 __ push(rsi); 4705 __ push(rdi); 4706 #endif 4707 __ fast_sin(x0, x1, x2, x3, x4, x5, x6, x7, rax, rbx, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4708 4709 #ifdef _WIN64 4710 __ pop(rdi); 4711 __ pop(rsi); 4712 #endif 4713 4714 __ leave(); // required for proper stackwalking of RuntimeStub frame 4715 __ ret(0); 4716 4717 return start; 4718 4719 } 4720 4721 address generate_libmCos() { 4722 StubCodeMark mark(this, "StubRoutines", "libmCos"); 4723 4724 address start = __ pc(); 4725 4726 const XMMRegister x0 = xmm0; 4727 const XMMRegister x1 = xmm1; 4728 const XMMRegister x2 = xmm2; 4729 const XMMRegister x3 = xmm3; 4730 4731 const XMMRegister x4 = xmm4; 4732 const XMMRegister x5 = xmm5; 4733 const XMMRegister x6 = xmm6; 4734 const XMMRegister x7 = xmm7; 4735 4736 const Register tmp1 = r8; 4737 const Register tmp2 = r9; 4738 const Register tmp3 = r10; 4739 const Register tmp4 = r11; 4740 4741 BLOCK_COMMENT("Entry:"); 4742 __ enter(); // required for proper stackwalking of RuntimeStub frame 4743 4744 #ifdef _WIN64 4745 __ push(rsi); 4746 __ push(rdi); 4747 #endif 4748 __ fast_cos(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4749 4750 #ifdef _WIN64 4751 __ pop(rdi); 4752 __ pop(rsi); 4753 #endif 4754 4755 __ leave(); // required for proper stackwalking of RuntimeStub frame 4756 __ ret(0); 4757 4758 return start; 4759 4760 } 4761 4762 address generate_libmTan() { 4763 StubCodeMark mark(this, "StubRoutines", "libmTan"); 4764 4765 address start = __ pc(); 4766 4767 const XMMRegister x0 = xmm0; 4768 const XMMRegister x1 = xmm1; 4769 const XMMRegister x2 = xmm2; 4770 const XMMRegister x3 = xmm3; 4771 4772 const XMMRegister x4 = xmm4; 4773 const XMMRegister x5 = xmm5; 4774 const XMMRegister x6 = xmm6; 4775 const XMMRegister x7 = xmm7; 4776 4777 const Register tmp1 = r8; 4778 const Register tmp2 = r9; 4779 const Register tmp3 = r10; 4780 const Register tmp4 = r11; 4781 4782 BLOCK_COMMENT("Entry:"); 4783 __ enter(); // required for proper stackwalking of RuntimeStub frame 4784 4785 #ifdef _WIN64 4786 __ push(rsi); 4787 __ push(rdi); 4788 #endif 4789 __ fast_tan(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4790 4791 #ifdef _WIN64 4792 __ pop(rdi); 4793 __ pop(rsi); 4794 #endif 4795 4796 __ leave(); // required for proper stackwalking of RuntimeStub frame 4797 __ ret(0); 4798 4799 return 

#undef __
#define __ masm->

  // Continuation point for throwing of implicit exceptions that are
  // not handled in the current activation. Fabricates an exception
  // oop and initiates normal exception dispatching in this
  // frame. Since we need to preserve callee-saved values (currently
  // only for C2, but done for C1 as well) we need a callee-saved oop
  // map and therefore have to make these stubs into RuntimeStubs
  // rather than BufferBlobs. If the compiler needs all registers to
  // be preserved between the fault point and the exception handler
  // then it must assume responsibility for that in
  // AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other
  // implicit exceptions (e.g., NullPointerException or
  // AbstractMethodError on entry) are either at call sites or
  // otherwise assume that stack unwinding will be initiated, so
  // caller saved registers were assumed volatile in the compiler.
  address generate_throw_exception(const char* name,
                                   address runtime_entry,
                                   Register arg1 = noreg,
                                   Register arg2 = noreg) {
    // Information about frame layout at time of blocking runtime call.
    // Note that we only have to preserve callee-saved registers since
    // the compilers are responsible for supplying a continuation point
    // if they expect all registers to be preserved.
    enum layout {
      rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
      rbp_off2,
      return_off,
      return_off2,
      framesize // inclusive of return address
    };

    int insts_size = 512;
    int locs_size  = 64;

    CodeBuffer code(name, insts_size, locs_size);
    OopMapSet* oop_maps  = new OopMapSet();
    MacroAssembler* masm = new MacroAssembler(&code);

    address start = __ pc();

    // This is an inlined and slightly modified version of call_VM
    // which has the ability to fetch the return PC out of
    // thread-local storage and also sets up last_Java_sp slightly
    // differently than the real call_VM

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    assert(is_even(framesize/2), "sp not 16-byte aligned");

    // return address and rbp are already in place
    __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog

    int frame_complete = __ pc() - start;

    // Set up last_Java_sp and last_Java_fp
    address the_pc = __ pc();
    __ set_last_Java_frame(rsp, rbp, the_pc);
    __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack

    // Call runtime
    if (arg1 != noreg) {
      assert(arg2 != c_rarg1, "clobbered");
      __ movptr(c_rarg1, arg1);
    }
    if (arg2 != noreg) {
      __ movptr(c_rarg2, arg2);
    }
    __ movptr(c_rarg0, r15_thread);
    BLOCK_COMMENT("call runtime_entry");
    __ call(RuntimeAddress(runtime_entry));

    // Generate oop map
    OopMap* map = new OopMap(framesize, 0);

    oop_maps->add_gc_map(the_pc - start, map);

    __ reset_last_Java_frame(true);

    __ leave(); // required for proper stackwalking of RuntimeStub frame

    // check for pending exceptions
#ifdef ASSERT
    Label L;
    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()),
              (int32_t) NULL_WORD);
    __ jcc(Assembler::notEqual, L);
    __ should_not_reach_here();
    __ bind(L);
#endif // ASSERT
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

    // codeBlob framesize is in words (not VMRegImpl::slot_size)
    RuntimeStub* stub =
      RuntimeStub::new_runtime_stub(name,
                                    &code,
                                    frame_complete,
                                    (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                    oop_maps, false);
    return stub->entry_point();
  }
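
  // Worked example of the layout arithmetic above (the enum counts 32-bit
  // slots): on Linux frame::arg_reg_save_area_bytes is 0, so rbp_off is 0
  // and framesize is 4 (two words: saved rbp plus return address), and the
  // prolog subtracts (4 - 4) << LogBytesPerInt = 0 extra bytes. On Win64
  // the 32-byte register-argument home area makes rbp_off 8 and framesize
  // 12, so the prolog reserves (12 - 4) << LogBytesPerInt = 32 bytes below
  // the saved rbp.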

  void create_control_words() {
    // Round to nearest, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_std   = 0x027F;
    // Round to zero, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
    // Round to nearest, 24-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_24    = 0x007F;
    // Round to nearest, 64-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_64    = 0x037F;
    // MXCSR: round to nearest, all exceptions masked
    StubRoutines::_mxcsr_std           = 0x1F80;
    // Note: the following two constants are 80-bit values;
    // their layout is critical for correct loading by the FPU.
    // Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias1[0] = 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias1[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias1[2] = 0x03ff;
    // Un-Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias2[0] = 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias2[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias2[2] = 0x7bff;
  }
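
  // Reference notes for the constants above (not used by the code): in the
  // x87 control word, bits 0-5 mask the floating-point exceptions, bits 8-9
  // select the precision and bits 10-11 the rounding mode, so 0x027F means
  // all exceptions masked, 53-bit precision, round to nearest. In MXCSR,
  // bits 7-12 are the exception masks and bits 13-14 the rounding control,
  // so 0x1F80 means all exceptions masked, round to nearest, FZ/DAZ off.
  // The 80-bit bias constants use the extended-precision exponent bias of
  // 16383: 16383 - 15360 = 1023 = 0x03ff and 16383 + 15360 = 31743 = 0x7bff,
  // each with an explicit-integer-bit significand of 0x8000000000000000,
  // i.e. a significand of exactly 1.0.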

  // Initialization
  void generate_initial() {
    // Generates all stubs and initializes the entry points

    // These platform-specific settings are needed by generate_call_stub()
    create_control_words();

    // Entry points that exist on all platforms. Note: This is code
    // that could be shared among different platforms - however the
    // benefit seems to be smaller than the disadvantage of having a
    // much more complicated generator structure. See also comment in
    // stubRoutines.hpp.

    StubRoutines::_forward_exception_entry = generate_forward_exception();

    StubRoutines::_call_stub_entry =
      generate_call_stub(StubRoutines::_call_stub_return_address);

    // is referenced by megamorphic call
    StubRoutines::_catch_exception_entry = generate_catch_exception();

    // atomic calls
    StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
    StubRoutines::_atomic_xchg_long_entry    = generate_atomic_xchg_long();
    StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
    StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
    StubRoutines::_atomic_add_entry          = generate_atomic_add();
    StubRoutines::_atomic_add_long_entry     = generate_atomic_add_long();
    StubRoutines::_fence_entry               = generate_orderaccess_fence();

    // platform dependent
    StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
    StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();

    StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();

    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_StackOverflowError));
    StubRoutines::_throw_delayed_StackOverflowError_entry =
      generate_throw_exception("delayed StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_delayed_StackOverflowError));
    if (UseCRC32Intrinsics) {
      // set the table address before generating the stub, which uses it
      StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
    }

    if (UseCRC32CIntrinsics) {
      bool supports_clmul = VM_Version::supports_clmul();
      StubRoutines::x86::generate_CRC32C_table(supports_clmul);
      StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
      StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
    }
    if (VM_Version::supports_sse2() && UseLibmIntrinsic && InlineIntrinsics) {
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::x86::_ONEHALF_adr = (address)StubRoutines::x86::_ONEHALF;
        StubRoutines::x86::_P_2_adr = (address)StubRoutines::x86::_P_2;
        StubRoutines::x86::_SC_4_adr = (address)StubRoutines::x86::_SC_4;
        StubRoutines::x86::_Ctable_adr = (address)StubRoutines::x86::_Ctable;
        StubRoutines::x86::_SC_2_adr = (address)StubRoutines::x86::_SC_2;
        StubRoutines::x86::_SC_3_adr = (address)StubRoutines::x86::_SC_3;
        StubRoutines::x86::_SC_1_adr = (address)StubRoutines::x86::_SC_1;
        StubRoutines::x86::_PI_INV_TABLE_adr = (address)StubRoutines::x86::_PI_INV_TABLE;
        StubRoutines::x86::_PI_4_adr = (address)StubRoutines::x86::_PI_4;
        StubRoutines::x86::_PI32INV_adr = (address)StubRoutines::x86::_PI32INV;
        StubRoutines::x86::_SIGN_MASK_adr = (address)StubRoutines::x86::_SIGN_MASK;
        StubRoutines::x86::_P_1_adr = (address)StubRoutines::x86::_P_1;
        StubRoutines::x86::_P_3_adr = (address)StubRoutines::x86::_P_3;
        StubRoutines::x86::_NEG_ZERO_adr = (address)StubRoutines::x86::_NEG_ZERO;
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dexp)) {
        StubRoutines::_dexp = generate_libmExp();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
        StubRoutines::_dlog = generate_libmLog();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) {
        StubRoutines::_dlog10 = generate_libmLog10();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) {
        StubRoutines::_dpow = generate_libmPow();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
        StubRoutines::_dsin = generate_libmSin();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
        StubRoutines::_dcos = generate_libmCos();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::_dtan = generate_libmTan();
      }
    }
  }
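
  // Note on the split between generate_initial() and generate_all():
  // generate_initial() runs early in VM startup so that the call stub,
  // the basic exception-handling entries and the stack-overflow throwers
  // exist before the interpreter is generated; generate_all() runs after
  // universe initialization, when the remaining (largely compiler-support)
  // stubs can safely be built. See also the comment in stubRoutines.hpp.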

  void generate_all() {
    // Generates all stubs and initializes the entry points

    // These entry points require SharedInfo::stack0 to be set up in
    // non-core builds and need to be relocatable, so they each
    // fabricate a RuntimeStub internally.
    StubRoutines::_throw_AbstractMethodError_entry =
      generate_throw_exception("AbstractMethodError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_AbstractMethodError));

    StubRoutines::_throw_IncompatibleClassChangeError_entry =
      generate_throw_exception("IncompatibleClassChangeError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_IncompatibleClassChangeError));

    StubRoutines::_throw_NullPointerException_at_call_entry =
      generate_throw_exception("NullPointerException at call throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_NullPointerException_at_call));

    // entry points that are platform specific
    StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
    StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
    StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
    StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();

    StubRoutines::x86::_float_sign_mask  = generate_fp_mask("float_sign_mask",  0x7FFFFFFF7FFFFFFF);
    StubRoutines::x86::_float_sign_flip  = generate_fp_mask("float_sign_flip",  0x8000000080000000);
    StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
    StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    // don't bother generating these AES intrinsic stubs unless global flag is set
    if (UseAESIntrinsics) {
      StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others
      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
      StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
    }
    if (UseAESCTRIntrinsics) {
      StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask();
      StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
    }

    if (UseSHA1Intrinsics) {
      StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
      StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
      StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress");
      StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB");
    }
    if (UseSHA256Intrinsics) {
      StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
      char* dst = (char*)StubRoutines::x86::_k256_W;
      char* src = (char*)StubRoutines::x86::_k256;
      for (int ii = 0; ii < 16; ++ii) {
        memcpy(dst + 32 * ii,      src + 16 * ii, 16);
        memcpy(dst + 32 * ii + 16, src + 16 * ii, 16);
      }
      StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask();
      StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress");
      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB");
    }
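
    // Note on the _k256_W expansion above: _k256 holds the 64 scalar SHA-256
    // round constants as 16 rows of 16 bytes; the loop stores each row twice,
    // into both 16-byte halves of the corresponding 32-byte _k256_W row, so a
    // 256-bit AVX2 load sees the same four constants in both 128-bit lanes.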
    if (UseSHA512Intrinsics) {
      StubRoutines::x86::_k512_W_addr = (address)StubRoutines::x86::_k512_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = generate_pshuffle_byte_flip_mask_sha512();
      StubRoutines::_sha512_implCompress = generate_sha512_implCompress(false, "sha512_implCompress");
      StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB");
    }

    // Generate GHASH intrinsics code
    if (UseGHASHIntrinsics) {
      StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();
      StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask();
      StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
    }

    // Safefetch stubs.
    generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
                                                       &StubRoutines::_safefetch32_fault_pc,
                                                       &StubRoutines::_safefetch32_continuation_pc);
    generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                                                       &StubRoutines::_safefetchN_fault_pc,
                                                       &StubRoutines::_safefetchN_continuation_pc);
#ifdef COMPILER2
    if (UseMultiplyToLenIntrinsic) {
      StubRoutines::_multiplyToLen = generate_multiplyToLen();
    }
    if (UseSquareToLenIntrinsic) {
      StubRoutines::_squareToLen = generate_squareToLen();
    }
    if (UseMulAddIntrinsic) {
      StubRoutines::_mulAdd = generate_mulAdd();
    }
#ifndef _WINDOWS
    if (UseMontgomeryMultiplyIntrinsic) {
      StubRoutines::_montgomeryMultiply
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
    }
    if (UseMontgomerySquareIntrinsic) {
      StubRoutines::_montgomerySquare
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
    }
#endif // _WINDOWS
#endif // COMPILER2

    if (UseVectorizedMismatchIntrinsic) {
      StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch();
    }
  }

 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
}; // end class declaration

void StubGenerator_generate(CodeBuffer* code, bool all) {
  StubGenerator g(code, all);
}
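
// StubGenerator_generate is the platform hook called from stubRoutines.cpp:
// StubRoutines::initialize1() passes all == false early in VM startup, and
// StubRoutines::initialize2() passes all == true after universe init. A
// rough sketch of the first call site (buffer sizing elided):
//
//   BufferBlob* blob = BufferBlob::create("StubRoutines (1)", code_size1);
//   CodeBuffer buffer(blob);
//   StubGenerator_generate(&buffer, false); // initial stubs only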