1 /* 2 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "asm/macroAssembler.hpp" 27 #include "asm/macroAssembler.inline.hpp" 28 #include "ci/ciUtilities.hpp" 29 #include "gc/shared/barrierSet.hpp" 30 #include "gc/shared/barrierSetAssembler.hpp" 31 #include "interpreter/interpreter.hpp" 32 #include "nativeInst_x86.hpp" 33 #include "oops/instanceOop.hpp" 34 #include "oops/method.hpp" 35 #include "oops/objArrayKlass.hpp" 36 #include "oops/oop.inline.hpp" 37 #include "prims/methodHandles.hpp" 38 #include "runtime/frame.inline.hpp" 39 #include "runtime/handles.inline.hpp" 40 #include "runtime/sharedRuntime.hpp" 41 #include "runtime/stubCodeGenerator.hpp" 42 #include "runtime/stubRoutines.hpp" 43 #include "runtime/thread.inline.hpp" 44 #include "utilities/macros.hpp" 45 #ifdef COMPILER2 46 #include "opto/runtime.hpp" 47 #endif 48 #if INCLUDE_SHENANDOAHGC 49 #include "gc/shenandoah/brooksPointer.hpp" 50 #include "gc/shenandoah/shenandoahHeap.hpp" 51 #include "gc/shenandoah/shenandoahHeapRegion.hpp" 52 #include "gc/shenandoah/shenandoahRuntime.hpp" 53 #endif 54 #if INCLUDE_ZGC 55 #include "gc/z/zThreadLocalData.hpp" 56 #endif 57 58 // Declaration and definition of StubGenerator (no .hpp file). 59 // For a more detailed description of the stub routine structure 60 // see the comment in stubRoutines.hpp 61 62 #define __ _masm-> 63 #define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8) 64 #define a__ ((Assembler*)_masm)-> 65 66 #ifdef PRODUCT 67 #define BLOCK_COMMENT(str) /* nothing */ 68 #else 69 #define BLOCK_COMMENT(str) __ block_comment(str) 70 #endif 71 72 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 73 const int MXCSR_MASK = 0xFFC0; // Mask out any pending exceptions 74 75 // Stub Code definitions 76 77 class StubGenerator: public StubCodeGenerator { 78 private: 79 80 #ifdef PRODUCT 81 #define inc_counter_np(counter) ((void)0) 82 #else 83 void inc_counter_np_(int& counter) { 84 // This can destroy rscratch1 if counter is far from the code cache 85 __ incrementl(ExternalAddress((address)&counter)); 86 } 87 #define inc_counter_np(counter) \ 88 BLOCK_COMMENT("inc_counter " #counter); \ 89 inc_counter_np_(counter); 90 #endif 91 92 // Call stubs are used to call Java from C 93 // 94 // Linux Arguments: 95 // c_rarg0: call wrapper address address 96 // c_rarg1: result address 97 // c_rarg2: result type BasicType 98 // c_rarg3: method Method* 99 // c_rarg4: (interpreter) entry point address 100 // c_rarg5: parameters intptr_t* 101 // 16(rbp): parameter size (in words) int 102 // 24(rbp): thread Thread* 103 // 104 // [ return_from_Java ] <--- rsp 105 // [ argument word n ] 106 // ... 107 // -12 [ argument word 1 ] 108 // -11 [ saved r15 ] <--- rsp_after_call 109 // -10 [ saved r14 ] 110 // -9 [ saved r13 ] 111 // -8 [ saved r12 ] 112 // -7 [ saved rbx ] 113 // -6 [ call wrapper ] 114 // -5 [ result ] 115 // -4 [ result type ] 116 // -3 [ method ] 117 // -2 [ entry point ] 118 // -1 [ parameters ] 119 // 0 [ saved rbp ] <--- rbp 120 // 1 [ return address ] 121 // 2 [ parameter size ] 122 // 3 [ thread ] 123 // 124 // Windows Arguments: 125 // c_rarg0: call wrapper address address 126 // c_rarg1: result address 127 // c_rarg2: result type BasicType 128 // c_rarg3: method Method* 129 // 48(rbp): (interpreter) entry point address 130 // 56(rbp): parameters intptr_t* 131 // 64(rbp): parameter size (in words) int 132 // 72(rbp): thread Thread* 133 // 134 // [ return_from_Java ] <--- rsp 135 // [ argument word n ] 136 // ... 137 // -60 [ argument word 1 ] 138 // -59 [ saved xmm31 ] <--- rsp after_call 139 // [ saved xmm16-xmm30 ] (EVEX enabled, else the space is blank) 140 // -27 [ saved xmm15 ] 141 // [ saved xmm7-xmm14 ] 142 // -9 [ saved xmm6 ] (each xmm register takes 2 slots) 143 // -7 [ saved r15 ] 144 // -6 [ saved r14 ] 145 // -5 [ saved r13 ] 146 // -4 [ saved r12 ] 147 // -3 [ saved rdi ] 148 // -2 [ saved rsi ] 149 // -1 [ saved rbx ] 150 // 0 [ saved rbp ] <--- rbp 151 // 1 [ return address ] 152 // 2 [ call wrapper ] 153 // 3 [ result ] 154 // 4 [ result type ] 155 // 5 [ method ] 156 // 6 [ entry point ] 157 // 7 [ parameters ] 158 // 8 [ parameter size ] 159 // 9 [ thread ] 160 // 161 // Windows reserves the callers stack space for arguments 1-4. 162 // We spill c_rarg0-c_rarg3 to this space. 163 164 // Call stub stack layout word offsets from rbp 165 enum call_stub_layout { 166 #ifdef _WIN64 167 xmm_save_first = 6, // save from xmm6 168 xmm_save_last = 31, // to xmm31 169 xmm_save_base = -9, 170 rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -27 171 r15_off = -7, 172 r14_off = -6, 173 r13_off = -5, 174 r12_off = -4, 175 rdi_off = -3, 176 rsi_off = -2, 177 rbx_off = -1, 178 rbp_off = 0, 179 retaddr_off = 1, 180 call_wrapper_off = 2, 181 result_off = 3, 182 result_type_off = 4, 183 method_off = 5, 184 entry_point_off = 6, 185 parameters_off = 7, 186 parameter_size_off = 8, 187 thread_off = 9 188 #else 189 rsp_after_call_off = -12, 190 mxcsr_off = rsp_after_call_off, 191 r15_off = -11, 192 r14_off = -10, 193 r13_off = -9, 194 r12_off = -8, 195 rbx_off = -7, 196 call_wrapper_off = -6, 197 result_off = -5, 198 result_type_off = -4, 199 method_off = -3, 200 entry_point_off = -2, 201 parameters_off = -1, 202 rbp_off = 0, 203 retaddr_off = 1, 204 parameter_size_off = 2, 205 thread_off = 3 206 #endif 207 }; 208 209 #ifdef _WIN64 210 Address xmm_save(int reg) { 211 assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range"); 212 return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize); 213 } 214 #endif 215 216 address generate_call_stub(address& return_address) { 217 assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 && 218 (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off, 219 "adjust this code"); 220 StubCodeMark mark(this, "StubRoutines", "call_stub"); 221 address start = __ pc(); 222 223 // same as in generate_catch_exception()! 224 const Address rsp_after_call(rbp, rsp_after_call_off * wordSize); 225 226 const Address call_wrapper (rbp, call_wrapper_off * wordSize); 227 const Address result (rbp, result_off * wordSize); 228 const Address result_type (rbp, result_type_off * wordSize); 229 const Address method (rbp, method_off * wordSize); 230 const Address entry_point (rbp, entry_point_off * wordSize); 231 const Address parameters (rbp, parameters_off * wordSize); 232 const Address parameter_size(rbp, parameter_size_off * wordSize); 233 234 // same as in generate_catch_exception()! 235 const Address thread (rbp, thread_off * wordSize); 236 237 const Address r15_save(rbp, r15_off * wordSize); 238 const Address r14_save(rbp, r14_off * wordSize); 239 const Address r13_save(rbp, r13_off * wordSize); 240 const Address r12_save(rbp, r12_off * wordSize); 241 const Address rbx_save(rbp, rbx_off * wordSize); 242 243 // stub code 244 __ enter(); 245 __ subptr(rsp, -rsp_after_call_off * wordSize); 246 247 // save register parameters 248 #ifndef _WIN64 249 __ movptr(parameters, c_rarg5); // parameters 250 __ movptr(entry_point, c_rarg4); // entry_point 251 #endif 252 253 __ movptr(method, c_rarg3); // method 254 __ movl(result_type, c_rarg2); // result type 255 __ movptr(result, c_rarg1); // result 256 __ movptr(call_wrapper, c_rarg0); // call wrapper 257 258 // save regs belonging to calling function 259 __ movptr(rbx_save, rbx); 260 __ movptr(r12_save, r12); 261 __ movptr(r13_save, r13); 262 __ movptr(r14_save, r14); 263 __ movptr(r15_save, r15); 264 if (UseAVX > 2) { 265 __ movl(rbx, 0xffff); 266 __ kmovwl(k1, rbx); 267 } 268 #ifdef _WIN64 269 int last_reg = 15; 270 if (UseAVX > 2) { 271 last_reg = 31; 272 } 273 if (VM_Version::supports_evex()) { 274 for (int i = xmm_save_first; i <= last_reg; i++) { 275 __ vextractf32x4(xmm_save(i), as_XMMRegister(i), 0); 276 } 277 } else { 278 for (int i = xmm_save_first; i <= last_reg; i++) { 279 __ movdqu(xmm_save(i), as_XMMRegister(i)); 280 } 281 } 282 283 const Address rdi_save(rbp, rdi_off * wordSize); 284 const Address rsi_save(rbp, rsi_off * wordSize); 285 286 __ movptr(rsi_save, rsi); 287 __ movptr(rdi_save, rdi); 288 #else 289 const Address mxcsr_save(rbp, mxcsr_off * wordSize); 290 { 291 Label skip_ldmx; 292 __ stmxcsr(mxcsr_save); 293 __ movl(rax, mxcsr_save); 294 __ andl(rax, MXCSR_MASK); // Only check control and mask bits 295 ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std()); 296 __ cmp32(rax, mxcsr_std); 297 __ jcc(Assembler::equal, skip_ldmx); 298 __ ldmxcsr(mxcsr_std); 299 __ bind(skip_ldmx); 300 } 301 #endif 302 303 // Load up thread register 304 __ movptr(r15_thread, thread); 305 __ reinit_heapbase(); 306 307 #ifdef ASSERT 308 // make sure we have no pending exceptions 309 { 310 Label L; 311 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 312 __ jcc(Assembler::equal, L); 313 __ stop("StubRoutines::call_stub: entered with pending exception"); 314 __ bind(L); 315 } 316 #endif 317 318 // pass parameters if any 319 BLOCK_COMMENT("pass parameters if any"); 320 Label parameters_done; 321 __ movl(c_rarg3, parameter_size); 322 __ testl(c_rarg3, c_rarg3); 323 __ jcc(Assembler::zero, parameters_done); 324 325 Label loop; 326 __ movptr(c_rarg2, parameters); // parameter pointer 327 __ movl(c_rarg1, c_rarg3); // parameter counter is in c_rarg1 328 __ BIND(loop); 329 __ movptr(rax, Address(c_rarg2, 0));// get parameter 330 __ addptr(c_rarg2, wordSize); // advance to next parameter 331 __ decrementl(c_rarg1); // decrement counter 332 __ push(rax); // pass parameter 333 __ jcc(Assembler::notZero, loop); 334 335 // call Java function 336 __ BIND(parameters_done); 337 __ movptr(rbx, method); // get Method* 338 __ movptr(c_rarg1, entry_point); // get entry_point 339 __ mov(r13, rsp); // set sender sp 340 BLOCK_COMMENT("call Java function"); 341 __ call(c_rarg1); 342 343 BLOCK_COMMENT("call_stub_return_address:"); 344 return_address = __ pc(); 345 346 // store result depending on type (everything that is not 347 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT) 348 __ movptr(c_rarg0, result); 349 Label is_long, is_float, is_double, exit; 350 __ movl(c_rarg1, result_type); 351 __ cmpl(c_rarg1, T_OBJECT); 352 __ jcc(Assembler::equal, is_long); 353 __ cmpl(c_rarg1, T_LONG); 354 __ jcc(Assembler::equal, is_long); 355 __ cmpl(c_rarg1, T_FLOAT); 356 __ jcc(Assembler::equal, is_float); 357 __ cmpl(c_rarg1, T_DOUBLE); 358 __ jcc(Assembler::equal, is_double); 359 360 // handle T_INT case 361 __ movl(Address(c_rarg0, 0), rax); 362 363 __ BIND(exit); 364 365 // pop parameters 366 __ lea(rsp, rsp_after_call); 367 368 #ifdef ASSERT 369 // verify that threads correspond 370 { 371 Label L1, L2, L3; 372 __ cmpptr(r15_thread, thread); 373 __ jcc(Assembler::equal, L1); 374 __ stop("StubRoutines::call_stub: r15_thread is corrupted"); 375 __ bind(L1); 376 __ get_thread(rbx); 377 __ cmpptr(r15_thread, thread); 378 __ jcc(Assembler::equal, L2); 379 __ stop("StubRoutines::call_stub: r15_thread is modified by call"); 380 __ bind(L2); 381 __ cmpptr(r15_thread, rbx); 382 __ jcc(Assembler::equal, L3); 383 __ stop("StubRoutines::call_stub: threads must correspond"); 384 __ bind(L3); 385 } 386 #endif 387 388 // restore regs belonging to calling function 389 #ifdef _WIN64 390 // emit the restores for xmm regs 391 if (VM_Version::supports_evex()) { 392 for (int i = xmm_save_first; i <= last_reg; i++) { 393 __ vinsertf32x4(as_XMMRegister(i), as_XMMRegister(i), xmm_save(i), 0); 394 } 395 } else { 396 for (int i = xmm_save_first; i <= last_reg; i++) { 397 __ movdqu(as_XMMRegister(i), xmm_save(i)); 398 } 399 } 400 #endif 401 __ movptr(r15, r15_save); 402 __ movptr(r14, r14_save); 403 __ movptr(r13, r13_save); 404 __ movptr(r12, r12_save); 405 __ movptr(rbx, rbx_save); 406 407 #ifdef _WIN64 408 __ movptr(rdi, rdi_save); 409 __ movptr(rsi, rsi_save); 410 #else 411 __ ldmxcsr(mxcsr_save); 412 #endif 413 414 // restore rsp 415 __ addptr(rsp, -rsp_after_call_off * wordSize); 416 417 // return 418 __ vzeroupper(); 419 __ pop(rbp); 420 __ ret(0); 421 422 // handle return types different from T_INT 423 __ BIND(is_long); 424 __ movq(Address(c_rarg0, 0), rax); 425 __ jmp(exit); 426 427 __ BIND(is_float); 428 __ movflt(Address(c_rarg0, 0), xmm0); 429 __ jmp(exit); 430 431 __ BIND(is_double); 432 __ movdbl(Address(c_rarg0, 0), xmm0); 433 __ jmp(exit); 434 435 return start; 436 } 437 438 // Return point for a Java call if there's an exception thrown in 439 // Java code. The exception is caught and transformed into a 440 // pending exception stored in JavaThread that can be tested from 441 // within the VM. 442 // 443 // Note: Usually the parameters are removed by the callee. In case 444 // of an exception crossing an activation frame boundary, that is 445 // not the case if the callee is compiled code => need to setup the 446 // rsp. 447 // 448 // rax: exception oop 449 450 address generate_catch_exception() { 451 StubCodeMark mark(this, "StubRoutines", "catch_exception"); 452 address start = __ pc(); 453 454 // same as in generate_call_stub(): 455 const Address rsp_after_call(rbp, rsp_after_call_off * wordSize); 456 const Address thread (rbp, thread_off * wordSize); 457 458 #ifdef ASSERT 459 // verify that threads correspond 460 { 461 Label L1, L2, L3; 462 __ cmpptr(r15_thread, thread); 463 __ jcc(Assembler::equal, L1); 464 __ stop("StubRoutines::catch_exception: r15_thread is corrupted"); 465 __ bind(L1); 466 __ get_thread(rbx); 467 __ cmpptr(r15_thread, thread); 468 __ jcc(Assembler::equal, L2); 469 __ stop("StubRoutines::catch_exception: r15_thread is modified by call"); 470 __ bind(L2); 471 __ cmpptr(r15_thread, rbx); 472 __ jcc(Assembler::equal, L3); 473 __ stop("StubRoutines::catch_exception: threads must correspond"); 474 __ bind(L3); 475 } 476 #endif 477 478 // set pending exception 479 __ verify_oop(rax); 480 481 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax); 482 __ lea(rscratch1, ExternalAddress((address)__FILE__)); 483 __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1); 484 __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__); 485 486 // complete return to VM 487 assert(StubRoutines::_call_stub_return_address != NULL, 488 "_call_stub_return_address must have been generated before"); 489 __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address)); 490 491 return start; 492 } 493 494 // Continuation point for runtime calls returning with a pending 495 // exception. The pending exception check happened in the runtime 496 // or native call stub. The pending exception in Thread is 497 // converted into a Java-level exception. 498 // 499 // Contract with Java-level exception handlers: 500 // rax: exception 501 // rdx: throwing pc 502 // 503 // NOTE: At entry of this stub, exception-pc must be on stack !! 504 505 address generate_forward_exception() { 506 StubCodeMark mark(this, "StubRoutines", "forward exception"); 507 address start = __ pc(); 508 509 // Upon entry, the sp points to the return address returning into 510 // Java (interpreted or compiled) code; i.e., the return address 511 // becomes the throwing pc. 512 // 513 // Arguments pushed before the runtime call are still on the stack 514 // but the exception handler will reset the stack pointer -> 515 // ignore them. A potential result in registers can be ignored as 516 // well. 517 518 #ifdef ASSERT 519 // make sure this code is only executed if there is a pending exception 520 { 521 Label L; 522 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL); 523 __ jcc(Assembler::notEqual, L); 524 __ stop("StubRoutines::forward exception: no pending exception (1)"); 525 __ bind(L); 526 } 527 #endif 528 529 // compute exception handler into rbx 530 __ movptr(c_rarg0, Address(rsp, 0)); 531 BLOCK_COMMENT("call exception_handler_for_return_address"); 532 __ call_VM_leaf(CAST_FROM_FN_PTR(address, 533 SharedRuntime::exception_handler_for_return_address), 534 r15_thread, c_rarg0); 535 __ mov(rbx, rax); 536 537 // setup rax & rdx, remove return address & clear pending exception 538 __ pop(rdx); 539 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset())); 540 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 541 542 #ifdef ASSERT 543 // make sure exception is set 544 { 545 Label L; 546 __ testptr(rax, rax); 547 __ jcc(Assembler::notEqual, L); 548 __ stop("StubRoutines::forward exception: no pending exception (2)"); 549 __ bind(L); 550 } 551 #endif 552 553 // continue at exception handler (return address removed) 554 // rax: exception 555 // rbx: exception handler 556 // rdx: throwing pc 557 __ verify_oop(rax); 558 __ jmp(rbx); 559 560 return start; 561 } 562 563 // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest) 564 // 565 // Arguments : 566 // c_rarg0: exchange_value 567 // c_rarg0: dest 568 // 569 // Result: 570 // *dest <- ex, return (orig *dest) 571 address generate_atomic_xchg() { 572 StubCodeMark mark(this, "StubRoutines", "atomic_xchg"); 573 address start = __ pc(); 574 575 __ movl(rax, c_rarg0); // Copy to eax we need a return value anyhow 576 __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK 577 __ ret(0); 578 579 return start; 580 } 581 582 // Support for intptr_t atomic::xchg_long(jlong exchange_value, volatile jlong* dest) 583 // 584 // Arguments : 585 // c_rarg0: exchange_value 586 // c_rarg1: dest 587 // 588 // Result: 589 // *dest <- ex, return (orig *dest) 590 address generate_atomic_xchg_long() { 591 StubCodeMark mark(this, "StubRoutines", "atomic_xchg_long"); 592 address start = __ pc(); 593 594 __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow 595 __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK 596 __ ret(0); 597 598 return start; 599 } 600 601 // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest, 602 // jint compare_value) 603 // 604 // Arguments : 605 // c_rarg0: exchange_value 606 // c_rarg1: dest 607 // c_rarg2: compare_value 608 // 609 // Result: 610 // if ( compare_value == *dest ) { 611 // *dest = exchange_value 612 // return compare_value; 613 // else 614 // return *dest; 615 address generate_atomic_cmpxchg() { 616 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg"); 617 address start = __ pc(); 618 619 __ movl(rax, c_rarg2); 620 if ( os::is_MP() ) __ lock(); 621 __ cmpxchgl(c_rarg0, Address(c_rarg1, 0)); 622 __ ret(0); 623 624 return start; 625 } 626 627 // Support for int8_t atomic::atomic_cmpxchg(int8_t exchange_value, volatile int8_t* dest, 628 // int8_t compare_value) 629 // 630 // Arguments : 631 // c_rarg0: exchange_value 632 // c_rarg1: dest 633 // c_rarg2: compare_value 634 // 635 // Result: 636 // if ( compare_value == *dest ) { 637 // *dest = exchange_value 638 // return compare_value; 639 // else 640 // return *dest; 641 address generate_atomic_cmpxchg_byte() { 642 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte"); 643 address start = __ pc(); 644 645 __ movsbq(rax, c_rarg2); 646 if ( os::is_MP() ) __ lock(); 647 __ cmpxchgb(c_rarg0, Address(c_rarg1, 0)); 648 __ ret(0); 649 650 return start; 651 } 652 653 // Support for int64_t atomic::atomic_cmpxchg(int64_t exchange_value, 654 // volatile int64_t* dest, 655 // int64_t compare_value) 656 // Arguments : 657 // c_rarg0: exchange_value 658 // c_rarg1: dest 659 // c_rarg2: compare_value 660 // 661 // Result: 662 // if ( compare_value == *dest ) { 663 // *dest = exchange_value 664 // return compare_value; 665 // else 666 // return *dest; 667 address generate_atomic_cmpxchg_long() { 668 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long"); 669 address start = __ pc(); 670 671 __ movq(rax, c_rarg2); 672 if ( os::is_MP() ) __ lock(); 673 __ cmpxchgq(c_rarg0, Address(c_rarg1, 0)); 674 __ ret(0); 675 676 return start; 677 } 678 679 // Support for jint atomic::add(jint add_value, volatile jint* dest) 680 // 681 // Arguments : 682 // c_rarg0: add_value 683 // c_rarg1: dest 684 // 685 // Result: 686 // *dest += add_value 687 // return *dest; 688 address generate_atomic_add() { 689 StubCodeMark mark(this, "StubRoutines", "atomic_add"); 690 address start = __ pc(); 691 692 __ movl(rax, c_rarg0); 693 if ( os::is_MP() ) __ lock(); 694 __ xaddl(Address(c_rarg1, 0), c_rarg0); 695 __ addl(rax, c_rarg0); 696 __ ret(0); 697 698 return start; 699 } 700 701 // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) 702 // 703 // Arguments : 704 // c_rarg0: add_value 705 // c_rarg1: dest 706 // 707 // Result: 708 // *dest += add_value 709 // return *dest; 710 address generate_atomic_add_long() { 711 StubCodeMark mark(this, "StubRoutines", "atomic_add_long"); 712 address start = __ pc(); 713 714 __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow 715 if ( os::is_MP() ) __ lock(); 716 __ xaddptr(Address(c_rarg1, 0), c_rarg0); 717 __ addptr(rax, c_rarg0); 718 __ ret(0); 719 720 return start; 721 } 722 723 // Support for intptr_t OrderAccess::fence() 724 // 725 // Arguments : 726 // 727 // Result: 728 address generate_orderaccess_fence() { 729 StubCodeMark mark(this, "StubRoutines", "orderaccess_fence"); 730 address start = __ pc(); 731 __ membar(Assembler::StoreLoad); 732 __ ret(0); 733 734 return start; 735 } 736 737 // Support for intptr_t get_previous_fp() 738 // 739 // This routine is used to find the previous frame pointer for the 740 // caller (current_frame_guess). This is used as part of debugging 741 // ps() is seemingly lost trying to find frames. 742 // This code assumes that caller current_frame_guess) has a frame. 743 address generate_get_previous_fp() { 744 StubCodeMark mark(this, "StubRoutines", "get_previous_fp"); 745 const Address old_fp(rbp, 0); 746 const Address older_fp(rax, 0); 747 address start = __ pc(); 748 749 __ enter(); 750 __ movptr(rax, old_fp); // callers fp 751 __ movptr(rax, older_fp); // the frame for ps() 752 __ pop(rbp); 753 __ ret(0); 754 755 return start; 756 } 757 758 // Support for intptr_t get_previous_sp() 759 // 760 // This routine is used to find the previous stack pointer for the 761 // caller. 762 address generate_get_previous_sp() { 763 StubCodeMark mark(this, "StubRoutines", "get_previous_sp"); 764 address start = __ pc(); 765 766 __ movptr(rax, rsp); 767 __ addptr(rax, 8); // return address is at the top of the stack. 768 __ ret(0); 769 770 return start; 771 } 772 773 //---------------------------------------------------------------------------------------------------- 774 // Support for void verify_mxcsr() 775 // 776 // This routine is used with -Xcheck:jni to verify that native 777 // JNI code does not return to Java code without restoring the 778 // MXCSR register to our expected state. 779 780 address generate_verify_mxcsr() { 781 StubCodeMark mark(this, "StubRoutines", "verify_mxcsr"); 782 address start = __ pc(); 783 784 const Address mxcsr_save(rsp, 0); 785 786 if (CheckJNICalls) { 787 Label ok_ret; 788 ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std()); 789 __ push(rax); 790 __ subptr(rsp, wordSize); // allocate a temp location 791 __ stmxcsr(mxcsr_save); 792 __ movl(rax, mxcsr_save); 793 __ andl(rax, MXCSR_MASK); // Only check control and mask bits 794 __ cmp32(rax, mxcsr_std); 795 __ jcc(Assembler::equal, ok_ret); 796 797 __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall"); 798 799 __ ldmxcsr(mxcsr_std); 800 801 __ bind(ok_ret); 802 __ addptr(rsp, wordSize); 803 __ pop(rax); 804 } 805 806 __ ret(0); 807 808 return start; 809 } 810 811 #if INCLUDE_SHENANDOAHGC 812 address generate_shenandoah_wb(bool c_abi, bool do_cset_test) { 813 StubCodeMark mark(this, "StubRoutines", "shenandoah_wb"); 814 address start = __ pc(); 815 816 Label not_done; 817 818 // We use RDI, which also serves as argument register for slow call. 819 // RAX always holds the src object ptr, except after the slow call and 820 // the cmpxchg, then it holds the result. 821 // R8 and RCX are used as temporary registers. 822 if (!c_abi) { 823 __ push(rdi); 824 __ push(r8); 825 } 826 827 // Check for object beeing in the collection set. 828 // TODO: Can we use only 1 register here? 829 // The source object arrives here in rax. 830 // live: rax 831 // live: rdi 832 if (!c_abi) { 833 __ mov(rdi, rax); 834 } else { 835 if (rax != c_rarg0) { 836 __ mov(rax, c_rarg0); 837 } 838 } 839 if (do_cset_test) { 840 __ shrptr(rdi, ShenandoahHeapRegion::region_size_bytes_shift_jint()); 841 // live: r8 842 __ movptr(r8, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr()); 843 __ movbool(r8, Address(r8, rdi, Address::times_1)); 844 // unlive: rdi 845 __ testbool(r8); 846 // unlive: r8 847 __ jccb(Assembler::notZero, not_done); 848 849 if (!c_abi) { 850 __ pop(r8); 851 __ pop(rdi); 852 } 853 __ ret(0); 854 855 __ bind(not_done); 856 } 857 858 if (!c_abi) { 859 __ push(rcx); 860 } 861 862 if (!c_abi) { 863 __ push(rdx); 864 __ push(rdi); 865 __ push(rsi); 866 __ push(r8); 867 __ push(r9); 868 __ push(r10); 869 __ push(r11); 870 __ push(r12); 871 __ push(r13); 872 __ push(r14); 873 __ push(r15); 874 } 875 __ save_vector_registers(); 876 __ movptr(rdi, rax); 877 __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT), rdi); 878 __ restore_vector_registers(); 879 if (!c_abi) { 880 __ pop(r15); 881 __ pop(r14); 882 __ pop(r13); 883 __ pop(r12); 884 __ pop(r11); 885 __ pop(r10); 886 __ pop(r9); 887 __ pop(r8); 888 __ pop(rsi); 889 __ pop(rdi); 890 __ pop(rdx); 891 892 __ pop(rcx); 893 __ pop(r8); 894 __ pop(rdi); 895 } 896 __ ret(0); 897 898 return start; 899 } 900 #endif 901 902 address generate_f2i_fixup() { 903 StubCodeMark mark(this, "StubRoutines", "f2i_fixup"); 904 Address inout(rsp, 5 * wordSize); // return address + 4 saves 905 906 address start = __ pc(); 907 908 Label L; 909 910 __ push(rax); 911 __ push(c_rarg3); 912 __ push(c_rarg2); 913 __ push(c_rarg1); 914 915 __ movl(rax, 0x7f800000); 916 __ xorl(c_rarg3, c_rarg3); 917 __ movl(c_rarg2, inout); 918 __ movl(c_rarg1, c_rarg2); 919 __ andl(c_rarg1, 0x7fffffff); 920 __ cmpl(rax, c_rarg1); // NaN? -> 0 921 __ jcc(Assembler::negative, L); 922 __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint 923 __ movl(c_rarg3, 0x80000000); 924 __ movl(rax, 0x7fffffff); 925 __ cmovl(Assembler::positive, c_rarg3, rax); 926 927 __ bind(L); 928 __ movptr(inout, c_rarg3); 929 930 __ pop(c_rarg1); 931 __ pop(c_rarg2); 932 __ pop(c_rarg3); 933 __ pop(rax); 934 935 __ ret(0); 936 937 return start; 938 } 939 940 address generate_f2l_fixup() { 941 StubCodeMark mark(this, "StubRoutines", "f2l_fixup"); 942 Address inout(rsp, 5 * wordSize); // return address + 4 saves 943 address start = __ pc(); 944 945 Label L; 946 947 __ push(rax); 948 __ push(c_rarg3); 949 __ push(c_rarg2); 950 __ push(c_rarg1); 951 952 __ movl(rax, 0x7f800000); 953 __ xorl(c_rarg3, c_rarg3); 954 __ movl(c_rarg2, inout); 955 __ movl(c_rarg1, c_rarg2); 956 __ andl(c_rarg1, 0x7fffffff); 957 __ cmpl(rax, c_rarg1); // NaN? -> 0 958 __ jcc(Assembler::negative, L); 959 __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong 960 __ mov64(c_rarg3, 0x8000000000000000); 961 __ mov64(rax, 0x7fffffffffffffff); 962 __ cmov(Assembler::positive, c_rarg3, rax); 963 964 __ bind(L); 965 __ movptr(inout, c_rarg3); 966 967 __ pop(c_rarg1); 968 __ pop(c_rarg2); 969 __ pop(c_rarg3); 970 __ pop(rax); 971 972 __ ret(0); 973 974 return start; 975 } 976 977 address generate_d2i_fixup() { 978 StubCodeMark mark(this, "StubRoutines", "d2i_fixup"); 979 Address inout(rsp, 6 * wordSize); // return address + 5 saves 980 981 address start = __ pc(); 982 983 Label L; 984 985 __ push(rax); 986 __ push(c_rarg3); 987 __ push(c_rarg2); 988 __ push(c_rarg1); 989 __ push(c_rarg0); 990 991 __ movl(rax, 0x7ff00000); 992 __ movq(c_rarg2, inout); 993 __ movl(c_rarg3, c_rarg2); 994 __ mov(c_rarg1, c_rarg2); 995 __ mov(c_rarg0, c_rarg2); 996 __ negl(c_rarg3); 997 __ shrptr(c_rarg1, 0x20); 998 __ orl(c_rarg3, c_rarg2); 999 __ andl(c_rarg1, 0x7fffffff); 1000 __ xorl(c_rarg2, c_rarg2); 1001 __ shrl(c_rarg3, 0x1f); 1002 __ orl(c_rarg1, c_rarg3); 1003 __ cmpl(rax, c_rarg1); 1004 __ jcc(Assembler::negative, L); // NaN -> 0 1005 __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint 1006 __ movl(c_rarg2, 0x80000000); 1007 __ movl(rax, 0x7fffffff); 1008 __ cmov(Assembler::positive, c_rarg2, rax); 1009 1010 __ bind(L); 1011 __ movptr(inout, c_rarg2); 1012 1013 __ pop(c_rarg0); 1014 __ pop(c_rarg1); 1015 __ pop(c_rarg2); 1016 __ pop(c_rarg3); 1017 __ pop(rax); 1018 1019 __ ret(0); 1020 1021 return start; 1022 } 1023 1024 address generate_d2l_fixup() { 1025 StubCodeMark mark(this, "StubRoutines", "d2l_fixup"); 1026 Address inout(rsp, 6 * wordSize); // return address + 5 saves 1027 1028 address start = __ pc(); 1029 1030 Label L; 1031 1032 __ push(rax); 1033 __ push(c_rarg3); 1034 __ push(c_rarg2); 1035 __ push(c_rarg1); 1036 __ push(c_rarg0); 1037 1038 __ movl(rax, 0x7ff00000); 1039 __ movq(c_rarg2, inout); 1040 __ movl(c_rarg3, c_rarg2); 1041 __ mov(c_rarg1, c_rarg2); 1042 __ mov(c_rarg0, c_rarg2); 1043 __ negl(c_rarg3); 1044 __ shrptr(c_rarg1, 0x20); 1045 __ orl(c_rarg3, c_rarg2); 1046 __ andl(c_rarg1, 0x7fffffff); 1047 __ xorl(c_rarg2, c_rarg2); 1048 __ shrl(c_rarg3, 0x1f); 1049 __ orl(c_rarg1, c_rarg3); 1050 __ cmpl(rax, c_rarg1); 1051 __ jcc(Assembler::negative, L); // NaN -> 0 1052 __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong 1053 __ mov64(c_rarg2, 0x8000000000000000); 1054 __ mov64(rax, 0x7fffffffffffffff); 1055 __ cmovq(Assembler::positive, c_rarg2, rax); 1056 1057 __ bind(L); 1058 __ movq(inout, c_rarg2); 1059 1060 __ pop(c_rarg0); 1061 __ pop(c_rarg1); 1062 __ pop(c_rarg2); 1063 __ pop(c_rarg3); 1064 __ pop(rax); 1065 1066 __ ret(0); 1067 1068 return start; 1069 } 1070 1071 address generate_fp_mask(const char *stub_name, int64_t mask) { 1072 __ align(CodeEntryAlignment); 1073 StubCodeMark mark(this, "StubRoutines", stub_name); 1074 address start = __ pc(); 1075 1076 __ emit_data64( mask, relocInfo::none ); 1077 __ emit_data64( mask, relocInfo::none ); 1078 1079 return start; 1080 } 1081 1082 // Non-destructive plausibility checks for oops 1083 // 1084 // Arguments: 1085 // all args on stack! 1086 // 1087 // Stack after saving c_rarg3: 1088 // [tos + 0]: saved c_rarg3 1089 // [tos + 1]: saved c_rarg2 1090 // [tos + 2]: saved r12 (several TemplateTable methods use it) 1091 // [tos + 3]: saved flags 1092 // [tos + 4]: return address 1093 // * [tos + 5]: error message (char*) 1094 // * [tos + 6]: object to verify (oop) 1095 // * [tos + 7]: saved rax - saved by caller and bashed 1096 // * [tos + 8]: saved r10 (rscratch1) - saved by caller 1097 // * = popped on exit 1098 address generate_verify_oop() { 1099 StubCodeMark mark(this, "StubRoutines", "verify_oop"); 1100 address start = __ pc(); 1101 1102 Label exit, error; 1103 1104 __ pushf(); 1105 __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr())); 1106 1107 __ push(r12); 1108 1109 // save c_rarg2 and c_rarg3 1110 __ push(c_rarg2); 1111 __ push(c_rarg3); 1112 1113 enum { 1114 // After previous pushes. 1115 oop_to_verify = 6 * wordSize, 1116 saved_rax = 7 * wordSize, 1117 saved_r10 = 8 * wordSize, 1118 1119 // Before the call to MacroAssembler::debug(), see below. 1120 return_addr = 16 * wordSize, 1121 error_msg = 17 * wordSize 1122 }; 1123 1124 // get object 1125 __ movptr(rax, Address(rsp, oop_to_verify)); 1126 1127 // make sure object is 'reasonable' 1128 __ testptr(rax, rax); 1129 __ jcc(Assembler::zero, exit); // if obj is NULL it is OK 1130 1131 #if INCLUDE_ZGC 1132 if (UseZGC) { 1133 // Check if metadata bits indicate a bad oop 1134 __ testptr(rax, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset())); 1135 __ jcc(Assembler::notZero, error); 1136 } 1137 #endif 1138 1139 // Check if the oop is in the right area of memory 1140 __ movptr(c_rarg2, rax); 1141 __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask()); 1142 __ andptr(c_rarg2, c_rarg3); 1143 __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits()); 1144 __ cmpptr(c_rarg2, c_rarg3); 1145 __ jcc(Assembler::notZero, error); 1146 1147 // set r12 to heapbase for load_klass() 1148 __ reinit_heapbase(); 1149 1150 // make sure klass is 'reasonable', which is not zero. 1151 __ load_klass(rax, rax); // get klass 1152 __ testptr(rax, rax); 1153 __ jcc(Assembler::zero, error); // if klass is NULL it is broken 1154 1155 // return if everything seems ok 1156 __ bind(exit); 1157 __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back 1158 __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back 1159 __ pop(c_rarg3); // restore c_rarg3 1160 __ pop(c_rarg2); // restore c_rarg2 1161 __ pop(r12); // restore r12 1162 __ popf(); // restore flags 1163 __ ret(4 * wordSize); // pop caller saved stuff 1164 1165 // handle errors 1166 __ bind(error); 1167 __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back 1168 __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back 1169 __ pop(c_rarg3); // get saved c_rarg3 back 1170 __ pop(c_rarg2); // get saved c_rarg2 back 1171 __ pop(r12); // get saved r12 back 1172 __ popf(); // get saved flags off stack -- 1173 // will be ignored 1174 1175 __ pusha(); // push registers 1176 // (rip is already 1177 // already pushed) 1178 // debug(char* msg, int64_t pc, int64_t regs[]) 1179 // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and 1180 // pushed all the registers, so now the stack looks like: 1181 // [tos + 0] 16 saved registers 1182 // [tos + 16] return address 1183 // * [tos + 17] error message (char*) 1184 // * [tos + 18] object to verify (oop) 1185 // * [tos + 19] saved rax - saved by caller and bashed 1186 // * [tos + 20] saved r10 (rscratch1) - saved by caller 1187 // * = popped on exit 1188 1189 __ movptr(c_rarg0, Address(rsp, error_msg)); // pass address of error message 1190 __ movptr(c_rarg1, Address(rsp, return_addr)); // pass return address 1191 __ movq(c_rarg2, rsp); // pass address of regs on stack 1192 __ mov(r12, rsp); // remember rsp 1193 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 1194 __ andptr(rsp, -16); // align stack as required by ABI 1195 BLOCK_COMMENT("call MacroAssembler::debug"); 1196 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64))); 1197 __ mov(rsp, r12); // restore rsp 1198 __ popa(); // pop registers (includes r12) 1199 __ ret(4 * wordSize); // pop caller saved stuff 1200 1201 return start; 1202 } 1203 1204 // 1205 // Verify that a register contains clean 32-bits positive value 1206 // (high 32-bits are 0) so it could be used in 64-bits shifts. 1207 // 1208 // Input: 1209 // Rint - 32-bits value 1210 // Rtmp - scratch 1211 // 1212 void assert_clean_int(Register Rint, Register Rtmp) { 1213 #ifdef ASSERT 1214 Label L; 1215 assert_different_registers(Rtmp, Rint); 1216 __ movslq(Rtmp, Rint); 1217 __ cmpq(Rtmp, Rint); 1218 __ jcc(Assembler::equal, L); 1219 __ stop("high 32-bits of int value are not 0"); 1220 __ bind(L); 1221 #endif 1222 } 1223 1224 // Generate overlap test for array copy stubs 1225 // 1226 // Input: 1227 // c_rarg0 - from 1228 // c_rarg1 - to 1229 // c_rarg2 - element count 1230 // 1231 // Output: 1232 // rax - &from[element count - 1] 1233 // 1234 void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) { 1235 assert(no_overlap_target != NULL, "must be generated"); 1236 array_overlap_test(no_overlap_target, NULL, sf); 1237 } 1238 void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) { 1239 array_overlap_test(NULL, &L_no_overlap, sf); 1240 } 1241 void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) { 1242 const Register from = c_rarg0; 1243 const Register to = c_rarg1; 1244 const Register count = c_rarg2; 1245 const Register end_from = rax; 1246 1247 __ cmpptr(to, from); 1248 __ lea(end_from, Address(from, count, sf, 0)); 1249 if (NOLp == NULL) { 1250 ExternalAddress no_overlap(no_overlap_target); 1251 __ jump_cc(Assembler::belowEqual, no_overlap); 1252 __ cmpptr(to, end_from); 1253 __ jump_cc(Assembler::aboveEqual, no_overlap); 1254 } else { 1255 __ jcc(Assembler::belowEqual, (*NOLp)); 1256 __ cmpptr(to, end_from); 1257 __ jcc(Assembler::aboveEqual, (*NOLp)); 1258 } 1259 } 1260 1261 // Shuffle first three arg regs on Windows into Linux/Solaris locations. 1262 // 1263 // Outputs: 1264 // rdi - rcx 1265 // rsi - rdx 1266 // rdx - r8 1267 // rcx - r9 1268 // 1269 // Registers r9 and r10 are used to save rdi and rsi on Windows, which latter 1270 // are non-volatile. r9 and r10 should not be used by the caller. 1271 // 1272 void setup_arg_regs(int nargs = 3) { 1273 const Register saved_rdi = r9; 1274 const Register saved_rsi = r10; 1275 assert(nargs == 3 || nargs == 4, "else fix"); 1276 #ifdef _WIN64 1277 assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9, 1278 "unexpected argument registers"); 1279 if (nargs >= 4) 1280 __ mov(rax, r9); // r9 is also saved_rdi 1281 __ movptr(saved_rdi, rdi); 1282 __ movptr(saved_rsi, rsi); 1283 __ mov(rdi, rcx); // c_rarg0 1284 __ mov(rsi, rdx); // c_rarg1 1285 __ mov(rdx, r8); // c_rarg2 1286 if (nargs >= 4) 1287 __ mov(rcx, rax); // c_rarg3 (via rax) 1288 #else 1289 assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx, 1290 "unexpected argument registers"); 1291 #endif 1292 } 1293 1294 void restore_arg_regs() { 1295 const Register saved_rdi = r9; 1296 const Register saved_rsi = r10; 1297 #ifdef _WIN64 1298 __ movptr(rdi, saved_rdi); 1299 __ movptr(rsi, saved_rsi); 1300 #endif 1301 } 1302 1303 1304 // Copy big chunks forward 1305 // 1306 // Inputs: 1307 // end_from - source arrays end address 1308 // end_to - destination array end address 1309 // qword_count - 64-bits element count, negative 1310 // to - scratch 1311 // L_copy_bytes - entry label 1312 // L_copy_8_bytes - exit label 1313 // 1314 void copy_bytes_forward(Register end_from, Register end_to, 1315 Register qword_count, Register to, 1316 Label& L_copy_bytes, Label& L_copy_8_bytes) { 1317 DEBUG_ONLY(__ stop("enter at entry label, not here")); 1318 Label L_loop; 1319 __ align(OptoLoopAlignment); 1320 if (UseUnalignedLoadStores) { 1321 Label L_end; 1322 if (UseAVX > 2) { 1323 __ movl(to, 0xffff); 1324 __ kmovwl(k1, to); 1325 } 1326 // Copy 64-bytes per iteration 1327 __ BIND(L_loop); 1328 if (UseAVX > 2) { 1329 __ evmovdqul(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit); 1330 __ evmovdqul(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit); 1331 } else if (UseAVX == 2) { 1332 __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56)); 1333 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0); 1334 __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24)); 1335 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1); 1336 } else { 1337 __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56)); 1338 __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0); 1339 __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40)); 1340 __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1); 1341 __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24)); 1342 __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2); 1343 __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8)); 1344 __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3); 1345 } 1346 __ BIND(L_copy_bytes); 1347 __ addptr(qword_count, 8); 1348 __ jcc(Assembler::lessEqual, L_loop); 1349 __ subptr(qword_count, 4); // sub(8) and add(4) 1350 __ jccb(Assembler::greater, L_end); 1351 // Copy trailing 32 bytes 1352 if (UseAVX >= 2) { 1353 __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24)); 1354 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0); 1355 } else { 1356 __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24)); 1357 __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0); 1358 __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8)); 1359 __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1); 1360 } 1361 __ addptr(qword_count, 4); 1362 __ BIND(L_end); 1363 if (UseAVX >= 2) { 1364 // clean upper bits of YMM registers 1365 __ vpxor(xmm0, xmm0); 1366 __ vpxor(xmm1, xmm1); 1367 } 1368 } else { 1369 // Copy 32-bytes per iteration 1370 __ BIND(L_loop); 1371 __ movq(to, Address(end_from, qword_count, Address::times_8, -24)); 1372 __ movq(Address(end_to, qword_count, Address::times_8, -24), to); 1373 __ movq(to, Address(end_from, qword_count, Address::times_8, -16)); 1374 __ movq(Address(end_to, qword_count, Address::times_8, -16), to); 1375 __ movq(to, Address(end_from, qword_count, Address::times_8, - 8)); 1376 __ movq(Address(end_to, qword_count, Address::times_8, - 8), to); 1377 __ movq(to, Address(end_from, qword_count, Address::times_8, - 0)); 1378 __ movq(Address(end_to, qword_count, Address::times_8, - 0), to); 1379 1380 __ BIND(L_copy_bytes); 1381 __ addptr(qword_count, 4); 1382 __ jcc(Assembler::lessEqual, L_loop); 1383 } 1384 __ subptr(qword_count, 4); 1385 __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords 1386 } 1387 1388 // Copy big chunks backward 1389 // 1390 // Inputs: 1391 // from - source arrays address 1392 // dest - destination array address 1393 // qword_count - 64-bits element count 1394 // to - scratch 1395 // L_copy_bytes - entry label 1396 // L_copy_8_bytes - exit label 1397 // 1398 void copy_bytes_backward(Register from, Register dest, 1399 Register qword_count, Register to, 1400 Label& L_copy_bytes, Label& L_copy_8_bytes) { 1401 DEBUG_ONLY(__ stop("enter at entry label, not here")); 1402 Label L_loop; 1403 __ align(OptoLoopAlignment); 1404 if (UseUnalignedLoadStores) { 1405 Label L_end; 1406 if (UseAVX > 2) { 1407 __ movl(to, 0xffff); 1408 __ kmovwl(k1, to); 1409 } 1410 // Copy 64-bytes per iteration 1411 __ BIND(L_loop); 1412 if (UseAVX > 2) { 1413 __ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 0), Assembler::AVX_512bit); 1414 __ evmovdqul(Address(dest, qword_count, Address::times_8, 0), xmm0, Assembler::AVX_512bit); 1415 } else if (UseAVX == 2) { 1416 __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32)); 1417 __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0); 1418 __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0)); 1419 __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1); 1420 } else { 1421 __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48)); 1422 __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0); 1423 __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32)); 1424 __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1); 1425 __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16)); 1426 __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2); 1427 __ movdqu(xmm3, Address(from, qword_count, Address::times_8, 0)); 1428 __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm3); 1429 } 1430 __ BIND(L_copy_bytes); 1431 __ subptr(qword_count, 8); 1432 __ jcc(Assembler::greaterEqual, L_loop); 1433 1434 __ addptr(qword_count, 4); // add(8) and sub(4) 1435 __ jccb(Assembler::less, L_end); 1436 // Copy trailing 32 bytes 1437 if (UseAVX >= 2) { 1438 __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0)); 1439 __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0); 1440 } else { 1441 __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16)); 1442 __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0); 1443 __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 0)); 1444 __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm1); 1445 } 1446 __ subptr(qword_count, 4); 1447 __ BIND(L_end); 1448 if (UseAVX >= 2) { 1449 // clean upper bits of YMM registers 1450 __ vpxor(xmm0, xmm0); 1451 __ vpxor(xmm1, xmm1); 1452 } 1453 } else { 1454 // Copy 32-bytes per iteration 1455 __ BIND(L_loop); 1456 __ movq(to, Address(from, qword_count, Address::times_8, 24)); 1457 __ movq(Address(dest, qword_count, Address::times_8, 24), to); 1458 __ movq(to, Address(from, qword_count, Address::times_8, 16)); 1459 __ movq(Address(dest, qword_count, Address::times_8, 16), to); 1460 __ movq(to, Address(from, qword_count, Address::times_8, 8)); 1461 __ movq(Address(dest, qword_count, Address::times_8, 8), to); 1462 __ movq(to, Address(from, qword_count, Address::times_8, 0)); 1463 __ movq(Address(dest, qword_count, Address::times_8, 0), to); 1464 1465 __ BIND(L_copy_bytes); 1466 __ subptr(qword_count, 4); 1467 __ jcc(Assembler::greaterEqual, L_loop); 1468 } 1469 __ addptr(qword_count, 4); 1470 __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords 1471 } 1472 1473 1474 // Arguments: 1475 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1476 // ignored 1477 // name - stub name string 1478 // 1479 // Inputs: 1480 // c_rarg0 - source array address 1481 // c_rarg1 - destination array address 1482 // c_rarg2 - element count, treated as ssize_t, can be zero 1483 // 1484 // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, 1485 // we let the hardware handle it. The one to eight bytes within words, 1486 // dwords or qwords that span cache line boundaries will still be loaded 1487 // and stored atomically. 1488 // 1489 // Side Effects: 1490 // disjoint_byte_copy_entry is set to the no-overlap entry point 1491 // used by generate_conjoint_byte_copy(). 1492 // 1493 address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) { 1494 __ align(CodeEntryAlignment); 1495 StubCodeMark mark(this, "StubRoutines", name); 1496 address start = __ pc(); 1497 1498 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes; 1499 Label L_copy_byte, L_exit; 1500 const Register from = rdi; // source array address 1501 const Register to = rsi; // destination array address 1502 const Register count = rdx; // elements count 1503 const Register byte_count = rcx; 1504 const Register qword_count = count; 1505 const Register end_from = from; // source array end address 1506 const Register end_to = to; // destination array end address 1507 // End pointers are inclusive, and if count is not zero they point 1508 // to the last unit copied: end_to[0] := end_from[0] 1509 1510 __ enter(); // required for proper stackwalking of RuntimeStub frame 1511 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1512 1513 if (entry != NULL) { 1514 *entry = __ pc(); 1515 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1516 BLOCK_COMMENT("Entry:"); 1517 } 1518 1519 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1520 // r9 and r10 may be used to save non-volatile registers 1521 1522 // 'from', 'to' and 'count' are now valid 1523 __ movptr(byte_count, count); 1524 __ shrptr(count, 3); // count => qword_count 1525 1526 // Copy from low to high addresses. Use 'to' as scratch. 1527 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1528 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1529 __ negptr(qword_count); // make the count negative 1530 __ jmp(L_copy_bytes); 1531 1532 // Copy trailing qwords 1533 __ BIND(L_copy_8_bytes); 1534 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1535 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1536 __ increment(qword_count); 1537 __ jcc(Assembler::notZero, L_copy_8_bytes); 1538 1539 // Check for and copy trailing dword 1540 __ BIND(L_copy_4_bytes); 1541 __ testl(byte_count, 4); 1542 __ jccb(Assembler::zero, L_copy_2_bytes); 1543 __ movl(rax, Address(end_from, 8)); 1544 __ movl(Address(end_to, 8), rax); 1545 1546 __ addptr(end_from, 4); 1547 __ addptr(end_to, 4); 1548 1549 // Check for and copy trailing word 1550 __ BIND(L_copy_2_bytes); 1551 __ testl(byte_count, 2); 1552 __ jccb(Assembler::zero, L_copy_byte); 1553 __ movw(rax, Address(end_from, 8)); 1554 __ movw(Address(end_to, 8), rax); 1555 1556 __ addptr(end_from, 2); 1557 __ addptr(end_to, 2); 1558 1559 // Check for and copy trailing byte 1560 __ BIND(L_copy_byte); 1561 __ testl(byte_count, 1); 1562 __ jccb(Assembler::zero, L_exit); 1563 __ movb(rax, Address(end_from, 8)); 1564 __ movb(Address(end_to, 8), rax); 1565 1566 __ BIND(L_exit); 1567 restore_arg_regs(); 1568 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free 1569 __ xorptr(rax, rax); // return 0 1570 __ vzeroupper(); 1571 __ leave(); // required for proper stackwalking of RuntimeStub frame 1572 __ ret(0); 1573 1574 // Copy in multi-bytes chunks 1575 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1576 __ jmp(L_copy_4_bytes); 1577 1578 return start; 1579 } 1580 1581 // Arguments: 1582 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1583 // ignored 1584 // name - stub name string 1585 // 1586 // Inputs: 1587 // c_rarg0 - source array address 1588 // c_rarg1 - destination array address 1589 // c_rarg2 - element count, treated as ssize_t, can be zero 1590 // 1591 // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, 1592 // we let the hardware handle it. The one to eight bytes within words, 1593 // dwords or qwords that span cache line boundaries will still be loaded 1594 // and stored atomically. 1595 // 1596 address generate_conjoint_byte_copy(bool aligned, address nooverlap_target, 1597 address* entry, const char *name) { 1598 __ align(CodeEntryAlignment); 1599 StubCodeMark mark(this, "StubRoutines", name); 1600 address start = __ pc(); 1601 1602 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes; 1603 const Register from = rdi; // source array address 1604 const Register to = rsi; // destination array address 1605 const Register count = rdx; // elements count 1606 const Register byte_count = rcx; 1607 const Register qword_count = count; 1608 1609 __ enter(); // required for proper stackwalking of RuntimeStub frame 1610 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1611 1612 if (entry != NULL) { 1613 *entry = __ pc(); 1614 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1615 BLOCK_COMMENT("Entry:"); 1616 } 1617 1618 array_overlap_test(nooverlap_target, Address::times_1); 1619 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1620 // r9 and r10 may be used to save non-volatile registers 1621 1622 // 'from', 'to' and 'count' are now valid 1623 __ movptr(byte_count, count); 1624 __ shrptr(count, 3); // count => qword_count 1625 1626 // Copy from high to low addresses. 1627 1628 // Check for and copy trailing byte 1629 __ testl(byte_count, 1); 1630 __ jcc(Assembler::zero, L_copy_2_bytes); 1631 __ movb(rax, Address(from, byte_count, Address::times_1, -1)); 1632 __ movb(Address(to, byte_count, Address::times_1, -1), rax); 1633 __ decrement(byte_count); // Adjust for possible trailing word 1634 1635 // Check for and copy trailing word 1636 __ BIND(L_copy_2_bytes); 1637 __ testl(byte_count, 2); 1638 __ jcc(Assembler::zero, L_copy_4_bytes); 1639 __ movw(rax, Address(from, byte_count, Address::times_1, -2)); 1640 __ movw(Address(to, byte_count, Address::times_1, -2), rax); 1641 1642 // Check for and copy trailing dword 1643 __ BIND(L_copy_4_bytes); 1644 __ testl(byte_count, 4); 1645 __ jcc(Assembler::zero, L_copy_bytes); 1646 __ movl(rax, Address(from, qword_count, Address::times_8)); 1647 __ movl(Address(to, qword_count, Address::times_8), rax); 1648 __ jmp(L_copy_bytes); 1649 1650 // Copy trailing qwords 1651 __ BIND(L_copy_8_bytes); 1652 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1653 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1654 __ decrement(qword_count); 1655 __ jcc(Assembler::notZero, L_copy_8_bytes); 1656 1657 restore_arg_regs(); 1658 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free 1659 __ xorptr(rax, rax); // return 0 1660 __ vzeroupper(); 1661 __ leave(); // required for proper stackwalking of RuntimeStub frame 1662 __ ret(0); 1663 1664 // Copy in multi-bytes chunks 1665 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1666 1667 restore_arg_regs(); 1668 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free 1669 __ xorptr(rax, rax); // return 0 1670 __ vzeroupper(); 1671 __ leave(); // required for proper stackwalking of RuntimeStub frame 1672 __ ret(0); 1673 1674 return start; 1675 } 1676 1677 // Arguments: 1678 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1679 // ignored 1680 // name - stub name string 1681 // 1682 // Inputs: 1683 // c_rarg0 - source array address 1684 // c_rarg1 - destination array address 1685 // c_rarg2 - element count, treated as ssize_t, can be zero 1686 // 1687 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we 1688 // let the hardware handle it. The two or four words within dwords 1689 // or qwords that span cache line boundaries will still be loaded 1690 // and stored atomically. 1691 // 1692 // Side Effects: 1693 // disjoint_short_copy_entry is set to the no-overlap entry point 1694 // used by generate_conjoint_short_copy(). 1695 // 1696 address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) { 1697 __ align(CodeEntryAlignment); 1698 StubCodeMark mark(this, "StubRoutines", name); 1699 address start = __ pc(); 1700 1701 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes,L_copy_2_bytes,L_exit; 1702 const Register from = rdi; // source array address 1703 const Register to = rsi; // destination array address 1704 const Register count = rdx; // elements count 1705 const Register word_count = rcx; 1706 const Register qword_count = count; 1707 const Register end_from = from; // source array end address 1708 const Register end_to = to; // destination array end address 1709 // End pointers are inclusive, and if count is not zero they point 1710 // to the last unit copied: end_to[0] := end_from[0] 1711 1712 __ enter(); // required for proper stackwalking of RuntimeStub frame 1713 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1714 1715 if (entry != NULL) { 1716 *entry = __ pc(); 1717 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1718 BLOCK_COMMENT("Entry:"); 1719 } 1720 1721 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1722 // r9 and r10 may be used to save non-volatile registers 1723 1724 // 'from', 'to' and 'count' are now valid 1725 __ movptr(word_count, count); 1726 __ shrptr(count, 2); // count => qword_count 1727 1728 // Copy from low to high addresses. Use 'to' as scratch. 1729 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1730 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1731 __ negptr(qword_count); 1732 __ jmp(L_copy_bytes); 1733 1734 // Copy trailing qwords 1735 __ BIND(L_copy_8_bytes); 1736 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1737 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1738 __ increment(qword_count); 1739 __ jcc(Assembler::notZero, L_copy_8_bytes); 1740 1741 // Original 'dest' is trashed, so we can't use it as a 1742 // base register for a possible trailing word copy 1743 1744 // Check for and copy trailing dword 1745 __ BIND(L_copy_4_bytes); 1746 __ testl(word_count, 2); 1747 __ jccb(Assembler::zero, L_copy_2_bytes); 1748 __ movl(rax, Address(end_from, 8)); 1749 __ movl(Address(end_to, 8), rax); 1750 1751 __ addptr(end_from, 4); 1752 __ addptr(end_to, 4); 1753 1754 // Check for and copy trailing word 1755 __ BIND(L_copy_2_bytes); 1756 __ testl(word_count, 1); 1757 __ jccb(Assembler::zero, L_exit); 1758 __ movw(rax, Address(end_from, 8)); 1759 __ movw(Address(end_to, 8), rax); 1760 1761 __ BIND(L_exit); 1762 restore_arg_regs(); 1763 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1764 __ xorptr(rax, rax); // return 0 1765 __ vzeroupper(); 1766 __ leave(); // required for proper stackwalking of RuntimeStub frame 1767 __ ret(0); 1768 1769 // Copy in multi-bytes chunks 1770 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1771 __ jmp(L_copy_4_bytes); 1772 1773 return start; 1774 } 1775 1776 address generate_fill(BasicType t, bool aligned, const char *name) { 1777 __ align(CodeEntryAlignment); 1778 StubCodeMark mark(this, "StubRoutines", name); 1779 address start = __ pc(); 1780 1781 BLOCK_COMMENT("Entry:"); 1782 1783 const Register to = c_rarg0; // source array address 1784 const Register value = c_rarg1; // value 1785 const Register count = c_rarg2; // elements count 1786 1787 __ enter(); // required for proper stackwalking of RuntimeStub frame 1788 1789 __ generate_fill(t, aligned, to, value, count, rax, xmm0); 1790 1791 __ vzeroupper(); 1792 __ leave(); // required for proper stackwalking of RuntimeStub frame 1793 __ ret(0); 1794 return start; 1795 } 1796 1797 // Arguments: 1798 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1799 // ignored 1800 // name - stub name string 1801 // 1802 // Inputs: 1803 // c_rarg0 - source array address 1804 // c_rarg1 - destination array address 1805 // c_rarg2 - element count, treated as ssize_t, can be zero 1806 // 1807 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we 1808 // let the hardware handle it. The two or four words within dwords 1809 // or qwords that span cache line boundaries will still be loaded 1810 // and stored atomically. 1811 // 1812 address generate_conjoint_short_copy(bool aligned, address nooverlap_target, 1813 address *entry, const char *name) { 1814 __ align(CodeEntryAlignment); 1815 StubCodeMark mark(this, "StubRoutines", name); 1816 address start = __ pc(); 1817 1818 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes; 1819 const Register from = rdi; // source array address 1820 const Register to = rsi; // destination array address 1821 const Register count = rdx; // elements count 1822 const Register word_count = rcx; 1823 const Register qword_count = count; 1824 1825 __ enter(); // required for proper stackwalking of RuntimeStub frame 1826 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1827 1828 if (entry != NULL) { 1829 *entry = __ pc(); 1830 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1831 BLOCK_COMMENT("Entry:"); 1832 } 1833 1834 array_overlap_test(nooverlap_target, Address::times_2); 1835 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1836 // r9 and r10 may be used to save non-volatile registers 1837 1838 // 'from', 'to' and 'count' are now valid 1839 __ movptr(word_count, count); 1840 __ shrptr(count, 2); // count => qword_count 1841 1842 // Copy from high to low addresses. Use 'to' as scratch. 1843 1844 // Check for and copy trailing word 1845 __ testl(word_count, 1); 1846 __ jccb(Assembler::zero, L_copy_4_bytes); 1847 __ movw(rax, Address(from, word_count, Address::times_2, -2)); 1848 __ movw(Address(to, word_count, Address::times_2, -2), rax); 1849 1850 // Check for and copy trailing dword 1851 __ BIND(L_copy_4_bytes); 1852 __ testl(word_count, 2); 1853 __ jcc(Assembler::zero, L_copy_bytes); 1854 __ movl(rax, Address(from, qword_count, Address::times_8)); 1855 __ movl(Address(to, qword_count, Address::times_8), rax); 1856 __ jmp(L_copy_bytes); 1857 1858 // Copy trailing qwords 1859 __ BIND(L_copy_8_bytes); 1860 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1861 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1862 __ decrement(qword_count); 1863 __ jcc(Assembler::notZero, L_copy_8_bytes); 1864 1865 restore_arg_regs(); 1866 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1867 __ xorptr(rax, rax); // return 0 1868 __ vzeroupper(); 1869 __ leave(); // required for proper stackwalking of RuntimeStub frame 1870 __ ret(0); 1871 1872 // Copy in multi-bytes chunks 1873 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1874 1875 restore_arg_regs(); 1876 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1877 __ xorptr(rax, rax); // return 0 1878 __ vzeroupper(); 1879 __ leave(); // required for proper stackwalking of RuntimeStub frame 1880 __ ret(0); 1881 1882 return start; 1883 } 1884 1885 // Arguments: 1886 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1887 // ignored 1888 // is_oop - true => oop array, so generate store check code 1889 // name - stub name string 1890 // 1891 // Inputs: 1892 // c_rarg0 - source array address 1893 // c_rarg1 - destination array address 1894 // c_rarg2 - element count, treated as ssize_t, can be zero 1895 // 1896 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 1897 // the hardware handle it. The two dwords within qwords that span 1898 // cache line boundaries will still be loaded and stored atomicly. 1899 // 1900 // Side Effects: 1901 // disjoint_int_copy_entry is set to the no-overlap entry point 1902 // used by generate_conjoint_int_oop_copy(). 1903 // 1904 address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry, 1905 const char *name, bool dest_uninitialized = false) { 1906 __ align(CodeEntryAlignment); 1907 StubCodeMark mark(this, "StubRoutines", name); 1908 address start = __ pc(); 1909 1910 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit; 1911 const Register from = rdi; // source array address 1912 const Register to = rsi; // destination array address 1913 const Register count = rdx; // elements count 1914 const Register dword_count = rcx; 1915 const Register qword_count = count; 1916 const Register end_from = from; // source array end address 1917 const Register end_to = to; // destination array end address 1918 // End pointers are inclusive, and if count is not zero they point 1919 // to the last unit copied: end_to[0] := end_from[0] 1920 1921 __ enter(); // required for proper stackwalking of RuntimeStub frame 1922 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1923 1924 if (entry != NULL) { 1925 *entry = __ pc(); 1926 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1927 BLOCK_COMMENT("Entry:"); 1928 } 1929 1930 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1931 // r9 and r10 may be used to save non-volatile registers 1932 1933 DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY | ARRAYCOPY_DISJOINT; 1934 if (dest_uninitialized) { 1935 decorators |= AS_DEST_NOT_INITIALIZED; 1936 } 1937 if (aligned) { 1938 decorators |= ARRAYCOPY_ALIGNED; 1939 } 1940 1941 BasicType type = is_oop ? T_OBJECT : T_INT; 1942 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 1943 bs->arraycopy_prologue(_masm, decorators, type, from, to, count); 1944 1945 // 'from', 'to' and 'count' are now valid 1946 __ movptr(dword_count, count); 1947 __ shrptr(count, 1); // count => qword_count 1948 1949 // Copy from low to high addresses. Use 'to' as scratch. 1950 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1951 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1952 __ negptr(qword_count); 1953 __ jmp(L_copy_bytes); 1954 1955 // Copy trailing qwords 1956 __ BIND(L_copy_8_bytes); 1957 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1958 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1959 __ increment(qword_count); 1960 __ jcc(Assembler::notZero, L_copy_8_bytes); 1961 1962 // Check for and copy trailing dword 1963 __ BIND(L_copy_4_bytes); 1964 __ testl(dword_count, 1); // Only byte test since the value is 0 or 1 1965 __ jccb(Assembler::zero, L_exit); 1966 __ movl(rax, Address(end_from, 8)); 1967 __ movl(Address(end_to, 8), rax); 1968 1969 __ BIND(L_exit); 1970 bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count); 1971 restore_arg_regs(); 1972 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 1973 __ vzeroupper(); 1974 __ xorptr(rax, rax); // return 0 1975 __ leave(); // required for proper stackwalking of RuntimeStub frame 1976 __ ret(0); 1977 1978 // Copy in multi-bytes chunks 1979 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1980 __ jmp(L_copy_4_bytes); 1981 1982 return start; 1983 } 1984 1985 // Arguments: 1986 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1987 // ignored 1988 // is_oop - true => oop array, so generate store check code 1989 // name - stub name string 1990 // 1991 // Inputs: 1992 // c_rarg0 - source array address 1993 // c_rarg1 - destination array address 1994 // c_rarg2 - element count, treated as ssize_t, can be zero 1995 // 1996 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 1997 // the hardware handle it. The two dwords within qwords that span 1998 // cache line boundaries will still be loaded and stored atomicly. 1999 // 2000 address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target, 2001 address *entry, const char *name, 2002 bool dest_uninitialized = false) { 2003 __ align(CodeEntryAlignment); 2004 StubCodeMark mark(this, "StubRoutines", name); 2005 address start = __ pc(); 2006 2007 Label L_copy_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit; 2008 const Register from = rdi; // source array address 2009 const Register to = rsi; // destination array address 2010 const Register count = rdx; // elements count 2011 const Register dword_count = rcx; 2012 const Register qword_count = count; 2013 2014 __ enter(); // required for proper stackwalking of RuntimeStub frame 2015 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2016 2017 if (entry != NULL) { 2018 *entry = __ pc(); 2019 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2020 BLOCK_COMMENT("Entry:"); 2021 } 2022 2023 array_overlap_test(nooverlap_target, Address::times_4); 2024 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 2025 // r9 and r10 may be used to save non-volatile registers 2026 2027 DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY; 2028 if (dest_uninitialized) { 2029 decorators |= AS_DEST_NOT_INITIALIZED; 2030 } 2031 if (aligned) { 2032 decorators |= ARRAYCOPY_ALIGNED; 2033 } 2034 2035 BasicType type = is_oop ? T_OBJECT : T_INT; 2036 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2037 // no registers are destroyed by this call 2038 bs->arraycopy_prologue(_masm, decorators, type, from, to, count); 2039 2040 assert_clean_int(count, rax); // Make sure 'count' is clean int. 2041 // 'from', 'to' and 'count' are now valid 2042 __ movptr(dword_count, count); 2043 __ shrptr(count, 1); // count => qword_count 2044 2045 // Copy from high to low addresses. Use 'to' as scratch. 2046 2047 // Check for and copy trailing dword 2048 __ testl(dword_count, 1); 2049 __ jcc(Assembler::zero, L_copy_bytes); 2050 __ movl(rax, Address(from, dword_count, Address::times_4, -4)); 2051 __ movl(Address(to, dword_count, Address::times_4, -4), rax); 2052 __ jmp(L_copy_bytes); 2053 2054 // Copy trailing qwords 2055 __ BIND(L_copy_8_bytes); 2056 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2057 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2058 __ decrement(qword_count); 2059 __ jcc(Assembler::notZero, L_copy_8_bytes); 2060 2061 if (is_oop) { 2062 __ jmp(L_exit); 2063 } 2064 restore_arg_regs(); 2065 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2066 __ xorptr(rax, rax); // return 0 2067 __ vzeroupper(); 2068 __ leave(); // required for proper stackwalking of RuntimeStub frame 2069 __ ret(0); 2070 2071 // Copy in multi-bytes chunks 2072 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2073 2074 __ BIND(L_exit); 2075 bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count); 2076 restore_arg_regs(); 2077 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2078 __ xorptr(rax, rax); // return 0 2079 __ vzeroupper(); 2080 __ leave(); // required for proper stackwalking of RuntimeStub frame 2081 __ ret(0); 2082 2083 return start; 2084 } 2085 2086 // Arguments: 2087 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2088 // ignored 2089 // is_oop - true => oop array, so generate store check code 2090 // name - stub name string 2091 // 2092 // Inputs: 2093 // c_rarg0 - source array address 2094 // c_rarg1 - destination array address 2095 // c_rarg2 - element count, treated as ssize_t, can be zero 2096 // 2097 // Side Effects: 2098 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the 2099 // no-overlap entry point used by generate_conjoint_long_oop_copy(). 2100 // 2101 address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry, 2102 const char *name, bool dest_uninitialized = false) { 2103 __ align(CodeEntryAlignment); 2104 StubCodeMark mark(this, "StubRoutines", name); 2105 address start = __ pc(); 2106 2107 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2108 const Register from = rdi; // source array address 2109 const Register to = rsi; // destination array address 2110 const Register qword_count = rdx; // elements count 2111 const Register end_from = from; // source array end address 2112 const Register end_to = rcx; // destination array end address 2113 const Register saved_count = r11; 2114 // End pointers are inclusive, and if count is not zero they point 2115 // to the last unit copied: end_to[0] := end_from[0] 2116 2117 __ enter(); // required for proper stackwalking of RuntimeStub frame 2118 // Save no-overlap entry point for generate_conjoint_long_oop_copy() 2119 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2120 2121 if (entry != NULL) { 2122 *entry = __ pc(); 2123 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2124 BLOCK_COMMENT("Entry:"); 2125 } 2126 2127 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 2128 // r9 and r10 may be used to save non-volatile registers 2129 // 'from', 'to' and 'qword_count' are now valid 2130 2131 DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY | ARRAYCOPY_DISJOINT; 2132 if (dest_uninitialized) { 2133 decorators |= AS_DEST_NOT_INITIALIZED; 2134 } 2135 if (aligned) { 2136 decorators |= ARRAYCOPY_ALIGNED; 2137 } 2138 2139 BasicType type = is_oop ? T_OBJECT : T_LONG; 2140 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2141 bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count); 2142 2143 // Copy from low to high addresses. Use 'to' as scratch. 2144 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 2145 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 2146 __ negptr(qword_count); 2147 __ jmp(L_copy_bytes); 2148 2149 // Copy trailing qwords 2150 __ BIND(L_copy_8_bytes); 2151 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 2152 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 2153 __ increment(qword_count); 2154 __ jcc(Assembler::notZero, L_copy_8_bytes); 2155 2156 if (is_oop) { 2157 __ jmp(L_exit); 2158 } else { 2159 restore_arg_regs(); 2160 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2161 __ xorptr(rax, rax); // return 0 2162 __ vzeroupper(); 2163 __ leave(); // required for proper stackwalking of RuntimeStub frame 2164 __ ret(0); 2165 } 2166 2167 // Copy in multi-bytes chunks 2168 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2169 2170 __ BIND(L_exit); 2171 bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count); 2172 restore_arg_regs(); 2173 if (is_oop) { 2174 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2175 } else { 2176 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2177 } 2178 __ vzeroupper(); 2179 __ xorptr(rax, rax); // return 0 2180 __ leave(); // required for proper stackwalking of RuntimeStub frame 2181 __ ret(0); 2182 2183 return start; 2184 } 2185 2186 // Arguments: 2187 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2188 // ignored 2189 // is_oop - true => oop array, so generate store check code 2190 // name - stub name string 2191 // 2192 // Inputs: 2193 // c_rarg0 - source array address 2194 // c_rarg1 - destination array address 2195 // c_rarg2 - element count, treated as ssize_t, can be zero 2196 // 2197 address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, 2198 address nooverlap_target, address *entry, 2199 const char *name, bool dest_uninitialized = false) { 2200 __ align(CodeEntryAlignment); 2201 StubCodeMark mark(this, "StubRoutines", name); 2202 address start = __ pc(); 2203 2204 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2205 const Register from = rdi; // source array address 2206 const Register to = rsi; // destination array address 2207 const Register qword_count = rdx; // elements count 2208 const Register saved_count = rcx; 2209 2210 __ enter(); // required for proper stackwalking of RuntimeStub frame 2211 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2212 2213 if (entry != NULL) { 2214 *entry = __ pc(); 2215 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2216 BLOCK_COMMENT("Entry:"); 2217 } 2218 2219 array_overlap_test(nooverlap_target, Address::times_8); 2220 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 2221 // r9 and r10 may be used to save non-volatile registers 2222 // 'from', 'to' and 'qword_count' are now valid 2223 2224 DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY | ARRAYCOPY_DISJOINT; 2225 if (dest_uninitialized) { 2226 decorators |= AS_DEST_NOT_INITIALIZED; 2227 } 2228 if (aligned) { 2229 decorators |= ARRAYCOPY_ALIGNED; 2230 } 2231 2232 BasicType type = is_oop ? T_OBJECT : T_LONG; 2233 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2234 bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count); 2235 2236 __ jmp(L_copy_bytes); 2237 2238 // Copy trailing qwords 2239 __ BIND(L_copy_8_bytes); 2240 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2241 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2242 __ decrement(qword_count); 2243 __ jcc(Assembler::notZero, L_copy_8_bytes); 2244 2245 if (is_oop) { 2246 __ jmp(L_exit); 2247 } else { 2248 restore_arg_regs(); 2249 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2250 __ xorptr(rax, rax); // return 0 2251 __ vzeroupper(); 2252 __ leave(); // required for proper stackwalking of RuntimeStub frame 2253 __ ret(0); 2254 } 2255 2256 // Copy in multi-bytes chunks 2257 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2258 2259 __ BIND(L_exit); 2260 bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count); 2261 restore_arg_regs(); 2262 if (is_oop) { 2263 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2264 } else { 2265 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2266 } 2267 __ vzeroupper(); 2268 __ xorptr(rax, rax); // return 0 2269 __ leave(); // required for proper stackwalking of RuntimeStub frame 2270 __ ret(0); 2271 2272 return start; 2273 } 2274 2275 2276 // Helper for generating a dynamic type check. 2277 // Smashes no registers. 2278 void generate_type_check(Register sub_klass, 2279 Register super_check_offset, 2280 Register super_klass, 2281 Label& L_success) { 2282 assert_different_registers(sub_klass, super_check_offset, super_klass); 2283 2284 BLOCK_COMMENT("type_check:"); 2285 2286 Label L_miss; 2287 2288 __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL, 2289 super_check_offset); 2290 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL); 2291 2292 // Fall through on failure! 2293 __ BIND(L_miss); 2294 } 2295 2296 // 2297 // Generate checkcasting array copy stub 2298 // 2299 // Input: 2300 // c_rarg0 - source array address 2301 // c_rarg1 - destination array address 2302 // c_rarg2 - element count, treated as ssize_t, can be zero 2303 // c_rarg3 - size_t ckoff (super_check_offset) 2304 // not Win64 2305 // c_rarg4 - oop ckval (super_klass) 2306 // Win64 2307 // rsp+40 - oop ckval (super_klass) 2308 // 2309 // Output: 2310 // rax == 0 - success 2311 // rax == -1^K - failure, where K is partial transfer count 2312 // 2313 address generate_checkcast_copy(const char *name, address *entry, 2314 bool dest_uninitialized = false) { 2315 2316 Label L_load_element, L_store_element, L_do_card_marks, L_done; 2317 2318 // Input registers (after setup_arg_regs) 2319 const Register from = rdi; // source array address 2320 const Register to = rsi; // destination array address 2321 const Register length = rdx; // elements count 2322 const Register ckoff = rcx; // super_check_offset 2323 const Register ckval = r8; // super_klass 2324 2325 // Registers used as temps (r13, r14 are save-on-entry) 2326 const Register end_from = from; // source array end address 2327 const Register end_to = r13; // destination array end address 2328 const Register count = rdx; // -(count_remaining) 2329 const Register r14_length = r14; // saved copy of length 2330 // End pointers are inclusive, and if length is not zero they point 2331 // to the last unit copied: end_to[0] := end_from[0] 2332 2333 const Register rax_oop = rax; // actual oop copied 2334 const Register r11_klass = r11; // oop._klass 2335 2336 //--------------------------------------------------------------- 2337 // Assembler stub will be used for this call to arraycopy 2338 // if the two arrays are subtypes of Object[] but the 2339 // destination array type is not equal to or a supertype 2340 // of the source type. Each element must be separately 2341 // checked. 2342 2343 __ align(CodeEntryAlignment); 2344 StubCodeMark mark(this, "StubRoutines", name); 2345 address start = __ pc(); 2346 2347 __ enter(); // required for proper stackwalking of RuntimeStub frame 2348 2349 #ifdef ASSERT 2350 // caller guarantees that the arrays really are different 2351 // otherwise, we would have to make conjoint checks 2352 { Label L; 2353 array_overlap_test(L, TIMES_OOP); 2354 __ stop("checkcast_copy within a single array"); 2355 __ bind(L); 2356 } 2357 #endif //ASSERT 2358 2359 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx 2360 // ckoff => rcx, ckval => r8 2361 // r9 and r10 may be used to save non-volatile registers 2362 #ifdef _WIN64 2363 // last argument (#4) is on stack on Win64 2364 __ movptr(ckval, Address(rsp, 6 * wordSize)); 2365 #endif 2366 2367 // Caller of this entry point must set up the argument registers. 2368 if (entry != NULL) { 2369 *entry = __ pc(); 2370 BLOCK_COMMENT("Entry:"); 2371 } 2372 2373 // allocate spill slots for r13, r14 2374 enum { 2375 saved_r13_offset, 2376 saved_r14_offset, 2377 saved_rbp_offset 2378 }; 2379 __ subptr(rsp, saved_rbp_offset * wordSize); 2380 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13); 2381 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14); 2382 2383 // check that int operands are properly extended to size_t 2384 assert_clean_int(length, rax); 2385 assert_clean_int(ckoff, rax); 2386 2387 #ifdef ASSERT 2388 BLOCK_COMMENT("assert consistent ckoff/ckval"); 2389 // The ckoff and ckval must be mutually consistent, 2390 // even though caller generates both. 2391 { Label L; 2392 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2393 __ cmpl(ckoff, Address(ckval, sco_offset)); 2394 __ jcc(Assembler::equal, L); 2395 __ stop("super_check_offset inconsistent"); 2396 __ bind(L); 2397 } 2398 #endif //ASSERT 2399 2400 // Loop-invariant addresses. They are exclusive end pointers. 2401 Address end_from_addr(from, length, TIMES_OOP, 0); 2402 Address end_to_addr(to, length, TIMES_OOP, 0); 2403 // Loop-variant addresses. They assume post-incremented count < 0. 2404 Address from_element_addr(end_from, count, TIMES_OOP, 0); 2405 Address to_element_addr(end_to, count, TIMES_OOP, 0); 2406 2407 DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY | ARRAYCOPY_CHECKCAST; 2408 if (dest_uninitialized) { 2409 decorators |= AS_DEST_NOT_INITIALIZED; 2410 } 2411 2412 BasicType type = T_OBJECT; 2413 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 2414 bs->arraycopy_prologue(_masm, decorators, type, from, to, count); 2415 2416 // Copy from low to high addresses, indexed from the end of each array. 2417 __ lea(end_from, end_from_addr); 2418 __ lea(end_to, end_to_addr); 2419 __ movptr(r14_length, length); // save a copy of the length 2420 assert(length == count, ""); // else fix next line: 2421 __ negptr(count); // negate and test the length 2422 __ jcc(Assembler::notZero, L_load_element); 2423 2424 // Empty array: Nothing to do. 2425 __ xorptr(rax, rax); // return 0 on (trivial) success 2426 __ jmp(L_done); 2427 2428 // ======== begin loop ======== 2429 // (Loop is rotated; its entry is L_load_element.) 2430 // Loop control: 2431 // for (count = -count; count != 0; count++) 2432 // Base pointers src, dst are biased by 8*(count-1),to last element. 2433 __ align(OptoLoopAlignment); 2434 2435 __ BIND(L_store_element); 2436 __ store_heap_oop(to_element_addr, rax_oop, noreg, noreg, AS_RAW); // store the oop 2437 __ increment(count); // increment the count toward zero 2438 __ jcc(Assembler::zero, L_do_card_marks); 2439 2440 // ======== loop entry is here ======== 2441 __ BIND(L_load_element); 2442 __ load_heap_oop(rax_oop, from_element_addr, noreg, noreg, AS_RAW); // load the oop 2443 __ testptr(rax_oop, rax_oop); 2444 __ jcc(Assembler::zero, L_store_element); 2445 2446 __ load_klass(r11_klass, rax_oop);// query the object klass 2447 generate_type_check(r11_klass, ckoff, ckval, L_store_element); 2448 // ======== end loop ======== 2449 2450 // It was a real error; we must depend on the caller to finish the job. 2451 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops. 2452 // Emit GC store barriers for the oops we have copied (r14 + rdx), 2453 // and report their number to the caller. 2454 assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1); 2455 Label L_post_barrier; 2456 __ addptr(r14_length, count); // K = (original - remaining) oops 2457 __ movptr(rax, r14_length); // save the value 2458 __ notptr(rax); // report (-1^K) to caller (does not affect flags) 2459 __ jccb(Assembler::notZero, L_post_barrier); 2460 __ jmp(L_done); // K == 0, nothing was copied, skip post barrier 2461 2462 // Come here on success only. 2463 __ BIND(L_do_card_marks); 2464 __ xorptr(rax, rax); // return 0 on success 2465 2466 __ BIND(L_post_barrier); 2467 bs->arraycopy_epilogue(_masm, decorators, type, from, to, r14_length); 2468 2469 // Common exit point (success or failure). 2470 __ BIND(L_done); 2471 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 2472 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 2473 restore_arg_regs(); 2474 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free 2475 __ leave(); // required for proper stackwalking of RuntimeStub frame 2476 __ ret(0); 2477 2478 return start; 2479 } 2480 2481 // 2482 // Generate 'unsafe' array copy stub 2483 // Though just as safe as the other stubs, it takes an unscaled 2484 // size_t argument instead of an element count. 2485 // 2486 // Input: 2487 // c_rarg0 - source array address 2488 // c_rarg1 - destination array address 2489 // c_rarg2 - byte count, treated as ssize_t, can be zero 2490 // 2491 // Examines the alignment of the operands and dispatches 2492 // to a long, int, short, or byte copy loop. 2493 // 2494 address generate_unsafe_copy(const char *name, 2495 address byte_copy_entry, address short_copy_entry, 2496 address int_copy_entry, address long_copy_entry) { 2497 2498 Label L_long_aligned, L_int_aligned, L_short_aligned; 2499 2500 // Input registers (before setup_arg_regs) 2501 const Register from = c_rarg0; // source array address 2502 const Register to = c_rarg1; // destination array address 2503 const Register size = c_rarg2; // byte count (size_t) 2504 2505 // Register used as a temp 2506 const Register bits = rax; // test copy of low bits 2507 2508 __ align(CodeEntryAlignment); 2509 StubCodeMark mark(this, "StubRoutines", name); 2510 address start = __ pc(); 2511 2512 __ enter(); // required for proper stackwalking of RuntimeStub frame 2513 2514 // bump this on entry, not on exit: 2515 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr); 2516 2517 __ mov(bits, from); 2518 __ orptr(bits, to); 2519 __ orptr(bits, size); 2520 2521 __ testb(bits, BytesPerLong-1); 2522 __ jccb(Assembler::zero, L_long_aligned); 2523 2524 __ testb(bits, BytesPerInt-1); 2525 __ jccb(Assembler::zero, L_int_aligned); 2526 2527 __ testb(bits, BytesPerShort-1); 2528 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry)); 2529 2530 __ BIND(L_short_aligned); 2531 __ shrptr(size, LogBytesPerShort); // size => short_count 2532 __ jump(RuntimeAddress(short_copy_entry)); 2533 2534 __ BIND(L_int_aligned); 2535 __ shrptr(size, LogBytesPerInt); // size => int_count 2536 __ jump(RuntimeAddress(int_copy_entry)); 2537 2538 __ BIND(L_long_aligned); 2539 __ shrptr(size, LogBytesPerLong); // size => qword_count 2540 __ jump(RuntimeAddress(long_copy_entry)); 2541 2542 return start; 2543 } 2544 2545 // Perform range checks on the proposed arraycopy. 2546 // Kills temp, but nothing else. 2547 // Also, clean the sign bits of src_pos and dst_pos. 2548 void arraycopy_range_checks(Register src, // source array oop (c_rarg0) 2549 Register src_pos, // source position (c_rarg1) 2550 Register dst, // destination array oo (c_rarg2) 2551 Register dst_pos, // destination position (c_rarg3) 2552 Register length, 2553 Register temp, 2554 Label& L_failed) { 2555 BLOCK_COMMENT("arraycopy_range_checks:"); 2556 2557 // if (src_pos + length > arrayOop(src)->length()) FAIL; 2558 __ movl(temp, length); 2559 __ addl(temp, src_pos); // src_pos + length 2560 __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes())); 2561 __ jcc(Assembler::above, L_failed); 2562 2563 // if (dst_pos + length > arrayOop(dst)->length()) FAIL; 2564 __ movl(temp, length); 2565 __ addl(temp, dst_pos); // dst_pos + length 2566 __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes())); 2567 __ jcc(Assembler::above, L_failed); 2568 2569 // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'. 2570 // Move with sign extension can be used since they are positive. 2571 __ movslq(src_pos, src_pos); 2572 __ movslq(dst_pos, dst_pos); 2573 2574 BLOCK_COMMENT("arraycopy_range_checks done"); 2575 } 2576 2577 // 2578 // Generate generic array copy stubs 2579 // 2580 // Input: 2581 // c_rarg0 - src oop 2582 // c_rarg1 - src_pos (32-bits) 2583 // c_rarg2 - dst oop 2584 // c_rarg3 - dst_pos (32-bits) 2585 // not Win64 2586 // c_rarg4 - element count (32-bits) 2587 // Win64 2588 // rsp+40 - element count (32-bits) 2589 // 2590 // Output: 2591 // rax == 0 - success 2592 // rax == -1^K - failure, where K is partial transfer count 2593 // 2594 address generate_generic_copy(const char *name, 2595 address byte_copy_entry, address short_copy_entry, 2596 address int_copy_entry, address oop_copy_entry, 2597 address long_copy_entry, address checkcast_copy_entry) { 2598 2599 Label L_failed, L_failed_0, L_objArray; 2600 Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs; 2601 2602 // Input registers 2603 const Register src = c_rarg0; // source array oop 2604 const Register src_pos = c_rarg1; // source position 2605 const Register dst = c_rarg2; // destination array oop 2606 const Register dst_pos = c_rarg3; // destination position 2607 #ifndef _WIN64 2608 const Register length = c_rarg4; 2609 #else 2610 const Address length(rsp, 6 * wordSize); // elements count is on stack on Win64 2611 #endif 2612 2613 { int modulus = CodeEntryAlignment; 2614 int target = modulus - 5; // 5 = sizeof jmp(L_failed) 2615 int advance = target - (__ offset() % modulus); 2616 if (advance < 0) advance += modulus; 2617 if (advance > 0) __ nop(advance); 2618 } 2619 StubCodeMark mark(this, "StubRoutines", name); 2620 2621 // Short-hop target to L_failed. Makes for denser prologue code. 2622 __ BIND(L_failed_0); 2623 __ jmp(L_failed); 2624 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed"); 2625 2626 __ align(CodeEntryAlignment); 2627 address start = __ pc(); 2628 2629 __ enter(); // required for proper stackwalking of RuntimeStub frame 2630 2631 // bump this on entry, not on exit: 2632 inc_counter_np(SharedRuntime::_generic_array_copy_ctr); 2633 2634 //----------------------------------------------------------------------- 2635 // Assembler stub will be used for this call to arraycopy 2636 // if the following conditions are met: 2637 // 2638 // (1) src and dst must not be null. 2639 // (2) src_pos must not be negative. 2640 // (3) dst_pos must not be negative. 2641 // (4) length must not be negative. 2642 // (5) src klass and dst klass should be the same and not NULL. 2643 // (6) src and dst should be arrays. 2644 // (7) src_pos + length must not exceed length of src. 2645 // (8) dst_pos + length must not exceed length of dst. 2646 // 2647 2648 // if (src == NULL) return -1; 2649 __ testptr(src, src); // src oop 2650 size_t j1off = __ offset(); 2651 __ jccb(Assembler::zero, L_failed_0); 2652 2653 // if (src_pos < 0) return -1; 2654 __ testl(src_pos, src_pos); // src_pos (32-bits) 2655 __ jccb(Assembler::negative, L_failed_0); 2656 2657 // if (dst == NULL) return -1; 2658 __ testptr(dst, dst); // dst oop 2659 __ jccb(Assembler::zero, L_failed_0); 2660 2661 // if (dst_pos < 0) return -1; 2662 __ testl(dst_pos, dst_pos); // dst_pos (32-bits) 2663 size_t j4off = __ offset(); 2664 __ jccb(Assembler::negative, L_failed_0); 2665 2666 // The first four tests are very dense code, 2667 // but not quite dense enough to put four 2668 // jumps in a 16-byte instruction fetch buffer. 2669 // That's good, because some branch predicters 2670 // do not like jumps so close together. 2671 // Make sure of this. 2672 guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps"); 2673 2674 // registers used as temp 2675 const Register r11_length = r11; // elements count to copy 2676 const Register r10_src_klass = r10; // array klass 2677 2678 // if (length < 0) return -1; 2679 __ movl(r11_length, length); // length (elements count, 32-bits value) 2680 __ testl(r11_length, r11_length); 2681 __ jccb(Assembler::negative, L_failed_0); 2682 2683 __ load_klass(r10_src_klass, src); 2684 #ifdef ASSERT 2685 // assert(src->klass() != NULL); 2686 { 2687 BLOCK_COMMENT("assert klasses not null {"); 2688 Label L1, L2; 2689 __ testptr(r10_src_klass, r10_src_klass); 2690 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL 2691 __ bind(L1); 2692 __ stop("broken null klass"); 2693 __ bind(L2); 2694 __ load_klass(rax, dst); 2695 __ cmpq(rax, 0); 2696 __ jcc(Assembler::equal, L1); // this would be broken also 2697 BLOCK_COMMENT("} assert klasses not null done"); 2698 } 2699 #endif 2700 2701 // Load layout helper (32-bits) 2702 // 2703 // |array_tag| | header_size | element_type | |log2_element_size| 2704 // 32 30 24 16 8 2 0 2705 // 2706 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 2707 // 2708 2709 const int lh_offset = in_bytes(Klass::layout_helper_offset()); 2710 2711 // Handle objArrays completely differently... 2712 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2713 __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh); 2714 __ jcc(Assembler::equal, L_objArray); 2715 2716 // if (src->klass() != dst->klass()) return -1; 2717 __ load_klass(rax, dst); 2718 __ cmpq(r10_src_klass, rax); 2719 __ jcc(Assembler::notEqual, L_failed); 2720 2721 const Register rax_lh = rax; // layout helper 2722 __ movl(rax_lh, Address(r10_src_klass, lh_offset)); 2723 2724 // if (!src->is_Array()) return -1; 2725 __ cmpl(rax_lh, Klass::_lh_neutral_value); 2726 __ jcc(Assembler::greaterEqual, L_failed); 2727 2728 // At this point, it is known to be a typeArray (array_tag 0x3). 2729 #ifdef ASSERT 2730 { 2731 BLOCK_COMMENT("assert primitive array {"); 2732 Label L; 2733 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift)); 2734 __ jcc(Assembler::greaterEqual, L); 2735 __ stop("must be a primitive array"); 2736 __ bind(L); 2737 BLOCK_COMMENT("} assert primitive array done"); 2738 } 2739 #endif 2740 2741 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2742 r10, L_failed); 2743 2744 // TypeArrayKlass 2745 // 2746 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 2747 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 2748 // 2749 2750 const Register r10_offset = r10; // array offset 2751 const Register rax_elsize = rax_lh; // element size 2752 2753 __ movl(r10_offset, rax_lh); 2754 __ shrl(r10_offset, Klass::_lh_header_size_shift); 2755 __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset 2756 __ addptr(src, r10_offset); // src array offset 2757 __ addptr(dst, r10_offset); // dst array offset 2758 BLOCK_COMMENT("choose copy loop based on element size"); 2759 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize 2760 2761 // next registers should be set before the jump to corresponding stub 2762 const Register from = c_rarg0; // source array address 2763 const Register to = c_rarg1; // destination array address 2764 const Register count = c_rarg2; // elements count 2765 2766 // 'from', 'to', 'count' registers should be set in such order 2767 // since they are the same as 'src', 'src_pos', 'dst'. 2768 2769 __ BIND(L_copy_bytes); 2770 __ cmpl(rax_elsize, 0); 2771 __ jccb(Assembler::notEqual, L_copy_shorts); 2772 __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr 2773 __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr 2774 __ movl2ptr(count, r11_length); // length 2775 __ jump(RuntimeAddress(byte_copy_entry)); 2776 2777 __ BIND(L_copy_shorts); 2778 __ cmpl(rax_elsize, LogBytesPerShort); 2779 __ jccb(Assembler::notEqual, L_copy_ints); 2780 __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr 2781 __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr 2782 __ movl2ptr(count, r11_length); // length 2783 __ jump(RuntimeAddress(short_copy_entry)); 2784 2785 __ BIND(L_copy_ints); 2786 __ cmpl(rax_elsize, LogBytesPerInt); 2787 __ jccb(Assembler::notEqual, L_copy_longs); 2788 __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr 2789 __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr 2790 __ movl2ptr(count, r11_length); // length 2791 __ jump(RuntimeAddress(int_copy_entry)); 2792 2793 __ BIND(L_copy_longs); 2794 #ifdef ASSERT 2795 { 2796 BLOCK_COMMENT("assert long copy {"); 2797 Label L; 2798 __ cmpl(rax_elsize, LogBytesPerLong); 2799 __ jcc(Assembler::equal, L); 2800 __ stop("must be long copy, but elsize is wrong"); 2801 __ bind(L); 2802 BLOCK_COMMENT("} assert long copy done"); 2803 } 2804 #endif 2805 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr 2806 __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr 2807 __ movl2ptr(count, r11_length); // length 2808 __ jump(RuntimeAddress(long_copy_entry)); 2809 2810 // ObjArrayKlass 2811 __ BIND(L_objArray); 2812 // live at this point: r10_src_klass, r11_length, src[_pos], dst[_pos] 2813 2814 Label L_plain_copy, L_checkcast_copy; 2815 // test array classes for subtyping 2816 __ load_klass(rax, dst); 2817 __ cmpq(r10_src_klass, rax); // usual case is exact equality 2818 __ jcc(Assembler::notEqual, L_checkcast_copy); 2819 2820 // Identically typed arrays can be copied without element-wise checks. 2821 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2822 r10, L_failed); 2823 2824 __ lea(from, Address(src, src_pos, TIMES_OOP, 2825 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr 2826 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2827 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr 2828 __ movl2ptr(count, r11_length); // length 2829 __ BIND(L_plain_copy); 2830 __ jump(RuntimeAddress(oop_copy_entry)); 2831 2832 __ BIND(L_checkcast_copy); 2833 // live at this point: r10_src_klass, r11_length, rax (dst_klass) 2834 { 2835 // Before looking at dst.length, make sure dst is also an objArray. 2836 __ cmpl(Address(rax, lh_offset), objArray_lh); 2837 __ jcc(Assembler::notEqual, L_failed); 2838 2839 // It is safe to examine both src.length and dst.length. 2840 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2841 rax, L_failed); 2842 2843 const Register r11_dst_klass = r11; 2844 __ load_klass(r11_dst_klass, dst); // reload 2845 2846 // Marshal the base address arguments now, freeing registers. 2847 __ lea(from, Address(src, src_pos, TIMES_OOP, 2848 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2849 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2850 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2851 __ movl(count, length); // length (reloaded) 2852 Register sco_temp = c_rarg3; // this register is free now 2853 assert_different_registers(from, to, count, sco_temp, 2854 r11_dst_klass, r10_src_klass); 2855 assert_clean_int(count, sco_temp); 2856 2857 // Generate the type check. 2858 const int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2859 __ movl(sco_temp, Address(r11_dst_klass, sco_offset)); 2860 assert_clean_int(sco_temp, rax); 2861 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy); 2862 2863 // Fetch destination element klass from the ObjArrayKlass header. 2864 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 2865 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset)); 2866 __ movl( sco_temp, Address(r11_dst_klass, sco_offset)); 2867 assert_clean_int(sco_temp, rax); 2868 2869 // the checkcast_copy loop needs two extra arguments: 2870 assert(c_rarg3 == sco_temp, "#3 already in place"); 2871 // Set up arguments for checkcast_copy_entry. 2872 setup_arg_regs(4); 2873 __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris 2874 __ jump(RuntimeAddress(checkcast_copy_entry)); 2875 } 2876 2877 __ BIND(L_failed); 2878 __ xorptr(rax, rax); 2879 __ notptr(rax); // return -1 2880 __ leave(); // required for proper stackwalking of RuntimeStub frame 2881 __ ret(0); 2882 2883 return start; 2884 } 2885 2886 void generate_arraycopy_stubs() { 2887 address entry; 2888 address entry_jbyte_arraycopy; 2889 address entry_jshort_arraycopy; 2890 address entry_jint_arraycopy; 2891 address entry_oop_arraycopy; 2892 address entry_jlong_arraycopy; 2893 address entry_checkcast_arraycopy; 2894 2895 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, 2896 "jbyte_disjoint_arraycopy"); 2897 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy, 2898 "jbyte_arraycopy"); 2899 2900 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, 2901 "jshort_disjoint_arraycopy"); 2902 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy, 2903 "jshort_arraycopy"); 2904 2905 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry, 2906 "jint_disjoint_arraycopy"); 2907 StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry, 2908 &entry_jint_arraycopy, "jint_arraycopy"); 2909 2910 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry, 2911 "jlong_disjoint_arraycopy"); 2912 StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry, 2913 &entry_jlong_arraycopy, "jlong_arraycopy"); 2914 2915 2916 if (UseCompressedOops) { 2917 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry, 2918 "oop_disjoint_arraycopy"); 2919 StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry, 2920 &entry_oop_arraycopy, "oop_arraycopy"); 2921 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry, 2922 "oop_disjoint_arraycopy_uninit", 2923 /*dest_uninitialized*/true); 2924 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry, 2925 NULL, "oop_arraycopy_uninit", 2926 /*dest_uninitialized*/true); 2927 } else { 2928 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry, 2929 "oop_disjoint_arraycopy"); 2930 StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry, 2931 &entry_oop_arraycopy, "oop_arraycopy"); 2932 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry, 2933 "oop_disjoint_arraycopy_uninit", 2934 /*dest_uninitialized*/true); 2935 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry, 2936 NULL, "oop_arraycopy_uninit", 2937 /*dest_uninitialized*/true); 2938 } 2939 2940 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 2941 StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, 2942 /*dest_uninitialized*/true); 2943 2944 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", 2945 entry_jbyte_arraycopy, 2946 entry_jshort_arraycopy, 2947 entry_jint_arraycopy, 2948 entry_jlong_arraycopy); 2949 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy", 2950 entry_jbyte_arraycopy, 2951 entry_jshort_arraycopy, 2952 entry_jint_arraycopy, 2953 entry_oop_arraycopy, 2954 entry_jlong_arraycopy, 2955 entry_checkcast_arraycopy); 2956 2957 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 2958 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 2959 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 2960 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); 2961 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 2962 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 2963 2964 // We don't generate specialized code for HeapWord-aligned source 2965 // arrays, so just use the code we've already generated 2966 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy; 2967 StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy; 2968 2969 StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy; 2970 StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy; 2971 2972 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy; 2973 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; 2974 2975 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy; 2976 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; 2977 2978 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; 2979 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; 2980 2981 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit; 2982 StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit; 2983 } 2984 2985 // AES intrinsic stubs 2986 enum {AESBlockSize = 16}; 2987 2988 address generate_key_shuffle_mask() { 2989 __ align(16); 2990 StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask"); 2991 address start = __ pc(); 2992 __ emit_data64( 0x0405060700010203, relocInfo::none ); 2993 __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none ); 2994 return start; 2995 } 2996 2997 address generate_counter_shuffle_mask() { 2998 __ align(16); 2999 StubCodeMark mark(this, "StubRoutines", "counter_shuffle_mask"); 3000 address start = __ pc(); 3001 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 3002 __ emit_data64(0x0001020304050607, relocInfo::none); 3003 return start; 3004 } 3005 3006 // Utility routine for loading a 128-bit key word in little endian format 3007 // can optionally specify that the shuffle mask is already in an xmmregister 3008 void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 3009 __ movdqu(xmmdst, Address(key, offset)); 3010 if (xmm_shuf_mask != NULL) { 3011 __ pshufb(xmmdst, xmm_shuf_mask); 3012 } else { 3013 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3014 } 3015 } 3016 3017 // Utility routine for increase 128bit counter (iv in CTR mode) 3018 void inc_counter(Register reg, XMMRegister xmmdst, int inc_delta, Label& next_block) { 3019 __ pextrq(reg, xmmdst, 0x0); 3020 __ addq(reg, inc_delta); 3021 __ pinsrq(xmmdst, reg, 0x0); 3022 __ jcc(Assembler::carryClear, next_block); // jump if no carry 3023 __ pextrq(reg, xmmdst, 0x01); // Carry 3024 __ addq(reg, 0x01); 3025 __ pinsrq(xmmdst, reg, 0x01); //Carry end 3026 __ BIND(next_block); // next instruction 3027 } 3028 3029 // Arguments: 3030 // 3031 // Inputs: 3032 // c_rarg0 - source byte array address 3033 // c_rarg1 - destination byte array address 3034 // c_rarg2 - K (key) in little endian int array 3035 // 3036 address generate_aescrypt_encryptBlock() { 3037 assert(UseAES, "need AES instructions and misaligned SSE support"); 3038 __ align(CodeEntryAlignment); 3039 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); 3040 Label L_doLast; 3041 address start = __ pc(); 3042 3043 const Register from = c_rarg0; // source array address 3044 const Register to = c_rarg1; // destination array address 3045 const Register key = c_rarg2; // key array address 3046 const Register keylen = rax; 3047 3048 const XMMRegister xmm_result = xmm0; 3049 const XMMRegister xmm_key_shuf_mask = xmm1; 3050 // On win64 xmm6-xmm15 must be preserved so don't use them. 3051 const XMMRegister xmm_temp1 = xmm2; 3052 const XMMRegister xmm_temp2 = xmm3; 3053 const XMMRegister xmm_temp3 = xmm4; 3054 const XMMRegister xmm_temp4 = xmm5; 3055 3056 __ enter(); // required for proper stackwalking of RuntimeStub frame 3057 3058 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3059 // context for the registers used, where all instructions below are using 128-bit mode 3060 // On EVEX without VL and BW, these instructions will all be AVX. 3061 if (VM_Version::supports_avx512vlbw()) { 3062 __ movl(rax, 0xffff); 3063 __ kmovql(k1, rax); 3064 } 3065 3066 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3067 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3068 3069 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3070 __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input 3071 3072 // For encryption, the java expanded key ordering is just what we need 3073 // we don't know if the key is aligned, hence not using load-execute form 3074 3075 load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask); 3076 __ pxor(xmm_result, xmm_temp1); 3077 3078 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3079 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3080 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3081 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3082 3083 __ aesenc(xmm_result, xmm_temp1); 3084 __ aesenc(xmm_result, xmm_temp2); 3085 __ aesenc(xmm_result, xmm_temp3); 3086 __ aesenc(xmm_result, xmm_temp4); 3087 3088 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3089 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3090 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3091 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3092 3093 __ aesenc(xmm_result, xmm_temp1); 3094 __ aesenc(xmm_result, xmm_temp2); 3095 __ aesenc(xmm_result, xmm_temp3); 3096 __ aesenc(xmm_result, xmm_temp4); 3097 3098 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3099 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3100 3101 __ cmpl(keylen, 44); 3102 __ jccb(Assembler::equal, L_doLast); 3103 3104 __ aesenc(xmm_result, xmm_temp1); 3105 __ aesenc(xmm_result, xmm_temp2); 3106 3107 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3108 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3109 3110 __ cmpl(keylen, 52); 3111 __ jccb(Assembler::equal, L_doLast); 3112 3113 __ aesenc(xmm_result, xmm_temp1); 3114 __ aesenc(xmm_result, xmm_temp2); 3115 3116 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3117 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3118 3119 __ BIND(L_doLast); 3120 __ aesenc(xmm_result, xmm_temp1); 3121 __ aesenclast(xmm_result, xmm_temp2); 3122 __ movdqu(Address(to, 0), xmm_result); // store the result 3123 __ xorptr(rax, rax); // return 0 3124 __ leave(); // required for proper stackwalking of RuntimeStub frame 3125 __ ret(0); 3126 3127 return start; 3128 } 3129 3130 3131 // Arguments: 3132 // 3133 // Inputs: 3134 // c_rarg0 - source byte array address 3135 // c_rarg1 - destination byte array address 3136 // c_rarg2 - K (key) in little endian int array 3137 // 3138 address generate_aescrypt_decryptBlock() { 3139 assert(UseAES, "need AES instructions and misaligned SSE support"); 3140 __ align(CodeEntryAlignment); 3141 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); 3142 Label L_doLast; 3143 address start = __ pc(); 3144 3145 const Register from = c_rarg0; // source array address 3146 const Register to = c_rarg1; // destination array address 3147 const Register key = c_rarg2; // key array address 3148 const Register keylen = rax; 3149 3150 const XMMRegister xmm_result = xmm0; 3151 const XMMRegister xmm_key_shuf_mask = xmm1; 3152 // On win64 xmm6-xmm15 must be preserved so don't use them. 3153 const XMMRegister xmm_temp1 = xmm2; 3154 const XMMRegister xmm_temp2 = xmm3; 3155 const XMMRegister xmm_temp3 = xmm4; 3156 const XMMRegister xmm_temp4 = xmm5; 3157 3158 __ enter(); // required for proper stackwalking of RuntimeStub frame 3159 3160 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3161 // context for the registers used, where all instructions below are using 128-bit mode 3162 // On EVEX without VL and BW, these instructions will all be AVX. 3163 if (VM_Version::supports_avx512vlbw()) { 3164 __ movl(rax, 0xffff); 3165 __ kmovql(k1, rax); 3166 } 3167 3168 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3169 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3170 3171 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3172 __ movdqu(xmm_result, Address(from, 0)); 3173 3174 // for decryption java expanded key ordering is rotated one position from what we want 3175 // so we start from 0x10 here and hit 0x00 last 3176 // we don't know if the key is aligned, hence not using load-execute form 3177 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3178 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3179 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3180 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3181 3182 __ pxor (xmm_result, xmm_temp1); 3183 __ aesdec(xmm_result, xmm_temp2); 3184 __ aesdec(xmm_result, xmm_temp3); 3185 __ aesdec(xmm_result, xmm_temp4); 3186 3187 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3188 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3189 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3190 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3191 3192 __ aesdec(xmm_result, xmm_temp1); 3193 __ aesdec(xmm_result, xmm_temp2); 3194 __ aesdec(xmm_result, xmm_temp3); 3195 __ aesdec(xmm_result, xmm_temp4); 3196 3197 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3198 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3199 load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask); 3200 3201 __ cmpl(keylen, 44); 3202 __ jccb(Assembler::equal, L_doLast); 3203 3204 __ aesdec(xmm_result, xmm_temp1); 3205 __ aesdec(xmm_result, xmm_temp2); 3206 3207 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3208 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3209 3210 __ cmpl(keylen, 52); 3211 __ jccb(Assembler::equal, L_doLast); 3212 3213 __ aesdec(xmm_result, xmm_temp1); 3214 __ aesdec(xmm_result, xmm_temp2); 3215 3216 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3217 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3218 3219 __ BIND(L_doLast); 3220 __ aesdec(xmm_result, xmm_temp1); 3221 __ aesdec(xmm_result, xmm_temp2); 3222 3223 // for decryption the aesdeclast operation is always on key+0x00 3224 __ aesdeclast(xmm_result, xmm_temp3); 3225 __ movdqu(Address(to, 0), xmm_result); // store the result 3226 __ xorptr(rax, rax); // return 0 3227 __ leave(); // required for proper stackwalking of RuntimeStub frame 3228 __ ret(0); 3229 3230 return start; 3231 } 3232 3233 3234 // Arguments: 3235 // 3236 // Inputs: 3237 // c_rarg0 - source byte array address 3238 // c_rarg1 - destination byte array address 3239 // c_rarg2 - K (key) in little endian int array 3240 // c_rarg3 - r vector byte array address 3241 // c_rarg4 - input length 3242 // 3243 // Output: 3244 // rax - input length 3245 // 3246 address generate_cipherBlockChaining_encryptAESCrypt() { 3247 assert(UseAES, "need AES instructions and misaligned SSE support"); 3248 __ align(CodeEntryAlignment); 3249 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); 3250 address start = __ pc(); 3251 3252 Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256; 3253 const Register from = c_rarg0; // source array address 3254 const Register to = c_rarg1; // destination array address 3255 const Register key = c_rarg2; // key array address 3256 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3257 // and left with the results of the last encryption block 3258 #ifndef _WIN64 3259 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3260 #else 3261 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3262 const Register len_reg = r11; // pick the volatile windows register 3263 #endif 3264 const Register pos = rax; 3265 3266 // xmm register assignments for the loops below 3267 const XMMRegister xmm_result = xmm0; 3268 const XMMRegister xmm_temp = xmm1; 3269 // keys 0-10 preloaded into xmm2-xmm12 3270 const int XMM_REG_NUM_KEY_FIRST = 2; 3271 const int XMM_REG_NUM_KEY_LAST = 15; 3272 const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3273 const XMMRegister xmm_key10 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10); 3274 const XMMRegister xmm_key11 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11); 3275 const XMMRegister xmm_key12 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12); 3276 const XMMRegister xmm_key13 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13); 3277 3278 __ enter(); // required for proper stackwalking of RuntimeStub frame 3279 3280 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3281 // context for the registers used, where all instructions below are using 128-bit mode 3282 // On EVEX without VL and BW, these instructions will all be AVX. 3283 if (VM_Version::supports_avx512vlbw()) { 3284 __ movl(rax, 0xffff); 3285 __ kmovql(k1, rax); 3286 } 3287 3288 #ifdef _WIN64 3289 // on win64, fill len_reg from stack position 3290 __ movl(len_reg, len_mem); 3291 #else 3292 __ push(len_reg); // Save 3293 #endif 3294 3295 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 3296 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3297 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0 3298 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) { 3299 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3300 offset += 0x10; 3301 } 3302 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 3303 3304 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3305 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3306 __ cmpl(rax, 44); 3307 __ jcc(Assembler::notEqual, L_key_192_256); 3308 3309 // 128 bit code follows here 3310 __ movptr(pos, 0); 3311 __ align(OptoLoopAlignment); 3312 3313 __ BIND(L_loopTop_128); 3314 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3315 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3316 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3317 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) { 3318 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3319 } 3320 __ aesenclast(xmm_result, xmm_key10); 3321 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3322 // no need to store r to memory until we exit 3323 __ addptr(pos, AESBlockSize); 3324 __ subptr(len_reg, AESBlockSize); 3325 __ jcc(Assembler::notEqual, L_loopTop_128); 3326 3327 __ BIND(L_exit); 3328 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object 3329 3330 #ifdef _WIN64 3331 __ movl(rax, len_mem); 3332 #else 3333 __ pop(rax); // return length 3334 #endif 3335 __ leave(); // required for proper stackwalking of RuntimeStub frame 3336 __ ret(0); 3337 3338 __ BIND(L_key_192_256); 3339 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 3340 load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask); 3341 load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask); 3342 __ cmpl(rax, 52); 3343 __ jcc(Assembler::notEqual, L_key_256); 3344 3345 // 192-bit code follows here (could be changed to use more xmm registers) 3346 __ movptr(pos, 0); 3347 __ align(OptoLoopAlignment); 3348 3349 __ BIND(L_loopTop_192); 3350 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3351 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3352 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3353 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) { 3354 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3355 } 3356 __ aesenclast(xmm_result, xmm_key12); 3357 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3358 // no need to store r to memory until we exit 3359 __ addptr(pos, AESBlockSize); 3360 __ subptr(len_reg, AESBlockSize); 3361 __ jcc(Assembler::notEqual, L_loopTop_192); 3362 __ jmp(L_exit); 3363 3364 __ BIND(L_key_256); 3365 // 256-bit code follows here (could be changed to use more xmm registers) 3366 load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask); 3367 __ movptr(pos, 0); 3368 __ align(OptoLoopAlignment); 3369 3370 __ BIND(L_loopTop_256); 3371 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3372 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3373 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3374 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) { 3375 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3376 } 3377 load_key(xmm_temp, key, 0xe0); 3378 __ aesenclast(xmm_result, xmm_temp); 3379 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3380 // no need to store r to memory until we exit 3381 __ addptr(pos, AESBlockSize); 3382 __ subptr(len_reg, AESBlockSize); 3383 __ jcc(Assembler::notEqual, L_loopTop_256); 3384 __ jmp(L_exit); 3385 3386 return start; 3387 } 3388 3389 // Safefetch stubs. 3390 void generate_safefetch(const char* name, int size, address* entry, 3391 address* fault_pc, address* continuation_pc) { 3392 // safefetch signatures: 3393 // int SafeFetch32(int* adr, int errValue); 3394 // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue); 3395 // 3396 // arguments: 3397 // c_rarg0 = adr 3398 // c_rarg1 = errValue 3399 // 3400 // result: 3401 // PPC_RET = *adr or errValue 3402 3403 StubCodeMark mark(this, "StubRoutines", name); 3404 3405 // Entry point, pc or function descriptor. 3406 *entry = __ pc(); 3407 3408 // Load *adr into c_rarg1, may fault. 3409 *fault_pc = __ pc(); 3410 switch (size) { 3411 case 4: 3412 // int32_t 3413 __ movl(c_rarg1, Address(c_rarg0, 0)); 3414 break; 3415 case 8: 3416 // int64_t 3417 __ movq(c_rarg1, Address(c_rarg0, 0)); 3418 break; 3419 default: 3420 ShouldNotReachHere(); 3421 } 3422 3423 // return errValue or *adr 3424 *continuation_pc = __ pc(); 3425 __ movq(rax, c_rarg1); 3426 __ ret(0); 3427 } 3428 3429 // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time 3430 // to hide instruction latency 3431 // 3432 // Arguments: 3433 // 3434 // Inputs: 3435 // c_rarg0 - source byte array address 3436 // c_rarg1 - destination byte array address 3437 // c_rarg2 - K (key) in little endian int array 3438 // c_rarg3 - r vector byte array address 3439 // c_rarg4 - input length 3440 // 3441 // Output: 3442 // rax - input length 3443 // 3444 address generate_cipherBlockChaining_decryptAESCrypt_Parallel() { 3445 assert(UseAES, "need AES instructions and misaligned SSE support"); 3446 __ align(CodeEntryAlignment); 3447 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 3448 address start = __ pc(); 3449 3450 const Register from = c_rarg0; // source array address 3451 const Register to = c_rarg1; // destination array address 3452 const Register key = c_rarg2; // key array address 3453 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3454 // and left with the results of the last encryption block 3455 #ifndef _WIN64 3456 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3457 #else 3458 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3459 const Register len_reg = r11; // pick the volatile windows register 3460 #endif 3461 const Register pos = rax; 3462 3463 const int PARALLEL_FACTOR = 4; 3464 const int ROUNDS[3] = { 10, 12, 14 }; // aes rounds for key128, key192, key256 3465 3466 Label L_exit; 3467 Label L_singleBlock_loopTopHead[3]; // 128, 192, 256 3468 Label L_singleBlock_loopTopHead2[3]; // 128, 192, 256 3469 Label L_singleBlock_loopTop[3]; // 128, 192, 256 3470 Label L_multiBlock_loopTopHead[3]; // 128, 192, 256 3471 Label L_multiBlock_loopTop[3]; // 128, 192, 256 3472 3473 // keys 0-10 preloaded into xmm5-xmm15 3474 const int XMM_REG_NUM_KEY_FIRST = 5; 3475 const int XMM_REG_NUM_KEY_LAST = 15; 3476 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3477 const XMMRegister xmm_key_last = as_XMMRegister(XMM_REG_NUM_KEY_LAST); 3478 3479 __ enter(); // required for proper stackwalking of RuntimeStub frame 3480 3481 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3482 // context for the registers used, where all instructions below are using 128-bit mode 3483 // On EVEX without VL and BW, these instructions will all be AVX. 3484 if (VM_Version::supports_avx512vlbw()) { 3485 __ movl(rax, 0xffff); 3486 __ kmovql(k1, rax); 3487 } 3488 3489 #ifdef _WIN64 3490 // on win64, fill len_reg from stack position 3491 __ movl(len_reg, len_mem); 3492 #else 3493 __ push(len_reg); // Save 3494 #endif 3495 __ push(rbx); 3496 // the java expanded key ordering is rotated one position from what we want 3497 // so we start from 0x10 here and hit 0x00 last 3498 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 3499 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3500 // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00 3501 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) { 3502 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3503 offset += 0x10; 3504 } 3505 load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask); 3506 3507 const XMMRegister xmm_prev_block_cipher = xmm1; // holds cipher of previous block 3508 3509 // registers holding the four results in the parallelized loop 3510 const XMMRegister xmm_result0 = xmm0; 3511 const XMMRegister xmm_result1 = xmm2; 3512 const XMMRegister xmm_result2 = xmm3; 3513 const XMMRegister xmm_result3 = xmm4; 3514 3515 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec 3516 3517 __ xorptr(pos, pos); 3518 3519 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3520 __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3521 __ cmpl(rbx, 52); 3522 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[1]); 3523 __ cmpl(rbx, 60); 3524 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[2]); 3525 3526 #define DoFour(opc, src_reg) \ 3527 __ opc(xmm_result0, src_reg); \ 3528 __ opc(xmm_result1, src_reg); \ 3529 __ opc(xmm_result2, src_reg); \ 3530 __ opc(xmm_result3, src_reg); \ 3531 3532 for (int k = 0; k < 3; ++k) { 3533 __ BIND(L_multiBlock_loopTopHead[k]); 3534 if (k != 0) { 3535 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left 3536 __ jcc(Assembler::less, L_singleBlock_loopTopHead2[k]); 3537 } 3538 if (k == 1) { 3539 __ subptr(rsp, 6 * wordSize); 3540 __ movdqu(Address(rsp, 0), xmm15); //save last_key from xmm15 3541 load_key(xmm15, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0 3542 __ movdqu(Address(rsp, 2 * wordSize), xmm15); 3543 load_key(xmm1, key, 0xc0); // 0xc0; 3544 __ movdqu(Address(rsp, 4 * wordSize), xmm1); 3545 } else if (k == 2) { 3546 __ subptr(rsp, 10 * wordSize); 3547 __ movdqu(Address(rsp, 0), xmm15); //save last_key from xmm15 3548 load_key(xmm15, key, 0xd0); // 0xd0; 256-bit key goes upto 0xe0 3549 __ movdqu(Address(rsp, 6 * wordSize), xmm15); 3550 load_key(xmm1, key, 0xe0); // 0xe0; 3551 __ movdqu(Address(rsp, 8 * wordSize), xmm1); 3552 load_key(xmm15, key, 0xb0); // 0xb0; 3553 __ movdqu(Address(rsp, 2 * wordSize), xmm15); 3554 load_key(xmm1, key, 0xc0); // 0xc0; 3555 __ movdqu(Address(rsp, 4 * wordSize), xmm1); 3556 } 3557 __ align(OptoLoopAlignment); 3558 __ BIND(L_multiBlock_loopTop[k]); 3559 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left 3560 __ jcc(Assembler::less, L_singleBlock_loopTopHead[k]); 3561 3562 if (k != 0) { 3563 __ movdqu(xmm15, Address(rsp, 2 * wordSize)); 3564 __ movdqu(xmm1, Address(rsp, 4 * wordSize)); 3565 } 3566 3567 __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); // get next 4 blocks into xmmresult registers 3568 __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 3569 __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 3570 __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize)); 3571 3572 DoFour(pxor, xmm_key_first); 3573 if (k == 0) { 3574 for (int rnum = 1; rnum < ROUNDS[k]; rnum++) { 3575 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3576 } 3577 DoFour(aesdeclast, xmm_key_last); 3578 } else if (k == 1) { 3579 for (int rnum = 1; rnum <= ROUNDS[k]-2; rnum++) { 3580 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3581 } 3582 __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again. 3583 DoFour(aesdec, xmm1); // key : 0xc0 3584 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again 3585 DoFour(aesdeclast, xmm_key_last); 3586 } else if (k == 2) { 3587 for (int rnum = 1; rnum <= ROUNDS[k] - 4; rnum++) { 3588 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3589 } 3590 DoFour(aesdec, xmm1); // key : 0xc0 3591 __ movdqu(xmm15, Address(rsp, 6 * wordSize)); 3592 __ movdqu(xmm1, Address(rsp, 8 * wordSize)); 3593 DoFour(aesdec, xmm15); // key : 0xd0 3594 __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again. 3595 DoFour(aesdec, xmm1); // key : 0xe0 3596 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again 3597 DoFour(aesdeclast, xmm_key_last); 3598 } 3599 3600 // for each result, xor with the r vector of previous cipher block 3601 __ pxor(xmm_result0, xmm_prev_block_cipher); 3602 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 3603 __ pxor(xmm_result1, xmm_prev_block_cipher); 3604 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 3605 __ pxor(xmm_result2, xmm_prev_block_cipher); 3606 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 3607 __ pxor(xmm_result3, xmm_prev_block_cipher); 3608 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3 * AESBlockSize)); // this will carry over to next set of blocks 3609 if (k != 0) { 3610 __ movdqu(Address(rvec, 0x00), xmm_prev_block_cipher); 3611 } 3612 3613 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output 3614 __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1); 3615 __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2); 3616 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3); 3617 3618 __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); 3619 __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); 3620 __ jmp(L_multiBlock_loopTop[k]); 3621 3622 // registers used in the non-parallelized loops 3623 // xmm register assignments for the loops below 3624 const XMMRegister xmm_result = xmm0; 3625 const XMMRegister xmm_prev_block_cipher_save = xmm2; 3626 const XMMRegister xmm_key11 = xmm3; 3627 const XMMRegister xmm_key12 = xmm4; 3628 const XMMRegister key_tmp = xmm4; 3629 3630 __ BIND(L_singleBlock_loopTopHead[k]); 3631 if (k == 1) { 3632 __ addptr(rsp, 6 * wordSize); 3633 } else if (k == 2) { 3634 __ addptr(rsp, 10 * wordSize); 3635 } 3636 __ cmpptr(len_reg, 0); // any blocks left?? 3637 __ jcc(Assembler::equal, L_exit); 3638 __ BIND(L_singleBlock_loopTopHead2[k]); 3639 if (k == 1) { 3640 load_key(xmm_key11, key, 0xb0); // 0xb0; 192-bit key goes upto 0xc0 3641 load_key(xmm_key12, key, 0xc0); // 0xc0; 192-bit key goes upto 0xc0 3642 } 3643 if (k == 2) { 3644 load_key(xmm_key11, key, 0xb0); // 0xb0; 256-bit key goes upto 0xe0 3645 } 3646 __ align(OptoLoopAlignment); 3647 __ BIND(L_singleBlock_loopTop[k]); 3648 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 3649 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector 3650 __ pxor(xmm_result, xmm_key_first); // do the aes dec rounds 3651 for (int rnum = 1; rnum <= 9 ; rnum++) { 3652 __ aesdec(xmm_result, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3653 } 3654 if (k == 1) { 3655 __ aesdec(xmm_result, xmm_key11); 3656 __ aesdec(xmm_result, xmm_key12); 3657 } 3658 if (k == 2) { 3659 __ aesdec(xmm_result, xmm_key11); 3660 load_key(key_tmp, key, 0xc0); 3661 __ aesdec(xmm_result, key_tmp); 3662 load_key(key_tmp, key, 0xd0); 3663 __ aesdec(xmm_result, key_tmp); 3664 load_key(key_tmp, key, 0xe0); 3665 __ aesdec(xmm_result, key_tmp); 3666 } 3667 3668 __ aesdeclast(xmm_result, xmm_key_last); // xmm15 always came from key+0 3669 __ pxor(xmm_result, xmm_prev_block_cipher); // xor with the current r vector 3670 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3671 // no need to store r to memory until we exit 3672 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block 3673 __ addptr(pos, AESBlockSize); 3674 __ subptr(len_reg, AESBlockSize); 3675 __ jcc(Assembler::notEqual, L_singleBlock_loopTop[k]); 3676 if (k != 2) { 3677 __ jmp(L_exit); 3678 } 3679 } //for 128/192/256 3680 3681 __ BIND(L_exit); 3682 __ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object 3683 __ pop(rbx); 3684 #ifdef _WIN64 3685 __ movl(rax, len_mem); 3686 #else 3687 __ pop(rax); // return length 3688 #endif 3689 __ leave(); // required for proper stackwalking of RuntimeStub frame 3690 __ ret(0); 3691 return start; 3692 } 3693 3694 address generate_upper_word_mask() { 3695 __ align(64); 3696 StubCodeMark mark(this, "StubRoutines", "upper_word_mask"); 3697 address start = __ pc(); 3698 __ emit_data64(0x0000000000000000, relocInfo::none); 3699 __ emit_data64(0xFFFFFFFF00000000, relocInfo::none); 3700 return start; 3701 } 3702 3703 address generate_shuffle_byte_flip_mask() { 3704 __ align(64); 3705 StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask"); 3706 address start = __ pc(); 3707 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 3708 __ emit_data64(0x0001020304050607, relocInfo::none); 3709 return start; 3710 } 3711 3712 // ofs and limit are use for multi-block byte array. 3713 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit) 3714 address generate_sha1_implCompress(bool multi_block, const char *name) { 3715 __ align(CodeEntryAlignment); 3716 StubCodeMark mark(this, "StubRoutines", name); 3717 address start = __ pc(); 3718 3719 Register buf = c_rarg0; 3720 Register state = c_rarg1; 3721 Register ofs = c_rarg2; 3722 Register limit = c_rarg3; 3723 3724 const XMMRegister abcd = xmm0; 3725 const XMMRegister e0 = xmm1; 3726 const XMMRegister e1 = xmm2; 3727 const XMMRegister msg0 = xmm3; 3728 3729 const XMMRegister msg1 = xmm4; 3730 const XMMRegister msg2 = xmm5; 3731 const XMMRegister msg3 = xmm6; 3732 const XMMRegister shuf_mask = xmm7; 3733 3734 __ enter(); 3735 3736 __ subptr(rsp, 4 * wordSize); 3737 3738 __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask, 3739 buf, state, ofs, limit, rsp, multi_block); 3740 3741 __ addptr(rsp, 4 * wordSize); 3742 3743 __ leave(); 3744 __ ret(0); 3745 return start; 3746 } 3747 3748 address generate_pshuffle_byte_flip_mask() { 3749 __ align(64); 3750 StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask"); 3751 address start = __ pc(); 3752 __ emit_data64(0x0405060700010203, relocInfo::none); 3753 __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none); 3754 3755 if (VM_Version::supports_avx2()) { 3756 __ emit_data64(0x0405060700010203, relocInfo::none); // second copy 3757 __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none); 3758 // _SHUF_00BA 3759 __ emit_data64(0x0b0a090803020100, relocInfo::none); 3760 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); 3761 __ emit_data64(0x0b0a090803020100, relocInfo::none); 3762 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); 3763 // _SHUF_DC00 3764 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); 3765 __ emit_data64(0x0b0a090803020100, relocInfo::none); 3766 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); 3767 __ emit_data64(0x0b0a090803020100, relocInfo::none); 3768 } 3769 3770 return start; 3771 } 3772 3773 //Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. 3774 address generate_pshuffle_byte_flip_mask_sha512() { 3775 __ align(32); 3776 StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask_sha512"); 3777 address start = __ pc(); 3778 if (VM_Version::supports_avx2()) { 3779 __ emit_data64(0x0001020304050607, relocInfo::none); // PSHUFFLE_BYTE_FLIP_MASK 3780 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 3781 __ emit_data64(0x1011121314151617, relocInfo::none); 3782 __ emit_data64(0x18191a1b1c1d1e1f, relocInfo::none); 3783 __ emit_data64(0x0000000000000000, relocInfo::none); //MASK_YMM_LO 3784 __ emit_data64(0x0000000000000000, relocInfo::none); 3785 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); 3786 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); 3787 } 3788 3789 return start; 3790 } 3791 3792 // ofs and limit are use for multi-block byte array. 3793 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit) 3794 address generate_sha256_implCompress(bool multi_block, const char *name) { 3795 assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), ""); 3796 __ align(CodeEntryAlignment); 3797 StubCodeMark mark(this, "StubRoutines", name); 3798 address start = __ pc(); 3799 3800 Register buf = c_rarg0; 3801 Register state = c_rarg1; 3802 Register ofs = c_rarg2; 3803 Register limit = c_rarg3; 3804 3805 const XMMRegister msg = xmm0; 3806 const XMMRegister state0 = xmm1; 3807 const XMMRegister state1 = xmm2; 3808 const XMMRegister msgtmp0 = xmm3; 3809 3810 const XMMRegister msgtmp1 = xmm4; 3811 const XMMRegister msgtmp2 = xmm5; 3812 const XMMRegister msgtmp3 = xmm6; 3813 const XMMRegister msgtmp4 = xmm7; 3814 3815 const XMMRegister shuf_mask = xmm8; 3816 3817 __ enter(); 3818 3819 __ subptr(rsp, 4 * wordSize); 3820 3821 if (VM_Version::supports_sha()) { 3822 __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3823 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3824 } else if (VM_Version::supports_avx2()) { 3825 __ sha256_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3826 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3827 } 3828 __ addptr(rsp, 4 * wordSize); 3829 __ vzeroupper(); 3830 __ leave(); 3831 __ ret(0); 3832 return start; 3833 } 3834 3835 address generate_sha512_implCompress(bool multi_block, const char *name) { 3836 assert(VM_Version::supports_avx2(), ""); 3837 assert(VM_Version::supports_bmi2(), ""); 3838 __ align(CodeEntryAlignment); 3839 StubCodeMark mark(this, "StubRoutines", name); 3840 address start = __ pc(); 3841 3842 Register buf = c_rarg0; 3843 Register state = c_rarg1; 3844 Register ofs = c_rarg2; 3845 Register limit = c_rarg3; 3846 3847 const XMMRegister msg = xmm0; 3848 const XMMRegister state0 = xmm1; 3849 const XMMRegister state1 = xmm2; 3850 const XMMRegister msgtmp0 = xmm3; 3851 const XMMRegister msgtmp1 = xmm4; 3852 const XMMRegister msgtmp2 = xmm5; 3853 const XMMRegister msgtmp3 = xmm6; 3854 const XMMRegister msgtmp4 = xmm7; 3855 3856 const XMMRegister shuf_mask = xmm8; 3857 3858 __ enter(); 3859 3860 __ sha512_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3861 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3862 3863 __ vzeroupper(); 3864 __ leave(); 3865 __ ret(0); 3866 return start; 3867 } 3868 3869 // This is a version of CTR/AES crypt which does 6 blocks in a loop at a time 3870 // to hide instruction latency 3871 // 3872 // Arguments: 3873 // 3874 // Inputs: 3875 // c_rarg0 - source byte array address 3876 // c_rarg1 - destination byte array address 3877 // c_rarg2 - K (key) in little endian int array 3878 // c_rarg3 - counter vector byte array address 3879 // Linux 3880 // c_rarg4 - input length 3881 // c_rarg5 - saved encryptedCounter start 3882 // rbp + 6 * wordSize - saved used length 3883 // Windows 3884 // rbp + 6 * wordSize - input length 3885 // rbp + 7 * wordSize - saved encryptedCounter start 3886 // rbp + 8 * wordSize - saved used length 3887 // 3888 // Output: 3889 // rax - input length 3890 // 3891 address generate_counterMode_AESCrypt_Parallel() { 3892 assert(UseAES, "need AES instructions and misaligned SSE support"); 3893 __ align(CodeEntryAlignment); 3894 StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt"); 3895 address start = __ pc(); 3896 const Register from = c_rarg0; // source array address 3897 const Register to = c_rarg1; // destination array address 3898 const Register key = c_rarg2; // key array address 3899 const Register counter = c_rarg3; // counter byte array initialized from counter array address 3900 // and updated with the incremented counter in the end 3901 #ifndef _WIN64 3902 const Register len_reg = c_rarg4; 3903 const Register saved_encCounter_start = c_rarg5; 3904 const Register used_addr = r10; 3905 const Address used_mem(rbp, 2 * wordSize); 3906 const Register used = r11; 3907 #else 3908 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3909 const Address saved_encCounter_mem(rbp, 7 * wordSize); // length is on stack on Win64 3910 const Address used_mem(rbp, 8 * wordSize); // length is on stack on Win64 3911 const Register len_reg = r10; // pick the first volatile windows register 3912 const Register saved_encCounter_start = r11; 3913 const Register used_addr = r13; 3914 const Register used = r14; 3915 #endif 3916 const Register pos = rax; 3917 3918 const int PARALLEL_FACTOR = 6; 3919 const XMMRegister xmm_counter_shuf_mask = xmm0; 3920 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 3921 const XMMRegister xmm_curr_counter = xmm2; 3922 3923 const XMMRegister xmm_key_tmp0 = xmm3; 3924 const XMMRegister xmm_key_tmp1 = xmm4; 3925 3926 // registers holding the four results in the parallelized loop 3927 const XMMRegister xmm_result0 = xmm5; 3928 const XMMRegister xmm_result1 = xmm6; 3929 const XMMRegister xmm_result2 = xmm7; 3930 const XMMRegister xmm_result3 = xmm8; 3931 const XMMRegister xmm_result4 = xmm9; 3932 const XMMRegister xmm_result5 = xmm10; 3933 3934 const XMMRegister xmm_from0 = xmm11; 3935 const XMMRegister xmm_from1 = xmm12; 3936 const XMMRegister xmm_from2 = xmm13; 3937 const XMMRegister xmm_from3 = xmm14; //the last one is xmm14. we have to preserve it on WIN64. 3938 const XMMRegister xmm_from4 = xmm3; //reuse xmm3~4. Because xmm_key_tmp0~1 are useless when loading input text 3939 const XMMRegister xmm_from5 = xmm4; 3940 3941 //for key_128, key_192, key_256 3942 const int rounds[3] = {10, 12, 14}; 3943 Label L_exit_preLoop, L_preLoop_start; 3944 Label L_multiBlock_loopTop[3]; 3945 Label L_singleBlockLoopTop[3]; 3946 Label L__incCounter[3][6]; //for 6 blocks 3947 Label L__incCounter_single[3]; //for single block, key128, key192, key256 3948 Label L_processTail_insr[3], L_processTail_4_insr[3], L_processTail_2_insr[3], L_processTail_1_insr[3], L_processTail_exit_insr[3]; 3949 Label L_processTail_extr[3], L_processTail_4_extr[3], L_processTail_2_extr[3], L_processTail_1_extr[3], L_processTail_exit_extr[3]; 3950 3951 Label L_exit; 3952 3953 __ enter(); // required for proper stackwalking of RuntimeStub frame 3954 3955 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3956 // context for the registers used, where all instructions below are using 128-bit mode 3957 // On EVEX without VL and BW, these instructions will all be AVX. 3958 if (VM_Version::supports_avx512vlbw()) { 3959 __ movl(rax, 0xffff); 3960 __ kmovql(k1, rax); 3961 } 3962 3963 #ifdef _WIN64 3964 // allocate spill slots for r13, r14 3965 enum { 3966 saved_r13_offset, 3967 saved_r14_offset 3968 }; 3969 __ subptr(rsp, 2 * wordSize); 3970 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13); 3971 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14); 3972 3973 // on win64, fill len_reg from stack position 3974 __ movl(len_reg, len_mem); 3975 __ movptr(saved_encCounter_start, saved_encCounter_mem); 3976 __ movptr(used_addr, used_mem); 3977 __ movl(used, Address(used_addr, 0)); 3978 #else 3979 __ push(len_reg); // Save 3980 __ movptr(used_addr, used_mem); 3981 __ movl(used, Address(used_addr, 0)); 3982 #endif 3983 3984 __ push(rbx); // Save RBX 3985 __ movdqu(xmm_curr_counter, Address(counter, 0x00)); // initialize counter with initial counter 3986 __ movdqu(xmm_counter_shuf_mask, ExternalAddress(StubRoutines::x86::counter_shuffle_mask_addr()), pos); // pos as scratch 3987 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); //counter is shuffled 3988 __ movptr(pos, 0); 3989 3990 // Use the partially used encrpyted counter from last invocation 3991 __ BIND(L_preLoop_start); 3992 __ cmpptr(used, 16); 3993 __ jcc(Assembler::aboveEqual, L_exit_preLoop); 3994 __ cmpptr(len_reg, 0); 3995 __ jcc(Assembler::lessEqual, L_exit_preLoop); 3996 __ movb(rbx, Address(saved_encCounter_start, used)); 3997 __ xorb(rbx, Address(from, pos)); 3998 __ movb(Address(to, pos), rbx); 3999 __ addptr(pos, 1); 4000 __ addptr(used, 1); 4001 __ subptr(len_reg, 1); 4002 4003 __ jmp(L_preLoop_start); 4004 4005 __ BIND(L_exit_preLoop); 4006 __ movl(Address(used_addr, 0), used); 4007 4008 // key length could be only {11, 13, 15} * 4 = {44, 52, 60} 4009 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()), rbx); // rbx as scratch 4010 __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 4011 __ cmpl(rbx, 52); 4012 __ jcc(Assembler::equal, L_multiBlock_loopTop[1]); 4013 __ cmpl(rbx, 60); 4014 __ jcc(Assembler::equal, L_multiBlock_loopTop[2]); 4015 4016 #define CTR_DoSix(opc, src_reg) \ 4017 __ opc(xmm_result0, src_reg); \ 4018 __ opc(xmm_result1, src_reg); \ 4019 __ opc(xmm_result2, src_reg); \ 4020 __ opc(xmm_result3, src_reg); \ 4021 __ opc(xmm_result4, src_reg); \ 4022 __ opc(xmm_result5, src_reg); 4023 4024 // k == 0 : generate code for key_128 4025 // k == 1 : generate code for key_192 4026 // k == 2 : generate code for key_256 4027 for (int k = 0; k < 3; ++k) { 4028 //multi blocks starts here 4029 __ align(OptoLoopAlignment); 4030 __ BIND(L_multiBlock_loopTop[k]); 4031 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least PARALLEL_FACTOR blocks left 4032 __ jcc(Assembler::less, L_singleBlockLoopTop[k]); 4033 load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask); 4034 4035 //load, then increase counters 4036 CTR_DoSix(movdqa, xmm_curr_counter); 4037 inc_counter(rbx, xmm_result1, 0x01, L__incCounter[k][0]); 4038 inc_counter(rbx, xmm_result2, 0x02, L__incCounter[k][1]); 4039 inc_counter(rbx, xmm_result3, 0x03, L__incCounter[k][2]); 4040 inc_counter(rbx, xmm_result4, 0x04, L__incCounter[k][3]); 4041 inc_counter(rbx, xmm_result5, 0x05, L__incCounter[k][4]); 4042 inc_counter(rbx, xmm_curr_counter, 0x06, L__incCounter[k][5]); 4043 CTR_DoSix(pshufb, xmm_counter_shuf_mask); // after increased, shuffled counters back for PXOR 4044 CTR_DoSix(pxor, xmm_key_tmp0); //PXOR with Round 0 key 4045 4046 //load two ROUND_KEYs at a time 4047 for (int i = 1; i < rounds[k]; ) { 4048 load_key(xmm_key_tmp1, key, (0x10 * i), xmm_key_shuf_mask); 4049 load_key(xmm_key_tmp0, key, (0x10 * (i+1)), xmm_key_shuf_mask); 4050 CTR_DoSix(aesenc, xmm_key_tmp1); 4051 i++; 4052 if (i != rounds[k]) { 4053 CTR_DoSix(aesenc, xmm_key_tmp0); 4054 } else { 4055 CTR_DoSix(aesenclast, xmm_key_tmp0); 4056 } 4057 i++; 4058 } 4059 4060 // get next PARALLEL_FACTOR blocks into xmm_result registers 4061 __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 4062 __ movdqu(xmm_from1, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 4063 __ movdqu(xmm_from2, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 4064 __ movdqu(xmm_from3, Address(from, pos, Address::times_1, 3 * AESBlockSize)); 4065 __ movdqu(xmm_from4, Address(from, pos, Address::times_1, 4 * AESBlockSize)); 4066 __ movdqu(xmm_from5, Address(from, pos, Address::times_1, 5 * AESBlockSize)); 4067 4068 __ pxor(xmm_result0, xmm_from0); 4069 __ pxor(xmm_result1, xmm_from1); 4070 __ pxor(xmm_result2, xmm_from2); 4071 __ pxor(xmm_result3, xmm_from3); 4072 __ pxor(xmm_result4, xmm_from4); 4073 __ pxor(xmm_result5, xmm_from5); 4074 4075 // store 6 results into the next 64 bytes of output 4076 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); 4077 __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1); 4078 __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2); 4079 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3); 4080 __ movdqu(Address(to, pos, Address::times_1, 4 * AESBlockSize), xmm_result4); 4081 __ movdqu(Address(to, pos, Address::times_1, 5 * AESBlockSize), xmm_result5); 4082 4083 __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); // increase the length of crypt text 4084 __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // decrease the remaining length 4085 __ jmp(L_multiBlock_loopTop[k]); 4086 4087 // singleBlock starts here 4088 __ align(OptoLoopAlignment); 4089 __ BIND(L_singleBlockLoopTop[k]); 4090 __ cmpptr(len_reg, 0); 4091 __ jcc(Assembler::lessEqual, L_exit); 4092 load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask); 4093 __ movdqa(xmm_result0, xmm_curr_counter); 4094 inc_counter(rbx, xmm_curr_counter, 0x01, L__incCounter_single[k]); 4095 __ pshufb(xmm_result0, xmm_counter_shuf_mask); 4096 __ pxor(xmm_result0, xmm_key_tmp0); 4097 for (int i = 1; i < rounds[k]; i++) { 4098 load_key(xmm_key_tmp0, key, (0x10 * i), xmm_key_shuf_mask); 4099 __ aesenc(xmm_result0, xmm_key_tmp0); 4100 } 4101 load_key(xmm_key_tmp0, key, (rounds[k] * 0x10), xmm_key_shuf_mask); 4102 __ aesenclast(xmm_result0, xmm_key_tmp0); 4103 __ cmpptr(len_reg, AESBlockSize); 4104 __ jcc(Assembler::less, L_processTail_insr[k]); 4105 __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 4106 __ pxor(xmm_result0, xmm_from0); 4107 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); 4108 __ addptr(pos, AESBlockSize); 4109 __ subptr(len_reg, AESBlockSize); 4110 __ jmp(L_singleBlockLoopTop[k]); 4111 __ BIND(L_processTail_insr[k]); // Process the tail part of the input array 4112 __ addptr(pos, len_reg); // 1. Insert bytes from src array into xmm_from0 register 4113 __ testptr(len_reg, 8); 4114 __ jcc(Assembler::zero, L_processTail_4_insr[k]); 4115 __ subptr(pos,8); 4116 __ pinsrq(xmm_from0, Address(from, pos), 0); 4117 __ BIND(L_processTail_4_insr[k]); 4118 __ testptr(len_reg, 4); 4119 __ jcc(Assembler::zero, L_processTail_2_insr[k]); 4120 __ subptr(pos,4); 4121 __ pslldq(xmm_from0, 4); 4122 __ pinsrd(xmm_from0, Address(from, pos), 0); 4123 __ BIND(L_processTail_2_insr[k]); 4124 __ testptr(len_reg, 2); 4125 __ jcc(Assembler::zero, L_processTail_1_insr[k]); 4126 __ subptr(pos, 2); 4127 __ pslldq(xmm_from0, 2); 4128 __ pinsrw(xmm_from0, Address(from, pos), 0); 4129 __ BIND(L_processTail_1_insr[k]); 4130 __ testptr(len_reg, 1); 4131 __ jcc(Assembler::zero, L_processTail_exit_insr[k]); 4132 __ subptr(pos, 1); 4133 __ pslldq(xmm_from0, 1); 4134 __ pinsrb(xmm_from0, Address(from, pos), 0); 4135 __ BIND(L_processTail_exit_insr[k]); 4136 4137 __ movdqu(Address(saved_encCounter_start, 0), xmm_result0); // 2. Perform pxor of the encrypted counter and plaintext Bytes. 4138 __ pxor(xmm_result0, xmm_from0); // Also the encrypted counter is saved for next invocation. 4139 4140 __ testptr(len_reg, 8); 4141 __ jcc(Assembler::zero, L_processTail_4_extr[k]); // 3. Extract bytes from xmm_result0 into the dest. array 4142 __ pextrq(Address(to, pos), xmm_result0, 0); 4143 __ psrldq(xmm_result0, 8); 4144 __ addptr(pos, 8); 4145 __ BIND(L_processTail_4_extr[k]); 4146 __ testptr(len_reg, 4); 4147 __ jcc(Assembler::zero, L_processTail_2_extr[k]); 4148 __ pextrd(Address(to, pos), xmm_result0, 0); 4149 __ psrldq(xmm_result0, 4); 4150 __ addptr(pos, 4); 4151 __ BIND(L_processTail_2_extr[k]); 4152 __ testptr(len_reg, 2); 4153 __ jcc(Assembler::zero, L_processTail_1_extr[k]); 4154 __ pextrw(Address(to, pos), xmm_result0, 0); 4155 __ psrldq(xmm_result0, 2); 4156 __ addptr(pos, 2); 4157 __ BIND(L_processTail_1_extr[k]); 4158 __ testptr(len_reg, 1); 4159 __ jcc(Assembler::zero, L_processTail_exit_extr[k]); 4160 __ pextrb(Address(to, pos), xmm_result0, 0); 4161 4162 __ BIND(L_processTail_exit_extr[k]); 4163 __ movl(Address(used_addr, 0), len_reg); 4164 __ jmp(L_exit); 4165 4166 } 4167 4168 __ BIND(L_exit); 4169 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); //counter is shuffled back. 4170 __ movdqu(Address(counter, 0), xmm_curr_counter); //save counter back 4171 __ pop(rbx); // pop the saved RBX. 4172 #ifdef _WIN64 4173 __ movl(rax, len_mem); 4174 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 4175 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 4176 __ addptr(rsp, 2 * wordSize); 4177 #else 4178 __ pop(rax); // return 'len' 4179 #endif 4180 __ leave(); // required for proper stackwalking of RuntimeStub frame 4181 __ ret(0); 4182 return start; 4183 } 4184 4185 // byte swap x86 long 4186 address generate_ghash_long_swap_mask() { 4187 __ align(CodeEntryAlignment); 4188 StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask"); 4189 address start = __ pc(); 4190 __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none ); 4191 __ emit_data64(0x0706050403020100, relocInfo::none ); 4192 return start; 4193 } 4194 4195 // byte swap x86 byte array 4196 address generate_ghash_byte_swap_mask() { 4197 __ align(CodeEntryAlignment); 4198 StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask"); 4199 address start = __ pc(); 4200 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none ); 4201 __ emit_data64(0x0001020304050607, relocInfo::none ); 4202 return start; 4203 } 4204 4205 /* Single and multi-block ghash operations */ 4206 address generate_ghash_processBlocks() { 4207 __ align(CodeEntryAlignment); 4208 Label L_ghash_loop, L_exit; 4209 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 4210 address start = __ pc(); 4211 4212 const Register state = c_rarg0; 4213 const Register subkeyH = c_rarg1; 4214 const Register data = c_rarg2; 4215 const Register blocks = c_rarg3; 4216 4217 const XMMRegister xmm_temp0 = xmm0; 4218 const XMMRegister xmm_temp1 = xmm1; 4219 const XMMRegister xmm_temp2 = xmm2; 4220 const XMMRegister xmm_temp3 = xmm3; 4221 const XMMRegister xmm_temp4 = xmm4; 4222 const XMMRegister xmm_temp5 = xmm5; 4223 const XMMRegister xmm_temp6 = xmm6; 4224 const XMMRegister xmm_temp7 = xmm7; 4225 const XMMRegister xmm_temp8 = xmm8; 4226 const XMMRegister xmm_temp9 = xmm9; 4227 const XMMRegister xmm_temp10 = xmm10; 4228 4229 __ enter(); 4230 4231 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 4232 // context for the registers used, where all instructions below are using 128-bit mode 4233 // On EVEX without VL and BW, these instructions will all be AVX. 4234 if (VM_Version::supports_avx512vlbw()) { 4235 __ movl(rax, 0xffff); 4236 __ kmovql(k1, rax); 4237 } 4238 4239 __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr())); 4240 4241 __ movdqu(xmm_temp0, Address(state, 0)); 4242 __ pshufb(xmm_temp0, xmm_temp10); 4243 4244 4245 __ BIND(L_ghash_loop); 4246 __ movdqu(xmm_temp2, Address(data, 0)); 4247 __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr())); 4248 4249 __ movdqu(xmm_temp1, Address(subkeyH, 0)); 4250 __ pshufb(xmm_temp1, xmm_temp10); 4251 4252 __ pxor(xmm_temp0, xmm_temp2); 4253 4254 // 4255 // Multiply with the hash key 4256 // 4257 __ movdqu(xmm_temp3, xmm_temp0); 4258 __ pclmulqdq(xmm_temp3, xmm_temp1, 0); // xmm3 holds a0*b0 4259 __ movdqu(xmm_temp4, xmm_temp0); 4260 __ pclmulqdq(xmm_temp4, xmm_temp1, 16); // xmm4 holds a0*b1 4261 4262 __ movdqu(xmm_temp5, xmm_temp0); 4263 __ pclmulqdq(xmm_temp5, xmm_temp1, 1); // xmm5 holds a1*b0 4264 __ movdqu(xmm_temp6, xmm_temp0); 4265 __ pclmulqdq(xmm_temp6, xmm_temp1, 17); // xmm6 holds a1*b1 4266 4267 __ pxor(xmm_temp4, xmm_temp5); // xmm4 holds a0*b1 + a1*b0 4268 4269 __ movdqu(xmm_temp5, xmm_temp4); // move the contents of xmm4 to xmm5 4270 __ psrldq(xmm_temp4, 8); // shift by xmm4 64 bits to the right 4271 __ pslldq(xmm_temp5, 8); // shift by xmm5 64 bits to the left 4272 __ pxor(xmm_temp3, xmm_temp5); 4273 __ pxor(xmm_temp6, xmm_temp4); // Register pair <xmm6:xmm3> holds the result 4274 // of the carry-less multiplication of 4275 // xmm0 by xmm1. 4276 4277 // We shift the result of the multiplication by one bit position 4278 // to the left to cope for the fact that the bits are reversed. 4279 __ movdqu(xmm_temp7, xmm_temp3); 4280 __ movdqu(xmm_temp8, xmm_temp6); 4281 __ pslld(xmm_temp3, 1); 4282 __ pslld(xmm_temp6, 1); 4283 __ psrld(xmm_temp7, 31); 4284 __ psrld(xmm_temp8, 31); 4285 __ movdqu(xmm_temp9, xmm_temp7); 4286 __ pslldq(xmm_temp8, 4); 4287 __ pslldq(xmm_temp7, 4); 4288 __ psrldq(xmm_temp9, 12); 4289 __ por(xmm_temp3, xmm_temp7); 4290 __ por(xmm_temp6, xmm_temp8); 4291 __ por(xmm_temp6, xmm_temp9); 4292 4293 // 4294 // First phase of the reduction 4295 // 4296 // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts 4297 // independently. 4298 __ movdqu(xmm_temp7, xmm_temp3); 4299 __ movdqu(xmm_temp8, xmm_temp3); 4300 __ movdqu(xmm_temp9, xmm_temp3); 4301 __ pslld(xmm_temp7, 31); // packed right shift shifting << 31 4302 __ pslld(xmm_temp8, 30); // packed right shift shifting << 30 4303 __ pslld(xmm_temp9, 25); // packed right shift shifting << 25 4304 __ pxor(xmm_temp7, xmm_temp8); // xor the shifted versions 4305 __ pxor(xmm_temp7, xmm_temp9); 4306 __ movdqu(xmm_temp8, xmm_temp7); 4307 __ pslldq(xmm_temp7, 12); 4308 __ psrldq(xmm_temp8, 4); 4309 __ pxor(xmm_temp3, xmm_temp7); // first phase of the reduction complete 4310 4311 // 4312 // Second phase of the reduction 4313 // 4314 // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these 4315 // shift operations. 4316 __ movdqu(xmm_temp2, xmm_temp3); 4317 __ movdqu(xmm_temp4, xmm_temp3); 4318 __ movdqu(xmm_temp5, xmm_temp3); 4319 __ psrld(xmm_temp2, 1); // packed left shifting >> 1 4320 __ psrld(xmm_temp4, 2); // packed left shifting >> 2 4321 __ psrld(xmm_temp5, 7); // packed left shifting >> 7 4322 __ pxor(xmm_temp2, xmm_temp4); // xor the shifted versions 4323 __ pxor(xmm_temp2, xmm_temp5); 4324 __ pxor(xmm_temp2, xmm_temp8); 4325 __ pxor(xmm_temp3, xmm_temp2); 4326 __ pxor(xmm_temp6, xmm_temp3); // the result is in xmm6 4327 4328 __ decrement(blocks); 4329 __ jcc(Assembler::zero, L_exit); 4330 __ movdqu(xmm_temp0, xmm_temp6); 4331 __ addptr(data, 16); 4332 __ jmp(L_ghash_loop); 4333 4334 __ BIND(L_exit); 4335 __ pshufb(xmm_temp6, xmm_temp10); // Byte swap 16-byte result 4336 __ movdqu(Address(state, 0), xmm_temp6); // store the result 4337 __ leave(); 4338 __ ret(0); 4339 return start; 4340 } 4341 4342 /** 4343 * Arguments: 4344 * 4345 * Inputs: 4346 * c_rarg0 - int crc 4347 * c_rarg1 - byte* buf 4348 * c_rarg2 - int length 4349 * 4350 * Ouput: 4351 * rax - int crc result 4352 */ 4353 address generate_updateBytesCRC32() { 4354 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions"); 4355 4356 __ align(CodeEntryAlignment); 4357 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32"); 4358 4359 address start = __ pc(); 4360 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 4361 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 4362 // rscratch1: r10 4363 const Register crc = c_rarg0; // crc 4364 const Register buf = c_rarg1; // source java byte array address 4365 const Register len = c_rarg2; // length 4366 const Register table = c_rarg3; // crc_table address (reuse register) 4367 const Register tmp = r11; 4368 assert_different_registers(crc, buf, len, table, tmp, rax); 4369 4370 BLOCK_COMMENT("Entry:"); 4371 __ enter(); // required for proper stackwalking of RuntimeStub frame 4372 4373 __ kernel_crc32(crc, buf, len, table, tmp); 4374 4375 __ movl(rax, crc); 4376 __ vzeroupper(); 4377 __ leave(); // required for proper stackwalking of RuntimeStub frame 4378 __ ret(0); 4379 4380 return start; 4381 } 4382 4383 /** 4384 * Arguments: 4385 * 4386 * Inputs: 4387 * c_rarg0 - int crc 4388 * c_rarg1 - byte* buf 4389 * c_rarg2 - long length 4390 * c_rarg3 - table_start - optional (present only when doing a library_call, 4391 * not used by x86 algorithm) 4392 * 4393 * Ouput: 4394 * rax - int crc result 4395 */ 4396 address generate_updateBytesCRC32C(bool is_pclmulqdq_supported) { 4397 assert(UseCRC32CIntrinsics, "need SSE4_2"); 4398 __ align(CodeEntryAlignment); 4399 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C"); 4400 address start = __ pc(); 4401 //reg.arg int#0 int#1 int#2 int#3 int#4 int#5 float regs 4402 //Windows RCX RDX R8 R9 none none XMM0..XMM3 4403 //Lin / Sol RDI RSI RDX RCX R8 R9 XMM0..XMM7 4404 const Register crc = c_rarg0; // crc 4405 const Register buf = c_rarg1; // source java byte array address 4406 const Register len = c_rarg2; // length 4407 const Register a = rax; 4408 const Register j = r9; 4409 const Register k = r10; 4410 const Register l = r11; 4411 #ifdef _WIN64 4412 const Register y = rdi; 4413 const Register z = rsi; 4414 #else 4415 const Register y = rcx; 4416 const Register z = r8; 4417 #endif 4418 assert_different_registers(crc, buf, len, a, j, k, l, y, z); 4419 4420 BLOCK_COMMENT("Entry:"); 4421 __ enter(); // required for proper stackwalking of RuntimeStub frame 4422 #ifdef _WIN64 4423 __ push(y); 4424 __ push(z); 4425 #endif 4426 __ crc32c_ipl_alg2_alt2(crc, buf, len, 4427 a, j, k, 4428 l, y, z, 4429 c_farg0, c_farg1, c_farg2, 4430 is_pclmulqdq_supported); 4431 __ movl(rax, crc); 4432 #ifdef _WIN64 4433 __ pop(z); 4434 __ pop(y); 4435 #endif 4436 __ vzeroupper(); 4437 __ leave(); // required for proper stackwalking of RuntimeStub frame 4438 __ ret(0); 4439 4440 return start; 4441 } 4442 4443 /** 4444 * Arguments: 4445 * 4446 * Input: 4447 * c_rarg0 - x address 4448 * c_rarg1 - x length 4449 * c_rarg2 - y address 4450 * c_rarg3 - y length 4451 * not Win64 4452 * c_rarg4 - z address 4453 * c_rarg5 - z length 4454 * Win64 4455 * rsp+40 - z address 4456 * rsp+48 - z length 4457 */ 4458 address generate_multiplyToLen() { 4459 __ align(CodeEntryAlignment); 4460 StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); 4461 4462 address start = __ pc(); 4463 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 4464 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 4465 const Register x = rdi; 4466 const Register xlen = rax; 4467 const Register y = rsi; 4468 const Register ylen = rcx; 4469 const Register z = r8; 4470 const Register zlen = r11; 4471 4472 // Next registers will be saved on stack in multiply_to_len(). 4473 const Register tmp1 = r12; 4474 const Register tmp2 = r13; 4475 const Register tmp3 = r14; 4476 const Register tmp4 = r15; 4477 const Register tmp5 = rbx; 4478 4479 BLOCK_COMMENT("Entry:"); 4480 __ enter(); // required for proper stackwalking of RuntimeStub frame 4481 4482 #ifndef _WIN64 4483 __ movptr(zlen, r9); // Save r9 in r11 - zlen 4484 #endif 4485 setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx 4486 // ylen => rcx, z => r8, zlen => r11 4487 // r9 and r10 may be used to save non-volatile registers 4488 #ifdef _WIN64 4489 // last 2 arguments (#4, #5) are on stack on Win64 4490 __ movptr(z, Address(rsp, 6 * wordSize)); 4491 __ movptr(zlen, Address(rsp, 7 * wordSize)); 4492 #endif 4493 4494 __ movptr(xlen, rsi); 4495 __ movptr(y, rdx); 4496 __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5); 4497 4498 restore_arg_regs(); 4499 4500 __ leave(); // required for proper stackwalking of RuntimeStub frame 4501 __ ret(0); 4502 4503 return start; 4504 } 4505 4506 /** 4507 * Arguments: 4508 * 4509 * Input: 4510 * c_rarg0 - obja address 4511 * c_rarg1 - objb address 4512 * c_rarg3 - length length 4513 * c_rarg4 - scale log2_array_indxscale 4514 * 4515 * Output: 4516 * rax - int >= mismatched index, < 0 bitwise complement of tail 4517 */ 4518 address generate_vectorizedMismatch() { 4519 __ align(CodeEntryAlignment); 4520 StubCodeMark mark(this, "StubRoutines", "vectorizedMismatch"); 4521 address start = __ pc(); 4522 4523 BLOCK_COMMENT("Entry:"); 4524 __ enter(); 4525 4526 #ifdef _WIN64 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 4527 const Register scale = c_rarg0; //rcx, will exchange with r9 4528 const Register objb = c_rarg1; //rdx 4529 const Register length = c_rarg2; //r8 4530 const Register obja = c_rarg3; //r9 4531 __ xchgq(obja, scale); //now obja and scale contains the correct contents 4532 4533 const Register tmp1 = r10; 4534 const Register tmp2 = r11; 4535 #endif 4536 #ifndef _WIN64 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 4537 const Register obja = c_rarg0; //U:rdi 4538 const Register objb = c_rarg1; //U:rsi 4539 const Register length = c_rarg2; //U:rdx 4540 const Register scale = c_rarg3; //U:rcx 4541 const Register tmp1 = r8; 4542 const Register tmp2 = r9; 4543 #endif 4544 const Register result = rax; //return value 4545 const XMMRegister vec0 = xmm0; 4546 const XMMRegister vec1 = xmm1; 4547 const XMMRegister vec2 = xmm2; 4548 4549 __ vectorized_mismatch(obja, objb, length, scale, result, tmp1, tmp2, vec0, vec1, vec2); 4550 4551 __ vzeroupper(); 4552 __ leave(); 4553 __ ret(0); 4554 4555 return start; 4556 } 4557 4558 /** 4559 * Arguments: 4560 * 4561 // Input: 4562 // c_rarg0 - x address 4563 // c_rarg1 - x length 4564 // c_rarg2 - z address 4565 // c_rarg3 - z lenth 4566 * 4567 */ 4568 address generate_squareToLen() { 4569 4570 __ align(CodeEntryAlignment); 4571 StubCodeMark mark(this, "StubRoutines", "squareToLen"); 4572 4573 address start = __ pc(); 4574 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 4575 // Unix: rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...) 4576 const Register x = rdi; 4577 const Register len = rsi; 4578 const Register z = r8; 4579 const Register zlen = rcx; 4580 4581 const Register tmp1 = r12; 4582 const Register tmp2 = r13; 4583 const Register tmp3 = r14; 4584 const Register tmp4 = r15; 4585 const Register tmp5 = rbx; 4586 4587 BLOCK_COMMENT("Entry:"); 4588 __ enter(); // required for proper stackwalking of RuntimeStub frame 4589 4590 setup_arg_regs(4); // x => rdi, len => rsi, z => rdx 4591 // zlen => rcx 4592 // r9 and r10 may be used to save non-volatile registers 4593 __ movptr(r8, rdx); 4594 __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 4595 4596 restore_arg_regs(); 4597 4598 __ leave(); // required for proper stackwalking of RuntimeStub frame 4599 __ ret(0); 4600 4601 return start; 4602 } 4603 4604 /** 4605 * Arguments: 4606 * 4607 * Input: 4608 * c_rarg0 - out address 4609 * c_rarg1 - in address 4610 * c_rarg2 - offset 4611 * c_rarg3 - len 4612 * not Win64 4613 * c_rarg4 - k 4614 * Win64 4615 * rsp+40 - k 4616 */ 4617 address generate_mulAdd() { 4618 __ align(CodeEntryAlignment); 4619 StubCodeMark mark(this, "StubRoutines", "mulAdd"); 4620 4621 address start = __ pc(); 4622 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 4623 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 4624 const Register out = rdi; 4625 const Register in = rsi; 4626 const Register offset = r11; 4627 const Register len = rcx; 4628 const Register k = r8; 4629 4630 // Next registers will be saved on stack in mul_add(). 4631 const Register tmp1 = r12; 4632 const Register tmp2 = r13; 4633 const Register tmp3 = r14; 4634 const Register tmp4 = r15; 4635 const Register tmp5 = rbx; 4636 4637 BLOCK_COMMENT("Entry:"); 4638 __ enter(); // required for proper stackwalking of RuntimeStub frame 4639 4640 setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx 4641 // len => rcx, k => r8 4642 // r9 and r10 may be used to save non-volatile registers 4643 #ifdef _WIN64 4644 // last argument is on stack on Win64 4645 __ movl(k, Address(rsp, 6 * wordSize)); 4646 #endif 4647 __ movptr(r11, rdx); // move offset in rdx to offset(r11) 4648 __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 4649 4650 restore_arg_regs(); 4651 4652 __ leave(); // required for proper stackwalking of RuntimeStub frame 4653 __ ret(0); 4654 4655 return start; 4656 } 4657 4658 address generate_libmExp() { 4659 StubCodeMark mark(this, "StubRoutines", "libmExp"); 4660 4661 address start = __ pc(); 4662 4663 const XMMRegister x0 = xmm0; 4664 const XMMRegister x1 = xmm1; 4665 const XMMRegister x2 = xmm2; 4666 const XMMRegister x3 = xmm3; 4667 4668 const XMMRegister x4 = xmm4; 4669 const XMMRegister x5 = xmm5; 4670 const XMMRegister x6 = xmm6; 4671 const XMMRegister x7 = xmm7; 4672 4673 const Register tmp = r11; 4674 4675 BLOCK_COMMENT("Entry:"); 4676 __ enter(); // required for proper stackwalking of RuntimeStub frame 4677 4678 __ fast_exp(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 4679 4680 __ leave(); // required for proper stackwalking of RuntimeStub frame 4681 __ ret(0); 4682 4683 return start; 4684 4685 } 4686 4687 address generate_libmLog() { 4688 StubCodeMark mark(this, "StubRoutines", "libmLog"); 4689 4690 address start = __ pc(); 4691 4692 const XMMRegister x0 = xmm0; 4693 const XMMRegister x1 = xmm1; 4694 const XMMRegister x2 = xmm2; 4695 const XMMRegister x3 = xmm3; 4696 4697 const XMMRegister x4 = xmm4; 4698 const XMMRegister x5 = xmm5; 4699 const XMMRegister x6 = xmm6; 4700 const XMMRegister x7 = xmm7; 4701 4702 const Register tmp1 = r11; 4703 const Register tmp2 = r8; 4704 4705 BLOCK_COMMENT("Entry:"); 4706 __ enter(); // required for proper stackwalking of RuntimeStub frame 4707 4708 __ fast_log(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2); 4709 4710 __ leave(); // required for proper stackwalking of RuntimeStub frame 4711 __ ret(0); 4712 4713 return start; 4714 4715 } 4716 4717 address generate_libmLog10() { 4718 StubCodeMark mark(this, "StubRoutines", "libmLog10"); 4719 4720 address start = __ pc(); 4721 4722 const XMMRegister x0 = xmm0; 4723 const XMMRegister x1 = xmm1; 4724 const XMMRegister x2 = xmm2; 4725 const XMMRegister x3 = xmm3; 4726 4727 const XMMRegister x4 = xmm4; 4728 const XMMRegister x5 = xmm5; 4729 const XMMRegister x6 = xmm6; 4730 const XMMRegister x7 = xmm7; 4731 4732 const Register tmp = r11; 4733 4734 BLOCK_COMMENT("Entry:"); 4735 __ enter(); // required for proper stackwalking of RuntimeStub frame 4736 4737 __ fast_log10(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 4738 4739 __ leave(); // required for proper stackwalking of RuntimeStub frame 4740 __ ret(0); 4741 4742 return start; 4743 4744 } 4745 4746 address generate_libmPow() { 4747 StubCodeMark mark(this, "StubRoutines", "libmPow"); 4748 4749 address start = __ pc(); 4750 4751 const XMMRegister x0 = xmm0; 4752 const XMMRegister x1 = xmm1; 4753 const XMMRegister x2 = xmm2; 4754 const XMMRegister x3 = xmm3; 4755 4756 const XMMRegister x4 = xmm4; 4757 const XMMRegister x5 = xmm5; 4758 const XMMRegister x6 = xmm6; 4759 const XMMRegister x7 = xmm7; 4760 4761 const Register tmp1 = r8; 4762 const Register tmp2 = r9; 4763 const Register tmp3 = r10; 4764 const Register tmp4 = r11; 4765 4766 BLOCK_COMMENT("Entry:"); 4767 __ enter(); // required for proper stackwalking of RuntimeStub frame 4768 4769 __ fast_pow(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4770 4771 __ leave(); // required for proper stackwalking of RuntimeStub frame 4772 __ ret(0); 4773 4774 return start; 4775 4776 } 4777 4778 address generate_libmSin() { 4779 StubCodeMark mark(this, "StubRoutines", "libmSin"); 4780 4781 address start = __ pc(); 4782 4783 const XMMRegister x0 = xmm0; 4784 const XMMRegister x1 = xmm1; 4785 const XMMRegister x2 = xmm2; 4786 const XMMRegister x3 = xmm3; 4787 4788 const XMMRegister x4 = xmm4; 4789 const XMMRegister x5 = xmm5; 4790 const XMMRegister x6 = xmm6; 4791 const XMMRegister x7 = xmm7; 4792 4793 const Register tmp1 = r8; 4794 const Register tmp2 = r9; 4795 const Register tmp3 = r10; 4796 const Register tmp4 = r11; 4797 4798 BLOCK_COMMENT("Entry:"); 4799 __ enter(); // required for proper stackwalking of RuntimeStub frame 4800 4801 #ifdef _WIN64 4802 __ push(rsi); 4803 __ push(rdi); 4804 #endif 4805 __ fast_sin(x0, x1, x2, x3, x4, x5, x6, x7, rax, rbx, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4806 4807 #ifdef _WIN64 4808 __ pop(rdi); 4809 __ pop(rsi); 4810 #endif 4811 4812 __ leave(); // required for proper stackwalking of RuntimeStub frame 4813 __ ret(0); 4814 4815 return start; 4816 4817 } 4818 4819 address generate_libmCos() { 4820 StubCodeMark mark(this, "StubRoutines", "libmCos"); 4821 4822 address start = __ pc(); 4823 4824 const XMMRegister x0 = xmm0; 4825 const XMMRegister x1 = xmm1; 4826 const XMMRegister x2 = xmm2; 4827 const XMMRegister x3 = xmm3; 4828 4829 const XMMRegister x4 = xmm4; 4830 const XMMRegister x5 = xmm5; 4831 const XMMRegister x6 = xmm6; 4832 const XMMRegister x7 = xmm7; 4833 4834 const Register tmp1 = r8; 4835 const Register tmp2 = r9; 4836 const Register tmp3 = r10; 4837 const Register tmp4 = r11; 4838 4839 BLOCK_COMMENT("Entry:"); 4840 __ enter(); // required for proper stackwalking of RuntimeStub frame 4841 4842 #ifdef _WIN64 4843 __ push(rsi); 4844 __ push(rdi); 4845 #endif 4846 __ fast_cos(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4847 4848 #ifdef _WIN64 4849 __ pop(rdi); 4850 __ pop(rsi); 4851 #endif 4852 4853 __ leave(); // required for proper stackwalking of RuntimeStub frame 4854 __ ret(0); 4855 4856 return start; 4857 4858 } 4859 4860 address generate_libmTan() { 4861 StubCodeMark mark(this, "StubRoutines", "libmTan"); 4862 4863 address start = __ pc(); 4864 4865 const XMMRegister x0 = xmm0; 4866 const XMMRegister x1 = xmm1; 4867 const XMMRegister x2 = xmm2; 4868 const XMMRegister x3 = xmm3; 4869 4870 const XMMRegister x4 = xmm4; 4871 const XMMRegister x5 = xmm5; 4872 const XMMRegister x6 = xmm6; 4873 const XMMRegister x7 = xmm7; 4874 4875 const Register tmp1 = r8; 4876 const Register tmp2 = r9; 4877 const Register tmp3 = r10; 4878 const Register tmp4 = r11; 4879 4880 BLOCK_COMMENT("Entry:"); 4881 __ enter(); // required for proper stackwalking of RuntimeStub frame 4882 4883 #ifdef _WIN64 4884 __ push(rsi); 4885 __ push(rdi); 4886 #endif 4887 __ fast_tan(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4888 4889 #ifdef _WIN64 4890 __ pop(rdi); 4891 __ pop(rsi); 4892 #endif 4893 4894 __ leave(); // required for proper stackwalking of RuntimeStub frame 4895 __ ret(0); 4896 4897 return start; 4898 4899 } 4900 4901 #undef __ 4902 #define __ masm-> 4903 4904 // Continuation point for throwing of implicit exceptions that are 4905 // not handled in the current activation. Fabricates an exception 4906 // oop and initiates normal exception dispatching in this 4907 // frame. Since we need to preserve callee-saved values (currently 4908 // only for C2, but done for C1 as well) we need a callee-saved oop 4909 // map and therefore have to make these stubs into RuntimeStubs 4910 // rather than BufferBlobs. If the compiler needs all registers to 4911 // be preserved between the fault point and the exception handler 4912 // then it must assume responsibility for that in 4913 // AbstractCompiler::continuation_for_implicit_null_exception or 4914 // continuation_for_implicit_division_by_zero_exception. All other 4915 // implicit exceptions (e.g., NullPointerException or 4916 // AbstractMethodError on entry) are either at call sites or 4917 // otherwise assume that stack unwinding will be initiated, so 4918 // caller saved registers were assumed volatile in the compiler. 4919 address generate_throw_exception(const char* name, 4920 address runtime_entry, 4921 Register arg1 = noreg, 4922 Register arg2 = noreg) { 4923 // Information about frame layout at time of blocking runtime call. 4924 // Note that we only have to preserve callee-saved registers since 4925 // the compilers are responsible for supplying a continuation point 4926 // if they expect all registers to be preserved. 4927 enum layout { 4928 rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt, 4929 rbp_off2, 4930 return_off, 4931 return_off2, 4932 framesize // inclusive of return address 4933 }; 4934 4935 int insts_size = 512; 4936 int locs_size = 64; 4937 4938 CodeBuffer code(name, insts_size, locs_size); 4939 OopMapSet* oop_maps = new OopMapSet(); 4940 MacroAssembler* masm = new MacroAssembler(&code); 4941 4942 address start = __ pc(); 4943 4944 // This is an inlined and slightly modified version of call_VM 4945 // which has the ability to fetch the return PC out of 4946 // thread-local storage and also sets up last_Java_sp slightly 4947 // differently than the real call_VM 4948 4949 __ enter(); // required for proper stackwalking of RuntimeStub frame 4950 4951 assert(is_even(framesize/2), "sp not 16-byte aligned"); 4952 4953 // return address and rbp are already in place 4954 __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog 4955 4956 int frame_complete = __ pc() - start; 4957 4958 // Set up last_Java_sp and last_Java_fp 4959 address the_pc = __ pc(); 4960 __ set_last_Java_frame(rsp, rbp, the_pc); 4961 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack 4962 4963 // Call runtime 4964 if (arg1 != noreg) { 4965 assert(arg2 != c_rarg1, "clobbered"); 4966 __ movptr(c_rarg1, arg1); 4967 } 4968 if (arg2 != noreg) { 4969 __ movptr(c_rarg2, arg2); 4970 } 4971 __ movptr(c_rarg0, r15_thread); 4972 BLOCK_COMMENT("call runtime_entry"); 4973 __ call(RuntimeAddress(runtime_entry)); 4974 4975 // Generate oop map 4976 OopMap* map = new OopMap(framesize, 0); 4977 4978 oop_maps->add_gc_map(the_pc - start, map); 4979 4980 __ reset_last_Java_frame(true); 4981 4982 __ leave(); // required for proper stackwalking of RuntimeStub frame 4983 4984 // check for pending exceptions 4985 #ifdef ASSERT 4986 Label L; 4987 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), 4988 (int32_t) NULL_WORD); 4989 __ jcc(Assembler::notEqual, L); 4990 __ should_not_reach_here(); 4991 __ bind(L); 4992 #endif // ASSERT 4993 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 4994 4995 4996 // codeBlob framesize is in words (not VMRegImpl::slot_size) 4997 RuntimeStub* stub = 4998 RuntimeStub::new_runtime_stub(name, 4999 &code, 5000 frame_complete, 5001 (framesize >> (LogBytesPerWord - LogBytesPerInt)), 5002 oop_maps, false); 5003 return stub->entry_point(); 5004 } 5005 5006 void create_control_words() { 5007 // Round to nearest, 53-bit mode, exceptions masked 5008 StubRoutines::_fpu_cntrl_wrd_std = 0x027F; 5009 // Round to zero, 53-bit mode, exception mased 5010 StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F; 5011 // Round to nearest, 24-bit mode, exceptions masked 5012 StubRoutines::_fpu_cntrl_wrd_24 = 0x007F; 5013 // Round to nearest, 64-bit mode, exceptions masked 5014 StubRoutines::_fpu_cntrl_wrd_64 = 0x037F; 5015 // Round to nearest, 64-bit mode, exceptions masked 5016 StubRoutines::_mxcsr_std = 0x1F80; 5017 // Note: the following two constants are 80-bit values 5018 // layout is critical for correct loading by FPU. 5019 // Bias for strict fp multiply/divide 5020 StubRoutines::_fpu_subnormal_bias1[0]= 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000 5021 StubRoutines::_fpu_subnormal_bias1[1]= 0x80000000; 5022 StubRoutines::_fpu_subnormal_bias1[2]= 0x03ff; 5023 // Un-Bias for strict fp multiply/divide 5024 StubRoutines::_fpu_subnormal_bias2[0]= 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000 5025 StubRoutines::_fpu_subnormal_bias2[1]= 0x80000000; 5026 StubRoutines::_fpu_subnormal_bias2[2]= 0x7bff; 5027 } 5028 5029 // Initialization 5030 void generate_initial() { 5031 // Generates all stubs and initializes the entry points 5032 5033 // This platform-specific settings are needed by generate_call_stub() 5034 create_control_words(); 5035 5036 // entry points that exist in all platforms Note: This is code 5037 // that could be shared among different platforms - however the 5038 // benefit seems to be smaller than the disadvantage of having a 5039 // much more complicated generator structure. See also comment in 5040 // stubRoutines.hpp. 5041 5042 StubRoutines::_forward_exception_entry = generate_forward_exception(); 5043 5044 StubRoutines::_call_stub_entry = 5045 generate_call_stub(StubRoutines::_call_stub_return_address); 5046 5047 // is referenced by megamorphic call 5048 StubRoutines::_catch_exception_entry = generate_catch_exception(); 5049 5050 // atomic calls 5051 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg(); 5052 StubRoutines::_atomic_xchg_long_entry = generate_atomic_xchg_long(); 5053 StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg(); 5054 StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte(); 5055 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long(); 5056 StubRoutines::_atomic_add_entry = generate_atomic_add(); 5057 StubRoutines::_atomic_add_long_entry = generate_atomic_add_long(); 5058 StubRoutines::_fence_entry = generate_orderaccess_fence(); 5059 5060 // platform dependent 5061 StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp(); 5062 StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp(); 5063 5064 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr(); 5065 5066 // Build this early so it's available for the interpreter. 5067 StubRoutines::_throw_StackOverflowError_entry = 5068 generate_throw_exception("StackOverflowError throw_exception", 5069 CAST_FROM_FN_PTR(address, 5070 SharedRuntime:: 5071 throw_StackOverflowError)); 5072 StubRoutines::_throw_delayed_StackOverflowError_entry = 5073 generate_throw_exception("delayed StackOverflowError throw_exception", 5074 CAST_FROM_FN_PTR(address, 5075 SharedRuntime:: 5076 throw_delayed_StackOverflowError)); 5077 if (UseCRC32Intrinsics) { 5078 // set table address before stub generation which use it 5079 StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table; 5080 StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32(); 5081 } 5082 5083 if (UseCRC32CIntrinsics) { 5084 bool supports_clmul = VM_Version::supports_clmul(); 5085 StubRoutines::x86::generate_CRC32C_table(supports_clmul); 5086 StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table; 5087 StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul); 5088 } 5089 if (VM_Version::supports_sse2() && UseLibmIntrinsic && InlineIntrinsics) { 5090 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) || 5091 vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos) || 5092 vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) { 5093 StubRoutines::x86::_ONEHALF_adr = (address)StubRoutines::x86::_ONEHALF; 5094 StubRoutines::x86::_P_2_adr = (address)StubRoutines::x86::_P_2; 5095 StubRoutines::x86::_SC_4_adr = (address)StubRoutines::x86::_SC_4; 5096 StubRoutines::x86::_Ctable_adr = (address)StubRoutines::x86::_Ctable; 5097 StubRoutines::x86::_SC_2_adr = (address)StubRoutines::x86::_SC_2; 5098 StubRoutines::x86::_SC_3_adr = (address)StubRoutines::x86::_SC_3; 5099 StubRoutines::x86::_SC_1_adr = (address)StubRoutines::x86::_SC_1; 5100 StubRoutines::x86::_PI_INV_TABLE_adr = (address)StubRoutines::x86::_PI_INV_TABLE; 5101 StubRoutines::x86::_PI_4_adr = (address)StubRoutines::x86::_PI_4; 5102 StubRoutines::x86::_PI32INV_adr = (address)StubRoutines::x86::_PI32INV; 5103 StubRoutines::x86::_SIGN_MASK_adr = (address)StubRoutines::x86::_SIGN_MASK; 5104 StubRoutines::x86::_P_1_adr = (address)StubRoutines::x86::_P_1; 5105 StubRoutines::x86::_P_3_adr = (address)StubRoutines::x86::_P_3; 5106 StubRoutines::x86::_NEG_ZERO_adr = (address)StubRoutines::x86::_NEG_ZERO; 5107 } 5108 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dexp)) { 5109 StubRoutines::_dexp = generate_libmExp(); 5110 } 5111 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) { 5112 StubRoutines::_dlog = generate_libmLog(); 5113 } 5114 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) { 5115 StubRoutines::_dlog10 = generate_libmLog10(); 5116 } 5117 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) { 5118 StubRoutines::_dpow = generate_libmPow(); 5119 } 5120 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) { 5121 StubRoutines::_dsin = generate_libmSin(); 5122 } 5123 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) { 5124 StubRoutines::_dcos = generate_libmCos(); 5125 } 5126 if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) { 5127 StubRoutines::_dtan = generate_libmTan(); 5128 } 5129 } 5130 } 5131 5132 void generate_all() { 5133 // Generates all stubs and initializes the entry points 5134 5135 // These entry points require SharedInfo::stack0 to be set up in 5136 // non-core builds and need to be relocatable, so they each 5137 // fabricate a RuntimeStub internally. 5138 StubRoutines::_throw_AbstractMethodError_entry = 5139 generate_throw_exception("AbstractMethodError throw_exception", 5140 CAST_FROM_FN_PTR(address, 5141 SharedRuntime:: 5142 throw_AbstractMethodError)); 5143 5144 StubRoutines::_throw_IncompatibleClassChangeError_entry = 5145 generate_throw_exception("IncompatibleClassChangeError throw_exception", 5146 CAST_FROM_FN_PTR(address, 5147 SharedRuntime:: 5148 throw_IncompatibleClassChangeError)); 5149 5150 StubRoutines::_throw_NullPointerException_at_call_entry = 5151 generate_throw_exception("NullPointerException at call throw_exception", 5152 CAST_FROM_FN_PTR(address, 5153 SharedRuntime:: 5154 throw_NullPointerException_at_call)); 5155 5156 // entry points that are platform specific 5157 #if INCLUDE_SHENANDOAHGC 5158 if (UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier)) { 5159 StubRoutines::x86::_shenandoah_wb = generate_shenandoah_wb(false, true); 5160 StubRoutines::_shenandoah_wb_C = generate_shenandoah_wb(true, !ShenandoahWriteBarrierCsetTestInIR); 5161 } 5162 #endif 5163 5164 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup(); 5165 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup(); 5166 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup(); 5167 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup(); 5168 5169 StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF); 5170 StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000); 5171 StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF); 5172 StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000); 5173 5174 // support for verify_oop (must happen after universe_init) 5175 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); 5176 5177 // arraycopy stubs used by compilers 5178 generate_arraycopy_stubs(); 5179 5180 // don't bother generating these AES intrinsic stubs unless global flag is set 5181 if (UseAESIntrinsics) { 5182 StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others 5183 StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock(); 5184 StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock(); 5185 StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt(); 5186 StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel(); 5187 } 5188 if (UseAESCTRIntrinsics){ 5189 StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask(); 5190 StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel(); 5191 } 5192 5193 if (UseSHA1Intrinsics) { 5194 StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask(); 5195 StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask(); 5196 StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress"); 5197 StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB"); 5198 } 5199 if (UseSHA256Intrinsics) { 5200 StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256; 5201 char* dst = (char*)StubRoutines::x86::_k256_W; 5202 char* src = (char*)StubRoutines::x86::_k256; 5203 for (int ii = 0; ii < 16; ++ii) { 5204 memcpy(dst + 32 * ii, src + 16 * ii, 16); 5205 memcpy(dst + 32 * ii + 16, src + 16 * ii, 16); 5206 } 5207 StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W; 5208 StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask(); 5209 StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress"); 5210 StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB"); 5211 } 5212 if (UseSHA512Intrinsics) { 5213 StubRoutines::x86::_k512_W_addr = (address)StubRoutines::x86::_k512_W; 5214 StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = generate_pshuffle_byte_flip_mask_sha512(); 5215 StubRoutines::_sha512_implCompress = generate_sha512_implCompress(false, "sha512_implCompress"); 5216 StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB"); 5217 } 5218 5219 // Generate GHASH intrinsics code 5220 if (UseGHASHIntrinsics) { 5221 StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask(); 5222 StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask(); 5223 StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks(); 5224 } 5225 5226 // Safefetch stubs. 5227 generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry, 5228 &StubRoutines::_safefetch32_fault_pc, 5229 &StubRoutines::_safefetch32_continuation_pc); 5230 generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry, 5231 &StubRoutines::_safefetchN_fault_pc, 5232 &StubRoutines::_safefetchN_continuation_pc); 5233 #ifdef COMPILER2 5234 if (UseMultiplyToLenIntrinsic) { 5235 StubRoutines::_multiplyToLen = generate_multiplyToLen(); 5236 } 5237 if (UseSquareToLenIntrinsic) { 5238 StubRoutines::_squareToLen = generate_squareToLen(); 5239 } 5240 if (UseMulAddIntrinsic) { 5241 StubRoutines::_mulAdd = generate_mulAdd(); 5242 } 5243 #ifndef _WINDOWS 5244 if (UseMontgomeryMultiplyIntrinsic) { 5245 StubRoutines::_montgomeryMultiply 5246 = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply); 5247 } 5248 if (UseMontgomerySquareIntrinsic) { 5249 StubRoutines::_montgomerySquare 5250 = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square); 5251 } 5252 #endif // WINDOWS 5253 #endif // COMPILER2 5254 5255 if (UseVectorizedMismatchIntrinsic) { 5256 StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch(); 5257 } 5258 } 5259 5260 public: 5261 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { 5262 if (all) { 5263 generate_all(); 5264 } else { 5265 generate_initial(); 5266 } 5267 } 5268 }; // end class declaration 5269 5270 void StubGenerator_generate(CodeBuffer* code, bool all) { 5271 StubGenerator g(code, all); 5272 }