/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_ALL_GCS
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zGlobals.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp.

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions

// Stub Code definitions

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    // This can destroy rscratch1 if counter is far from the code cache
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter) \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif

  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    c_rarg4:   (interpreter) entry point              address
  //    c_rarg5:   parameters                             intptr_t*
  //    16(rbp):   parameter size (in words)              int
  //    24(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -12 [ argument word 1      ]
  // -11 [ saved r15            ] <--- rsp_after_call
  // -10 [ saved r14            ]
  //  -9 [ saved r13            ]
  //  -8 [ saved r12            ]
  //  -7 [ saved rbx            ]
  //  -6 [ call wrapper         ]
  //  -5 [ result               ]
  //  -4 [ result type          ]
  //  -3 [ method               ]
  //  -2 [ entry point          ]
  //  -1 [ parameters           ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ parameter size       ]
  //   3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    48(rbp):   (interpreter) entry point              address
  //    56(rbp):   parameters                             intptr_t*
  //    64(rbp):   parameter size (in words)              int
  //    72(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -60 [ argument word 1      ]
  // -59 [ saved xmm31          ] <--- rsp_after_call
  //     [ saved xmm16-xmm30    ] (EVEX enabled, else the space is blank)
  // -27 [ saved xmm15          ]
  //     [ saved xmm7-xmm14     ]
  //  -9 [ saved xmm6           ] (each xmm register takes 2 slots)
  //  -7 [ saved r15            ]
  //  -6 [ saved r14            ]
  //  -5 [ saved r13            ]
  //  -4 [ saved r12            ]
  //  -3 [ saved rdi            ]
  //  -2 [ saved rsi            ]
  //  -1 [ saved rbx            ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ call wrapper         ]
  //   3 [ result               ]
  //   4 [ result type          ]
  //   5 [ method               ]
  //   6 [ entry point          ]
  //   7 [ parameters           ]
  //   8 [ parameter size       ]
  //   9 [ thread               ]
  //
  //    Windows reserves the caller's stack space for arguments 1-4.
  //    We spill c_rarg0-c_rarg3 to this space.

  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    xmm_save_first     = 6,  // save from xmm6
    xmm_save_last      = 31, // to xmm31
    xmm_save_base      = -9,
    rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -59
    r15_off            = -7,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    call_wrapper_off   =  2,
    result_off         =  3,
    result_type_off    =  4,
    method_off         =  5,
    entry_point_off    =  6,
    parameters_off     =  7,
    parameter_size_off =  8,
    thread_off         =  9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    parameter_size_off =  2,
    thread_off         =  3
#endif
  };

#ifdef _WIN64
  Address xmm_save(int reg) {
    assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
    return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
  }
#endif
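
  // For reference: C++ callers reach the stub generated below through the
  // CallStub function pointer type declared in stubRoutines.hpp, whose shape
  // mirrors the register/stack arguments documented above:
  //
  //   typedef void (*CallStub)(address   link,            // call wrapper
  //                            intptr_t* result,
  //                            BasicType result_type,
  //                            Method*   method,
  //                            address   entry_point,
  //                            intptr_t* parameters,
  //                            int       size_of_parameters,
  //                            TRAPS);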

  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,   c_rarg5); // parameters
    __ movptr(entry_point,  c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);
    if (UseAVX > 2) {
      __ movl(rbx, 0xffff);
      __ kmovwl(k1, rbx);
    }
#ifdef _WIN64
    int last_reg = 15;
    if (UseAVX > 2) {
      last_reg = 31;
    }
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vextractf32x4(xmm_save(i), as_XMMRegister(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(xmm_save(i), as_XMMRegister(i));
      }
    }

    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);       // parameter pointer
    __ movl(c_rarg1, c_rarg3);            // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0));  // get parameter
    __ addptr(c_rarg2, wordSize);         // advance to next parameter
    __ decrementl(c_rarg1);               // decrement counter
    __ push(rax);                         // pass parameter
    __ jcc(Assembler::notZero, loop);

    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);               // get Method*
    __ movptr(c_rarg1, entry_point);      // get entry_point
    __ mov(r13, rsp);                     // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::call_stub: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::call_stub: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L3);
    }
#endif

    // restore regs belonging to calling function
#ifdef _WIN64
    // emit the restores for xmm regs
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vinsertf32x4(as_XMMRegister(i), as_XMMRegister(i), xmm_save(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(as_XMMRegister(i), xmm_save(i));
      }
    }
#endif
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ vzeroupper();
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }
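
  // Note on the result handling above: a T_OBJECT result deliberately shares
  // the is_long path, since an oop comes back as a full 64-bit value in rax
  // and is stored to the result slot with a 64-bit movq, just like a T_LONG.
  // T_FLOAT and T_DOUBLE results arrive in xmm0 and are stored with
  // movflt/movdbl; everything else is treated as a 32-bit T_INT store.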

  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to set up the
  // rsp.
  //
  // rax: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    address start = __ pc();

    // same as in generate_call_stub():
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
    const Address thread        (rbp, thread_off         * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::catch_exception: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::catch_exception: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L3);
    }
#endif

    // set pending exception
    __ verify_oop(rax);

    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
    __ lea(rscratch1, ExternalAddress((address)__FILE__));
    __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
    __ movl(Address(r15_thread, Thread::exception_line_offset()), (int)__LINE__);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL,
           "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }
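
  // For reference, a VM-side caller observes the exception posted above
  // roughly like this (a sketch of the JavaCalls::call_helper() pattern,
  // not a verbatim excerpt):
  //
  //   StubRoutines::call_stub()(...);          // may unwind through
  //                                            // generate_catch_exception()
  //   if (thread->has_pending_exception()) {
  //     // handle or propagate the Java-level exception
  //   }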

  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, the exception pc must be on the stack!!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }

  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::xchg_long(jlong exchange_value, volatile jlong* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    if (os::is_MP()) __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }
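
  // Note on the cmpxchg stubs above and below: the x86 cmpxchg instruction
  // implicitly compares against rax and, on failure, loads the current
  // memory value into rax. Loading compare_value into rax up front therefore
  // yields the documented return value on both paths. The lock prefix is
  // needed only on multiprocessor systems, hence the os::is_MP() guard
  // (evaluated once, when the stub is generated).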

  // Support for int8_t atomic::atomic_cmpxchg(int8_t exchange_value, volatile int8_t* dest,
  //                                           int8_t compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_byte() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
    address start = __ pc();

    __ movsbq(rax, c_rarg2);
    if (os::is_MP()) __ lock();
    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for int64_t atomic::atomic_cmpxchg(int64_t exchange_value,
  //                                            volatile int64_t* dest,
  //                                            int64_t compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);
    if (os::is_MP()) __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jint atomic::add(jint add_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    if (os::is_MP()) __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);
    __ addl(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
    if (os::is_MP()) __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
    __ addptr(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }
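
  // Note: on x86_64 StoreLoad is the only ordering that requires an actual
  // fence instruction; MacroAssembler::membar() typically emits either an
  // mfence or a locked read-modify-write of a stack word (e.g.
  // "lock addl $0, (rsp)"), both of which serialize pending stores.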

  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess).  This is used as part of debugging
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp  (rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // caller's fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_sp()
  //
  // This routine is used to find the previous stack pointer for the
  // caller.
  address generate_get_previous_sp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_sp");
    address start = __ pc();

    __ movptr(rax, rsp);
    __ addptr(rax, 8); // return address is at the top of the stack.
    __ ret(0);

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize);      // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);      // Only check control and mask bits
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICalls");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }
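
  // The ZGC load barrier stub below implements the slow path taken when a
  // loaded oop has bad metadata (color) bits. Conceptually it performs (a
  // sketch; the real work is done by the runtime_entry passed in):
  //
  //   volatile oop* field = (volatile oop*)raddr;  // address of the bad oop
  //   raddr = runtime_entry(*field, field);        // healed oop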
"_weak" : "", raddr->name()); 824 825 __ align(CodeEntryAlignment); 826 StubCodeMark mark(this, "StubRoutines", os::strdup(name)); 827 address start = __ pc(); 828 829 // Save live registers 830 if (raddr != rax) { 831 __ push(rax); 832 } 833 if (raddr != rcx) { 834 __ push(rcx); 835 } 836 if (raddr != rdx) { 837 __ push(rdx); 838 } 839 if (raddr != rsi) { 840 __ push(rsi); 841 } 842 if (raddr != rdi) { 843 __ push(rdi); 844 } 845 if (raddr != r8) { 846 __ push(r8); 847 } 848 if (raddr != r9) { 849 __ push(r9); 850 } 851 if (raddr != r10) { 852 __ push(r10); 853 } 854 if (raddr != r11) { 855 __ push(r11); 856 } 857 858 // Setup arguments 859 __ movq(c_rarg1, raddr); 860 __ movq(c_rarg0, Address(raddr, 0)); 861 862 // Call barrier function 863 __ call_VM_leaf(runtime_entry, c_rarg0, c_rarg1); 864 865 // Move result returned in rax to raddr, if needed 866 if (raddr != rax) { 867 __ movq(raddr, rax); 868 } 869 870 // Restore saved registers 871 if (raddr != r11) { 872 __ pop(r11); 873 } 874 if (raddr != r10) { 875 __ pop(r10); 876 } 877 if (raddr != r9) { 878 __ pop(r9); 879 } 880 if (raddr != r8) { 881 __ pop(r8); 882 } 883 if (raddr != rdi) { 884 __ pop(rdi); 885 } 886 if (raddr != rsi) { 887 __ pop(rsi); 888 } 889 if (raddr != rdx) { 890 __ pop(rdx); 891 } 892 if (raddr != rcx) { 893 __ pop(rcx); 894 } 895 if (raddr != rax) { 896 __ pop(rax); 897 } 898 899 __ ret(0); 900 901 return start; 902 } 903 904 address generate_f2i_fixup() { 905 StubCodeMark mark(this, "StubRoutines", "f2i_fixup"); 906 Address inout(rsp, 5 * wordSize); // return address + 4 saves 907 908 address start = __ pc(); 909 910 Label L; 911 912 __ push(rax); 913 __ push(c_rarg3); 914 __ push(c_rarg2); 915 __ push(c_rarg1); 916 917 __ movl(rax, 0x7f800000); 918 __ xorl(c_rarg3, c_rarg3); 919 __ movl(c_rarg2, inout); 920 __ movl(c_rarg1, c_rarg2); 921 __ andl(c_rarg1, 0x7fffffff); 922 __ cmpl(rax, c_rarg1); // NaN? -> 0 923 __ jcc(Assembler::negative, L); 924 __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint 925 __ movl(c_rarg3, 0x80000000); 926 __ movl(rax, 0x7fffffff); 927 __ cmovl(Assembler::positive, c_rarg3, rax); 928 929 __ bind(L); 930 __ movptr(inout, c_rarg3); 931 932 __ pop(c_rarg1); 933 __ pop(c_rarg2); 934 __ pop(c_rarg3); 935 __ pop(rax); 936 937 __ ret(0); 938 939 return start; 940 } 941 942 address generate_f2l_fixup() { 943 StubCodeMark mark(this, "StubRoutines", "f2l_fixup"); 944 Address inout(rsp, 5 * wordSize); // return address + 4 saves 945 address start = __ pc(); 946 947 Label L; 948 949 __ push(rax); 950 __ push(c_rarg3); 951 __ push(c_rarg2); 952 __ push(c_rarg1); 953 954 __ movl(rax, 0x7f800000); 955 __ xorl(c_rarg3, c_rarg3); 956 __ movl(c_rarg2, inout); 957 __ movl(c_rarg1, c_rarg2); 958 __ andl(c_rarg1, 0x7fffffff); 959 __ cmpl(rax, c_rarg1); // NaN? -> 0 960 __ jcc(Assembler::negative, L); 961 __ testl(c_rarg2, c_rarg2); // signed ? 

  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1);      // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1);      // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L);  // NaN -> 0
    __ testptr(c_rarg0, c_rarg0);    // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L);  // NaN -> 0
    __ testq(c_rarg0, c_rarg0);      // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);

    return start;
  }
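
  // generate_fp_mask() above emits a 16-byte constant (the mask replicated
  // twice) for use as a memory operand of SSE logical instructions; such
  // masks are the usual way to implement float/double abs and negate, e.g.
  // andpd with 0x7FFFFFFFFFFFFFFF clears the sign bit and xorpd with
  // 0x8000000000000000 flips it.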

  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,
      saved_r10     = 8 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr   = 16 * wordSize,
      error_msg     = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit); // if obj is NULL it is OK

    if (UseLoadBarrier) {
      // Check if metadata bits indicate a bad oop
      __ testptr(rax, Address(r15_thread, JavaThread::zaddress_bad_mask_offset()));
      __ jcc(Assembler::notZero, error);
    }

    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // set r12 to heapbase for load_klass()
    __ reinit_heapbase();

    // make sure klass is 'reasonable', i.e., not zero.
    __ load_klass(rax, rax);        // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // restore c_rarg3
    __ pop(c_rarg2);                               // restore c_rarg2
    __ pop(r12);                                   // restore r12
    __ popf();                                     // restore flags
    __ ret(4 * wordSize);                          // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // get saved c_rarg3 back
    __ pop(c_rarg2);                               // get saved c_rarg2 back
    __ pop(r12);                                   // get saved r12 back
    __ popf();                                     // get saved flags off stack --
                                                   // will be ignored

    __ pusha();                                    // push registers
                                                   // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //     [tos +  0] 16 saved registers
    //     [tos + 16] return address
    //   * [tos + 17] error message (char*)
    //   * [tos + 18] object to verify (oop)
    //   * [tos + 19] saved rax - saved by caller and bashed
    //   * [tos + 20] saved r10 (rscratch1) - saved by caller
    //   * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ mov(rsp, r12);                               // restore rsp
    __ popa();                                      // pop registers (includes r12)
    __ ret(4 * wordSize);                           // pop caller saved stuff

    return start;
  }
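
  // Inferred from the stack layout documented above: a caller of this stub
  // pushes r10 (rscratch1), rax, the oop to verify and the error message, in
  // that order, before the call; MacroAssembler::verify_oop() is expected to
  // generate a matching sequence.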

  //
  // Verify that a register contains a clean 32-bit positive value
  // (high 32 bits are 0) so it can be used in 64-bit shifts.
  //
  // Input:
  //    Rint  - 32-bit value
  //    Rtmp  - scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#ifdef ASSERT
    Label L;
    assert_different_registers(Rtmp, Rint);
    __ movslq(Rtmp, Rint);
    __ cmpq(Rtmp, Rint);
    __ jcc(Assembler::equal, L);
    __ stop("high 32-bits of int value are not 0");
    __ bind(L);
#endif
  }

  // Generate overlap test for array copy stubs
  //
  // Input:
  //     c_rarg0 - from
  //     c_rarg1 - to
  //     c_rarg2 - element count
  //
  // Output:
  //     rax   - &from[element count]
  //
  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(NULL, &L_no_overlap, sf);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
    const Register from     = c_rarg0;
    const Register to       = c_rarg1;
    const Register count    = c_rarg2;
    const Register end_from = rax;

    __ cmpptr(to, from);
    __ lea(end_from, Address(from, count, sf, 0));
    if (NOLp == NULL) {
      ExternalAddress no_overlap(no_overlap_target);
      __ jump_cc(Assembler::belowEqual, no_overlap);
      __ cmpptr(to, end_from);
      __ jump_cc(Assembler::aboveEqual, no_overlap);
    } else {
      __ jcc(Assembler::belowEqual, (*NOLp));
      __ cmpptr(to, end_from);
      __ jcc(Assembler::aboveEqual, (*NOLp));
    }
  }
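
  // Note: array_overlap_test() above branches to the no-overlap target when
  // to <= from or to >= from + count * element_size, i.e. whenever a simple
  // forward copy is safe; only a destination overlapping the source at a
  // higher address falls through into the conjoint (backward) copy code.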

  // Shuffle first three arg regs on Windows into Linux/Solaris locations.
  //
  // Outputs:
  //    rdi - rcx
  //    rsi - rdx
  //    rdx - r8
  //    rcx - r9
  //
  // Registers r9 and r10 are used to save rdi and rsi on Windows, since the
  // latter are non-volatile there.  r9 and r10 should not be used by the caller.
  //
  void setup_arg_regs(int nargs = 3) {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
    assert(nargs == 3 || nargs == 4, "else fix");
#ifdef _WIN64
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    if (nargs >= 4)
      __ mov(rax, r9);  // r9 is also saved_rdi
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
    if (nargs >= 4)
      __ mov(rcx, rax); // c_rarg3 (via rax)
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
  }

  void restore_arg_regs() {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
#ifdef _WIN64
    __ movptr(rdi, saved_rdi);
    __ movptr(rsi, saved_rsi);
#endif
  }

  // Generate code for an array load barrier
  //
  //     addr  - starting address
  //     count - element count
  //
  //     Destroys no registers!
  //
  void gen_load_ref_array_barrier(Register addr, Register count) {
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::Z:
        __ pusha();             // push registers
        if (count == c_rarg0) {
          if (addr == c_rarg1) {
            // exactly backwards!!
            __ xchgptr(c_rarg1, c_rarg0);
          } else {
            __ movptr(c_rarg1, count);
            __ movptr(c_rarg0, addr);
          }
        } else {
          __ movptr(c_rarg0, addr);
          __ movptr(c_rarg1, count);
        }
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<void (*)(volatile oop*, size_t)>(ZBarrier::load_barrier_on_oop_array)), 2);
        __ popa();
        break;
      case BarrierSet::G1BarrierSet:
      case BarrierSet::CardTableModRef:
        // No barrier
        break;
      default:
        ShouldNotReachHere();
        break;
    }
  }

  // Generate code for an array write pre barrier
  //
  //     addr               - starting address
  //     count              - element count
  //     dest_uninitialized - true if the destination is known to be uninitialized
  //
  //     Destroys no registers!
  //
  void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1BarrierSet:
        // With G1, don't generate the call if we statically know that the target is uninitialized
        if (!dest_uninitialized) {
          Label filtered;
          Address in_progress(r15_thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                                   SATBMarkQueue::byte_offset_of_active()));
          // Is marking active?
          if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
            __ cmpl(in_progress, 0);
          } else {
            assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
            __ cmpb(in_progress, 0);
          }
          __ jcc(Assembler::equal, filtered);

          __ pusha();             // push registers
          if (count == c_rarg0) {
            if (addr == c_rarg1) {
              // exactly backwards!!
              __ xchgptr(c_rarg1, c_rarg0);
            } else {
              __ movptr(c_rarg1, count);
              __ movptr(c_rarg0, addr);
            }
          } else {
            __ movptr(c_rarg0, addr);
            __ movptr(c_rarg1, count);
          }
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
          __ popa();

          __ bind(filtered);
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::Z:
        // No barrier
        break;
      default:
        ShouldNotReachHere();

    }
  }
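
  // The G1 pre barrier above maintains the SATB (snapshot-at-the-beginning)
  // invariant: while marking is active, the about-to-be-overwritten old
  // values of the destination range are recorded first. A sketch of the
  // effect of the runtime call:
  //
  //   if (marking_active) {
  //     for (size_t i = 0; i < count; i++) {
  //       remember(dest[i]);  // enqueued by static_write_ref_array_pre
  //     }
  //   }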

  //
  // Generate code for an array write post barrier
  //
  //  Input:
  //     start    - register containing starting address of destination array
  //     count    - elements count
  //     scratch  - scratch register
  //
  //  The input registers are overwritten.
  //
  void gen_write_ref_array_post_barrier(Register start, Register count, Register scratch) {
    assert_different_registers(start, count, scratch);
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1BarrierSet:
        {
          __ pusha();             // push registers (overkill)
          if (c_rarg0 == count) { // On win64 c_rarg0 == rcx
            assert_different_registers(c_rarg1, start);
            __ mov(c_rarg1, count);
            __ mov(c_rarg0, start);
          } else {
            assert_different_registers(c_rarg0, count);
            __ mov(c_rarg0, start);
            __ mov(c_rarg1, count);
          }
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
          __ popa();
        }
        break;
      case BarrierSet::CardTableModRef:
        {
          Label L_loop, L_done;
          const Register end = count;

          __ testl(count, count);
          __ jcc(Assembler::zero, L_done);    // zero count - nothing to do

          __ leaq(end, Address(start, count, TIMES_OOP, 0)); // end == start+count*oop_size
          __ subptr(end, BytesPerHeapOop);    // end - 1 to make inclusive
          __ shrptr(start, CardTable::card_shift);
          __ shrptr(end, CardTable::card_shift);
          __ subptr(end, start);              // end --> cards count

          int64_t disp = ci_card_table_address_as<int64_t>();
          __ mov64(scratch, disp);
          __ addptr(start, scratch);
          __ BIND(L_loop);
          __ movb(Address(start, count, Address::times_1), 0);
          __ decrement(count);
          __ jcc(Assembler::greaterEqual, L_loop);
          __ BIND(L_done);
        }
        break;
      case BarrierSet::Z:
        // No barrier
        break;
      default:
        ShouldNotReachHere();

    }
  }
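
  // The CardTableModRef case above dirties every card spanned by the updated
  // oop range. In C++ terms (a sketch matching the assembly, assuming one
  // byte per card and 0 as the dirty value, as the movb above stores):
  //
  //   for (uintptr_t card = (uintptr_t)first_oop >> card_shift;
  //        card <= (uintptr_t)last_oop >> card_shift;
  //        card++) {
  //     card_table_base[card] = 0;  // dirty
  //   }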

  // Copy big chunks forward
  //
  // Inputs:
  //   end_from       - source arrays end address
  //   end_to         - destination array end address
  //   qword_count    - 64-bit element count, negative
  //   to             - scratch
  //   L_copy_bytes   - entry label
  //   L_copy_8_bytes - exit label
  //
  void copy_bytes_forward(Register end_from, Register end_to,
                          Register qword_count, Register to,
                          Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      if (UseAVX > 2) {
        __ movl(to, 0xffff);
        __ kmovwl(k1, to);
      }
      // Copy 64-bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit);
        __ evmovdqul(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
        __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
        __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 8);
      __ jcc(Assembler::lessEqual, L_loop);
      __ subptr(qword_count, 4);  // sub(8) and add(4)
      __ jccb(Assembler::greater, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
      }
      __ addptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32-bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
      __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
      __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
      __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
      __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);

      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 4);
      __ jcc(Assembler::lessEqual, L_loop);
    }
    __ subptr(qword_count, 4);
    __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
  }
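
  // Note: copy_bytes_forward() is entered at L_copy_bytes with qword_count
  // negative and the end pointers biased to the last qword, so one indexed
  // addressing form (end, qword_count, times_8, disp) serves every
  // iteration; the loop counts the index up toward zero, and once it turns
  // positive only the sub-qword tail remains for the caller's exit label.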

  // Copy big chunks backward
  //
  // Inputs:
  //   from           - source arrays address
  //   dest           - destination array address
  //   qword_count    - 64-bit element count
  //   to             - scratch
  //   L_copy_bytes   - entry label
  //   L_copy_8_bytes - exit label
  //
  void copy_bytes_backward(Register from, Register dest,
                           Register qword_count, Register to,
                           Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      if (UseAVX > 2) {
        __ movl(to, 0xffff);
        __ kmovwl(k1, to);
      }
      // Copy 64-bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 0), Assembler::AVX_512bit);
        __ evmovdqul(Address(dest, qword_count, Address::times_8, 0), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
        __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
        __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
        __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
        __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
        __ movdqu(xmm3, Address(from, qword_count, Address::times_8, 0));
        __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 8);
      __ jcc(Assembler::greaterEqual, L_loop);

      __ addptr(qword_count, 4);  // add(8) and sub(4)
      __ jccb(Assembler::less, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
        __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
      }
      __ subptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32-bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(from, qword_count, Address::times_8, 24));
      __ movq(Address(dest, qword_count, Address::times_8, 24), to);
      __ movq(to, Address(from, qword_count, Address::times_8, 16));
      __ movq(Address(dest, qword_count, Address::times_8, 16), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  8));
      __ movq(Address(dest, qword_count, Address::times_8,  8), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  0));
      __ movq(Address(dest, qword_count, Address::times_8,  0), to);

      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 4);
      __ jcc(Assembler::greaterEqual, L_loop);
    }
    __ addptr(qword_count, 4);
    __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
  }
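
  // Note: the "clean upper bits" vpxor steps in both copy loops, together
  // with the vzeroupper calls in the copy stubs below, avoid the AVX-to-SSE
  // transition penalty that dirty upper YMM state would impose on subsequent
  // legacy SSE code on many Intel microarchitectures.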

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_byte_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_byte_copy().
  //
  address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    Label L_copy_byte, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count); // make the count negative
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jccb(Assembler::zero, L_copy_byte);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ addptr(end_from, 2);
    __ addptr(end_to, 2);

    // Check for and copy trailing byte
    __ BIND(L_copy_byte);
    __ testl(byte_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movb(rax, Address(end_from, 8));
    __ movb(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-bytes chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }
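
  // Once published, the stub above is reached through a plain function
  // pointer. A hedged sketch of a call site (illustrative only; real callers
  // go through the StubRoutines accessors, e.g. jbyte_disjoint_arraycopy()):
  //
  //   typedef void (*CopyFunc)(jbyte* from, jbyte* to, size_t count);
  //   ((CopyFunc)StubRoutines::jbyte_disjoint_arraycopy())(src, dst, n);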

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from high to low addresses.

    // Check for and copy trailing byte
    __ testl(byte_count, 1);
    __ jcc(Assembler::zero, L_copy_2_bytes);
    __ movb(rax, Address(from, byte_count, Address::times_1, -1));
    __ movb(Address(to, byte_count, Address::times_1, -1), rax);
    __ decrement(byte_count); // Adjust for possible trailing word

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jcc(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, byte_count, Address::times_1, -2));
    __ movw(Address(to, byte_count, Address::times_1, -2), rax);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-bytes chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
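
  // Note: because the conjoint copy above runs from high to low addresses,
  // the trailing byte, word and dword are copied *before* the bulk qword
  // loop; that reduces the remaining length to a qword multiple so the
  // backward bulk copy can use the same qword indexing as the forward case.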
- true => Input and output aligned on a HeapWord == 8-byte boundary 1834 // ignored 1835 // name - stub name string 1836 // 1837 // Inputs: 1838 // c_rarg0 - source array address 1839 // c_rarg1 - destination array address 1840 // c_rarg2 - element count, treated as ssize_t, can be zero 1841 // 1842 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we 1843 // let the hardware handle it. The two or four words within dwords 1844 // or qwords that span cache line boundaries will still be loaded 1845 // and stored atomically. 1846 // 1847 // Side Effects: 1848 // disjoint_short_copy_entry is set to the no-overlap entry point 1849 // used by generate_conjoint_short_copy(). 1850 // 1851 address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) { 1852 __ align(CodeEntryAlignment); 1853 StubCodeMark mark(this, "StubRoutines", name); 1854 address start = __ pc(); 1855 1856 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes,L_copy_2_bytes,L_exit; 1857 const Register from = rdi; // source array address 1858 const Register to = rsi; // destination array address 1859 const Register count = rdx; // elements count 1860 const Register word_count = rcx; 1861 const Register qword_count = count; 1862 const Register end_from = from; // source array end address 1863 const Register end_to = to; // destination array end address 1864 // End pointers are inclusive, and if count is not zero they point 1865 // to the last unit copied: end_to[0] := end_from[0] 1866 1867 __ enter(); // required for proper stackwalking of RuntimeStub frame 1868 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1869 1870 if (entry != NULL) { 1871 *entry = __ pc(); 1872 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1873 BLOCK_COMMENT("Entry:"); 1874 } 1875 1876 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1877 // r9 and r10 may be used to save non-volatile registers 1878 1879 // 'from', 'to' and 'count' are now valid 1880 __ movptr(word_count, count); 1881 __ shrptr(count, 2); // count => qword_count 1882 1883 // Copy from low to high addresses. Use 'to' as scratch. 
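    // A sketch of the loop structure set up below (C-like pseudocode in
    // this comment only, not generated code): the end pointers are biased
    // to the last qword and the count is negated, so a single increment
    // both advances the index and tests for termination. The bulk copy in
    // copy_bytes_forward uses the same indexing.
    //
    //   ptrdiff_t i = -(ptrdiff_t)qword_count;   // runs -count .. -1
    //   do {
    //     end_to[i + 1] = end_from[i + 1];       // 8 bytes at a time
    //   } while (++i != 0);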
1884 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1885 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1886 __ negptr(qword_count); 1887 __ jmp(L_copy_bytes); 1888 1889 // Copy trailing qwords 1890 __ BIND(L_copy_8_bytes); 1891 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1892 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1893 __ increment(qword_count); 1894 __ jcc(Assembler::notZero, L_copy_8_bytes); 1895 1896 // Original 'dest' is trashed, so we can't use it as a 1897 // base register for a possible trailing word copy 1898 1899 // Check for and copy trailing dword 1900 __ BIND(L_copy_4_bytes); 1901 __ testl(word_count, 2); 1902 __ jccb(Assembler::zero, L_copy_2_bytes); 1903 __ movl(rax, Address(end_from, 8)); 1904 __ movl(Address(end_to, 8), rax); 1905 1906 __ addptr(end_from, 4); 1907 __ addptr(end_to, 4); 1908 1909 // Check for and copy trailing word 1910 __ BIND(L_copy_2_bytes); 1911 __ testl(word_count, 1); 1912 __ jccb(Assembler::zero, L_exit); 1913 __ movw(rax, Address(end_from, 8)); 1914 __ movw(Address(end_to, 8), rax); 1915 1916 __ BIND(L_exit); 1917 restore_arg_regs(); 1918 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1919 __ xorptr(rax, rax); // return 0 1920 __ vzeroupper(); 1921 __ leave(); // required for proper stackwalking of RuntimeStub frame 1922 __ ret(0); 1923 1924 // Copy in multi-bytes chunks 1925 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1926 __ jmp(L_copy_4_bytes); 1927 1928 return start; 1929 } 1930 1931 address generate_fill(BasicType t, bool aligned, const char *name) { 1932 __ align(CodeEntryAlignment); 1933 StubCodeMark mark(this, "StubRoutines", name); 1934 address start = __ pc(); 1935 1936 BLOCK_COMMENT("Entry:"); 1937 1938 const Register to = c_rarg0; // source array address 1939 const Register value = c_rarg1; // value 1940 const Register count = c_rarg2; // elements count 1941 1942 __ enter(); // required for proper stackwalking of RuntimeStub frame 1943 1944 __ generate_fill(t, aligned, to, value, count, rax, xmm0); 1945 1946 __ vzeroupper(); 1947 __ leave(); // required for proper stackwalking of RuntimeStub frame 1948 __ ret(0); 1949 return start; 1950 } 1951 1952 // Arguments: 1953 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1954 // ignored 1955 // name - stub name string 1956 // 1957 // Inputs: 1958 // c_rarg0 - source array address 1959 // c_rarg1 - destination array address 1960 // c_rarg2 - element count, treated as ssize_t, can be zero 1961 // 1962 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we 1963 // let the hardware handle it. The two or four words within dwords 1964 // or qwords that span cache line boundaries will still be loaded 1965 // and stored atomically. 
  //
  address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
                                       address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_2);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2); // count => qword_count

    // Copy from high to low addresses.  Use 'to' as scratch.

    // Check for and copy trailing word
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, word_count, Address::times_2, -2));
    __ movw(Address(to, word_count, Address::times_2, -2), rax);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
2054 // 2055 // Side Effects: 2056 // disjoint_int_copy_entry is set to the no-overlap entry point 2057 // used by generate_conjoint_int_oop_copy(). 2058 // 2059 address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry, 2060 const char *name, bool dest_uninitialized = false) { 2061 __ align(CodeEntryAlignment); 2062 StubCodeMark mark(this, "StubRoutines", name); 2063 address start = __ pc(); 2064 2065 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit; 2066 const Register from = rdi; // source array address 2067 const Register to = rsi; // destination array address 2068 const Register count = rdx; // elements count 2069 const Register dword_count = rcx; 2070 const Register qword_count = count; 2071 const Register end_from = from; // source array end address 2072 const Register end_to = to; // destination array end address 2073 const Register saved_to = r11; // saved destination array address 2074 // End pointers are inclusive, and if count is not zero they point 2075 // to the last unit copied: end_to[0] := end_from[0] 2076 2077 __ enter(); // required for proper stackwalking of RuntimeStub frame 2078 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2079 2080 if (entry != NULL) { 2081 *entry = __ pc(); 2082 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2083 BLOCK_COMMENT("Entry:"); 2084 } 2085 2086 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 2087 // r9 and r10 may be used to save non-volatile registers 2088 if (is_oop) { 2089 __ movq(saved_to, to); 2090 gen_load_ref_array_barrier(from, count); 2091 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); 2092 } 2093 2094 // 'from', 'to' and 'count' are now valid 2095 __ movptr(dword_count, count); 2096 __ shrptr(count, 1); // count => qword_count 2097 2098 // Copy from low to high addresses. Use 'to' as scratch. 
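    // The split above leaves qword_count = dword_count >> 1 driving the
    // 8-byte loop, with the low bit of dword_count selecting a single
    // trailing 4-byte element (an int, or a narrow oop when compressed
    // oops are in use). In outline (comment-only sketch):
    //
    //   copy qword_count 8-byte units, low to high;
    //   if (dword_count & 1)  copy one final 4-byte element;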
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
    __ jccb(Assembler::zero, L_exit);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ BIND(L_exit);
    if (is_oop) {
      gen_write_ref_array_post_barrier(saved_to, dword_count, rax);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ vzeroupper();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
                                         address *entry, const char *name,
                                         bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_4);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    if (is_oop) {
      // no registers are destroyed by this call
      gen_load_ref_array_barrier(from, count);
      gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
    }

    assert_clean_int(count, rax); // Make sure 'count' is clean int.
    // 'from', 'to' and 'count' are now valid
    __ movptr(dword_count, count);
    __ shrptr(count, 1); // count => qword_count

    // Copy from high to low addresses.  Use 'to' as scratch.
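    // Because the arrays may overlap with 'to' above 'from', this stub
    // copies from the high addresses down: the sub-qword tail is moved
    // first, then the qwords, so no source byte is overwritten before it
    // has been read. Roughly (comment-only sketch):
    //
    //   if (dword_count & 1)  to[dword_count-1] = from[dword_count-1];
    //   while (qword_count--) copy one 8-byte unit, highest first;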
2190 2191 // Check for and copy trailing dword 2192 __ testl(dword_count, 1); 2193 __ jcc(Assembler::zero, L_copy_bytes); 2194 __ movl(rax, Address(from, dword_count, Address::times_4, -4)); 2195 __ movl(Address(to, dword_count, Address::times_4, -4), rax); 2196 __ jmp(L_copy_bytes); 2197 2198 // Copy trailing qwords 2199 __ BIND(L_copy_8_bytes); 2200 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2201 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2202 __ decrement(qword_count); 2203 __ jcc(Assembler::notZero, L_copy_8_bytes); 2204 2205 if (is_oop) { 2206 __ jmp(L_exit); 2207 } 2208 restore_arg_regs(); 2209 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2210 __ xorptr(rax, rax); // return 0 2211 __ vzeroupper(); 2212 __ leave(); // required for proper stackwalking of RuntimeStub frame 2213 __ ret(0); 2214 2215 // Copy in multi-bytes chunks 2216 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2217 2218 __ BIND(L_exit); 2219 if (is_oop) { 2220 gen_write_ref_array_post_barrier(to, dword_count, rax); 2221 } 2222 restore_arg_regs(); 2223 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2224 __ xorptr(rax, rax); // return 0 2225 __ vzeroupper(); 2226 __ leave(); // required for proper stackwalking of RuntimeStub frame 2227 __ ret(0); 2228 2229 return start; 2230 } 2231 2232 // Arguments: 2233 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2234 // ignored 2235 // is_oop - true => oop array, so generate store check code 2236 // name - stub name string 2237 // 2238 // Inputs: 2239 // c_rarg0 - source array address 2240 // c_rarg1 - destination array address 2241 // c_rarg2 - element count, treated as ssize_t, can be zero 2242 // 2243 // Side Effects: 2244 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the 2245 // no-overlap entry point used by generate_conjoint_long_oop_copy(). 2246 // 2247 address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry, 2248 const char *name, bool dest_uninitialized = false) { 2249 __ align(CodeEntryAlignment); 2250 StubCodeMark mark(this, "StubRoutines", name); 2251 address start = __ pc(); 2252 2253 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2254 const Register from = rdi; // source array address 2255 const Register to = rsi; // destination array address 2256 const Register qword_count = rdx; // elements count 2257 const Register end_from = from; // source array end address 2258 const Register end_to = rcx; // destination array end address 2259 const Register saved_to = to; 2260 const Register saved_count = r11; 2261 // End pointers are inclusive, and if count is not zero they point 2262 // to the last unit copied: end_to[0] := end_from[0] 2263 2264 __ enter(); // required for proper stackwalking of RuntimeStub frame 2265 // Save no-overlap entry point for generate_conjoint_long_oop_copy() 2266 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 
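    // Note: the entry point published below lies after the enter() above,
    // so it is only reached by direct jumps from other stubs (e.g. the
    // conjoint variant's no-overlap test) that have already built an
    // equivalent frame; the leave() on this path then pops that frame.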
2267 2268 if (entry != NULL) { 2269 *entry = __ pc(); 2270 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2271 BLOCK_COMMENT("Entry:"); 2272 } 2273 2274 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 2275 // r9 and r10 may be used to save non-volatile registers 2276 // 'from', 'to' and 'qword_count' are now valid 2277 if (is_oop) { 2278 // Save to and count for store barrier 2279 __ movptr(saved_count, qword_count); 2280 // no registers are destroyed by this call 2281 gen_load_ref_array_barrier(from, qword_count); 2282 gen_write_ref_array_pre_barrier(to, qword_count, dest_uninitialized); 2283 } 2284 2285 // Copy from low to high addresses. Use 'to' as scratch. 2286 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 2287 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 2288 __ negptr(qword_count); 2289 __ jmp(L_copy_bytes); 2290 2291 // Copy trailing qwords 2292 __ BIND(L_copy_8_bytes); 2293 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 2294 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 2295 __ increment(qword_count); 2296 __ jcc(Assembler::notZero, L_copy_8_bytes); 2297 2298 if (is_oop) { 2299 __ jmp(L_exit); 2300 } else { 2301 restore_arg_regs(); 2302 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2303 __ xorptr(rax, rax); // return 0 2304 __ vzeroupper(); 2305 __ leave(); // required for proper stackwalking of RuntimeStub frame 2306 __ ret(0); 2307 } 2308 2309 // Copy in multi-bytes chunks 2310 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2311 2312 if (is_oop) { 2313 __ BIND(L_exit); 2314 gen_write_ref_array_post_barrier(saved_to, saved_count, rax); 2315 } 2316 restore_arg_regs(); 2317 if (is_oop) { 2318 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2319 } else { 2320 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2321 } 2322 __ vzeroupper(); 2323 __ xorptr(rax, rax); // return 0 2324 __ leave(); // required for proper stackwalking of RuntimeStub frame 2325 __ ret(0); 2326 2327 return start; 2328 } 2329 2330 // Arguments: 2331 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2332 // ignored 2333 // is_oop - true => oop array, so generate store check code 2334 // name - stub name string 2335 // 2336 // Inputs: 2337 // c_rarg0 - source array address 2338 // c_rarg1 - destination array address 2339 // c_rarg2 - element count, treated as ssize_t, can be zero 2340 // 2341 address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, 2342 address nooverlap_target, address *entry, 2343 const char *name, bool dest_uninitialized = false) { 2344 __ align(CodeEntryAlignment); 2345 StubCodeMark mark(this, "StubRoutines", name); 2346 address start = __ pc(); 2347 2348 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2349 const Register from = rdi; // source array address 2350 const Register to = rsi; // destination array address 2351 const Register qword_count = rdx; // elements count 2352 const Register saved_count = rcx; 2353 2354 __ enter(); // required for proper stackwalking of RuntimeStub frame 2355 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 
2356 2357 if (entry != NULL) { 2358 *entry = __ pc(); 2359 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2360 BLOCK_COMMENT("Entry:"); 2361 } 2362 2363 array_overlap_test(nooverlap_target, Address::times_8); 2364 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 2365 // r9 and r10 may be used to save non-volatile registers 2366 // 'from', 'to' and 'qword_count' are now valid 2367 if (is_oop) { 2368 // Save to and count for store barrier 2369 __ movptr(saved_count, qword_count); 2370 // No registers are destroyed by this call 2371 gen_load_ref_array_barrier(from, saved_count); 2372 gen_write_ref_array_pre_barrier(to, saved_count, dest_uninitialized); 2373 } 2374 2375 __ jmp(L_copy_bytes); 2376 2377 // Copy trailing qwords 2378 __ BIND(L_copy_8_bytes); 2379 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2380 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2381 __ decrement(qword_count); 2382 __ jcc(Assembler::notZero, L_copy_8_bytes); 2383 2384 if (is_oop) { 2385 __ jmp(L_exit); 2386 } else { 2387 restore_arg_regs(); 2388 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2389 __ xorptr(rax, rax); // return 0 2390 __ vzeroupper(); 2391 __ leave(); // required for proper stackwalking of RuntimeStub frame 2392 __ ret(0); 2393 } 2394 2395 // Copy in multi-bytes chunks 2396 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2397 2398 if (is_oop) { 2399 __ BIND(L_exit); 2400 gen_write_ref_array_post_barrier(to, saved_count, rax); 2401 } 2402 restore_arg_regs(); 2403 if (is_oop) { 2404 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2405 } else { 2406 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2407 } 2408 __ vzeroupper(); 2409 __ xorptr(rax, rax); // return 0 2410 __ leave(); // required for proper stackwalking of RuntimeStub frame 2411 __ ret(0); 2412 2413 return start; 2414 } 2415 2416 2417 // Helper for generating a dynamic type check. 2418 // Smashes no registers. 2419 void generate_type_check(Register sub_klass, 2420 Register super_check_offset, 2421 Register super_klass, 2422 Label& L_success) { 2423 assert_different_registers(sub_klass, super_check_offset, super_klass); 2424 2425 BLOCK_COMMENT("type_check:"); 2426 2427 Label L_miss; 2428 2429 __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL, 2430 super_check_offset); 2431 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL); 2432 2433 // Fall through on failure! 
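    // For reference, the shape of the test emitted above, in C-like
    // pseudocode (comment only; 'ckoff' is the super_check_offset value
    // held in the super_check_offset register):
    //
    //   if (sub_klass == super_klass)                      goto L_success;
    //   if (*(address)(sub_klass + ckoff) == super_klass)  goto L_success;
    //   // otherwise scan sub_klass's secondary supers (slow path)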
2434 __ BIND(L_miss); 2435 } 2436 2437 // 2438 // Generate checkcasting array copy stub 2439 // 2440 // Input: 2441 // c_rarg0 - source array address 2442 // c_rarg1 - destination array address 2443 // c_rarg2 - element count, treated as ssize_t, can be zero 2444 // c_rarg3 - size_t ckoff (super_check_offset) 2445 // not Win64 2446 // c_rarg4 - oop ckval (super_klass) 2447 // Win64 2448 // rsp+40 - oop ckval (super_klass) 2449 // 2450 // Output: 2451 // rax == 0 - success 2452 // rax == -1^K - failure, where K is partial transfer count 2453 // 2454 address generate_checkcast_copy(const char *name, address *entry, 2455 bool dest_uninitialized = false) { 2456 2457 Label L_load_element, L_store_element, L_do_card_marks, L_done; 2458 2459 // Input registers (after setup_arg_regs) 2460 const Register from = rdi; // source array address 2461 const Register to = rsi; // destination array address 2462 const Register length = rdx; // elements count 2463 const Register ckoff = rcx; // super_check_offset 2464 const Register ckval = r8; // super_klass 2465 2466 // Registers used as temps (r13, r14 are save-on-entry) 2467 const Register end_from = from; // source array end address 2468 const Register end_to = r13; // destination array end address 2469 const Register count = rdx; // -(count_remaining) 2470 const Register r14_length = r14; // saved copy of length 2471 // End pointers are inclusive, and if length is not zero they point 2472 // to the last unit copied: end_to[0] := end_from[0] 2473 2474 const Register rax_oop = rax; // actual oop copied 2475 const Register r11_klass = r11; // oop._klass 2476 2477 //--------------------------------------------------------------- 2478 // Assembler stub will be used for this call to arraycopy 2479 // if the two arrays are subtypes of Object[] but the 2480 // destination array type is not equal to or a supertype 2481 // of the source type. Each element must be separately 2482 // checked. 2483 2484 __ align(CodeEntryAlignment); 2485 StubCodeMark mark(this, "StubRoutines", name); 2486 address start = __ pc(); 2487 2488 __ enter(); // required for proper stackwalking of RuntimeStub frame 2489 2490 #ifdef ASSERT 2491 // caller guarantees that the arrays really are different 2492 // otherwise, we would have to make conjoint checks 2493 { Label L; 2494 array_overlap_test(L, TIMES_OOP); 2495 __ stop("checkcast_copy within a single array"); 2496 __ bind(L); 2497 } 2498 #endif //ASSERT 2499 2500 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx 2501 // ckoff => rcx, ckval => r8 2502 // r9 and r10 may be used to save non-volatile registers 2503 #ifdef _WIN64 2504 // last argument (#4) is on stack on Win64 2505 __ movptr(ckval, Address(rsp, 6 * wordSize)); 2506 #endif 2507 2508 // Caller of this entry point must set up the argument registers. 2509 if (entry != NULL) { 2510 *entry = __ pc(); 2511 BLOCK_COMMENT("Entry:"); 2512 } 2513 2514 // allocate spill slots for r13, r14 2515 enum { 2516 saved_r13_offset, 2517 saved_r14_offset, 2518 saved_rbp_offset 2519 }; 2520 __ subptr(rsp, saved_rbp_offset * wordSize); 2521 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13); 2522 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14); 2523 2524 // check that int operands are properly extended to size_t 2525 assert_clean_int(length, rax); 2526 assert_clean_int(ckoff, rax); 2527 2528 #ifdef ASSERT 2529 BLOCK_COMMENT("assert consistent ckoff/ckval"); 2530 // The ckoff and ckval must be mutually consistent, 2531 // even though caller generates both. 
2532 { Label L; 2533 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2534 __ cmpl(ckoff, Address(ckval, sco_offset)); 2535 __ jcc(Assembler::equal, L); 2536 __ stop("super_check_offset inconsistent"); 2537 __ bind(L); 2538 } 2539 #endif //ASSERT 2540 2541 // Loop-invariant addresses. They are exclusive end pointers. 2542 Address end_from_addr(from, length, TIMES_OOP, 0); 2543 Address end_to_addr(to, length, TIMES_OOP, 0); 2544 // Loop-variant addresses. They assume post-incremented count < 0. 2545 Address from_element_addr(end_from, count, TIMES_OOP, 0); 2546 Address to_element_addr(end_to, count, TIMES_OOP, 0); 2547 2548 gen_load_ref_array_barrier(from, count); 2549 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); 2550 2551 // Copy from low to high addresses, indexed from the end of each array. 2552 __ lea(end_from, end_from_addr); 2553 __ lea(end_to, end_to_addr); 2554 __ movptr(r14_length, length); // save a copy of the length 2555 assert(length == count, ""); // else fix next line: 2556 __ negptr(count); // negate and test the length 2557 __ jcc(Assembler::notZero, L_load_element); 2558 2559 // Empty array: Nothing to do. 2560 __ xorptr(rax, rax); // return 0 on (trivial) success 2561 __ jmp(L_done); 2562 2563 // ======== begin loop ======== 2564 // (Loop is rotated; its entry is L_load_element.) 2565 // Loop control: 2566 // for (count = -count; count != 0; count++) 2567 // Base pointers src, dst are biased by 8*(count-1),to last element. 2568 __ align(OptoLoopAlignment); 2569 2570 __ BIND(L_store_element); 2571 __ store_heap_oop(to_element_addr, rax_oop); // store the oop 2572 __ increment(count); // increment the count toward zero 2573 __ jcc(Assembler::zero, L_do_card_marks); 2574 2575 // ======== loop entry is here ======== 2576 __ BIND(L_load_element); 2577 __ load_heap_oop(rax_oop, from_element_addr); // load the oop 2578 __ testptr(rax_oop, rax_oop); 2579 __ jcc(Assembler::zero, L_store_element); 2580 2581 __ load_klass(r11_klass, rax_oop);// query the object klass 2582 generate_type_check(r11_klass, ckoff, ckval, L_store_element); 2583 // ======== end loop ======== 2584 2585 // It was a real error; we must depend on the caller to finish the job. 2586 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops. 2587 // Emit GC store barriers for the oops we have copied (r14 + rdx), 2588 // and report their number to the caller. 2589 assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1); 2590 Label L_post_barrier; 2591 __ addptr(r14_length, count); // K = (original - remaining) oops 2592 __ movptr(rax, r14_length); // save the value 2593 __ notptr(rax); // report (-1^K) to caller (does not affect flags) 2594 __ jccb(Assembler::notZero, L_post_barrier); 2595 __ jmp(L_done); // K == 0, nothing was copied, skip post barrier 2596 2597 // Come here on success only. 2598 __ BIND(L_do_card_marks); 2599 __ xorptr(rax, rax); // return 0 on success 2600 2601 __ BIND(L_post_barrier); 2602 gen_write_ref_array_post_barrier(to, r14_length, rscratch1); 2603 2604 // Common exit point (success or failure). 
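    // The value in rax encodes the outcome for the caller: 0 means all
    // elements were copied; otherwise rax == ~K (that is, -1^K), where K
    // is the number of elements copied before the type check failed.
    // A caller recovers K as (comment-only sketch):
    //
    //   if (rc != 0)  K = ~rc;   // rc is the value returned in rax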
2605 __ BIND(L_done); 2606 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 2607 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 2608 restore_arg_regs(); 2609 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free 2610 __ leave(); // required for proper stackwalking of RuntimeStub frame 2611 __ ret(0); 2612 2613 return start; 2614 } 2615 2616 // 2617 // Generate 'unsafe' array copy stub 2618 // Though just as safe as the other stubs, it takes an unscaled 2619 // size_t argument instead of an element count. 2620 // 2621 // Input: 2622 // c_rarg0 - source array address 2623 // c_rarg1 - destination array address 2624 // c_rarg2 - byte count, treated as ssize_t, can be zero 2625 // 2626 // Examines the alignment of the operands and dispatches 2627 // to a long, int, short, or byte copy loop. 2628 // 2629 address generate_unsafe_copy(const char *name, 2630 address byte_copy_entry, address short_copy_entry, 2631 address int_copy_entry, address long_copy_entry) { 2632 2633 Label L_long_aligned, L_int_aligned, L_short_aligned; 2634 2635 // Input registers (before setup_arg_regs) 2636 const Register from = c_rarg0; // source array address 2637 const Register to = c_rarg1; // destination array address 2638 const Register size = c_rarg2; // byte count (size_t) 2639 2640 // Register used as a temp 2641 const Register bits = rax; // test copy of low bits 2642 2643 __ align(CodeEntryAlignment); 2644 StubCodeMark mark(this, "StubRoutines", name); 2645 address start = __ pc(); 2646 2647 __ enter(); // required for proper stackwalking of RuntimeStub frame 2648 2649 // bump this on entry, not on exit: 2650 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr); 2651 2652 __ mov(bits, from); 2653 __ orptr(bits, to); 2654 __ orptr(bits, size); 2655 2656 __ testb(bits, BytesPerLong-1); 2657 __ jccb(Assembler::zero, L_long_aligned); 2658 2659 __ testb(bits, BytesPerInt-1); 2660 __ jccb(Assembler::zero, L_int_aligned); 2661 2662 __ testb(bits, BytesPerShort-1); 2663 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry)); 2664 2665 __ BIND(L_short_aligned); 2666 __ shrptr(size, LogBytesPerShort); // size => short_count 2667 __ jump(RuntimeAddress(short_copy_entry)); 2668 2669 __ BIND(L_int_aligned); 2670 __ shrptr(size, LogBytesPerInt); // size => int_count 2671 __ jump(RuntimeAddress(int_copy_entry)); 2672 2673 __ BIND(L_long_aligned); 2674 __ shrptr(size, LogBytesPerLong); // size => qword_count 2675 __ jump(RuntimeAddress(long_copy_entry)); 2676 2677 return start; 2678 } 2679 2680 // Perform range checks on the proposed arraycopy. 2681 // Kills temp, but nothing else. 2682 // Also, clean the sign bits of src_pos and dst_pos. 
  void arraycopy_range_checks(Register src,     // source array oop (c_rarg0)
                              Register src_pos, // source position (c_rarg1)
                              Register dst,     // destination array oop (c_rarg2)
                              Register dst_pos, // destination position (c_rarg3)
                              Register length,
                              Register temp,
                              Label& L_failed) {
    BLOCK_COMMENT("arraycopy_range_checks:");

    //  if (src_pos + length > arrayOop(src)->length())  FAIL;
    __ movl(temp, length);
    __ addl(temp, src_pos);               // src_pos + length
    __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    //  if (dst_pos + length > arrayOop(dst)->length())  FAIL;
    __ movl(temp, length);
    __ addl(temp, dst_pos);               // dst_pos + length
    __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
    // Move with sign extension can be used since they are positive.
    __ movslq(src_pos, src_pos);
    __ movslq(dst_pos, dst_pos);

    BLOCK_COMMENT("arraycopy_range_checks done");
  }

  //
  //  Generate generic array copy stubs
  //
  //  Input:
  //    c_rarg0    -  src oop
  //    c_rarg1    -  src_pos (32-bits)
  //    c_rarg2    -  dst oop
  //    c_rarg3    -  dst_pos (32-bits)
  // not Win64
  //    c_rarg4    -  element count (32-bits)
  // Win64
  //    rsp+40     -  element count (32-bits)
  //
  //  Output:
  //    rax ==  0  -  success
  //    rax == -1^K - failure, where K is partial transfer count
  //
  address generate_generic_copy(const char *name,
                                address byte_copy_entry, address short_copy_entry,
                                address int_copy_entry, address oop_copy_entry,
                                address long_copy_entry, address checkcast_copy_entry) {

    Label L_failed, L_failed_0, L_objArray;
    Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;

    // Input registers
    const Register src      = c_rarg0;  // source array oop
    const Register src_pos  = c_rarg1;  // source position
    const Register dst      = c_rarg2;  // destination array oop
    const Register dst_pos  = c_rarg3;  // destination position
#ifndef _WIN64
    const Register length   = c_rarg4;
#else
    const Address  length(rsp, 6 * wordSize); // elements count is on stack on Win64
#endif

    { int modulus = CodeEntryAlignment;
      int target  = modulus - 5; // 5 = sizeof jmp(L_failed)
      int advance = target - (__ offset() % modulus);
      if (advance < 0)  advance += modulus;
      if (advance > 0)  __ nop(advance);
    }
    StubCodeMark mark(this, "StubRoutines", name);

    // Short-hop target to L_failed.  Makes for denser prologue code.
    __ BIND(L_failed_0);
    __ jmp(L_failed);
    assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");

    __ align(CodeEntryAlignment);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_generic_array_copy_ctr);

    //-----------------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the following conditions are met:
    //
    // (1) src and dst must not be null.
    // (2) src_pos must not be negative.
    // (3) dst_pos must not be negative.
    // (4) length  must not be negative.
    // (5) src klass and dst klass should be the same and not NULL.
    // (6) src and dst should be arrays.
    // (7) src_pos + length must not exceed length of src.
    // (8) dst_pos + length must not exceed length of dst.
    //

    //  if (src == NULL) return -1;
    __ testptr(src, src);         // src oop
    size_t j1off = __ offset();
    __ jccb(Assembler::zero, L_failed_0);

    //  if (src_pos < 0) return -1;
    __ testl(src_pos, src_pos); // src_pos (32-bits)
    __ jccb(Assembler::negative, L_failed_0);

    //  if (dst == NULL) return -1;
    __ testptr(dst, dst);         // dst oop
    __ jccb(Assembler::zero, L_failed_0);

    //  if (dst_pos < 0) return -1;
    __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
    size_t j4off = __ offset();
    __ jccb(Assembler::negative, L_failed_0);

    // The first four tests are very dense code,
    // but not quite dense enough to put four
    // jumps in a 16-byte instruction fetch buffer.
    // That's good, because some branch predictors
    // do not like jumps so close together.
    // Make sure of this.
    guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");

    // registers used as temp
    const Register r11_length    = r11; // elements count to copy
    const Register r10_src_klass = r10; // array klass

    //  if (length < 0) return -1;
    __ movl(r11_length, length);        // length (elements count, 32-bits value)
    __ testl(r11_length, r11_length);
    __ jccb(Assembler::negative, L_failed_0);

    __ load_klass(r10_src_klass, src);
#ifdef ASSERT
    //  assert(src->klass() != NULL);
    {
      BLOCK_COMMENT("assert klasses not null {");
      Label L1, L2;
      __ testptr(r10_src_klass, r10_src_klass);
      __ jcc(Assembler::notZero, L2);   // it is broken if klass is NULL
      __ bind(L1);
      __ stop("broken null klass");
      __ bind(L2);
      __ load_klass(rax, dst);
      __ cmpq(rax, 0);
      __ jcc(Assembler::equal, L1);     // this would be broken also
      BLOCK_COMMENT("} assert klasses not null done");
    }
#endif

    // Load layout helper (32-bits)
    //
    //  |array_tag|     | header_size | element_type |     |log2_element_size|
    // 32        30    24            16              8     2                 0
    //
    //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
    //

    const int lh_offset = in_bytes(Klass::layout_helper_offset());

    // Handle objArrays completely differently...
    const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
    __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh);
    __ jcc(Assembler::equal, L_objArray);

    //  if (src->klass() != dst->klass()) return -1;
    __ load_klass(rax, dst);
    __ cmpq(r10_src_klass, rax);
    __ jcc(Assembler::notEqual, L_failed);

    const Register rax_lh = rax;  // layout helper
    __ movl(rax_lh, Address(r10_src_klass, lh_offset));

    //  if (!src->is_Array()) return -1;
    __ cmpl(rax_lh, Klass::_lh_neutral_value);
    __ jcc(Assembler::greaterEqual, L_failed);

    // At this point, it is known to be a typeArray (array_tag 0x3).
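    // For reference, the decoding applied further below in C-like
    // pseudocode (comment only; the shift/mask names are the Klass
    // constants used in the generated code):
    //
    //   int lh    = klass->layout_helper();
    //   int hsize = (lh >> _lh_header_size_shift) & _lh_header_size_mask;
    //   int l2es  = lh & _lh_log2_element_size_mask;  // 0, 1, 2 or 3
    //
    // so the copy loop is chosen purely from l2es: 0 -> bytes,
    // 1 -> shorts, 2 -> ints, 3 -> longs.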
2864 #ifdef ASSERT 2865 { 2866 BLOCK_COMMENT("assert primitive array {"); 2867 Label L; 2868 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift)); 2869 __ jcc(Assembler::greaterEqual, L); 2870 __ stop("must be a primitive array"); 2871 __ bind(L); 2872 BLOCK_COMMENT("} assert primitive array done"); 2873 } 2874 #endif 2875 2876 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2877 r10, L_failed); 2878 2879 // TypeArrayKlass 2880 // 2881 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 2882 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 2883 // 2884 2885 const Register r10_offset = r10; // array offset 2886 const Register rax_elsize = rax_lh; // element size 2887 2888 __ movl(r10_offset, rax_lh); 2889 __ shrl(r10_offset, Klass::_lh_header_size_shift); 2890 __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset 2891 __ addptr(src, r10_offset); // src array offset 2892 __ addptr(dst, r10_offset); // dst array offset 2893 BLOCK_COMMENT("choose copy loop based on element size"); 2894 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize 2895 2896 // next registers should be set before the jump to corresponding stub 2897 const Register from = c_rarg0; // source array address 2898 const Register to = c_rarg1; // destination array address 2899 const Register count = c_rarg2; // elements count 2900 2901 // 'from', 'to', 'count' registers should be set in such order 2902 // since they are the same as 'src', 'src_pos', 'dst'. 2903 2904 __ BIND(L_copy_bytes); 2905 __ cmpl(rax_elsize, 0); 2906 __ jccb(Assembler::notEqual, L_copy_shorts); 2907 __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr 2908 __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr 2909 __ movl2ptr(count, r11_length); // length 2910 __ jump(RuntimeAddress(byte_copy_entry)); 2911 2912 __ BIND(L_copy_shorts); 2913 __ cmpl(rax_elsize, LogBytesPerShort); 2914 __ jccb(Assembler::notEqual, L_copy_ints); 2915 __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr 2916 __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr 2917 __ movl2ptr(count, r11_length); // length 2918 __ jump(RuntimeAddress(short_copy_entry)); 2919 2920 __ BIND(L_copy_ints); 2921 __ cmpl(rax_elsize, LogBytesPerInt); 2922 __ jccb(Assembler::notEqual, L_copy_longs); 2923 __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr 2924 __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr 2925 __ movl2ptr(count, r11_length); // length 2926 __ jump(RuntimeAddress(int_copy_entry)); 2927 2928 __ BIND(L_copy_longs); 2929 #ifdef ASSERT 2930 { 2931 BLOCK_COMMENT("assert long copy {"); 2932 Label L; 2933 __ cmpl(rax_elsize, LogBytesPerLong); 2934 __ jcc(Assembler::equal, L); 2935 __ stop("must be long copy, but elsize is wrong"); 2936 __ bind(L); 2937 BLOCK_COMMENT("} assert long copy done"); 2938 } 2939 #endif 2940 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr 2941 __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr 2942 __ movl2ptr(count, r11_length); // length 2943 __ jump(RuntimeAddress(long_copy_entry)); 2944 2945 // ObjArrayKlass 2946 __ BIND(L_objArray); 2947 // live at this point: r10_src_klass, r11_length, src[_pos], dst[_pos] 2948 2949 Label L_plain_copy, L_checkcast_copy; 2950 // test array classes for subtyping 2951 __ load_klass(rax, dst); 2952 __ cmpq(r10_src_klass, rax); // usual case is exact equality 2953 __ 
jcc(Assembler::notEqual, L_checkcast_copy); 2954 2955 // Identically typed arrays can be copied without element-wise checks. 2956 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2957 r10, L_failed); 2958 2959 __ lea(from, Address(src, src_pos, TIMES_OOP, 2960 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr 2961 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2962 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr 2963 __ movl2ptr(count, r11_length); // length 2964 __ BIND(L_plain_copy); 2965 __ jump(RuntimeAddress(oop_copy_entry)); 2966 2967 __ BIND(L_checkcast_copy); 2968 // live at this point: r10_src_klass, r11_length, rax (dst_klass) 2969 { 2970 // Before looking at dst.length, make sure dst is also an objArray. 2971 __ cmpl(Address(rax, lh_offset), objArray_lh); 2972 __ jcc(Assembler::notEqual, L_failed); 2973 2974 // It is safe to examine both src.length and dst.length. 2975 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2976 rax, L_failed); 2977 2978 const Register r11_dst_klass = r11; 2979 __ load_klass(r11_dst_klass, dst); // reload 2980 2981 // Marshal the base address arguments now, freeing registers. 2982 __ lea(from, Address(src, src_pos, TIMES_OOP, 2983 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2984 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2985 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2986 __ movl(count, length); // length (reloaded) 2987 Register sco_temp = c_rarg3; // this register is free now 2988 assert_different_registers(from, to, count, sco_temp, 2989 r11_dst_klass, r10_src_klass); 2990 assert_clean_int(count, sco_temp); 2991 2992 // Generate the type check. 2993 const int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2994 __ movl(sco_temp, Address(r11_dst_klass, sco_offset)); 2995 assert_clean_int(sco_temp, rax); 2996 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy); 2997 2998 // Fetch destination element klass from the ObjArrayKlass header. 2999 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 3000 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset)); 3001 __ movl( sco_temp, Address(r11_dst_klass, sco_offset)); 3002 assert_clean_int(sco_temp, rax); 3003 3004 // the checkcast_copy loop needs two extra arguments: 3005 assert(c_rarg3 == sco_temp, "#3 already in place"); 3006 // Set up arguments for checkcast_copy_entry. 
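      // The checkcast entry expects from, to and count already in
      // rdi/rsi/rdx, the super_check_offset in rcx (c_rarg3 on Linux) and
      // the destination element klass in r8; setup_arg_regs(4) below
      // establishes the first four, and r8 is loaded explicitly.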
3007 setup_arg_regs(4); 3008 __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris 3009 __ jump(RuntimeAddress(checkcast_copy_entry)); 3010 } 3011 3012 __ BIND(L_failed); 3013 __ xorptr(rax, rax); 3014 __ notptr(rax); // return -1 3015 __ leave(); // required for proper stackwalking of RuntimeStub frame 3016 __ ret(0); 3017 3018 return start; 3019 } 3020 3021 void generate_arraycopy_stubs() { 3022 address entry; 3023 address entry_jbyte_arraycopy; 3024 address entry_jshort_arraycopy; 3025 address entry_jint_arraycopy; 3026 address entry_oop_arraycopy; 3027 address entry_jlong_arraycopy; 3028 address entry_checkcast_arraycopy; 3029 3030 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, 3031 "jbyte_disjoint_arraycopy"); 3032 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy, 3033 "jbyte_arraycopy"); 3034 3035 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, 3036 "jshort_disjoint_arraycopy"); 3037 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy, 3038 "jshort_arraycopy"); 3039 3040 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry, 3041 "jint_disjoint_arraycopy"); 3042 StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry, 3043 &entry_jint_arraycopy, "jint_arraycopy"); 3044 3045 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry, 3046 "jlong_disjoint_arraycopy"); 3047 StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry, 3048 &entry_jlong_arraycopy, "jlong_arraycopy"); 3049 3050 3051 if (UseCompressedOops) { 3052 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry, 3053 "oop_disjoint_arraycopy"); 3054 StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry, 3055 &entry_oop_arraycopy, "oop_arraycopy"); 3056 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry, 3057 "oop_disjoint_arraycopy_uninit", 3058 /*dest_uninitialized*/true); 3059 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry, 3060 NULL, "oop_arraycopy_uninit", 3061 /*dest_uninitialized*/true); 3062 } else { 3063 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry, 3064 "oop_disjoint_arraycopy"); 3065 StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry, 3066 &entry_oop_arraycopy, "oop_arraycopy"); 3067 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry, 3068 "oop_disjoint_arraycopy_uninit", 3069 /*dest_uninitialized*/true); 3070 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry, 3071 NULL, "oop_arraycopy_uninit", 3072 /*dest_uninitialized*/true); 3073 } 3074 3075 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 3076 StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, 3077 /*dest_uninitialized*/true); 3078 3079 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", 3080 entry_jbyte_arraycopy, 3081 entry_jshort_arraycopy, 3082 entry_jint_arraycopy, 3083 entry_jlong_arraycopy); 3084 StubRoutines::_generic_arraycopy = 
generate_generic_copy("generic_arraycopy",
                                          entry_jbyte_arraycopy,
                                          entry_jshort_arraycopy,
                                          entry_jint_arraycopy,
                                          entry_oop_arraycopy,
                                          entry_jlong_arraycopy,
                                          entry_checkcast_arraycopy);

    StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
    StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
    StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
    StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
    StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
    StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");

    // We don't generate specialized code for HeapWord-aligned source
    // arrays, so just use the code we've already generated
    StubRoutines::_arrayof_jbyte_disjoint_arraycopy  = StubRoutines::_jbyte_disjoint_arraycopy;
    StubRoutines::_arrayof_jbyte_arraycopy           = StubRoutines::_jbyte_arraycopy;

    StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy;
    StubRoutines::_arrayof_jshort_arraycopy          = StubRoutines::_jshort_arraycopy;

    StubRoutines::_arrayof_jint_disjoint_arraycopy   = StubRoutines::_jint_disjoint_arraycopy;
    StubRoutines::_arrayof_jint_arraycopy            = StubRoutines::_jint_arraycopy;

    StubRoutines::_arrayof_jlong_disjoint_arraycopy  = StubRoutines::_jlong_disjoint_arraycopy;
    StubRoutines::_arrayof_jlong_arraycopy           = StubRoutines::_jlong_arraycopy;

    StubRoutines::_arrayof_oop_disjoint_arraycopy    = StubRoutines::_oop_disjoint_arraycopy;
    StubRoutines::_arrayof_oop_arraycopy             = StubRoutines::_oop_arraycopy;

    StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit;
    StubRoutines::_arrayof_oop_arraycopy_uninit          = StubRoutines::_oop_arraycopy_uninit;
  }

  // AES intrinsic stubs
  enum {AESBlockSize = 16};

  address generate_key_shuffle_mask() {
    __ align(16);
    StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask");
    address start = __ pc();
    __ emit_data64( 0x0405060700010203, relocInfo::none );
    __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none );
    return start;
  }

  address generate_counter_shuffle_mask() {
    __ align(16);
    StubCodeMark mark(this, "StubRoutines", "counter_shuffle_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }

  // Utility routine for loading a 128-bit key word in little endian format;
  // can optionally specify that the shuffle mask is already in an xmm register
  void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
    __ movdqu(xmmdst, Address(key, offset));
    if (xmm_shuf_mask != NULL) {
      __ pshufb(xmmdst, xmm_shuf_mask);
    } else {
      __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    }
  }

  // Utility routine for increasing a 128-bit counter (the iv in CTR mode)
  void inc_counter(Register reg, XMMRegister xmmdst, int inc_delta, Label& next_block) {
    __ pextrq(reg, xmmdst, 0x0);
    __ addq(reg, inc_delta);
    __ pinsrq(xmmdst, reg, 0x0);
    __ jcc(Assembler::carryClear, next_block); // jump if no carry
    __ pextrq(reg, xmmdst, 0x01); // Carry
    __ addq(reg, 0x01);
    __ 
pinsrq(xmmdst, reg, 0x01); //Carry end 3161 __ BIND(next_block); // next instruction 3162 } 3163 3164 // Arguments: 3165 // 3166 // Inputs: 3167 // c_rarg0 - source byte array address 3168 // c_rarg1 - destination byte array address 3169 // c_rarg2 - K (key) in little endian int array 3170 // 3171 address generate_aescrypt_encryptBlock() { 3172 assert(UseAES, "need AES instructions and misaligned SSE support"); 3173 __ align(CodeEntryAlignment); 3174 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); 3175 Label L_doLast; 3176 address start = __ pc(); 3177 3178 const Register from = c_rarg0; // source array address 3179 const Register to = c_rarg1; // destination array address 3180 const Register key = c_rarg2; // key array address 3181 const Register keylen = rax; 3182 3183 const XMMRegister xmm_result = xmm0; 3184 const XMMRegister xmm_key_shuf_mask = xmm1; 3185 // On win64 xmm6-xmm15 must be preserved so don't use them. 3186 const XMMRegister xmm_temp1 = xmm2; 3187 const XMMRegister xmm_temp2 = xmm3; 3188 const XMMRegister xmm_temp3 = xmm4; 3189 const XMMRegister xmm_temp4 = xmm5; 3190 3191 __ enter(); // required for proper stackwalking of RuntimeStub frame 3192 3193 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3194 // context for the registers used, where all instructions below are using 128-bit mode 3195 // On EVEX without VL and BW, these instructions will all be AVX. 3196 if (VM_Version::supports_avx512vlbw()) { 3197 __ movl(rax, 0xffff); 3198 __ kmovql(k1, rax); 3199 } 3200 3201 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3202 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3203 3204 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3205 __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input 3206 3207 // For encryption, the java expanded key ordering is just what we need 3208 // we don't know if the key is aligned, hence not using load-execute form 3209 3210 load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask); 3211 __ pxor(xmm_result, xmm_temp1); 3212 3213 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3214 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3215 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3216 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3217 3218 __ aesenc(xmm_result, xmm_temp1); 3219 __ aesenc(xmm_result, xmm_temp2); 3220 __ aesenc(xmm_result, xmm_temp3); 3221 __ aesenc(xmm_result, xmm_temp4); 3222 3223 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3224 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3225 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3226 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3227 3228 __ aesenc(xmm_result, xmm_temp1); 3229 __ aesenc(xmm_result, xmm_temp2); 3230 __ aesenc(xmm_result, xmm_temp3); 3231 __ aesenc(xmm_result, xmm_temp4); 3232 3233 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3234 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3235 3236 __ cmpl(keylen, 44); 3237 __ jccb(Assembler::equal, L_doLast); 3238 3239 __ aesenc(xmm_result, xmm_temp1); 3240 __ aesenc(xmm_result, xmm_temp2); 3241 3242 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3243 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3244 3245 __ cmpl(keylen, 52); 3246 __ jccb(Assembler::equal, L_doLast); 3247 3248 __ aesenc(xmm_result, xmm_temp1); 3249 __ aesenc(xmm_result, xmm_temp2); 3250 3251 load_key(xmm_temp1, key, 0xd0, 
xmm_key_shuf_mask); 3252 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3253 3254 __ BIND(L_doLast); 3255 __ aesenc(xmm_result, xmm_temp1); 3256 __ aesenclast(xmm_result, xmm_temp2); 3257 __ movdqu(Address(to, 0), xmm_result); // store the result 3258 __ xorptr(rax, rax); // return 0 3259 __ leave(); // required for proper stackwalking of RuntimeStub frame 3260 __ ret(0); 3261 3262 return start; 3263 } 3264 3265 3266 // Arguments: 3267 // 3268 // Inputs: 3269 // c_rarg0 - source byte array address 3270 // c_rarg1 - destination byte array address 3271 // c_rarg2 - K (key) in little endian int array 3272 // 3273 address generate_aescrypt_decryptBlock() { 3274 assert(UseAES, "need AES instructions and misaligned SSE support"); 3275 __ align(CodeEntryAlignment); 3276 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); 3277 Label L_doLast; 3278 address start = __ pc(); 3279 3280 const Register from = c_rarg0; // source array address 3281 const Register to = c_rarg1; // destination array address 3282 const Register key = c_rarg2; // key array address 3283 const Register keylen = rax; 3284 3285 const XMMRegister xmm_result = xmm0; 3286 const XMMRegister xmm_key_shuf_mask = xmm1; 3287 // On win64 xmm6-xmm15 must be preserved so don't use them. 3288 const XMMRegister xmm_temp1 = xmm2; 3289 const XMMRegister xmm_temp2 = xmm3; 3290 const XMMRegister xmm_temp3 = xmm4; 3291 const XMMRegister xmm_temp4 = xmm5; 3292 3293 __ enter(); // required for proper stackwalking of RuntimeStub frame 3294 3295 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3296 // context for the registers used, where all instructions below are using 128-bit mode 3297 // On EVEX without VL and BW, these instructions will all be AVX. 3298 if (VM_Version::supports_avx512vlbw()) { 3299 __ movl(rax, 0xffff); 3300 __ kmovql(k1, rax); 3301 } 3302 3303 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3304 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3305 3306 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3307 __ movdqu(xmm_result, Address(from, 0)); 3308 3309 // for decryption java expanded key ordering is rotated one position from what we want 3310 // so we start from 0x10 here and hit 0x00 last 3311 // we don't know if the key is aligned, hence not using load-execute form 3312 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3313 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3314 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3315 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3316 3317 __ pxor (xmm_result, xmm_temp1); 3318 __ aesdec(xmm_result, xmm_temp2); 3319 __ aesdec(xmm_result, xmm_temp3); 3320 __ aesdec(xmm_result, xmm_temp4); 3321 3322 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3323 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3324 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3325 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3326 3327 __ aesdec(xmm_result, xmm_temp1); 3328 __ aesdec(xmm_result, xmm_temp2); 3329 __ aesdec(xmm_result, xmm_temp3); 3330 __ aesdec(xmm_result, xmm_temp4); 3331 3332 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3333 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3334 load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask); 3335 3336 __ cmpl(keylen, 44); 3337 __ jccb(Assembler::equal, L_doLast); 3338 3339 __ aesdec(xmm_result, xmm_temp1); 3340 __ aesdec(xmm_result, xmm_temp2); 3341 3342 
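    // keylen was not 44 (AES-128 would have branched to L_doLast), so at
    // least two more rounds remain: the 0xb0/0xc0 round keys are loaded
    // next and keylen is re-tested to distinguish AES-192 from AES-256.
    // (Expanded key lengths of 44/52/60 ints correspond to 10/12/14 rounds.)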
  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - r vector byte array address
  //   c_rarg4   - input length
  //
  // Output:
  //   rax       - input length
  //
  address generate_cipherBlockChaining_encryptAESCrypt() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
    address start = __ pc();

    Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256;
    const Register from = c_rarg0; // source array address
    const Register to   = c_rarg1; // destination array address
    const Register key  = c_rarg2; // key array address
    const Register rvec = c_rarg3; // r byte array initialized from initvector array address
                                   // and left with the results of the last encryption block
#ifndef _WIN64
    const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16)
#else
    const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64
    const Register len_reg = r11;             // pick the volatile windows register
#endif
    const Register pos = rax;

    // xmm register assignments for the loops below
    const XMMRegister xmm_result = xmm0;
    const XMMRegister xmm_temp   = xmm1;
    // keys 0-10 preloaded into xmm2-xmm12
    const int XMM_REG_NUM_KEY_FIRST = 2;
    const int XMM_REG_NUM_KEY_LAST  = 15;
    const XMMRegister xmm_key0  = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
    const XMMRegister xmm_key10 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10);
    const XMMRegister xmm_key11 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11);
    const XMMRegister xmm_key12 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12);
    const XMMRegister xmm_key13 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13);

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode.
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

#ifdef _WIN64
    // on win64, fill len_reg from stack position
    __ movl(len_reg, len_mem);
#else
    __ push(len_reg); // Save
#endif

    const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front
    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0
    for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) {
      load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
      offset += 0x10;
    }
    __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec

    // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256))
    __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
    __ cmpl(rax, 44);
    __ jcc(Assembler::notEqual, L_key_192_256);

    // 128 bit code follows here
    __ movptr(pos, 0);
    __ align(OptoLoopAlignment);

    __ BIND(L_loopTop_128);
    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
    __ pxor  (xmm_result, xmm_temp); // xor with the current r vector
    __ pxor  (xmm_result, xmm_key0); // do the aes rounds
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) {
      __ aesenc(xmm_result, as_XMMRegister(rnum));
    }
    __ aesenclast(xmm_result, xmm_key10);
    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_loopTop_128);

    __ BIND(L_exit);
    __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object

#ifdef _WIN64
    __ movl(rax, len_mem);
#else
    __ pop(rax); // return length
#endif
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    __ BIND(L_key_192_256);
    // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
    load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask);
    __ cmpl(rax, 52);
    __ jcc(Assembler::notEqual, L_key_256);

    // 192-bit code follows here (could be changed to use more xmm registers)
    __ movptr(pos, 0);
    __ align(OptoLoopAlignment);

    __ BIND(L_loopTop_192);
    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
    __ pxor  (xmm_result, xmm_temp); // xor with the current r vector
    __ pxor  (xmm_result, xmm_key0); // do the aes rounds
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) {
      __ aesenc(xmm_result, as_XMMRegister(rnum));
    }
    __ aesenclast(xmm_result, xmm_key12);
    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_loopTop_192);
    __ jmp(L_exit);

    __ BIND(L_key_256);
    // 256-bit code follows here (could be changed to use more xmm registers)
    load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask);
    __ movptr(pos, 0);
    __ align(OptoLoopAlignment);

    __ BIND(L_loopTop_256);
    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
    __ pxor  (xmm_result, xmm_temp); // xor with the current r vector
    __ pxor  (xmm_result, xmm_key0); // do the aes rounds
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) {
      __ aesenc(xmm_result, as_XMMRegister(rnum));
    }
    load_key(xmm_temp, key, 0xe0);
    __ aesenclast(xmm_result, xmm_temp);
    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_loopTop_256);
    __ jmp(L_exit);

    return start;
  }
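  // For reference only: the three loops above are plain CBC encryption,
  // C[i] = E_K(P[i] ^ C[i-1]) with C[-1] = IV (the r vector), which is why
  // encryption cannot be parallelized across blocks. A per-block sketch
  // (hypothetical block helpers):
  //
  //   block_t r = load16(rvec);
  //   for (size_t i = 0; i < nblocks; i++) {
  //     r = aes_encrypt_block(xor16(load16(src + 16*i), r), key);
  //     store16(dst + 16*i, r);
  //   }
  //   store16(rvec, r); // the chain carries into the next invocation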
  // Safefetch stubs.
  void generate_safefetch(const char* name, int size, address* entry,
                          address* fault_pc, address* continuation_pc) {
    // safefetch signatures:
    //   int      SafeFetch32(int*      adr, int      errValue);
    //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
    //
    // arguments:
    //   c_rarg0 = adr
    //   c_rarg1 = errValue
    //
    // result:
    //   rax = *adr or errValue

    StubCodeMark mark(this, "StubRoutines", name);

    // Entry point, pc or function descriptor.
    *entry = __ pc();

    // Load *adr into c_rarg1, may fault.
    *fault_pc = __ pc();
    switch (size) {
      case 4:
        // int32_t
        __ movl(c_rarg1, Address(c_rarg0, 0));
        break;
      case 8:
        // int64_t
        __ movq(c_rarg1, Address(c_rarg0, 0));
        break;
      default:
        ShouldNotReachHere();
    }

    // return errValue or *adr
    *continuation_pc = __ pc();
    __ movq(rax, c_rarg1);
    __ ret(0);
  }
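  // For reference only: if the load at fault_pc traps, the VM's signal handler
  // resumes execution at continuation_pc, where c_rarg1 still holds errValue.
  // A sketch of a typical call site:
  //
  //   int v = SafeFetch32((int*)maybe_bad_ptr, -1);
  //   if (v == -1) { /* address was unreadable (or genuinely held -1) */ }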
  // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time
  // to hide instruction latency
  //
  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - r vector byte array address
  //   c_rarg4   - input length
  //
  // Output:
  //   rax       - input length
  //
  address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
    address start = __ pc();

    const Register from = c_rarg0; // source array address
    const Register to   = c_rarg1; // destination array address
    const Register key  = c_rarg2; // key array address
    const Register rvec = c_rarg3; // r byte array initialized from initvector array address
                                   // and left with the results of the last encryption block
#ifndef _WIN64
    const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16)
#else
    const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64
    const Register len_reg = r11;             // pick the volatile windows register
#endif
    const Register pos = rax;

    const int PARALLEL_FACTOR = 4;
    const int ROUNDS[3] = { 10, 12, 14 }; // aes rounds for key128, key192, key256

    Label L_exit;
    Label L_singleBlock_loopTopHead[3];  // 128, 192, 256
    Label L_singleBlock_loopTopHead2[3]; // 128, 192, 256
    Label L_singleBlock_loopTop[3];      // 128, 192, 256
    Label L_multiBlock_loopTopHead[3];   // 128, 192, 256
    Label L_multiBlock_loopTop[3];       // 128, 192, 256

    // keys 0-10 preloaded into xmm5-xmm15
    const int XMM_REG_NUM_KEY_FIRST = 5;
    const int XMM_REG_NUM_KEY_LAST  = 15;
    const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
    const XMMRegister xmm_key_last  = as_XMMRegister(XMM_REG_NUM_KEY_LAST);

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode.
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

#ifdef _WIN64
    // on win64, fill len_reg from stack position
    __ movl(len_reg, len_mem);
#else
    __ push(len_reg); // Save
#endif
    __ push(rbx);
    // the java expanded key ordering is rotated one position from what we want
    // so we start from 0x10 here and hit 0x00 last
    const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front
    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00
    for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) {
      load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
      offset += 0x10;
    }
    load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask);

    const XMMRegister xmm_prev_block_cipher = xmm1; // holds cipher of previous block

    // registers holding the four results in the parallelized loop
    const XMMRegister xmm_result0 = xmm0;
    const XMMRegister xmm_result1 = xmm2;
    const XMMRegister xmm_result2 = xmm3;
    const XMMRegister xmm_result3 = xmm4;

    __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec

    __ xorptr(pos, pos);

    // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256))
    __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
    __ cmpl(rbx, 52);
    __ jcc(Assembler::equal, L_multiBlock_loopTopHead[1]);
    __ cmpl(rbx, 60);
    __ jcc(Assembler::equal, L_multiBlock_loopTopHead[2]);

#define DoFour(opc, src_reg)        \
    __ opc(xmm_result0, src_reg);   \
    __ opc(xmm_result1, src_reg);   \
    __ opc(xmm_result2, src_reg);   \
    __ opc(xmm_result3, src_reg);

    for (int k = 0; k < 3; ++k) {
      __ BIND(L_multiBlock_loopTopHead[k]);
      if (k != 0) {
        __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left
        __ jcc(Assembler::less, L_singleBlock_loopTopHead2[k]);
      }
      if (k == 1) {
        __ subptr(rsp, 6 * wordSize);
        __ movdqu(Address(rsp, 0), xmm15); // save last_key from xmm15
        load_key(xmm15, key, 0xb0);        // 0xb0; 192-bit key goes up to 0xc0
        __ movdqu(Address(rsp, 2 * wordSize), xmm15);
        load_key(xmm1, key, 0xc0);         // 0xc0;
        __ movdqu(Address(rsp, 4 * wordSize), xmm1);
      } else if (k == 2) {
        __ subptr(rsp, 10 * wordSize);
        __ movdqu(Address(rsp, 0), xmm15); // save last_key from xmm15
        load_key(xmm15, key, 0xd0);        // 0xd0; 256-bit key goes up to 0xe0
        __ movdqu(Address(rsp, 6 * wordSize), xmm15);
        load_key(xmm1, key, 0xe0);         // 0xe0;
        __ movdqu(Address(rsp, 8 * wordSize), xmm1);
        load_key(xmm15, key, 0xb0);        // 0xb0;
        __ movdqu(Address(rsp, 2 * wordSize), xmm15);
        load_key(xmm1, key, 0xc0);         // 0xc0;
        __ movdqu(Address(rsp, 4 * wordSize), xmm1);
      }
      __ align(OptoLoopAlignment);
      __ BIND(L_multiBlock_loopTop[k]);
      __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left
      __ jcc(Assembler::less, L_singleBlock_loopTopHead[k]);

      if (k != 0) {
        __ movdqu(xmm15, Address(rsp, 2 * wordSize));
        __ movdqu(xmm1, Address(rsp, 4 * wordSize));
      }

      __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); // get next 4 blocks into xmmresult registers
      __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize));

      DoFour(pxor, xmm_key_first);
      if (k == 0) {
        for (int rnum = 1; rnum < ROUNDS[k]; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        DoFour(aesdeclast, xmm_key_last);
      } else if (k == 1) {
        for (int rnum = 1; rnum <= ROUNDS[k]-2; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again.
        DoFour(aesdec, xmm1);                     // key : 0xc0
        __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again
        DoFour(aesdeclast, xmm_key_last);
      } else if (k == 2) {
        for (int rnum = 1; rnum <= ROUNDS[k] - 4; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        DoFour(aesdec, xmm1); // key : 0xc0
        __ movdqu(xmm15, Address(rsp, 6 * wordSize));
        __ movdqu(xmm1, Address(rsp, 8 * wordSize));
        DoFour(aesdec, xmm15); // key : 0xd0
        __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again.
        DoFour(aesdec, xmm1);                     // key : 0xe0
        __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again
        DoFour(aesdeclast, xmm_key_last);
      }

      // for each result, xor with the r vector of previous cipher block
      __ pxor(xmm_result0, xmm_prev_block_cipher);
      __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ pxor(xmm_result1, xmm_prev_block_cipher);
      __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ pxor(xmm_result2, xmm_prev_block_cipher);
      __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ pxor(xmm_result3, xmm_prev_block_cipher);
      __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3 * AESBlockSize)); // this will carry over to next set of blocks
      if (k != 0) {
        __ movdqu(Address(rvec, 0x00), xmm_prev_block_cipher);
      }

      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output
      __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1);
      __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2);
      __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3);

      __ addptr(pos, PARALLEL_FACTOR * AESBlockSize);
      __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize);
      __ jmp(L_multiBlock_loopTop[k]);

      // registers used in the non-parallelized loops
      // xmm register assignments for the loops below
      const XMMRegister xmm_result = xmm0;
      const XMMRegister xmm_prev_block_cipher_save = xmm2;
      const XMMRegister xmm_key11 = xmm3;
      const XMMRegister xmm_key12 = xmm4;
      const XMMRegister key_tmp   = xmm4;

      __ BIND(L_singleBlock_loopTopHead[k]);
      if (k == 1) {
        __ addptr(rsp, 6 * wordSize);
      } else if (k == 2) {
        __ addptr(rsp, 10 * wordSize);
      }
      __ cmpptr(len_reg, 0); // any blocks left?
      __ jcc(Assembler::equal, L_exit);
      __ BIND(L_singleBlock_loopTopHead2[k]);
      if (k == 1) {
        load_key(xmm_key11, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0
        load_key(xmm_key12, key, 0xc0); // 0xc0; 192-bit key goes up to 0xc0
      }
      if (k == 2) {
        load_key(xmm_key11, key, 0xb0); // 0xb0; 256-bit key goes up to 0xe0
      }
      __ align(OptoLoopAlignment);
      __ BIND(L_singleBlock_loopTop[k]);
      __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
      __ movdqa(xmm_prev_block_cipher_save, xmm_result);              // save for next r vector
      __ pxor(xmm_result, xmm_key_first); // do the aes dec rounds
      for (int rnum = 1; rnum <= 9; rnum++) {
        __ aesdec(xmm_result, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
      }
      if (k == 1) {
        __ aesdec(xmm_result, xmm_key11);
        __ aesdec(xmm_result, xmm_key12);
      }
      if (k == 2) {
        __ aesdec(xmm_result, xmm_key11);
        load_key(key_tmp, key, 0xc0);
        __ aesdec(xmm_result, key_tmp);
        load_key(key_tmp, key, 0xd0);
        __ aesdec(xmm_result, key_tmp);
        load_key(key_tmp, key, 0xe0);
        __ aesdec(xmm_result, key_tmp);
      }

      __ aesdeclast(xmm_result, xmm_key_last);    // xmm15 always came from key+0
      __ pxor(xmm_result, xmm_prev_block_cipher); // xor with the current r vector
      __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
      // no need to store r to memory until we exit
      __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block
      __ addptr(pos, AESBlockSize);
      __ subptr(len_reg, AESBlockSize);
      __ jcc(Assembler::notEqual, L_singleBlock_loopTop[k]);
      if (k != 2) {
        __ jmp(L_exit);
      }
    } // for 128/192/256

    __ BIND(L_exit);
    __ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object
    __ pop(rbx);
#ifdef _WIN64
    __ movl(rax, len_mem);
#else
    __ pop(rax); // return length
#endif
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }
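  // For reference only: CBC decryption parallelizes because each output needs
  // only ciphertext inputs, P[i] = D_K(C[i]) ^ C[i-1]. A 4-way sketch of the
  // multi-block loop above (hypothetical helpers):
  //
  //   while (n >= 4) {
  //     for (int j = 0; j < 4; j++) t[j] = aes_decrypt_block(c[i+j], key);
  //     for (int j = 0; j < 4; j++) p[i+j] = t[j] ^ (i+j == 0 ? iv : c[i+j-1]);
  //     i += 4; n -= 4;
  //   }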
  address generate_upper_word_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "upper_word_mask");
    address start = __ pc();
    __ emit_data64(0x0000000000000000, relocInfo::none);
    __ emit_data64(0xFFFFFFFF00000000, relocInfo::none);
    return start;
  }

  address generate_shuffle_byte_flip_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }
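  // For reference only: these constants are pshufb controls. The semantics of
  // pshufb are, per byte lane,
  //
  //   dst[i] = (mask[i] & 0x80) ? 0 : src[mask[i] & 0x0f];
  //
  // so shuffle_byte_flip_mask (bytes 0f..00 in memory) reverses all 16 bytes,
  // turning little-endian input into the big-endian word order SHA-1 expects.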
  // ofs and limit are used for multi-block byte array.
  // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
  address generate_sha1_implCompress(bool multi_block, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf   = c_rarg0;
    Register state = c_rarg1;
    Register ofs   = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister abcd = xmm0;
    const XMMRegister e0   = xmm1;
    const XMMRegister e1   = xmm2;
    const XMMRegister msg0 = xmm3;

    const XMMRegister msg1 = xmm4;
    const XMMRegister msg2 = xmm5;
    const XMMRegister msg3 = xmm6;
    const XMMRegister shuf_mask = xmm7;

    __ enter();

    __ subptr(rsp, 4 * wordSize);

    __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask,
                 buf, state, ofs, limit, rsp, multi_block);

    __ addptr(rsp, 4 * wordSize);

    __ leave();
    __ ret(0);
    return start;
  }

  address generate_pshuffle_byte_flip_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask");
    address start = __ pc();
    __ emit_data64(0x0405060700010203, relocInfo::none);
    __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);

    if (VM_Version::supports_avx2()) {
      __ emit_data64(0x0405060700010203, relocInfo::none); // second copy
      __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
      // _SHUF_00BA
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      // _SHUF_DC00
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
    }

    return start;
  }

  // Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
  address generate_pshuffle_byte_flip_mask_sha512() {
    __ align(32);
    StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask_sha512");
    address start = __ pc();
    if (VM_Version::supports_avx2()) {
      __ emit_data64(0x0001020304050607, relocInfo::none); // PSHUFFLE_BYTE_FLIP_MASK
      __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
      __ emit_data64(0x1011121314151617, relocInfo::none);
      __ emit_data64(0x18191a1b1c1d1e1f, relocInfo::none);
      __ emit_data64(0x0000000000000000, relocInfo::none); // MASK_YMM_LO
      __ emit_data64(0x0000000000000000, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
    }

    return start;
  }
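  // For reference only: in the multi_block variants, ofs/limit implement the
  // contract of DigestBase.implCompressMultiBlock: compress successive blocks
  // while ofs <= limit and return the updated ofs. A sketch of the driving
  // loop the generated code is equivalent to (64-byte blocks for SHA-1 and
  // SHA-256; SHA-512 uses 128):
  //
  //   while (ofs <= limit) { sha_compress(state, buf + ofs); ofs += 64; }
  //   return ofs;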
  // ofs and limit are used for multi-block byte array.
  // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
  address generate_sha256_implCompress(bool multi_block, const char *name) {
    assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), "");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf   = c_rarg0;
    Register state = c_rarg1;
    Register ofs   = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister msg     = xmm0;
    const XMMRegister state0  = xmm1;
    const XMMRegister state1  = xmm2;
    const XMMRegister msgtmp0 = xmm3;

    const XMMRegister msgtmp1 = xmm4;
    const XMMRegister msgtmp2 = xmm5;
    const XMMRegister msgtmp3 = xmm6;
    const XMMRegister msgtmp4 = xmm7;

    const XMMRegister shuf_mask = xmm8;

    __ enter();

    __ subptr(rsp, 4 * wordSize);

    if (VM_Version::supports_sha()) {
      __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
                     buf, state, ofs, limit, rsp, multi_block, shuf_mask);
    } else if (VM_Version::supports_avx2()) {
      __ sha256_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
                     buf, state, ofs, limit, rsp, multi_block, shuf_mask);
    }
    __ addptr(rsp, 4 * wordSize);
    __ vzeroupper();
    __ leave();
    __ ret(0);
    return start;
  }

  address generate_sha512_implCompress(bool multi_block, const char *name) {
    assert(VM_Version::supports_avx2(), "");
    assert(VM_Version::supports_bmi2(), "");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf   = c_rarg0;
    Register state = c_rarg1;
    Register ofs   = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister msg     = xmm0;
    const XMMRegister state0  = xmm1;
    const XMMRegister state1  = xmm2;
    const XMMRegister msgtmp0 = xmm3;
    const XMMRegister msgtmp1 = xmm4;
    const XMMRegister msgtmp2 = xmm5;
    const XMMRegister msgtmp3 = xmm6;
    const XMMRegister msgtmp4 = xmm7;

    const XMMRegister shuf_mask = xmm8;

    __ enter();

    __ sha512_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
                   buf, state, ofs, limit, rsp, multi_block, shuf_mask);

    __ vzeroupper();
    __ leave();
    __ ret(0);
    return start;
  }
  // This is a version of CTR/AES crypt which does 6 blocks in a loop at a time
  // to hide instruction latency
  //
  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - counter vector byte array address
  //   Linux
  //     c_rarg4            - input length
  //     c_rarg5            - saved encryptedCounter start
  //     rbp + 6 * wordSize - saved used length
  //   Windows
  //     rbp + 6 * wordSize - input length
  //     rbp + 7 * wordSize - saved encryptedCounter start
  //     rbp + 8 * wordSize - saved used length
  //
  // Output:
  //   rax       - input length
  //
  address generate_counterMode_AESCrypt_Parallel() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt");
    address start = __ pc();
    const Register from    = c_rarg0; // source array address
    const Register to      = c_rarg1; // destination array address
    const Register key     = c_rarg2; // key array address
    const Register counter = c_rarg3; // counter byte array initialized from counter array address
                                      // and updated with the incremented counter in the end
#ifndef _WIN64
    const Register len_reg = c_rarg4;
    const Register saved_encCounter_start = c_rarg5;
    const Register used_addr = r10;
    const Address  used_mem(rbp, 2 * wordSize);
    const Register used = r11;
#else
    const Address len_mem(rbp, 6 * wordSize);              // length is on stack on Win64
    const Address saved_encCounter_mem(rbp, 7 * wordSize); // saved encryptedCounter start is on stack on Win64
    const Address used_mem(rbp, 8 * wordSize);             // saved used length is on stack on Win64
    const Register len_reg = r10; // pick the first volatile windows register
    const Register saved_encCounter_start = r11;
    const Register used_addr = r13;
    const Register used = r14;
#endif
    const Register pos = rax;

    const int PARALLEL_FACTOR = 6;
    const XMMRegister xmm_counter_shuf_mask = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front
    const XMMRegister xmm_curr_counter = xmm2;

    const XMMRegister xmm_key_tmp0 = xmm3;
    const XMMRegister xmm_key_tmp1 = xmm4;

    // registers holding the six results in the parallelized loop
    const XMMRegister xmm_result0 = xmm5;
    const XMMRegister xmm_result1 = xmm6;
    const XMMRegister xmm_result2 = xmm7;
    const XMMRegister xmm_result3 = xmm8;
    const XMMRegister xmm_result4 = xmm9;
    const XMMRegister xmm_result5 = xmm10;

    const XMMRegister xmm_from0 = xmm11;
    const XMMRegister xmm_from1 = xmm12;
    const XMMRegister xmm_from2 = xmm13;
    const XMMRegister xmm_from3 = xmm14; // the last one is xmm14. we have to preserve it on WIN64.
    const XMMRegister xmm_from4 = xmm3;  // reuse xmm3~4. xmm_key_tmp0~1 are unused once the input text is loaded.
    const XMMRegister xmm_from5 = xmm4;

    // for key_128, key_192, key_256
    const int rounds[3] = {10, 12, 14};
    Label L_exit_preLoop, L_preLoop_start;
    Label L_multiBlock_loopTop[3];
    Label L_singleBlockLoopTop[3];
    Label L__incCounter[3][6];     // for 6 blocks
    Label L__incCounter_single[3]; // for single block, key128, key192, key256
    Label L_processTail_insr[3], L_processTail_4_insr[3], L_processTail_2_insr[3], L_processTail_1_insr[3], L_processTail_exit_insr[3];
    Label L_processTail_extr[3], L_processTail_4_extr[3], L_processTail_2_extr[3], L_processTail_1_extr[3], L_processTail_exit_extr[3];

    Label L_exit;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode.
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

#ifdef _WIN64
    // allocate spill slots for r13, r14
    enum {
      saved_r13_offset,
      saved_r14_offset
    };
    __ subptr(rsp, 2 * wordSize);
    __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
    __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);

    // on win64, fill len_reg from stack position
    __ movl(len_reg, len_mem);
    __ movptr(saved_encCounter_start, saved_encCounter_mem);
    __ movptr(used_addr, used_mem);
    __ movl(used, Address(used_addr, 0));
#else
    __ push(len_reg); // Save
    __ movptr(used_addr, used_mem);
    __ movl(used, Address(used_addr, 0));
#endif

    __ push(rbx); // Save RBX
    __ movdqu(xmm_curr_counter, Address(counter, 0x00)); // initialize counter with initial counter
    __ movdqu(xmm_counter_shuf_mask, ExternalAddress(StubRoutines::x86::counter_shuffle_mask_addr()), pos); // pos as scratch
    __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); // counter is shuffled
    __ movptr(pos, 0);

    // Use the partially used encrypted counter from last invocation
    __ BIND(L_preLoop_start);
    __ cmpptr(used, 16);
    __ jcc(Assembler::aboveEqual, L_exit_preLoop);
    __ cmpptr(len_reg, 0);
    __ jcc(Assembler::lessEqual, L_exit_preLoop);
    __ movb(rbx, Address(saved_encCounter_start, used));
    __ xorb(rbx, Address(from, pos));
    __ movb(Address(to, pos), rbx);
    __ addptr(pos, 1);
    __ addptr(used, 1);
    __ subptr(len_reg, 1);

    __ jmp(L_preLoop_start);

    __ BIND(L_exit_preLoop);
    __ movl(Address(used_addr, 0), used);

    // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()), rbx); // rbx as scratch
    __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
    __ cmpl(rbx, 52);
    __ jcc(Assembler::equal, L_multiBlock_loopTop[1]);
    __ cmpl(rbx, 60);
    __ jcc(Assembler::equal, L_multiBlock_loopTop[2]);

#define CTR_DoSix(opc, src_reg)     \
    __ opc(xmm_result0, src_reg);   \
    __ opc(xmm_result1, src_reg);   \
    __ opc(xmm_result2, src_reg);   \
    __ opc(xmm_result3, src_reg);   \
    __ opc(xmm_result4, src_reg);   \
    __ opc(xmm_result5, src_reg);

    // k == 0 : generate code for key_128
    // k == 1 : generate code for key_192
    // k == 2 : generate code for key_256
    for (int k = 0; k < 3; ++k) {
      // multi blocks starts here
      __ align(OptoLoopAlignment);
      __ BIND(L_multiBlock_loopTop[k]);
      __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least PARALLEL_FACTOR blocks left
      __ jcc(Assembler::less, L_singleBlockLoopTop[k]);
      load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);

      // load, then increase counters
      CTR_DoSix(movdqa, xmm_curr_counter);
      inc_counter(rbx, xmm_result1, 0x01, L__incCounter[k][0]);
      inc_counter(rbx, xmm_result2, 0x02, L__incCounter[k][1]);
      inc_counter(rbx, xmm_result3, 0x03, L__incCounter[k][2]);
      inc_counter(rbx, xmm_result4, 0x04, L__incCounter[k][3]);
      inc_counter(rbx, xmm_result5, 0x05, L__incCounter[k][4]);
      inc_counter(rbx, xmm_curr_counter, 0x06, L__incCounter[k][5]);
      CTR_DoSix(pshufb, xmm_counter_shuf_mask); // after increase, shuffle counters back for PXOR
      CTR_DoSix(pxor, xmm_key_tmp0);            // PXOR with Round 0 key

      // load two ROUND_KEYs at a time
      for (int i = 1; i < rounds[k]; ) {
        load_key(xmm_key_tmp1, key, (0x10 * i), xmm_key_shuf_mask);
        load_key(xmm_key_tmp0, key, (0x10 * (i+1)), xmm_key_shuf_mask);
        CTR_DoSix(aesenc, xmm_key_tmp1);
        i++;
        if (i != rounds[k]) {
          CTR_DoSix(aesenc, xmm_key_tmp0);
        } else {
          CTR_DoSix(aesenclast, xmm_key_tmp0);
        }
        i++;
      }

      // get next PARALLEL_FACTOR blocks into xmm_result registers
      __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ movdqu(xmm_from1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ movdqu(xmm_from2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ movdqu(xmm_from3, Address(from, pos, Address::times_1, 3 * AESBlockSize));
      __ movdqu(xmm_from4, Address(from, pos, Address::times_1, 4 * AESBlockSize));
      __ movdqu(xmm_from5, Address(from, pos, Address::times_1, 5 * AESBlockSize));

      __ pxor(xmm_result0, xmm_from0);
      __ pxor(xmm_result1, xmm_from1);
      __ pxor(xmm_result2, xmm_from2);
      __ pxor(xmm_result3, xmm_from3);
      __ pxor(xmm_result4, xmm_from4);
      __ pxor(xmm_result5, xmm_from5);

      // store 6 results into the next 96 bytes of output
      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
      __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1);
      __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2);
      __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3);
      __ movdqu(Address(to, pos, Address::times_1, 4 * AESBlockSize), xmm_result4);
      __ movdqu(Address(to, pos, Address::times_1, 5 * AESBlockSize), xmm_result5);

      __ addptr(pos, PARALLEL_FACTOR * AESBlockSize);     // increase the length of crypt text
      __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // decrease the remaining length
      __ jmp(L_multiBlock_loopTop[k]);

      // singleBlock starts here
      __ align(OptoLoopAlignment);
      __ BIND(L_singleBlockLoopTop[k]);
      __ cmpptr(len_reg, 0);
      __ jcc(Assembler::lessEqual, L_exit);
      load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);
      __ movdqa(xmm_result0, xmm_curr_counter);
      inc_counter(rbx, xmm_curr_counter, 0x01, L__incCounter_single[k]);
      __ pshufb(xmm_result0, xmm_counter_shuf_mask);
      __ pxor(xmm_result0, xmm_key_tmp0);
      for (int i = 1; i < rounds[k]; i++) {
        load_key(xmm_key_tmp0, key, (0x10 * i), xmm_key_shuf_mask);
        __ aesenc(xmm_result0, xmm_key_tmp0);
      }
      load_key(xmm_key_tmp0, key, (rounds[k] * 0x10), xmm_key_shuf_mask);
      __ aesenclast(xmm_result0, xmm_key_tmp0);
      __ cmpptr(len_reg, AESBlockSize);
      __ jcc(Assembler::less, L_processTail_insr[k]);
      __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ pxor(xmm_result0, xmm_from0);
      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
      __ addptr(pos, AESBlockSize);
      __ subptr(len_reg, AESBlockSize);
      __ jmp(L_singleBlockLoopTop[k]);
      __ BIND(L_processTail_insr[k]); // Process the tail part of the input array
      __ addptr(pos, len_reg);        // 1. Insert bytes from src array into xmm_from0 register
      __ testptr(len_reg, 8);
      __ jcc(Assembler::zero, L_processTail_4_insr[k]);
      __ subptr(pos, 8);
      __ pinsrq(xmm_from0, Address(from, pos), 0);
      __ BIND(L_processTail_4_insr[k]);
      __ testptr(len_reg, 4);
      __ jcc(Assembler::zero, L_processTail_2_insr[k]);
      __ subptr(pos, 4);
      __ pslldq(xmm_from0, 4);
      __ pinsrd(xmm_from0, Address(from, pos), 0);
      __ BIND(L_processTail_2_insr[k]);
      __ testptr(len_reg, 2);
      __ jcc(Assembler::zero, L_processTail_1_insr[k]);
      __ subptr(pos, 2);
      __ pslldq(xmm_from0, 2);
      __ pinsrw(xmm_from0, Address(from, pos), 0);
      __ BIND(L_processTail_1_insr[k]);
      __ testptr(len_reg, 1);
      __ jcc(Assembler::zero, L_processTail_exit_insr[k]);
      __ subptr(pos, 1);
      __ pslldq(xmm_from0, 1);
      __ pinsrb(xmm_from0, Address(from, pos), 0);
      __ BIND(L_processTail_exit_insr[k]);

      __ movdqu(Address(saved_encCounter_start, 0), xmm_result0); // 2. Perform pxor of the encrypted counter and plaintext bytes.
      __ pxor(xmm_result0, xmm_from0);                            //    Also the encrypted counter is saved for next invocation.

      __ testptr(len_reg, 8);
      __ jcc(Assembler::zero, L_processTail_4_extr[k]); // 3. Extract bytes from xmm_result0 into the dest. array
      __ pextrq(Address(to, pos), xmm_result0, 0);
      __ psrldq(xmm_result0, 8);
      __ addptr(pos, 8);
      __ BIND(L_processTail_4_extr[k]);
      __ testptr(len_reg, 4);
      __ jcc(Assembler::zero, L_processTail_2_extr[k]);
      __ pextrd(Address(to, pos), xmm_result0, 0);
      __ psrldq(xmm_result0, 4);
      __ addptr(pos, 4);
      __ BIND(L_processTail_2_extr[k]);
      __ testptr(len_reg, 2);
      __ jcc(Assembler::zero, L_processTail_1_extr[k]);
      __ pextrw(Address(to, pos), xmm_result0, 0);
      __ psrldq(xmm_result0, 2);
      __ addptr(pos, 2);
      __ BIND(L_processTail_1_extr[k]);
      __ testptr(len_reg, 1);
      __ jcc(Assembler::zero, L_processTail_exit_extr[k]);
      __ pextrb(Address(to, pos), xmm_result0, 0);

      __ BIND(L_processTail_exit_extr[k]);
      __ movl(Address(used_addr, 0), len_reg);
      __ jmp(L_exit);
    }

    __ BIND(L_exit);
    __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); // counter is shuffled back.
    __ movdqu(Address(counter, 0), xmm_curr_counter);   // save counter back
    __ pop(rbx); // pop the saved RBX.
#ifdef _WIN64
    __ movl(rax, len_mem);
    __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
    __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
    __ addptr(rsp, 2 * wordSize);
#else
    __ pop(rax); // return 'len'
#endif
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }
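  // For reference only: CTR produces a keystream, so partial blocks need no
  // padding. The tail path above assembles up to 15 src bytes into xmm_from0
  // with pinsrq/d/w/b, xors against the encrypted counter, and extracts the
  // same widths back out. Byte-level sketch of what that computes:
  //
  //   // ks = AES_encrypt(counter, key), saved to saved_encCounter
  //   for (int i = 0; i < tail_len; i++) dst[i] = src[i] ^ ks[i];
  //   used = tail_len; // a later call consumes ks[used..15] first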
  // byte swap x86 long
  address generate_ghash_long_swap_mask() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask");
    address start = __ pc();
    __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none);
    __ emit_data64(0x0706050403020100, relocInfo::none);
    return start;
  }

  // byte swap x86 byte array
  address generate_ghash_byte_swap_mask() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }
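  // For reference only: GHASH authenticates by folding each data block into a
  // running state with a GF(2^128) multiply by the hash subkey H. Sketch of
  // the per-block update the stub below implements (gf128_mul hypothetical):
  //
  //   for (long i = 0; i < blocks; i++)
  //     state = gf128_mul(state ^ load_be128(data + 16*i), subkeyH);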
  /* Single and multi-block ghash operations */
  address generate_ghash_processBlocks() {
    __ align(CodeEntryAlignment);
    Label L_ghash_loop, L_exit;
    StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
    address start = __ pc();

    const Register state   = c_rarg0;
    const Register subkeyH = c_rarg1;
    const Register data    = c_rarg2;
    const Register blocks  = c_rarg3;

    const XMMRegister xmm_temp0  = xmm0;
    const XMMRegister xmm_temp1  = xmm1;
    const XMMRegister xmm_temp2  = xmm2;
    const XMMRegister xmm_temp3  = xmm3;
    const XMMRegister xmm_temp4  = xmm4;
    const XMMRegister xmm_temp5  = xmm5;
    const XMMRegister xmm_temp6  = xmm6;
    const XMMRegister xmm_temp7  = xmm7;
    const XMMRegister xmm_temp8  = xmm8;
    const XMMRegister xmm_temp9  = xmm9;
    const XMMRegister xmm_temp10 = xmm10;

    __ enter();

    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode.
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

    __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));

    __ movdqu(xmm_temp0, Address(state, 0));
    __ pshufb(xmm_temp0, xmm_temp10);


    __ BIND(L_ghash_loop);
    __ movdqu(xmm_temp2, Address(data, 0));
    __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr()));

    __ movdqu(xmm_temp1, Address(subkeyH, 0));
    __ pshufb(xmm_temp1, xmm_temp10);

    __ pxor(xmm_temp0, xmm_temp2);

    //
    // Multiply with the hash key
    //
    __ movdqu(xmm_temp3, xmm_temp0);
    __ pclmulqdq(xmm_temp3, xmm_temp1, 0);  // xmm3 holds a0*b0
    __ movdqu(xmm_temp4, xmm_temp0);
    __ pclmulqdq(xmm_temp4, xmm_temp1, 16); // xmm4 holds a0*b1

    __ movdqu(xmm_temp5, xmm_temp0);
    __ pclmulqdq(xmm_temp5, xmm_temp1, 1);  // xmm5 holds a1*b0
    __ movdqu(xmm_temp6, xmm_temp0);
    __ pclmulqdq(xmm_temp6, xmm_temp1, 17); // xmm6 holds a1*b1

    __ pxor(xmm_temp4, xmm_temp5); // xmm4 holds a0*b1 + a1*b0

    __ movdqu(xmm_temp5, xmm_temp4); // move the contents of xmm4 to xmm5
    __ psrldq(xmm_temp4, 8); // shift xmm4 by 64 bits to the right
    __ pslldq(xmm_temp5, 8); // shift xmm5 by 64 bits to the left
    __ pxor(xmm_temp3, xmm_temp5);
    __ pxor(xmm_temp6, xmm_temp4); // Register pair <xmm6:xmm3> holds the result
                                   // of the carry-less multiplication of
                                   // xmm0 by xmm1.

    // We shift the result of the multiplication by one bit position
    // to the left to compensate for the fact that the bits are reversed.
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp8, xmm_temp6);
    __ pslld(xmm_temp3, 1);
    __ pslld(xmm_temp6, 1);
    __ psrld(xmm_temp7, 31);
    __ psrld(xmm_temp8, 31);
    __ movdqu(xmm_temp9, xmm_temp7);
    __ pslldq(xmm_temp8, 4);
    __ pslldq(xmm_temp7, 4);
    __ psrldq(xmm_temp9, 12);
    __ por(xmm_temp3, xmm_temp7);
    __ por(xmm_temp6, xmm_temp8);
    __ por(xmm_temp6, xmm_temp9);

    //
    // First phase of the reduction
    //
    // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts
    // independently.
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp8, xmm_temp3);
    __ movdqu(xmm_temp9, xmm_temp3);
    __ pslld(xmm_temp7, 31); // packed left shift << 31
    __ pslld(xmm_temp8, 30); // packed left shift << 30
    __ pslld(xmm_temp9, 25); // packed left shift << 25
    __ pxor(xmm_temp7, xmm_temp8); // xor the shifted versions
    __ pxor(xmm_temp7, xmm_temp9);
    __ movdqu(xmm_temp8, xmm_temp7);
    __ pslldq(xmm_temp7, 12);
    __ psrldq(xmm_temp8, 4);
    __ pxor(xmm_temp3, xmm_temp7); // first phase of the reduction complete

    //
    // Second phase of the reduction
    //
    // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these
    // shift operations.
    __ movdqu(xmm_temp2, xmm_temp3);
    __ movdqu(xmm_temp4, xmm_temp3);
    __ movdqu(xmm_temp5, xmm_temp3);
    __ psrld(xmm_temp2, 1); // packed right shift >> 1
    __ psrld(xmm_temp4, 2); // packed right shift >> 2
    __ psrld(xmm_temp5, 7); // packed right shift >> 7
    __ pxor(xmm_temp2, xmm_temp4); // xor the shifted versions
    __ pxor(xmm_temp2, xmm_temp5);
    __ pxor(xmm_temp2, xmm_temp8);
    __ pxor(xmm_temp3, xmm_temp2);
    __ pxor(xmm_temp6, xmm_temp3); // the result is in xmm6

    __ decrement(blocks);
    __ jcc(Assembler::zero, L_exit);
    __ movdqu(xmm_temp0, xmm_temp6);
    __ addptr(data, 16);
    __ jmp(L_ghash_loop);

    __ BIND(L_exit);
    __ pshufb(xmm_temp6, xmm_temp10);        // Byte swap 16-byte result
    __ movdqu(Address(state, 0), xmm_temp6); // store the result
    __ leave();
    __ ret(0);
    return start;
  }
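  // For reference only: the four pclmulqdq results above form a schoolbook
  // 128x128 carry-less multiply from 64-bit halves,
  //
  //   (a1:a0) * (b1:b0) = (a1*b1 << 128) ^ ((a0*b1 ^ a1*b0) << 64) ^ a0*b0
  //
  // accumulated into the <xmm6:xmm3> pair; the two reduction phases then fold
  // the 256-bit product modulo the GHASH polynomial x^128 + x^7 + x^2 + x + 1.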
  /**
   * Arguments:
   *
   * Inputs:
   *   c_rarg0   - int crc
   *   c_rarg1   - byte* buf
   *   c_rarg2   - int length
   *
   * Output:
   *   rax       - int crc result
   */
  address generate_updateBytesCRC32() {
    assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    // rscratch1: r10
    const Register crc   = c_rarg0; // crc
    const Register buf   = c_rarg1; // source java byte array address
    const Register len   = c_rarg2; // length
    const Register table = c_rarg3; // crc_table address (reuse register)
    const Register tmp   = r11;
    assert_different_registers(crc, buf, len, table, tmp, rax);

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ kernel_crc32(crc, buf, len, table, tmp);

    __ movl(rax, crc);
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  /**
   * Arguments:
   *
   * Inputs:
   *   c_rarg0   - int crc
   *   c_rarg1   - byte* buf
   *   c_rarg2   - long length
   *   c_rarg3   - table_start - optional (present only when doing a library_call,
   *               not used by x86 algorithm)
   *
   * Output:
   *   rax       - int crc result
   */
  address generate_updateBytesCRC32C(bool is_pclmulqdq_supported) {
    assert(UseCRC32CIntrinsics, "need SSE4_2");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C");
    address start = __ pc();
    // reg.arg   int#0  int#1  int#2  int#3  int#4  int#5  float regs
    // Windows   RCX    RDX    R8     R9     none   none   XMM0..XMM3
    // Lin / Sol RDI    RSI    RDX    RCX    R8     R9     XMM0..XMM7
    const Register crc = c_rarg0; // crc
    const Register buf = c_rarg1; // source java byte array address
    const Register len = c_rarg2; // length
    const Register a = rax;
    const Register j = r9;
    const Register k = r10;
    const Register l = r11;
#ifdef _WIN64
    const Register y = rdi;
    const Register z = rsi;
#else
    const Register y = rcx;
    const Register z = r8;
#endif
    assert_different_registers(crc, buf, len, a, j, k, l, y, z);

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame
#ifdef _WIN64
    __ push(y);
    __ push(z);
#endif
    __ crc32c_ipl_alg2_alt2(crc, buf, len,
                            a, j, k,
                            l, y, z,
                            c_farg0, c_farg1, c_farg2,
                            is_pclmulqdq_supported);
    __ movl(rax, crc);
#ifdef _WIN64
    __ pop(z);
    __ pop(y);
#endif
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
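  // For reference only: both stubs compute the reflected CRC used by
  // java.util.zip.CRC32 / CRC32C (polynomials 0x04C11DB7 and 0x1EDC6F41).
  // A bit-identical table-driven sketch of the contract:
  //
  //   uint32_t c = ~crc;
  //   for (int i = 0; i < len; i++)
  //     c = table[(c ^ buf[i]) & 0xff] ^ (c >> 8);
  //   return ~c;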
  /**
   * Arguments:
   *
   * Input:
   *   c_rarg0   - x address
   *   c_rarg1   - x length
   *   c_rarg2   - y address
   *   c_rarg3   - y length
   *   not Win64
   *     c_rarg4   - z address
   *     c_rarg5   - z length
   *   Win64
   *     rsp+40    - z address
   *     rsp+48    - z length
   */
  address generate_multiplyToLen() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "multiplyToLen");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register x    = rdi;
    const Register xlen = rax;
    const Register y    = rsi;
    const Register ylen = rcx;
    const Register z    = r8;
    const Register zlen = r11;

    // Next registers will be saved on stack in multiply_to_len().
    const Register tmp1 = r12;
    const Register tmp2 = r13;
    const Register tmp3 = r14;
    const Register tmp4 = r15;
    const Register tmp5 = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifndef _WIN64
    __ movptr(zlen, r9); // Save r9 in r11 - zlen
#endif
    setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx
                       // ylen => rcx, z => r8, zlen => r11
                       // r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
    // last 2 arguments (#4, #5) are on stack on Win64
    __ movptr(z, Address(rsp, 6 * wordSize));
    __ movptr(zlen, Address(rsp, 7 * wordSize));
#endif

    __ movptr(xlen, rsi);
    __ movptr(y, rdx);
    __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5);

    restore_arg_regs();

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  /**
   * Arguments:
   *
   * Input:
   *   c_rarg0   - obja    address
   *   c_rarg1   - objb    address
   *   c_rarg3   - length  length
   *   c_rarg4   - scale   log2_array_indexscale
   *
   * Output:
   *   rax   - int: >= 0 is the index of the first mismatch,
   *           < 0 is the bitwise complement of the unchecked tail length
   */
  address generate_vectorizedMismatch() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "vectorizedMismatch");
    address start = __ pc();

    BLOCK_COMMENT("Entry:");
    __ enter();

#ifdef _WIN64  // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register scale  = c_rarg0; // rcx, will exchange with r9
    const Register objb   = c_rarg1; // rdx
    const Register length = c_rarg2; // r8
    const Register obja   = c_rarg3; // r9
    __ xchgq(obja, scale); // now obja and scale contain the correct contents

    const Register tmp1 = r10;
    const Register tmp2 = r11;
#endif
#ifndef _WIN64 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register obja   = c_rarg0; // U:rdi
    const Register objb   = c_rarg1; // U:rsi
    const Register length = c_rarg2; // U:rdx
    const Register scale  = c_rarg3; // U:rcx
    const Register tmp1 = r8;
    const Register tmp2 = r9;
#endif
    const Register result = rax; // return value
    const XMMRegister vec0 = xmm0;
    const XMMRegister vec1 = xmm1;
    const XMMRegister vec2 = xmm2;

    __ vectorized_mismatch(obja, objb, length, scale, result, tmp1, tmp2, vec0, vec1, vec2);

    __ vzeroupper();
    __ leave();
    __ ret(0);

    return start;
  }
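  // For reference only: the return convention matches
  // jdk.internal.util.ArraysSupport.vectorizedMismatch. A scalar sketch,
  // ignoring the element-scale handling:
  //
  //   for (i = 0; i < length; i++)
  //     if (a[i] != b[i]) return i; // index of first mismatch
  //   return ~remaining;            // no mismatch; tail left for the caller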
  /**
   * Arguments:
   *
   * Input:
   *   c_rarg0   - x address
   *   c_rarg1   - x length
   *   c_rarg2   - z address
   *   c_rarg3   - z length
   */
  address generate_squareToLen() {

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "squareToLen");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...)
    const Register x    = rdi;
    const Register len  = rsi;
    const Register z    = r8;
    const Register zlen = rcx;

    const Register tmp1 = r12;
    const Register tmp2 = r13;
    const Register tmp3 = r14;
    const Register tmp4 = r15;
    const Register tmp5 = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    setup_arg_regs(4); // x => rdi, len => rsi, z => rdx
                       // zlen => rcx
                       // r9 and r10 may be used to save non-volatile registers
    __ movptr(r8, rdx);
    __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax);

    restore_arg_regs();

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  /**
   * Arguments:
   *
   * Input:
   *   c_rarg0   - out address
   *   c_rarg1   - in address
   *   c_rarg2   - offset
   *   c_rarg3   - len
   *   not Win64
   *     c_rarg4   - k
   *   Win64
   *     rsp+40    - k
   */
  address generate_mulAdd() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "mulAdd");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register out    = rdi;
    const Register in     = rsi;
    const Register offset = r11;
    const Register len    = rcx;
    const Register k      = r8;

    // Next registers will be saved on stack in mul_add().
    const Register tmp1 = r12;
    const Register tmp2 = r13;
    const Register tmp3 = r14;
    const Register tmp4 = r15;
    const Register tmp5 = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx
                       // len => rcx, k => r8
                       // r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
    // last argument is on stack on Win64
    __ movl(k, Address(rsp, 6 * wordSize));
#endif
    __ movptr(r11, rdx); // move offset in rdx to offset(r11)
    __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax);

    restore_arg_regs();

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
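  // For reference only: mulAdd is the BigInteger primitive
  // out[..] += in[0..len-1] * k with carry propagation. A scalar sketch of
  // the same arithmetic (indexing simplified relative to BigInteger's):
  //
  //   uint64_t carry = 0;
  //   for (int j = len - 1; j >= 0; j--, offset--) {
  //     uint64_t p = (uint64_t)in[j] * k + out[offset] + carry;
  //     out[offset] = (uint32_t)p;
  //     carry = p >> 32;
  //   }
  //   return (uint32_t)carry;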
4766 const Register tmp1 = r12; 4767 const Register tmp2 = r13; 4768 const Register tmp3 = r14; 4769 const Register tmp4 = r15; 4770 const Register tmp5 = rbx; 4771 4772 BLOCK_COMMENT("Entry:"); 4773 __ enter(); // required for proper stackwalking of RuntimeStub frame 4774 4775 setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx 4776 // len => rcx, k => r8 4777 // r9 and r10 may be used to save non-volatile registers 4778 #ifdef _WIN64 4779 // last argument is on stack on Win64 4780 __ movl(k, Address(rsp, 6 * wordSize)); 4781 #endif 4782 __ movptr(r11, rdx); // move offset in rdx to offset(r11) 4783 __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 4784 4785 restore_arg_regs(); 4786 4787 __ leave(); // required for proper stackwalking of RuntimeStub frame 4788 __ ret(0); 4789 4790 return start; 4791 } 4792 4793 address generate_libmExp() { 4794 StubCodeMark mark(this, "StubRoutines", "libmExp"); 4795 4796 address start = __ pc(); 4797 4798 const XMMRegister x0 = xmm0; 4799 const XMMRegister x1 = xmm1; 4800 const XMMRegister x2 = xmm2; 4801 const XMMRegister x3 = xmm3; 4802 4803 const XMMRegister x4 = xmm4; 4804 const XMMRegister x5 = xmm5; 4805 const XMMRegister x6 = xmm6; 4806 const XMMRegister x7 = xmm7; 4807 4808 const Register tmp = r11; 4809 4810 BLOCK_COMMENT("Entry:"); 4811 __ enter(); // required for proper stackwalking of RuntimeStub frame 4812 4813 __ fast_exp(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 4814 4815 __ leave(); // required for proper stackwalking of RuntimeStub frame 4816 __ ret(0); 4817 4818 return start; 4819 4820 } 4821 4822 address generate_libmLog() { 4823 StubCodeMark mark(this, "StubRoutines", "libmLog"); 4824 4825 address start = __ pc(); 4826 4827 const XMMRegister x0 = xmm0; 4828 const XMMRegister x1 = xmm1; 4829 const XMMRegister x2 = xmm2; 4830 const XMMRegister x3 = xmm3; 4831 4832 const XMMRegister x4 = xmm4; 4833 const XMMRegister x5 = xmm5; 4834 const XMMRegister x6 = xmm6; 4835 const XMMRegister x7 = xmm7; 4836 4837 const Register tmp1 = r11; 4838 const Register tmp2 = r8; 4839 4840 BLOCK_COMMENT("Entry:"); 4841 __ enter(); // required for proper stackwalking of RuntimeStub frame 4842 4843 __ fast_log(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2); 4844 4845 __ leave(); // required for proper stackwalking of RuntimeStub frame 4846 __ ret(0); 4847 4848 return start; 4849 4850 } 4851 4852 address generate_libmLog10() { 4853 StubCodeMark mark(this, "StubRoutines", "libmLog10"); 4854 4855 address start = __ pc(); 4856 4857 const XMMRegister x0 = xmm0; 4858 const XMMRegister x1 = xmm1; 4859 const XMMRegister x2 = xmm2; 4860 const XMMRegister x3 = xmm3; 4861 4862 const XMMRegister x4 = xmm4; 4863 const XMMRegister x5 = xmm5; 4864 const XMMRegister x6 = xmm6; 4865 const XMMRegister x7 = xmm7; 4866 4867 const Register tmp = r11; 4868 4869 BLOCK_COMMENT("Entry:"); 4870 __ enter(); // required for proper stackwalking of RuntimeStub frame 4871 4872 __ fast_log10(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 4873 4874 __ leave(); // required for proper stackwalking of RuntimeStub frame 4875 __ ret(0); 4876 4877 return start; 4878 4879 } 4880 4881 address generate_libmPow() { 4882 StubCodeMark mark(this, "StubRoutines", "libmPow"); 4883 4884 address start = __ pc(); 4885 4886 const XMMRegister x0 = xmm0; 4887 const XMMRegister x1 = xmm1; 4888 const XMMRegister x2 = xmm2; 4889 const XMMRegister x3 = xmm3; 4890 4891 const XMMRegister x4 = xmm4; 4892 const XMMRegister x5 = xmm5; 4893 const XMMRegister 
  address generate_libmPow() {
    StubCodeMark mark(this, "StubRoutines", "libmPow");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r8;
    const Register tmp2 = r9;
    const Register tmp3 = r10;
    const Register tmp4 = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ fast_pow(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4);

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmSin() {
    StubCodeMark mark(this, "StubRoutines", "libmSin");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r8;
    const Register tmp2 = r9;
    const Register tmp3 = r10;
    const Register tmp4 = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef _WIN64
    // rsi and rdi are callee-saved on Win64 and clobbered by the kernel below
    __ push(rsi);
    __ push(rdi);
#endif
    __ fast_sin(x0, x1, x2, x3, x4, x5, x6, x7, rax, rbx, rcx, rdx, tmp1, tmp2, tmp3, tmp4);

#ifdef _WIN64
    __ pop(rdi);
    __ pop(rsi);
#endif

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmCos() {
    StubCodeMark mark(this, "StubRoutines", "libmCos");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r8;
    const Register tmp2 = r9;
    const Register tmp3 = r10;
    const Register tmp4 = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef _WIN64
    // rsi and rdi are callee-saved on Win64 and clobbered by the kernel below
    __ push(rsi);
    __ push(rdi);
#endif
    __ fast_cos(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4);

#ifdef _WIN64
    __ pop(rdi);
    __ pop(rsi);
#endif

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmTan() {
    StubCodeMark mark(this, "StubRoutines", "libmTan");

    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r8;
    const Register tmp2 = r9;
    const Register tmp3 = r10;
    const Register tmp4 = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef _WIN64
    // rsi and rdi are callee-saved on Win64 and clobbered by the kernel below
    __ push(rsi);
    __ push(rdi);
#endif
    __ fast_tan(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4);

#ifdef _WIN64
    __ pop(rdi);
    __ pop(rsi);
#endif

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
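  // Usage sketch (hedged, illustration only): once installed by
  // generate_initial() below, these entries behave like C math
  // functions, e.g.
  //
  //   typedef double (*unary_math_stub)(double);
  //   double t = ((unary_math_stub)StubRoutines::dtan())(0.5);
  //
  // assuming StubRoutines::dtan() is the accessor exposing _dtan.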

#undef __
#define __ masm->

  // Continuation point for throwing of implicit exceptions that are
  // not handled in the current activation. Fabricates an exception
  // oop and initiates normal exception dispatching in this
  // frame. Since we need to preserve callee-saved values (currently
  // only for C2, but done for C1 as well) we need a callee-saved oop
  // map and therefore have to make these stubs into RuntimeStubs
  // rather than BufferBlobs. If the compiler needs all registers to
  // be preserved between the fault point and the exception handler
  // then it must assume responsibility for that in
  // AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other
  // implicit exceptions (e.g., NullPointerException or
  // AbstractMethodError on entry) are either at call sites or
  // otherwise assume that stack unwinding will be initiated, so
  // caller-saved registers are assumed to be volatile by the compiler.
  address generate_throw_exception(const char* name,
                                   address runtime_entry,
                                   Register arg1 = noreg,
                                   Register arg2 = noreg) {
    // Information about frame layout at time of blocking runtime call.
    // Note that we only have to preserve callee-saved registers since
    // the compilers are responsible for supplying a continuation point
    // if they expect all registers to be preserved.
    enum layout {
      rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
      rbp_off2,
      return_off,
      return_off2,
      framesize // inclusive of return address
    };

    int insts_size = 512;
    int locs_size  = 64;

    CodeBuffer code(name, insts_size, locs_size);
    OopMapSet* oop_maps  = new OopMapSet();
    MacroAssembler* masm = new MacroAssembler(&code);

    address start = __ pc();

    // This is an inlined and slightly modified version of call_VM
    // which has the ability to fetch the return PC out of
    // thread-local storage and also sets up last_Java_sp slightly
    // differently than the real call_VM

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    assert(is_even(framesize/2), "sp not 16-byte aligned");

    // return address and rbp are already in place
    __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog

    int frame_complete = __ pc() - start;

    // Set up last_Java_sp and last_Java_fp
    address the_pc = __ pc();
    __ set_last_Java_frame(rsp, rbp, the_pc);
    __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack

    // Call runtime
    if (arg1 != noreg) {
      assert(arg2 != c_rarg1, "clobbered");
      __ movptr(c_rarg1, arg1);
    }
    if (arg2 != noreg) {
      __ movptr(c_rarg2, arg2);
    }
    __ movptr(c_rarg0, r15_thread);
    BLOCK_COMMENT("call runtime_entry");
    __ call(RuntimeAddress(runtime_entry));

    // Generate oop map
    OopMap* map = new OopMap(framesize, 0);

    oop_maps->add_gc_map(the_pc - start, map);

    __ reset_last_Java_frame(true);

    __ leave(); // required for proper stackwalking of RuntimeStub frame

    // check for pending exceptions
#ifdef ASSERT
    Label L;
    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()),
              (int32_t) NULL_WORD);
    __ jcc(Assembler::notEqual, L);
    __ should_not_reach_here();
    __ bind(L);
#endif // ASSERT
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

    // codeBlob framesize is in words (not VMRegImpl::slot_size)
    RuntimeStub* stub =
      RuntimeStub::new_runtime_stub(name,
                                    &code,
                                    frame_complete,
                                    (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                    oop_maps, false);
    return stub->entry_point();
  }
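  // Usage sketch (taken from generate_initial()/generate_all() below):
  //
  //   StubRoutines::_throw_StackOverflowError_entry =
  //     generate_throw_exception("StackOverflowError throw_exception",
  //                              CAST_FROM_FN_PTR(address,
  //                                               SharedRuntime::
  //                                               throw_StackOverflowError));
  //
  // The optional arg1/arg2 registers, when supplied, are copied into
  // c_rarg1/c_rarg2 before the call; c_rarg0 always receives the thread.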
  void create_control_words() {
    // Round to nearest, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_std   = 0x027F;
    // Round to zero, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
    // Round to nearest, 24-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_24    = 0x007F;
    // Round to nearest, 64-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_64    = 0x037F;
    // MXCSR: round to nearest, all exceptions masked
    StubRoutines::_mxcsr_std           = 0x1F80;
    // Note: the following two constants are 80-bit values;
    // the layout is critical for correct loading by the FPU.
    // Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias1[0] = 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias1[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias1[2] = 0x03ff;
    // Un-Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias2[0] = 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias2[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias2[2] = 0x7bff;
  }
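  // For reference when reading create_control_words() above: in the x87
  // control word, the exception mask bits live in bits 0-5, precision
  // control in bits 9-8 and rounding control in bits 11-10, so e.g.
  //
  //   0x027F: masks = 0x3F (all set), PC = 10 (53-bit), RC = 00 (nearest)
  //
  // In MXCSR the mask bits live in bits 12-7 and rounding control in
  // bits 14-13, so 0x1F80 likewise masks all exceptions and rounds to
  // nearest.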
  // Initialization
  void generate_initial() {
    // Generates all stubs and initializes the entry points

    // These platform-specific settings are needed by generate_call_stub()
    create_control_words();

    // entry points that exist on all platforms. Note: This is code
    // that could be shared among different platforms - however the
    // benefit seems to be smaller than the disadvantage of having a
    // much more complicated generator structure. See also comment in
    // stubRoutines.hpp.

    StubRoutines::_forward_exception_entry = generate_forward_exception();

    StubRoutines::_call_stub_entry =
      generate_call_stub(StubRoutines::_call_stub_return_address);

    // is referenced by megamorphic call
    StubRoutines::_catch_exception_entry = generate_catch_exception();

    // atomic calls
    StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
    StubRoutines::_atomic_xchg_long_entry    = generate_atomic_xchg_long();
    StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
    StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
    StubRoutines::_atomic_add_entry          = generate_atomic_add();
    StubRoutines::_atomic_add_long_entry     = generate_atomic_add_long();
    StubRoutines::_fence_entry               = generate_orderaccess_fence();

    // platform dependent
    StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
    StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();

    StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();

    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_StackOverflowError));
    StubRoutines::_throw_delayed_StackOverflowError_entry =
      generate_throw_exception("delayed StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_delayed_StackOverflowError));
    if (UseCRC32Intrinsics) {
      // set the table address before generating the stub, which uses it
      StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
    }

    if (UseCRC32CIntrinsics) {
      bool supports_clmul = VM_Version::supports_clmul();
      StubRoutines::x86::generate_CRC32C_table(supports_clmul);
      StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
      StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
    }
    if (VM_Version::supports_sse2() && UseLibmIntrinsic && InlineIntrinsics) {
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::x86::_ONEHALF_adr      = (address)StubRoutines::x86::_ONEHALF;
        StubRoutines::x86::_P_2_adr          = (address)StubRoutines::x86::_P_2;
        StubRoutines::x86::_SC_4_adr         = (address)StubRoutines::x86::_SC_4;
        StubRoutines::x86::_Ctable_adr       = (address)StubRoutines::x86::_Ctable;
        StubRoutines::x86::_SC_2_adr         = (address)StubRoutines::x86::_SC_2;
        StubRoutines::x86::_SC_3_adr         = (address)StubRoutines::x86::_SC_3;
        StubRoutines::x86::_SC_1_adr         = (address)StubRoutines::x86::_SC_1;
        StubRoutines::x86::_PI_INV_TABLE_adr = (address)StubRoutines::x86::_PI_INV_TABLE;
        StubRoutines::x86::_PI_4_adr         = (address)StubRoutines::x86::_PI_4;
        StubRoutines::x86::_PI32INV_adr      = (address)StubRoutines::x86::_PI32INV;
        StubRoutines::x86::_SIGN_MASK_adr    = (address)StubRoutines::x86::_SIGN_MASK;
        StubRoutines::x86::_P_1_adr          = (address)StubRoutines::x86::_P_1;
        StubRoutines::x86::_P_3_adr          = (address)StubRoutines::x86::_P_3;
        StubRoutines::x86::_NEG_ZERO_adr     = (address)StubRoutines::x86::_NEG_ZERO;
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dexp)) {
        StubRoutines::_dexp = generate_libmExp();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
        StubRoutines::_dlog = generate_libmLog();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) {
        StubRoutines::_dlog10 = generate_libmLog10();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) {
        StubRoutines::_dpow = generate_libmPow();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
        StubRoutines::_dsin = generate_libmSin();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
        StubRoutines::_dcos = generate_libmCos();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::_dtan = generate_libmTan();
      }
    }
  }
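  // Note: generate_initial() runs in the first of the two generation
  // phases (see the constructor at the bottom of this file), before the
  // universe is fully initialized; anything that needs universe_init to
  // have run - e.g. the verify_oop subroutine - is deferred to
  // generate_all() below.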
  void generate_all() {
    // Generates all stubs and initializes the entry points

    // These entry points require SharedInfo::stack0 to be set up in
    // non-core builds and need to be relocatable, so they each
    // fabricate a RuntimeStub internally.
    StubRoutines::_throw_AbstractMethodError_entry =
      generate_throw_exception("AbstractMethodError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_AbstractMethodError));

    StubRoutines::_throw_IncompatibleClassChangeError_entry =
      generate_throw_exception("IncompatibleClassChangeError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_IncompatibleClassChangeError));

    StubRoutines::_throw_NullPointerException_at_call_entry =
      generate_throw_exception("NullPointerException at call throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_NullPointerException_at_call));

    // entry points that are platform specific
    StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
    StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
    StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
    StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();

    StubRoutines::x86::_float_sign_mask  = generate_fp_mask("float_sign_mask",  0x7FFFFFFF7FFFFFFF);
    StubRoutines::x86::_float_sign_flip  = generate_fp_mask("float_sign_flip",  0x8000000080000000);
    StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
    StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    // Load barrier stubs
    if (UseLoadBarrier) {
      address loadbarrier_address      = CAST_FROM_FN_PTR(address, SharedRuntime::z_load_barrier_on_oop_field_preloaded);
      address loadbarrier_weak_address = CAST_FROM_FN_PTR(address, SharedRuntime::z_load_barrier_on_weak_oop_field_preloaded);
      Register rr = as_Register(0);
      for (int i = 0; i < RegisterImpl::number_of_registers; i++) {
        StubRoutines::x86::_load_barrier_slow_stub[i]      = generate_load_barrier_stub(rr, loadbarrier_address, false);
        StubRoutines::x86::_load_barrier_weak_slow_stub[i] = generate_load_barrier_stub(rr, loadbarrier_weak_address, true);
        rr = rr->successor();
      }
    }

    // don't bother generating these AES intrinsic stubs unless global flag is set
    if (UseAESIntrinsics) {
      StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others
      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
      StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
    }
    if (UseAESCTRIntrinsics) {
      StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask();
      StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
    }

    if (UseSHA1Intrinsics) {
      StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
      StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
      StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress");
      StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB");
    }
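    // The SHA-256 block below builds _k256_W from _k256 by writing each
    // 16-byte group of round constants twice, side by side (a sketch):
    //
    //   _k256:   | K0..K3 | K4..K7 | ...                    (16 bytes per group)
    //   _k256_W: | K0..K3 | K0..K3 | K4..K7 | K4..K7 | ...  (each group doubled)
    //
    // presumably so a 256-bit, two-lane SHA-256 compression loop can use
    // the same constants in both 128-bit lanes; the AVX2 rationale is an
    // inference here, not stated in this file.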
    if (UseSHA256Intrinsics) {
      StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
      char* dst = (char*)StubRoutines::x86::_k256_W;
      char* src = (char*)StubRoutines::x86::_k256;
      for (int ii = 0; ii < 16; ++ii) {
        // copy each 16-byte group of round constants into both halves
        // of the corresponding 32-byte group of _k256_W
        memcpy(dst + 32 * ii,      src + 16 * ii, 16);
        memcpy(dst + 32 * ii + 16, src + 16 * ii, 16);
      }
      StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask();
      StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress");
      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB");
    }
    if (UseSHA512Intrinsics) {
      StubRoutines::x86::_k512_W_addr = (address)StubRoutines::x86::_k512_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = generate_pshuffle_byte_flip_mask_sha512();
      StubRoutines::_sha512_implCompress = generate_sha512_implCompress(false, "sha512_implCompress");
      StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB");
    }

    // Generate GHASH intrinsics code
    if (UseGHASHIntrinsics) {
      StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();
      StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask();
      StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
    }

    // Safefetch stubs.
    generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
                                                       &StubRoutines::_safefetch32_fault_pc,
                                                       &StubRoutines::_safefetch32_continuation_pc);
    generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                                                       &StubRoutines::_safefetchN_fault_pc,
                                                       &StubRoutines::_safefetchN_continuation_pc);
#ifdef COMPILER2
    if (UseMultiplyToLenIntrinsic) {
      StubRoutines::_multiplyToLen = generate_multiplyToLen();
    }
    if (UseSquareToLenIntrinsic) {
      StubRoutines::_squareToLen = generate_squareToLen();
    }
    if (UseMulAddIntrinsic) {
      StubRoutines::_mulAdd = generate_mulAdd();
    }
#ifndef _WINDOWS
    if (UseMontgomeryMultiplyIntrinsic) {
      StubRoutines::_montgomeryMultiply
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
    }
    if (UseMontgomerySquareIntrinsic) {
      StubRoutines::_montgomerySquare
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
    }
#endif // !_WINDOWS
#endif // COMPILER2

    if (UseVectorizedMismatchIntrinsic) {
      StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch();
    }
  }

 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
}; // end class declaration

void StubGenerator_generate(CodeBuffer* code, bool all) {
  StubGenerator g(code, all);
}
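// Callers (hedged sketch): StubGenerator_generate() is expected to be
// driven in two phases by StubRoutines' initialization, roughly
//
//   StubGenerator_generate(initial_code_buffer, false); // early: generate_initial()
//   StubGenerator_generate(final_code_buffer,   true);  // later: generate_all()
//
// where the buffer names are illustrative, not identifiers from this file.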