/*
 * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions

// Stub Code definitions

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    // This can destroy rscratch1 if counter is far from the code cache
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter)              \
  BLOCK_COMMENT("inc_counter " #counter);    \
  inc_counter_np_(counter);
#endif

  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address        address
  //    c_rarg1:   result                      address
  //    c_rarg2:   result type                 BasicType
  //    c_rarg3:   method                      Method*
  //    c_rarg4:   (interpreter) entry point   address
  //    c_rarg5:   parameters                  intptr_t*
  //    16(rbp):   parameter size (in words)   int
  //    24(rbp):   thread                      Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -12 [ argument word 1      ]
  // -11 [ saved r15            ] <--- rsp_after_call
  // -10 [ saved r14            ]
  //  -9 [ saved r13            ]
  //  -8 [ saved r12            ]
  //  -7 [ saved rbx            ]
  //  -6 [ call wrapper         ]
  //  -5 [ result               ]
  //  -4 [ result type          ]
  //  -3 [ method               ]
  //  -2 [ entry point          ]
  //  -1 [ parameters           ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ parameter size       ]
  //   3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address        address
  //    c_rarg1:   result                      address
  //    c_rarg2:   result type                 BasicType
  //    c_rarg3:   method                      Method*
  //    48(rbp):   (interpreter) entry point   address
  //    56(rbp):   parameters                  intptr_t*
  //    64(rbp):   parameter size (in words)   int
  //    72(rbp):   thread                      Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -60 [ argument word 1      ]
  // -59 [ saved xmm31          ] <--- rsp_after_call
  //     [ saved xmm16-xmm30    ] (EVEX enabled, else the space is blank)
  // -27 [ saved xmm15          ]
  //     [ saved xmm7-xmm14     ]
  //  -9 [ saved xmm6           ] (each xmm register takes 2 slots)
  //  -7 [ saved r15            ]
  //  -6 [ saved r14            ]
  //  -5 [ saved r13            ]
  //  -4 [ saved r12            ]
  //  -3 [ saved rdi            ]
  //  -2 [ saved rsi            ]
  //  -1 [ saved rbx            ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ call wrapper         ]
  //   3 [ result               ]
  //   4 [ result type          ]
  //   5 [ method               ]
  //   6 [ entry point          ]
  //   7 [ parameters           ]
  //   8 [ parameter size       ]
  //   9 [ thread               ]
  //
  //    Windows reserves the caller's stack space for arguments 1-4.
  //    We spill c_rarg0-c_rarg3 to this space.

  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    xmm_save_first     = 6,  // save from xmm6
    xmm_save_last      = 31, // to xmm31
    xmm_save_base      = -9,
    rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -59
    r15_off            = -7,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    call_wrapper_off   =  2,
    result_off         =  3,
    result_type_off    =  4,
    method_off         =  5,
    entry_point_off    =  6,
    parameters_off     =  7,
    parameter_size_off =  8,
    thread_off         =  9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    parameter_size_off =  2,
    thread_off         =  3
#endif
  };

#ifdef _WIN64
  Address xmm_save(int reg) {
    assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
    return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
  }
#endif

  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,   c_rarg5); // parameters
    __ movptr(entry_point,  c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);
    if (UseAVX > 2) {
      __ movl(rbx, 0xffff);
      __ kmovwl(k1, rbx);
    }
#ifdef _WIN64
    int last_reg = 15;
    if (UseAVX > 2) {
      last_reg = 31;
    }
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vextractf32x4(xmm_save(i), as_XMMRegister(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(xmm_save(i), as_XMMRegister(i));
      }
    }

    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);      // parameter pointer
    __ movl(c_rarg1, c_rarg3);           // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0)); // get parameter
    __ addptr(c_rarg2, wordSize);        // advance to next parameter
    __ decrementl(c_rarg1);              // decrement counter
    __ push(rax);                        // pass parameter
    __ jcc(Assembler::notZero, loop);

    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);              // get Method*
    __ movptr(c_rarg1, entry_point);     // get entry_point
    __ mov(r13, rsp);                    // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::call_stub: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::call_stub: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L3);
    }
#endif

    // restore regs belonging to calling function
#ifdef _WIN64
    // emit the restores for xmm regs
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vinsertf32x4(as_XMMRegister(i), as_XMMRegister(i), xmm_save(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(as_XMMRegister(i), xmm_save(i));
      }
    }
#endif
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }
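
  // A sketch of the C++-side view of the stub generated above (cf. the
  // CallStub typedef in stubRoutines.hpp and its use in JavaCalls); the
  // parameter order matches the layouts documented before
  // generate_call_stub():
  //
  //   typedef void (*CallStub)(address   link,              // call wrapper
  //                            intptr_t* result,
  //                            BasicType result_type,
  //                            Method*   method,
  //                            address   entry_point,
  //                            intptr_t* parameters,
  //                            int       size_of_parameters,
  //                            TRAPS);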
  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to setup the
  // rsp.
  //
  // rax: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    address start = __ pc();

    // same as in generate_call_stub():
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
    const Address thread        (rbp, thread_off         * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::catch_exception: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::catch_exception: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L3);
    }
#endif

    // set pending exception
    __ verify_oop(rax);

    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
    __ lea(rscratch1, ExternalAddress((address)__FILE__));
    __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
    __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL,
           "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }

  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }

  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
    address start = __ pc();

    __ movptr(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }
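
  // In C terms, the lock/cmpxchgl sequence above implements (sketch):
  //
  //   jint atomic_cmpxchg(jint x, volatile jint* dest, jint cmp) {
  //     jint old = *dest;           // the read and the conditional store
  //     if (old == cmp) *dest = x;  // happen as one atomic step
  //     return old;                 // left in rax by cmpxchg
  //   }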
  // Support for jbyte atomic::atomic_cmpxchg(jbyte exchange_value, volatile jbyte* dest,
  //                                          jbyte compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_byte() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
    address start = __ pc();

    __ movsbq(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jlong atomic::atomic_cmpxchg(jlong exchange_value,
  //                                          volatile jlong* dest,
  //                                          jlong compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jint atomic::add(jint add_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    if ( os::is_MP() ) __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);
    __ addl(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
    if ( os::is_MP() ) __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
    __ addptr(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess).  It is used as part of debugging
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp(rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // caller's fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }
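
  // The two dependent loads above amount to this sketch, assuming a
  // conventional saved-rbp chain:
  //
  //   intptr_t get_previous_fp() {
  //     intptr_t* caller_fp = (intptr_t*) *(intptr_t*) rbp;  // old_fp
  //     return *caller_fp;                                   // older_fp, for ps()
  //   }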
  // Support for intptr_t get_previous_sp()
  //
  // This routine is used to find the previous stack pointer for the
  // caller.
  address generate_get_previous_sp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_sp");
    address start = __ pc();

    __ movptr(rax, rsp);
    __ addptr(rax, 8); // return address is at the top of the stack.
    __ ret(0);

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize);      // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);      // Only check control and mask bits
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }

  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testptr(c_rarg0, c_rarg0);   // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0);     // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64( mask, relocInfo::none );
    __ emit_data64( mask, relocInfo::none );

    return start;
  }
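
  // generate_fp_mask() just emits a 16-byte constant (the 64-bit mask
  // replicated twice).  Such constants serve as memory operands for the
  // usual SSE sign tricks, e.g. (sketch; mask names as used later in
  // this file for float_sign_mask / float_sign_flip and friends):
  //
  //   andpd xmm0, [double_sign_mask]   // fabs:   clear the sign bit
  //   xorpd xmm0, [double_sign_flip]   // negate: toggle the sign bit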
  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,
      saved_r10     = 8 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr   = 16 * wordSize,
      error_msg     = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit); // if obj is NULL it is OK
    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // set r12 to heapbase for load_klass()
    __ reinit_heapbase();

    // make sure klass is 'reasonable', which is not zero.
    __ load_klass(rax, rax);  // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // restore c_rarg3
    __ pop(c_rarg2);                               // restore c_rarg2
    __ pop(r12);                                   // restore r12
    __ popf();                                     // restore flags
    __ ret(4 * wordSize);                          // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // get saved c_rarg3 back
    __ pop(c_rarg2);                               // get saved c_rarg2 back
    __ pop(r12);                                   // get saved r12 back
    __ popf();                                     // get saved flags off stack --
                                                   // will be ignored

    __ pusha();                                    // push registers
                                                   // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //     [tos +  0] 16 saved registers
    //     [tos + 16] return address
    //   * [tos + 17] error message (char*)
    //   * [tos + 18] object to verify (oop)
    //   * [tos + 19] saved rax - saved by caller and bashed
    //   * [tos + 20] saved r10 (rscratch1) - saved by caller
    //   * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ mov(rsp, r12);                               // restore rsp
    __ popa();                                      // pop registers (includes r12)
    __ ret(4 * wordSize);                           // pop caller saved stuff

    return start;
  }

  //
  // Verify that a register contains a clean 32-bit positive value
  // (high 32 bits are 0) so it can be used in 64-bit shifts.
  //
  //  Input:
  //    Rint  -  32-bit value
  //    Rtmp  -  scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#ifdef ASSERT
    Label L;
    assert_different_registers(Rtmp, Rint);
    __ movslq(Rtmp, Rint);
    __ cmpq(Rtmp, Rint);
    __ jcc(Assembler::equal, L);
    __ stop("high 32-bits of int value are not 0");
    __ bind(L);
#endif
  }

  // Generate overlap test for array copy stubs
  //
  // Input:
  //     c_rarg0 - from
  //     c_rarg1 - to
  //     c_rarg2 - element count
  //
  // Output:
  //     rax   - &from[element count - 1]
  //
  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(NULL, &L_no_overlap, sf);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
    const Register from     = c_rarg0;
    const Register to       = c_rarg1;
    const Register count    = c_rarg2;
    const Register end_from = rax;

    __ cmpptr(to, from);
    __ lea(end_from, Address(from, count, sf, 0));
    if (NOLp == NULL) {
      ExternalAddress no_overlap(no_overlap_target);
      __ jump_cc(Assembler::belowEqual, no_overlap);
      __ cmpptr(to, end_from);
      __ jump_cc(Assembler::aboveEqual, no_overlap);
    } else {
      __ jcc(Assembler::belowEqual, (*NOLp));
      __ cmpptr(to, end_from);
      __ jcc(Assembler::aboveEqual, (*NOLp));
    }
  }
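
  // In C terms, the test above branches to the no-overlap (forward copy)
  // path iff (sketch):
  //
  //   to <= from || to >= from + count * elem_size
  //
  // i.e. whenever the destination does not start strictly inside the
  // source range, a simple ascending copy is safe.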
  // Shuffle first three arg regs on Windows into Linux/Solaris locations.
  //
  // Outputs:
  //    rdi - rcx
  //    rsi - rdx
  //    rdx - r8
  //    rcx - r9
  //
  // Registers r9 and r10 are used to save rdi and rsi, which are
  // non-volatile on Windows.  r9 and r10 should not be used by the caller.
  //
  void setup_arg_regs(int nargs = 3) {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
    assert(nargs == 3 || nargs == 4, "else fix");
#ifdef _WIN64
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    if (nargs >= 4)
      __ mov(rax, r9);  // r9 is also saved_rdi
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
    if (nargs >= 4)
      __ mov(rcx, rax); // c_rarg3 (via rax)
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
  }

  void restore_arg_regs() {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
#ifdef _WIN64
    __ movptr(rdi, saved_rdi);
    __ movptr(rsi, saved_rsi);
#endif
  }

  // Generate code for an array write pre barrier
  //
  //     addr    - starting address
  //     count   - element count
  //     tmp     - scratch register
  //
  //     Destroy no registers!
  //
  void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCTLogging:
        // With G1, don't generate the call if we statically know that the target is uninitialized
        if (!dest_uninitialized) {
          __ pusha();                      // push registers
          if (count == c_rarg0) {
            if (addr == c_rarg1) {
              // exactly backwards!!
              __ xchgptr(c_rarg1, c_rarg0);
            } else {
              __ movptr(c_rarg1, count);
              __ movptr(c_rarg0, addr);
            }
          } else {
            __ movptr(c_rarg0, addr);
            __ movptr(c_rarg1, count);
          }
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
          __ popa();
        }
        break;
      case BarrierSet::CardTableForRS:
      case BarrierSet::CardTableExtension:
      case BarrierSet::ModRef:
        break;
      default:
        ShouldNotReachHere();

    }
  }

  //
  // Generate code for an array write post barrier
  //
  //  Input:
  //     start    - register containing starting address of destination array
  //     count    - elements count
  //     scratch  - scratch register
  //
  //  The input registers are overwritten.
  //
  void gen_write_ref_array_post_barrier(Register start, Register count, Register scratch) {
    assert_different_registers(start, count, scratch);
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCTLogging:
        {
          __ pusha();             // push registers (overkill)
          if (c_rarg0 == count) { // On win64 c_rarg0 == rcx
            assert_different_registers(c_rarg1, start);
            __ mov(c_rarg1, count);
            __ mov(c_rarg0, start);
          } else {
            assert_different_registers(c_rarg0, count);
            __ mov(c_rarg0, start);
            __ mov(c_rarg1, count);
          }
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
          __ popa();
        }
        break;
      case BarrierSet::CardTableForRS:
      case BarrierSet::CardTableExtension:
        {
          CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

          Label L_loop;
          const Register end = count;

          __ leaq(end, Address(start, count, TIMES_OOP, 0)); // end == start+count*oop_size
          __ subptr(end, BytesPerHeapOop);                   // end - 1 to make inclusive
          __ shrptr(start, CardTableModRefBS::card_shift);
          __ shrptr(end,   CardTableModRefBS::card_shift);
          __ subptr(end, start);                             // end --> cards count

          int64_t disp = (int64_t) ct->byte_map_base;
          __ mov64(scratch, disp);
          __ addptr(start, scratch);
          __ BIND(L_loop);
          __ movb(Address(start, count, Address::times_1), 0);
          __ decrement(count);
          __ jcc(Assembler::greaterEqual, L_loop);
        }
        break;
      default:
        ShouldNotReachHere();

    }
  }
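
  // The CardTable case above is the classic card-marking loop; in C it
  // would read (sketch):
  //
  //   jbyte* base = ct->byte_map_base;
  //   for (uintptr_t a = start; a <= end; a += card_size)
  //     base[a >> card_shift] = 0;   // 0 == dirty
  //
  // The generated form pre-biases 'start' by 'base' and walks the card
  // indices backwards in 'count'.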
  // Copy big chunks forward
  //
  // Inputs:
  //   end_from       - source arrays end address
  //   end_to         - destination array end address
  //   qword_count    - 64-bit element count, negative
  //   to             - scratch
  //   L_copy_bytes   - entry label
  //   L_copy_8_bytes - exit label
  //
  void copy_bytes_forward(Register end_from, Register end_to,
                          Register qword_count, Register to,
                          Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      if (UseAVX > 2) {
        __ movl(to, 0xffff);
        __ kmovwl(k1, to);
      }
      // Copy 64-bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit);
        __ evmovdqul(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
        __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
        __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 8);
      __ jcc(Assembler::lessEqual, L_loop);
      __ subptr(qword_count, 4);  // sub(8) and add(4)
      __ jccb(Assembler::greater, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
      }
      __ addptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32-bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
      __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
      __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
      __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
      __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);

      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 4);
      __ jcc(Assembler::lessEqual, L_loop);
    }
    __ subptr(qword_count, 4);
    __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
  }

  // Copy big chunks backward
  //
  // Inputs:
  //   from           - source arrays address
  //   dest           - destination array address
  //   qword_count    - 64-bit element count
  //   to             - scratch
  //   L_copy_bytes   - entry label
  //   L_copy_8_bytes - exit label
  //
  void copy_bytes_backward(Register from, Register dest,
                           Register qword_count, Register to,
                           Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      if (UseAVX > 2) {
        __ movl(to, 0xffff);
        __ kmovwl(k1, to);
      }
      // Copy 64-bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 0), Assembler::AVX_512bit);
        __ evmovdqul(Address(dest, qword_count, Address::times_8, 0), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
        __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8,  0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8,  0), xmm1);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
        __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
        __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
        __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
        __ movdqu(xmm3, Address(from, qword_count, Address::times_8,  0));
        __ movdqu(Address(dest, qword_count, Address::times_8,  0), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 8);
      __ jcc(Assembler::greaterEqual, L_loop);

      __ addptr(qword_count, 4);  // add(8) and sub(4)
      __ jccb(Assembler::less, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8,  0));
        __ movdqu(Address(dest, qword_count, Address::times_8,  0), xmm1);
      }
      __ subptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32-bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(from, qword_count, Address::times_8, 24));
      __ movq(Address(dest, qword_count, Address::times_8, 24), to);
      __ movq(to, Address(from, qword_count, Address::times_8, 16));
      __ movq(Address(dest, qword_count, Address::times_8, 16), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  8));
      __ movq(Address(dest, qword_count, Address::times_8,  8), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  0));
      __ movq(Address(dest, qword_count, Address::times_8,  0), to);

      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 4);
      __ jcc(Assembler::greaterEqual, L_loop);
    }
    __ addptr(qword_count, 4);
    __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
  }
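
  // Loop-index convention used by both helpers (sketch): the forward copy
  // biases the base pointers to the ends of the arrays and runs a negative
  // qword index up toward zero, roughly
  //
  //   for (intptr_t i = -qwords; i <= 0; i += step) end_to[i] = end_from[i];
  //
  // while the backward copy runs a positive index down to zero, so each
  // loop test is a single add/sub plus jcc on the index register.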
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_byte_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_byte_copy().
  //
  address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    Label L_copy_byte, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count); // make the count negative
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jccb(Assembler::zero, L_copy_byte);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ addptr(end_from, 2);
    __ addptr(end_to, 2);

    // Check for and copy trailing byte
    __ BIND(L_copy_byte);
    __ testl(byte_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movb(rax, Address(end_from, 8));
    __ movb(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }
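
  // Tail handling above in C terms (sketch): after the bulk qword copy,
  // the low bits of byte_count select the remainder,
  //
  //   if (byte_count & 4) copy_dword();
  //   if (byte_count & 2) copy_word();
  //   if (byte_count & 1) copy_byte();
  //
  // which is why the stub tests 4, 2 and 1 in turn rather than looping.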
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from high to low addresses.

    // Check for and copy trailing byte
    __ testl(byte_count, 1);
    __ jcc(Assembler::zero, L_copy_2_bytes);
    __ movb(rax, Address(from, byte_count, Address::times_1, -1));
    __ movb(Address(to, byte_count, Address::times_1, -1), rax);
    __ decrement(byte_count); // Adjust for possible trailing word

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jcc(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, byte_count, Address::times_1, -2));
    __ movw(Address(to, byte_count, Address::times_1, -2), rax);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_short_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_short_copy().
  //
  address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Original 'dest' is trashed, so we can't use it as a
    // base register for a possible trailing word copy

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }

  address generate_fill(BasicType t, bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    BLOCK_COMMENT("Entry:");

    const Register to    = c_rarg0; // source array address
    const Register value = c_rarg1; // value
    const Register count = c_rarg2; // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame
generate_fill(t, aligned, to, value, count, rax, xmm0); 1773 1774 __ leave(); // required for proper stackwalking of RuntimeStub frame 1775 __ ret(0); 1776 return start; 1777 } 1778 1779 // Arguments: 1780 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1781 // ignored 1782 // name - stub name string 1783 // 1784 // Inputs: 1785 // c_rarg0 - source array address 1786 // c_rarg1 - destination array address 1787 // c_rarg2 - element count, treated as ssize_t, can be zero 1788 // 1789 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we 1790 // let the hardware handle it. The two or four words within dwords 1791 // or qwords that span cache line boundaries will still be loaded 1792 // and stored atomically. 1793 // 1794 address generate_conjoint_short_copy(bool aligned, address nooverlap_target, 1795 address *entry, const char *name) { 1796 __ align(CodeEntryAlignment); 1797 StubCodeMark mark(this, "StubRoutines", name); 1798 address start = __ pc(); 1799 1800 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes; 1801 const Register from = rdi; // source array address 1802 const Register to = rsi; // destination array address 1803 const Register count = rdx; // elements count 1804 const Register word_count = rcx; 1805 const Register qword_count = count; 1806 1807 __ enter(); // required for proper stackwalking of RuntimeStub frame 1808 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1809 1810 if (entry != NULL) { 1811 *entry = __ pc(); 1812 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1813 BLOCK_COMMENT("Entry:"); 1814 } 1815 1816 array_overlap_test(nooverlap_target, Address::times_2); 1817 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1818 // r9 and r10 may be used to save non-volatile registers 1819 1820 // 'from', 'to' and 'count' are now valid 1821 __ movptr(word_count, count); 1822 __ shrptr(count, 2); // count => qword_count 1823 1824 // Copy from high to low addresses. Use 'to' as scratch. 
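    // Rough C sketch of the tail handling below (illustrative only, not
    // compiled; 'w' = word_count in jshorts, 'q' = qword_count = w / 4):
    //
    //   jshort* f = (jshort*) from;  jshort* t = (jshort*) to;
    //   if (w & 1) t[w-1] = f[w-1];                            // trailing short
    //   if (w & 2) ((jint*)t)[q*2] = ((jint*)f)[q*2];          // trailing int
    //   while (q > 0) { ((jlong*)t)[q-1] = ((jlong*)f)[q-1]; q--; }
    //
    // The tail is copied first so the backward qword loop only has to
    // move whole 8-byte units.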
    // Check for and copy trailing word
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, word_count, Address::times_2, -2));
    __ movw(Address(to, word_count, Address::times_2, -2), rax);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  // Side Effects:
  //   disjoint_int_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_int_oop_copy().
  //
  address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry,
                                         const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    const Register saved_to    = r11;  // saved destination array address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
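    // In outline the rest of this stub performs (illustrative C only, not
    // compiled; 'd' = dword_count, 'q' = d / 2 qwords):
    //
    //   copy q qwords from low to high (bulk chunks + trailing-qword loop);
    //   if (d & 1) copy the one remaining 32-bit element;
    //   if (is_oop) cover the written range with the card-mark post barrier.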
    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
    if (is_oop) {
      __ movq(saved_to, to);
      gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
    }

    // 'from', 'to' and 'count' are now valid
    __ movptr(dword_count, count);
    __ shrptr(count, 1); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
    __ jccb(Assembler::zero, L_exit);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ BIND(L_exit);
    if (is_oop) {
      gen_write_ref_array_post_barrier(saved_to, dword_count, rax);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
                                         address *entry, const char *name,
                                         bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
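    // The overlap test below gives memmove-style dispatch; roughly
    // (illustrative C, not compiled):
    //
    //   if ((size_t)(to - from) >= (size_t)count * 4)  // unsigned: to < from passes too
    //     goto nooverlap_target;                       // forward copy is safe there
    //   // otherwise fall through and copy backward, from high to low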
    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_4);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    if (is_oop) {
      // no registers are destroyed by this call
      gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
    }

    assert_clean_int(count, rax); // Make sure 'count' is clean int.
    // 'from', 'to' and 'count' are now valid
    __ movptr(dword_count, count);
    __ shrptr(count, 1); // count => qword_count

    // Copy from high to low addresses.  Use 'to' as scratch.

    // Check for and copy trailing dword
    __ testl(dword_count, 1);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, dword_count, Address::times_4, -4));
    __ movl(Address(to, dword_count, Address::times_4, -4), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    __ BIND(L_exit);
    if (is_oop) {
      gen_write_ref_array_post_barrier(to, dword_count, rax);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // Side Effects:
  //   disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
  //   no-overlap entry point used by generate_conjoint_long_oop_copy().
  //
  address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry,
                                          const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register qword_count = rdx;  // elements count
    const Register end_from    = from; // source array end address
    const Register end_to      = rcx;  // destination array end address
    const Register saved_to    = to;
    const Register saved_count = r11;
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    // Save no-overlap entry point for generate_conjoint_long_oop_copy()
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
    // 'from', 'to' and 'qword_count' are now valid
    if (is_oop) {
      // Save to and count for store barrier
      __ movptr(saved_count, qword_count);
      // no registers are destroyed by this call
      gen_write_ref_array_pre_barrier(to, qword_count, dest_uninitialized);
    }

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    } else {
      restore_arg_regs();
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
      __ xorptr(rax, rax); // return 0
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    if (is_oop) {
      __ BIND(L_exit);
      gen_write_ref_array_post_barrier(saved_to, saved_count, rax);
    }
    restore_arg_regs();
    if (is_oop) {
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
    }
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  address generate_conjoint_long_oop_copy(bool aligned, bool is_oop,
                                          address nooverlap_target, address *entry,
                                          const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register qword_count = rdx;  // elements count
    const Register saved_count = rcx;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_8);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
    // 'from', 'to' and 'qword_count' are now valid
    if (is_oop) {
      // Save to and count for store barrier
      __ movptr(saved_count, qword_count);
      // No registers are destroyed by this call
      gen_write_ref_array_pre_barrier(to, saved_count, dest_uninitialized);
    }

    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    } else {
      restore_arg_regs();
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
      __ xorptr(rax, rax); // return 0
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    if (is_oop) {
      __ BIND(L_exit);
      gen_write_ref_array_post_barrier(to, saved_count, rax);
    }
    restore_arg_regs();
    if (is_oop) {
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
    }
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }


  // Helper for generating a dynamic type check.
  // Smashes no registers.
  void generate_type_check(Register sub_klass,
                           Register super_check_offset,
                           Register super_klass,
                           Label& L_success) {
    assert_different_registers(sub_klass, super_check_offset, super_klass);

    BLOCK_COMMENT("type_check:");

    Label L_miss;

    __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL,
                                     super_check_offset);
    __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL);

    // Fall through on failure!
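    // The two subtype-check calls above boil down to (illustrative C,
    // not compiled):
    //
    //   if (*(Klass**)((address)sub_klass + super_check_offset) == super_klass)
    //     goto L_success;                       // fast path hit
    //   // else the slow path scans sub_klass's secondary supers,
    //   // branching to L_success if super_klass is found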
2248 __ BIND(L_miss); 2249 } 2250 2251 // 2252 // Generate checkcasting array copy stub 2253 // 2254 // Input: 2255 // c_rarg0 - source array address 2256 // c_rarg1 - destination array address 2257 // c_rarg2 - element count, treated as ssize_t, can be zero 2258 // c_rarg3 - size_t ckoff (super_check_offset) 2259 // not Win64 2260 // c_rarg4 - oop ckval (super_klass) 2261 // Win64 2262 // rsp+40 - oop ckval (super_klass) 2263 // 2264 // Output: 2265 // rax == 0 - success 2266 // rax == -1^K - failure, where K is partial transfer count 2267 // 2268 address generate_checkcast_copy(const char *name, address *entry, 2269 bool dest_uninitialized = false) { 2270 2271 Label L_load_element, L_store_element, L_do_card_marks, L_done; 2272 2273 // Input registers (after setup_arg_regs) 2274 const Register from = rdi; // source array address 2275 const Register to = rsi; // destination array address 2276 const Register length = rdx; // elements count 2277 const Register ckoff = rcx; // super_check_offset 2278 const Register ckval = r8; // super_klass 2279 2280 // Registers used as temps (r13, r14 are save-on-entry) 2281 const Register end_from = from; // source array end address 2282 const Register end_to = r13; // destination array end address 2283 const Register count = rdx; // -(count_remaining) 2284 const Register r14_length = r14; // saved copy of length 2285 // End pointers are inclusive, and if length is not zero they point 2286 // to the last unit copied: end_to[0] := end_from[0] 2287 2288 const Register rax_oop = rax; // actual oop copied 2289 const Register r11_klass = r11; // oop._klass 2290 2291 //--------------------------------------------------------------- 2292 // Assembler stub will be used for this call to arraycopy 2293 // if the two arrays are subtypes of Object[] but the 2294 // destination array type is not equal to or a supertype 2295 // of the source type. Each element must be separately 2296 // checked. 2297 2298 __ align(CodeEntryAlignment); 2299 StubCodeMark mark(this, "StubRoutines", name); 2300 address start = __ pc(); 2301 2302 __ enter(); // required for proper stackwalking of RuntimeStub frame 2303 2304 #ifdef ASSERT 2305 // caller guarantees that the arrays really are different 2306 // otherwise, we would have to make conjoint checks 2307 { Label L; 2308 array_overlap_test(L, TIMES_OOP); 2309 __ stop("checkcast_copy within a single array"); 2310 __ bind(L); 2311 } 2312 #endif //ASSERT 2313 2314 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx 2315 // ckoff => rcx, ckval => r8 2316 // r9 and r10 may be used to save non-volatile registers 2317 #ifdef _WIN64 2318 // last argument (#4) is on stack on Win64 2319 __ movptr(ckval, Address(rsp, 6 * wordSize)); 2320 #endif 2321 2322 // Caller of this entry point must set up the argument registers. 2323 if (entry != NULL) { 2324 *entry = __ pc(); 2325 BLOCK_COMMENT("Entry:"); 2326 } 2327 2328 // allocate spill slots for r13, r14 2329 enum { 2330 saved_r13_offset, 2331 saved_r14_offset, 2332 saved_rbp_offset 2333 }; 2334 __ subptr(rsp, saved_rbp_offset * wordSize); 2335 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13); 2336 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14); 2337 2338 // check that int operands are properly extended to size_t 2339 assert_clean_int(length, rax); 2340 assert_clean_int(ckoff, rax); 2341 2342 #ifdef ASSERT 2343 BLOCK_COMMENT("assert consistent ckoff/ckval"); 2344 // The ckoff and ckval must be mutually consistent, 2345 // even though caller generates both. 
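    // Consistency here means (illustrative, not compiled):
    //   assert(ckoff == (int) ckval->super_check_offset());
    // which is exactly what the generated compare below verifies.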
    { Label L;
      int sco_offset = in_bytes(Klass::super_check_offset_offset());
      __ cmpl(ckoff, Address(ckval, sco_offset));
      __ jcc(Assembler::equal, L);
      __ stop("super_check_offset inconsistent");
      __ bind(L);
    }
#endif //ASSERT

    // Loop-invariant addresses.  They are exclusive end pointers.
    Address end_from_addr(from, length, TIMES_OOP, 0);
    Address   end_to_addr(to,   length, TIMES_OOP, 0);
    // Loop-variant addresses.  They assume post-incremented count < 0.
    Address from_element_addr(end_from, count, TIMES_OOP, 0);
    Address   to_element_addr(end_to,   count, TIMES_OOP, 0);

    gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);

    // Copy from low to high addresses, indexed from the end of each array.
    __ lea(end_from, end_from_addr);
    __ lea(end_to, end_to_addr);
    __ movptr(r14_length, length); // save a copy of the length
    assert(length == count, "");   // else fix next line:
    __ negptr(count); // negate and test the length
    __ jcc(Assembler::notZero, L_load_element);

    // Empty array: Nothing to do.
    __ xorptr(rax, rax); // return 0 on (trivial) success
    __ jmp(L_done);

    // ======== begin loop ========
    // (Loop is rotated; its entry is L_load_element.)
    // Loop control:
    //   for (count = -count; count != 0; count++)
    // Base pointers src, dst are biased by 8*(count-1), to last element.
    __ align(OptoLoopAlignment);

    __ BIND(L_store_element);
    __ store_heap_oop(to_element_addr, rax_oop); // store the oop
    __ increment(count); // increment the count toward zero
    __ jcc(Assembler::zero, L_do_card_marks);

    // ======== loop entry is here ========
    __ BIND(L_load_element);
    __ load_heap_oop(rax_oop, from_element_addr); // load the oop
    __ testptr(rax_oop, rax_oop);
    __ jcc(Assembler::zero, L_store_element);

    __ load_klass(r11_klass, rax_oop); // query the object klass
    generate_type_check(r11_klass, ckoff, ckval, L_store_element);
    // ======== end loop ========

    // It was a real error; we must depend on the caller to finish the job.
    // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
    // Emit GC store barriers for the oops we have copied (r14 + rdx),
    // and report their number to the caller.
    assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1);
    Label L_post_barrier;
    __ addptr(r14_length, count); // K = (original - remaining) oops
    __ movptr(rax, r14_length);   // save the value
    __ notptr(rax); // report (-1^K) to caller (does not affect flags)
    __ jccb(Assembler::notZero, L_post_barrier);
    __ jmp(L_done); // K == 0, nothing was copied, skip post barrier

    // Come here on success only.
    __ BIND(L_do_card_marks);
    __ xorptr(rax, rax); // return 0 on success

    __ BIND(L_post_barrier);
    gen_write_ref_array_post_barrier(to, r14_length, rscratch1);

    // Common exit point (success or failure).
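    // Worked example of the return encoding: if K = 3 oops were copied
    // before the type check failed, rax = ~3 = -4; the caller recovers
    // K as ~rax.  On full success rax is simply 0.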
2418 __ BIND(L_done); 2419 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 2420 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 2421 restore_arg_regs(); 2422 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free 2423 __ leave(); // required for proper stackwalking of RuntimeStub frame 2424 __ ret(0); 2425 2426 return start; 2427 } 2428 2429 // 2430 // Generate 'unsafe' array copy stub 2431 // Though just as safe as the other stubs, it takes an unscaled 2432 // size_t argument instead of an element count. 2433 // 2434 // Input: 2435 // c_rarg0 - source array address 2436 // c_rarg1 - destination array address 2437 // c_rarg2 - byte count, treated as ssize_t, can be zero 2438 // 2439 // Examines the alignment of the operands and dispatches 2440 // to a long, int, short, or byte copy loop. 2441 // 2442 address generate_unsafe_copy(const char *name, 2443 address byte_copy_entry, address short_copy_entry, 2444 address int_copy_entry, address long_copy_entry) { 2445 2446 Label L_long_aligned, L_int_aligned, L_short_aligned; 2447 2448 // Input registers (before setup_arg_regs) 2449 const Register from = c_rarg0; // source array address 2450 const Register to = c_rarg1; // destination array address 2451 const Register size = c_rarg2; // byte count (size_t) 2452 2453 // Register used as a temp 2454 const Register bits = rax; // test copy of low bits 2455 2456 __ align(CodeEntryAlignment); 2457 StubCodeMark mark(this, "StubRoutines", name); 2458 address start = __ pc(); 2459 2460 __ enter(); // required for proper stackwalking of RuntimeStub frame 2461 2462 // bump this on entry, not on exit: 2463 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr); 2464 2465 __ mov(bits, from); 2466 __ orptr(bits, to); 2467 __ orptr(bits, size); 2468 2469 __ testb(bits, BytesPerLong-1); 2470 __ jccb(Assembler::zero, L_long_aligned); 2471 2472 __ testb(bits, BytesPerInt-1); 2473 __ jccb(Assembler::zero, L_int_aligned); 2474 2475 __ testb(bits, BytesPerShort-1); 2476 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry)); 2477 2478 __ BIND(L_short_aligned); 2479 __ shrptr(size, LogBytesPerShort); // size => short_count 2480 __ jump(RuntimeAddress(short_copy_entry)); 2481 2482 __ BIND(L_int_aligned); 2483 __ shrptr(size, LogBytesPerInt); // size => int_count 2484 __ jump(RuntimeAddress(int_copy_entry)); 2485 2486 __ BIND(L_long_aligned); 2487 __ shrptr(size, LogBytesPerLong); // size => qword_count 2488 __ jump(RuntimeAddress(long_copy_entry)); 2489 2490 return start; 2491 } 2492 2493 // Perform range checks on the proposed arraycopy. 2494 // Kills temp, but nothing else. 2495 // Also, clean the sign bits of src_pos and dst_pos. 
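  // The generated tests amount to (illustrative C, not compiled):
  //
  //   if ((juint)(src_pos + length) > (juint)arrayOop(src)->length()) goto L_failed;
  //   if ((juint)(dst_pos + length) > (juint)arrayOop(dst)->length()) goto L_failed;
  //
  // The unsigned compare ('above') also rejects sums that overflowed int.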
  void arraycopy_range_checks(Register src,     // source array oop (c_rarg0)
                              Register src_pos, // source position (c_rarg1)
                              Register dst,     // destination array oop (c_rarg2)
                              Register dst_pos, // destination position (c_rarg3)
                              Register length,
                              Register temp,
                              Label& L_failed) {
    BLOCK_COMMENT("arraycopy_range_checks:");

    //   if (src_pos + length > arrayOop(src)->length())  FAIL;
    __ movl(temp, length);
    __ addl(temp, src_pos); // src_pos + length
    __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    //   if (dst_pos + length > arrayOop(dst)->length())  FAIL;
    __ movl(temp, length);
    __ addl(temp, dst_pos); // dst_pos + length
    __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
    // Move with sign extension can be used since they are positive.
    __ movslq(src_pos, src_pos);
    __ movslq(dst_pos, dst_pos);

    BLOCK_COMMENT("arraycopy_range_checks done");
  }

  //
  //  Generate generic array copy stubs
  //
  //  Input:
  //    c_rarg0    -  src oop
  //    c_rarg1    -  src_pos (32-bits)
  //    c_rarg2    -  dst oop
  //    c_rarg3    -  dst_pos (32-bits)
  // not Win64
  //    c_rarg4    -  element count (32-bits)
  // Win64
  //    rsp+40     -  element count (32-bits)
  //
  //  Output:
  //    rax ==  0  -  success
  //    rax == -1^K - failure, where K is partial transfer count
  //
  address generate_generic_copy(const char *name,
                                address byte_copy_entry, address short_copy_entry,
                                address int_copy_entry, address oop_copy_entry,
                                address long_copy_entry, address checkcast_copy_entry) {

    Label L_failed, L_failed_0, L_objArray;
    Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;

    // Input registers
    const Register src     = c_rarg0; // source array oop
    const Register src_pos = c_rarg1; // source position
    const Register dst     = c_rarg2; // destination array oop
    const Register dst_pos = c_rarg3; // destination position
#ifndef _WIN64
    const Register length  = c_rarg4;
#else
    const Address  length(rsp, 6 * wordSize); // elements count is on stack on Win64
#endif

    { int modulus = CodeEntryAlignment;
      int target  = modulus - 5; // 5 = sizeof jmp(L_failed)
      int advance = target - (__ offset() % modulus);
      if (advance < 0) advance += modulus;
      if (advance > 0) __ nop(advance);
    }
    StubCodeMark mark(this, "StubRoutines", name);

    // Short-hop target to L_failed.  Makes for denser prologue code.
    __ BIND(L_failed_0);
    __ jmp(L_failed);
    assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");

    __ align(CodeEntryAlignment);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_generic_array_copy_ctr);

    //-----------------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the following conditions are met:
    //
    // (1) src and dst must not be null.
    // (2) src_pos must not be negative.
    // (3) dst_pos must not be negative.
    // (4) length  must not be negative.
    // (5) src klass and dst klass should be the same and not NULL.
    // (6) src and dst should be arrays.
    // (7) src_pos + length must not exceed length of src.
    // (8) dst_pos + length must not exceed length of dst.
    //

    //  if (src == NULL) return -1;
    __ testptr(src, src); // src oop
    size_t j1off = __ offset();
    __ jccb(Assembler::zero, L_failed_0);

    //  if (src_pos < 0) return -1;
    __ testl(src_pos, src_pos); // src_pos (32-bits)
    __ jccb(Assembler::negative, L_failed_0);

    //  if (dst == NULL) return -1;
    __ testptr(dst, dst); // dst oop
    __ jccb(Assembler::zero, L_failed_0);

    //  if (dst_pos < 0) return -1;
    __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
    size_t j4off = __ offset();
    __ jccb(Assembler::negative, L_failed_0);

    // The first four tests are very dense code,
    // but not quite dense enough to put four
    // jumps in a 16-byte instruction fetch buffer.
    // That's good, because some branch predictors
    // do not like jumps so close together.
    // Make sure of this.
    guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");

    // registers used as temp
    const Register r11_length    = r11; // elements count to copy
    const Register r10_src_klass = r10; // array klass

    //  if (length < 0) return -1;
    __ movl(r11_length, length); // length (elements count, 32-bits value)
    __ testl(r11_length, r11_length);
    __ jccb(Assembler::negative, L_failed_0);

    __ load_klass(r10_src_klass, src);
#ifdef ASSERT
    //  assert(src->klass() != NULL);
    {
      BLOCK_COMMENT("assert klasses not null {");
      Label L1, L2;
      __ testptr(r10_src_klass, r10_src_klass);
      __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL
      __ bind(L1);
      __ stop("broken null klass");
      __ bind(L2);
      __ load_klass(rax, dst);
      __ cmpq(rax, 0);
      __ jcc(Assembler::equal, L1); // this would be broken also
      BLOCK_COMMENT("} assert klasses not null done");
    }
#endif

    // Load layout helper (32-bits)
    //
    //  |array_tag|     | header_size | element_type |     |log2_element_size|
    // 32        30    24            16              8     2                 0
    //
    //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
    //

    const int lh_offset = in_bytes(Klass::layout_helper_offset());

    // Handle objArrays completely differently...
    const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
    __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh);
    __ jcc(Assembler::equal, L_objArray);

    //  if (src->klass() != dst->klass()) return -1;
    __ load_klass(rax, dst);
    __ cmpq(r10_src_klass, rax);
    __ jcc(Assembler::notEqual, L_failed);

    const Register rax_lh = rax; // layout helper
    __ movl(rax_lh, Address(r10_src_klass, lh_offset));

    //  if (!src->is_Array()) return -1;
    __ cmpl(rax_lh, Klass::_lh_neutral_value);
    __ jcc(Assembler::greaterEqual, L_failed);

    // At this point, it is known to be a typeArray (array_tag 0x3).
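    // Decoding the layout helper, roughly (illustrative C, not compiled;
    // field positions per the diagram above):
    //
    //   jint lh          = klass->layout_helper();
    //   int  tag         = lh >> 30;           // 0x3 = typeArray, 0x2 = objArray
    //   int  header_size = (lh >> 16) & 0xFF;  // byte offset of element 0
    //   int  log2_esize  = lh & _lh_log2_element_size_mask;
    //
    // The shifts and masks below extract exactly these fields via the
    // Klass::_lh_* constants.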
2677 #ifdef ASSERT 2678 { 2679 BLOCK_COMMENT("assert primitive array {"); 2680 Label L; 2681 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift)); 2682 __ jcc(Assembler::greaterEqual, L); 2683 __ stop("must be a primitive array"); 2684 __ bind(L); 2685 BLOCK_COMMENT("} assert primitive array done"); 2686 } 2687 #endif 2688 2689 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2690 r10, L_failed); 2691 2692 // TypeArrayKlass 2693 // 2694 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 2695 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 2696 // 2697 2698 const Register r10_offset = r10; // array offset 2699 const Register rax_elsize = rax_lh; // element size 2700 2701 __ movl(r10_offset, rax_lh); 2702 __ shrl(r10_offset, Klass::_lh_header_size_shift); 2703 __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset 2704 __ addptr(src, r10_offset); // src array offset 2705 __ addptr(dst, r10_offset); // dst array offset 2706 BLOCK_COMMENT("choose copy loop based on element size"); 2707 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize 2708 2709 // next registers should be set before the jump to corresponding stub 2710 const Register from = c_rarg0; // source array address 2711 const Register to = c_rarg1; // destination array address 2712 const Register count = c_rarg2; // elements count 2713 2714 // 'from', 'to', 'count' registers should be set in such order 2715 // since they are the same as 'src', 'src_pos', 'dst'. 2716 2717 __ BIND(L_copy_bytes); 2718 __ cmpl(rax_elsize, 0); 2719 __ jccb(Assembler::notEqual, L_copy_shorts); 2720 __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr 2721 __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr 2722 __ movl2ptr(count, r11_length); // length 2723 __ jump(RuntimeAddress(byte_copy_entry)); 2724 2725 __ BIND(L_copy_shorts); 2726 __ cmpl(rax_elsize, LogBytesPerShort); 2727 __ jccb(Assembler::notEqual, L_copy_ints); 2728 __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr 2729 __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr 2730 __ movl2ptr(count, r11_length); // length 2731 __ jump(RuntimeAddress(short_copy_entry)); 2732 2733 __ BIND(L_copy_ints); 2734 __ cmpl(rax_elsize, LogBytesPerInt); 2735 __ jccb(Assembler::notEqual, L_copy_longs); 2736 __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr 2737 __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr 2738 __ movl2ptr(count, r11_length); // length 2739 __ jump(RuntimeAddress(int_copy_entry)); 2740 2741 __ BIND(L_copy_longs); 2742 #ifdef ASSERT 2743 { 2744 BLOCK_COMMENT("assert long copy {"); 2745 Label L; 2746 __ cmpl(rax_elsize, LogBytesPerLong); 2747 __ jcc(Assembler::equal, L); 2748 __ stop("must be long copy, but elsize is wrong"); 2749 __ bind(L); 2750 BLOCK_COMMENT("} assert long copy done"); 2751 } 2752 #endif 2753 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr 2754 __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr 2755 __ movl2ptr(count, r11_length); // length 2756 __ jump(RuntimeAddress(long_copy_entry)); 2757 2758 // ObjArrayKlass 2759 __ BIND(L_objArray); 2760 // live at this point: r10_src_klass, r11_length, src[_pos], dst[_pos] 2761 2762 Label L_plain_copy, L_checkcast_copy; 2763 // test array classes for subtyping 2764 __ load_klass(rax, dst); 2765 __ cmpq(r10_src_klass, rax); // usual case is exact equality 2766 __ 
jcc(Assembler::notEqual, L_checkcast_copy); 2767 2768 // Identically typed arrays can be copied without element-wise checks. 2769 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2770 r10, L_failed); 2771 2772 __ lea(from, Address(src, src_pos, TIMES_OOP, 2773 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr 2774 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2775 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr 2776 __ movl2ptr(count, r11_length); // length 2777 __ BIND(L_plain_copy); 2778 __ jump(RuntimeAddress(oop_copy_entry)); 2779 2780 __ BIND(L_checkcast_copy); 2781 // live at this point: r10_src_klass, r11_length, rax (dst_klass) 2782 { 2783 // Before looking at dst.length, make sure dst is also an objArray. 2784 __ cmpl(Address(rax, lh_offset), objArray_lh); 2785 __ jcc(Assembler::notEqual, L_failed); 2786 2787 // It is safe to examine both src.length and dst.length. 2788 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2789 rax, L_failed); 2790 2791 const Register r11_dst_klass = r11; 2792 __ load_klass(r11_dst_klass, dst); // reload 2793 2794 // Marshal the base address arguments now, freeing registers. 2795 __ lea(from, Address(src, src_pos, TIMES_OOP, 2796 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2797 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2798 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2799 __ movl(count, length); // length (reloaded) 2800 Register sco_temp = c_rarg3; // this register is free now 2801 assert_different_registers(from, to, count, sco_temp, 2802 r11_dst_klass, r10_src_klass); 2803 assert_clean_int(count, sco_temp); 2804 2805 // Generate the type check. 2806 const int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2807 __ movl(sco_temp, Address(r11_dst_klass, sco_offset)); 2808 assert_clean_int(sco_temp, rax); 2809 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy); 2810 2811 // Fetch destination element klass from the ObjArrayKlass header. 2812 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 2813 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset)); 2814 __ movl( sco_temp, Address(r11_dst_klass, sco_offset)); 2815 assert_clean_int(sco_temp, rax); 2816 2817 // the checkcast_copy loop needs two extra arguments: 2818 assert(c_rarg3 == sco_temp, "#3 already in place"); 2819 // Set up arguments for checkcast_copy_entry. 
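      // In effect this is a tail call (illustrative, not compiled):
      //
      //   checkcast_copy(from, to, count, ckoff, ckval);
      //
      // with the first four values marshalled into rdi/rsi/rdx/rcx by
      // setup_arg_regs(4) below and ckval passed in r8, matching the
      // post-marshalling entry captured in generate_checkcast_copy().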
2820 setup_arg_regs(4); 2821 __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris 2822 __ jump(RuntimeAddress(checkcast_copy_entry)); 2823 } 2824 2825 __ BIND(L_failed); 2826 __ xorptr(rax, rax); 2827 __ notptr(rax); // return -1 2828 __ leave(); // required for proper stackwalking of RuntimeStub frame 2829 __ ret(0); 2830 2831 return start; 2832 } 2833 2834 void generate_arraycopy_stubs() { 2835 address entry; 2836 address entry_jbyte_arraycopy; 2837 address entry_jshort_arraycopy; 2838 address entry_jint_arraycopy; 2839 address entry_oop_arraycopy; 2840 address entry_jlong_arraycopy; 2841 address entry_checkcast_arraycopy; 2842 2843 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, 2844 "jbyte_disjoint_arraycopy"); 2845 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy, 2846 "jbyte_arraycopy"); 2847 2848 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, 2849 "jshort_disjoint_arraycopy"); 2850 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy, 2851 "jshort_arraycopy"); 2852 2853 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry, 2854 "jint_disjoint_arraycopy"); 2855 StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry, 2856 &entry_jint_arraycopy, "jint_arraycopy"); 2857 2858 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry, 2859 "jlong_disjoint_arraycopy"); 2860 StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry, 2861 &entry_jlong_arraycopy, "jlong_arraycopy"); 2862 2863 2864 if (UseCompressedOops) { 2865 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry, 2866 "oop_disjoint_arraycopy"); 2867 StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry, 2868 &entry_oop_arraycopy, "oop_arraycopy"); 2869 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry, 2870 "oop_disjoint_arraycopy_uninit", 2871 /*dest_uninitialized*/true); 2872 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry, 2873 NULL, "oop_arraycopy_uninit", 2874 /*dest_uninitialized*/true); 2875 } else { 2876 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry, 2877 "oop_disjoint_arraycopy"); 2878 StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry, 2879 &entry_oop_arraycopy, "oop_arraycopy"); 2880 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry, 2881 "oop_disjoint_arraycopy_uninit", 2882 /*dest_uninitialized*/true); 2883 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry, 2884 NULL, "oop_arraycopy_uninit", 2885 /*dest_uninitialized*/true); 2886 } 2887 2888 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 2889 StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, 2890 /*dest_uninitialized*/true); 2891 2892 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", 2893 entry_jbyte_arraycopy, 2894 entry_jshort_arraycopy, 2895 entry_jint_arraycopy, 2896 entry_jlong_arraycopy); 2897 StubRoutines::_generic_arraycopy = 
generate_generic_copy("generic_arraycopy",
                                                           entry_jbyte_arraycopy,
                                                           entry_jshort_arraycopy,
                                                           entry_jint_arraycopy,
                                                           entry_oop_arraycopy,
                                                           entry_jlong_arraycopy,
                                                           entry_checkcast_arraycopy);

    StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
    StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
    StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
    StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
    StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
    StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");

    // We don't generate specialized code for HeapWord-aligned source
    // arrays, so just use the code we've already generated
    StubRoutines::_arrayof_jbyte_disjoint_arraycopy  = StubRoutines::_jbyte_disjoint_arraycopy;
    StubRoutines::_arrayof_jbyte_arraycopy           = StubRoutines::_jbyte_arraycopy;

    StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy;
    StubRoutines::_arrayof_jshort_arraycopy          = StubRoutines::_jshort_arraycopy;

    StubRoutines::_arrayof_jint_disjoint_arraycopy   = StubRoutines::_jint_disjoint_arraycopy;
    StubRoutines::_arrayof_jint_arraycopy            = StubRoutines::_jint_arraycopy;

    StubRoutines::_arrayof_jlong_disjoint_arraycopy  = StubRoutines::_jlong_disjoint_arraycopy;
    StubRoutines::_arrayof_jlong_arraycopy           = StubRoutines::_jlong_arraycopy;

    StubRoutines::_arrayof_oop_disjoint_arraycopy    = StubRoutines::_oop_disjoint_arraycopy;
    StubRoutines::_arrayof_oop_arraycopy             = StubRoutines::_oop_arraycopy;

    StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit;
    StubRoutines::_arrayof_oop_arraycopy_uninit          = StubRoutines::_oop_arraycopy_uninit;
  }

  // AES intrinsic stubs
  enum {AESBlockSize = 16};

  address generate_key_shuffle_mask() {
    __ align(16);
    StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask");
    address start = __ pc();
    __ emit_data64( 0x0405060700010203, relocInfo::none );
    __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none );
    return start;
  }

  address generate_counter_shuffle_mask() {
    __ align(16);
    StubCodeMark mark(this, "StubRoutines", "counter_shuffle_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }

  // Utility routine for loading a 128-bit key word in little-endian format;
  // the shuffle mask may optionally be supplied already loaded in an xmm register.
  void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
    __ movdqu(xmmdst, Address(key, offset));
    if (xmm_shuf_mask != NULL) {
      __ pshufb(xmmdst, xmm_shuf_mask);
    } else {
      __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    }
  }

  // Utility routine for incrementing the 128-bit counter (the iv in CTR mode)
  void inc_counter(Register reg, XMMRegister xmmdst, int inc_delta, Label& next_block) {
    __ pextrq(reg, xmmdst, 0x0);
    __ addq(reg, inc_delta);
    __ pinsrq(xmmdst, reg, 0x0);
    __ jcc(Assembler::carryClear, next_block); // jump if no carry
    __ pextrq(reg, xmmdst, 0x01); // Carry
    __ addq(reg, 0x01);
    __
pinsrq(xmmdst, reg, 0x01); //Carry end 2974 __ BIND(next_block); // next instruction 2975 } 2976 2977 // Arguments: 2978 // 2979 // Inputs: 2980 // c_rarg0 - source byte array address 2981 // c_rarg1 - destination byte array address 2982 // c_rarg2 - K (key) in little endian int array 2983 // 2984 address generate_aescrypt_encryptBlock() { 2985 assert(UseAES, "need AES instructions and misaligned SSE support"); 2986 __ align(CodeEntryAlignment); 2987 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); 2988 Label L_doLast; 2989 address start = __ pc(); 2990 2991 const Register from = c_rarg0; // source array address 2992 const Register to = c_rarg1; // destination array address 2993 const Register key = c_rarg2; // key array address 2994 const Register keylen = rax; 2995 2996 const XMMRegister xmm_result = xmm0; 2997 const XMMRegister xmm_key_shuf_mask = xmm1; 2998 // On win64 xmm6-xmm15 must be preserved so don't use them. 2999 const XMMRegister xmm_temp1 = xmm2; 3000 const XMMRegister xmm_temp2 = xmm3; 3001 const XMMRegister xmm_temp3 = xmm4; 3002 const XMMRegister xmm_temp4 = xmm5; 3003 3004 __ enter(); // required for proper stackwalking of RuntimeStub frame 3005 3006 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3007 // context for the registers used, where all instructions below are using 128-bit mode 3008 // On EVEX without VL and BW, these instructions will all be AVX. 3009 if (VM_Version::supports_avx512vlbw()) { 3010 __ movl(rax, 0xffff); 3011 __ kmovql(k1, rax); 3012 } 3013 3014 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3015 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3016 3017 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3018 __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input 3019 3020 // For encryption, the java expanded key ordering is just what we need 3021 // we don't know if the key is aligned, hence not using load-execute form 3022 3023 load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask); 3024 __ pxor(xmm_result, xmm_temp1); 3025 3026 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3027 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3028 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3029 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3030 3031 __ aesenc(xmm_result, xmm_temp1); 3032 __ aesenc(xmm_result, xmm_temp2); 3033 __ aesenc(xmm_result, xmm_temp3); 3034 __ aesenc(xmm_result, xmm_temp4); 3035 3036 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3037 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3038 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3039 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3040 3041 __ aesenc(xmm_result, xmm_temp1); 3042 __ aesenc(xmm_result, xmm_temp2); 3043 __ aesenc(xmm_result, xmm_temp3); 3044 __ aesenc(xmm_result, xmm_temp4); 3045 3046 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3047 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3048 3049 __ cmpl(keylen, 44); 3050 __ jccb(Assembler::equal, L_doLast); 3051 3052 __ aesenc(xmm_result, xmm_temp1); 3053 __ aesenc(xmm_result, xmm_temp2); 3054 3055 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3056 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3057 3058 __ cmpl(keylen, 52); 3059 __ jccb(Assembler::equal, L_doLast); 3060 3061 __ aesenc(xmm_result, xmm_temp1); 3062 __ aesenc(xmm_result, xmm_temp2); 3063 3064 load_key(xmm_temp1, key, 0xd0, 
xmm_key_shuf_mask); 3065 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3066 3067 __ BIND(L_doLast); 3068 __ aesenc(xmm_result, xmm_temp1); 3069 __ aesenclast(xmm_result, xmm_temp2); 3070 __ movdqu(Address(to, 0), xmm_result); // store the result 3071 __ xorptr(rax, rax); // return 0 3072 __ leave(); // required for proper stackwalking of RuntimeStub frame 3073 __ ret(0); 3074 3075 return start; 3076 } 3077 3078 3079 // Arguments: 3080 // 3081 // Inputs: 3082 // c_rarg0 - source byte array address 3083 // c_rarg1 - destination byte array address 3084 // c_rarg2 - K (key) in little endian int array 3085 // 3086 address generate_aescrypt_decryptBlock() { 3087 assert(UseAES, "need AES instructions and misaligned SSE support"); 3088 __ align(CodeEntryAlignment); 3089 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); 3090 Label L_doLast; 3091 address start = __ pc(); 3092 3093 const Register from = c_rarg0; // source array address 3094 const Register to = c_rarg1; // destination array address 3095 const Register key = c_rarg2; // key array address 3096 const Register keylen = rax; 3097 3098 const XMMRegister xmm_result = xmm0; 3099 const XMMRegister xmm_key_shuf_mask = xmm1; 3100 // On win64 xmm6-xmm15 must be preserved so don't use them. 3101 const XMMRegister xmm_temp1 = xmm2; 3102 const XMMRegister xmm_temp2 = xmm3; 3103 const XMMRegister xmm_temp3 = xmm4; 3104 const XMMRegister xmm_temp4 = xmm5; 3105 3106 __ enter(); // required for proper stackwalking of RuntimeStub frame 3107 3108 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3109 // context for the registers used, where all instructions below are using 128-bit mode 3110 // On EVEX without VL and BW, these instructions will all be AVX. 3111 if (VM_Version::supports_avx512vlbw()) { 3112 __ movl(rax, 0xffff); 3113 __ kmovql(k1, rax); 3114 } 3115 3116 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3117 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3118 3119 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3120 __ movdqu(xmm_result, Address(from, 0)); 3121 3122 // for decryption java expanded key ordering is rotated one position from what we want 3123 // so we start from 0x10 here and hit 0x00 last 3124 // we don't know if the key is aligned, hence not using load-execute form 3125 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3126 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3127 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3128 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3129 3130 __ pxor (xmm_result, xmm_temp1); 3131 __ aesdec(xmm_result, xmm_temp2); 3132 __ aesdec(xmm_result, xmm_temp3); 3133 __ aesdec(xmm_result, xmm_temp4); 3134 3135 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3136 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3137 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3138 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3139 3140 __ aesdec(xmm_result, xmm_temp1); 3141 __ aesdec(xmm_result, xmm_temp2); 3142 __ aesdec(xmm_result, xmm_temp3); 3143 __ aesdec(xmm_result, xmm_temp4); 3144 3145 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3146 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3147 load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask); 3148 3149 __ cmpl(keylen, 44); 3150 __ jccb(Assembler::equal, L_doLast); 3151 3152 __ aesdec(xmm_result, xmm_temp1); 3153 __ aesdec(xmm_result, xmm_temp2); 3154 3155 
load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3156 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3157 3158 __ cmpl(keylen, 52); 3159 __ jccb(Assembler::equal, L_doLast); 3160 3161 __ aesdec(xmm_result, xmm_temp1); 3162 __ aesdec(xmm_result, xmm_temp2); 3163 3164 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3165 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3166 3167 __ BIND(L_doLast); 3168 __ aesdec(xmm_result, xmm_temp1); 3169 __ aesdec(xmm_result, xmm_temp2); 3170 3171 // for decryption the aesdeclast operation is always on key+0x00 3172 __ aesdeclast(xmm_result, xmm_temp3); 3173 __ movdqu(Address(to, 0), xmm_result); // store the result 3174 __ xorptr(rax, rax); // return 0 3175 __ leave(); // required for proper stackwalking of RuntimeStub frame 3176 __ ret(0); 3177 3178 return start; 3179 } 3180 3181 3182 // Arguments: 3183 // 3184 // Inputs: 3185 // c_rarg0 - source byte array address 3186 // c_rarg1 - destination byte array address 3187 // c_rarg2 - K (key) in little endian int array 3188 // c_rarg3 - r vector byte array address 3189 // c_rarg4 - input length 3190 // 3191 // Output: 3192 // rax - input length 3193 // 3194 address generate_cipherBlockChaining_encryptAESCrypt() { 3195 assert(UseAES, "need AES instructions and misaligned SSE support"); 3196 __ align(CodeEntryAlignment); 3197 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); 3198 address start = __ pc(); 3199 3200 Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256; 3201 const Register from = c_rarg0; // source array address 3202 const Register to = c_rarg1; // destination array address 3203 const Register key = c_rarg2; // key array address 3204 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3205 // and left with the results of the last encryption block 3206 #ifndef _WIN64 3207 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3208 #else 3209 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3210 const Register len_reg = r10; // pick the first volatile windows register 3211 #endif 3212 const Register pos = rax; 3213 3214 // xmm register assignments for the loops below 3215 const XMMRegister xmm_result = xmm0; 3216 const XMMRegister xmm_temp = xmm1; 3217 // keys 0-10 preloaded into xmm2-xmm12 3218 const int XMM_REG_NUM_KEY_FIRST = 2; 3219 const int XMM_REG_NUM_KEY_LAST = 15; 3220 const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3221 const XMMRegister xmm_key10 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10); 3222 const XMMRegister xmm_key11 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11); 3223 const XMMRegister xmm_key12 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12); 3224 const XMMRegister xmm_key13 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13); 3225 3226 __ enter(); // required for proper stackwalking of RuntimeStub frame 3227 3228 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3229 // context for the registers used, where all instructions below are using 128-bit mode 3230 // On EVEX without VL and BW, these instructions will all be AVX. 
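    // The per-key-size loops below all implement CBC encryption over
    // 16-byte blocks; roughly (illustrative C, not compiled):
    //
    //   for (p = 0; p < len; p += 16) {
    //     r = AES_encrypt(r ^ in[p .. p+15], expanded_key);
    //     out[p .. p+15] = r;
    //   }
    //   // the final r is stored back into rvec on exit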
3231 if (VM_Version::supports_avx512vlbw()) { 3232 __ movl(rax, 0xffff); 3233 __ kmovql(k1, rax); 3234 } 3235 3236 #ifdef _WIN64 3237 // on win64, fill len_reg from stack position 3238 __ movl(len_reg, len_mem); 3239 // save the xmm registers which must be preserved 6-15 3240 __ subptr(rsp, -rsp_after_call_off * wordSize); 3241 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3242 __ movdqu(xmm_save(i), as_XMMRegister(i)); 3243 } 3244 #else 3245 __ push(len_reg); // Save 3246 #endif 3247 3248 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 3249 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3250 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0 3251 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) { 3252 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3253 offset += 0x10; 3254 } 3255 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 3256 3257 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3258 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3259 __ cmpl(rax, 44); 3260 __ jcc(Assembler::notEqual, L_key_192_256); 3261 3262 // 128 bit code follows here 3263 __ movptr(pos, 0); 3264 __ align(OptoLoopAlignment); 3265 3266 __ BIND(L_loopTop_128); 3267 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3268 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3269 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3270 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) { 3271 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3272 } 3273 __ aesenclast(xmm_result, xmm_key10); 3274 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3275 // no need to store r to memory until we exit 3276 __ addptr(pos, AESBlockSize); 3277 __ subptr(len_reg, AESBlockSize); 3278 __ jcc(Assembler::notEqual, L_loopTop_128); 3279 3280 __ BIND(L_exit); 3281 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object 3282 3283 #ifdef _WIN64 3284 // restore xmm regs belonging to calling function 3285 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3286 __ movdqu(as_XMMRegister(i), xmm_save(i)); 3287 } 3288 __ movl(rax, len_mem); 3289 #else 3290 __ pop(rax); // return length 3291 #endif 3292 __ leave(); // required for proper stackwalking of RuntimeStub frame 3293 __ ret(0); 3294 3295 __ BIND(L_key_192_256); 3296 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 3297 load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask); 3298 load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask); 3299 __ cmpl(rax, 52); 3300 __ jcc(Assembler::notEqual, L_key_256); 3301 3302 // 192-bit code follows here (could be changed to use more xmm registers) 3303 __ movptr(pos, 0); 3304 __ align(OptoLoopAlignment); 3305 3306 __ BIND(L_loopTop_192); 3307 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3308 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3309 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3310 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) { 3311 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3312 } 3313 __ aesenclast(xmm_result, 
xmm_key12);
    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_loopTop_192);
    __ jmp(L_exit);

    __ BIND(L_key_256);
    // 256-bit code follows here (could be changed to use more xmm registers)
    load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask);
    __ movptr(pos, 0);
    __ align(OptoLoopAlignment);

    __ BIND(L_loopTop_256);
    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
    __ pxor  (xmm_result, xmm_temp);  // xor with the current r vector
    __ pxor  (xmm_result, xmm_key0);  // do the aes rounds
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) {
      __ aesenc(xmm_result, as_XMMRegister(rnum));
    }
    load_key(xmm_temp, key, 0xe0);
    __ aesenclast(xmm_result, xmm_temp);
    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_loopTop_256);
    __ jmp(L_exit);

    return start;
  }

  // Safefetch stubs.
  void generate_safefetch(const char* name, int size, address* entry,
                          address* fault_pc, address* continuation_pc) {
    // safefetch signatures:
    //   int      SafeFetch32(int*      adr, int      errValue);
    //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
    //
    // arguments:
    //   c_rarg0 = adr
    //   c_rarg1 = errValue
    //
    // result:
    //   rax = *adr or errValue

    StubCodeMark mark(this, "StubRoutines", name);

    // Entry point, pc or function descriptor.
    *entry = __ pc();

    // Load *adr into c_rarg1, may fault.
3366 *fault_pc = __ pc(); 3367 switch (size) { 3368 case 4: 3369 // int32_t 3370 __ movl(c_rarg1, Address(c_rarg0, 0)); 3371 break; 3372 case 8: 3373 // int64_t 3374 __ movq(c_rarg1, Address(c_rarg0, 0)); 3375 break; 3376 default: 3377 ShouldNotReachHere(); 3378 } 3379 3380 // return errValue or *adr 3381 *continuation_pc = __ pc(); 3382 __ movq(rax, c_rarg1); 3383 __ ret(0); 3384 } 3385 3386 // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time 3387 // to hide instruction latency 3388 // 3389 // Arguments: 3390 // 3391 // Inputs: 3392 // c_rarg0 - source byte array address 3393 // c_rarg1 - destination byte array address 3394 // c_rarg2 - K (key) in little endian int array 3395 // c_rarg3 - r vector byte array address 3396 // c_rarg4 - input length 3397 // 3398 // Output: 3399 // rax - input length 3400 // 3401 address generate_cipherBlockChaining_decryptAESCrypt_Parallel() { 3402 assert(UseAES, "need AES instructions and misaligned SSE support"); 3403 __ align(CodeEntryAlignment); 3404 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 3405 address start = __ pc(); 3406 3407 const Register from = c_rarg0; // source array address 3408 const Register to = c_rarg1; // destination array address 3409 const Register key = c_rarg2; // key array address 3410 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3411 // and left with the results of the last encryption block 3412 #ifndef _WIN64 3413 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3414 #else 3415 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3416 const Register len_reg = r10; // pick the first volatile windows register 3417 #endif 3418 const Register pos = rax; 3419 3420 const int PARALLEL_FACTOR = 4; 3421 const int ROUNDS[3] = { 10, 12, 14 }; // aes rounds for key128, key192, key256 3422 3423 Label L_exit; 3424 Label L_singleBlock_loopTopHead[3]; // 128, 192, 256 3425 Label L_singleBlock_loopTopHead2[3]; // 128, 192, 256 3426 Label L_singleBlock_loopTop[3]; // 128, 192, 256 3427 Label L_multiBlock_loopTopHead[3]; // 128, 192, 256 3428 Label L_multiBlock_loopTop[3]; // 128, 192, 256 3429 3430 // keys 0-10 preloaded into xmm5-xmm15 3431 const int XMM_REG_NUM_KEY_FIRST = 5; 3432 const int XMM_REG_NUM_KEY_LAST = 15; 3433 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3434 const XMMRegister xmm_key_last = as_XMMRegister(XMM_REG_NUM_KEY_LAST); 3435 3436 __ enter(); // required for proper stackwalking of RuntimeStub frame 3437 3438 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3439 // context for the registers used, where all instructions below are using 128-bit mode 3440 // On EVEX without VL and BW, these instructions will all be AVX. 
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

#ifdef _WIN64
    // on win64, fill len_reg from stack position
    __ movl(len_reg, len_mem);
    // save the xmm registers which must be preserved 6-15
    __ subptr(rsp, -rsp_after_call_off * wordSize);
    for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
      __ movdqu(xmm_save(i), as_XMMRegister(i));
    }
#else
    __ push(len_reg); // Save
#endif
    __ push(rbx);
    // the java expanded key ordering is rotated one position from what we want
    // so we start from 0x10 here and hit 0x00 last
    const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front
    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    // load up xmm regs 5 thru 15 with keys 0x10 - 0xa0, then 0x00
    for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) {
      load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
      offset += 0x10;
    }
    load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask);

    const XMMRegister xmm_prev_block_cipher = xmm1; // holds cipher of previous block

    // registers holding the four results in the parallelized loop
    const XMMRegister xmm_result0 = xmm0;
    const XMMRegister xmm_result1 = xmm2;
    const XMMRegister xmm_result2 = xmm3;
    const XMMRegister xmm_result3 = xmm4;

    __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec

    __ xorptr(pos, pos);

    // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (44=128, 52=192, 60=256))
    __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
    __ cmpl(rbx, 52);
    __ jcc(Assembler::equal, L_multiBlock_loopTopHead[1]);
    __ cmpl(rbx, 60);
    __ jcc(Assembler::equal, L_multiBlock_loopTopHead[2]);

#define DoFour(opc, src_reg)        \
    __ opc(xmm_result0, src_reg);   \
    __ opc(xmm_result1, src_reg);   \
    __ opc(xmm_result2, src_reg);   \
    __ opc(xmm_result3, src_reg);

    for (int k = 0; k < 3; ++k) {
      __ BIND(L_multiBlock_loopTopHead[k]);
      if (k != 0) {
        __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left
        __ jcc(Assembler::less, L_singleBlock_loopTopHead2[k]);
      }
      if (k == 1) {
        __ subptr(rsp, 6 * wordSize);
        __ movdqu(Address(rsp, 0), xmm15); // save last_key from xmm15
        load_key(xmm15, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0
        __ movdqu(Address(rsp, 2 * wordSize), xmm15);
        load_key(xmm1, key, 0xc0);  // 0xc0;
        __ movdqu(Address(rsp, 4 * wordSize), xmm1);
      } else if (k == 2) {
        __ subptr(rsp, 10 * wordSize);
        __ movdqu(Address(rsp, 0), xmm15); // save last_key from xmm15
        load_key(xmm15, key, 0xd0); // 0xd0; 256-bit key goes up to 0xe0
        __ movdqu(Address(rsp, 6 * wordSize), xmm15);
        load_key(xmm1, key, 0xe0);  // 0xe0;
        __ movdqu(Address(rsp, 8 * wordSize), xmm1);
        load_key(xmm15, key, 0xb0); // 0xb0;
        __ movdqu(Address(rsp, 2 * wordSize), xmm15);
        load_key(xmm1, key, 0xc0);  // 0xc0;
        __ movdqu(Address(rsp, 4 * wordSize), xmm1);
      }
      __ align(OptoLoopAlignment);
      __ BIND(L_multiBlock_loopTop[k]);
      __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left
      __ jcc(Assembler::less, L_singleBlock_loopTopHead[k]);

      if (k != 0) {
        __ movdqu(xmm15, Address(rsp, 2 * wordSize));
        __ movdqu(xmm1, Address(rsp, 4 * wordSize));
      }

      __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); // get next 4 blocks into xmm_result registers
      __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize));

      DoFour(pxor, xmm_key_first);
      if (k == 0) {
        for (int rnum = 1; rnum < ROUNDS[k]; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        DoFour(aesdeclast, xmm_key_last);
      } else if (k == 1) {
        for (int rnum = 1; rnum <= ROUNDS[k] - 2; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again.
        DoFour(aesdec, xmm1);  // key : 0xc0
        __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again
        DoFour(aesdeclast, xmm_key_last);
      } else if (k == 2) {
        for (int rnum = 1; rnum <= ROUNDS[k] - 4; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        DoFour(aesdec, xmm1);  // key : 0xc0
        __ movdqu(xmm15, Address(rsp, 6 * wordSize));
        __ movdqu(xmm1, Address(rsp, 8 * wordSize));
        DoFour(aesdec, xmm15); // key : 0xd0
        __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again.
        DoFour(aesdec, xmm1);  // key : 0xe0
        __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again
        DoFour(aesdeclast, xmm_key_last);
      }

      // for each result, xor with the r vector of previous cipher block
      __ pxor(xmm_result0, xmm_prev_block_cipher);
      __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ pxor(xmm_result1, xmm_prev_block_cipher);
      __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ pxor(xmm_result2, xmm_prev_block_cipher);
      __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ pxor(xmm_result3, xmm_prev_block_cipher);
      __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3 * AESBlockSize)); // this will carry over to next set of blocks
      if (k != 0) {
        __ movdqu(Address(rvec, 0x00), xmm_prev_block_cipher);
      }

      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output
      __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1);
      __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2);
      __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3);

      __ addptr(pos, PARALLEL_FACTOR * AESBlockSize);
      __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize);
      __ jmp(L_multiBlock_loopTop[k]);

      // xmm register assignments for the non-parallelized loops below
      const XMMRegister xmm_result = xmm0;
      const XMMRegister xmm_prev_block_cipher_save = xmm2;
      const XMMRegister xmm_key11 = xmm3;
      const XMMRegister xmm_key12 = xmm4;
      const XMMRegister key_tmp = xmm4;

      __ BIND(L_singleBlock_loopTopHead[k]);
      if (k == 1) {
        __ addptr(rsp, 6 * wordSize);
      } else if (k == 2) {
        __ addptr(rsp, 10 * wordSize);
      }
      __ cmpptr(len_reg, 0); // any blocks left?
      __ jcc(Assembler::equal, L_exit);
      __ BIND(L_singleBlock_loopTopHead2[k]);
      if (k == 1) {
        load_key(xmm_key11, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0
        load_key(xmm_key12, key, 0xc0); // 0xc0; 192-bit key goes up to 0xc0
      }
      if (k == 2) {
        load_key(xmm_key11, key, 0xb0); // 0xb0; 256-bit key goes up to 0xe0
      }
      __ align(OptoLoopAlignment);
      __ BIND(L_singleBlock_loopTop[k]);
      __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
      __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector
      __ pxor(xmm_result, xmm_key_first); // do the aes dec rounds
      for (int rnum = 1; rnum <= 9; rnum++) {
        __ aesdec(xmm_result, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
      }
      if (k == 1) {
        __ aesdec(xmm_result, xmm_key11);
        __ aesdec(xmm_result, xmm_key12);
      }
      if (k == 2) {
        __ aesdec(xmm_result, xmm_key11);
        load_key(key_tmp, key, 0xc0);
        __ aesdec(xmm_result, key_tmp);
        load_key(key_tmp, key, 0xd0);
        __ aesdec(xmm_result, key_tmp);
        load_key(key_tmp, key, 0xe0);
        __ aesdec(xmm_result, key_tmp);
      }

      __ aesdeclast(xmm_result, xmm_key_last); // xmm15 always came from key+0
      __ pxor(xmm_result, xmm_prev_block_cipher); // xor with the current r vector
      __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
      // no need to store r to memory until we exit
      __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block
      __ addptr(pos, AESBlockSize);
      __ subptr(len_reg, AESBlockSize);
      __ jcc(Assembler::notEqual, L_singleBlock_loopTop[k]);
      if (k != 2) {
        __ jmp(L_exit);
      }
    } // for 128/192/256

    __ BIND(L_exit);
    __ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object
    __ pop(rbx);
#ifdef _WIN64
    // restore regs belonging to calling function
    for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
      __ movdqu(as_XMMRegister(i), xmm_save(i));
    }
    __ movl(rax, len_mem);
#else
    __ pop(rax); // return length
#endif
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  address generate_upper_word_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "upper_word_mask");
    address start = __ pc();
    __ emit_data64(0x0000000000000000, relocInfo::none);
    __ emit_data64(0xFFFFFFFF00000000, relocInfo::none);
    return start;
  }

  address generate_shuffle_byte_flip_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }

  // ofs and limit are used for multi-block byte array.
  // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
  address generate_sha1_implCompress(bool multi_block, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf   = c_rarg0;
    Register state = c_rarg1;
    Register ofs   = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister abcd = xmm0;
    const XMMRegister e0 = xmm1;
    const XMMRegister e1 = xmm2;
    const XMMRegister msg0 = xmm3;

    const XMMRegister msg1 = xmm4;
    const XMMRegister msg2 = xmm5;
    const XMMRegister msg3 = xmm6;
    const XMMRegister shuf_mask = xmm7;

    __ enter();

#ifdef _WIN64
    // save the xmm registers which must be preserved 6-7
    __ subptr(rsp, 4 * wordSize);
    __ movdqu(Address(rsp, 0), xmm6);
    __ movdqu(Address(rsp, 2 * wordSize), xmm7);
#endif

    __ subptr(rsp, 4 * wordSize);

    __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask,
                 buf, state, ofs, limit, rsp, multi_block);

    __ addptr(rsp, 4 * wordSize);
#ifdef _WIN64
    // restore xmm regs belonging to calling function
    __ movdqu(xmm6, Address(rsp, 0));
    __ movdqu(xmm7, Address(rsp, 2 * wordSize));
    __ addptr(rsp, 4 * wordSize);
#endif

    __ leave();
    __ ret(0);
    return start;
  }

  address generate_pshuffle_byte_flip_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask");
    address start = __ pc();
    __ emit_data64(0x0405060700010203, relocInfo::none);
    __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);

    if (VM_Version::supports_avx2()) {
      __ emit_data64(0x0405060700010203, relocInfo::none); // second copy
      __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
      // _SHUF_00BA
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      // _SHUF_DC00
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
    }

    return start;
  }

  // ofs and limit are used for multi-block byte array.
  // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
  address generate_sha256_implCompress(bool multi_block, const char *name) {
    assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), "need SHA or AVX2 instructions");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf = c_rarg0;
    Register state = c_rarg1;
    Register ofs = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister msg = xmm0;
    const XMMRegister state0 = xmm1;
    const XMMRegister state1 = xmm2;
    const XMMRegister msgtmp0 = xmm3;

    const XMMRegister msgtmp1 = xmm4;
    const XMMRegister msgtmp2 = xmm5;
    const XMMRegister msgtmp3 = xmm6;
    const XMMRegister msgtmp4 = xmm7;

    const XMMRegister shuf_mask = xmm8;

    __ enter();
#ifdef _WIN64
    // save the xmm registers which must be preserved 6-8
    __ subptr(rsp, 6 * wordSize);
    __ movdqu(Address(rsp, 0), xmm6);
    __ movdqu(Address(rsp, 2 * wordSize), xmm7);
    __ movdqu(Address(rsp, 4 * wordSize), xmm8);

    if (!VM_Version::supports_sha() && VM_Version::supports_avx2()) {
      // the AVX2 path additionally clobbers xmm9-xmm13
      __ subptr(rsp, 10 * wordSize);
      __ movdqu(Address(rsp, 0), xmm9);
      __ movdqu(Address(rsp, 2 * wordSize), xmm10);
      __ movdqu(Address(rsp, 4 * wordSize), xmm11);
      __ movdqu(Address(rsp, 6 * wordSize), xmm12);
      __ movdqu(Address(rsp, 8 * wordSize), xmm13);
    }
#endif

    __ subptr(rsp, 4 * wordSize);

    if (VM_Version::supports_sha()) {
      __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
                     buf, state, ofs, limit, rsp, multi_block, shuf_mask);
    } else if (VM_Version::supports_avx2()) {
      __ sha256_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
                     buf, state, ofs, limit, rsp, multi_block, shuf_mask);
    }
    __ addptr(rsp, 4 * wordSize);
#ifdef _WIN64
    // restore xmm regs belonging to calling function
    if (!VM_Version::supports_sha() && VM_Version::supports_avx2()) {
      __ movdqu(xmm9, Address(rsp, 0));
      __ movdqu(xmm10, Address(rsp, 2 * wordSize));
      __ movdqu(xmm11, Address(rsp, 4 * wordSize));
      __ movdqu(xmm12, Address(rsp, 6 * wordSize));
      __ movdqu(xmm13, Address(rsp, 8 * wordSize));
      __ addptr(rsp, 10 * wordSize);
    }
    __ movdqu(xmm6, Address(rsp, 0));
    __ movdqu(xmm7, Address(rsp, 2 * wordSize));
    __ movdqu(xmm8, Address(rsp, 4 * wordSize));
    __ addptr(rsp, 6 * wordSize);
#endif
    __ leave();
    __ ret(0);
    return start;
  }

  // This is a version of CTR/AES crypt which does 6 blocks in a loop at a time
  // to hide instruction latency
  //
  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - counter vector byte array address
  //   Linux
  //     c_rarg4            - input length
  //     c_rarg5            - saved encryptedCounter start
  //     rbp + 6 * wordSize - saved used length
  //   Windows
  //     rbp + 6 * wordSize - input length
  //     rbp + 7 * wordSize - saved encryptedCounter start
  //     rbp + 8 * wordSize - saved used length
  //
  // Output:
  //   rax - input length
  //
  address generate_counterMode_AESCrypt_Parallel() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt");
    address start = __ pc();
    const Register from = c_rarg0; // source array address
    const Register to = c_rarg1; // destination array address
    const Register key = c_rarg2; // key array address
    const Register counter = c_rarg3; // counter byte array initialized from counter array address
                                      // and updated with the incremented counter in the end
#ifndef _WIN64
    const Register len_reg = c_rarg4;
    const Register saved_encCounter_start = c_rarg5;
    const Register used_addr = r10;
    const Address  used_mem(rbp, 2 * wordSize);
    const Register used = r11;
#else
    const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64
    const Address saved_encCounter_mem(rbp, 7 * wordSize); // saved encrypted counter is on stack on Win64
    const Address used_mem(rbp, 8 * wordSize); // used length is on stack on Win64
    const Register len_reg = r10; // pick the first volatile windows register
    const Register saved_encCounter_start = r11;
    const Register used_addr = r13;
    const Register used = r14;
#endif
    const Register pos = rax;

    const int PARALLEL_FACTOR = 6;
    const XMMRegister xmm_counter_shuf_mask = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front
    const XMMRegister xmm_curr_counter = xmm2;

    const XMMRegister xmm_key_tmp0 = xmm3;
    const XMMRegister xmm_key_tmp1 = xmm4;

    // registers holding the six results in the parallelized loop
    const XMMRegister xmm_result0 = xmm5;
    const XMMRegister xmm_result1 = xmm6;
    const XMMRegister xmm_result2 = xmm7;
    const XMMRegister xmm_result3 = xmm8;
    const XMMRegister xmm_result4 = xmm9;
    const XMMRegister xmm_result5 = xmm10;

    const XMMRegister xmm_from0 = xmm11;
    const XMMRegister xmm_from1 = xmm12;
    const XMMRegister xmm_from2 = xmm13;
    const XMMRegister xmm_from3 = xmm14; // the last one is xmm14; we have to preserve it on WIN64.
    const XMMRegister xmm_from4 = xmm3;  // reuse xmm3~4; xmm_key_tmp0~1 are no longer needed once the input text is loaded
    const XMMRegister xmm_from5 = xmm4;

    // for key_128, key_192, key_256
    const int rounds[3] = {10, 12, 14};
    Label L_exit_preLoop, L_preLoop_start;
    Label L_multiBlock_loopTop[3];
    Label L_singleBlockLoopTop[3];
    Label L__incCounter[3][6]; // for 6 blocks
    Label L__incCounter_single[3]; // for single block, key128, key192, key256
    Label L_processTail_insr[3], L_processTail_4_insr[3], L_processTail_2_insr[3], L_processTail_1_insr[3], L_processTail_exit_insr[3];
    Label L_processTail_extr[3], L_processTail_4_extr[3], L_processTail_2_extr[3], L_processTail_1_extr[3], L_processTail_exit_extr[3];

    Label L_exit;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

#ifdef _WIN64
    // save the xmm registers which must be preserved 6-14
    const int XMM_REG_NUM_KEY_LAST = 14;
    __ subptr(rsp, -rsp_after_call_off * wordSize);
    for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
      __ movdqu(xmm_save(i), as_XMMRegister(i));
    }

    // r13/r14 are saved in the rdi/rsi slots of the entry frame layout
    const Address r13_save(rbp, rdi_off * wordSize);
    const Address r14_save(rbp, rsi_off * wordSize);

    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);

    // on win64, fill len_reg from stack position
    __ movl(len_reg, len_mem);
    __ movptr(saved_encCounter_start, saved_encCounter_mem);
    __ movptr(used_addr, used_mem);
    __ movl(used, Address(used_addr, 0));
#else
    __ push(len_reg); // Save
    __ movptr(used_addr, used_mem);
    __ movl(used, Address(used_addr, 0));
#endif

    __ push(rbx); // Save RBX
    __ movdqu(xmm_curr_counter, Address(counter, 0x00)); // initialize counter with initial counter
    __ movdqu(xmm_counter_shuf_mask, ExternalAddress(StubRoutines::x86::counter_shuffle_mask_addr()));
    __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); // counter is shuffled
    __ movptr(pos, 0);

    // Use the partially used encrypted counter from the last invocation
    __ BIND(L_preLoop_start);
    __ cmpptr(used, 16);
    __ jcc(Assembler::aboveEqual, L_exit_preLoop);
    __ cmpptr(len_reg, 0);
    __ jcc(Assembler::lessEqual, L_exit_preLoop);
    __ movb(rbx, Address(saved_encCounter_start, used));
    __ xorb(rbx, Address(from, pos));
    __ movb(Address(to, pos), rbx);
    __ addptr(pos, 1);
    __ addptr(used, 1);
    __ subptr(len_reg, 1);

    __ jmp(L_preLoop_start);

    __ BIND(L_exit_preLoop);
    __ movl(Address(used_addr, 0), used);

    // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
    __ cmpl(rbx, 52);
    __ jcc(Assembler::equal, L_multiBlock_loopTop[1]);
    __ cmpl(rbx, 60);
    __ jcc(Assembler::equal, L_multiBlock_loopTop[2]);

#define CTR_DoSix(opc, src_reg)       \
    __ opc(xmm_result0, src_reg);     \
    __ opc(xmm_result1, src_reg);     \
    __ opc(xmm_result2, src_reg);     \
    __ opc(xmm_result3, src_reg);     \
    __ opc(xmm_result4, src_reg);     \
    __ opc(xmm_result5, src_reg);

    // k == 0 : generate code for key_128
    // k == 1 : generate code for key_192
    // k == 2 : generate code for key_256
    for (int k = 0; k < 3; ++k) {
      // multi-block loop starts here
      __ align(OptoLoopAlignment);
      __ BIND(L_multiBlock_loopTop[k]);
      __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least PARALLEL_FACTOR blocks left
      __ jcc(Assembler::less, L_singleBlockLoopTop[k]);
      load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);

      // load, then increment counters
      CTR_DoSix(movdqa, xmm_curr_counter);
      inc_counter(rbx, xmm_result1, 0x01, L__incCounter[k][0]);
      inc_counter(rbx, xmm_result2, 0x02, L__incCounter[k][1]);
      inc_counter(rbx, xmm_result3, 0x03, L__incCounter[k][2]);
      inc_counter(rbx, xmm_result4, 0x04, L__incCounter[k][3]);
      inc_counter(rbx, xmm_result5, 0x05, L__incCounter[k][4]);
      inc_counter(rbx, xmm_curr_counter, 0x06, L__incCounter[k][5]);
      CTR_DoSix(pshufb, xmm_counter_shuf_mask); // after incrementing, shuffle the counters back for PXOR
      CTR_DoSix(pxor, xmm_key_tmp0);            // PXOR with Round 0 key

      // load two ROUND_KEYs at a time
      for (int i = 1; i < rounds[k]; ) {
        load_key(xmm_key_tmp1, key, (0x10 * i), xmm_key_shuf_mask);
        load_key(xmm_key_tmp0, key, (0x10 * (i+1)), xmm_key_shuf_mask);
        CTR_DoSix(aesenc, xmm_key_tmp1);
        i++;
        if (i != rounds[k]) {
          CTR_DoSix(aesenc, xmm_key_tmp0);
        } else {
          CTR_DoSix(aesenclast, xmm_key_tmp0);
        }
        i++;
      }

      // get next PARALLEL_FACTOR blocks into xmm_result registers
      __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ movdqu(xmm_from1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ movdqu(xmm_from2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ movdqu(xmm_from3, Address(from, pos, Address::times_1, 3 * AESBlockSize));
      __ movdqu(xmm_from4, Address(from, pos, Address::times_1, 4 * AESBlockSize));
      __ movdqu(xmm_from5, Address(from, pos, Address::times_1, 5 * AESBlockSize));

      __ pxor(xmm_result0, xmm_from0);
      __ pxor(xmm_result1, xmm_from1);
      __ pxor(xmm_result2, xmm_from2);
      __ pxor(xmm_result3, xmm_from3);
      __ pxor(xmm_result4, xmm_from4);
      __ pxor(xmm_result5, xmm_from5);

      // store 6 results into the next 96 bytes of output
      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
      __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1);
      __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2);
      __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3);
      __ movdqu(Address(to, pos, Address::times_1, 4 * AESBlockSize), xmm_result4);
      __ movdqu(Address(to, pos, Address::times_1, 5 * AESBlockSize), xmm_result5);

      __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); // advance past the 6 blocks just processed
      __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // decrease the remaining length
      __ jmp(L_multiBlock_loopTop[k]);

      // singleBlock starts here
      __ align(OptoLoopAlignment);
      __ BIND(L_singleBlockLoopTop[k]);
      __ cmpptr(len_reg, 0);
      __ jcc(Assembler::lessEqual, L_exit);
      load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);
      __ movdqa(xmm_result0, xmm_curr_counter);
      inc_counter(rbx, xmm_curr_counter, 0x01, L__incCounter_single[k]);
      __ pshufb(xmm_result0, xmm_counter_shuf_mask);
      __ pxor(xmm_result0, xmm_key_tmp0);
      for (int i = 1; i < rounds[k]; i++) {
        load_key(xmm_key_tmp0, key, (0x10 * i), xmm_key_shuf_mask);
        __ aesenc(xmm_result0, xmm_key_tmp0);
      }
      load_key(xmm_key_tmp0, key, (rounds[k] * 0x10), xmm_key_shuf_mask);
      __ aesenclast(xmm_result0, xmm_key_tmp0);
      __ cmpptr(len_reg, AESBlockSize);
      __ jcc(Assembler::less, L_processTail_insr[k]);
      __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ pxor(xmm_result0, xmm_from0);
      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
      __ addptr(pos, AESBlockSize);
      __ subptr(len_reg, AESBlockSize);
      __ jmp(L_singleBlockLoopTop[k]);
      __ BIND(L_processTail_insr[k]); // Process the tail part of the input array
      __ addptr(pos, len_reg); // 1.
Insert bytes from src array into xmm_from0 register 4073 __ testptr(len_reg, 8); 4074 __ jcc(Assembler::zero, L_processTail_4_insr[k]); 4075 __ subptr(pos,8); 4076 __ pinsrq(xmm_from0, Address(from, pos), 0); 4077 __ BIND(L_processTail_4_insr[k]); 4078 __ testptr(len_reg, 4); 4079 __ jcc(Assembler::zero, L_processTail_2_insr[k]); 4080 __ subptr(pos,4); 4081 __ pslldq(xmm_from0, 4); 4082 __ pinsrd(xmm_from0, Address(from, pos), 0); 4083 __ BIND(L_processTail_2_insr[k]); 4084 __ testptr(len_reg, 2); 4085 __ jcc(Assembler::zero, L_processTail_1_insr[k]); 4086 __ subptr(pos, 2); 4087 __ pslldq(xmm_from0, 2); 4088 __ pinsrw(xmm_from0, Address(from, pos), 0); 4089 __ BIND(L_processTail_1_insr[k]); 4090 __ testptr(len_reg, 1); 4091 __ jcc(Assembler::zero, L_processTail_exit_insr[k]); 4092 __ subptr(pos, 1); 4093 __ pslldq(xmm_from0, 1); 4094 __ pinsrb(xmm_from0, Address(from, pos), 0); 4095 __ BIND(L_processTail_exit_insr[k]); 4096 4097 __ movdqu(Address(saved_encCounter_start, 0), xmm_result0); // 2. Perform pxor of the encrypted counter and plaintext Bytes. 4098 __ pxor(xmm_result0, xmm_from0); // Also the encrypted counter is saved for next invocation. 4099 4100 __ testptr(len_reg, 8); 4101 __ jcc(Assembler::zero, L_processTail_4_extr[k]); // 3. Extract bytes from xmm_result0 into the dest. array 4102 __ pextrq(Address(to, pos), xmm_result0, 0); 4103 __ psrldq(xmm_result0, 8); 4104 __ addptr(pos, 8); 4105 __ BIND(L_processTail_4_extr[k]); 4106 __ testptr(len_reg, 4); 4107 __ jcc(Assembler::zero, L_processTail_2_extr[k]); 4108 __ pextrd(Address(to, pos), xmm_result0, 0); 4109 __ psrldq(xmm_result0, 4); 4110 __ addptr(pos, 4); 4111 __ BIND(L_processTail_2_extr[k]); 4112 __ testptr(len_reg, 2); 4113 __ jcc(Assembler::zero, L_processTail_1_extr[k]); 4114 __ pextrw(Address(to, pos), xmm_result0, 0); 4115 __ psrldq(xmm_result0, 2); 4116 __ addptr(pos, 2); 4117 __ BIND(L_processTail_1_extr[k]); 4118 __ testptr(len_reg, 1); 4119 __ jcc(Assembler::zero, L_processTail_exit_extr[k]); 4120 __ pextrb(Address(to, pos), xmm_result0, 0); 4121 4122 __ BIND(L_processTail_exit_extr[k]); 4123 __ movl(Address(used_addr, 0), len_reg); 4124 __ jmp(L_exit); 4125 4126 } 4127 4128 __ BIND(L_exit); 4129 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); //counter is shuffled back. 4130 __ movdqu(Address(counter, 0), xmm_curr_counter); //save counter back 4131 __ pop(rbx); // pop the saved RBX. 
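    // Everything from L_preLoop_start to this point keeps AES/CTR stateless
    // across stub invocations: the encrypted counter for a partial block lives
    // in saved_encCounter and 'used' records how many of its bytes were already
    // consumed. An illustrative sketch of that contract (plain C, hypothetical
    // helper names, not the stub itself):
    //
    //   while (len > 0 && used < 16) {            // drain leftover keystream first
    //     *out++ = *in++ ^ encCounter[used++]; len--;
    //   }
    //   ...                                       // full 16-byte blocks via AES-NI
    //   if (len > 0) {                            // 1..15 trailing bytes
    //     aes_encrypt(counter, key, encCounter);  // keystream for the tail
    //     for (int i = 0; i < len; i++) out[i] = in[i] ^ encCounter[i];
    //     used = len;                             // remembered for the next call
    //   }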
4132 #ifdef _WIN64 4133 // restore regs belonging to calling function 4134 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 4135 __ movdqu(as_XMMRegister(i), xmm_save(i)); 4136 } 4137 __ movl(rax, len_mem); 4138 __ movptr(r13, r13_save); 4139 __ movptr(r14, r14_save); 4140 #else 4141 __ pop(rax); // return 'len' 4142 #endif 4143 __ leave(); // required for proper stackwalking of RuntimeStub frame 4144 __ ret(0); 4145 return start; 4146 } 4147 4148 // byte swap x86 long 4149 address generate_ghash_long_swap_mask() { 4150 __ align(CodeEntryAlignment); 4151 StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask"); 4152 address start = __ pc(); 4153 __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none ); 4154 __ emit_data64(0x0706050403020100, relocInfo::none ); 4155 return start; 4156 } 4157 4158 // byte swap x86 byte array 4159 address generate_ghash_byte_swap_mask() { 4160 __ align(CodeEntryAlignment); 4161 StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask"); 4162 address start = __ pc(); 4163 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none ); 4164 __ emit_data64(0x0001020304050607, relocInfo::none ); 4165 return start; 4166 } 4167 4168 /* Single and multi-block ghash operations */ 4169 address generate_ghash_processBlocks() { 4170 __ align(CodeEntryAlignment); 4171 Label L_ghash_loop, L_exit; 4172 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 4173 address start = __ pc(); 4174 4175 const Register state = c_rarg0; 4176 const Register subkeyH = c_rarg1; 4177 const Register data = c_rarg2; 4178 const Register blocks = c_rarg3; 4179 4180 #ifdef _WIN64 4181 const int XMM_REG_LAST = 10; 4182 #endif 4183 4184 const XMMRegister xmm_temp0 = xmm0; 4185 const XMMRegister xmm_temp1 = xmm1; 4186 const XMMRegister xmm_temp2 = xmm2; 4187 const XMMRegister xmm_temp3 = xmm3; 4188 const XMMRegister xmm_temp4 = xmm4; 4189 const XMMRegister xmm_temp5 = xmm5; 4190 const XMMRegister xmm_temp6 = xmm6; 4191 const XMMRegister xmm_temp7 = xmm7; 4192 const XMMRegister xmm_temp8 = xmm8; 4193 const XMMRegister xmm_temp9 = xmm9; 4194 const XMMRegister xmm_temp10 = xmm10; 4195 4196 __ enter(); 4197 4198 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 4199 // context for the registers used, where all instructions below are using 128-bit mode 4200 // On EVEX without VL and BW, these instructions will all be AVX. 
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

#ifdef _WIN64
    // save the xmm registers which must be preserved 6-10
    __ subptr(rsp, -rsp_after_call_off * wordSize);
    for (int i = 6; i <= XMM_REG_LAST; i++) {
      __ movdqu(xmm_save(i), as_XMMRegister(i));
    }
#endif

    __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));

    __ movdqu(xmm_temp0, Address(state, 0));
    __ pshufb(xmm_temp0, xmm_temp10);


    __ BIND(L_ghash_loop);
    __ movdqu(xmm_temp2, Address(data, 0));
    __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr()));

    __ movdqu(xmm_temp1, Address(subkeyH, 0));
    __ pshufb(xmm_temp1, xmm_temp10);

    __ pxor(xmm_temp0, xmm_temp2);

    //
    // Multiply with the hash key
    //
    __ movdqu(xmm_temp3, xmm_temp0);
    __ pclmulqdq(xmm_temp3, xmm_temp1, 0);  // xmm3 holds a0*b0
    __ movdqu(xmm_temp4, xmm_temp0);
    __ pclmulqdq(xmm_temp4, xmm_temp1, 16); // xmm4 holds a0*b1

    __ movdqu(xmm_temp5, xmm_temp0);
    __ pclmulqdq(xmm_temp5, xmm_temp1, 1);  // xmm5 holds a1*b0
    __ movdqu(xmm_temp6, xmm_temp0);
    __ pclmulqdq(xmm_temp6, xmm_temp1, 17); // xmm6 holds a1*b1

    __ pxor(xmm_temp4, xmm_temp5); // xmm4 holds a0*b1 + a1*b0

    __ movdqu(xmm_temp5, xmm_temp4); // move the contents of xmm4 to xmm5
    __ psrldq(xmm_temp4, 8);  // shift xmm4 by 64 bits to the right
    __ pslldq(xmm_temp5, 8);  // shift xmm5 by 64 bits to the left
    __ pxor(xmm_temp3, xmm_temp5);
    __ pxor(xmm_temp6, xmm_temp4); // Register pair <xmm6:xmm3> holds the result
                                   // of the carry-less multiplication of
                                   // xmm0 by xmm1.

    // We shift the result of the multiplication by one bit position
    // to the left to compensate for the fact that the bits are reversed.
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp8, xmm_temp6);
    __ pslld(xmm_temp3, 1);
    __ pslld(xmm_temp6, 1);
    __ psrld(xmm_temp7, 31);
    __ psrld(xmm_temp8, 31);
    __ movdqu(xmm_temp9, xmm_temp7);
    __ pslldq(xmm_temp8, 4);
    __ pslldq(xmm_temp7, 4);
    __ psrldq(xmm_temp9, 12);
    __ por(xmm_temp3, xmm_temp7);
    __ por(xmm_temp6, xmm_temp8);
    __ por(xmm_temp6, xmm_temp9);

    //
    // First phase of the reduction
    //
    // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts
    // independently.
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp8, xmm_temp3);
    __ movdqu(xmm_temp9, xmm_temp3);
    __ pslld(xmm_temp7, 31); // packed left shift, << 31
    __ pslld(xmm_temp8, 30); // packed left shift, << 30
    __ pslld(xmm_temp9, 25); // packed left shift, << 25
    __ pxor(xmm_temp7, xmm_temp8); // xor the shifted versions
    __ pxor(xmm_temp7, xmm_temp9);
    __ movdqu(xmm_temp8, xmm_temp7);
    __ pslldq(xmm_temp7, 12);
    __ psrldq(xmm_temp8, 4);
    __ pxor(xmm_temp3, xmm_temp7); // first phase of the reduction complete

    //
    // Second phase of the reduction
    //
    // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these
    // shift operations.
    __ movdqu(xmm_temp2, xmm_temp3);
    __ movdqu(xmm_temp4, xmm_temp3);
    __ movdqu(xmm_temp5, xmm_temp3);
    __ psrld(xmm_temp2, 1); // packed right shift, >> 1
    __ psrld(xmm_temp4, 2); // packed right shift, >> 2
    __ psrld(xmm_temp5, 7); // packed right shift, >> 7
    __ pxor(xmm_temp2, xmm_temp4); // xor the shifted versions
    __ pxor(xmm_temp2, xmm_temp5);
    __ pxor(xmm_temp2, xmm_temp8);
    __ pxor(xmm_temp3, xmm_temp2);
    __ pxor(xmm_temp6, xmm_temp3); // the result is in xmm6

    __ decrement(blocks);
    __ jcc(Assembler::zero, L_exit);
    __ movdqu(xmm_temp0, xmm_temp6);
    __ addptr(data, 16);
    __ jmp(L_ghash_loop);

    __ BIND(L_exit);
    __ pshufb(xmm_temp6, xmm_temp10); // Byte swap 16-byte result
    __ movdqu(Address(state, 0), xmm_temp6); // store the result

#ifdef _WIN64
    // restore xmm regs belonging to calling function
    for (int i = 6; i <= XMM_REG_LAST; i++) {
      __ movdqu(as_XMMRegister(i), xmm_save(i));
    }
#endif
    __ leave();
    __ ret(0);
    return start;
  }

  /**
   *  Arguments:
   *
   * Inputs:
   *   c_rarg0   - int crc
   *   c_rarg1   - byte* buf
   *   c_rarg2   - int length
   *
   * Output:
   *   rax   - int crc result
   */
  address generate_updateBytesCRC32() {
    assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    // rscratch1: r10
    const Register crc   = c_rarg0; // crc
    const Register buf   = c_rarg1; // source java byte array address
    const Register len   = c_rarg2; // length
    const Register table = c_rarg3; // crc_table address (reuse register)
    const Register tmp   = r11;
    assert_different_registers(crc, buf, len, table, tmp, rax);

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ kernel_crc32(crc, buf, len, table, tmp);

    __ movl(rax, crc);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  /**
   *  Arguments:
   *
   * Inputs:
   *   c_rarg0   - int crc
   *   c_rarg1   - byte* buf
   *   c_rarg2   - long length
   *   c_rarg3   - table_start - optional (present only when doing a library_call,
   *               not used by x86 algorithm)
   *
   * Output:
   *   rax   - int crc result
   */
  address generate_updateBytesCRC32C(bool is_pclmulqdq_supported) {
    assert(UseCRC32CIntrinsics, "need SSE4_2");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C");
    address start = __ pc();
    // reg.arg   int#0   int#1   int#2   int#3   int#4   int#5   float regs
    // Windows   RCX     RDX     R8      R9      none    none    XMM0..XMM3
    // Lin / Sol RDI     RSI     RDX     RCX     R8      R9      XMM0..XMM7
    const Register crc = c_rarg0; // crc
    const Register buf = c_rarg1; // source java byte array address
    const Register len = c_rarg2; // length
    const Register a = rax;
    const Register j = r9;
    const Register k = r10;
    const Register l = r11;
#ifdef _WIN64
    const Register y = rdi;
    const Register z = rsi;
#else
    const Register y = rcx;
    const Register z = r8;
#endif
    assert_different_registers(crc, buf, len, a, j, k, l, y, z);

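    // crc32c_ipl_alg2_alt2 folds interleaved streams with the SSE4.2 crc32
    // instruction (Castagnoli polynomial 0x1EDC6F41), recombining them with
    // PCLMULQDQ when available. Functionally, and ignoring the bit-inversion
    // handled by the Java caller, the kernel reduces to roughly this sketch
    // (illustrative only, assuming the SSE4.2 intrinsics from <nmmintrin.h>):
    //
    //   uint32_t crc32c_sketch(uint32_t crc, const uint8_t* buf, size_t len) {
    //     uint64_t c = crc;
    //     for (; len >= 8; buf += 8, len -= 8) {
    //       uint64_t v; memcpy(&v, buf, 8);       // unaligned-safe load
    //       c = _mm_crc32_u64(c, v);
    //     }
    //     while (len--) c = _mm_crc32_u8((uint32_t)c, *buf++);
    //     return (uint32_t)c;
    //   }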
BLOCK_COMMENT("Entry:"); 4402 __ enter(); // required for proper stackwalking of RuntimeStub frame 4403 #ifdef _WIN64 4404 __ push(y); 4405 __ push(z); 4406 #endif 4407 __ crc32c_ipl_alg2_alt2(crc, buf, len, 4408 a, j, k, 4409 l, y, z, 4410 c_farg0, c_farg1, c_farg2, 4411 is_pclmulqdq_supported); 4412 __ movl(rax, crc); 4413 #ifdef _WIN64 4414 __ pop(z); 4415 __ pop(y); 4416 #endif 4417 __ leave(); // required for proper stackwalking of RuntimeStub frame 4418 __ ret(0); 4419 4420 return start; 4421 } 4422 4423 /** 4424 * Arguments: 4425 * 4426 * Input: 4427 * c_rarg0 - x address 4428 * c_rarg1 - x length 4429 * c_rarg2 - y address 4430 * c_rarg3 - y lenth 4431 * not Win64 4432 * c_rarg4 - z address 4433 * c_rarg5 - z length 4434 * Win64 4435 * rsp+40 - z address 4436 * rsp+48 - z length 4437 */ 4438 address generate_multiplyToLen() { 4439 __ align(CodeEntryAlignment); 4440 StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); 4441 4442 address start = __ pc(); 4443 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 4444 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 4445 const Register x = rdi; 4446 const Register xlen = rax; 4447 const Register y = rsi; 4448 const Register ylen = rcx; 4449 const Register z = r8; 4450 const Register zlen = r11; 4451 4452 // Next registers will be saved on stack in multiply_to_len(). 4453 const Register tmp1 = r12; 4454 const Register tmp2 = r13; 4455 const Register tmp3 = r14; 4456 const Register tmp4 = r15; 4457 const Register tmp5 = rbx; 4458 4459 BLOCK_COMMENT("Entry:"); 4460 __ enter(); // required for proper stackwalking of RuntimeStub frame 4461 4462 #ifndef _WIN64 4463 __ movptr(zlen, r9); // Save r9 in r11 - zlen 4464 #endif 4465 setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx 4466 // ylen => rcx, z => r8, zlen => r11 4467 // r9 and r10 may be used to save non-volatile registers 4468 #ifdef _WIN64 4469 // last 2 arguments (#4, #5) are on stack on Win64 4470 __ movptr(z, Address(rsp, 6 * wordSize)); 4471 __ movptr(zlen, Address(rsp, 7 * wordSize)); 4472 #endif 4473 4474 __ movptr(xlen, rsi); 4475 __ movptr(y, rdx); 4476 __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5); 4477 4478 restore_arg_regs(); 4479 4480 __ leave(); // required for proper stackwalking of RuntimeStub frame 4481 __ ret(0); 4482 4483 return start; 4484 } 4485 4486 /** 4487 * Arguments: 4488 * 4489 * Input: 4490 * c_rarg0 - obja address 4491 * c_rarg1 - objb address 4492 * c_rarg3 - length length 4493 * c_rarg4 - scale log2_array_indxscale 4494 * 4495 * Output: 4496 * rax - int >= mismatched index, < 0 bitwise complement of tail 4497 */ 4498 address generate_vectorizedMismatch() { 4499 __ align(CodeEntryAlignment); 4500 StubCodeMark mark(this, "StubRoutines", "vectorizedMismatch"); 4501 address start = __ pc(); 4502 4503 BLOCK_COMMENT("Entry:"); 4504 __ enter(); 4505 4506 #ifdef _WIN64 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 4507 const Register scale = c_rarg0; //rcx, will exchange with r9 4508 const Register objb = c_rarg1; //rdx 4509 const Register length = c_rarg2; //r8 4510 const Register obja = c_rarg3; //r9 4511 __ xchgq(obja, scale); //now obja and scale contains the correct contents 4512 4513 const Register tmp1 = r10; 4514 const Register tmp2 = r11; 4515 #endif 4516 #ifndef _WIN64 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 
4517 const Register obja = c_rarg0; //U:rdi 4518 const Register objb = c_rarg1; //U:rsi 4519 const Register length = c_rarg2; //U:rdx 4520 const Register scale = c_rarg3; //U:rcx 4521 const Register tmp1 = r8; 4522 const Register tmp2 = r9; 4523 #endif 4524 const Register result = rax; //return value 4525 const XMMRegister vec0 = xmm0; 4526 const XMMRegister vec1 = xmm1; 4527 const XMMRegister vec2 = xmm2; 4528 4529 __ vectorized_mismatch(obja, objb, length, scale, result, tmp1, tmp2, vec0, vec1, vec2); 4530 4531 __ leave(); 4532 __ ret(0); 4533 4534 return start; 4535 } 4536 4537 /** 4538 * Arguments: 4539 * 4540 // Input: 4541 // c_rarg0 - x address 4542 // c_rarg1 - x length 4543 // c_rarg2 - z address 4544 // c_rarg3 - z lenth 4545 * 4546 */ 4547 address generate_squareToLen() { 4548 4549 __ align(CodeEntryAlignment); 4550 StubCodeMark mark(this, "StubRoutines", "squareToLen"); 4551 4552 address start = __ pc(); 4553 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 4554 // Unix: rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...) 4555 const Register x = rdi; 4556 const Register len = rsi; 4557 const Register z = r8; 4558 const Register zlen = rcx; 4559 4560 const Register tmp1 = r12; 4561 const Register tmp2 = r13; 4562 const Register tmp3 = r14; 4563 const Register tmp4 = r15; 4564 const Register tmp5 = rbx; 4565 4566 BLOCK_COMMENT("Entry:"); 4567 __ enter(); // required for proper stackwalking of RuntimeStub frame 4568 4569 setup_arg_regs(4); // x => rdi, len => rsi, z => rdx 4570 // zlen => rcx 4571 // r9 and r10 may be used to save non-volatile registers 4572 __ movptr(r8, rdx); 4573 __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 4574 4575 restore_arg_regs(); 4576 4577 __ leave(); // required for proper stackwalking of RuntimeStub frame 4578 __ ret(0); 4579 4580 return start; 4581 } 4582 4583 /** 4584 * Arguments: 4585 * 4586 * Input: 4587 * c_rarg0 - out address 4588 * c_rarg1 - in address 4589 * c_rarg2 - offset 4590 * c_rarg3 - len 4591 * not Win64 4592 * c_rarg4 - k 4593 * Win64 4594 * rsp+40 - k 4595 */ 4596 address generate_mulAdd() { 4597 __ align(CodeEntryAlignment); 4598 StubCodeMark mark(this, "StubRoutines", "mulAdd"); 4599 4600 address start = __ pc(); 4601 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 4602 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 4603 const Register out = rdi; 4604 const Register in = rsi; 4605 const Register offset = r11; 4606 const Register len = rcx; 4607 const Register k = r8; 4608 4609 // Next registers will be saved on stack in mul_add(). 
4610 const Register tmp1 = r12; 4611 const Register tmp2 = r13; 4612 const Register tmp3 = r14; 4613 const Register tmp4 = r15; 4614 const Register tmp5 = rbx; 4615 4616 BLOCK_COMMENT("Entry:"); 4617 __ enter(); // required for proper stackwalking of RuntimeStub frame 4618 4619 setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx 4620 // len => rcx, k => r8 4621 // r9 and r10 may be used to save non-volatile registers 4622 #ifdef _WIN64 4623 // last argument is on stack on Win64 4624 __ movl(k, Address(rsp, 6 * wordSize)); 4625 #endif 4626 __ movptr(r11, rdx); // move offset in rdx to offset(r11) 4627 __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 4628 4629 restore_arg_regs(); 4630 4631 __ leave(); // required for proper stackwalking of RuntimeStub frame 4632 __ ret(0); 4633 4634 return start; 4635 } 4636 4637 address generate_libmExp() { 4638 address start = __ pc(); 4639 4640 const XMMRegister x0 = xmm0; 4641 const XMMRegister x1 = xmm1; 4642 const XMMRegister x2 = xmm2; 4643 const XMMRegister x3 = xmm3; 4644 4645 const XMMRegister x4 = xmm4; 4646 const XMMRegister x5 = xmm5; 4647 const XMMRegister x6 = xmm6; 4648 const XMMRegister x7 = xmm7; 4649 4650 const Register tmp = r11; 4651 4652 BLOCK_COMMENT("Entry:"); 4653 __ enter(); // required for proper stackwalking of RuntimeStub frame 4654 4655 #ifdef _WIN64 4656 // save the xmm registers which must be preserved 6-7 4657 __ subptr(rsp, 4 * wordSize); 4658 __ movdqu(Address(rsp, 0), xmm6); 4659 __ movdqu(Address(rsp, 2 * wordSize), xmm7); 4660 #endif 4661 __ fast_exp(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 4662 4663 #ifdef _WIN64 4664 // restore xmm regs belonging to calling function 4665 __ movdqu(xmm6, Address(rsp, 0)); 4666 __ movdqu(xmm7, Address(rsp, 2 * wordSize)); 4667 __ addptr(rsp, 4 * wordSize); 4668 #endif 4669 4670 __ leave(); // required for proper stackwalking of RuntimeStub frame 4671 __ ret(0); 4672 4673 return start; 4674 4675 } 4676 4677 address generate_libmLog() { 4678 address start = __ pc(); 4679 4680 const XMMRegister x0 = xmm0; 4681 const XMMRegister x1 = xmm1; 4682 const XMMRegister x2 = xmm2; 4683 const XMMRegister x3 = xmm3; 4684 4685 const XMMRegister x4 = xmm4; 4686 const XMMRegister x5 = xmm5; 4687 const XMMRegister x6 = xmm6; 4688 const XMMRegister x7 = xmm7; 4689 4690 const Register tmp1 = r11; 4691 const Register tmp2 = r8; 4692 4693 BLOCK_COMMENT("Entry:"); 4694 __ enter(); // required for proper stackwalking of RuntimeStub frame 4695 4696 #ifdef _WIN64 4697 // save the xmm registers which must be preserved 6-7 4698 __ subptr(rsp, 4 * wordSize); 4699 __ movdqu(Address(rsp, 0), xmm6); 4700 __ movdqu(Address(rsp, 2 * wordSize), xmm7); 4701 #endif 4702 __ fast_log(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2); 4703 4704 #ifdef _WIN64 4705 // restore xmm regs belonging to calling function 4706 __ movdqu(xmm6, Address(rsp, 0)); 4707 __ movdqu(xmm7, Address(rsp, 2 * wordSize)); 4708 __ addptr(rsp, 4 * wordSize); 4709 #endif 4710 4711 __ leave(); // required for proper stackwalking of RuntimeStub frame 4712 __ ret(0); 4713 4714 return start; 4715 4716 } 4717 4718 address generate_libmLog10() { 4719 address start = __ pc(); 4720 4721 const XMMRegister x0 = xmm0; 4722 const XMMRegister x1 = xmm1; 4723 const XMMRegister x2 = xmm2; 4724 const XMMRegister x3 = xmm3; 4725 4726 const XMMRegister x4 = xmm4; 4727 const XMMRegister x5 = xmm5; 4728 const XMMRegister x6 = xmm6; 4729 const XMMRegister x7 = xmm7; 4730 4731 const Register tmp = r11; 4732 4733 
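    // All of the generate_libm* stubs share the same shape: on Win64, xmm6-xmm15
    // are callee-saved, so any stub whose fast_* kernel touches xmm6/xmm7
    // brackets the call with a 32-byte spill area (two 16-byte movdqu slots;
    // wordSize is 8 here), and the trig stubs additionally save rsi/rdi. The
    // equivalent of that #ifdef'd prologue/epilogue, as an illustrative
    // assembly sketch (not emitted verbatim by the stub):
    //
    //   sub    rsp, 32
    //   movdqu [rsp],      xmm6
    //   movdqu [rsp + 16], xmm7
    //   ...                          ; kernel may clobber xmm6/xmm7
    //   movdqu xmm6, [rsp]
    //   movdqu xmm7, [rsp + 16]
    //   add    rsp, 32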
BLOCK_COMMENT("Entry:"); 4734 __ enter(); // required for proper stackwalking of RuntimeStub frame 4735 4736 #ifdef _WIN64 4737 // save the xmm registers which must be preserved 6-7 4738 __ subptr(rsp, 4 * wordSize); 4739 __ movdqu(Address(rsp, 0), xmm6); 4740 __ movdqu(Address(rsp, 2 * wordSize), xmm7); 4741 #endif 4742 __ fast_log10(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 4743 4744 #ifdef _WIN64 4745 // restore xmm regs belonging to calling function 4746 __ movdqu(xmm6, Address(rsp, 0)); 4747 __ movdqu(xmm7, Address(rsp, 2 * wordSize)); 4748 __ addptr(rsp, 4 * wordSize); 4749 #endif 4750 4751 __ leave(); // required for proper stackwalking of RuntimeStub frame 4752 __ ret(0); 4753 4754 return start; 4755 4756 } 4757 4758 address generate_libmPow() { 4759 address start = __ pc(); 4760 4761 const XMMRegister x0 = xmm0; 4762 const XMMRegister x1 = xmm1; 4763 const XMMRegister x2 = xmm2; 4764 const XMMRegister x3 = xmm3; 4765 4766 const XMMRegister x4 = xmm4; 4767 const XMMRegister x5 = xmm5; 4768 const XMMRegister x6 = xmm6; 4769 const XMMRegister x7 = xmm7; 4770 4771 const Register tmp1 = r8; 4772 const Register tmp2 = r9; 4773 const Register tmp3 = r10; 4774 const Register tmp4 = r11; 4775 4776 BLOCK_COMMENT("Entry:"); 4777 __ enter(); // required for proper stackwalking of RuntimeStub frame 4778 4779 #ifdef _WIN64 4780 // save the xmm registers which must be preserved 6-7 4781 __ subptr(rsp, 4 * wordSize); 4782 __ movdqu(Address(rsp, 0), xmm6); 4783 __ movdqu(Address(rsp, 2 * wordSize), xmm7); 4784 #endif 4785 __ fast_pow(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4786 4787 #ifdef _WIN64 4788 // restore xmm regs belonging to calling function 4789 __ movdqu(xmm6, Address(rsp, 0)); 4790 __ movdqu(xmm7, Address(rsp, 2 * wordSize)); 4791 __ addptr(rsp, 4 * wordSize); 4792 #endif 4793 4794 __ leave(); // required for proper stackwalking of RuntimeStub frame 4795 __ ret(0); 4796 4797 return start; 4798 4799 } 4800 4801 address generate_libmSin() { 4802 address start = __ pc(); 4803 4804 const XMMRegister x0 = xmm0; 4805 const XMMRegister x1 = xmm1; 4806 const XMMRegister x2 = xmm2; 4807 const XMMRegister x3 = xmm3; 4808 4809 const XMMRegister x4 = xmm4; 4810 const XMMRegister x5 = xmm5; 4811 const XMMRegister x6 = xmm6; 4812 const XMMRegister x7 = xmm7; 4813 4814 const Register tmp1 = r8; 4815 const Register tmp2 = r9; 4816 const Register tmp3 = r10; 4817 const Register tmp4 = r11; 4818 4819 BLOCK_COMMENT("Entry:"); 4820 __ enter(); // required for proper stackwalking of RuntimeStub frame 4821 4822 #ifdef _WIN64 4823 __ push(rsi); 4824 __ push(rdi); 4825 // save the xmm registers which must be preserved 6-7 4826 __ subptr(rsp, 4 * wordSize); 4827 __ movdqu(Address(rsp, 0), xmm6); 4828 __ movdqu(Address(rsp, 2 * wordSize), xmm7); 4829 #endif 4830 __ fast_sin(x0, x1, x2, x3, x4, x5, x6, x7, rax, rbx, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4831 4832 #ifdef _WIN64 4833 // restore xmm regs belonging to calling function 4834 __ movdqu(xmm6, Address(rsp, 0)); 4835 __ movdqu(xmm7, Address(rsp, 2 * wordSize)); 4836 __ addptr(rsp, 4 * wordSize); 4837 __ pop(rdi); 4838 __ pop(rsi); 4839 #endif 4840 4841 __ leave(); // required for proper stackwalking of RuntimeStub frame 4842 __ ret(0); 4843 4844 return start; 4845 4846 } 4847 4848 address generate_libmCos() { 4849 address start = __ pc(); 4850 4851 const XMMRegister x0 = xmm0; 4852 const XMMRegister x1 = xmm1; 4853 const XMMRegister x2 = xmm2; 4854 const XMMRegister x3 = xmm3; 4855 4856 const XMMRegister x4 = xmm4; 
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r8;
    const Register tmp2 = r9;
    const Register tmp3 = r10;
    const Register tmp4 = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef _WIN64
    __ push(rsi);
    __ push(rdi);
    // save xmm registers 6-7, which are callee-saved on Windows
    __ subptr(rsp, 4 * wordSize);
    __ movdqu(Address(rsp, 0), xmm6);
    __ movdqu(Address(rsp, 2 * wordSize), xmm7);
#endif
    __ fast_cos(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4);

#ifdef _WIN64
    // restore xmm regs belonging to calling function
    __ movdqu(xmm6, Address(rsp, 0));
    __ movdqu(xmm7, Address(rsp, 2 * wordSize));
    __ addptr(rsp, 4 * wordSize);
    __ pop(rdi);
    __ pop(rsi);
#endif

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;

  }

  address generate_libmTan() {
    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r8;
    const Register tmp2 = r9;
    const Register tmp3 = r10;
    const Register tmp4 = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef _WIN64
    __ push(rsi);
    __ push(rdi);
    // save xmm registers 6-7, which are callee-saved on Windows
    __ subptr(rsp, 4 * wordSize);
    __ movdqu(Address(rsp, 0), xmm6);
    __ movdqu(Address(rsp, 2 * wordSize), xmm7);
#endif
    __ fast_tan(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4);

#ifdef _WIN64
    // restore xmm regs belonging to calling function
    __ movdqu(xmm6, Address(rsp, 0));
    __ movdqu(xmm7, Address(rsp, 2 * wordSize));
    __ addptr(rsp, 4 * wordSize);
    __ pop(rdi);
    __ pop(rsi);
#endif

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;

  }

#undef __
#define __ masm->

  // Continuation point for throwing of implicit exceptions that are
  // not handled in the current activation. Fabricates an exception
  // oop and initiates normal exception dispatching in this
  // frame. Since we need to preserve callee-saved values (currently
  // only for C2, but done for C1 as well) we need a callee-saved oop
  // map and therefore have to make these stubs into RuntimeStubs
  // rather than BufferBlobs. If the compiler needs all registers to
  // be preserved between the fault point and the exception handler
  // then it must assume responsibility for that in
  // AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other
  // implicit exceptions (e.g., NullPointerException or
  // AbstractMethodError on entry) are either at call sites or
  // otherwise assume that stack unwinding will be initiated, so
  // caller-saved registers are assumed volatile by the compiler.
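  //
  // For orientation, a hedged sketch of the runtime_entry side (simplified
  // from the JRT_ENTRY wrappers in sharedRuntime.cpp; the exact bodies may
  // differ):
  //
  //   JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* thread))
  //     // fabricate the exception oop and install it as the pending
  //     // exception on the current thread
  //     throw_and_post_jvmti_exception(thread,
  //                                    vmSymbols::java_lang_AbstractMethodError());
  //   JRT_END
  //
  // The stub generated below passes r15_thread in c_rarg0 (plus the optional
  // arg1/arg2 in c_rarg1/c_rarg2), calls that entry, and then jumps to
  // forward_exception_entry to begin the actual dispatch.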
  address generate_throw_exception(const char* name,
                                   address runtime_entry,
                                   Register arg1 = noreg,
                                   Register arg2 = noreg) {
    // Information about frame layout at time of blocking runtime call.
    // Note that we only have to preserve callee-saved registers since
    // the compilers are responsible for supplying a continuation point
    // if they expect all registers to be preserved.
    enum layout {
      rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
      rbp_off2,
      return_off,
      return_off2,
      framesize // inclusive of return address
    };

    int insts_size = 512;
    int locs_size  = 64;

    CodeBuffer code(name, insts_size, locs_size);
    OopMapSet* oop_maps = new OopMapSet();
    MacroAssembler* masm = new MacroAssembler(&code);

    address start = __ pc();

    // This is an inlined and slightly modified version of call_VM
    // which has the ability to fetch the return PC out of
    // thread-local storage and also sets up last_Java_sp slightly
    // differently than the real call_VM.

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    assert(is_even(framesize/2), "sp not 16-byte aligned");

    // return address and rbp are already in place
    __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog

    int frame_complete = __ pc() - start;

    // Set up last_Java_sp and last_Java_fp
    address the_pc = __ pc();
    __ set_last_Java_frame(rsp, rbp, the_pc);
    __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack

    // Call runtime
    if (arg1 != noreg) {
      assert(arg2 != c_rarg1, "clobbered");
      __ movptr(c_rarg1, arg1);
    }
    if (arg2 != noreg) {
      __ movptr(c_rarg2, arg2);
    }
    __ movptr(c_rarg0, r15_thread);
    BLOCK_COMMENT("call runtime_entry");
    __ call(RuntimeAddress(runtime_entry));

    // Generate oop map
    OopMap* map = new OopMap(framesize, 0);

    oop_maps->add_gc_map(the_pc - start, map);

    __ reset_last_Java_frame(true, true);

    __ leave(); // required for proper stackwalking of RuntimeStub frame

    // check for pending exceptions
#ifdef ASSERT
    Label L;
    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()),
              (int32_t) NULL_WORD);
    __ jcc(Assembler::notEqual, L);
    __ should_not_reach_here();
    __ bind(L);
#endif // ASSERT
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

    // codeBlob framesize is in words (not VMRegImpl::slot_size)
    RuntimeStub* stub =
      RuntimeStub::new_runtime_stub(name,
                                    &code,
                                    frame_complete,
                                    (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                    oop_maps, false);
    return stub->entry_point();
  }

  void create_control_words() {
    // Round to nearest, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_std   = 0x027F;
    // Round to zero, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
    // Round to nearest, 24-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_24    = 0x007F;
    // Round to nearest, 64-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_64    = 0x037F;
    // MXCSR: round to nearest, all six SSE exceptions masked
    StubRoutines::_mxcsr_std           = 0x1F80;
    // Note: the following two constants are 80-bit extended values whose
    // layout is critical for correct loading by the FPU.
    // Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias1[0] = 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias1[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias1[2] = 0x03ff;
    // Un-Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias2[0] = 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias2[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias2[2] = 0x7bff;
  }

  // Initialization
  void generate_initial() {
    // Generates all stubs and initializes the entry points

    // These platform-specific settings are needed by generate_call_stub()
    create_control_words();

    // Entry points that exist on all platforms. Note: this is code
    // that could be shared among different platforms - however the
    // benefit seems to be smaller than the disadvantage of having a
    // much more complicated generator structure. See also the comment
    // in stubRoutines.hpp.

    StubRoutines::_forward_exception_entry = generate_forward_exception();

    StubRoutines::_call_stub_entry =
      generate_call_stub(StubRoutines::_call_stub_return_address);

    // is referenced by megamorphic call
    StubRoutines::_catch_exception_entry = generate_catch_exception();

    // atomic calls
    StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
    StubRoutines::_atomic_xchg_ptr_entry     = generate_atomic_xchg_ptr();
    StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
    StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
    StubRoutines::_atomic_add_entry          = generate_atomic_add();
    StubRoutines::_atomic_add_ptr_entry      = generate_atomic_add_ptr();
    StubRoutines::_fence_entry               = generate_orderaccess_fence();

    // platform dependent
    StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
    StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();

    StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();

    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_StackOverflowError));
    StubRoutines::_throw_delayed_StackOverflowError_entry =
      generate_throw_exception("delayed StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_delayed_StackOverflowError));
    if (UseCRC32Intrinsics) {
      // set the table address before generating the stubs that use it
      StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
    }

    if (UseCRC32CIntrinsics) {
      bool supports_clmul = VM_Version::supports_clmul();
      StubRoutines::x86::generate_CRC32C_table(supports_clmul);
      StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
      StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
    }
    if (VM_Version::supports_sse2() && UseLibmIntrinsic) {
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::x86::_ONEHALF_adr = (address)StubRoutines::x86::_ONEHALF;
        StubRoutines::x86::_P_2_adr = (address)StubRoutines::x86::_P_2;
        StubRoutines::x86::_SC_4_adr = (address)StubRoutines::x86::_SC_4;
        StubRoutines::x86::_Ctable_adr = (address)StubRoutines::x86::_Ctable;
        StubRoutines::x86::_SC_2_adr = (address)StubRoutines::x86::_SC_2;
        StubRoutines::x86::_SC_3_adr = (address)StubRoutines::x86::_SC_3;
        StubRoutines::x86::_SC_1_adr = (address)StubRoutines::x86::_SC_1;
        StubRoutines::x86::_PI_INV_TABLE_adr = (address)StubRoutines::x86::_PI_INV_TABLE;
        StubRoutines::x86::_PI_4_adr = (address)StubRoutines::x86::_PI_4;
        StubRoutines::x86::_PI32INV_adr = (address)StubRoutines::x86::_PI32INV;
        StubRoutines::x86::_SIGN_MASK_adr = (address)StubRoutines::x86::_SIGN_MASK;
        StubRoutines::x86::_P_1_adr = (address)StubRoutines::x86::_P_1;
        StubRoutines::x86::_P_3_adr = (address)StubRoutines::x86::_P_3;
        StubRoutines::x86::_NEG_ZERO_adr = (address)StubRoutines::x86::_NEG_ZERO;
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dexp)) {
        StubRoutines::_dexp = generate_libmExp();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
        StubRoutines::_dlog = generate_libmLog();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) {
        StubRoutines::_dlog10 = generate_libmLog10();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) {
        StubRoutines::_dpow = generate_libmPow();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
        StubRoutines::_dsin = generate_libmSin();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
        StubRoutines::_dcos = generate_libmCos();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::_dtan = generate_libmTan();
      }
    }
  }

  void generate_all() {
    // Generates all stubs and initializes the entry points

    // These entry points require SharedInfo::stack0 to be set up in
    // non-core builds and need to be relocatable, so they each
    // fabricate a RuntimeStub internally.
    StubRoutines::_throw_AbstractMethodError_entry =
      generate_throw_exception("AbstractMethodError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_AbstractMethodError));

    StubRoutines::_throw_IncompatibleClassChangeError_entry =
      generate_throw_exception("IncompatibleClassChangeError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_IncompatibleClassChangeError));

    StubRoutines::_throw_NullPointerException_at_call_entry =
      generate_throw_exception("NullPointerException at call throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_NullPointerException_at_call));

    // entry points that are platform specific
    StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
    StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
    StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
    StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();

    StubRoutines::x86::_float_sign_mask  = generate_fp_mask("float_sign_mask",  0x7FFFFFFF7FFFFFFF);
    StubRoutines::x86::_float_sign_flip  = generate_fp_mask("float_sign_flip",  0x8000000080000000);
    StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
    StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    // don't bother generating these AES intrinsic stubs unless global flag is set
    if (UseAESIntrinsics) {
      StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others
      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
      StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
    }
    if (UseAESCTRIntrinsics) {
      StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask();
      StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
    }

    if (UseSHA1Intrinsics) {
      StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
      StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
      StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress");
      StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB");
    }
    if (UseSHA256Intrinsics) {
      StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
      // Duplicate each 128-bit chunk of the k256 round-constant table into
      // both halves of a 256-bit k256_W entry, so the constants can be used
      // directly from 256-bit (ymm) registers.
      char* dst = (char*)StubRoutines::x86::_k256_W;
      char* src = (char*)StubRoutines::x86::_k256;
      for (int ii = 0; ii < 16; ++ii) {
        memcpy(dst + 32 * ii,      src + 16 * ii, 16);
        memcpy(dst + 32 * ii + 16, src + 16 * ii, 16);
      }
      StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask();
      StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress");
      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB");
    }

    // Generate GHASH intrinsics code
    if (UseGHASHIntrinsics) {
      StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();
      StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask();
      StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
    }

    // Safefetch stubs.
    generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
                                                   &StubRoutines::_safefetch32_fault_pc,
                                                   &StubRoutines::_safefetch32_continuation_pc);
    generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                                                       &StubRoutines::_safefetchN_fault_pc,
                                                       &StubRoutines::_safefetchN_continuation_pc);
#ifdef COMPILER2
    if (UseMultiplyToLenIntrinsic) {
      StubRoutines::_multiplyToLen = generate_multiplyToLen();
    }
    if (UseSquareToLenIntrinsic) {
      StubRoutines::_squareToLen = generate_squareToLen();
    }
    if (UseMulAddIntrinsic) {
      StubRoutines::_mulAdd = generate_mulAdd();
    }
#ifndef _WINDOWS
    if (UseMontgomeryMultiplyIntrinsic) {
      StubRoutines::_montgomeryMultiply
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
    }
    if (UseMontgomerySquareIntrinsic) {
      StubRoutines::_montgomerySquare
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
    }
#endif // !_WINDOWS
#endif // COMPILER2

    if (UseVectorizedMismatchIntrinsic) {
      StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch();
    }
  }

 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
}; // end class declaration

void StubGenerator_generate(CodeBuffer* code, bool all) {
  StubGenerator g(code, all);
}
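
// For reference, a hedged sketch of how this entry point is driven. The
// actual call sites live in stubRoutines.cpp, not in this file, and details
// there may differ; blob names and the code_size1/code_size2 constants are
// shown as declared in stubRoutines.hpp:
//
//   void stubRoutines_init1() {                  // early in VM startup
//     BufferBlob* blob = BufferBlob::create("StubRoutines (1)", code_size1);
//     CodeBuffer buffer(blob);
//     StubGenerator_generate(&buffer, false);    // runs generate_initial()
//   }
//
//   void stubRoutines_init2() {                  // after universe init
//     BufferBlob* blob = BufferBlob::create("StubRoutines (2)", code_size2);
//     CodeBuffer buffer(blob);
//     StubGenerator_generate(&buffer, true);     // runs generate_all()
//   }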