/*
 * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions

// Stub Code definitions

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    // This can destroy rscratch1 if counter is far from the code cache
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter) \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif

  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    c_rarg4:   (interpreter) entry point              address
  //    c_rarg5:   parameters                             intptr_t*
  //    16(rbp):   parameter size (in words)              int
  //    24(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -12 [ argument word 1      ]
  // -11 [ saved r15            ] <--- rsp_after_call
  // -10 [ saved r14            ]
  //  -9 [ saved r13            ]
  //  -8 [ saved r12            ]
  //  -7 [ saved rbx            ]
  //  -6 [ call wrapper         ]
  //  -5 [ result               ]
  //  -4 [ result type          ]
  //  -3 [ method               ]
  //  -2 [ entry point          ]
  //  -1 [ parameters           ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ parameter size       ]
  //   3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    48(rbp):   (interpreter) entry point              address
  //    56(rbp):   parameters                             intptr_t*
  //    64(rbp):   parameter size (in words)              int
  //    72(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -60 [ argument word 1      ]
  // -59 [ saved xmm31          ] <--- rsp_after_call
  //     [ saved xmm16-xmm30    ] (EVEX enabled, else the space is blank)
  // -27 [ saved xmm15          ]
  //     [ saved xmm7-xmm14     ]
  //  -9 [ saved xmm6           ] (each xmm register takes 2 slots)
  //  -7 [ saved r15            ]
  //  -6 [ saved r14            ]
  //  -5 [ saved r13            ]
  //  -4 [ saved r12            ]
  //  -3 [ saved rdi            ]
  //  -2 [ saved rsi            ]
  //  -1 [ saved rbx            ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ call wrapper         ]
  //   3 [ result               ]
  //   4 [ result type          ]
  //   5 [ method               ]
  //   6 [ entry point          ]
  //   7 [ parameters           ]
  //   8 [ parameter size       ]
  //   9 [ thread               ]
  //
  //    Windows reserves the caller's stack space for arguments 1-4.
  //    We spill c_rarg0-c_rarg3 to this space.

  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    xmm_save_first     = 6,  // save from xmm6
    xmm_save_last      = 31, // to xmm31
    xmm_save_base      = -9,
    rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -27
    r15_off            = -7,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    call_wrapper_off   =  2,
    result_off         =  3,
    result_type_off    =  4,
    method_off         =  5,
    entry_point_off    =  6,
    parameters_off     =  7,
    parameter_size_off =  8,
    thread_off         =  9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    parameter_size_off =  2,
    thread_off         =  3
#endif
  };

#ifdef _WIN64
  Address xmm_save(int reg) {
    assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
    return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
  }
#endif
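  // How C++ code reaches this stub (explanatory sketch, not generated
  // code): the stub is invoked through the CallStub function-pointer
  // type declared in stubRoutines.hpp, whose arguments line up with the
  // layouts above:
  //
  //   typedef void (*CallStub)(address   link,
  //                            intptr_t* result,
  //                            BasicType result_type,
  //                            Method*   method,
  //                            address   entry_point,
  //                            intptr_t* parameters,
  //                            int       size_of_parameters,
  //                            TRAPS);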
  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,   c_rarg5); // parameters
    __ movptr(entry_point,  c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);
    if (UseAVX > 2) {
      __ movl(rbx, 0xffff);
      __ kmovwl(k1, rbx);
    }
#ifdef _WIN64
    int last_reg = 15;
    if (UseAVX > 2) {
      last_reg = 31;
    }
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vextractf32x4(xmm_save(i), as_XMMRegister(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(xmm_save(i), as_XMMRegister(i));
      }
    }

    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);     // parameter pointer
    __ movl(c_rarg1, c_rarg3);          // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0));// get parameter
    __ addptr(c_rarg2, wordSize);       // advance to next parameter
    __ decrementl(c_rarg1);             // decrement counter
    __ push(rax);                       // pass parameter
    __ jcc(Assembler::notZero, loop);

    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);             // get Method*
    __ movptr(c_rarg1, entry_point);    // get entry_point
    __ mov(r13, rsp);                   // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::call_stub: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::call_stub: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L3);
    }
#endif

    // restore regs belonging to calling function
#ifdef _WIN64
    // emit the restores for xmm regs
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vinsertf32x4(as_XMMRegister(i), as_XMMRegister(i), xmm_save(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(as_XMMRegister(i), xmm_save(i));
      }
    }
#endif
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ vzeroupper();
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }
434 // 435 // rax: exception oop 436 437 address generate_catch_exception() { 438 StubCodeMark mark(this, "StubRoutines", "catch_exception"); 439 address start = __ pc(); 440 441 // same as in generate_call_stub(): 442 const Address rsp_after_call(rbp, rsp_after_call_off * wordSize); 443 const Address thread (rbp, thread_off * wordSize); 444 445 #ifdef ASSERT 446 // verify that threads correspond 447 { 448 Label L1, L2, L3; 449 __ cmpptr(r15_thread, thread); 450 __ jcc(Assembler::equal, L1); 451 __ stop("StubRoutines::catch_exception: r15_thread is corrupted"); 452 __ bind(L1); 453 __ get_thread(rbx); 454 __ cmpptr(r15_thread, thread); 455 __ jcc(Assembler::equal, L2); 456 __ stop("StubRoutines::catch_exception: r15_thread is modified by call"); 457 __ bind(L2); 458 __ cmpptr(r15_thread, rbx); 459 __ jcc(Assembler::equal, L3); 460 __ stop("StubRoutines::catch_exception: threads must correspond"); 461 __ bind(L3); 462 } 463 #endif 464 465 // set pending exception 466 __ verify_oop(rax); 467 468 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax); 469 __ lea(rscratch1, ExternalAddress((address)__FILE__)); 470 __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1); 471 __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__); 472 473 // complete return to VM 474 assert(StubRoutines::_call_stub_return_address != NULL, 475 "_call_stub_return_address must have been generated before"); 476 __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address)); 477 478 return start; 479 } 480 481 // Continuation point for runtime calls returning with a pending 482 // exception. The pending exception check happened in the runtime 483 // or native call stub. The pending exception in Thread is 484 // converted into a Java-level exception. 485 // 486 // Contract with Java-level exception handlers: 487 // rax: exception 488 // rdx: throwing pc 489 // 490 // NOTE: At entry of this stub, exception-pc must be on stack !! 491 492 address generate_forward_exception() { 493 StubCodeMark mark(this, "StubRoutines", "forward exception"); 494 address start = __ pc(); 495 496 // Upon entry, the sp points to the return address returning into 497 // Java (interpreted or compiled) code; i.e., the return address 498 // becomes the throwing pc. 499 // 500 // Arguments pushed before the runtime call are still on the stack 501 // but the exception handler will reset the stack pointer -> 502 // ignore them. A potential result in registers can be ignored as 503 // well. 

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }

  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0); // Copy to eax we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    else
  //       return *dest;
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }
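  // How the single CMPXCHG above satisfies the contract (explanatory
  // sketch, not generated code): the instruction compares RAX (loaded
  // with compare_value) against *dest; on a match it stores
  // exchange_value, otherwise it loads the current *dest into RAX.
  // Either way RAX ends up holding what *dest contained at the point of
  // comparison, which is exactly the required return value, so the stub
  // can return immediately after the instruction.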

  // Support for jbyte atomic::atomic_cmpxchg(jbyte exchange_value, volatile jbyte* dest,
  //                                          jbyte compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    else
  //       return *dest;
  address generate_atomic_cmpxchg_byte() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
    address start = __ pc();

    __ movsbq(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jlong atomic::atomic_cmpxchg(jlong exchange_value,
  //                                          volatile jlong* dest,
  //                                          jlong compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    else
  //       return *dest;
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jint atomic::add(jint add_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    if ( os::is_MP() ) __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);
    __ addl(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
    if ( os::is_MP() ) __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
    __ addptr(rax, c_rarg0);
    __ ret(0);

    return start;
  }
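  // The XADD idiom in both add stubs above, spelled out (sketch):
  // LOCK XADD stores old + add_value to *dest and leaves the old value
  // in its register operand, so
  //   rax = add_value;          // saved copy of the addend
  //   xadd(*dest, c_rarg0);     // c_rarg0 <- old *dest
  //   rax += c_rarg0;           // rax = old + add_value = new *dest
  // which yields the "return *dest after the add" result the contract
  // asks for -- the equivalent of a GCC __atomic_add_fetch().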

  // Support for intptr_t OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess).  This is used as part of debugging
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp(rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // callers fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_sp()
  //
  // This routine is used to find the previous stack pointer for the
  // caller.
  address generate_get_previous_sp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_sp");
    address start = __ pc();

    __ movptr(rax, rsp);
    __ addptr(rax, 8); // return address is at the top of the stack.
    __ ret(0);

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize);  // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK); // Only check control and mask bits
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }

  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
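  // Why this fixup (and the l/d variants below) exist, in brief: the
  // CVTTSS2SI family returns the "integer indefinite" value (0x80000000
  // for int) both for NaN and for out-of-range inputs, whereas Java
  // requires (int)Float.NaN == 0 and saturation to Integer.MAX_VALUE /
  // Integer.MIN_VALUE on overflow.  Compiled code branches here only
  // when it sees the indefinite result, and the stub recomputes the
  // correct answer from the original float's bit pattern (the
  // exponent/sign tests against 0x7f800000 above).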

  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64( mask, relocInfo::none );
    __ emit_data64( mask, relocInfo::none );

    return start;
  }
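  // Usage note (sketch): these 16-byte constants -- the mask is emitted
  // twice so a full 128-bit operand is available -- back the SSE
  // sign-manipulation idioms.  For example, ANDPS with a mask of
  // 0x7FFFFFFF7FFFFFFF clears the sign bits (float abs), while XORPD
  // with 0x8000000000000000 flips the sign of a double.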

  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,
      saved_r10     = 8 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr   = 16 * wordSize,
      error_msg     = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit); // if obj is NULL it is OK
    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);
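    // In C terms the check above is
    //   (obj & Universe::verify_oop_mask()) == Universe::verify_oop_bits()
    // i.e. a plausibility test that the value's high/tag bits look like
    // a heap pointer for the current heap layout; it does not prove the
    // object is live.  (Explanatory comment only.)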

    // set r12 to heapbase for load_klass()
    __ reinit_heapbase();

    // make sure klass is 'reasonable', which is not zero.
    __ load_klass(rax, rax);  // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // restore c_rarg3
    __ pop(c_rarg2);                               // restore c_rarg2
    __ pop(r12);                                   // restore r12
    __ popf();                                     // restore flags
    __ ret(4 * wordSize);                          // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // get saved c_rarg3 back
    __ pop(c_rarg2);                               // get saved c_rarg2 back
    __ pop(r12);                                   // get saved r12 back
    __ popf();                                     // get saved flags off stack --
                                                   // will be ignored

    __ pusha();                                    // push registers
                                                   // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //     [tos +  0] 16 saved registers
    //     [tos + 16] return address
    //   * [tos + 17] error message (char*)
    //   * [tos + 18] object to verify (oop)
    //   * [tos + 19] saved rax - saved by caller and bashed
    //   * [tos + 20] saved r10 (rscratch1) - saved by caller
    //   * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ mov(rsp, r12);                               // restore rsp
    __ popa();                                      // pop registers (includes r12)
    __ ret(4 * wordSize);                           // pop caller saved stuff

    return start;
  }

  //
  // Verify that a register contains clean 32-bits positive value
  // (high 32-bits are 0) so it could be used in 64-bits shifts.
  //
  //  Input:
  //    Rint  -  32-bits value
  //    Rtmp  -  scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#ifdef ASSERT
    Label L;
    assert_different_registers(Rtmp, Rint);
    __ movslq(Rtmp, Rint);
    __ cmpq(Rtmp, Rint);
    __ jcc(Assembler::equal, L);
    __ stop("high 32-bits of int value are not 0");
    __ bind(L);
#endif
  }

  //  Generate overlap test for array copy stubs
  //
  //  Input:
  //     c_rarg0 - from
  //     c_rarg1 - to
  //     c_rarg2 - element count
  //
  //  Output:
  //     rax   - &from[element count - 1]
  //
  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(NULL, &L_no_overlap, sf);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
    const Register from     = c_rarg0;
    const Register to       = c_rarg1;
    const Register count    = c_rarg2;
    const Register end_from = rax;

    __ cmpptr(to, from);
    __ lea(end_from, Address(from, count, sf, 0));
    if (NOLp == NULL) {
      ExternalAddress no_overlap(no_overlap_target);
      __ jump_cc(Assembler::belowEqual, no_overlap);
      __ cmpptr(to, end_from);
      __ jump_cc(Assembler::aboveEqual, no_overlap);
    } else {
      __ jcc(Assembler::belowEqual, (*NOLp));
      __ cmpptr(to, end_from);
      __ jcc(Assembler::aboveEqual, (*NOLp));
    }
  }
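  // In C terms, the test above branches to the no-overlap target when
  //   (uintptr_t)to <= (uintptr_t)from ||
  //   (uintptr_t)to >= (uintptr_t)&from[count]
  // i.e. when the destination does not overlap the source from above,
  // which is exactly when a low-to-high (forward) copy is safe.
  // (Explanatory sketch; the element size is implied by the ScaleFactor.)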

  // Shuffle first three arg regs on Windows into Linux/Solaris locations.
  //
  // Outputs:
  //    rdi - rcx
  //    rsi - rdx
  //    rdx - r8
  //    rcx - r9
  //
  // Registers r9 and r10 are used to save rdi and rsi on Windows, where the
  // latter two are non-volatile.  r9 and r10 should not be used by the caller.
  //
  void setup_arg_regs(int nargs = 3) {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
    assert(nargs == 3 || nargs == 4, "else fix");
#ifdef _WIN64
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    if (nargs >= 4)
      __ mov(rax, r9);  // r9 is also saved_rdi
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
    if (nargs >= 4)
      __ mov(rcx, rax); // c_rarg3 (via rax)
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
  }

  void restore_arg_regs() {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
#ifdef _WIN64
    __ movptr(rdi, saved_rdi);
    __ movptr(rsi, saved_rsi);
#endif
  }

  // Generate code for an array write pre barrier
  //
  //     addr    -  starting address
  //     count   -  element count
  //     tmp     -  scratch register
  //
  //     Destroys no registers!
  //
  void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCTLogging:
        // With G1, don't generate the call if we statically know that the target is uninitialized
        if (!dest_uninitialized) {
          __ pusha();                      // push registers
          if (count == c_rarg0) {
            if (addr == c_rarg1) {
              // exactly backwards!!
              __ xchgptr(c_rarg1, c_rarg0);
            } else {
              __ movptr(c_rarg1, count);
              __ movptr(c_rarg0, addr);
            }
          } else {
            __ movptr(c_rarg0, addr);
            __ movptr(c_rarg1, count);
          }
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
          __ popa();
        }
        break;
      case BarrierSet::CardTableForRS:
      case BarrierSet::CardTableExtension:
      case BarrierSet::ModRef:
        break;
      default:
        ShouldNotReachHere();

    }
  }

  //
  // Generate code for an array write post barrier
  //
  //  Input:
  //     start    - register containing starting address of destination array
  //     count    - elements count
  //     scratch  - scratch register
  //
  //  The input registers are overwritten.
  //
  void gen_write_ref_array_post_barrier(Register start, Register count, Register scratch) {
    assert_different_registers(start, count, scratch);
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCTLogging:
        {
          __ pusha();             // push registers (overkill)
          if (c_rarg0 == count) { // On win64 c_rarg0 == rcx
            assert_different_registers(c_rarg1, start);
            __ mov(c_rarg1, count);
            __ mov(c_rarg0, start);
          } else {
            assert_different_registers(c_rarg0, count);
            __ mov(c_rarg0, start);
            __ mov(c_rarg1, count);
          }
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
          __ popa();
        }
        break;
      case BarrierSet::CardTableForRS:
      case BarrierSet::CardTableExtension:
        {
          CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

          Label L_loop;
          const Register end = count;

          __ leaq(end, Address(start, count, TIMES_OOP, 0)); // end == start+count*oop_size
          __ subptr(end, BytesPerHeapOop);                   // end - 1 to make inclusive
          __ shrptr(start, CardTableModRefBS::card_shift);
          __ shrptr(end,   CardTableModRefBS::card_shift);
          __ subptr(end, start);                             // end --> cards count

          int64_t disp = (int64_t) ct->byte_map_base;
          __ mov64(scratch, disp);
          __ addptr(start, scratch);
          __ BIND(L_loop);
          __ movb(Address(start, count, Address::times_1), 0);
          __ decrement(count);
          __ jcc(Assembler::greaterEqual, L_loop);
        }
        break;
      default:
        ShouldNotReachHere();

    }
  }
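  // Card-marking arithmetic, spelled out (sketch): with the usual
  // card_shift of 9 (512-byte cards), the CardTable case above is
  // roughly
  //   jbyte* base = ct->byte_map_base;  // biased so addr>>9 indexes it
  //   for (uintptr_t c = (uintptr_t)start >> 9; c <= (uintptr_t)last >> 9; c++)
  //     base[c] = 0;                    // dirty the card
  // The generated loop covers the same card range backwards, indexing
  // from the first card by the remaining card count.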

  // Copy big chunks forward
  //
  // Inputs:
  //   end_from     - source array end address
  //   end_to       - destination array end address
  //   qword_count  - 64-bits element count, negative
  //   to           - scratch
  //   L_copy_bytes - entry label
  //   L_copy_8_bytes  - exit  label
  //
  void copy_bytes_forward(Register end_from, Register end_to,
                          Register qword_count, Register to,
                          Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      if (UseAVX > 2) {
        __ movl(to, 0xffff);
        __ kmovwl(k1, to);
      }
      // Copy 64-bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit);
        __ evmovdqul(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
        __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
        __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 8);
      __ jcc(Assembler::lessEqual, L_loop);
      __ subptr(qword_count, 4);  // sub(8) and add(4)
      __ jccb(Assembler::greater, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
      }
      __ addptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32-bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
      __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
      __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
      __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
      __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);

      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 4);
      __ jcc(Assembler::lessEqual, L_loop);
    }
    __ subptr(qword_count, 4);
    __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
  }
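  // Indexing scheme used above, spelled out (sketch): callers pass
  // end_from/end_to pointing at the last qword and qword_count as a
  // negative index, so
  //   end_from[qword_count] == &from[0]       initially, and
  //   end_from[0]           == &from[count-1]
  // The main loop therefore counts the index up towards zero, which
  // needs only one add-and-branch per iteration and no separate limit
  // register.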

  // Copy big chunks backward
  //
  // Inputs:
  //   from         - source array address
  //   dest         - destination array address
  //   qword_count  - 64-bits element count
  //   to           - scratch
  //   L_copy_bytes - entry label
  //   L_copy_8_bytes  - exit  label
  //
  void copy_bytes_backward(Register from, Register dest,
                           Register qword_count, Register to,
                           Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      if (UseAVX > 2) {
        __ movl(to, 0xffff);
        __ kmovwl(k1, to);
      }
      // Copy 64-bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 0), Assembler::AVX_512bit);
        __ evmovdqul(Address(dest, qword_count, Address::times_8, 0), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
        __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8,  0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8,  0), xmm1);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
        __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
        __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
        __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
        __ movdqu(xmm3, Address(from, qword_count, Address::times_8,  0));
        __ movdqu(Address(dest, qword_count, Address::times_8,  0), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 8);
      __ jcc(Assembler::greaterEqual, L_loop);

      __ addptr(qword_count, 4);  // add(8) and sub(4)
      __ jccb(Assembler::less, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8,  0));
        __ movdqu(Address(dest, qword_count, Address::times_8,  0), xmm1);
      }
      __ subptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32-bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(from, qword_count, Address::times_8, 24));
      __ movq(Address(dest, qword_count, Address::times_8, 24), to);
      __ movq(to, Address(from, qword_count, Address::times_8, 16));
      __ movq(Address(dest, qword_count, Address::times_8, 16), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  8));
      __ movq(Address(dest, qword_count, Address::times_8,  8), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  0));
      __ movq(Address(dest, qword_count, Address::times_8,  0), to);

      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 4);
      __ jcc(Assembler::greaterEqual, L_loop);
    }
    __ addptr(qword_count, 4);
    __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_byte_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_byte_copy().
  //
  address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    Label L_copy_byte, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count); // make the count negative
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jccb(Assembler::zero, L_copy_byte);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ addptr(end_from, 2);
    __ addptr(end_to, 2);

    // Check for and copy trailing byte
    __ BIND(L_copy_byte);
    __ testl(byte_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movb(rax, Address(end_from, 8));
    __ movb(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }
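  // Tail handling in the stub above, spelled out (sketch): the count is
  // split into qwords plus a 0..7 byte remainder, and the low bits of
  // byte_count select the trailing copies, largest unit first:
  //   if (byte_count & 4) copy 4 bytes;
  //   if (byte_count & 2) copy 2 bytes;
  //   if (byte_count & 1) copy 1 byte;
  // so at most three extra moves complete any byte count.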

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3);   // count => qword_count

    // Copy from high to low addresses.

    // Check for and copy trailing byte
    __ testl(byte_count, 1);
    __ jcc(Assembler::zero, L_copy_2_bytes);
    __ movb(rax, Address(from, byte_count, Address::times_1, -1));
    __ movb(Address(to, byte_count, Address::times_1, -1), rax);
    __ decrement(byte_count); // Adjust for possible trailing word

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jcc(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, byte_count, Address::times_1, -2));
    __ movw(Address(to, byte_count, Address::times_1, -2), rax);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
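  // Why the conjoint copy runs in the opposite order (note): when the
  // overlap test falls through, the destination may overlap the source
  // from above, so the stub copies the sub-qword tail first, at the high
  // end, and then walks the qwords from high to low addresses; that way
  // no source byte is read after the copy has overwritten it.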

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_short_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_short_copy().
  //
  address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Original 'dest' is trashed, so we can't use it as a
    // base register for a possible trailing word copy

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }

  address generate_fill(BasicType t, bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    BLOCK_COMMENT("Entry:");

    const Register to    = c_rarg0;  // destination array address
    const Register value = c_rarg1;  // value
    const Register count = c_rarg2;  // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame

  address generate_fill(BasicType t, bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    BLOCK_COMMENT("Entry:");

    const Register to    = c_rarg0; // destination array address
    const Register value = c_rarg1; // value
    const Register count = c_rarg2; // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ generate_fill(t, aligned, to, value, count, rax, xmm0);

    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }
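
  // (Illustrative note: semantically the fill stub produced above is just
  //
  //    for (size_t i = 0; i < count; i++)  ((T*)to)[i] = (T)value;
  //
  //  for T in {jbyte, jshort, jint}; MacroAssembler::generate_fill typically
  //  replicates 'value' across a wider register so most of the range is
  //  written in larger units.  A sketch of the contract, not of the emitted
  //  instruction sequence.)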

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
                                       address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes;
    const Register from        = rdi; // source array address
    const Register to          = rsi; // destination array address
    const Register count       = rdx; // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);   // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_2);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2); // count => qword_count

    // Copy from high to low addresses.  Use 'to' as scratch.

    // Check for and copy trailing word
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, word_count, Address::times_2, -2));
    __ movw(Address(to, word_count, Address::times_2, -2), rax);

    // Check for and copy trailing dword
  __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  // Side Effects:
  //   disjoint_int_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_int_oop_copy().
  //
  address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry,
                                         const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    const Register saved_to    = r11;  // saved destination array address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
    if (is_oop) {
      __ movq(saved_to, to);
      gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
    }

    // 'from', 'to' and 'count' are now valid
    __ movptr(dword_count, count);
    __ shrptr(count, 1); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
  __ BIND(L_copy_4_bytes);
    __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
    __ jccb(Assembler::zero, L_exit);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

  __ BIND(L_exit);
    if (is_oop) {
      gen_write_ref_array_post_barrier(saved_to, dword_count, rax);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ vzeroupper();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }
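
  // (Illustrative note: for oop arrays the copies are bracketed by
  //  gen_write_ref_array_pre_barrier()/gen_write_ref_array_post_barrier().
  //  What they emit depends on the collector; e.g. a card-marking post
  //  barrier is, in rough C sketch form,
  //
  //    for (addr = to; addr < to + count*heapOopSize; addr += card_size)
  //      card_table[(uintptr_t)addr >> card_shift] = dirty_card;
  //
  //  while G1 additionally needs the pre barrier to record overwritten
  //  values unless the destination is known to be uninitialized.)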

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
                                         address *entry, const char *name,
                                         bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi; // source array address
    const Register to          = rsi; // destination array address
    const Register count       = rdx; // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_4);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    if (is_oop) {
      // no registers are destroyed by this call
      gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
    }

    assert_clean_int(count, rax); // Make sure 'count' is clean int.
    // 'from', 'to' and 'count' are now valid
    __ movptr(dword_count, count);
    __ shrptr(count, 1); // count => qword_count

    // Copy from high to low addresses.  Use 'to' as scratch.

    // Check for and copy trailing dword
    __ testl(dword_count, 1);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, dword_count, Address::times_4, -4));
    __ movl(Address(to, dword_count, Address::times_4, -4), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

  __ BIND(L_exit);
    if (is_oop) {
      gen_write_ref_array_post_barrier(to, dword_count, rax);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // Side Effects:
  //   disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
  //   no-overlap entry point used by generate_conjoint_long_oop_copy().
  //
  address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry,
                                          const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register qword_count = rdx;  // elements count
    const Register end_from    = from; // source array end address
    const Register end_to      = rcx;  // destination array end address
    const Register saved_to    = to;
    const Register saved_count = r11;
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    // Save no-overlap entry point for generate_conjoint_long_oop_copy()
    assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
    // 'from', 'to' and 'qword_count' are now valid
    if (is_oop) {
      // Save 'to' and count for store barrier
      __ movptr(saved_count, qword_count);
      // no registers are destroyed by this call
      gen_write_ref_array_pre_barrier(to, qword_count, dest_uninitialized);
    }

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    } else {
      restore_arg_regs();
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
      __ xorptr(rax, rax); // return 0
      __ vzeroupper();
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    if (is_oop) {
    __ BIND(L_exit);
      gen_write_ref_array_post_barrier(saved_to, saved_count, rax);
    }
    restore_arg_regs();
    if (is_oop) {
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
    }
    __ vzeroupper();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  address generate_conjoint_long_oop_copy(bool aligned, bool is_oop,
                                          address nooverlap_target, address *entry,
                                          const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_exit;
    const Register from        = rdi; // source array address
    const Register to          = rsi; // destination array address
    const Register qword_count = rdx; // elements count
    const Register saved_count = rcx;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_8);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
    // 'from', 'to' and 'qword_count' are now valid
    if (is_oop) {
      // Save 'to' and count for store barrier
      __ movptr(saved_count, qword_count);
      // No registers are destroyed by this call
      gen_write_ref_array_pre_barrier(to, saved_count, dest_uninitialized);
    }

    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    } else {
      restore_arg_regs();
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
      __ xorptr(rax, rax); // return 0
      __ vzeroupper();
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    if (is_oop) {
    __ BIND(L_exit);
      gen_write_ref_array_post_barrier(to, saved_count, rax);
    }
    restore_arg_regs();
    if (is_oop) {
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
    }
    __ vzeroupper();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
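
  // (Illustrative note on the type check used by the checkcast copy below:
  //  the fast path emitted by check_klass_subtype_fast_path corresponds
  //  roughly to
  //
  //    if (sub_klass == super_klass) goto success;
  //    if (*(Klass**)((address)sub_klass + super_check_offset) == super_klass)
  //      goto success;
  //
  //  which covers both the primary-supers display and the cached secondary
  //  super; the slow path then scans the secondary supers array linearly.
  //  See check_klass_subtype_fast_path/_slow_path in macroAssembler_x86.)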

  // Helper for generating a dynamic type check.
  // Smashes no registers.
  void generate_type_check(Register sub_klass,
                           Register super_check_offset,
                           Register super_klass,
                           Label& L_success) {
    assert_different_registers(sub_klass, super_check_offset, super_klass);

    BLOCK_COMMENT("type_check:");

    Label L_miss;

    __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL,
                                     super_check_offset);
    __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL);

    // Fall through on failure!
  __ BIND(L_miss);
  }

  //
  // Generate checkcasting array copy stub
  //
  // Input:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //   c_rarg3   - size_t ckoff (super_check_offset)
  // not Win64
  //   c_rarg4   - oop ckval (super_klass)
  // Win64
  //   rsp+40    - oop ckval (super_klass)
  //
  // Output:
  //   rax ==  0   - success
  //   rax == -1^K - failure, where K is partial transfer count
  //
  address generate_checkcast_copy(const char *name, address *entry,
                                  bool dest_uninitialized = false) {

    Label L_load_element, L_store_element, L_do_card_marks, L_done;

    // Input registers (after setup_arg_regs)
    const Register from   = rdi; // source array address
    const Register to     = rsi; // destination array address
    const Register length = rdx; // elements count
    const Register ckoff  = rcx; // super_check_offset
    const Register ckval  = r8;  // super_klass

    // Registers used as temps (r13, r14 are save-on-entry)
    const Register end_from   = from; // source array end address
    const Register end_to     = r13;  // destination array end address
    const Register count      = rdx;  // -(count_remaining)
    const Register r14_length = r14;  // saved copy of length
    // End pointers are inclusive, and if length is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    const Register rax_oop   = rax; // actual oop copied
    const Register r11_klass = r11; // oop._klass

    //---------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the two arrays are subtypes of Object[] but the
    // destination array type is not equal to or a supertype
    // of the source type.  Each element must be separately
    // checked.

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef ASSERT
    // caller guarantees that the arrays really are different
    // otherwise, we would have to make conjoint checks
    { Label L;
      array_overlap_test(L, TIMES_OOP);
      __ stop("checkcast_copy within a single array");
      __ bind(L);
    }
#endif //ASSERT

    setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
                       // ckoff => rcx, ckval => r8
                       // r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
    // last argument (#4) is on stack on Win64
    __ movptr(ckval, Address(rsp, 6 * wordSize));
#endif

    // Caller of this entry point must set up the argument registers.
    if (entry != NULL) {
      *entry = __ pc();
      BLOCK_COMMENT("Entry:");
    }

    // allocate spill slots for r13, r14
    enum {
      saved_r13_offset,
      saved_r14_offset,
      saved_rbp_offset
    };
    __ subptr(rsp, saved_rbp_offset * wordSize);
    __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
    __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);

    // check that int operands are properly extended to size_t
    assert_clean_int(length, rax);
    assert_clean_int(ckoff, rax);

#ifdef ASSERT
    BLOCK_COMMENT("assert consistent ckoff/ckval");
    // The ckoff and ckval must be mutually consistent,
    // even though caller generates both.
    { Label L;
      int sco_offset = in_bytes(Klass::super_check_offset_offset());
      __ cmpl(ckoff, Address(ckval, sco_offset));
      __ jcc(Assembler::equal, L);
      __ stop("super_check_offset inconsistent");
      __ bind(L);
    }
#endif //ASSERT

    // Loop-invariant addresses.  They are exclusive end pointers.
    Address end_from_addr(from, length, TIMES_OOP, 0);
    Address   end_to_addr(to,   length, TIMES_OOP, 0);
    // Loop-variant addresses.  They assume post-incremented count < 0.
    Address from_element_addr(end_from, count, TIMES_OOP, 0);
    Address   to_element_addr(end_to,   count, TIMES_OOP, 0);

    gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);

    // Copy from low to high addresses, indexed from the end of each array.
    __ lea(end_from, end_from_addr);
    __ lea(end_to, end_to_addr);
    __ movptr(r14_length, length); // save a copy of the length
    assert(length == count, "");   // else fix next line:
    __ negptr(count); // negate and test the length
    __ jcc(Assembler::notZero, L_load_element);

    // Empty array:  Nothing to do.
    __ xorptr(rax, rax); // return 0 on (trivial) success
    __ jmp(L_done);

    // ======== begin loop ========
    // (Loop is rotated; its entry is L_load_element.)
    // Loop control:
    //   for (count = -count; count != 0; count++)
    // Base pointers src, dst are biased by 8*(count-1), to last element.
    __ align(OptoLoopAlignment);

  __ BIND(L_store_element);
    __ store_heap_oop(to_element_addr, rax_oop); // store the oop
    __ increment(count); // increment the count toward zero
    __ jcc(Assembler::zero, L_do_card_marks);

    // ======== loop entry is here ========
  __ BIND(L_load_element);
    __ load_heap_oop(rax_oop, from_element_addr); // load the oop
    __ testptr(rax_oop, rax_oop);
    __ jcc(Assembler::zero, L_store_element);

    __ load_klass(r11_klass, rax_oop); // query the object klass
    generate_type_check(r11_klass, ckoff, ckval, L_store_element);
    // ======== end loop ========

    // It was a real error; we must depend on the caller to finish the job.
    // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
    // Emit GC store barriers for the oops we have copied (r14 + rdx),
    // and report their number to the caller.
    assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1);
    Label L_post_barrier;
    __ addptr(r14_length, count); // K = (original - remaining) oops
    __ movptr(rax, r14_length);   // save the value
    __ notptr(rax);               // report (-1^K) to caller (does not affect flags)
    __ jccb(Assembler::notZero, L_post_barrier);
    __ jmp(L_done); // K == 0, nothing was copied, skip post barrier

    // Come here on success only.
  __ BIND(L_do_card_marks);
    __ xorptr(rax, rax); // return 0 on success

  __ BIND(L_post_barrier);
    gen_write_ref_array_post_barrier(to, r14_length, rscratch1);
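
    // (Illustrative note: on the failure path above, rax was set to
    //  ~K = -1 ^ K, where K is the number of oops actually copied.
    //  A caller can recover K as ~rax; success is the distinct value 0,
    //  and "failed on the first element" reports ~0 == -1.)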
    // Common exit point (success or failure).
  __ BIND(L_done);
    __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
    __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  //
  // Generate 'unsafe' array copy stub
  // Though just as safe as the other stubs, it takes an unscaled
  // size_t argument instead of an element count.
  //
  // Input:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - byte count, treated as ssize_t, can be zero
  //
  // Examines the alignment of the operands and dispatches
  // to a long, int, short, or byte copy loop.
  //
  address generate_unsafe_copy(const char *name,
                               address byte_copy_entry, address short_copy_entry,
                               address int_copy_entry, address long_copy_entry) {

    Label L_long_aligned, L_int_aligned, L_short_aligned;

    // Input registers (before setup_arg_regs)
    const Register from = c_rarg0; // source array address
    const Register to   = c_rarg1; // destination array address
    const Register size = c_rarg2; // byte count (size_t)

    // Register used as a temp
    const Register bits = rax;     // test copy of low bits

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);

    __ mov(bits, from);
    __ orptr(bits, to);
    __ orptr(bits, size);

    __ testb(bits, BytesPerLong-1);
    __ jccb(Assembler::zero, L_long_aligned);

    __ testb(bits, BytesPerInt-1);
    __ jccb(Assembler::zero, L_int_aligned);

    __ testb(bits, BytesPerShort-1);
    __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));

  __ BIND(L_short_aligned);
    __ shrptr(size, LogBytesPerShort); // size => short_count
    __ jump(RuntimeAddress(short_copy_entry));

  __ BIND(L_int_aligned);
    __ shrptr(size, LogBytesPerInt); // size => int_count
    __ jump(RuntimeAddress(int_copy_entry));

  __ BIND(L_long_aligned);
    __ shrptr(size, LogBytesPerLong); // size => qword_count
    __ jump(RuntimeAddress(long_copy_entry));

    return start;
  }
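
  // (Illustrative C sketch of the dispatch above, not generated code:
  //
  //    uintptr_t bits = (uintptr_t)from | (uintptr_t)to | size;
  //    if      ((bits & (BytesPerLong  - 1)) == 0) long_copy (from, to, size >> LogBytesPerLong);
  //    else if ((bits & (BytesPerInt   - 1)) == 0) int_copy  (from, to, size >> LogBytesPerInt);
  //    else if ((bits & (BytesPerShort - 1)) == 0) short_copy(from, to, size >> LogBytesPerShort);
  //    else                                        byte_copy (from, to, size);
  //
  //  Or-ing the three operands lets a single test establish that source,
  //  destination and size are all aligned for the chosen element width.)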

  // Perform range checks on the proposed arraycopy.
  // Kills temp, but nothing else.
  // Also, clean the sign bits of src_pos and dst_pos.
  void arraycopy_range_checks(Register src,     // source array oop (c_rarg0)
                              Register src_pos, // source position (c_rarg1)
                              Register dst,     // destination array oop (c_rarg2)
                              Register dst_pos, // destination position (c_rarg3)
                              Register length,
                              Register temp,
                              Label& L_failed) {
    BLOCK_COMMENT("arraycopy_range_checks:");

    //  if (src_pos + length > arrayOop(src)->length())  FAIL;
    __ movl(temp, length);
    __ addl(temp, src_pos); // src_pos + length
    __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    //  if (dst_pos + length > arrayOop(dst)->length())  FAIL;
    __ movl(temp, length);
    __ addl(temp, dst_pos); // dst_pos + length
    __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
    // Move with sign extension can be used since they are positive.
    __ movslq(src_pos, src_pos);
    __ movslq(dst_pos, dst_pos);

    BLOCK_COMMENT("arraycopy_range_checks done");
  }
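
  // (Illustrative note: the caller has already rejected negative positions
  //  and lengths, so each operand is below 2^31 and the 32-bit sum cannot
  //  wrap as an unsigned value; the unsigned 'above' compare therefore also
  //  catches sums that exceed 2^31.  In C sketch form:
  //
  //    if ((uint32_t)(src_pos + length) > (uint32_t)src->length()) goto fail;
  //  )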

  //
  // Generate generic array copy stubs
  //
  // Input:
  //   c_rarg0    -  src oop
  //   c_rarg1    -  src_pos (32-bits)
  //   c_rarg2    -  dst oop
  //   c_rarg3    -  dst_pos (32-bits)
  // not Win64
  //   c_rarg4    -  element count (32-bits)
  // Win64
  //   rsp+40     -  element count (32-bits)
  //
  // Output:
  //   rax ==  0   -  success
  //   rax == -1^K - failure, where K is partial transfer count
  //
  address generate_generic_copy(const char *name,
                                address byte_copy_entry, address short_copy_entry,
                                address int_copy_entry, address oop_copy_entry,
                                address long_copy_entry, address checkcast_copy_entry) {

    Label L_failed, L_failed_0, L_objArray;
    Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;

    // Input registers
    const Register src     = c_rarg0; // source array oop
    const Register src_pos = c_rarg1; // source position
    const Register dst     = c_rarg2; // destination array oop
    const Register dst_pos = c_rarg3; // destination position
#ifndef _WIN64
    const Register length  = c_rarg4;
#else
    const Address  length(rsp, 6 * wordSize); // elements count is on stack on Win64
#endif

    { int modulus = CodeEntryAlignment;
      int target  = modulus - 5; // 5 = sizeof jmp(L_failed)
      int advance = target - (__ offset() % modulus);
      if (advance < 0)  advance += modulus;
      if (advance > 0)  __ nop(advance);
    }
    StubCodeMark mark(this, "StubRoutines", name);

    // Short-hop target to L_failed.  Makes for denser prologue code.
  __ BIND(L_failed_0);
    __ jmp(L_failed);
    assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");

    __ align(CodeEntryAlignment);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_generic_array_copy_ctr);

    //-----------------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the following conditions are met:
    //
    // (1) src and dst must not be null.
    // (2) src_pos must not be negative.
    // (3) dst_pos must not be negative.
    // (4) length  must not be negative.
    // (5) src klass and dst klass should be the same and not NULL.
    // (6) src and dst should be arrays.
    // (7) src_pos + length must not exceed length of src.
    // (8) dst_pos + length must not exceed length of dst.
    //

    //  if (src == NULL) return -1;
    __ testptr(src, src); // src oop
    size_t j1off = __ offset();
    __ jccb(Assembler::zero, L_failed_0);

    //  if (src_pos < 0) return -1;
    __ testl(src_pos, src_pos); // src_pos (32-bits)
    __ jccb(Assembler::negative, L_failed_0);

    //  if (dst == NULL) return -1;
    __ testptr(dst, dst); // dst oop
    __ jccb(Assembler::zero, L_failed_0);

    //  if (dst_pos < 0) return -1;
    __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
    size_t j4off = __ offset();
    __ jccb(Assembler::negative, L_failed_0);

    // The first four tests are very dense code,
    // but not quite dense enough to put four
    // jumps in a 16-byte instruction fetch buffer.
    // That's good, because some branch predictors
    // do not like jumps so close together.
    // Make sure of this.
    guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");

    // registers used as temp
    const Register r11_length    = r11; // elements count to copy
    const Register r10_src_klass = r10; // array klass

    //  if (length < 0) return -1;
    __ movl(r11_length, length); // length (elements count, 32-bits value)
    __ testl(r11_length, r11_length);
    __ jccb(Assembler::negative, L_failed_0);

    __ load_klass(r10_src_klass, src);
#ifdef ASSERT
    //  assert(src->klass() != NULL);
    {
      BLOCK_COMMENT("assert klasses not null {");
      Label L1, L2;
      __ testptr(r10_src_klass, r10_src_klass);
      __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL
      __ bind(L1);
      __ stop("broken null klass");
      __ bind(L2);
      __ load_klass(rax, dst);
      __ cmpq(rax, 0);
      __ jcc(Assembler::equal, L1);   // this would be broken also
      BLOCK_COMMENT("} assert klasses not null done");
    }
#endif

    // Load layout helper (32-bits)
    //
    //  |array_tag|     | header_size | element_type |     |log2_element_size|
    // 32        30    24            16              8     2                 0
    //
    //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
    //

    const int lh_offset = in_bytes(Klass::layout_helper_offset());
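
    // (Illustrative decode of the layout helper, as a C sketch using the
    //  named Klass constants rather than hard-wired shifts; this mirrors
    //  what the stub extracts below, it is not generated code:
    //
    //    int hsize = (lh >> Klass::_lh_header_size_shift) & Klass::_lh_header_size_mask;
    //    int l2esz = (lh >> Klass::_lh_log2_element_size_shift) & Klass::_lh_log2_element_size_mask;
    //
    //  The array tag occupies the sign bits, which is why the is_Array and
    //  typeArray tests below are signed comparisons against
    //  _lh_neutral_value and the shifted _lh_array_tag_type_value.)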

    // Handle objArrays completely differently...
    const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
    __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh);
    __ jcc(Assembler::equal, L_objArray);

    //  if (src->klass() != dst->klass()) return -1;
    __ load_klass(rax, dst);
    __ cmpq(r10_src_klass, rax);
    __ jcc(Assembler::notEqual, L_failed);

    const Register rax_lh = rax; // layout helper
    __ movl(rax_lh, Address(r10_src_klass, lh_offset));

    //  if (!src->is_Array()) return -1;
    __ cmpl(rax_lh, Klass::_lh_neutral_value);
    __ jcc(Assembler::greaterEqual, L_failed);

    // At this point, it is known to be a typeArray (array_tag 0x3).
#ifdef ASSERT
    {
      BLOCK_COMMENT("assert primitive array {");
      Label L;
      __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
      __ jcc(Assembler::greaterEqual, L);
      __ stop("must be a primitive array");
      __ bind(L);
      BLOCK_COMMENT("} assert primitive array done");
    }
#endif

    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                           r10, L_failed);

    // TypeArrayKlass
    //
    // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
    // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
    //

    const Register r10_offset = r10;    // array offset
    const Register rax_elsize = rax_lh; // element size

    __ movl(r10_offset, rax_lh);
    __ shrl(r10_offset, Klass::_lh_header_size_shift);
    __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset
    __ addptr(src, r10_offset); // src array offset
    __ addptr(dst, r10_offset); // dst array offset
    BLOCK_COMMENT("choose copy loop based on element size");
    __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize

    // The next registers must be set before the jump to the corresponding stub.
    const Register from  = c_rarg0; // source array address
    const Register to    = c_rarg1; // destination array address
    const Register count = c_rarg2; // elements count

    // 'from', 'to' and 'count' must be set in this order,
    // since they overlap with 'src', 'src_pos' and 'dst'.

  __ BIND(L_copy_bytes);
    __ cmpl(rax_elsize, 0);
    __ jccb(Assembler::notEqual, L_copy_shorts);
    __ lea(from, Address(src, src_pos, Address::times_1, 0)); // src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_1, 0)); // dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(byte_copy_entry));

  __ BIND(L_copy_shorts);
    __ cmpl(rax_elsize, LogBytesPerShort);
    __ jccb(Assembler::notEqual, L_copy_ints);
    __ lea(from, Address(src, src_pos, Address::times_2, 0)); // src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_2, 0)); // dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(short_copy_entry));

  __ BIND(L_copy_ints);
    __ cmpl(rax_elsize, LogBytesPerInt);
    __ jccb(Assembler::notEqual, L_copy_longs);
    __ lea(from, Address(src, src_pos, Address::times_4, 0)); // src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_4, 0)); // dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(int_copy_entry));

  __ BIND(L_copy_longs);
#ifdef ASSERT
    {
      BLOCK_COMMENT("assert long copy {");
      Label L;
      __ cmpl(rax_elsize, LogBytesPerLong);
      __ jcc(Assembler::equal, L);
      __ stop("must be long copy, but elsize is wrong");
      __ bind(L);
      BLOCK_COMMENT("} assert long copy done");
    }
#endif
    __ lea(from, Address(src, src_pos, Address::times_8, 0)); // src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_8, 0)); // dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(long_copy_entry));

    // ObjArrayKlass
  __ BIND(L_objArray);
    // live at this point:  r10_src_klass, r11_length, src[_pos], dst[_pos]

    Label L_plain_copy, L_checkcast_copy;
    //  test array classes for subtyping
    __ load_klass(rax, dst);
    __ cmpq(r10_src_klass, rax); // usual case is exact equality
    __ jcc(Assembler::notEqual, L_checkcast_copy);

    // Identically typed arrays can be copied without element-wise checks.
    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                           r10, L_failed);

    __ lea(from, Address(src, src_pos, TIMES_OOP,
                 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
    __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
                 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
    __ movl2ptr(count, r11_length); // length
  __ BIND(L_plain_copy);
    __ jump(RuntimeAddress(oop_copy_entry));

  __ BIND(L_checkcast_copy);
    // live at this point:  r10_src_klass, r11_length, rax (dst_klass)
    {
      // Before looking at dst.length, make sure dst is also an objArray.
      __ cmpl(Address(rax, lh_offset), objArray_lh);
      __ jcc(Assembler::notEqual, L_failed);

      // It is safe to examine both src.length and dst.length.
      arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                             rax, L_failed);

      const Register r11_dst_klass = r11;
      __ load_klass(r11_dst_klass, dst); // reload

      // Marshal the base address arguments now, freeing registers.
      __ lea(from, Address(src, src_pos, TIMES_OOP,
                   arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
                   arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ movl(count, length); // length (reloaded)
      Register sco_temp = c_rarg3; // this register is free now
      assert_different_registers(from, to, count, sco_temp,
                                 r11_dst_klass, r10_src_klass);
      assert_clean_int(count, sco_temp);

      // Generate the type check.
      const int sco_offset = in_bytes(Klass::super_check_offset_offset());
      __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
      assert_clean_int(sco_temp, rax);
      generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);

      // Fetch destination element klass from the ObjArrayKlass header.
      int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
      __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
      __ movl(  sco_temp,      Address(r11_dst_klass, sco_offset));
      assert_clean_int(sco_temp, rax);

      // the checkcast_copy loop needs two extra arguments:
      assert(c_rarg3 == sco_temp, "#3 already in place");
      // Set up arguments for checkcast_copy_entry.
      setup_arg_regs(4);
      __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris
      __ jump(RuntimeAddress(checkcast_copy_entry));
    }

  __ BIND(L_failed);
    __ xorptr(rax, rax);
    __ notptr(rax); // return -1
    __ leave();     // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  void generate_arraycopy_stubs() {
    address entry;
    address entry_jbyte_arraycopy;
    address entry_jshort_arraycopy;
    address entry_jint_arraycopy;
    address entry_oop_arraycopy;
    address entry_jlong_arraycopy;
    address entry_checkcast_arraycopy;

    StubRoutines::_jbyte_disjoint_arraycopy  = generate_disjoint_byte_copy(false, &entry,
                                                                           "jbyte_disjoint_arraycopy");
    StubRoutines::_jbyte_arraycopy           = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy,
                                                                           "jbyte_arraycopy");

    StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry,
                                                                            "jshort_disjoint_arraycopy");
    StubRoutines::_jshort_arraycopy          = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy,
                                                                            "jshort_arraycopy");

    StubRoutines::_jint_disjoint_arraycopy   = generate_disjoint_int_oop_copy(false, false, &entry,
                                                                              "jint_disjoint_arraycopy");
    StubRoutines::_jint_arraycopy            = generate_conjoint_int_oop_copy(false, false, entry,
                                                                              &entry_jint_arraycopy, "jint_arraycopy");

    StubRoutines::_jlong_disjoint_arraycopy  = generate_disjoint_long_oop_copy(false, false, &entry,
                                                                               "jlong_disjoint_arraycopy");
    StubRoutines::_jlong_arraycopy           = generate_conjoint_long_oop_copy(false, false, entry,
                                                                               &entry_jlong_arraycopy, "jlong_arraycopy");


    if (UseCompressedOops) {
      StubRoutines::_oop_disjoint_arraycopy        = generate_disjoint_int_oop_copy(false, true, &entry,
                                                                                    "oop_disjoint_arraycopy");
      StubRoutines::_oop_arraycopy                 = generate_conjoint_int_oop_copy(false, true, entry,
                                                                                    &entry_oop_arraycopy, "oop_arraycopy");
      StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry,
                                                                                    "oop_disjoint_arraycopy_uninit",
                                                                                    /*dest_uninitialized*/true);
      StubRoutines::_oop_arraycopy_uninit          = generate_conjoint_int_oop_copy(false, true, entry,
                                                                                    NULL, "oop_arraycopy_uninit",
                                                                                    /*dest_uninitialized*/true);
    } else {
      StubRoutines::_oop_disjoint_arraycopy        = generate_disjoint_long_oop_copy(false, true, &entry,
                                                                                     "oop_disjoint_arraycopy");
      StubRoutines::_oop_arraycopy                 = generate_conjoint_long_oop_copy(false, true, entry,
                                                                                     &entry_oop_arraycopy, "oop_arraycopy");
      StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry,
                                                                                     "oop_disjoint_arraycopy_uninit",
                                                                                     /*dest_uninitialized*/true);
      StubRoutines::_oop_arraycopy_uninit          = generate_conjoint_long_oop_copy(false, true, entry,
                                                                                     NULL, "oop_arraycopy_uninit",
                                                                                     /*dest_uninitialized*/true);
    }

    StubRoutines::_checkcast_arraycopy        = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
    StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
                                                                        /*dest_uninitialized*/true);

    StubRoutines::_unsafe_arraycopy  = generate_unsafe_copy("unsafe_arraycopy",
                                                            entry_jbyte_arraycopy,
                                                            entry_jshort_arraycopy,
                                                            entry_jint_arraycopy,
                                                            entry_jlong_arraycopy);
    StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
generate_generic_copy("generic_arraycopy", 2913 entry_jbyte_arraycopy, 2914 entry_jshort_arraycopy, 2915 entry_jint_arraycopy, 2916 entry_oop_arraycopy, 2917 entry_jlong_arraycopy, 2918 entry_checkcast_arraycopy); 2919 2920 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 2921 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 2922 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 2923 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); 2924 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 2925 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 2926 2927 // We don't generate specialized code for HeapWord-aligned source 2928 // arrays, so just use the code we've already generated 2929 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy; 2930 StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy; 2931 2932 StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy; 2933 StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy; 2934 2935 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy; 2936 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; 2937 2938 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy; 2939 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; 2940 2941 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; 2942 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; 2943 2944 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit; 2945 StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit; 2946 } 2947 2948 // AES intrinsic stubs 2949 enum {AESBlockSize = 16}; 2950 2951 address generate_key_shuffle_mask() { 2952 __ align(16); 2953 StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask"); 2954 address start = __ pc(); 2955 __ emit_data64( 0x0405060700010203, relocInfo::none ); 2956 __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none ); 2957 return start; 2958 } 2959 2960 address generate_counter_shuffle_mask() { 2961 __ align(16); 2962 StubCodeMark mark(this, "StubRoutines", "counter_shuffle_mask"); 2963 address start = __ pc(); 2964 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 2965 __ emit_data64(0x0001020304050607, relocInfo::none); 2966 return start; 2967 } 2968 2969 // Utility routine for loading a 128-bit key word in little endian format 2970 // can optionally specify that the shuffle mask is already in an xmmregister 2971 void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 2972 __ movdqu(xmmdst, Address(key, offset)); 2973 if (xmm_shuf_mask != NULL) { 2974 __ pshufb(xmmdst, xmm_shuf_mask); 2975 } else { 2976 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 2977 } 2978 } 2979 2980 // Utility routine for increase 128bit counter (iv in CTR mode) 2981 void inc_counter(Register reg, XMMRegister xmmdst, int inc_delta, Label& next_block) { 2982 __ pextrq(reg, xmmdst, 0x0); 2983 __ addq(reg, inc_delta); 2984 __ pinsrq(xmmdst, reg, 0x0); 2985 __ jcc(Assembler::carryClear, next_block); // jump if no carry 2986 __ pextrq(reg, xmmdst, 0x01); // Carry 2987 __ addq(reg, 0x01); 2988 __ 
  // Utility routine for increasing the 128-bit counter (the IV in CTR mode)
  void inc_counter(Register reg, XMMRegister xmmdst, int inc_delta, Label& next_block) {
    __ pextrq(reg, xmmdst, 0x0);
    __ addq(reg, inc_delta);
    __ pinsrq(xmmdst, reg, 0x0);
    __ jcc(Assembler::carryClear, next_block); // jump if no carry
    __ pextrq(reg, xmmdst, 0x01); // Carry
    __ addq(reg, 0x01);
    __ pinsrq(xmmdst, reg, 0x01); // carry end
  __ BIND(next_block);            // next instruction
  }
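
  // (Illustrative C sketch of inc_counter above, treating the 128-bit
  //  counter as two 64-bit halves in little-endian lane order:
  //
  //    lo += inc_delta;
  //    if (lo < inc_delta) hi += 1;   // propagate carry
  //
  //  The generated code gets the carry for free from addq's carry flag,
  //  which pinsrq leaves untouched.)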

  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //
  address generate_aescrypt_encryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
    Label L_doLast;
    address start = __ pc();

    const Register from   = c_rarg0; // source array address
    const Register to     = c_rarg1; // destination array address
    const Register key    = c_rarg2; // key array address
    const Register keylen = rax;

    const XMMRegister xmm_result        = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;
    // On win64 xmm6-xmm15 must be preserved so don't use them.
    const XMMRegister xmm_temp1 = xmm2;
    const XMMRegister xmm_temp2 = xmm3;
    const XMMRegister xmm_temp3 = xmm4;
    const XMMRegister xmm_temp4 = xmm5;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // For EVEX with VL and BW, provide a standard mask; VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode.
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input

    // For encryption, the Java expanded key ordering is just what we need;
    // we don't know if the key is aligned, hence not using the load-execute form.

    load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask);
    __ pxor(xmm_result, xmm_temp1);

    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);
    __ aesenc(xmm_result, xmm_temp3);
    __ aesenc(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);
    __ aesenc(xmm_result, xmm_temp3);
    __ aesenc(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);

    __ cmpl(keylen, 44);
    __ jccb(Assembler::equal, L_doLast);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);

    __ cmpl(keylen, 52);
    __ jccb(Assembler::equal, L_doLast);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);

  __ BIND(L_doLast);
    __ aesenc(xmm_result, xmm_temp1);
    __ aesenclast(xmm_result, xmm_temp2);
    __ movdqu(Address(to, 0), xmm_result); // store the result
    __ xorptr(rax, rax); // return 0
    __ leave();          // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }


  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //
  address generate_aescrypt_decryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
    Label L_doLast;
    address start = __ pc();

    const Register from   = c_rarg0; // source array address
    const Register to     = c_rarg1; // destination array address
    const Register key    = c_rarg2; // key array address
    const Register keylen = rax;

    const XMMRegister xmm_result        = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;
    // On win64 xmm6-xmm15 must be preserved so don't use them.
    const XMMRegister xmm_temp1 = xmm2;
    const XMMRegister xmm_temp2 = xmm3;
    const XMMRegister xmm_temp3 = xmm4;
    const XMMRegister xmm_temp4 = xmm5;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // For EVEX with VL and BW, provide a standard mask; VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode.
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    __ movdqu(xmm_result, Address(from, 0));

    // For decryption the Java expanded key ordering is rotated one position
    // from what we want, so we start at 0x10 here and hit 0x00 last.
    // We don't know if the key is aligned, hence not using the load-execute form.
    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);

    __ pxor  (xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);
    __ aesdec(xmm_result, xmm_temp3);
    __ aesdec(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);
    __ aesdec(xmm_result, xmm_temp3);
    __ aesdec(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask);

    __ cmpl(keylen, 44);
    __ jccb(Assembler::equal, L_doLast);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);

    __ cmpl(keylen, 52);
    __ jccb(Assembler::equal, L_doLast);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);

  __ BIND(L_doLast);
    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    // for decryption the aesdeclast operation is always on key+0x00
    __ aesdeclast(xmm_result, xmm_temp3);
    __ movdqu(Address(to, 0), xmm_result); // store the result
    __ xorptr(rax, rax); // return 0
    __ leave();          // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
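
  // (Illustrative note for the two block stubs above: a Java expanded key
  //  of keylen ints corresponds to
  //    44 ints -> AES-128, 10 rounds
  //    52 ints -> AES-192, 12 rounds
  //    60 ints -> AES-256, 14 rounds
  //  hence the cmpl(keylen, 44/52) branches, which skip or include the two
  //  extra round-key pairs that each larger key size adds.)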

  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - r vector byte array address
  //   c_rarg4   - input length
  //
  // Output:
  //   rax       - input length
  //
  address generate_cipherBlockChaining_encryptAESCrypt() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
    address start = __ pc();

    Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256;
    const Register from = c_rarg0; // source array address
    const Register to   = c_rarg1; // destination array address
    const Register key  = c_rarg2; // key array address
    const Register rvec = c_rarg3; // r byte array initialized from initvector array address
                                   // and left with the results of the last encryption block
#ifndef _WIN64
    const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16)
#else
    const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64
    const Register len_reg = r11;             // pick the volatile Windows register
#endif
    const Register pos = rax;

    // xmm register assignments for the loops below
    const XMMRegister xmm_result = xmm0;
    const XMMRegister xmm_temp   = xmm1;
    // keys 0-10 preloaded into xmm2-xmm12
    const int XMM_REG_NUM_KEY_FIRST = 2;
    const int XMM_REG_NUM_KEY_LAST  = 15;
    const XMMRegister xmm_key0  = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
    const XMMRegister xmm_key10 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10);
    const XMMRegister xmm_key11 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11);
    const XMMRegister xmm_key12 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12);
    const XMMRegister xmm_key13 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13);

    __ enter(); // required for proper stackwalking of RuntimeStub frame
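
    // (Illustrative note: CBC encryption computes
    //    C[i] = AES_encrypt(P[i] ^ C[i-1]),  with C[-1] = IV from 'rvec',
    //  so each output block depends on the previous one; that chaining is
    //  why the loops below proceed strictly serially, one 16-byte block
    //  per iteration.)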
3246 if (VM_Version::supports_avx512vlbw()) { 3247 __ movl(rax, 0xffff); 3248 __ kmovql(k1, rax); 3249 } 3250 3251 #ifdef _WIN64 3252 // on win64, fill len_reg from stack position 3253 __ movl(len_reg, len_mem); 3254 #else 3255 __ push(len_reg); // Save 3256 #endif 3257 3258 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 3259 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3260 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0 3261 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) { 3262 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3263 offset += 0x10; 3264 } 3265 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 3266 3267 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3268 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3269 __ cmpl(rax, 44); 3270 __ jcc(Assembler::notEqual, L_key_192_256); 3271 3272 // 128 bit code follows here 3273 __ movptr(pos, 0); 3274 __ align(OptoLoopAlignment); 3275 3276 __ BIND(L_loopTop_128); 3277 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3278 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3279 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3280 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) { 3281 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3282 } 3283 __ aesenclast(xmm_result, xmm_key10); 3284 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3285 // no need to store r to memory until we exit 3286 __ addptr(pos, AESBlockSize); 3287 __ subptr(len_reg, AESBlockSize); 3288 __ jcc(Assembler::notEqual, L_loopTop_128); 3289 3290 __ BIND(L_exit); 3291 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object 3292 3293 #ifdef _WIN64 3294 __ movl(rax, len_mem); 3295 #else 3296 __ pop(rax); // return length 3297 #endif 3298 __ leave(); // required for proper stackwalking of RuntimeStub frame 3299 __ ret(0); 3300 3301 __ BIND(L_key_192_256); 3302 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 3303 load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask); 3304 load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask); 3305 __ cmpl(rax, 52); 3306 __ jcc(Assembler::notEqual, L_key_256); 3307 3308 // 192-bit code follows here (could be changed to use more xmm registers) 3309 __ movptr(pos, 0); 3310 __ align(OptoLoopAlignment); 3311 3312 __ BIND(L_loopTop_192); 3313 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3314 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3315 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3316 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) { 3317 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3318 } 3319 __ aesenclast(xmm_result, xmm_key12); 3320 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3321 // no need to store r to memory until we exit 3322 __ addptr(pos, AESBlockSize); 3323 __ subptr(len_reg, AESBlockSize); 3324 __ jcc(Assembler::notEqual, L_loopTop_192); 3325 __ jmp(L_exit); 3326 3327 __ BIND(L_key_256); 3328 // 256-bit code follows here (could be 
changed to use more xmm registers) 3329 load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask); 3330 __ movptr(pos, 0); 3331 __ align(OptoLoopAlignment); 3332 3333 __ BIND(L_loopTop_256); 3334 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3335 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3336 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3337 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) { 3338 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3339 } 3340 load_key(xmm_temp, key, 0xe0); 3341 __ aesenclast(xmm_result, xmm_temp); 3342 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3343 // no need to store r to memory until we exit 3344 __ addptr(pos, AESBlockSize); 3345 __ subptr(len_reg, AESBlockSize); 3346 __ jcc(Assembler::notEqual, L_loopTop_256); 3347 __ jmp(L_exit); 3348 3349 return start; 3350 } 3351 3352 // Safefetch stubs. 3353 void generate_safefetch(const char* name, int size, address* entry, 3354 address* fault_pc, address* continuation_pc) { 3355 // safefetch signatures: 3356 // int SafeFetch32(int* adr, int errValue); 3357 // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue); 3358 // 3359 // arguments: 3360 // c_rarg0 = adr 3361 // c_rarg1 = errValue 3362 // 3363 // result: 3364 // rax = *adr or errValue 3365 3366 StubCodeMark mark(this, "StubRoutines", name); 3367 3368 // Entry point, pc or function descriptor. 3369 *entry = __ pc(); 3370 3371 // Load *adr into c_rarg1, may fault. 3372 *fault_pc = __ pc(); 3373 switch (size) { 3374 case 4: 3375 // int32_t 3376 __ movl(c_rarg1, Address(c_rarg0, 0)); 3377 break; 3378 case 8: 3379 // int64_t 3380 __ movq(c_rarg1, Address(c_rarg0, 0)); 3381 break; 3382 default: 3383 ShouldNotReachHere(); 3384 } 3385 3386 // return errValue or *adr 3387 *continuation_pc = __ pc(); 3388 __ movq(rax, c_rarg1); 3389 __ ret(0); 3390 } 3391 3392 // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time 3393 // to hide instruction latency 3394 // 3395 // Arguments: 3396 // 3397 // Inputs: 3398 // c_rarg0 - source byte array address 3399 // c_rarg1 - destination byte array address 3400 // c_rarg2 - K (key) in little endian int array 3401 // c_rarg3 - r vector byte array address 3402 // c_rarg4 - input length 3403 // 3404 // Output: 3405 // rax - input length 3406 // 3407 address generate_cipherBlockChaining_decryptAESCrypt_Parallel() { 3408 assert(UseAES, "need AES instructions and misaligned SSE support"); 3409 __ align(CodeEntryAlignment); 3410 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 3411 address start = __ pc(); 3412 3413 const Register from = c_rarg0; // source array address 3414 const Register to = c_rarg1; // destination array address 3415 const Register key = c_rarg2; // key array address 3416 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3417 // and left with the results of the last encryption block 3418 #ifndef _WIN64 3419 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3420 #else 3421 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3422 const Register len_reg = r11; // pick the volatile windows register 3423 #endif 3424 const Register pos = rax; 3425 3426 const int PARALLEL_FACTOR = 4; 3427 const int ROUNDS[3] = { 10, 12, 14 }; // aes rounds for key128, key192, key256 3428 3429 Label L_exit; 3430 Label
L_singleBlock_loopTopHead[3]; // 128, 192, 256 3431 Label L_singleBlock_loopTopHead2[3]; // 128, 192, 256 3432 Label L_singleBlock_loopTop[3]; // 128, 192, 256 3433 Label L_multiBlock_loopTopHead[3]; // 128, 192, 256 3434 Label L_multiBlock_loopTop[3]; // 128, 192, 256 3435 3436 // keys 0-10 preloaded into xmm5-xmm15 3437 const int XMM_REG_NUM_KEY_FIRST = 5; 3438 const int XMM_REG_NUM_KEY_LAST = 15; 3439 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3440 const XMMRegister xmm_key_last = as_XMMRegister(XMM_REG_NUM_KEY_LAST); 3441 3442 __ enter(); // required for proper stackwalking of RuntimeStub frame 3443 3444 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3445 // context for the registers used, where all instructions below are using 128-bit mode 3446 // On EVEX without VL and BW, these instructions will all be AVX. 3447 if (VM_Version::supports_avx512vlbw()) { 3448 __ movl(rax, 0xffff); 3449 __ kmovql(k1, rax); 3450 } 3451 3452 #ifdef _WIN64 3453 // on win64, fill len_reg from stack position 3454 __ movl(len_reg, len_mem); 3455 #else 3456 __ push(len_reg); // Save 3457 #endif 3458 __ push(rbx); 3459 // the java expanded key ordering is rotated one position from what we want 3460 // so we start from 0x10 here and hit 0x00 last 3461 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 3462 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3463 // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00 3464 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) { 3465 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3466 offset += 0x10; 3467 } 3468 load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask); 3469 3470 const XMMRegister xmm_prev_block_cipher = xmm1; // holds cipher of previous block 3471 3472 // registers holding the four results in the parallelized loop 3473 const XMMRegister xmm_result0 = xmm0; 3474 const XMMRegister xmm_result1 = xmm2; 3475 const XMMRegister xmm_result2 = xmm3; 3476 const XMMRegister xmm_result3 = xmm4; 3477 3478 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec 3479 3480 __ xorptr(pos, pos); 3481 3482 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3483 __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3484 __ cmpl(rbx, 52); 3485 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[1]); 3486 __ cmpl(rbx, 60); 3487 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[2]); 3488 3489 #define DoFour(opc, src_reg) \ 3490 __ opc(xmm_result0, src_reg); \ 3491 __ opc(xmm_result1, src_reg); \ 3492 __ opc(xmm_result2, src_reg); \ 3493 __ opc(xmm_result3, src_reg); \ 3494 3495 for (int k = 0; k < 3; ++k) { 3496 __ BIND(L_multiBlock_loopTopHead[k]); 3497 if (k != 0) { 3498 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left 3499 __ jcc(Assembler::less, L_singleBlock_loopTopHead2[k]); 3500 } 3501 if (k == 1) { 3502 __ subptr(rsp, 6 * wordSize); 3503 __ movdqu(Address(rsp, 0), xmm15); //save last_key from xmm15 3504 load_key(xmm15, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0 3505 __ movdqu(Address(rsp, 2 * wordSize), xmm15); 3506 load_key(xmm1, key, 0xc0); // 0xc0; 3507 __ movdqu(Address(rsp, 4 * wordSize), xmm1); 3508 } else if (k == 2) { 3509 __ subptr(rsp, 10 * wordSize); 3510 __ 
movdqu(Address(rsp, 0), xmm15); // save last_key from xmm15 3511 load_key(xmm15, key, 0xd0); // 0xd0; 256-bit key goes up to 0xe0 3512 __ movdqu(Address(rsp, 6 * wordSize), xmm15); 3513 load_key(xmm1, key, 0xe0); // 0xe0; 3514 __ movdqu(Address(rsp, 8 * wordSize), xmm1); 3515 load_key(xmm15, key, 0xb0); // 0xb0; 3516 __ movdqu(Address(rsp, 2 * wordSize), xmm15); 3517 load_key(xmm1, key, 0xc0); // 0xc0; 3518 __ movdqu(Address(rsp, 4 * wordSize), xmm1); 3519 } 3520 __ align(OptoLoopAlignment); 3521 __ BIND(L_multiBlock_loopTop[k]); 3522 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left 3523 __ jcc(Assembler::less, L_singleBlock_loopTopHead[k]); 3524 3525 if (k != 0) { 3526 __ movdqu(xmm15, Address(rsp, 2 * wordSize)); 3527 __ movdqu(xmm1, Address(rsp, 4 * wordSize)); 3528 } 3529 3530 __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); // get next 4 blocks into xmm_result registers 3531 __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 3532 __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 3533 __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize)); 3534 3535 DoFour(pxor, xmm_key_first); 3536 if (k == 0) { 3537 for (int rnum = 1; rnum < ROUNDS[k]; rnum++) { 3538 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3539 } 3540 DoFour(aesdeclast, xmm_key_last); 3541 } else if (k == 1) { 3542 for (int rnum = 1; rnum <= ROUNDS[k]-2; rnum++) { 3543 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3544 } 3545 __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again. 3546 DoFour(aesdec, xmm1); // key : 0xc0 3547 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again 3548 DoFour(aesdeclast, xmm_key_last); 3549 } else if (k == 2) { 3550 for (int rnum = 1; rnum <= ROUNDS[k] - 4; rnum++) { 3551 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3552 } 3553 DoFour(aesdec, xmm1); // key : 0xc0 3554 __ movdqu(xmm15, Address(rsp, 6 * wordSize)); 3555 __ movdqu(xmm1, Address(rsp, 8 * wordSize)); 3556 DoFour(aesdec, xmm15); // key : 0xd0 3557 __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again.
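// Note: all xmm registers are in use at this point (xmm5-xmm15 hold the
// preloaded round keys, xmm0/xmm2-xmm4 the four in-flight results), so the
// high 192/256-bit round keys spilled to the stack above are cycled
// through xmm1 and xmm15, and those registers are reloaded afterwards.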
3558 DoFour(aesdec, xmm1); // key : 0xe0 3559 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again 3560 DoFour(aesdeclast, xmm_key_last); 3561 } 3562 3563 // for each result, xor with the r vector of previous cipher block 3564 __ pxor(xmm_result0, xmm_prev_block_cipher); 3565 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 3566 __ pxor(xmm_result1, xmm_prev_block_cipher); 3567 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 3568 __ pxor(xmm_result2, xmm_prev_block_cipher); 3569 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 3570 __ pxor(xmm_result3, xmm_prev_block_cipher); 3571 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3 * AESBlockSize)); // this will carry over to next set of blocks 3572 if (k != 0) { 3573 __ movdqu(Address(rvec, 0x00), xmm_prev_block_cipher); 3574 } 3575 3576 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output 3577 __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1); 3578 __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2); 3579 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3); 3580 3581 __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); 3582 __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); 3583 __ jmp(L_multiBlock_loopTop[k]); 3584 3585 // registers used in the non-parallelized loops 3586 // xmm register assignments for the loops below 3587 const XMMRegister xmm_result = xmm0; 3588 const XMMRegister xmm_prev_block_cipher_save = xmm2; 3589 const XMMRegister xmm_key11 = xmm3; 3590 const XMMRegister xmm_key12 = xmm4; 3591 const XMMRegister key_tmp = xmm4; 3592 3593 __ BIND(L_singleBlock_loopTopHead[k]); 3594 if (k == 1) { 3595 __ addptr(rsp, 6 * wordSize); 3596 } else if (k == 2) { 3597 __ addptr(rsp, 10 * wordSize); 3598 } 3599 __ cmpptr(len_reg, 0); // any blocks left?? 
3600 __ jcc(Assembler::equal, L_exit); 3601 __ BIND(L_singleBlock_loopTopHead2[k]); 3602 if (k == 1) { 3603 load_key(xmm_key11, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0 3604 load_key(xmm_key12, key, 0xc0); // 0xc0; 192-bit key goes up to 0xc0 3605 } 3606 if (k == 2) { 3607 load_key(xmm_key11, key, 0xb0); // 0xb0; 256-bit key goes up to 0xe0 3608 } 3609 __ align(OptoLoopAlignment); 3610 __ BIND(L_singleBlock_loopTop[k]); 3611 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 3612 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector 3613 __ pxor(xmm_result, xmm_key_first); // do the aes dec rounds 3614 for (int rnum = 1; rnum <= 9 ; rnum++) { 3615 __ aesdec(xmm_result, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3616 } 3617 if (k == 1) { 3618 __ aesdec(xmm_result, xmm_key11); 3619 __ aesdec(xmm_result, xmm_key12); 3620 } 3621 if (k == 2) { 3622 __ aesdec(xmm_result, xmm_key11); 3623 load_key(key_tmp, key, 0xc0); 3624 __ aesdec(xmm_result, key_tmp); 3625 load_key(key_tmp, key, 0xd0); 3626 __ aesdec(xmm_result, key_tmp); 3627 load_key(key_tmp, key, 0xe0); 3628 __ aesdec(xmm_result, key_tmp); 3629 } 3630 3631 __ aesdeclast(xmm_result, xmm_key_last); // xmm15 always came from key+0 3632 __ pxor(xmm_result, xmm_prev_block_cipher); // xor with the current r vector 3633 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3634 // no need to store r to memory until we exit 3635 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block 3636 __ addptr(pos, AESBlockSize); 3637 __ subptr(len_reg, AESBlockSize); 3638 __ jcc(Assembler::notEqual, L_singleBlock_loopTop[k]); 3639 if (k != 2) { 3640 __ jmp(L_exit); 3641 } 3642 } //for 128/192/256 3643 3644 __ BIND(L_exit); 3645 __ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object 3646 __ pop(rbx); 3647 #ifdef _WIN64 3648 __ movl(rax, len_mem); 3649 #else 3650 __ pop(rax); // return length 3651 #endif 3652 __ leave(); // required for proper stackwalking of RuntimeStub frame 3653 __ ret(0); 3654 return start; 3655 } 3656 3657 address generate_upper_word_mask() { 3658 __ align(64); 3659 StubCodeMark mark(this, "StubRoutines", "upper_word_mask"); 3660 address start = __ pc(); 3661 __ emit_data64(0x0000000000000000, relocInfo::none); 3662 __ emit_data64(0xFFFFFFFF00000000, relocInfo::none); 3663 return start; 3664 } 3665 3666 address generate_shuffle_byte_flip_mask() { 3667 __ align(64); 3668 StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask"); 3669 address start = __ pc(); 3670 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 3671 __ emit_data64(0x0001020304050607, relocInfo::none); 3672 return start; 3673 } 3674 3675 // ofs and limit are used for multi-block byte array.
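// For the multi-block variants the Java-side contract is, roughly:
// compress successive 64-byte blocks of b starting at ofs while
// ofs <= limit, and return the updated ofs.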
3676 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit) 3677 address generate_sha1_implCompress(bool multi_block, const char *name) { 3678 __ align(CodeEntryAlignment); 3679 StubCodeMark mark(this, "StubRoutines", name); 3680 address start = __ pc(); 3681 3682 Register buf = c_rarg0; 3683 Register state = c_rarg1; 3684 Register ofs = c_rarg2; 3685 Register limit = c_rarg3; 3686 3687 const XMMRegister abcd = xmm0; 3688 const XMMRegister e0 = xmm1; 3689 const XMMRegister e1 = xmm2; 3690 const XMMRegister msg0 = xmm3; 3691 3692 const XMMRegister msg1 = xmm4; 3693 const XMMRegister msg2 = xmm5; 3694 const XMMRegister msg3 = xmm6; 3695 const XMMRegister shuf_mask = xmm7; 3696 3697 __ enter(); 3698 3699 __ subptr(rsp, 4 * wordSize); 3700 3701 __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask, 3702 buf, state, ofs, limit, rsp, multi_block); 3703 3704 __ addptr(rsp, 4 * wordSize); 3705 3706 __ leave(); 3707 __ ret(0); 3708 return start; 3709 } 3710 3711 address generate_pshuffle_byte_flip_mask() { 3712 __ align(64); 3713 StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask"); 3714 address start = __ pc(); 3715 __ emit_data64(0x0405060700010203, relocInfo::none); 3716 __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none); 3717 3718 if (VM_Version::supports_avx2()) { 3719 __ emit_data64(0x0405060700010203, relocInfo::none); // second copy 3720 __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none); 3721 // _SHUF_00BA 3722 __ emit_data64(0x0b0a090803020100, relocInfo::none); 3723 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); 3724 __ emit_data64(0x0b0a090803020100, relocInfo::none); 3725 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); 3726 // _SHUF_DC00 3727 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); 3728 __ emit_data64(0x0b0a090803020100, relocInfo::none); 3729 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); 3730 __ emit_data64(0x0b0a090803020100, relocInfo::none); 3731 } 3732 3733 return start; 3734 } 3735 3736 // Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. 3737 address generate_pshuffle_byte_flip_mask_sha512() { 3738 __ align(32); 3739 StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask_sha512"); 3740 address start = __ pc(); 3741 if (VM_Version::supports_avx2()) { 3742 __ emit_data64(0x0001020304050607, relocInfo::none); // PSHUFFLE_BYTE_FLIP_MASK 3743 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 3744 __ emit_data64(0x1011121314151617, relocInfo::none); 3745 __ emit_data64(0x18191a1b1c1d1e1f, relocInfo::none); 3746 __ emit_data64(0x0000000000000000, relocInfo::none); // MASK_YMM_LO 3747 __ emit_data64(0x0000000000000000, relocInfo::none); 3748 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); 3749 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none); 3750 } 3751 3752 return start; 3753 } 3754 3755 // ofs and limit are used for multi-block byte array.
3756 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit) 3757 address generate_sha256_implCompress(bool multi_block, const char *name) { 3758 assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), ""); 3759 __ align(CodeEntryAlignment); 3760 StubCodeMark mark(this, "StubRoutines", name); 3761 address start = __ pc(); 3762 3763 Register buf = c_rarg0; 3764 Register state = c_rarg1; 3765 Register ofs = c_rarg2; 3766 Register limit = c_rarg3; 3767 3768 const XMMRegister msg = xmm0; 3769 const XMMRegister state0 = xmm1; 3770 const XMMRegister state1 = xmm2; 3771 const XMMRegister msgtmp0 = xmm3; 3772 3773 const XMMRegister msgtmp1 = xmm4; 3774 const XMMRegister msgtmp2 = xmm5; 3775 const XMMRegister msgtmp3 = xmm6; 3776 const XMMRegister msgtmp4 = xmm7; 3777 3778 const XMMRegister shuf_mask = xmm8; 3779 3780 __ enter(); 3781 3782 __ subptr(rsp, 4 * wordSize); 3783 3784 if (VM_Version::supports_sha()) { 3785 __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3786 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3787 } else if (VM_Version::supports_avx2()) { 3788 __ sha256_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3789 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3790 } 3791 __ addptr(rsp, 4 * wordSize); 3792 __ vzeroupper(); 3793 __ leave(); 3794 __ ret(0); 3795 return start; 3796 } 3797 3798 address generate_sha512_implCompress(bool multi_block, const char *name) { 3799 assert(VM_Version::supports_avx2(), ""); 3800 assert(VM_Version::supports_bmi2(), ""); 3801 __ align(CodeEntryAlignment); 3802 StubCodeMark mark(this, "StubRoutines", name); 3803 address start = __ pc(); 3804 3805 Register buf = c_rarg0; 3806 Register state = c_rarg1; 3807 Register ofs = c_rarg2; 3808 Register limit = c_rarg3; 3809 3810 const XMMRegister msg = xmm0; 3811 const XMMRegister state0 = xmm1; 3812 const XMMRegister state1 = xmm2; 3813 const XMMRegister msgtmp0 = xmm3; 3814 const XMMRegister msgtmp1 = xmm4; 3815 const XMMRegister msgtmp2 = xmm5; 3816 const XMMRegister msgtmp3 = xmm6; 3817 const XMMRegister msgtmp4 = xmm7; 3818 3819 const XMMRegister shuf_mask = xmm8; 3820 3821 __ enter(); 3822 3823 __ sha512_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3824 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3825 3826 __ vzeroupper(); 3827 __ leave(); 3828 __ ret(0); 3829 return start; 3830 } 3831 3832 // This is a version of CTR/AES crypt which does 6 blocks in a loop at a time 3833 // to hide instruction latency 3834 // 3835 // Arguments: 3836 // 3837 // Inputs: 3838 // c_rarg0 - source byte array address 3839 // c_rarg1 - destination byte array address 3840 // c_rarg2 - K (key) in little endian int array 3841 // c_rarg3 - counter vector byte array address 3842 // Linux 3843 // c_rarg4 - input length 3844 // c_rarg5 - saved encryptedCounter start 3845 // rbp + 6 * wordSize - saved used length 3846 // Windows 3847 // rbp + 6 * wordSize - input length 3848 // rbp + 7 * wordSize - saved encryptedCounter start 3849 // rbp + 8 * wordSize - saved used length 3850 // 3851 // Output: 3852 // rax - input length 3853 // 3854 address generate_counterMode_AESCrypt_Parallel() { 3855 assert(UseAES, "need AES instructions and misaligned SSE support"); 3856 __ align(CodeEntryAlignment); 3857 StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt"); 3858 address start = __ pc(); 3859 const Register from = c_rarg0; // source array address 3860 const Register to = c_rarg1; 
// destination array address 3861 const Register key = c_rarg2; // key array address 3862 const Register counter = c_rarg3; // counter byte array initialized from counter array address 3863 // and updated with the incremented counter in the end 3864 #ifndef _WIN64 3865 const Register len_reg = c_rarg4; 3866 const Register saved_encCounter_start = c_rarg5; 3867 const Register used_addr = r10; 3868 const Address used_mem(rbp, 2 * wordSize); 3869 const Register used = r11; 3870 #else 3871 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3872 const Address saved_encCounter_mem(rbp, 7 * wordSize); // saved encryptedCounter is on stack on Win64 3873 const Address used_mem(rbp, 8 * wordSize); // used length is on stack on Win64 3874 const Register len_reg = r10; // pick the first volatile windows register 3875 const Register saved_encCounter_start = r11; 3876 const Register used_addr = r13; 3877 const Register used = r14; 3878 #endif 3879 const Register pos = rax; 3880 3881 const int PARALLEL_FACTOR = 6; 3882 const XMMRegister xmm_counter_shuf_mask = xmm0; 3883 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 3884 const XMMRegister xmm_curr_counter = xmm2; 3885 3886 const XMMRegister xmm_key_tmp0 = xmm3; 3887 const XMMRegister xmm_key_tmp1 = xmm4; 3888 3889 // registers holding the six results in the parallelized loop 3890 const XMMRegister xmm_result0 = xmm5; 3891 const XMMRegister xmm_result1 = xmm6; 3892 const XMMRegister xmm_result2 = xmm7; 3893 const XMMRegister xmm_result3 = xmm8; 3894 const XMMRegister xmm_result4 = xmm9; 3895 const XMMRegister xmm_result5 = xmm10; 3896 3897 const XMMRegister xmm_from0 = xmm11; 3898 const XMMRegister xmm_from1 = xmm12; 3899 const XMMRegister xmm_from2 = xmm13; 3900 const XMMRegister xmm_from3 = xmm14; // the last one is xmm14; we have to preserve it on WIN64. 3901 const XMMRegister xmm_from4 = xmm3; // reuse xmm3 and xmm4; xmm_key_tmp0/1 are no longer needed once the input text is loaded 3902 const XMMRegister xmm_from5 = xmm4; 3903 3904 // for key_128, key_192, key_256 3905 const int rounds[3] = {10, 12, 14}; 3906 Label L_exit_preLoop, L_preLoop_start; 3907 Label L_multiBlock_loopTop[3]; 3908 Label L_singleBlockLoopTop[3]; 3909 Label L__incCounter[3][6]; // for 6 blocks 3910 Label L__incCounter_single[3]; // for single block, key128, key192, key256 3911 Label L_processTail_insr[3], L_processTail_4_insr[3], L_processTail_2_insr[3], L_processTail_1_insr[3], L_processTail_exit_insr[3]; 3912 Label L_processTail_extr[3], L_processTail_4_extr[3], L_processTail_2_extr[3], L_processTail_1_extr[3], L_processTail_exit_extr[3]; 3913 3914 Label L_exit; 3915 3916 __ enter(); // required for proper stackwalking of RuntimeStub frame 3917 3918 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3919 // context for the registers used, where all instructions below are using 128-bit mode 3920 // On EVEX without VL and BW, these instructions will all be AVX.
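// Reference sketch (standard CTR mode, not VM-specific):
//   out[i] = in[i] ^ AES_Encrypt(counter + i, key)
// The counter and any partially consumed keystream block (tracked via
// saved_encCounter and used) carry over between invocations; the
// pre-loop below drains that partial block before the bulk processing.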
3921 if (VM_Version::supports_avx512vlbw()) { 3922 __ movl(rax, 0xffff); 3923 __ kmovql(k1, rax); 3924 } 3925 3926 #ifdef _WIN64 3927 // allocate spill slots for r13, r14 3928 enum { 3929 saved_r13_offset, 3930 saved_r14_offset 3931 }; 3932 __ subptr(rsp, 2 * wordSize); 3933 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13); 3934 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14); 3935 3936 // on win64, fill len_reg from stack position 3937 __ movl(len_reg, len_mem); 3938 __ movptr(saved_encCounter_start, saved_encCounter_mem); 3939 __ movptr(used_addr, used_mem); 3940 __ movl(used, Address(used_addr, 0)); 3941 #else 3942 __ push(len_reg); // Save 3943 __ movptr(used_addr, used_mem); 3944 __ movl(used, Address(used_addr, 0)); 3945 #endif 3946 3947 __ push(rbx); // Save RBX 3948 __ movdqu(xmm_curr_counter, Address(counter, 0x00)); // initialize counter with initial counter 3949 __ movdqu(xmm_counter_shuf_mask, ExternalAddress(StubRoutines::x86::counter_shuffle_mask_addr()), pos); // pos as scratch 3950 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); // counter is shuffled 3951 __ movptr(pos, 0); 3952 3953 // Use the partially used encrypted counter from last invocation 3954 __ BIND(L_preLoop_start); 3955 __ cmpptr(used, 16); 3956 __ jcc(Assembler::aboveEqual, L_exit_preLoop); 3957 __ cmpptr(len_reg, 0); 3958 __ jcc(Assembler::lessEqual, L_exit_preLoop); 3959 __ movb(rbx, Address(saved_encCounter_start, used)); 3960 __ xorb(rbx, Address(from, pos)); 3961 __ movb(Address(to, pos), rbx); 3962 __ addptr(pos, 1); 3963 __ addptr(used, 1); 3964 __ subptr(len_reg, 1); 3965 3966 __ jmp(L_preLoop_start); 3967 3968 __ BIND(L_exit_preLoop); 3969 __ movl(Address(used_addr, 0), used); 3970 3971 // key length could be only {11, 13, 15} * 4 = {44, 52, 60} 3972 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()), rbx); // rbx as scratch 3973 __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3974 __ cmpl(rbx, 52); 3975 __ jcc(Assembler::equal, L_multiBlock_loopTop[1]); 3976 __ cmpl(rbx, 60); 3977 __ jcc(Assembler::equal, L_multiBlock_loopTop[2]); 3978 3979 #define CTR_DoSix(opc, src_reg) \ 3980 __ opc(xmm_result0, src_reg); \ 3981 __ opc(xmm_result1, src_reg); \ 3982 __ opc(xmm_result2, src_reg); \ 3983 __ opc(xmm_result3, src_reg); \ 3984 __ opc(xmm_result4, src_reg); \ 3985 __ opc(xmm_result5, src_reg); 3986 3987 // k == 0 : generate code for key_128 3988 // k == 1 : generate code for key_192 3989 // k == 2 : generate code for key_256 3990 for (int k = 0; k < 3; ++k) { 3991 // multi-block loop starts here 3992 __ align(OptoLoopAlignment); 3993 __ BIND(L_multiBlock_loopTop[k]); 3994 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least PARALLEL_FACTOR blocks left 3995 __ jcc(Assembler::less, L_singleBlockLoopTop[k]); 3996 load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask); 3997 3998 // load, then increase counters 3999 CTR_DoSix(movdqa, xmm_curr_counter); 4000 inc_counter(rbx, xmm_result1, 0x01, L__incCounter[k][0]); 4001 inc_counter(rbx, xmm_result2, 0x02, L__incCounter[k][1]); 4002 inc_counter(rbx, xmm_result3, 0x03, L__incCounter[k][2]); 4003 inc_counter(rbx, xmm_result4, 0x04, L__incCounter[k][3]); 4004 inc_counter(rbx, xmm_result5, 0x05, L__incCounter[k][4]); 4005 inc_counter(rbx, xmm_curr_counter, 0x06, L__incCounter[k][5]); 4006 CTR_DoSix(pshufb, xmm_counter_shuf_mask); // after incrementing, shuffle the counters back for PXOR 4007 CTR_DoSix(pxor, xmm_key_tmp0); // PXOR with
Round 0 key 4008 4009 // load two ROUND_KEYs at a time 4010 for (int i = 1; i < rounds[k]; ) { 4011 load_key(xmm_key_tmp1, key, (0x10 * i), xmm_key_shuf_mask); 4012 load_key(xmm_key_tmp0, key, (0x10 * (i+1)), xmm_key_shuf_mask); 4013 CTR_DoSix(aesenc, xmm_key_tmp1); 4014 i++; 4015 if (i != rounds[k]) { 4016 CTR_DoSix(aesenc, xmm_key_tmp0); 4017 } else { 4018 CTR_DoSix(aesenclast, xmm_key_tmp0); 4019 } 4020 i++; 4021 } 4022 4023 // get next PARALLEL_FACTOR blocks into xmm_result registers 4024 __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 4025 __ movdqu(xmm_from1, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 4026 __ movdqu(xmm_from2, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 4027 __ movdqu(xmm_from3, Address(from, pos, Address::times_1, 3 * AESBlockSize)); 4028 __ movdqu(xmm_from4, Address(from, pos, Address::times_1, 4 * AESBlockSize)); 4029 __ movdqu(xmm_from5, Address(from, pos, Address::times_1, 5 * AESBlockSize)); 4030 4031 __ pxor(xmm_result0, xmm_from0); 4032 __ pxor(xmm_result1, xmm_from1); 4033 __ pxor(xmm_result2, xmm_from2); 4034 __ pxor(xmm_result3, xmm_from3); 4035 __ pxor(xmm_result4, xmm_from4); 4036 __ pxor(xmm_result5, xmm_from5); 4037 4038 // store 6 results into the next 96 bytes of output 4039 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); 4040 __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1); 4041 __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2); 4042 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3); 4043 __ movdqu(Address(to, pos, Address::times_1, 4 * AESBlockSize), xmm_result4); 4044 __ movdqu(Address(to, pos, Address::times_1, 5 * AESBlockSize), xmm_result5); 4045 4046 __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); // advance the position in the crypt text 4047 __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // decrease the remaining length 4048 __ jmp(L_multiBlock_loopTop[k]); 4049 4050 // singleBlock starts here 4051 __ align(OptoLoopAlignment); 4052 __ BIND(L_singleBlockLoopTop[k]); 4053 __ cmpptr(len_reg, 0); 4054 __ jcc(Assembler::lessEqual, L_exit); 4055 load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask); 4056 __ movdqa(xmm_result0, xmm_curr_counter); 4057 inc_counter(rbx, xmm_curr_counter, 0x01, L__incCounter_single[k]); 4058 __ pshufb(xmm_result0, xmm_counter_shuf_mask); 4059 __ pxor(xmm_result0, xmm_key_tmp0); 4060 for (int i = 1; i < rounds[k]; i++) { 4061 load_key(xmm_key_tmp0, key, (0x10 * i), xmm_key_shuf_mask); 4062 __ aesenc(xmm_result0, xmm_key_tmp0); 4063 } 4064 load_key(xmm_key_tmp0, key, (rounds[k] * 0x10), xmm_key_shuf_mask); 4065 __ aesenclast(xmm_result0, xmm_key_tmp0); 4066 __ cmpptr(len_reg, AESBlockSize); 4067 __ jcc(Assembler::less, L_processTail_insr[k]); 4068 __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 4069 __ pxor(xmm_result0, xmm_from0); 4070 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); 4071 __ addptr(pos, AESBlockSize); 4072 __ subptr(len_reg, AESBlockSize); 4073 __ jmp(L_singleBlockLoopTop[k]); 4074 __ BIND(L_processTail_insr[k]); // Process the tail part of the input array 4075 __ addptr(pos, len_reg); // 1.
Insert bytes from src array into xmm_from0 register 4076 __ testptr(len_reg, 8); 4077 __ jcc(Assembler::zero, L_processTail_4_insr[k]); 4078 __ subptr(pos,8); 4079 __ pinsrq(xmm_from0, Address(from, pos), 0); 4080 __ BIND(L_processTail_4_insr[k]); 4081 __ testptr(len_reg, 4); 4082 __ jcc(Assembler::zero, L_processTail_2_insr[k]); 4083 __ subptr(pos,4); 4084 __ pslldq(xmm_from0, 4); 4085 __ pinsrd(xmm_from0, Address(from, pos), 0); 4086 __ BIND(L_processTail_2_insr[k]); 4087 __ testptr(len_reg, 2); 4088 __ jcc(Assembler::zero, L_processTail_1_insr[k]); 4089 __ subptr(pos, 2); 4090 __ pslldq(xmm_from0, 2); 4091 __ pinsrw(xmm_from0, Address(from, pos), 0); 4092 __ BIND(L_processTail_1_insr[k]); 4093 __ testptr(len_reg, 1); 4094 __ jcc(Assembler::zero, L_processTail_exit_insr[k]); 4095 __ subptr(pos, 1); 4096 __ pslldq(xmm_from0, 1); 4097 __ pinsrb(xmm_from0, Address(from, pos), 0); 4098 __ BIND(L_processTail_exit_insr[k]); 4099 4100 __ movdqu(Address(saved_encCounter_start, 0), xmm_result0); // 2. Perform pxor of the encrypted counter and plaintext Bytes. 4101 __ pxor(xmm_result0, xmm_from0); // Also the encrypted counter is saved for next invocation. 4102 4103 __ testptr(len_reg, 8); 4104 __ jcc(Assembler::zero, L_processTail_4_extr[k]); // 3. Extract bytes from xmm_result0 into the dest. array 4105 __ pextrq(Address(to, pos), xmm_result0, 0); 4106 __ psrldq(xmm_result0, 8); 4107 __ addptr(pos, 8); 4108 __ BIND(L_processTail_4_extr[k]); 4109 __ testptr(len_reg, 4); 4110 __ jcc(Assembler::zero, L_processTail_2_extr[k]); 4111 __ pextrd(Address(to, pos), xmm_result0, 0); 4112 __ psrldq(xmm_result0, 4); 4113 __ addptr(pos, 4); 4114 __ BIND(L_processTail_2_extr[k]); 4115 __ testptr(len_reg, 2); 4116 __ jcc(Assembler::zero, L_processTail_1_extr[k]); 4117 __ pextrw(Address(to, pos), xmm_result0, 0); 4118 __ psrldq(xmm_result0, 2); 4119 __ addptr(pos, 2); 4120 __ BIND(L_processTail_1_extr[k]); 4121 __ testptr(len_reg, 1); 4122 __ jcc(Assembler::zero, L_processTail_exit_extr[k]); 4123 __ pextrb(Address(to, pos), xmm_result0, 0); 4124 4125 __ BIND(L_processTail_exit_extr[k]); 4126 __ movl(Address(used_addr, 0), len_reg); 4127 __ jmp(L_exit); 4128 4129 } 4130 4131 __ BIND(L_exit); 4132 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); //counter is shuffled back. 4133 __ movdqu(Address(counter, 0), xmm_curr_counter); //save counter back 4134 __ pop(rbx); // pop the saved RBX. 
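// Epilogue: restore the remaining saved registers and return the original
// input length in rax.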
4135 #ifdef _WIN64 4136 __ movl(rax, len_mem); 4137 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 4138 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 4139 __ addptr(rsp, 2 * wordSize); 4140 #else 4141 __ pop(rax); // return 'len' 4142 #endif 4143 __ leave(); // required for proper stackwalking of RuntimeStub frame 4144 __ ret(0); 4145 return start; 4146 } 4147 4148 // byte swap x86 long 4149 address generate_ghash_long_swap_mask() { 4150 __ align(CodeEntryAlignment); 4151 StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask"); 4152 address start = __ pc(); 4153 __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none ); 4154 __ emit_data64(0x0706050403020100, relocInfo::none ); 4155 return start; 4156 } 4157 4158 // byte swap x86 byte array 4159 address generate_ghash_byte_swap_mask() { 4160 __ align(CodeEntryAlignment); 4161 StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask"); 4162 address start = __ pc(); 4163 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none ); 4164 __ emit_data64(0x0001020304050607, relocInfo::none ); 4165 return start; 4166 } 4167 4168 /* Single and multi-block ghash operations */ 4169 address generate_ghash_processBlocks() { 4170 __ align(CodeEntryAlignment); 4171 Label L_ghash_loop, L_exit; 4172 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 4173 address start = __ pc(); 4174 4175 const Register state = c_rarg0; 4176 const Register subkeyH = c_rarg1; 4177 const Register data = c_rarg2; 4178 const Register blocks = c_rarg3; 4179 4180 const XMMRegister xmm_temp0 = xmm0; 4181 const XMMRegister xmm_temp1 = xmm1; 4182 const XMMRegister xmm_temp2 = xmm2; 4183 const XMMRegister xmm_temp3 = xmm3; 4184 const XMMRegister xmm_temp4 = xmm4; 4185 const XMMRegister xmm_temp5 = xmm5; 4186 const XMMRegister xmm_temp6 = xmm6; 4187 const XMMRegister xmm_temp7 = xmm7; 4188 const XMMRegister xmm_temp8 = xmm8; 4189 const XMMRegister xmm_temp9 = xmm9; 4190 const XMMRegister xmm_temp10 = xmm10; 4191 4192 __ enter(); 4193 4194 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 4195 // context for the registers used, where all instructions below are using 128-bit mode 4196 // On EVEX without VL and BW, these instructions will all be AVX. 
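// Reference (standard GHASH, not VM-specific): each iteration computes
//   state = (state ^ data[i]) * H in GF(2^128),
// where the 128x128->256-bit carry-less product is formed with pclmulqdq
// below and then reduced modulo the polynomial x^128 + x^7 + x^2 + x + 1.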
4197 if (VM_Version::supports_avx512vlbw()) { 4198 __ movl(rax, 0xffff); 4199 __ kmovql(k1, rax); 4200 } 4201 4202 __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr())); 4203 4204 __ movdqu(xmm_temp0, Address(state, 0)); 4205 __ pshufb(xmm_temp0, xmm_temp10); 4206 4207 4208 __ BIND(L_ghash_loop); 4209 __ movdqu(xmm_temp2, Address(data, 0)); 4210 __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr())); 4211 4212 __ movdqu(xmm_temp1, Address(subkeyH, 0)); 4213 __ pshufb(xmm_temp1, xmm_temp10); 4214 4215 __ pxor(xmm_temp0, xmm_temp2); 4216 4217 // 4218 // Multiply with the hash key 4219 // 4220 __ movdqu(xmm_temp3, xmm_temp0); 4221 __ pclmulqdq(xmm_temp3, xmm_temp1, 0); // xmm3 holds a0*b0 4222 __ movdqu(xmm_temp4, xmm_temp0); 4223 __ pclmulqdq(xmm_temp4, xmm_temp1, 16); // xmm4 holds a0*b1 4224 4225 __ movdqu(xmm_temp5, xmm_temp0); 4226 __ pclmulqdq(xmm_temp5, xmm_temp1, 1); // xmm5 holds a1*b0 4227 __ movdqu(xmm_temp6, xmm_temp0); 4228 __ pclmulqdq(xmm_temp6, xmm_temp1, 17); // xmm6 holds a1*b1 4229 4230 __ pxor(xmm_temp4, xmm_temp5); // xmm4 holds a0*b1 + a1*b0 4231 4232 __ movdqu(xmm_temp5, xmm_temp4); // move the contents of xmm4 to xmm5 4233 __ psrldq(xmm_temp4, 8); // shift xmm4 by 64 bits to the right 4234 __ pslldq(xmm_temp5, 8); // shift xmm5 by 64 bits to the left 4235 __ pxor(xmm_temp3, xmm_temp5); 4236 __ pxor(xmm_temp6, xmm_temp4); // Register pair <xmm6:xmm3> holds the result 4237 // of the carry-less multiplication of 4238 // xmm0 by xmm1. 4239 4240 // We shift the result of the multiplication by one bit position 4241 // to the left to cope with the fact that the bits are reversed. 4242 __ movdqu(xmm_temp7, xmm_temp3); 4243 __ movdqu(xmm_temp8, xmm_temp6); 4244 __ pslld(xmm_temp3, 1); 4245 __ pslld(xmm_temp6, 1); 4246 __ psrld(xmm_temp7, 31); 4247 __ psrld(xmm_temp8, 31); 4248 __ movdqu(xmm_temp9, xmm_temp7); 4249 __ pslldq(xmm_temp8, 4); 4250 __ pslldq(xmm_temp7, 4); 4251 __ psrldq(xmm_temp9, 12); 4252 __ por(xmm_temp3, xmm_temp7); 4253 __ por(xmm_temp6, xmm_temp8); 4254 __ por(xmm_temp6, xmm_temp9); 4255 4256 // 4257 // First phase of the reduction 4258 // 4259 // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts 4260 // independently. 4261 __ movdqu(xmm_temp7, xmm_temp3); 4262 __ movdqu(xmm_temp8, xmm_temp3); 4263 __ movdqu(xmm_temp9, xmm_temp3); 4264 __ pslld(xmm_temp7, 31); // packed left shift by 31 4265 __ pslld(xmm_temp8, 30); // packed left shift by 30 4266 __ pslld(xmm_temp9, 25); // packed left shift by 25 4267 __ pxor(xmm_temp7, xmm_temp8); // xor the shifted versions 4268 __ pxor(xmm_temp7, xmm_temp9); 4269 __ movdqu(xmm_temp8, xmm_temp7); 4270 __ pslldq(xmm_temp7, 12); 4271 __ psrldq(xmm_temp8, 4); 4272 __ pxor(xmm_temp3, xmm_temp7); // first phase of the reduction complete 4273 4274 // 4275 // Second phase of the reduction 4276 // 4277 // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these 4278 // shift operations.
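// (The >>1, >>2 and >>7 shifts below correspond to the x, x^2 and x^7
// terms of the field polynomial, folded back in to finish the reduction.)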
4279 __ movdqu(xmm_temp2, xmm_temp3); 4280 __ movdqu(xmm_temp4, xmm_temp3); 4281 __ movdqu(xmm_temp5, xmm_temp3); 4282 __ psrld(xmm_temp2, 1); // packed right shift by 1 4283 __ psrld(xmm_temp4, 2); // packed right shift by 2 4284 __ psrld(xmm_temp5, 7); // packed right shift by 7 4285 __ pxor(xmm_temp2, xmm_temp4); // xor the shifted versions 4286 __ pxor(xmm_temp2, xmm_temp5); 4287 __ pxor(xmm_temp2, xmm_temp8); 4288 __ pxor(xmm_temp3, xmm_temp2); 4289 __ pxor(xmm_temp6, xmm_temp3); // the result is in xmm6 4290 4291 __ decrement(blocks); 4292 __ jcc(Assembler::zero, L_exit); 4293 __ movdqu(xmm_temp0, xmm_temp6); 4294 __ addptr(data, 16); 4295 __ jmp(L_ghash_loop); 4296 4297 __ BIND(L_exit); 4298 __ pshufb(xmm_temp6, xmm_temp10); // Byte swap 16-byte result 4299 __ movdqu(Address(state, 0), xmm_temp6); // store the result 4300 __ leave(); 4301 __ ret(0); 4302 return start; 4303 } 4304 4305 /** 4306 * Arguments: 4307 * 4308 * Inputs: 4309 * c_rarg0 - int crc 4310 * c_rarg1 - byte* buf 4311 * c_rarg2 - int length 4312 * 4313 * Output: 4314 * rax - int crc result 4315 */ 4316 address generate_updateBytesCRC32() { 4317 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions"); 4318 4319 __ align(CodeEntryAlignment); 4320 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32"); 4321 4322 address start = __ pc(); 4323 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 4324 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 4325 // rscratch1: r10 4326 const Register crc = c_rarg0; // crc 4327 const Register buf = c_rarg1; // source java byte array address 4328 const Register len = c_rarg2; // length 4329 const Register table = c_rarg3; // crc_table address (reuse register) 4330 const Register tmp = r11; 4331 assert_different_registers(crc, buf, len, table, tmp, rax); 4332 4333 BLOCK_COMMENT("Entry:"); 4334 __ enter(); // required for proper stackwalking of RuntimeStub frame 4335 4336 __ kernel_crc32(crc, buf, len, table, tmp); 4337 4338 __ movl(rax, crc); 4339 __ vzeroupper(); 4340 __ leave(); // required for proper stackwalking of RuntimeStub frame 4341 __ ret(0); 4342 4343 return start; 4344 } 4345 4346 /** 4347 * Arguments: 4348 * 4349 * Inputs: 4350 * c_rarg0 - int crc 4351 * c_rarg1 - byte* buf 4352 * c_rarg2 - long length 4353 * c_rarg3 - table_start - optional (present only when doing a library_call, 4354 * not used by x86 algorithm) 4355 * 4356 * Output: 4357 * rax - int crc result 4358 */ 4359 address generate_updateBytesCRC32C(bool is_pclmulqdq_supported) { 4360 assert(UseCRC32CIntrinsics, "need SSE4_2"); 4361 __ align(CodeEntryAlignment); 4362 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C"); 4363 address start = __ pc(); 4364 //reg.arg int#0 int#1 int#2 int#3 int#4 int#5 float regs 4365 //Windows RCX RDX R8 R9 none none XMM0..XMM3 4366 //Lin / Sol RDI RSI RDX RCX R8 R9 XMM0..XMM7 4367 const Register crc = c_rarg0; // crc 4368 const Register buf = c_rarg1; // source java byte array address 4369 const Register len = c_rarg2; // length 4370 const Register a = rax; 4371 const Register j = r9; 4372 const Register k = r10; 4373 const Register l = r11; 4374 #ifdef _WIN64 4375 const Register y = rdi; 4376 const Register z = rsi; 4377 #else 4378 const Register y = rcx; 4379 const Register z = r8; 4380 #endif 4381 assert_different_registers(crc, buf, len, a, j, k, l, y, z); 4382 4383 BLOCK_COMMENT("Entry:"); 4384 __ enter(); // required for proper stackwalking of RuntimeStub frame 4385 #ifdef _WIN64 4386 __ push(y); 4387 __ push(z); 4388 #endif 4389 __
crc32c_ipl_alg2_alt2(crc, buf, len, 4390 a, j, k, 4391 l, y, z, 4392 c_farg0, c_farg1, c_farg2, 4393 is_pclmulqdq_supported); 4394 __ movl(rax, crc); 4395 #ifdef _WIN64 4396 __ pop(z); 4397 __ pop(y); 4398 #endif 4399 __ vzeroupper(); 4400 __ leave(); // required for proper stackwalking of RuntimeStub frame 4401 __ ret(0); 4402 4403 return start; 4404 } 4405 4406 /** 4407 * Arguments: 4408 * 4409 * Input: 4410 * c_rarg0 - x address 4411 * c_rarg1 - x length 4412 * c_rarg2 - y address 4413 * c_rarg3 - y length 4414 * not Win64 4415 * c_rarg4 - z address 4416 * c_rarg5 - z length 4417 * Win64 4418 * rsp+40 - z address 4419 * rsp+48 - z length 4420 */ 4421 address generate_multiplyToLen() { 4422 __ align(CodeEntryAlignment); 4423 StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); 4424 4425 address start = __ pc(); 4426 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 4427 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 4428 const Register x = rdi; 4429 const Register xlen = rax; 4430 const Register y = rsi; 4431 const Register ylen = rcx; 4432 const Register z = r8; 4433 const Register zlen = r11; 4434 4435 // Next registers will be saved on stack in multiply_to_len(). 4436 const Register tmp1 = r12; 4437 const Register tmp2 = r13; 4438 const Register tmp3 = r14; 4439 const Register tmp4 = r15; 4440 const Register tmp5 = rbx; 4441 4442 BLOCK_COMMENT("Entry:"); 4443 __ enter(); // required for proper stackwalking of RuntimeStub frame 4444 4445 #ifndef _WIN64 4446 __ movptr(zlen, r9); // Save r9 in r11 - zlen 4447 #endif 4448 setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx 4449 // ylen => rcx, z => r8, zlen => r11 4450 // r9 and r10 may be used to save non-volatile registers 4451 #ifdef _WIN64 4452 // last 2 arguments (#4, #5) are on stack on Win64 4453 __ movptr(z, Address(rsp, 6 * wordSize)); 4454 __ movptr(zlen, Address(rsp, 7 * wordSize)); 4455 #endif 4456 4457 __ movptr(xlen, rsi); 4458 __ movptr(y, rdx); 4459 __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5); 4460 4461 restore_arg_regs(); 4462 4463 __ leave(); // required for proper stackwalking of RuntimeStub frame 4464 __ ret(0); 4465 4466 return start; 4467 } 4468 4469 /** 4470 * Arguments: 4471 * 4472 * Input: 4473 * c_rarg0 - obja address 4474 * c_rarg1 - objb address 4475 * c_rarg3 - length length 4476 * c_rarg4 - scale log2_array_indxscale 4477 * 4478 * Output: 4479 * rax - int; >= 0: mismatched index, < 0: bitwise complement of tail 4480 */ 4481 address generate_vectorizedMismatch() { 4482 __ align(CodeEntryAlignment); 4483 StubCodeMark mark(this, "StubRoutines", "vectorizedMismatch"); 4484 address start = __ pc(); 4485 4486 BLOCK_COMMENT("Entry:"); 4487 __ enter(); 4488 4489 #ifdef _WIN64 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 4490 const Register scale = c_rarg0; //rcx, will exchange with r9 4491 const Register objb = c_rarg1; //rdx 4492 const Register length = c_rarg2; //r8 4493 const Register obja = c_rarg3; //r9 4494 __ xchgq(obja, scale); // now obja and scale contain the correct contents 4495 4496 const Register tmp1 = r10; 4497 const Register tmp2 = r11; 4498 #endif 4499 #ifndef _WIN64 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
4500 const Register obja = c_rarg0; //U:rdi 4501 const Register objb = c_rarg1; //U:rsi 4502 const Register length = c_rarg2; //U:rdx 4503 const Register scale = c_rarg3; //U:rcx 4504 const Register tmp1 = r8; 4505 const Register tmp2 = r9; 4506 #endif 4507 const Register result = rax; //return value 4508 const XMMRegister vec0 = xmm0; 4509 const XMMRegister vec1 = xmm1; 4510 const XMMRegister vec2 = xmm2; 4511 4512 __ vectorized_mismatch(obja, objb, length, scale, result, tmp1, tmp2, vec0, vec1, vec2); 4513 4514 __ vzeroupper(); 4515 __ leave(); 4516 __ ret(0); 4517 4518 return start; 4519 } 4520 4521 /** 4522 * Arguments: 4523 * 4524 // Input: 4525 // c_rarg0 - x address 4526 // c_rarg1 - x length 4527 // c_rarg2 - z address 4528 // c_rarg3 - z length 4529 * 4530 */ 4531 address generate_squareToLen() { 4532 4533 __ align(CodeEntryAlignment); 4534 StubCodeMark mark(this, "StubRoutines", "squareToLen"); 4535 4536 address start = __ pc(); 4537 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 4538 // Unix: rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...) 4539 const Register x = rdi; 4540 const Register len = rsi; 4541 const Register z = r8; 4542 const Register zlen = rcx; 4543 4544 const Register tmp1 = r12; 4545 const Register tmp2 = r13; 4546 const Register tmp3 = r14; 4547 const Register tmp4 = r15; 4548 const Register tmp5 = rbx; 4549 4550 BLOCK_COMMENT("Entry:"); 4551 __ enter(); // required for proper stackwalking of RuntimeStub frame 4552 4553 setup_arg_regs(4); // x => rdi, len => rsi, z => rdx 4554 // zlen => rcx 4555 // r9 and r10 may be used to save non-volatile registers 4556 __ movptr(r8, rdx); 4557 __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 4558 4559 restore_arg_regs(); 4560 4561 __ leave(); // required for proper stackwalking of RuntimeStub frame 4562 __ ret(0); 4563 4564 return start; 4565 } 4566 4567 /** 4568 * Arguments: 4569 * 4570 * Input: 4571 * c_rarg0 - out address 4572 * c_rarg1 - in address 4573 * c_rarg2 - offset 4574 * c_rarg3 - len 4575 * not Win64 4576 * c_rarg4 - k 4577 * Win64 4578 * rsp+40 - k 4579 */ 4580 address generate_mulAdd() { 4581 __ align(CodeEntryAlignment); 4582 StubCodeMark mark(this, "StubRoutines", "mulAdd"); 4583 4584 address start = __ pc(); 4585 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 4586 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 4587 const Register out = rdi; 4588 const Register in = rsi; 4589 const Register offset = r11; 4590 const Register len = rcx; 4591 const Register k = r8; 4592 4593 // Next registers will be saved on stack in mul_add().
4594 const Register tmp1 = r12; 4595 const Register tmp2 = r13; 4596 const Register tmp3 = r14; 4597 const Register tmp4 = r15; 4598 const Register tmp5 = rbx; 4599 4600 BLOCK_COMMENT("Entry:"); 4601 __ enter(); // required for proper stackwalking of RuntimeStub frame 4602 4603 setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx 4604 // len => rcx, k => r8 4605 // r9 and r10 may be used to save non-volatile registers 4606 #ifdef _WIN64 4607 // last argument is on stack on Win64 4608 __ movl(k, Address(rsp, 6 * wordSize)); 4609 #endif 4610 __ movptr(r11, rdx); // move offset in rdx to offset(r11) 4611 __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 4612 4613 restore_arg_regs(); 4614 4615 __ leave(); // required for proper stackwalking of RuntimeStub frame 4616 __ ret(0); 4617 4618 return start; 4619 } 4620 4621 address generate_libmExp() { 4622 address start = __ pc(); 4623 4624 const XMMRegister x0 = xmm0; 4625 const XMMRegister x1 = xmm1; 4626 const XMMRegister x2 = xmm2; 4627 const XMMRegister x3 = xmm3; 4628 4629 const XMMRegister x4 = xmm4; 4630 const XMMRegister x5 = xmm5; 4631 const XMMRegister x6 = xmm6; 4632 const XMMRegister x7 = xmm7; 4633 4634 const Register tmp = r11; 4635 4636 BLOCK_COMMENT("Entry:"); 4637 __ enter(); // required for proper stackwalking of RuntimeStub frame 4638 4639 __ fast_exp(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 4640 4641 __ leave(); // required for proper stackwalking of RuntimeStub frame 4642 __ ret(0); 4643 4644 return start; 4645 4646 } 4647 4648 address generate_libmLog() { 4649 address start = __ pc(); 4650 4651 const XMMRegister x0 = xmm0; 4652 const XMMRegister x1 = xmm1; 4653 const XMMRegister x2 = xmm2; 4654 const XMMRegister x3 = xmm3; 4655 4656 const XMMRegister x4 = xmm4; 4657 const XMMRegister x5 = xmm5; 4658 const XMMRegister x6 = xmm6; 4659 const XMMRegister x7 = xmm7; 4660 4661 const Register tmp1 = r11; 4662 const Register tmp2 = r8; 4663 4664 BLOCK_COMMENT("Entry:"); 4665 __ enter(); // required for proper stackwalking of RuntimeStub frame 4666 4667 __ fast_log(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2); 4668 4669 __ leave(); // required for proper stackwalking of RuntimeStub frame 4670 __ ret(0); 4671 4672 return start; 4673 4674 } 4675 4676 address generate_libmLog10() { 4677 address start = __ pc(); 4678 4679 const XMMRegister x0 = xmm0; 4680 const XMMRegister x1 = xmm1; 4681 const XMMRegister x2 = xmm2; 4682 const XMMRegister x3 = xmm3; 4683 4684 const XMMRegister x4 = xmm4; 4685 const XMMRegister x5 = xmm5; 4686 const XMMRegister x6 = xmm6; 4687 const XMMRegister x7 = xmm7; 4688 4689 const Register tmp = r11; 4690 4691 BLOCK_COMMENT("Entry:"); 4692 __ enter(); // required for proper stackwalking of RuntimeStub frame 4693 4694 __ fast_log10(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 4695 4696 __ leave(); // required for proper stackwalking of RuntimeStub frame 4697 __ ret(0); 4698 4699 return start; 4700 4701 } 4702 4703 address generate_libmPow() { 4704 address start = __ pc(); 4705 4706 const XMMRegister x0 = xmm0; 4707 const XMMRegister x1 = xmm1; 4708 const XMMRegister x2 = xmm2; 4709 const XMMRegister x3 = xmm3; 4710 4711 const XMMRegister x4 = xmm4; 4712 const XMMRegister x5 = xmm5; 4713 const XMMRegister x6 = xmm6; 4714 const XMMRegister x7 = xmm7; 4715 4716 const Register tmp1 = r8; 4717 const Register tmp2 = r9; 4718 const Register tmp3 = r10; 4719 const Register tmp4 = r11; 4720 4721 BLOCK_COMMENT("Entry:"); 4722 __ enter(); // required for proper 
stackwalking of RuntimeStub frame 4723 4724 __ fast_pow(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4725 4726 __ leave(); // required for proper stackwalking of RuntimeStub frame 4727 __ ret(0); 4728 4729 return start; 4730 4731 } 4732 4733 address generate_libmSin() { 4734 address start = __ pc(); 4735 4736 const XMMRegister x0 = xmm0; 4737 const XMMRegister x1 = xmm1; 4738 const XMMRegister x2 = xmm2; 4739 const XMMRegister x3 = xmm3; 4740 4741 const XMMRegister x4 = xmm4; 4742 const XMMRegister x5 = xmm5; 4743 const XMMRegister x6 = xmm6; 4744 const XMMRegister x7 = xmm7; 4745 4746 const Register tmp1 = r8; 4747 const Register tmp2 = r9; 4748 const Register tmp3 = r10; 4749 const Register tmp4 = r11; 4750 4751 BLOCK_COMMENT("Entry:"); 4752 __ enter(); // required for proper stackwalking of RuntimeStub frame 4753 4754 #ifdef _WIN64 4755 __ push(rsi); 4756 __ push(rdi); 4757 #endif 4758 __ fast_sin(x0, x1, x2, x3, x4, x5, x6, x7, rax, rbx, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4759 4760 #ifdef _WIN64 4761 __ pop(rdi); 4762 __ pop(rsi); 4763 #endif 4764 4765 __ leave(); // required for proper stackwalking of RuntimeStub frame 4766 __ ret(0); 4767 4768 return start; 4769 4770 } 4771 4772 address generate_libmCos() { 4773 address start = __ pc(); 4774 4775 const XMMRegister x0 = xmm0; 4776 const XMMRegister x1 = xmm1; 4777 const XMMRegister x2 = xmm2; 4778 const XMMRegister x3 = xmm3; 4779 4780 const XMMRegister x4 = xmm4; 4781 const XMMRegister x5 = xmm5; 4782 const XMMRegister x6 = xmm6; 4783 const XMMRegister x7 = xmm7; 4784 4785 const Register tmp1 = r8; 4786 const Register tmp2 = r9; 4787 const Register tmp3 = r10; 4788 const Register tmp4 = r11; 4789 4790 BLOCK_COMMENT("Entry:"); 4791 __ enter(); // required for proper stackwalking of RuntimeStub frame 4792 4793 #ifdef _WIN64 4794 __ push(rsi); 4795 __ push(rdi); 4796 #endif 4797 __ fast_cos(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4798 4799 #ifdef _WIN64 4800 __ pop(rdi); 4801 __ pop(rsi); 4802 #endif 4803 4804 __ leave(); // required for proper stackwalking of RuntimeStub frame 4805 __ ret(0); 4806 4807 return start; 4808 4809 } 4810 4811 address generate_libmTan() { 4812 address start = __ pc(); 4813 4814 const XMMRegister x0 = xmm0; 4815 const XMMRegister x1 = xmm1; 4816 const XMMRegister x2 = xmm2; 4817 const XMMRegister x3 = xmm3; 4818 4819 const XMMRegister x4 = xmm4; 4820 const XMMRegister x5 = xmm5; 4821 const XMMRegister x6 = xmm6; 4822 const XMMRegister x7 = xmm7; 4823 4824 const Register tmp1 = r8; 4825 const Register tmp2 = r9; 4826 const Register tmp3 = r10; 4827 const Register tmp4 = r11; 4828 4829 BLOCK_COMMENT("Entry:"); 4830 __ enter(); // required for proper stackwalking of RuntimeStub frame 4831 4832 #ifdef _WIN64 4833 __ push(rsi); 4834 __ push(rdi); 4835 #endif 4836 __ fast_tan(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4837 4838 #ifdef _WIN64 4839 __ pop(rdi); 4840 __ pop(rsi); 4841 #endif 4842 4843 __ leave(); // required for proper stackwalking of RuntimeStub frame 4844 __ ret(0); 4845 4846 return start; 4847 4848 } 4849 4850 #undef __ 4851 #define __ masm-> 4852 4853 // Continuation point for throwing of implicit exceptions that are 4854 // not handled in the current activation. Fabricates an exception 4855 // oop and initiates normal exception dispatching in this 4856 // frame. 
  // Continuation point for throwing of implicit exceptions that are
  // not handled in the current activation. Fabricates an exception
  // oop and initiates normal exception dispatching in this
  // frame. Since we need to preserve callee-saved values (currently
  // only for C2, but done for C1 as well) we need a callee-saved oop
  // map and therefore have to make these stubs into RuntimeStubs
  // rather than BufferBlobs. If the compiler needs all registers to
  // be preserved between the fault point and the exception handler
  // then it must assume responsibility for that in
  // AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other
  // implicit exceptions (e.g., NullPointerException or
  // AbstractMethodError on entry) are either at call sites or
  // otherwise assume that stack unwinding will be initiated, so
  // caller-saved registers are assumed to be volatile by the compiler.
  address generate_throw_exception(const char* name,
                                   address runtime_entry,
                                   Register arg1 = noreg,
                                   Register arg2 = noreg) {
    // Information about frame layout at time of blocking runtime call.
    // Note that we only have to preserve callee-saved registers since
    // the compilers are responsible for supplying a continuation point
    // if they expect all registers to be preserved.
    enum layout {
      rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
      rbp_off2,
      return_off,
      return_off2,
      framesize // inclusive of return address
    };

    int insts_size = 512;
    int locs_size  = 64;

    CodeBuffer code(name, insts_size, locs_size);
    OopMapSet* oop_maps  = new OopMapSet();
    MacroAssembler* masm = new MacroAssembler(&code);

    address start = __ pc();

    // This is an inlined and slightly modified version of call_VM
    // which has the ability to fetch the return PC out of
    // thread-local storage and also sets up last_Java_sp slightly
    // differently than the real call_VM.

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    assert(is_even(framesize/2), "sp not 16-byte aligned");

    // return address and rbp are already in place
    __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog

    int frame_complete = __ pc() - start;

    // Set up last_Java_sp and last_Java_fp
    address the_pc = __ pc();
    __ set_last_Java_frame(rsp, rbp, the_pc);
    __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack

    // Call runtime
    if (arg1 != noreg) {
      assert(arg2 != c_rarg1, "clobbered");
      __ movptr(c_rarg1, arg1);
    }
    if (arg2 != noreg) {
      __ movptr(c_rarg2, arg2);
    }
    __ movptr(c_rarg0, r15_thread);
    BLOCK_COMMENT("call runtime_entry");
    __ call(RuntimeAddress(runtime_entry));

    // Generate oop map
    OopMap* map = new OopMap(framesize, 0);

    oop_maps->add_gc_map(the_pc - start, map);

    __ reset_last_Java_frame(true);

    __ leave(); // required for proper stackwalking of RuntimeStub frame

    // check for pending exceptions
#ifdef ASSERT
    Label L;
    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()),
              (int32_t) NULL_WORD);
    __ jcc(Assembler::notEqual, L);
    __ should_not_reach_here();
    __ bind(L);
#endif // ASSERT
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
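
    // A note on units: framesize above is counted in 32-bit slots
    // (BytesPerInt), while new_runtime_stub() below expects a size in
    // machine words. On x86_64, LogBytesPerWord - LogBytesPerInt == 3 - 2
    // == 1, so the shift halves the slot count to yield words.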

    // codeBlob framesize is in words (not VMRegImpl::slot_size)
    RuntimeStub* stub =
      RuntimeStub::new_runtime_stub(name,
                                    &code,
                                    frame_complete,
                                    (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                    oop_maps, false);
    return stub->entry_point();
  }

  void create_control_words() {
    // Round to nearest, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_std   = 0x027F;
    // Round to zero, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
    // Round to nearest, 24-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_24    = 0x007F;
    // Round to nearest, 64-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_64    = 0x037F;
    // Round to nearest, exceptions masked (MXCSR has no precision control)
    StubRoutines::_mxcsr_std           = 0x1F80;
    // Note: the following two constants are 80-bit values;
    // their layout is critical for correct loading by the FPU.
    // Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias1[0] = 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias1[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias1[2] = 0x03ff;
    // Un-bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias2[0] = 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias2[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias2[2] = 0x7bff;
  }

  // Called from the interpreter or from compiled code, either to load the
  // multiple returned values of a value type instance being returned into
  // registers, or to store the returned values into a newly allocated
  // value type instance.
  address generate_return_value_stub(address destination, const char* name, bool has_res) {
    // We need to save all registers that the calling convention may use,
    // so that the runtime call can read or update those registers. This
    // needs to be in sync with SharedRuntime::java_return_convention().
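    // Layout note: each saved register below occupies a pair of 32-bit
    // slots (the *_off entry plus the *_2 entry that follows it), since
    // every GPR and every double is 64 bits wide while offsets in this
    // frame are counted in BytesPerInt units.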
    enum layout {
      pad_off, pad_off_2,
      rax_off, rax_off_2,
      j_rarg5_off, j_rarg5_2,
      j_rarg4_off, j_rarg4_2,
      j_rarg3_off, j_rarg3_2,
      j_rarg2_off, j_rarg2_2,
      j_rarg1_off, j_rarg1_2,
      j_rarg0_off, j_rarg0_2,
      j_farg0_off, j_farg0_2,
      j_farg1_off, j_farg1_2,
      j_farg2_off, j_farg2_2,
      j_farg3_off, j_farg3_2,
      j_farg4_off, j_farg4_2,
      j_farg5_off, j_farg5_2,
      j_farg6_off, j_farg6_2,
      j_farg7_off, j_farg7_2,
      rbp_off, rbp_off_2,
      return_off, return_off_2,

      framesize
    };

    CodeBuffer buffer(name, 1000, 512);
    MacroAssembler* masm = new MacroAssembler(&buffer);

    int frame_size_in_bytes = round_to(framesize*BytesPerInt, 16);
    assert(frame_size_in_bytes == framesize*BytesPerInt, "misaligned");
    int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
    int frame_size_in_words = frame_size_in_bytes / wordSize;

    OopMapSet* oop_maps = new OopMapSet();
    OopMap* map = new OopMap(frame_size_in_slots, 0);

    map->set_callee_saved(VMRegImpl::stack2reg(rax_off),     rax->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_rarg5_off), j_rarg5->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_rarg4_off), j_rarg4->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_rarg3_off), j_rarg3->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_rarg2_off), j_rarg2->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_rarg1_off), j_rarg1->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_rarg0_off), j_rarg0->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_farg0_off), j_farg0->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_farg1_off), j_farg1->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_farg2_off), j_farg2->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_farg3_off), j_farg3->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_farg4_off), j_farg4->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_farg5_off), j_farg5->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_farg6_off), j_farg6->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(j_farg7_off), j_farg7->as_VMReg());

    int start = __ offset();

    __ subptr(rsp, frame_size_in_bytes - 8 /* return address */);

    __ movptr(Address(rsp, rbp_off * BytesPerInt), rbp);
    __ movdbl(Address(rsp, j_farg7_off * BytesPerInt), j_farg7);
    __ movdbl(Address(rsp, j_farg6_off * BytesPerInt), j_farg6);
    __ movdbl(Address(rsp, j_farg5_off * BytesPerInt), j_farg5);
    __ movdbl(Address(rsp, j_farg4_off * BytesPerInt), j_farg4);
    __ movdbl(Address(rsp, j_farg3_off * BytesPerInt), j_farg3);
    __ movdbl(Address(rsp, j_farg2_off * BytesPerInt), j_farg2);
    __ movdbl(Address(rsp, j_farg1_off * BytesPerInt), j_farg1);
    __ movdbl(Address(rsp, j_farg0_off * BytesPerInt), j_farg0);

    __ movptr(Address(rsp, j_rarg0_off * BytesPerInt), j_rarg0);
    __ movptr(Address(rsp, j_rarg1_off * BytesPerInt), j_rarg1);
    __ movptr(Address(rsp, j_rarg2_off * BytesPerInt), j_rarg2);
    __ movptr(Address(rsp, j_rarg3_off * BytesPerInt), j_rarg3);
    __ movptr(Address(rsp, j_rarg4_off * BytesPerInt), j_rarg4);
    __ movptr(Address(rsp, j_rarg5_off * BytesPerInt), j_rarg5);
    __ movptr(Address(rsp, rax_off * BytesPerInt), rax);

    int frame_complete = __ offset();

    __ set_last_Java_frame(noreg, noreg, NULL);
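    // The anchor recorded above lets the stack walker find this frame if
    // the runtime call below blocks (e.g. at a safepoint); it is cleared
    // again by reset_last_Java_frame() once the call returns.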

    __ mov(c_rarg0, r15_thread);
    __ mov(c_rarg1, rax);

    __ call(RuntimeAddress(destination));

    // Set an oopmap for the call site.
    oop_maps->add_gc_map(__ offset() - start, map);

    // clear last_Java_sp
    __ reset_last_Java_frame(false);

    __ movptr(rbp, Address(rsp, rbp_off * BytesPerInt));
    __ movdbl(j_farg7, Address(rsp, j_farg7_off * BytesPerInt));
    __ movdbl(j_farg6, Address(rsp, j_farg6_off * BytesPerInt));
    __ movdbl(j_farg5, Address(rsp, j_farg5_off * BytesPerInt));
    __ movdbl(j_farg4, Address(rsp, j_farg4_off * BytesPerInt));
    __ movdbl(j_farg3, Address(rsp, j_farg3_off * BytesPerInt));
    __ movdbl(j_farg2, Address(rsp, j_farg2_off * BytesPerInt));
    __ movdbl(j_farg1, Address(rsp, j_farg1_off * BytesPerInt));
    __ movdbl(j_farg0, Address(rsp, j_farg0_off * BytesPerInt));

    __ movptr(j_rarg0, Address(rsp, j_rarg0_off * BytesPerInt));
    __ movptr(j_rarg1, Address(rsp, j_rarg1_off * BytesPerInt));
    __ movptr(j_rarg2, Address(rsp, j_rarg2_off * BytesPerInt));
    __ movptr(j_rarg3, Address(rsp, j_rarg3_off * BytesPerInt));
    __ movptr(j_rarg4, Address(rsp, j_rarg4_off * BytesPerInt));
    __ movptr(j_rarg5, Address(rsp, j_rarg5_off * BytesPerInt));
    __ movptr(rax, Address(rsp, rax_off * BytesPerInt));

    __ addptr(rsp, frame_size_in_bytes - 8);

    // check for pending exceptions
    Label pending;
    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::notEqual, pending);

    if (has_res) {
      __ get_vm_result(rax, r15_thread);
    }

    __ ret(0);

    __ bind(pending);

    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

    // -------------
    // make sure all code is generated
    masm->flush();

    RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, false);
    return stub->entry_point();
  }

  // Initialization
  void generate_initial() {
    // Generates all stubs and initializes the entry points

    // These platform-specific settings are needed by generate_call_stub()
    create_control_words();

    // Entry points that exist on all platforms. Note: this is code
    // that could be shared among different platforms - however the
    // benefit seems to be smaller than the disadvantage of having a
    // much more complicated generator structure. See also comment in
    // stubRoutines.hpp.
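
    // Generate the forward-exception stub first: stubs created later
    // (e.g. via generate_throw_exception()) end with a jump to
    // StubRoutines::forward_exception_entry().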
    StubRoutines::_forward_exception_entry = generate_forward_exception();

    StubRoutines::_call_stub_entry =
      generate_call_stub(StubRoutines::_call_stub_return_address);

    // is referenced by megamorphic call
    StubRoutines::_catch_exception_entry = generate_catch_exception();

    // atomic calls
    StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
    StubRoutines::_atomic_xchg_ptr_entry     = generate_atomic_xchg_ptr();
    StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
    StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
    StubRoutines::_atomic_add_entry          = generate_atomic_add();
    StubRoutines::_atomic_add_ptr_entry      = generate_atomic_add_ptr();
    StubRoutines::_fence_entry               = generate_orderaccess_fence();

    // platform dependent
    StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
    StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();

    StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();

    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_StackOverflowError));
    StubRoutines::_throw_delayed_StackOverflowError_entry =
      generate_throw_exception("delayed StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_delayed_StackOverflowError));
    if (UseCRC32Intrinsics) {
      // set the table address before generating the stub, which uses it
      StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
    }

    if (UseCRC32CIntrinsics) {
      bool supports_clmul = VM_Version::supports_clmul();
      StubRoutines::x86::generate_CRC32C_table(supports_clmul);
      StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
      StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
    }
    if (VM_Version::supports_sse2() && UseLibmIntrinsic && InlineIntrinsics) {
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::x86::_ONEHALF_adr = (address)StubRoutines::x86::_ONEHALF;
        StubRoutines::x86::_P_2_adr = (address)StubRoutines::x86::_P_2;
        StubRoutines::x86::_SC_4_adr = (address)StubRoutines::x86::_SC_4;
        StubRoutines::x86::_Ctable_adr = (address)StubRoutines::x86::_Ctable;
        StubRoutines::x86::_SC_2_adr = (address)StubRoutines::x86::_SC_2;
        StubRoutines::x86::_SC_3_adr = (address)StubRoutines::x86::_SC_3;
        StubRoutines::x86::_SC_1_adr = (address)StubRoutines::x86::_SC_1;
        StubRoutines::x86::_PI_INV_TABLE_adr = (address)StubRoutines::x86::_PI_INV_TABLE;
        StubRoutines::x86::_PI_4_adr = (address)StubRoutines::x86::_PI_4;
        StubRoutines::x86::_PI32INV_adr = (address)StubRoutines::x86::_PI32INV;
        StubRoutines::x86::_SIGN_MASK_adr = (address)StubRoutines::x86::_SIGN_MASK;
        StubRoutines::x86::_P_1_adr = (address)StubRoutines::x86::_P_1;
        StubRoutines::x86::_P_3_adr = (address)StubRoutines::x86::_P_3;
        StubRoutines::x86::_NEG_ZERO_adr = (address)StubRoutines::x86::_NEG_ZERO;
      }
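      // The *_adr entries published above point at the constant tables
      // (argument-reduction constants and polynomial coefficients) that
      // the fast_sin/fast_cos/fast_tan code in the stubs generated below
      // reads through these pointers.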
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dexp)) {
        StubRoutines::_dexp = generate_libmExp();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
        StubRoutines::_dlog = generate_libmLog();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) {
        StubRoutines::_dlog10 = generate_libmLog10();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) {
        StubRoutines::_dpow = generate_libmPow();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
        StubRoutines::_dsin = generate_libmSin();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
        StubRoutines::_dcos = generate_libmCos();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::_dtan = generate_libmTan();
      }
    }

    StubRoutines::_load_value_type_fields_in_regs =
      generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::load_value_type_fields_in_regs),
                                 "load_value_type_fields_in_regs", false);
    StubRoutines::_store_value_type_fields_to_buf =
      generate_return_value_stub(CAST_FROM_FN_PTR(address, SharedRuntime::store_value_type_fields_to_buf),
                                 "store_value_type_fields_to_buf", true);
  }

  void generate_all() {
    // Generates all stubs and initializes the entry points

    // These entry points require SharedInfo::stack0 to be set up in
    // non-core builds and need to be relocatable, so they each
    // fabricate a RuntimeStub internally.
    StubRoutines::_throw_AbstractMethodError_entry =
      generate_throw_exception("AbstractMethodError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_AbstractMethodError));

    StubRoutines::_throw_IncompatibleClassChangeError_entry =
      generate_throw_exception("IncompatibleClassChangeError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_IncompatibleClassChangeError));

    StubRoutines::_throw_NullPointerException_at_call_entry =
      generate_throw_exception("NullPointerException at call throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_NullPointerException_at_call));

    // entry points that are platform specific
    StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
    StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
    StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
    StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();

    StubRoutines::x86::_float_sign_mask  = generate_fp_mask("float_sign_mask",  0x7FFFFFFF7FFFFFFF);
    StubRoutines::x86::_float_sign_flip  = generate_fp_mask("float_sign_flip",  0x8000000080000000);
    StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
    StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();
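
    // The intrinsic stubs below are each guarded by a Use*Intrinsics
    // flag; these flags are normally set by VM_Version from detected CPU
    // features but may also be set or cleared on the command line.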

    // don't bother generating these AES intrinsic stubs unless global flag is set
    if (UseAESIntrinsics) {
      StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others
      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
      StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
    }
    if (UseAESCTRIntrinsics) {
      StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask();
      StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
    }

    if (UseSHA1Intrinsics) {
      StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
      StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
      StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress");
      StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB");
    }
    if (UseSHA256Intrinsics) {
      StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
      // Duplicate each 16-byte group of the k256 round constants into both
      // halves of a 32-byte slot of k256_W, so wide (256-bit) loads see the
      // same constants in both 128-bit lanes.
      char* dst = (char*)StubRoutines::x86::_k256_W;
      char* src = (char*)StubRoutines::x86::_k256;
      for (int ii = 0; ii < 16; ++ii) {
        memcpy(dst + 32 * ii,      src + 16 * ii, 16);
        memcpy(dst + 32 * ii + 16, src + 16 * ii, 16);
      }
      StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask();
      StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress");
      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB");
    }
    if (UseSHA512Intrinsics) {
      StubRoutines::x86::_k512_W_addr = (address)StubRoutines::x86::_k512_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = generate_pshuffle_byte_flip_mask_sha512();
      StubRoutines::_sha512_implCompress = generate_sha512_implCompress(false, "sha512_implCompress");
      StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB");
    }

    // Generate GHASH intrinsics code
    if (UseGHASHIntrinsics) {
      StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();
      StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask();
      StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
    }

    // Safefetch stubs.
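    // SafeFetch32/SafeFetchN load a value from an address that may be
    // unmapped, without crashing the VM: if the load at *_fault_pc faults,
    // the signal handler resumes execution at *_continuation_pc, and the
    // caller-supplied default value is returned instead.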
    generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
                                                   &StubRoutines::_safefetch32_fault_pc,
                                                   &StubRoutines::_safefetch32_continuation_pc);
    generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                                                       &StubRoutines::_safefetchN_fault_pc,
                                                       &StubRoutines::_safefetchN_continuation_pc);
#ifdef COMPILER2
    if (UseMultiplyToLenIntrinsic) {
      StubRoutines::_multiplyToLen = generate_multiplyToLen();
    }
    if (UseSquareToLenIntrinsic) {
      StubRoutines::_squareToLen = generate_squareToLen();
    }
    if (UseMulAddIntrinsic) {
      StubRoutines::_mulAdd = generate_mulAdd();
    }
#ifndef _WINDOWS
    if (UseMontgomeryMultiplyIntrinsic) {
      StubRoutines::_montgomeryMultiply
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
    }
    if (UseMontgomerySquareIntrinsic) {
      StubRoutines::_montgomerySquare
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
    }
#endif // !_WINDOWS
#endif // COMPILER2

    if (UseVectorizedMismatchIntrinsic) {
      StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch();
    }
  }

 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
}; // end class declaration

void StubGenerator_generate(CodeBuffer* code, bool all) {
  StubGenerator g(code, all);
}