/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions

// Stub Code definitions

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    // This can destroy rscratch1 if counter is far from the code cache
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter) \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif

  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    c_rarg4:   (interpreter) entry point              address
  //    c_rarg5:   parameters                             intptr_t*
  //    16(rbp):   parameter size (in words)              int
  //    24(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -12 [ argument word 1      ]
  // -11 [ saved r15            ] <--- rsp_after_call
  // -10 [ saved r14            ]
  //  -9 [ saved r13            ]
  //  -8 [ saved r12            ]
  //  -7 [ saved rbx            ]
  //  -6 [ call wrapper         ]
  //  -5 [ result               ]
  //  -4 [ result type          ]
  //  -3 [ method               ]
  //  -2 [ entry point          ]
  //  -1 [ parameters           ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ parameter size       ]
  //   3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    48(rbp):   (interpreter) entry point              address
  //    56(rbp):   parameters                             intptr_t*
  //    64(rbp):   parameter size (in words)              int
  //    72(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -60 [ argument word 1      ]
  // -59 [ saved xmm31          ] <--- rsp_after_call
  //     [ saved xmm16-xmm30    ] (EVEX enabled, else the space is blank)
  // -27 [ saved xmm15          ]
  //     [ saved xmm7-xmm14     ]
  //  -9 [ saved xmm6           ] (each xmm register takes 2 slots)
  //  -7 [ saved r15            ]
  //  -6 [ saved r14            ]
  //  -5 [ saved r13            ]
  //  -4 [ saved r12            ]
  //  -3 [ saved rdi            ]
  //  -2 [ saved rsi            ]
  //  -1 [ saved rbx            ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ call wrapper         ]
  //   3 [ result               ]
  //   4 [ result type          ]
  //   5 [ method               ]
  //   6 [ entry point          ]
  //   7 [ parameters           ]
  //   8 [ parameter size       ]
  //   9 [ thread               ]
  //
  //    Windows reserves the caller's stack space for arguments 1-4.
  //    We spill c_rarg0-c_rarg3 to this space.

  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    xmm_save_first     = 6,  // save from xmm6
    xmm_save_last      = 31, // to xmm31
    xmm_save_base      = -9,
    rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -59
    r15_off            = -7,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            = 0,
    retaddr_off        = 1,
    call_wrapper_off   = 2,
    result_off         = 3,
    result_type_off    = 4,
    method_off         = 5,
    entry_point_off    = 6,
    parameters_off     = 7,
    parameter_size_off = 8,
    thread_off         = 9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            = 0,
    retaddr_off        = 1,
    parameter_size_off = 2,
    thread_off         = 3
#endif
  };

#ifdef _WIN64
  Address xmm_save(int reg) {
    assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
    return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
  }
#endif

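  // In effect the stub generated below behaves like a C function with roughly
  // this shape (a sketch only; the authoritative typedef is CallStub in
  // stubRoutines.hpp, where the final argument is the current thread):
  //
  //   void call_stub(address call_wrapper, intptr_t* result,
  //                  BasicType result_type, Method* method,
  //                  address entry_point, intptr_t* parameters,
  //                  int parameter_size_in_words, Thread* thread);
  //
  // On Linux the first six arguments arrive in c_rarg0-c_rarg5 and the last
  // two on the stack; on Windows only four register args exist, so the rest
  // arrive at the rbp-relative offsets documented above.
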
  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,   c_rarg5); // parameters
    __ movptr(entry_point,  c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);
    if (UseAVX > 2) {
      __ movl(rbx, 0xffff);
      __ kmovwl(k1, rbx);
    }
#ifdef _WIN64
    int last_reg = 15;
    if (UseAVX > 2) {
      last_reg = 31;
    }
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vextractf32x4(xmm_save(i), as_XMMRegister(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(xmm_save(i), as_XMMRegister(i));
      }
    }

    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

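    // Note: on non-Windows builds the MXCSR block above saves the C caller's
    // control word and, if it differs from the VM's standard setting
    // (StubRoutines::addr_mxcsr_std()), loads the standard one, so Java
    // float/double code always runs with the rounding mode and exception
    // masks it expects. The saved word is reloaded in the epilogue below.
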
    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);      // parameter pointer
    __ movl(c_rarg1, c_rarg3);           // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0)); // get parameter
    __ addptr(c_rarg2, wordSize);        // advance to next parameter
    __ decrementl(c_rarg1);              // decrement counter
    __ push(rax);                        // pass parameter
    __ jcc(Assembler::notZero, loop);

    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);             // get Method*
    __ movptr(c_rarg1, entry_point);    // get entry_point
    __ mov(r13, rsp);                   // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::call_stub: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::call_stub: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L3);
    }
#endif

    // restore regs belonging to calling function
#ifdef _WIN64
    // emit the restores for xmm regs
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vinsertf32x4(as_XMMRegister(i), as_XMMRegister(i), xmm_save(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(as_XMMRegister(i), xmm_save(i));
      }
    }
#endif
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ vzeroupper();
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }

  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to set up the
  // rsp.
  //
  // rax: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    address start = __ pc();

    // same as in generate_call_stub():
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
    const Address thread        (rbp, thread_off         * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::catch_exception: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::catch_exception: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L3);
    }
#endif

    // set pending exception
    __ verify_oop(rax);

    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
    __ lea(rscratch1, ExternalAddress((address)__FILE__));
    __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
    __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL,
           "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }

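  // Note: the jump above re-enters the call stub immediately after its
  // "call Java function" instruction, so the normal call stub epilogue runs
  // (callee-saved registers restored, stack unwound) and the caller then
  // observes the exception via the pending-exception field set here rather
  // than via a meaningful result value.
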
  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }

  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::xchg_long(jlong exchange_value, volatile jlong* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

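  // Note on the cmpxchg stubs above and below: x86 CMPXCHG implicitly
  // compares rax/eax/al with the destination and leaves the old memory value
  // in rax either way. That is why compare_value is first copied into rax and
  // rax is then simply returned: on success it still holds compare_value, on
  // failure it holds the current *dest, matching the documented result.
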
  // Support for int8_t atomic::atomic_cmpxchg(int8_t exchange_value, volatile int8_t* dest,
  //                                           int8_t compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_byte() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
    address start = __ pc();

    __ movsbq(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for int64_t atomic::atomic_cmpxchg(int64_t exchange_value,
  //                                            volatile int64_t* dest,
  //                                            int64_t compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jint atomic::add(jint add_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    if ( os::is_MP() ) __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);
    __ addl(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
    if ( os::is_MP() ) __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
    __ addptr(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }

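  // Note: on x86_64 StoreLoad is the only ordering that needs an instruction
  // at all (the hardware memory model already provides the other three
  // combinations); MacroAssembler::membar typically realizes it as a locked
  // read-modify-write on the stack (or an mfence), giving the full fence
  // semantics OrderAccess::fence() requires.
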
  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess). This is used as part of debugging
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp(rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // caller's fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_sp()
  //
  // This routine is used to find the previous stack pointer for the
  // caller.
  address generate_get_previous_sp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_sp");
    address start = __ pc();

    __ movptr(rax, rsp);
    __ addptr(rax, 8); // return address is at the top of the stack.
    __ ret(0);

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize);    // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }

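  // Background for the four *_fixup stubs below: the CVTTSS2SI/CVTTSD2SI
  // instructions produce the "integer indefinite" value (min_jint/min_jlong)
  // when the input is NaN or out of range, while Java requires NaN -> 0 and
  // saturation at max/min for overflow. Compiled code that sees the
  // indefinite result calls into these slow paths, which inspect the original
  // float/double bits (passed on the stack) and store the correct Java answer
  // back into the same slot.
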
  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

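  // The two helpers below emit small blobs of constant data, not code: a
  // 128-bit (fp_mask) or 512-bit (vector_fp_mask) pattern built by repeating
  // the given 64-bit mask. Compiled code references these constants by
  // address, e.g. to AND away the sign bit for an abs() or XOR it for a
  // negation.
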
  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64( mask, relocInfo::none );
    __ emit_data64( mask, relocInfo::none );

    return start;
  }

  address generate_vector_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);

    return start;
  }

  address generate_vector_custom_i32(const char *stub_name, Assembler::AvxVectorLen len,
                                     int32_t val0, int32_t val1, int32_t val2, int32_t val3,
                                     int32_t val4 = 0, int32_t val5 = 0, int32_t val6 = 0, int32_t val7 = 0,
                                     int32_t val8 = 0, int32_t val9 = 0, int32_t val10 = 0, int32_t val11 = 0,
                                     int32_t val12 = 0, int32_t val13 = 0, int32_t val14 = 0, int32_t val15 = 0) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    assert(len != Assembler::AVX_NoVec, "vector len must be specified");
    __ emit_data(val0, relocInfo::none, 0);
    __ emit_data(val1, relocInfo::none, 0);
    __ emit_data(val2, relocInfo::none, 0);
    __ emit_data(val3, relocInfo::none, 0);
    if (len >= Assembler::AVX_256bit) {
      __ emit_data(val4, relocInfo::none, 0);
      __ emit_data(val5, relocInfo::none, 0);
      __ emit_data(val6, relocInfo::none, 0);
      __ emit_data(val7, relocInfo::none, 0);
      if (len >= Assembler::AVX_512bit) {
        __ emit_data(val8, relocInfo::none, 0);
        __ emit_data(val9, relocInfo::none, 0);
        __ emit_data(val10, relocInfo::none, 0);
        __ emit_data(val11, relocInfo::none, 0);
        __ emit_data(val12, relocInfo::none, 0);
        __ emit_data(val13, relocInfo::none, 0);
        __ emit_data(val14, relocInfo::none, 0);
        __ emit_data(val15, relocInfo::none, 0);
      }
    }

    return start;
  }

  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,
      saved_r10     = 8 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr   = 16 * wordSize,
      error_msg     = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit); // if obj is NULL it is OK
    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // set r12 to heapbase for load_klass()
    __ reinit_heapbase();

    // make sure klass is 'reasonable', i.e., not zero.
    __ load_klass(rax, rax);  // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // restore c_rarg3
    __ pop(c_rarg2);                               // restore c_rarg2
    __ pop(r12);                                   // restore r12
    __ popf();                                     // restore flags
    __ ret(4 * wordSize);                          // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // get saved c_rarg3 back
    __ pop(c_rarg2);                               // get saved c_rarg2 back
    __ pop(r12);                                   // get saved r12 back
    __ popf();                                     // get saved flags off stack --
                                                   // will be ignored

    __ pusha();                                    // push registers
                                                   // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //     [tos +  0] 16 saved registers
    //     [tos + 16] return address
    //   * [tos + 17] error message (char*)
    //   * [tos + 18] object to verify (oop)
    //   * [tos + 19] saved rax - saved by caller and bashed
    //   * [tos + 20] saved r10 (rscratch1) - saved by caller
    //   * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ mov(rsp, r12);                               // restore rsp
    __ popa();                                      // pop registers (includes r12)
    __ ret(4 * wordSize);                           // pop caller saved stuff

    return start;
  }

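  // Note: this blob is installed as the VerifyOops subroutine entry; callers
  // (see MacroAssembler::verify_oop) push the error message and the oop and
  // save rax/r10 themselves before calling here, which is why everything is
  // passed on the stack and why ret(4 * wordSize) pops exactly the four
  // starred caller-pushed words above.
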
  //
  // Verify that a register contains a clean 32-bit positive value
  // (high 32 bits are 0) so it can be used in 64-bit shifts.
  //
  //  Input:
  //    Rint  -  32-bit value
  //    Rtmp  -  scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#ifdef ASSERT
    Label L;
    assert_different_registers(Rtmp, Rint);
    __ movslq(Rtmp, Rint);
    __ cmpq(Rtmp, Rint);
    __ jcc(Assembler::equal, L);
    __ stop("high 32-bits of int value are not 0");
    __ bind(L);
#endif
  }

  // Generate overlap test for array copy stubs
  //
  // Input:
  //     c_rarg0 - from
  //     c_rarg1 - to
  //     c_rarg2 - element count
  //
  // Output:
  //     rax   - &from[element count]
  //
  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(NULL, &L_no_overlap, sf);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
    const Register from     = c_rarg0;
    const Register to       = c_rarg1;
    const Register count    = c_rarg2;
    const Register end_from = rax;

    __ cmpptr(to, from);
    __ lea(end_from, Address(from, count, sf, 0));
    if (NOLp == NULL) {
      ExternalAddress no_overlap(no_overlap_target);
      __ jump_cc(Assembler::belowEqual, no_overlap);
      __ cmpptr(to, end_from);
      __ jump_cc(Assembler::aboveEqual, no_overlap);
    } else {
      __ jcc(Assembler::belowEqual, (*NOLp));
      __ cmpptr(to, end_from);
      __ jcc(Assembler::aboveEqual, (*NOLp));
    }
  }

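  // The overlap test above relies on unsigned pointer compares: a forward
  // (disjoint) copy is safe when to <= from or when to >= from + count
  // elements; only when the destination starts strictly inside the source
  // range does control fall through to the conjoint (backward-copying) code.
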
  // Shuffle first three arg regs on Windows into Linux/Solaris locations.
  //
  // Outputs:
  //    rdi - rcx
  //    rsi - rdx
  //    rdx - r8
  //    rcx - r9
  //
  // Registers r9 and r10 are used on Windows to save rdi and rsi, which are
  // non-volatile there.  r9 and r10 should not be used by the caller.
  //
  void setup_arg_regs(int nargs = 3) {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
    assert(nargs == 3 || nargs == 4, "else fix");
#ifdef _WIN64
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    if (nargs >= 4)
      __ mov(rax, r9);  // r9 is also saved_rdi
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
    if (nargs >= 4)
      __ mov(rcx, rax); // c_rarg3 (via rax)
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
  }

  void restore_arg_regs() {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
#ifdef _WIN64
    __ movptr(rdi, saved_rdi);
    __ movptr(rsi, saved_rsi);
#endif
  }

  // Copy big chunks forward
  //
  // Inputs:
  //   end_from     - source array end address
  //   end_to       - destination array end address
  //   qword_count  - 64-bit element count, negative
  //   to           - scratch
  //   L_copy_bytes - entry label
  //   L_copy_8_bytes  - exit  label
  //
  void copy_bytes_forward(Register end_from, Register end_to,
                          Register qword_count, Register to,
                          Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      if (UseAVX > 2) {
        __ movl(to, 0xffff);
        __ kmovwl(k1, to);
      }
      // Copy 64-bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit);
        __ evmovdqul(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
        __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
        __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 8);
      __ jcc(Assembler::lessEqual, L_loop);
      __ subptr(qword_count, 4);  // sub(8) and add(4)
      __ jccb(Assembler::greater, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
      }
      __ addptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32-bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
      __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
      __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
      __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
      __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);

      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 4);
      __ jcc(Assembler::lessEqual, L_loop);
    }
    __ subptr(qword_count, 4);
    __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
  }

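  // Loop idiom used here: the arraycopy stubs below bias end_from/end_to to
  // the last qword and negate qword_count, so each iteration addresses
  // end_from[qword_count * 8] and a single add-and-branch (addptr + jcc) both
  // advances the index toward zero and decides whether to continue. The
  // backward variant below uses the same scheme with a positive, decreasing
  // count.
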
  // Copy big chunks backward
  //
  // Inputs:
  //   from         - source array address
  //   dest         - destination array address
  //   qword_count  - 64-bit element count
  //   to           - scratch
  //   L_copy_bytes - entry label
  //   L_copy_8_bytes  - exit  label
  //
  void copy_bytes_backward(Register from, Register dest,
                           Register qword_count, Register to,
                           Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      if (UseAVX > 2) {
        __ movl(to, 0xffff);
        __ kmovwl(k1, to);
      }
      // Copy 64-bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 0), Assembler::AVX_512bit);
        __ evmovdqul(Address(dest, qword_count, Address::times_8, 0), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
        __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
        __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
        __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
        __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
        __ movdqu(xmm3, Address(from, qword_count, Address::times_8, 0));
        __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 8);
      __ jcc(Assembler::greaterEqual, L_loop);

      __ addptr(qword_count, 4);  // add(8) and sub(4)
      __ jccb(Assembler::less, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
        __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
      }
      __ subptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32-bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(from, qword_count, Address::times_8, 24));
      __ movq(Address(dest, qword_count, Address::times_8, 24), to);
      __ movq(to, Address(from, qword_count, Address::times_8, 16));
      __ movq(Address(dest, qword_count, Address::times_8, 16), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  8));
      __ movq(Address(dest, qword_count, Address::times_8,  8), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  0));
      __ movq(Address(dest, qword_count, Address::times_8,  0), to);

      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 4);
      __ jcc(Assembler::greaterEqual, L_loop);
    }
    __ addptr(qword_count, 4);
    __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_byte_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_byte_copy().
  //
  address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    Label L_copy_byte, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count); // make the count negative
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jccb(Assembler::zero, L_copy_byte);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ addptr(end_from, 2);
    __ addptr(end_to, 2);

    // Check for and copy trailing byte
    __ BIND(L_copy_byte);
    __ testl(byte_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movb(rax, Address(end_from, 8));
    __ movb(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }

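  // Tail handling in the stub above: byte_count holds the full element
  // (= byte) count, so after the qword loop its low three bits are finished
  // off with at most one dword, one word and one byte move, each guarded by a
  // testl on byte_count.
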
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from high to low addresses.

    // Check for and copy trailing byte
    __ testl(byte_count, 1);
    __ jcc(Assembler::zero, L_copy_2_bytes);
    __ movb(rax, Address(from, byte_count, Address::times_1, -1));
    __ movb(Address(to, byte_count, Address::times_1, -1), rax);
    __ decrement(byte_count); // Adjust for possible trailing word

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jcc(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, byte_count, Address::times_1, -2));
    __ movw(Address(to, byte_count, Address::times_1, -2), rax);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

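  // Note the two epilogues in the stub above: the first is reached when the
  // trailing-qword loop finishes, the second when copy_bytes_backward falls
  // through with no trailing qwords left. Both paths must restore the arg
  // registers and produce the 0 return value expected of arraycopy stubs.
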
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_short_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_short_copy().
  //
  address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Original 'dest' is trashed, so we can't use it as a
    // base register for a possible trailing word copy

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }

  address generate_fill(BasicType t, bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    BLOCK_COMMENT("Entry:");

    const Register to    = c_rarg0;  // destination array address
    const Register value = c_rarg1;  // value
    const Register count = c_rarg2;  // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ generate_fill(t, aligned, to, value, count, rax, xmm0);

    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

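  // The fill stub above is a thin wrapper: MacroAssembler::generate_fill does
  // the per-element-size dispatch and vectorization, while the enter()/leave()
  // pair exists so the runtime sees a proper RuntimeStub frame during stack
  // walks.
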

  address generate_fill(BasicType t, bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    BLOCK_COMMENT("Entry:");

    const Register to    = c_rarg0; // destination array address
    const Register value = c_rarg1; // value
    const Register count = c_rarg2; // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ generate_fill(t, aligned, to, value, count, rax, xmm0);

    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0 - source array address
  //   c_rarg1 - destination array address
  //   c_rarg2 - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
                                       address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_2);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2); // count => qword_count

    // Copy from high to low addresses.  Use 'to' as scratch.
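    // (Copying downward is what makes the overlapping case safe: when
    // 'to' lies above 'from', bytes still to be read are never
    // overwritten before they are fetched.)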

    // Check for and copy trailing word
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, word_count, Address::times_2, -2));
    __ movw(Address(to, word_count, Address::times_2, -2), rax);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0 - source array address
  //   c_rarg1 - destination array address
  //   c_rarg2 - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  // Side Effects:
  //   disjoint_int_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_int_oop_copy().
  //
  address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry,
                                         const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
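    // ("Clean int" here means the 32-bit count arrived properly
    // sign-extended into its 64-bit register; assert_clean_int is a
    // debug-only check and emits nothing in product builds.)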

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    DecoratorSet decorators = ARRAYCOPY_DISJOINT;
    if (dest_uninitialized) {
      decorators |= AS_DEST_NOT_INITIALIZED;
    }
    if (aligned) {
      decorators |= ARRAYCOPY_ALIGNED;
    }

    BasicType type = is_oop ? T_OBJECT : T_INT;
    BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
    bs->arraycopy_prologue(_masm, decorators, type, from, to, count);

    // 'from', 'to' and 'count' are now valid
    __ movptr(dword_count, count);
    __ shrptr(count, 1); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
    __ jccb(Assembler::zero, L_exit);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ BIND(L_exit);
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ vzeroupper();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }
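
  // The oop-copy stubs bracket the copy loop with GC hooks:
  //
  //   bs->arraycopy_prologue(...);   // e.g. pre-write barrier work
  //   ... copy loop ...
  //   bs->arraycopy_epilogue(...);   // e.g. card marking for the range
  //
  // What is actually emitted depends on the BarrierSetAssembler of the
  // GC in use; for primitive element types the hooks typically expand
  // to nothing.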

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0 - source array address
  //   c_rarg1 - destination array address
  //   c_rarg2 - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
                                         address *entry, const char *name,
                                         bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_4);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    DecoratorSet decorators = 0;
    if (dest_uninitialized) {
      decorators |= AS_DEST_NOT_INITIALIZED;
    }
    if (aligned) {
      decorators |= ARRAYCOPY_ALIGNED;
    }

    BasicType type = is_oop ? T_OBJECT : T_INT;
    BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
    // no registers are destroyed by this call
    bs->arraycopy_prologue(_masm, decorators, type, from, to, count);

    assert_clean_int(count, rax); // Make sure 'count' is clean int.
    // 'from', 'to' and 'count' are now valid
    __ movptr(dword_count, count);
    __ shrptr(count, 1); // count => qword_count

    // Copy from high to low addresses.  Use 'to' as scratch.

    // Check for and copy trailing dword
    __ testl(dword_count, 1);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, dword_count, Address::times_4, -4));
    __ movl(Address(to, dword_count, Address::times_4, -4), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    __ BIND(L_exit);
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0 - source array address
  //   c_rarg1 - destination array address
  //   c_rarg2 - element count, treated as ssize_t, can be zero
  //
  // Side Effects:
  //   disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
  //   no-overlap entry point used by generate_conjoint_long_oop_copy().
  //
  address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry,
                                          const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register qword_count = rdx;  // elements count
    const Register end_from    = from; // source array end address
    const Register end_to      = rcx;  // destination array end address
    const Register saved_count = r11;
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    // Save no-overlap entry point for generate_conjoint_long_oop_copy()
    assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
    // 'from', 'to' and 'qword_count' are now valid

    DecoratorSet decorators = ARRAYCOPY_DISJOINT;
    if (dest_uninitialized) {
      decorators |= AS_DEST_NOT_INITIALIZED;
    }
    if (aligned) {
      decorators |= ARRAYCOPY_ALIGNED;
    }

    BasicType type = is_oop ? T_OBJECT : T_LONG;
    BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
    bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count);

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    } else {
      restore_arg_regs();
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
      __ xorptr(rax, rax); // return 0
      __ vzeroupper();
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    __ BIND(L_exit);
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count);
    restore_arg_regs();
    if (is_oop) {
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
    }
    __ vzeroupper();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0 - source array address
  //   c_rarg1 - destination array address
  //   c_rarg2 - element count, treated as ssize_t, can be zero
  //
  address generate_conjoint_long_oop_copy(bool aligned, bool is_oop,
                                          address nooverlap_target, address *entry,
                                          const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register qword_count = rdx;  // elements count
    const Register saved_count = rcx;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_8);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
    // 'from', 'to' and 'qword_count' are now valid

    DecoratorSet decorators = 0; // conjoint (possibly overlapping) copy, so not ARRAYCOPY_DISJOINT
    if (dest_uninitialized) {
      decorators |= AS_DEST_NOT_INITIALIZED;
    }
    if (aligned) {
      decorators |= ARRAYCOPY_ALIGNED;
    }

    BasicType type = is_oop ? T_OBJECT : T_LONG;
    BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
    bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count);

    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    } else {
      restore_arg_regs();
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
      __ xorptr(rax, rax); // return 0
      __ vzeroupper();
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    __ BIND(L_exit);
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count);
    restore_arg_regs();
    if (is_oop) {
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
    }
    __ vzeroupper();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }


  // Helper for generating a dynamic type check.
  // Smashes no registers.
  void generate_type_check(Register sub_klass,
                           Register super_check_offset,
                           Register super_klass,
                           Label& L_success) {
    assert_different_registers(sub_klass, super_check_offset, super_klass);

    BLOCK_COMMENT("type_check:");

    Label L_miss;

    __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL,
                                     super_check_offset);
    __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL);

    // Fall through on failure!
    __ BIND(L_miss);
  }
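
  // In pseudocode, the fast-path/slow-path pair above implements roughly:
  //
  //   if (sub_klass == super_klass)                                 goto success;
  //   if (*(sub_klass + super_check_offset) == super_klass)         goto success;
  //   if (secondary_supers list of sub_klass contains super_klass)  goto success;
  //   // otherwise fall through to L_miss
  //
  // (A sketch of MacroAssembler::check_klass_subtype_{fast,slow}_path;
  // see macroAssembler_x86.cpp for the exact protocol.)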

  //
  //  Generate checkcasting array copy stub
  //
  //  Input:
  //    c_rarg0   - source array address
  //    c_rarg1   - destination array address
  //    c_rarg2   - element count, treated as ssize_t, can be zero
  //    c_rarg3   - size_t ckoff (super_check_offset)
  // not Win64
  //    c_rarg4   - oop ckval (super_klass)
  // Win64
  //    rsp+40    - oop ckval (super_klass)
  //
  //  Output:
  //    rax ==  0  -  success
  //    rax == -1^K - failure, where K is partial transfer count
  //
  address generate_checkcast_copy(const char *name, address *entry,
                                  bool dest_uninitialized = false) {

    Label L_load_element, L_store_element, L_do_card_marks, L_done;

    // Input registers (after setup_arg_regs)
    const Register from       = rdi; // source array address
    const Register to         = rsi; // destination array address
    const Register length     = rdx; // elements count
    const Register ckoff      = rcx; // super_check_offset
    const Register ckval      = r8;  // super_klass

    // Registers used as temps (r13, r14 are save-on-entry)
    const Register end_from   = from;  // source array end address
    const Register end_to     = r13;   // destination array end address
    const Register count      = rdx;   // -(count_remaining)
    const Register r14_length = r14;   // saved copy of length
    // End pointers are inclusive, and if length is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    const Register rax_oop    = rax;   // actual oop copied
    const Register r11_klass  = r11;   // oop._klass

    //---------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the two arrays are subtypes of Object[] but the
    // destination array type is not equal to or a supertype
    // of the source type.  Each element must be separately
    // checked.

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef ASSERT
    // caller guarantees that the arrays really are different
    // otherwise, we would have to make conjoint checks
    { Label L;
      array_overlap_test(L, TIMES_OOP);
      __ stop("checkcast_copy within a single array");
      __ bind(L);
    }
#endif //ASSERT

    setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
                       // ckoff => rcx, ckval => r8
                       // r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
    // last argument (#4) is on stack on Win64
    __ movptr(ckval, Address(rsp, 6 * wordSize));
#endif

    // Caller of this entry point must set up the argument registers.
    if (entry != NULL) {
      *entry = __ pc();
      BLOCK_COMMENT("Entry:");
    }

    // allocate spill slots for r13, r14
    enum {
      saved_r13_offset,
      saved_r14_offset,
      saved_rbp_offset
    };
    __ subptr(rsp, saved_rbp_offset * wordSize);
    __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
    __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);

    // check that int operands are properly extended to size_t
    assert_clean_int(length, rax);
    assert_clean_int(ckoff, rax);

#ifdef ASSERT
    BLOCK_COMMENT("assert consistent ckoff/ckval");
    // The ckoff and ckval must be mutually consistent,
    // even though caller generates both.
    { Label L;
      int sco_offset = in_bytes(Klass::super_check_offset_offset());
      __ cmpl(ckoff, Address(ckval, sco_offset));
      __ jcc(Assembler::equal, L);
      __ stop("super_check_offset inconsistent");
      __ bind(L);
    }
#endif //ASSERT

    // Loop-invariant addresses.  They are exclusive end pointers.
    Address end_from_addr(from, length, TIMES_OOP, 0);
    Address   end_to_addr(to,   length, TIMES_OOP, 0);
    // Loop-variant addresses.  They assume post-incremented count < 0.
    Address from_element_addr(end_from, count, TIMES_OOP, 0);
    Address   to_element_addr(end_to,   count, TIMES_OOP, 0);

    DecoratorSet decorators = ARRAYCOPY_CHECKCAST;
    if (dest_uninitialized) {
      decorators |= AS_DEST_NOT_INITIALIZED;
    }

    BasicType type = T_OBJECT;
    BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
    bs->arraycopy_prologue(_masm, decorators, type, from, to, count);

    // Copy from low to high addresses, indexed from the end of each array.
    __ lea(end_from, end_from_addr);
    __ lea(end_to, end_to_addr);
    __ movptr(r14_length, length); // save a copy of the length
    assert(length == count, "");   // else fix next line:
    __ negptr(count); // negate and test the length
    __ jcc(Assembler::notZero, L_load_element);

    // Empty array:  Nothing to do.
    __ xorptr(rax, rax); // return 0 on (trivial) success
    __ jmp(L_done);

    // ======== begin loop ========
    // (Loop is rotated; its entry is L_load_element.)
    // Loop control:
    //   for (count = -count; count != 0; count++)
    // Base pointers src, dst are biased by 8*(count-1), to last element.
    __ align(OptoLoopAlignment);

    __ BIND(L_store_element);
    __ store_heap_oop(to_element_addr, rax_oop); // store the oop
    __ increment(count); // increment the count toward zero
    __ jcc(Assembler::zero, L_do_card_marks);

    // ======== loop entry is here ========
    __ BIND(L_load_element);
    __ load_heap_oop(rax_oop, from_element_addr); // load the oop
    __ testptr(rax_oop, rax_oop);
    __ jcc(Assembler::zero, L_store_element);

    __ load_klass(r11_klass, rax_oop); // query the object klass
    generate_type_check(r11_klass, ckoff, ckval, L_store_element);
    // ======== end loop ========

    // It was a real error; we must depend on the caller to finish the job.
    // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
    // Emit GC store barriers for the oops we have copied (r14 + rdx),
    // and report their number to the caller.
    assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1);
    Label L_post_barrier;
    __ addptr(r14_length, count);  // K = (original - remaining) oops
    __ movptr(rax, r14_length);    // save the value
    __ notptr(rax);                // report (-1^K) to caller (does not affect flags)
    __ jccb(Assembler::notZero, L_post_barrier);
    __ jmp(L_done); // K == 0, nothing was copied, skip post barrier

    // Come here on success only.
    __ BIND(L_do_card_marks);
    __ xorptr(rax, rax); // return 0 on success

    __ BIND(L_post_barrier);
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, r14_length);
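    // At this point rax already holds the return value: 0 on full
    // success, or ~K (written "-1^K" in the header comment) when only
    // K elements were copied before an element failed the type check.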

    // Common exit point (success or failure).
    __ BIND(L_done);
    __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
    __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  //
  //  Generate 'unsafe' array copy stub
  //  Though just as safe as the other stubs, it takes an unscaled
  //  size_t argument instead of an element count.
  //
  //  Input:
  //    c_rarg0   - source array address
  //    c_rarg1   - destination array address
  //    c_rarg2   - byte count, treated as ssize_t, can be zero
  //
  // Examines the alignment of the operands and dispatches
  // to a long, int, short, or byte copy loop.
  //
  address generate_unsafe_copy(const char *name,
                               address byte_copy_entry, address short_copy_entry,
                               address int_copy_entry, address long_copy_entry) {

    Label L_long_aligned, L_int_aligned, L_short_aligned;

    // Input registers (before setup_arg_regs)
    const Register from = c_rarg0; // source array address
    const Register to   = c_rarg1; // destination array address
    const Register size = c_rarg2; // byte count (size_t)

    // Register used as a temp
    const Register bits = rax;     // test copy of low bits

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);

    __ mov(bits, from);
    __ orptr(bits, to);
    __ orptr(bits, size);

    __ testb(bits, BytesPerLong-1);
    __ jccb(Assembler::zero, L_long_aligned);

    __ testb(bits, BytesPerInt-1);
    __ jccb(Assembler::zero, L_int_aligned);

    __ testb(bits, BytesPerShort-1);
    __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));

    __ BIND(L_short_aligned);
    __ shrptr(size, LogBytesPerShort); // size => short_count
    __ jump(RuntimeAddress(short_copy_entry));

    __ BIND(L_int_aligned);
    __ shrptr(size, LogBytesPerInt); // size => int_count
    __ jump(RuntimeAddress(int_copy_entry));

    __ BIND(L_long_aligned);
    __ shrptr(size, LogBytesPerLong); // size => qword_count
    __ jump(RuntimeAddress(long_copy_entry));

    return start;
  }
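
  // Illustrative example of the dispatch above (addresses made up): with
  // from = 0x...10, to = 0x...18 and size = 0x40, the OR of the three has
  // its low three bits clear, so the long loop is taken with size >> 3
  // qwords.  A size of 0x42 instead fails the BytesPerLong and BytesPerInt
  // tests but passes the BytesPerShort test, so the short loop is taken
  // with size >> 1 elements.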

  // Perform range checks on the proposed arraycopy.
  // Kills temp, but nothing else.
  // Also, clean the sign bits of src_pos and dst_pos.
  void arraycopy_range_checks(Register src,     // source array oop (c_rarg0)
                              Register src_pos, // source position (c_rarg1)
                              Register dst,     // destination array oop (c_rarg2)
                              Register dst_pos, // destination position (c_rarg3)
                              Register length,
                              Register temp,
                              Label& L_failed) {
    BLOCK_COMMENT("arraycopy_range_checks:");

    //  if (src_pos + length > arrayOop(src)->length())  FAIL;
    __ movl(temp, length);
    __ addl(temp, src_pos); // src_pos + length
    __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    //  if (dst_pos + length > arrayOop(dst)->length())  FAIL;
    __ movl(temp, length);
    __ addl(temp, dst_pos); // dst_pos + length
    __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
    // Move with sign extension can be used since they are positive.
    __ movslq(src_pos, src_pos);
    __ movslq(dst_pos, dst_pos);

    BLOCK_COMMENT("arraycopy_range_checks done");
  }

  //
  //  Generate generic array copy stubs
  //
  //  Input:
  //    c_rarg0    -  src oop
  //    c_rarg1    -  src_pos (32-bits)
  //    c_rarg2    -  dst oop
  //    c_rarg3    -  dst_pos (32-bits)
  // not Win64
  //    c_rarg4    -  element count (32-bits)
  // Win64
  //    rsp+40     -  element count (32-bits)
  //
  //  Output:
  //    rax ==  0  -  success
  //    rax == -1^K - failure, where K is partial transfer count
  //
  address generate_generic_copy(const char *name,
                                address byte_copy_entry, address short_copy_entry,
                                address int_copy_entry, address oop_copy_entry,
                                address long_copy_entry, address checkcast_copy_entry) {

    Label L_failed, L_failed_0, L_objArray;
    Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;

    // Input registers
    const Register src     = c_rarg0; // source array oop
    const Register src_pos = c_rarg1; // source position
    const Register dst     = c_rarg2; // destination array oop
    const Register dst_pos = c_rarg3; // destination position
#ifndef _WIN64
    const Register length  = c_rarg4;
#else
    const Address  length(rsp, 6 * wordSize); // elements count is on stack on Win64
#endif

    { int modulus = CodeEntryAlignment;
      int target  = modulus - 5; // 5 = sizeof jmp(L_failed)
      int advance = target - (__ offset() % modulus);
      if (advance < 0)  advance += modulus;
      if (advance > 0)  __ nop(advance);
    }
    StubCodeMark mark(this, "StubRoutines", name);

    // Short-hop target to L_failed.  Makes for denser prologue code.
    __ BIND(L_failed_0);
    __ jmp(L_failed);
    assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");

    __ align(CodeEntryAlignment);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_generic_array_copy_ctr);

    //-----------------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the following conditions are met:
    //
    // (1) src and dst must not be null.
    // (2) src_pos must not be negative.
    // (3) dst_pos must not be negative.
    // (4) length  must not be negative.
    // (5) src klass and dst klass should be the same and not NULL.
    // (6) src and dst should be arrays.
    // (7) src_pos + length must not exceed length of src.
    // (8) dst_pos + length must not exceed length of dst.
    //

    //  if (src == NULL) return -1;
    __ testptr(src, src);         // src oop
    size_t j1off = __ offset();
    __ jccb(Assembler::zero, L_failed_0);

    //  if (src_pos < 0) return -1;
    __ testl(src_pos, src_pos); // src_pos (32-bits)
    __ jccb(Assembler::negative, L_failed_0);

    //  if (dst == NULL) return -1;
    __ testptr(dst, dst);         // dst oop
    __ jccb(Assembler::zero, L_failed_0);

    //  if (dst_pos < 0) return -1;
    __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
    size_t j4off = __ offset();
    __ jccb(Assembler::negative, L_failed_0);

    // The first four tests are very dense code,
    // but not quite dense enough to put four
    // jumps in a 16-byte instruction fetch buffer.
    // That's good, because some branch predictors
    // do not like jumps so close together.
    // Make sure of this.
    guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");

    // registers used as temp
    const Register r11_length    = r11; // elements count to copy
    const Register r10_src_klass = r10; // array klass

    //  if (length < 0) return -1;
    __ movl(r11_length, length);  // length (elements count, 32-bits value)
    __ testl(r11_length, r11_length);
    __ jccb(Assembler::negative, L_failed_0);

    __ load_klass(r10_src_klass, src);
#ifdef ASSERT
    //  assert(src->klass() != NULL);
    {
      BLOCK_COMMENT("assert klasses not null {");
      Label L1, L2;
      __ testptr(r10_src_klass, r10_src_klass);
      __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL
      __ bind(L1);
      __ stop("broken null klass");
      __ bind(L2);
      __ load_klass(rax, dst);
      __ cmpq(rax, 0);
      __ jcc(Assembler::equal, L1);   // this would be broken also
      BLOCK_COMMENT("} assert klasses not null done");
    }
#endif

    // Load layout helper (32-bits)
    //
    //  |array_tag|     | header_size | element_type |     |log2_element_size|
    // 32        30    24            16              8     2                 0
    //
    //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
    //

    const int lh_offset = in_bytes(Klass::layout_helper_offset());

    // Handle objArrays completely differently...
    const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
    __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh);
    __ jcc(Assembler::equal, L_objArray);

    //  if (src->klass() != dst->klass()) return -1;
    __ load_klass(rax, dst);
    __ cmpq(r10_src_klass, rax);
    __ jcc(Assembler::notEqual, L_failed);

    const Register rax_lh = rax; // layout helper
    __ movl(rax_lh, Address(r10_src_klass, lh_offset));

    //  if (!src->is_Array()) return -1;
    __ cmpl(rax_lh, Klass::_lh_neutral_value);
    __ jcc(Assembler::greaterEqual, L_failed);

    // At this point, it is known to be a typeArray (array_tag 0x3).
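    // Worked example of the layout-helper encoding (the header size shown
    // is only illustrative; it varies with oop compression and platform):
    // a jint[] with a 16-byte header would encode as
    //   (0x3 << 30) | (16 << 16) | (T_INT << 8) | 2  ==  0xC0100A02
    // i.e. tag 0x3, header_size 16, element_type T_INT (0x0a),
    // log2_element_size 2.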
#ifdef ASSERT
    {
      BLOCK_COMMENT("assert primitive array {");
      Label L;
      __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
      __ jcc(Assembler::greaterEqual, L);
      __ stop("must be a primitive array");
      __ bind(L);
      BLOCK_COMMENT("} assert primitive array done");
    }
#endif

    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                           r10, L_failed);

    // TypeArrayKlass
    //
    // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
    // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
    //

    const Register r10_offset = r10;    // array offset
    const Register rax_elsize = rax_lh; // element size

    __ movl(r10_offset, rax_lh);
    __ shrl(r10_offset, Klass::_lh_header_size_shift);
    __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset
    __ addptr(src, r10_offset); // src array offset
    __ addptr(dst, r10_offset); // dst array offset
    BLOCK_COMMENT("choose copy loop based on element size");
    __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize

    // next registers should be set before the jump to corresponding stub
    const Register from  = c_rarg0; // source array address
    const Register to    = c_rarg1; // destination array address
    const Register count = c_rarg2; // elements count

    // 'from', 'to', 'count' registers should be set in such order
    // since they are the same as 'src', 'src_pos', 'dst'.

    __ BIND(L_copy_bytes);
    __ cmpl(rax_elsize, 0);
    __ jccb(Assembler::notEqual, L_copy_shorts);
    __ lea(from, Address(src, src_pos, Address::times_1, 0)); // src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_1, 0)); // dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(byte_copy_entry));

    __ BIND(L_copy_shorts);
    __ cmpl(rax_elsize, LogBytesPerShort);
    __ jccb(Assembler::notEqual, L_copy_ints);
    __ lea(from, Address(src, src_pos, Address::times_2, 0)); // src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_2, 0)); // dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(short_copy_entry));

    __ BIND(L_copy_ints);
    __ cmpl(rax_elsize, LogBytesPerInt);
    __ jccb(Assembler::notEqual, L_copy_longs);
    __ lea(from, Address(src, src_pos, Address::times_4, 0)); // src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_4, 0)); // dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(int_copy_entry));

    __ BIND(L_copy_longs);
#ifdef ASSERT
    {
      BLOCK_COMMENT("assert long copy {");
      Label L;
      __ cmpl(rax_elsize, LogBytesPerLong);
      __ jcc(Assembler::equal, L);
      __ stop("must be long copy, but elsize is wrong");
      __ bind(L);
      BLOCK_COMMENT("} assert long copy done");
    }
#endif
    __ lea(from, Address(src, src_pos, Address::times_8, 0)); // src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_8, 0)); // dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(long_copy_entry));

    // ObjArrayKlass
    __ BIND(L_objArray);
    // live at this point:  r10_src_klass, r11_length, src[_pos], dst[_pos]

    Label L_plain_copy, L_checkcast_copy;
    //  test array classes for subtyping
    __ load_klass(rax, dst);
    __ cmpq(r10_src_klass, rax); // usual case is exact equality
    __ jcc(Assembler::notEqual, L_checkcast_copy);

    // Identically typed arrays can be copied without element-wise checks.
    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                           r10, L_failed);

    __ lea(from, Address(src, src_pos, TIMES_OOP,
                         arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
    __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
                         arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
    __ movl2ptr(count, r11_length); // length
    __ BIND(L_plain_copy);
    __ jump(RuntimeAddress(oop_copy_entry));

    __ BIND(L_checkcast_copy);
    // live at this point:  r10_src_klass, r11_length, rax (dst_klass)
    {
      // Before looking at dst.length, make sure dst is also an objArray.
      __ cmpl(Address(rax, lh_offset), objArray_lh);
      __ jcc(Assembler::notEqual, L_failed);

      // It is safe to examine both src.length and dst.length.
      arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                             rax, L_failed);

      const Register r11_dst_klass = r11;
      __ load_klass(r11_dst_klass, dst); // reload

      // Marshal the base address arguments now, freeing registers.
      __ lea(from, Address(src, src_pos, TIMES_OOP,
                           arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
                           arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ movl(count, length);      // length (reloaded)
      Register sco_temp = c_rarg3; // this register is free now
      assert_different_registers(from, to, count, sco_temp,
                                 r11_dst_klass, r10_src_klass);
      assert_clean_int(count, sco_temp);

      // Generate the type check.
      const int sco_offset = in_bytes(Klass::super_check_offset_offset());
      __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
      assert_clean_int(sco_temp, rax);
      generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);

      // Fetch destination element klass from the ObjArrayKlass header.
      int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
      __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
      __ movl(  sco_temp,      Address(r11_dst_klass, sco_offset));
      assert_clean_int(sco_temp, rax);

      // the checkcast_copy loop needs two extra arguments:
      assert(c_rarg3 == sco_temp, "#3 already in place");
      // Set up arguments for checkcast_copy_entry.
      setup_arg_regs(4);
      __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris
      __ jump(RuntimeAddress(checkcast_copy_entry));
    }

    __ BIND(L_failed);
    __ xorptr(rax, rax);
    __ notptr(rax); // return -1
    __ leave();     // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  void generate_arraycopy_stubs() {
    address entry;
    address entry_jbyte_arraycopy;
    address entry_jshort_arraycopy;
    address entry_jint_arraycopy;
    address entry_oop_arraycopy;
    address entry_jlong_arraycopy;
    address entry_checkcast_arraycopy;

    StubRoutines::_jbyte_disjoint_arraycopy  = generate_disjoint_byte_copy(false, &entry,
                                                                           "jbyte_disjoint_arraycopy");
    StubRoutines::_jbyte_arraycopy           = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy,
                                                                           "jbyte_arraycopy");

    StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry,
                                                                            "jshort_disjoint_arraycopy");
    StubRoutines::_jshort_arraycopy          = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy,
                                                                            "jshort_arraycopy");

    StubRoutines::_jint_disjoint_arraycopy   = generate_disjoint_int_oop_copy(false, false, &entry,
                                                                              "jint_disjoint_arraycopy");
    StubRoutines::_jint_arraycopy            = generate_conjoint_int_oop_copy(false, false, entry,
                                                                              &entry_jint_arraycopy, "jint_arraycopy");

    StubRoutines::_jlong_disjoint_arraycopy  = generate_disjoint_long_oop_copy(false, false, &entry,
                                                                               "jlong_disjoint_arraycopy");
    StubRoutines::_jlong_arraycopy           = generate_conjoint_long_oop_copy(false, false, entry,
                                                                               &entry_jlong_arraycopy, "jlong_arraycopy");


    if (UseCompressedOops) {
      StubRoutines::_oop_disjoint_arraycopy        = generate_disjoint_int_oop_copy(false, true, &entry,
                                                                                    "oop_disjoint_arraycopy");
      StubRoutines::_oop_arraycopy                 = generate_conjoint_int_oop_copy(false, true, entry,
                                                                                    &entry_oop_arraycopy, "oop_arraycopy");
      StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry,
                                                                                    "oop_disjoint_arraycopy_uninit",
                                                                                    /*dest_uninitialized*/true);
      StubRoutines::_oop_arraycopy_uninit          = generate_conjoint_int_oop_copy(false, true, entry,
                                                                                    NULL, "oop_arraycopy_uninit",
                                                                                    /*dest_uninitialized*/true);
    } else {
      StubRoutines::_oop_disjoint_arraycopy        = generate_disjoint_long_oop_copy(false, true, &entry,
                                                                                     "oop_disjoint_arraycopy");
      StubRoutines::_oop_arraycopy                 = generate_conjoint_long_oop_copy(false, true, entry,
                                                                                     &entry_oop_arraycopy, "oop_arraycopy");
      StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry,
                                                                                     "oop_disjoint_arraycopy_uninit",
                                                                                     /*dest_uninitialized*/true);
      StubRoutines::_oop_arraycopy_uninit          = generate_conjoint_long_oop_copy(false, true, entry,
                                                                                     NULL, "oop_arraycopy_uninit",
                                                                                     /*dest_uninitialized*/true);
    }

    StubRoutines::_checkcast_arraycopy        = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
    StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
                                                                        /*dest_uninitialized*/true);

    StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy",
                                                           entry_jbyte_arraycopy,
                                                           entry_jshort_arraycopy,
                                                           entry_jint_arraycopy,
                                                           entry_jlong_arraycopy);
    StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
                                                             entry_jbyte_arraycopy,
                                                             entry_jshort_arraycopy,
                                                             entry_jint_arraycopy,
                                                             entry_oop_arraycopy,
                                                             entry_jlong_arraycopy,
                                                             entry_checkcast_arraycopy);

    StubRoutines::_jbyte_fill          = generate_fill(T_BYTE,  false, "jbyte_fill");
    StubRoutines::_jshort_fill         = generate_fill(T_SHORT, false, "jshort_fill");
    StubRoutines::_jint_fill           = generate_fill(T_INT,   false, "jint_fill");
    StubRoutines::_arrayof_jbyte_fill  = generate_fill(T_BYTE,  true,  "arrayof_jbyte_fill");
    StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true,  "arrayof_jshort_fill");
    StubRoutines::_arrayof_jint_fill   = generate_fill(T_INT,   true,  "arrayof_jint_fill");

    // We don't generate specialized code for HeapWord-aligned source
    // arrays, so just use the code we've already generated
    StubRoutines::_arrayof_jbyte_disjoint_arraycopy  = StubRoutines::_jbyte_disjoint_arraycopy;
    StubRoutines::_arrayof_jbyte_arraycopy           = StubRoutines::_jbyte_arraycopy;

    StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy;
    StubRoutines::_arrayof_jshort_arraycopy          = StubRoutines::_jshort_arraycopy;

    StubRoutines::_arrayof_jint_disjoint_arraycopy   = StubRoutines::_jint_disjoint_arraycopy;
    StubRoutines::_arrayof_jint_arraycopy            = StubRoutines::_jint_arraycopy;

    StubRoutines::_arrayof_jlong_disjoint_arraycopy  = StubRoutines::_jlong_disjoint_arraycopy;
    StubRoutines::_arrayof_jlong_arraycopy           = StubRoutines::_jlong_arraycopy;

    StubRoutines::_arrayof_oop_disjoint_arraycopy    = StubRoutines::_oop_disjoint_arraycopy;
    StubRoutines::_arrayof_oop_arraycopy             = StubRoutines::_oop_arraycopy;

    StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit;
    StubRoutines::_arrayof_oop_arraycopy_uninit          = StubRoutines::_oop_arraycopy_uninit;
  }

  // AES intrinsic stubs
  enum {AESBlockSize = 16};

  address generate_key_shuffle_mask() {
    __ align(16);
    StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask");
    address start = __ pc();
    __ emit_data64( 0x0405060700010203, relocInfo::none );
    __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none );
    return start;
  }

  address generate_counter_shuffle_mask() {
    __ align(16);
    StubCodeMark mark(this, "StubRoutines", "counter_shuffle_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }

  // Utility routine for loading a 128-bit key word in little-endian format;
  // the caller can optionally specify that the shuffle mask is already in
  // an XMM register.
  void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
    __ movdqu(xmmdst, Address(key, offset));
    if (xmm_shuf_mask != NULL) {
      __ pshufb(xmmdst, xmm_shuf_mask);
    } else {
      __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    }
  }
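
  // The counter increment below is, in C-like pseudocode, a sketch of
  // what the pextrq/pinsrq sequence does on the two 64-bit halves:
  //
  //   lo += inc_delta;
  //   if (carry out of lo) hi += 1;
  //
  // addq sets CF and pinsrq leaves the flags untouched, so the jcc after
  // the low-half insert still sees the carry from the addition.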
  // Utility routine for incrementing the 128-bit counter (the iv in CTR mode).
  void inc_counter(Register reg, XMMRegister xmmdst, int inc_delta, Label& next_block) {
    __ pextrq(reg, xmmdst, 0x0);
    __ addq(reg, inc_delta);
    __ pinsrq(xmmdst, reg, 0x0);
    __ jcc(Assembler::carryClear, next_block); // jump if no carry
    __ pextrq(reg, xmmdst, 0x01); // Carry
    __ addq(reg, 0x01);
    __ pinsrq(xmmdst, reg, 0x01); // Carry end
    __ BIND(next_block);          // next instruction
  }

  // Arguments:
  //
  // Inputs:
  //   c_rarg0 - source byte array address
  //   c_rarg1 - destination byte array address
  //   c_rarg2 - K (key) in little endian int array
  //
  address generate_aescrypt_encryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
    Label L_doLast;
    address start = __ pc();

    const Register from   = c_rarg0; // source array address
    const Register to     = c_rarg1; // destination array address
    const Register key    = c_rarg2; // key array address
    const Register keylen = rax;

    const XMMRegister xmm_result        = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;
    // On win64 xmm6-xmm15 must be preserved so don't use them.
    const XMMRegister xmm_temp1         = xmm2;
    const XMMRegister xmm_temp2         = xmm3;
    const XMMRegister xmm_temp3         = xmm4;
    const XMMRegister xmm_temp4         = xmm5;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input

    // For encryption, the java expanded key ordering is just what we need
    // we don't know if the key is aligned, hence not using load-execute form

    load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask);
    __ pxor(xmm_result, xmm_temp1);

    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);
    __ aesenc(xmm_result, xmm_temp3);
    __ aesenc(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);
    __ aesenc(xmm_result, xmm_temp3);
    __ aesenc(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);

    __ cmpl(keylen, 44);
    __ jccb(Assembler::equal, L_doLast);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);

    __ cmpl(keylen, 52);
    __ jccb(Assembler::equal, L_doLast);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);

    __ BIND(L_doLast);
    __ aesenc(xmm_result, xmm_temp1);
    __ aesenclast(xmm_result, xmm_temp2);
    __ movdqu(Address(to, 0), xmm_result); // store the result
    __ xorptr(rax, rax); // return 0
    __ leave();          // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }


  // Arguments:
  //
  // Inputs:
  //   c_rarg0 - source byte array address
  //   c_rarg1 - destination byte array address
  //   c_rarg2 - K (key) in little endian int array
  //
  address generate_aescrypt_decryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
    Label L_doLast;
    address start = __ pc();

    const Register from   = c_rarg0; // source array address
    const Register to     = c_rarg1; // destination array address
    const Register key    = c_rarg2; // key array address
    const Register keylen = rax;

    const XMMRegister xmm_result        = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;
    // On win64 xmm6-xmm15 must be preserved so don't use them.
    const XMMRegister xmm_temp1         = xmm2;
    const XMMRegister xmm_temp2         = xmm3;
    const XMMRegister xmm_temp3         = xmm4;
    const XMMRegister xmm_temp4         = xmm5;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    __ movdqu(xmm_result, Address(from, 0));

    // For decryption, the java expanded key ordering is rotated one position
    // from what we want, so we start from 0x10 here and hit 0x00 last.
    // We don't know if the key is aligned, hence not using load-execute form.
    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);

    __ pxor  (xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);
    __ aesdec(xmm_result, xmm_temp3);
    __ aesdec(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);
    __ aesdec(xmm_result, xmm_temp3);
    __ aesdec(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask);

    __ cmpl(keylen, 44);
    __ jccb(Assembler::equal, L_doLast);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

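    // Longer keys fall through to the extra rounds below (keylen in ints:
    // 44 => AES-128 / 10 rounds, 52 => AES-192 / 12, 60 => AES-256 / 14).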
    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);

    __ cmpl(keylen, 52);
    __ jccb(Assembler::equal, L_doLast);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);

    __ BIND(L_doLast);
    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    // for decryption the aesdeclast operation is always on key+0x00
    __ aesdeclast(xmm_result, xmm_temp3);
    __ movdqu(Address(to, 0), xmm_result); // store the result
    __ xorptr(rax, rax); // return 0
    __ leave();          // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
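
  // CBC encryption chains each block into the next: with c[-1] = iv
  // (the r vector), the loops below compute, in effect,
  //
  //   for (i = 0; i < nblocks; i++)
  //     c[i] = AES_encrypt(p[i] ^ c[i - 1], key);
  //
  // which is why xmm_result (the running r vector) is simply XORed with
  // the next plaintext block at the top of every iteration.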
3224 if (VM_Version::supports_avx512vlbw()) { 3225 __ movl(rax, 0xffff); 3226 __ kmovql(k1, rax); 3227 } 3228 3229 #ifdef _WIN64 3230 // on win64, fill len_reg from stack position 3231 __ movl(len_reg, len_mem); 3232 #else 3233 __ push(len_reg); // Save 3234 #endif 3235 3236 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 3237 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3238 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0 3239 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) { 3240 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3241 offset += 0x10; 3242 } 3243 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 3244 3245 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3246 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3247 __ cmpl(rax, 44); 3248 __ jcc(Assembler::notEqual, L_key_192_256); 3249 3250 // 128 bit code follows here 3251 __ movptr(pos, 0); 3252 __ align(OptoLoopAlignment); 3253 3254 __ BIND(L_loopTop_128); 3255 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3256 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3257 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3258 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) { 3259 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3260 } 3261 __ aesenclast(xmm_result, xmm_key10); 3262 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3263 // no need to store r to memory until we exit 3264 __ addptr(pos, AESBlockSize); 3265 __ subptr(len_reg, AESBlockSize); 3266 __ jcc(Assembler::notEqual, L_loopTop_128); 3267 3268 __ BIND(L_exit); 3269 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object 3270 3271 #ifdef _WIN64 3272 __ movl(rax, len_mem); 3273 #else 3274 __ pop(rax); // return length 3275 #endif 3276 __ leave(); // required for proper stackwalking of RuntimeStub frame 3277 __ ret(0); 3278 3279 __ BIND(L_key_192_256); 3280 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 3281 load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask); 3282 load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask); 3283 __ cmpl(rax, 52); 3284 __ jcc(Assembler::notEqual, L_key_256); 3285 3286 // 192-bit code follows here (could be changed to use more xmm registers) 3287 __ movptr(pos, 0); 3288 __ align(OptoLoopAlignment); 3289 3290 __ BIND(L_loopTop_192); 3291 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3292 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3293 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3294 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) { 3295 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3296 } 3297 __ aesenclast(xmm_result, xmm_key12); 3298 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3299 // no need to store r to memory until we exit 3300 __ addptr(pos, AESBlockSize); 3301 __ subptr(len_reg, AESBlockSize); 3302 __ jcc(Assembler::notEqual, L_loopTop_192); 3303 __ jmp(L_exit); 3304 3305 __ BIND(L_key_256); 3306 // 256-bit code follows here (could be 
changed to use more xmm registers)
3307 load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask);
3308 __ movptr(pos, 0);
3309 __ align(OptoLoopAlignment);
3310
3311 __ BIND(L_loopTop_256);
3312 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
3313 __ pxor (xmm_result, xmm_temp); // xor with the current r vector
3314 __ pxor (xmm_result, xmm_key0); // do the aes rounds
3315 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) {
3316 __ aesenc(xmm_result, as_XMMRegister(rnum));
3317 }
3318 load_key(xmm_temp, key, 0xe0);
3319 __ aesenclast(xmm_result, xmm_temp);
3320 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3321 // no need to store r to memory until we exit
3322 __ addptr(pos, AESBlockSize);
3323 __ subptr(len_reg, AESBlockSize);
3324 __ jcc(Assembler::notEqual, L_loopTop_256);
3325 __ jmp(L_exit);
3326
3327 return start;
3328 }
3329
3330 // Safefetch stubs.
3331 void generate_safefetch(const char* name, int size, address* entry,
3332 address* fault_pc, address* continuation_pc) {
3333 // safefetch signatures:
3334 // int SafeFetch32(int* adr, int errValue);
3335 // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
3336 //
3337 // arguments:
3338 // c_rarg0 = adr
3339 // c_rarg1 = errValue
3340 //
3341 // result:
3342 // rax = *adr or errValue
3343
3344 StubCodeMark mark(this, "StubRoutines", name);
3345
3346 // Entry point, pc or function descriptor.
3347 *entry = __ pc();
3348
3349 // Load *adr into c_rarg1, may fault.
3350 *fault_pc = __ pc();
3351 switch (size) {
3352 case 4:
3353 // int32_t
3354 __ movl(c_rarg1, Address(c_rarg0, 0));
3355 break;
3356 case 8:
3357 // int64_t
3358 __ movq(c_rarg1, Address(c_rarg0, 0));
3359 break;
3360 default:
3361 ShouldNotReachHere();
3362 }
3363
3364 // return errValue or *adr
3365 *continuation_pc = __ pc();
3366 __ movq(rax, c_rarg1);
3367 __ ret(0);
3368 }
3369
3370 // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time
3371 // to hide instruction latency
3372 //
3373 // Arguments:
3374 //
3375 // Inputs:
3376 // c_rarg0 - source byte array address
3377 // c_rarg1 - destination byte array address
3378 // c_rarg2 - K (key) in little endian int array
3379 // c_rarg3 - r vector byte array address
3380 // c_rarg4 - input length
3381 //
3382 // Output:
3383 // rax - input length
3384 //
3385 address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
3386 assert(UseAES, "need AES instructions and misaligned SSE support");
3387 __ align(CodeEntryAlignment);
3388 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
3389 address start = __ pc();
3390
3391 const Register from = c_rarg0; // source array address
3392 const Register to = c_rarg1; // destination array address
3393 const Register key = c_rarg2; // key array address
3394 const Register rvec = c_rarg3; // r byte array initialized from initvector array address
3395 // and left with the results of the last encryption block
3396 #ifndef _WIN64
3397 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16)
3398 #else
3399 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64
3400 const Register len_reg = r11; // pick the volatile windows register
3401 #endif
3402 const Register pos = rax;
3403
3404 const int PARALLEL_FACTOR = 4;
3405 const int ROUNDS[3] = { 10, 12, 14 }; // aes rounds for key128, key192, key256
3406
3407 Label L_exit;
3408 Label
L_singleBlock_loopTopHead[3]; // 128, 192, 256 3409 Label L_singleBlock_loopTopHead2[3]; // 128, 192, 256 3410 Label L_singleBlock_loopTop[3]; // 128, 192, 256 3411 Label L_multiBlock_loopTopHead[3]; // 128, 192, 256 3412 Label L_multiBlock_loopTop[3]; // 128, 192, 256 3413 3414 // keys 0-10 preloaded into xmm5-xmm15 3415 const int XMM_REG_NUM_KEY_FIRST = 5; 3416 const int XMM_REG_NUM_KEY_LAST = 15; 3417 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3418 const XMMRegister xmm_key_last = as_XMMRegister(XMM_REG_NUM_KEY_LAST); 3419 3420 __ enter(); // required for proper stackwalking of RuntimeStub frame 3421 3422 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3423 // context for the registers used, where all instructions below are using 128-bit mode 3424 // On EVEX without VL and BW, these instructions will all be AVX. 3425 if (VM_Version::supports_avx512vlbw()) { 3426 __ movl(rax, 0xffff); 3427 __ kmovql(k1, rax); 3428 } 3429 3430 #ifdef _WIN64 3431 // on win64, fill len_reg from stack position 3432 __ movl(len_reg, len_mem); 3433 #else 3434 __ push(len_reg); // Save 3435 #endif 3436 __ push(rbx); 3437 // the java expanded key ordering is rotated one position from what we want 3438 // so we start from 0x10 here and hit 0x00 last 3439 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 3440 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3441 // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00 3442 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) { 3443 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3444 offset += 0x10; 3445 } 3446 load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask); 3447 3448 const XMMRegister xmm_prev_block_cipher = xmm1; // holds cipher of previous block 3449 3450 // registers holding the four results in the parallelized loop 3451 const XMMRegister xmm_result0 = xmm0; 3452 const XMMRegister xmm_result1 = xmm2; 3453 const XMMRegister xmm_result2 = xmm3; 3454 const XMMRegister xmm_result3 = xmm4; 3455 3456 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec 3457 3458 __ xorptr(pos, pos); 3459 3460 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3461 __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3462 __ cmpl(rbx, 52); 3463 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[1]); 3464 __ cmpl(rbx, 60); 3465 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[2]); 3466 3467 #define DoFour(opc, src_reg) \ 3468 __ opc(xmm_result0, src_reg); \ 3469 __ opc(xmm_result1, src_reg); \ 3470 __ opc(xmm_result2, src_reg); \ 3471 __ opc(xmm_result3, src_reg); \ 3472 3473 for (int k = 0; k < 3; ++k) { 3474 __ BIND(L_multiBlock_loopTopHead[k]); 3475 if (k != 0) { 3476 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left 3477 __ jcc(Assembler::less, L_singleBlock_loopTopHead2[k]); 3478 } 3479 if (k == 1) { 3480 __ subptr(rsp, 6 * wordSize); 3481 __ movdqu(Address(rsp, 0), xmm15); //save last_key from xmm15 3482 load_key(xmm15, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0 3483 __ movdqu(Address(rsp, 2 * wordSize), xmm15); 3484 load_key(xmm1, key, 0xc0); // 0xc0; 3485 __ movdqu(Address(rsp, 4 * wordSize), xmm1); 3486 } else if (k == 2) { 3487 __ subptr(rsp, 10 * wordSize); 3488 __ 
movdqu(Address(rsp, 0), xmm15); // save last_key from xmm15
3489 load_key(xmm15, key, 0xd0); // 0xd0; 256-bit key goes up to 0xe0
3490 __ movdqu(Address(rsp, 6 * wordSize), xmm15);
3491 load_key(xmm1, key, 0xe0); // 0xe0;
3492 __ movdqu(Address(rsp, 8 * wordSize), xmm1);
3493 load_key(xmm15, key, 0xb0); // 0xb0;
3494 __ movdqu(Address(rsp, 2 * wordSize), xmm15);
3495 load_key(xmm1, key, 0xc0); // 0xc0;
3496 __ movdqu(Address(rsp, 4 * wordSize), xmm1);
3497 }
3498 __ align(OptoLoopAlignment);
3499 __ BIND(L_multiBlock_loopTop[k]);
3500 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left
3501 __ jcc(Assembler::less, L_singleBlock_loopTopHead[k]);
3502
3503 if (k != 0) {
3504 __ movdqu(xmm15, Address(rsp, 2 * wordSize));
3505 __ movdqu(xmm1, Address(rsp, 4 * wordSize));
3506 }
3507
3508 __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); // get next 4 blocks into xmm_result registers
3509 __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
3510 __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
3511 __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize));
3512
3513 DoFour(pxor, xmm_key_first);
3514 if (k == 0) {
3515 for (int rnum = 1; rnum < ROUNDS[k]; rnum++) {
3516 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
3517 }
3518 DoFour(aesdeclast, xmm_key_last);
3519 } else if (k == 1) {
3520 for (int rnum = 1; rnum <= ROUNDS[k]-2; rnum++) {
3521 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
3522 }
3523 __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again.
3524 DoFour(aesdec, xmm1); // key : 0xc0
3525 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again
3526 DoFour(aesdeclast, xmm_key_last);
3527 } else if (k == 2) {
3528 for (int rnum = 1; rnum <= ROUNDS[k] - 4; rnum++) {
3529 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
3530 }
3531 DoFour(aesdec, xmm1); // key : 0xc0
3532 __ movdqu(xmm15, Address(rsp, 6 * wordSize));
3533 __ movdqu(xmm1, Address(rsp, 8 * wordSize));
3534 DoFour(aesdec, xmm15); // key : 0xd0
3535 __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again.
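// (Round keys 0xb0-0xe0 exceed the xmm0-xmm15 register budget; they were
// spilled to stack slots before the loop and are reloaded here mid-rounds.)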
3536 DoFour(aesdec, xmm1); // key : 0xe0 3537 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again 3538 DoFour(aesdeclast, xmm_key_last); 3539 } 3540 3541 // for each result, xor with the r vector of previous cipher block 3542 __ pxor(xmm_result0, xmm_prev_block_cipher); 3543 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 3544 __ pxor(xmm_result1, xmm_prev_block_cipher); 3545 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 3546 __ pxor(xmm_result2, xmm_prev_block_cipher); 3547 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 3548 __ pxor(xmm_result3, xmm_prev_block_cipher); 3549 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3 * AESBlockSize)); // this will carry over to next set of blocks 3550 if (k != 0) { 3551 __ movdqu(Address(rvec, 0x00), xmm_prev_block_cipher); 3552 } 3553 3554 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output 3555 __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1); 3556 __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2); 3557 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3); 3558 3559 __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); 3560 __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); 3561 __ jmp(L_multiBlock_loopTop[k]); 3562 3563 // registers used in the non-parallelized loops 3564 // xmm register assignments for the loops below 3565 const XMMRegister xmm_result = xmm0; 3566 const XMMRegister xmm_prev_block_cipher_save = xmm2; 3567 const XMMRegister xmm_key11 = xmm3; 3568 const XMMRegister xmm_key12 = xmm4; 3569 const XMMRegister key_tmp = xmm4; 3570 3571 __ BIND(L_singleBlock_loopTopHead[k]); 3572 if (k == 1) { 3573 __ addptr(rsp, 6 * wordSize); 3574 } else if (k == 2) { 3575 __ addptr(rsp, 10 * wordSize); 3576 } 3577 __ cmpptr(len_reg, 0); // any blocks left?? 
3578 __ jcc(Assembler::equal, L_exit);
3579 __ BIND(L_singleBlock_loopTopHead2[k]);
3580 if (k == 1) {
3581 load_key(xmm_key11, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0
3582 load_key(xmm_key12, key, 0xc0); // 0xc0; 192-bit key goes up to 0xc0
3583 }
3584 if (k == 2) {
3585 load_key(xmm_key11, key, 0xb0); // 0xb0; 256-bit key goes up to 0xe0
3586 }
3587 __ align(OptoLoopAlignment);
3588 __ BIND(L_singleBlock_loopTop[k]);
3589 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
3590 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector
3591 __ pxor(xmm_result, xmm_key_first); // do the aes dec rounds
3592 for (int rnum = 1; rnum <= 9 ; rnum++) {
3593 __ aesdec(xmm_result, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
3594 }
3595 if (k == 1) {
3596 __ aesdec(xmm_result, xmm_key11);
3597 __ aesdec(xmm_result, xmm_key12);
3598 }
3599 if (k == 2) {
3600 __ aesdec(xmm_result, xmm_key11);
3601 load_key(key_tmp, key, 0xc0);
3602 __ aesdec(xmm_result, key_tmp);
3603 load_key(key_tmp, key, 0xd0);
3604 __ aesdec(xmm_result, key_tmp);
3605 load_key(key_tmp, key, 0xe0);
3606 __ aesdec(xmm_result, key_tmp);
3607 }
3608
3609 __ aesdeclast(xmm_result, xmm_key_last); // xmm15 always came from key+0
3610 __ pxor(xmm_result, xmm_prev_block_cipher); // xor with the current r vector
3611 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
3612 // no need to store r to memory until we exit
3613 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block
3614 __ addptr(pos, AESBlockSize);
3615 __ subptr(len_reg, AESBlockSize);
3616 __ jcc(Assembler::notEqual, L_singleBlock_loopTop[k]);
3617 if (k != 2) {
3618 __ jmp(L_exit);
3619 }
3620 } //for 128/192/256
3621
3622 __ BIND(L_exit);
3623 __ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object
3624 __ pop(rbx);
3625 #ifdef _WIN64
3626 __ movl(rax, len_mem);
3627 #else
3628 __ pop(rax); // return length
3629 #endif
3630 __ leave(); // required for proper stackwalking of RuntimeStub frame
3631 __ ret(0);
3632 return start;
3633 }
3634
3635 address generate_upper_word_mask() {
3636 __ align(64);
3637 StubCodeMark mark(this, "StubRoutines", "upper_word_mask");
3638 address start = __ pc();
3639 __ emit_data64(0x0000000000000000, relocInfo::none);
3640 __ emit_data64(0xFFFFFFFF00000000, relocInfo::none);
3641 return start;
3642 }
3643
3644 address generate_shuffle_byte_flip_mask() {
3645 __ align(64);
3646 StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask");
3647 address start = __ pc();
3648 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
3649 __ emit_data64(0x0001020304050607, relocInfo::none);
3650 return start;
3651 }
3652
3653 // ofs and limit are used for multi-block byte array.
3654 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
3655 address generate_sha1_implCompress(bool multi_block, const char *name) {
3656 __ align(CodeEntryAlignment);
3657 StubCodeMark mark(this, "StubRoutines", name);
3658 address start = __ pc();
3659
3660 Register buf = c_rarg0;
3661 Register state = c_rarg1;
3662 Register ofs = c_rarg2;
3663 Register limit = c_rarg3;
3664
3665 const XMMRegister abcd = xmm0;
3666 const XMMRegister e0 = xmm1;
3667 const XMMRegister e1 = xmm2;
3668 const XMMRegister msg0 = xmm3;
3669
3670 const XMMRegister msg1 = xmm4;
3671 const XMMRegister msg2 = xmm5;
3672 const XMMRegister msg3 = xmm6;
3673 const XMMRegister shuf_mask = xmm7;
3674
3675 __ enter();
3676
3677 __ subptr(rsp, 4 * wordSize);
3678
3679 __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask,
3680 buf, state, ofs, limit, rsp, multi_block);
3681
3682 __ addptr(rsp, 4 * wordSize);
3683
3684 __ leave();
3685 __ ret(0);
3686 return start;
3687 }
3688
3689 address generate_pshuffle_byte_flip_mask() {
3690 __ align(64);
3691 StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask");
3692 address start = __ pc();
3693 __ emit_data64(0x0405060700010203, relocInfo::none);
3694 __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
3695
3696 if (VM_Version::supports_avx2()) {
3697 __ emit_data64(0x0405060700010203, relocInfo::none); // second copy
3698 __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
3699 // _SHUF_00BA
3700 __ emit_data64(0x0b0a090803020100, relocInfo::none);
3701 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3702 __ emit_data64(0x0b0a090803020100, relocInfo::none);
3703 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3704 // _SHUF_DC00
3705 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3706 __ emit_data64(0x0b0a090803020100, relocInfo::none);
3707 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3708 __ emit_data64(0x0b0a090803020100, relocInfo::none);
3709 }
3710
3711 return start;
3712 }
3713
3714 // Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
3715 address generate_pshuffle_byte_flip_mask_sha512() {
3716 __ align(32);
3717 StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask_sha512");
3718 address start = __ pc();
3719 if (VM_Version::supports_avx2()) {
3720 __ emit_data64(0x0001020304050607, relocInfo::none); // PSHUFFLE_BYTE_FLIP_MASK
3721 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
3722 __ emit_data64(0x1011121314151617, relocInfo::none);
3723 __ emit_data64(0x18191a1b1c1d1e1f, relocInfo::none);
3724 __ emit_data64(0x0000000000000000, relocInfo::none); // MASK_YMM_LO
3725 __ emit_data64(0x0000000000000000, relocInfo::none);
3726 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3727 __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
3728 }
3729
3730 return start;
3731 }
3732
3733 // ofs and limit are used for multi-block byte array.
3734 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit) 3735 address generate_sha256_implCompress(bool multi_block, const char *name) { 3736 assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), ""); 3737 __ align(CodeEntryAlignment); 3738 StubCodeMark mark(this, "StubRoutines", name); 3739 address start = __ pc(); 3740 3741 Register buf = c_rarg0; 3742 Register state = c_rarg1; 3743 Register ofs = c_rarg2; 3744 Register limit = c_rarg3; 3745 3746 const XMMRegister msg = xmm0; 3747 const XMMRegister state0 = xmm1; 3748 const XMMRegister state1 = xmm2; 3749 const XMMRegister msgtmp0 = xmm3; 3750 3751 const XMMRegister msgtmp1 = xmm4; 3752 const XMMRegister msgtmp2 = xmm5; 3753 const XMMRegister msgtmp3 = xmm6; 3754 const XMMRegister msgtmp4 = xmm7; 3755 3756 const XMMRegister shuf_mask = xmm8; 3757 3758 __ enter(); 3759 3760 __ subptr(rsp, 4 * wordSize); 3761 3762 if (VM_Version::supports_sha()) { 3763 __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3764 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3765 } else if (VM_Version::supports_avx2()) { 3766 __ sha256_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3767 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3768 } 3769 __ addptr(rsp, 4 * wordSize); 3770 __ vzeroupper(); 3771 __ leave(); 3772 __ ret(0); 3773 return start; 3774 } 3775 3776 address generate_sha512_implCompress(bool multi_block, const char *name) { 3777 assert(VM_Version::supports_avx2(), ""); 3778 assert(VM_Version::supports_bmi2(), ""); 3779 __ align(CodeEntryAlignment); 3780 StubCodeMark mark(this, "StubRoutines", name); 3781 address start = __ pc(); 3782 3783 Register buf = c_rarg0; 3784 Register state = c_rarg1; 3785 Register ofs = c_rarg2; 3786 Register limit = c_rarg3; 3787 3788 const XMMRegister msg = xmm0; 3789 const XMMRegister state0 = xmm1; 3790 const XMMRegister state1 = xmm2; 3791 const XMMRegister msgtmp0 = xmm3; 3792 const XMMRegister msgtmp1 = xmm4; 3793 const XMMRegister msgtmp2 = xmm5; 3794 const XMMRegister msgtmp3 = xmm6; 3795 const XMMRegister msgtmp4 = xmm7; 3796 3797 const XMMRegister shuf_mask = xmm8; 3798 3799 __ enter(); 3800 3801 __ sha512_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3802 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3803 3804 __ vzeroupper(); 3805 __ leave(); 3806 __ ret(0); 3807 return start; 3808 } 3809 3810 // This is a version of CTR/AES crypt which does 6 blocks in a loop at a time 3811 // to hide instruction latency 3812 // 3813 // Arguments: 3814 // 3815 // Inputs: 3816 // c_rarg0 - source byte array address 3817 // c_rarg1 - destination byte array address 3818 // c_rarg2 - K (key) in little endian int array 3819 // c_rarg3 - counter vector byte array address 3820 // Linux 3821 // c_rarg4 - input length 3822 // c_rarg5 - saved encryptedCounter start 3823 // rbp + 6 * wordSize - saved used length 3824 // Windows 3825 // rbp + 6 * wordSize - input length 3826 // rbp + 7 * wordSize - saved encryptedCounter start 3827 // rbp + 8 * wordSize - saved used length 3828 // 3829 // Output: 3830 // rax - input length 3831 // 3832 address generate_counterMode_AESCrypt_Parallel() { 3833 assert(UseAES, "need AES instructions and misaligned SSE support"); 3834 __ align(CodeEntryAlignment); 3835 StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt"); 3836 address start = __ pc(); 3837 const Register from = c_rarg0; // source array address 3838 const Register to = c_rarg1; 
// destination array address
3839 const Register key = c_rarg2; // key array address
3840 const Register counter = c_rarg3; // counter byte array initialized from counter array address
3841 // and updated with the incremented counter in the end
3842 #ifndef _WIN64
3843 const Register len_reg = c_rarg4;
3844 const Register saved_encCounter_start = c_rarg5;
3845 const Register used_addr = r10;
3846 const Address used_mem(rbp, 2 * wordSize);
3847 const Register used = r11;
3848 #else
3849 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64
3850 const Address saved_encCounter_mem(rbp, 7 * wordSize); // saved encryptedCounter start is on stack on Win64
3851 const Address used_mem(rbp, 8 * wordSize); // saved used length is on stack on Win64
3852 const Register len_reg = r10; // pick the first volatile windows register
3853 const Register saved_encCounter_start = r11;
3854 const Register used_addr = r13;
3855 const Register used = r14;
3856 #endif
3857 const Register pos = rax;
3858
3859 const int PARALLEL_FACTOR = 6;
3860 const XMMRegister xmm_counter_shuf_mask = xmm0;
3861 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front
3862 const XMMRegister xmm_curr_counter = xmm2;
3863
3864 const XMMRegister xmm_key_tmp0 = xmm3;
3865 const XMMRegister xmm_key_tmp1 = xmm4;
3866
3867 // registers holding the six results in the parallelized loop
3868 const XMMRegister xmm_result0 = xmm5;
3869 const XMMRegister xmm_result1 = xmm6;
3870 const XMMRegister xmm_result2 = xmm7;
3871 const XMMRegister xmm_result3 = xmm8;
3872 const XMMRegister xmm_result4 = xmm9;
3873 const XMMRegister xmm_result5 = xmm10;
3874
3875 const XMMRegister xmm_from0 = xmm11;
3876 const XMMRegister xmm_from1 = xmm12;
3877 const XMMRegister xmm_from2 = xmm13;
3878 const XMMRegister xmm_from3 = xmm14; // the last one is xmm14; we have to preserve it on WIN64.
3879 const XMMRegister xmm_from4 = xmm3; // reuse xmm3~xmm4; xmm_key_tmp0~1 are not needed while loading the input text
3880 const XMMRegister xmm_from5 = xmm4;
3881
3882 // for key_128, key_192, key_256
3883 const int rounds[3] = {10, 12, 14};
3884 Label L_exit_preLoop, L_preLoop_start;
3885 Label L_multiBlock_loopTop[3];
3886 Label L_singleBlockLoopTop[3];
3887 Label L__incCounter[3][6]; // for 6 blocks
3888 Label L__incCounter_single[3]; // for single block, key128, key192, key256
3889 Label L_processTail_insr[3], L_processTail_4_insr[3], L_processTail_2_insr[3], L_processTail_1_insr[3], L_processTail_exit_insr[3];
3890 Label L_processTail_extr[3], L_processTail_4_extr[3], L_processTail_2_extr[3], L_processTail_1_extr[3], L_processTail_exit_extr[3];
3891
3892 Label L_exit;
3893
3894 __ enter(); // required for proper stackwalking of RuntimeStub frame
3895
3896 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
3897 // context for the registers used, where all instructions below are using 128-bit mode
3898 // On EVEX without VL and BW, these instructions will all be AVX.
3899 if (VM_Version::supports_avx512vlbw()) {
3900 __ movl(rax, 0xffff);
3901 __ kmovql(k1, rax);
3902 }
3903
3904 #ifdef _WIN64
3905 // allocate spill slots for r13, r14
3906 enum {
3907 saved_r13_offset,
3908 saved_r14_offset
3909 };
3910 __ subptr(rsp, 2 * wordSize);
3911 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
3912 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);
3913
3914 // on win64, fill len_reg from stack position
3915 __ movl(len_reg, len_mem);
3916 __ movptr(saved_encCounter_start, saved_encCounter_mem);
3917 __ movptr(used_addr, used_mem);
3918 __ movl(used, Address(used_addr, 0));
3919 #else
3920 __ push(len_reg); // Save
3921 __ movptr(used_addr, used_mem);
3922 __ movl(used, Address(used_addr, 0));
3923 #endif
3924
3925 __ push(rbx); // Save RBX
3926 __ movdqu(xmm_curr_counter, Address(counter, 0x00)); // initialize counter with initial counter
3927 __ movdqu(xmm_counter_shuf_mask, ExternalAddress(StubRoutines::x86::counter_shuffle_mask_addr()), pos); // pos as scratch
3928 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); // counter is shuffled
3929 __ movptr(pos, 0);
3930
3931 // Use the partially used encrypted counter from the last invocation
3932 __ BIND(L_preLoop_start);
3933 __ cmpptr(used, 16);
3934 __ jcc(Assembler::aboveEqual, L_exit_preLoop);
3935 __ cmpptr(len_reg, 0);
3936 __ jcc(Assembler::lessEqual, L_exit_preLoop);
3937 __ movb(rbx, Address(saved_encCounter_start, used));
3938 __ xorb(rbx, Address(from, pos));
3939 __ movb(Address(to, pos), rbx);
3940 __ addptr(pos, 1);
3941 __ addptr(used, 1);
3942 __ subptr(len_reg, 1);
3943
3944 __ jmp(L_preLoop_start);
3945
3946 __ BIND(L_exit_preLoop);
3947 __ movl(Address(used_addr, 0), used);
3948
3949 // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
3950 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()), rbx); // rbx as scratch
3951 __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
3952 __ cmpl(rbx, 52);
3953 __ jcc(Assembler::equal, L_multiBlock_loopTop[1]);
3954 __ cmpl(rbx, 60);
3955 __ jcc(Assembler::equal, L_multiBlock_loopTop[2]);
3956
3957 #define CTR_DoSix(opc, src_reg) \
3958 __ opc(xmm_result0, src_reg); \
3959 __ opc(xmm_result1, src_reg); \
3960 __ opc(xmm_result2, src_reg); \
3961 __ opc(xmm_result3, src_reg); \
3962 __ opc(xmm_result4, src_reg); \
3963 __ opc(xmm_result5, src_reg);
3964
3965 // k == 0 : generate code for key_128
3966 // k == 1 : generate code for key_192
3967 // k == 2 : generate code for key_256
3968 for (int k = 0; k < 3; ++k) {
3969 // multi-block loop starts here
3970 __ align(OptoLoopAlignment);
3971 __ BIND(L_multiBlock_loopTop[k]);
3972 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least PARALLEL_FACTOR blocks left
3973 __ jcc(Assembler::less, L_singleBlockLoopTop[k]);
3974 load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);
3975
3976 // load, then increase counters
3977 CTR_DoSix(movdqa, xmm_curr_counter);
3978 inc_counter(rbx, xmm_result1, 0x01, L__incCounter[k][0]);
3979 inc_counter(rbx, xmm_result2, 0x02, L__incCounter[k][1]);
3980 inc_counter(rbx, xmm_result3, 0x03, L__incCounter[k][2]);
3981 inc_counter(rbx, xmm_result4, 0x04, L__incCounter[k][3]);
3982 inc_counter(rbx, xmm_result5, 0x05, L__incCounter[k][4]);
3983 inc_counter(rbx, xmm_curr_counter, 0x06, L__incCounter[k][5]);
3984 CTR_DoSix(pshufb, xmm_counter_shuf_mask); // after incrementing, shuffle the counters back for PXOR
3985 CTR_DoSix(pxor, xmm_key_tmp0); // PXOR with
Round 0 key
3986
3987 // load two ROUND_KEYs at a time
3988 for (int i = 1; i < rounds[k]; ) {
3989 load_key(xmm_key_tmp1, key, (0x10 * i), xmm_key_shuf_mask);
3990 load_key(xmm_key_tmp0, key, (0x10 * (i+1)), xmm_key_shuf_mask);
3991 CTR_DoSix(aesenc, xmm_key_tmp1);
3992 i++;
3993 if (i != rounds[k]) {
3994 CTR_DoSix(aesenc, xmm_key_tmp0);
3995 } else {
3996 CTR_DoSix(aesenclast, xmm_key_tmp0);
3997 }
3998 i++;
3999 }
4000
4001 // get next PARALLEL_FACTOR blocks into xmm_result registers
4002 __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
4003 __ movdqu(xmm_from1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
4004 __ movdqu(xmm_from2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
4005 __ movdqu(xmm_from3, Address(from, pos, Address::times_1, 3 * AESBlockSize));
4006 __ movdqu(xmm_from4, Address(from, pos, Address::times_1, 4 * AESBlockSize));
4007 __ movdqu(xmm_from5, Address(from, pos, Address::times_1, 5 * AESBlockSize));
4008
4009 __ pxor(xmm_result0, xmm_from0);
4010 __ pxor(xmm_result1, xmm_from1);
4011 __ pxor(xmm_result2, xmm_from2);
4012 __ pxor(xmm_result3, xmm_from3);
4013 __ pxor(xmm_result4, xmm_from4);
4014 __ pxor(xmm_result5, xmm_from5);
4015
4016 // store 6 results into the next 96 bytes of output
4017 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
4018 __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1);
4019 __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2);
4020 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3);
4021 __ movdqu(Address(to, pos, Address::times_1, 4 * AESBlockSize), xmm_result4);
4022 __ movdqu(Address(to, pos, Address::times_1, 5 * AESBlockSize), xmm_result5);
4023
4024 __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); // advance the position in the cipher text
4025 __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // decrease the remaining length
4026 __ jmp(L_multiBlock_loopTop[k]);
4027
4028 // singleBlock starts here
4029 __ align(OptoLoopAlignment);
4030 __ BIND(L_singleBlockLoopTop[k]);
4031 __ cmpptr(len_reg, 0);
4032 __ jcc(Assembler::lessEqual, L_exit);
4033 load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);
4034 __ movdqa(xmm_result0, xmm_curr_counter);
4035 inc_counter(rbx, xmm_curr_counter, 0x01, L__incCounter_single[k]);
4036 __ pshufb(xmm_result0, xmm_counter_shuf_mask);
4037 __ pxor(xmm_result0, xmm_key_tmp0);
4038 for (int i = 1; i < rounds[k]; i++) {
4039 load_key(xmm_key_tmp0, key, (0x10 * i), xmm_key_shuf_mask);
4040 __ aesenc(xmm_result0, xmm_key_tmp0);
4041 }
4042 load_key(xmm_key_tmp0, key, (rounds[k] * 0x10), xmm_key_shuf_mask);
4043 __ aesenclast(xmm_result0, xmm_key_tmp0);
4044 __ cmpptr(len_reg, AESBlockSize);
4045 __ jcc(Assembler::less, L_processTail_insr[k]);
4046 __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
4047 __ pxor(xmm_result0, xmm_from0);
4048 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
4049 __ addptr(pos, AESBlockSize);
4050 __ subptr(len_reg, AESBlockSize);
4051 __ jmp(L_singleBlockLoopTop[k]);
4052 __ BIND(L_processTail_insr[k]); // Process the tail part of the input array
4053 __ addptr(pos, len_reg); // 1.
Insert bytes from src array into xmm_from0 register 4054 __ testptr(len_reg, 8); 4055 __ jcc(Assembler::zero, L_processTail_4_insr[k]); 4056 __ subptr(pos,8); 4057 __ pinsrq(xmm_from0, Address(from, pos), 0); 4058 __ BIND(L_processTail_4_insr[k]); 4059 __ testptr(len_reg, 4); 4060 __ jcc(Assembler::zero, L_processTail_2_insr[k]); 4061 __ subptr(pos,4); 4062 __ pslldq(xmm_from0, 4); 4063 __ pinsrd(xmm_from0, Address(from, pos), 0); 4064 __ BIND(L_processTail_2_insr[k]); 4065 __ testptr(len_reg, 2); 4066 __ jcc(Assembler::zero, L_processTail_1_insr[k]); 4067 __ subptr(pos, 2); 4068 __ pslldq(xmm_from0, 2); 4069 __ pinsrw(xmm_from0, Address(from, pos), 0); 4070 __ BIND(L_processTail_1_insr[k]); 4071 __ testptr(len_reg, 1); 4072 __ jcc(Assembler::zero, L_processTail_exit_insr[k]); 4073 __ subptr(pos, 1); 4074 __ pslldq(xmm_from0, 1); 4075 __ pinsrb(xmm_from0, Address(from, pos), 0); 4076 __ BIND(L_processTail_exit_insr[k]); 4077 4078 __ movdqu(Address(saved_encCounter_start, 0), xmm_result0); // 2. Perform pxor of the encrypted counter and plaintext Bytes. 4079 __ pxor(xmm_result0, xmm_from0); // Also the encrypted counter is saved for next invocation. 4080 4081 __ testptr(len_reg, 8); 4082 __ jcc(Assembler::zero, L_processTail_4_extr[k]); // 3. Extract bytes from xmm_result0 into the dest. array 4083 __ pextrq(Address(to, pos), xmm_result0, 0); 4084 __ psrldq(xmm_result0, 8); 4085 __ addptr(pos, 8); 4086 __ BIND(L_processTail_4_extr[k]); 4087 __ testptr(len_reg, 4); 4088 __ jcc(Assembler::zero, L_processTail_2_extr[k]); 4089 __ pextrd(Address(to, pos), xmm_result0, 0); 4090 __ psrldq(xmm_result0, 4); 4091 __ addptr(pos, 4); 4092 __ BIND(L_processTail_2_extr[k]); 4093 __ testptr(len_reg, 2); 4094 __ jcc(Assembler::zero, L_processTail_1_extr[k]); 4095 __ pextrw(Address(to, pos), xmm_result0, 0); 4096 __ psrldq(xmm_result0, 2); 4097 __ addptr(pos, 2); 4098 __ BIND(L_processTail_1_extr[k]); 4099 __ testptr(len_reg, 1); 4100 __ jcc(Assembler::zero, L_processTail_exit_extr[k]); 4101 __ pextrb(Address(to, pos), xmm_result0, 0); 4102 4103 __ BIND(L_processTail_exit_extr[k]); 4104 __ movl(Address(used_addr, 0), len_reg); 4105 __ jmp(L_exit); 4106 4107 } 4108 4109 __ BIND(L_exit); 4110 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); //counter is shuffled back. 4111 __ movdqu(Address(counter, 0), xmm_curr_counter); //save counter back 4112 __ pop(rbx); // pop the saved RBX. 
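// Restore the remaining saved registers and return the original input length in rax.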
4113 #ifdef _WIN64 4114 __ movl(rax, len_mem); 4115 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 4116 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 4117 __ addptr(rsp, 2 * wordSize); 4118 #else 4119 __ pop(rax); // return 'len' 4120 #endif 4121 __ leave(); // required for proper stackwalking of RuntimeStub frame 4122 __ ret(0); 4123 return start; 4124 } 4125 4126 // byte swap x86 long 4127 address generate_ghash_long_swap_mask() { 4128 __ align(CodeEntryAlignment); 4129 StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask"); 4130 address start = __ pc(); 4131 __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none ); 4132 __ emit_data64(0x0706050403020100, relocInfo::none ); 4133 return start; 4134 } 4135 4136 // byte swap x86 byte array 4137 address generate_ghash_byte_swap_mask() { 4138 __ align(CodeEntryAlignment); 4139 StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask"); 4140 address start = __ pc(); 4141 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none ); 4142 __ emit_data64(0x0001020304050607, relocInfo::none ); 4143 return start; 4144 } 4145 4146 /* Single and multi-block ghash operations */ 4147 address generate_ghash_processBlocks() { 4148 __ align(CodeEntryAlignment); 4149 Label L_ghash_loop, L_exit; 4150 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 4151 address start = __ pc(); 4152 4153 const Register state = c_rarg0; 4154 const Register subkeyH = c_rarg1; 4155 const Register data = c_rarg2; 4156 const Register blocks = c_rarg3; 4157 4158 const XMMRegister xmm_temp0 = xmm0; 4159 const XMMRegister xmm_temp1 = xmm1; 4160 const XMMRegister xmm_temp2 = xmm2; 4161 const XMMRegister xmm_temp3 = xmm3; 4162 const XMMRegister xmm_temp4 = xmm4; 4163 const XMMRegister xmm_temp5 = xmm5; 4164 const XMMRegister xmm_temp6 = xmm6; 4165 const XMMRegister xmm_temp7 = xmm7; 4166 const XMMRegister xmm_temp8 = xmm8; 4167 const XMMRegister xmm_temp9 = xmm9; 4168 const XMMRegister xmm_temp10 = xmm10; 4169 4170 __ enter(); 4171 4172 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 4173 // context for the registers used, where all instructions below are using 128-bit mode 4174 // On EVEX without VL and BW, these instructions will all be AVX. 
4175 if (VM_Version::supports_avx512vlbw()) {
4176 __ movl(rax, 0xffff);
4177 __ kmovql(k1, rax);
4178 }
4179
4180 __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));
4181
4182 __ movdqu(xmm_temp0, Address(state, 0));
4183 __ pshufb(xmm_temp0, xmm_temp10);
4184
4185
4186 __ BIND(L_ghash_loop);
4187 __ movdqu(xmm_temp2, Address(data, 0));
4188 __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr()));
4189
4190 __ movdqu(xmm_temp1, Address(subkeyH, 0));
4191 __ pshufb(xmm_temp1, xmm_temp10);
4192
4193 __ pxor(xmm_temp0, xmm_temp2);
4194
4195 //
4196 // Multiply with the hash key
4197 //
4198 __ movdqu(xmm_temp3, xmm_temp0);
4199 __ pclmulqdq(xmm_temp3, xmm_temp1, 0); // xmm3 holds a0*b0
4200 __ movdqu(xmm_temp4, xmm_temp0);
4201 __ pclmulqdq(xmm_temp4, xmm_temp1, 16); // xmm4 holds a0*b1
4202
4203 __ movdqu(xmm_temp5, xmm_temp0);
4204 __ pclmulqdq(xmm_temp5, xmm_temp1, 1); // xmm5 holds a1*b0
4205 __ movdqu(xmm_temp6, xmm_temp0);
4206 __ pclmulqdq(xmm_temp6, xmm_temp1, 17); // xmm6 holds a1*b1
4207
4208 __ pxor(xmm_temp4, xmm_temp5); // xmm4 holds a0*b1 + a1*b0
4209
4210 __ movdqu(xmm_temp5, xmm_temp4); // move the contents of xmm4 to xmm5
4211 __ psrldq(xmm_temp4, 8); // shift xmm4 by 64 bits to the right
4212 __ pslldq(xmm_temp5, 8); // shift xmm5 by 64 bits to the left
4213 __ pxor(xmm_temp3, xmm_temp5);
4214 __ pxor(xmm_temp6, xmm_temp4); // Register pair <xmm6:xmm3> holds the result
4215 // of the carry-less multiplication of
4216 // xmm0 by xmm1.
4217
4218 // We shift the result of the multiplication by one bit position
4219 // to the left to compensate for the fact that the bits are reversed.
4220 __ movdqu(xmm_temp7, xmm_temp3);
4221 __ movdqu(xmm_temp8, xmm_temp6);
4222 __ pslld(xmm_temp3, 1);
4223 __ pslld(xmm_temp6, 1);
4224 __ psrld(xmm_temp7, 31);
4225 __ psrld(xmm_temp8, 31);
4226 __ movdqu(xmm_temp9, xmm_temp7);
4227 __ pslldq(xmm_temp8, 4);
4228 __ pslldq(xmm_temp7, 4);
4229 __ psrldq(xmm_temp9, 12);
4230 __ por(xmm_temp3, xmm_temp7);
4231 __ por(xmm_temp6, xmm_temp8);
4232 __ por(xmm_temp6, xmm_temp9);
4233
4234 //
4235 // First phase of the reduction
4236 //
4237 // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts
4238 // independently.
4239 __ movdqu(xmm_temp7, xmm_temp3);
4240 __ movdqu(xmm_temp8, xmm_temp3);
4241 __ movdqu(xmm_temp9, xmm_temp3);
4242 __ pslld(xmm_temp7, 31); // packed left shift, << 31
4243 __ pslld(xmm_temp8, 30); // packed left shift, << 30
4244 __ pslld(xmm_temp9, 25); // packed left shift, << 25
4245 __ pxor(xmm_temp7, xmm_temp8); // xor the shifted versions
4246 __ pxor(xmm_temp7, xmm_temp9);
4247 __ movdqu(xmm_temp8, xmm_temp7);
4248 __ pslldq(xmm_temp7, 12);
4249 __ psrldq(xmm_temp8, 4);
4250 __ pxor(xmm_temp3, xmm_temp7); // first phase of the reduction complete
4251
4252 //
4253 // Second phase of the reduction
4254 //
4255 // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these
4256 // shift operations.
4257 __ movdqu(xmm_temp2, xmm_temp3);
4258 __ movdqu(xmm_temp4, xmm_temp3);
4259 __ movdqu(xmm_temp5, xmm_temp3);
4260 __ psrld(xmm_temp2, 1); // packed right shift, >> 1
4261 __ psrld(xmm_temp4, 2); // packed right shift, >> 2
4262 __ psrld(xmm_temp5, 7); // packed right shift, >> 7
4263 __ pxor(xmm_temp2, xmm_temp4); // xor the shifted versions
4264 __ pxor(xmm_temp2, xmm_temp5);
4265 __ pxor(xmm_temp2, xmm_temp8);
4266 __ pxor(xmm_temp3, xmm_temp2);
4267 __ pxor(xmm_temp6, xmm_temp3); // the result is in xmm6
4268
4269 __ decrement(blocks);
4270 __ jcc(Assembler::zero, L_exit);
4271 __ movdqu(xmm_temp0, xmm_temp6);
4272 __ addptr(data, 16);
4273 __ jmp(L_ghash_loop);
4274
4275 __ BIND(L_exit);
4276 __ pshufb(xmm_temp6, xmm_temp10); // Byte swap 16-byte result
4277 __ movdqu(Address(state, 0), xmm_temp6); // store the result
4278 __ leave();
4279 __ ret(0);
4280 return start;
4281 }
4282
4283 /**
4284 * Arguments:
4285 *
4286 * Inputs:
4287 * c_rarg0 - int crc
4288 * c_rarg1 - byte* buf
4289 * c_rarg2 - int length
4290 *
4291 * Output:
4292 * rax - int crc result
4293 */
4294 address generate_updateBytesCRC32() {
4295 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");
4296
4297 __ align(CodeEntryAlignment);
4298 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");
4299
4300 address start = __ pc();
4301 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
4302 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
4303 // rscratch1: r10
4304 const Register crc = c_rarg0; // crc
4305 const Register buf = c_rarg1; // source java byte array address
4306 const Register len = c_rarg2; // length
4307 const Register table = c_rarg3; // crc_table address (reuse register)
4308 const Register tmp = r11;
4309 assert_different_registers(crc, buf, len, table, tmp, rax);
4310
4311 BLOCK_COMMENT("Entry:");
4312 __ enter(); // required for proper stackwalking of RuntimeStub frame
4313
4314 __ kernel_crc32(crc, buf, len, table, tmp);
4315
4316 __ movl(rax, crc);
4317 __ vzeroupper();
4318 __ leave(); // required for proper stackwalking of RuntimeStub frame
4319 __ ret(0);
4320
4321 return start;
4322 }
4323
4324 /**
4325 * Arguments:
4326 *
4327 * Inputs:
4328 * c_rarg0 - int crc
4329 * c_rarg1 - byte* buf
4330 * c_rarg2 - long length
4331 * c_rarg3 - table_start - optional (present only when doing a library_call,
4332 * not used by x86 algorithm)
4333 *
4334 * Output:
4335 * rax - int crc result
4336 */
4337 address generate_updateBytesCRC32C(bool is_pclmulqdq_supported) {
4338 assert(UseCRC32CIntrinsics, "need SSE4_2");
4339 __ align(CodeEntryAlignment);
4340 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C");
4341 address start = __ pc();
4342 // reg.arg int#0 int#1 int#2 int#3 int#4 int#5 float regs
4343 // Windows RCX RDX R8 R9 none none XMM0..XMM3
4344 // Lin / Sol RDI RSI RDX RCX R8 R9 XMM0..XMM7
4345 const Register crc = c_rarg0; // crc
4346 const Register buf = c_rarg1; // source java byte array address
4347 const Register len = c_rarg2; // length
4348 const Register a = rax;
4349 const Register j = r9;
4350 const Register k = r10;
4351 const Register l = r11;
4352 #ifdef _WIN64
4353 const Register y = rdi;
4354 const Register z = rsi;
4355 #else
4356 const Register y = rcx;
4357 const Register z = r8;
4358 #endif
4359 assert_different_registers(crc, buf, len, a, j, k, l, y, z);
4360
4361 BLOCK_COMMENT("Entry:");
4362 __ enter(); // required for proper stackwalking of RuntimeStub frame
4363 #ifdef _WIN64
4364 __ push(y);
4365 __ push(z);
4366 #endif
4367 __
crc32c_ipl_alg2_alt2(crc, buf, len,
4368 a, j, k,
4369 l, y, z,
4370 c_farg0, c_farg1, c_farg2,
4371 is_pclmulqdq_supported);
4372 __ movl(rax, crc);
4373 #ifdef _WIN64
4374 __ pop(z);
4375 __ pop(y);
4376 #endif
4377 __ vzeroupper();
4378 __ leave(); // required for proper stackwalking of RuntimeStub frame
4379 __ ret(0);
4380
4381 return start;
4382 }
4383
4384 /**
4385 * Arguments:
4386 *
4387 * Input:
4388 * c_rarg0 - x address
4389 * c_rarg1 - x length
4390 * c_rarg2 - y address
4391 * c_rarg3 - y length
4392 * not Win64
4393 * c_rarg4 - z address
4394 * c_rarg5 - z length
4395 * Win64
4396 * rsp+40 - z address
4397 * rsp+48 - z length
4398 */
4399 address generate_multiplyToLen() {
4400 __ align(CodeEntryAlignment);
4401 StubCodeMark mark(this, "StubRoutines", "multiplyToLen");
4402
4403 address start = __ pc();
4404 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
4405 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
4406 const Register x = rdi;
4407 const Register xlen = rax;
4408 const Register y = rsi;
4409 const Register ylen = rcx;
4410 const Register z = r8;
4411 const Register zlen = r11;
4412
4413 // Next registers will be saved on stack in multiply_to_len().
4414 const Register tmp1 = r12;
4415 const Register tmp2 = r13;
4416 const Register tmp3 = r14;
4417 const Register tmp4 = r15;
4418 const Register tmp5 = rbx;
4419
4420 BLOCK_COMMENT("Entry:");
4421 __ enter(); // required for proper stackwalking of RuntimeStub frame
4422
4423 #ifndef _WIN64
4424 __ movptr(zlen, r9); // Save r9 in r11 - zlen
4425 #endif
4426 setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx
4427 // ylen => rcx, z => r8, zlen => r11
4428 // r9 and r10 may be used to save non-volatile registers
4429 #ifdef _WIN64
4430 // last 2 arguments (#4, #5) are on stack on Win64
4431 __ movptr(z, Address(rsp, 6 * wordSize));
4432 __ movptr(zlen, Address(rsp, 7 * wordSize));
4433 #endif
4434
4435 __ movptr(xlen, rsi);
4436 __ movptr(y, rdx);
4437 __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5);
4438
4439 restore_arg_regs();
4440
4441 __ leave(); // required for proper stackwalking of RuntimeStub frame
4442 __ ret(0);
4443
4444 return start;
4445 }
4446
4447 /**
4448 * Arguments:
4449 *
4450 * Input:
4451 * c_rarg0 - obja address
4452 * c_rarg1 - objb address
4453 * c_rarg2 - length length
4454 * c_rarg3 - scale log2_array_indxscale
4455 *
4456 * Output:
4457 * rax - int: >= 0 mismatched index, < 0 bitwise complement of tail
4458 */
4459 address generate_vectorizedMismatch() {
4460 __ align(CodeEntryAlignment);
4461 StubCodeMark mark(this, "StubRoutines", "vectorizedMismatch");
4462 address start = __ pc();
4463
4464 BLOCK_COMMENT("Entry:");
4465 __ enter();
4466
4467 #ifdef _WIN64 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
4468 const Register scale = c_rarg0; // rcx, will exchange with r9
4469 const Register objb = c_rarg1; // rdx
4470 const Register length = c_rarg2; // r8
4471 const Register obja = c_rarg3; // r9
4472 __ xchgq(obja, scale); // now obja and scale contain the correct contents
4473
4474 const Register tmp1 = r10;
4475 const Register tmp2 = r11;
4476 #endif
4477 #ifndef _WIN64 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
4478 const Register obja = c_rarg0; // U:rdi
4479 const Register objb = c_rarg1; // U:rsi
4480 const Register length = c_rarg2; // U:rdx
4481 const Register scale = c_rarg3; // U:rcx
4482 const Register tmp1 = r8;
4483 const Register tmp2 = r9;
4484 #endif
4485 const Register result = rax; // return value
4486 const XMMRegister vec0 = xmm0;
4487 const XMMRegister vec1 = xmm1;
4488 const XMMRegister vec2 = xmm2;
4489
4490 __ vectorized_mismatch(obja, objb, length, scale, result, tmp1, tmp2, vec0, vec1, vec2);
4491
4492 __ vzeroupper();
4493 __ leave();
4494 __ ret(0);
4495
4496 return start;
4497 }
4498
4499 /**
4500 * Arguments:
4501 *
4502 * Input:
4503 * c_rarg0 - x address
4504 * c_rarg1 - x length
4505 * c_rarg2 - z address
4506 * c_rarg3 - z length
4507 *
4508 */
4509 address generate_squareToLen() {
4510
4511 __ align(CodeEntryAlignment);
4512 StubCodeMark mark(this, "StubRoutines", "squareToLen");
4513
4514 address start = __ pc();
4515 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
4516 // Unix: rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...)
4517 const Register x = rdi;
4518 const Register len = rsi;
4519 const Register z = r8;
4520 const Register zlen = rcx;
4521
4522 const Register tmp1 = r12;
4523 const Register tmp2 = r13;
4524 const Register tmp3 = r14;
4525 const Register tmp4 = r15;
4526 const Register tmp5 = rbx;
4527
4528 BLOCK_COMMENT("Entry:");
4529 __ enter(); // required for proper stackwalking of RuntimeStub frame
4530
4531 setup_arg_regs(4); // x => rdi, len => rsi, z => rdx
4532 // zlen => rcx
4533 // r9 and r10 may be used to save non-volatile registers
4534 __ movptr(r8, rdx);
4535 __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax);
4536
4537 restore_arg_regs();
4538
4539 __ leave(); // required for proper stackwalking of RuntimeStub frame
4540 __ ret(0);
4541
4542 return start;
4543 }
4544
4545 /**
4546 * Arguments:
4547 *
4548 * Input:
4549 * c_rarg0 - out address
4550 * c_rarg1 - in address
4551 * c_rarg2 - offset
4552 * c_rarg3 - len
4553 * not Win64
4554 * c_rarg4 - k
4555 * Win64
4556 * rsp+40 - k
4557 */
4558 address generate_mulAdd() {
4559 __ align(CodeEntryAlignment);
4560 StubCodeMark mark(this, "StubRoutines", "mulAdd");
4561
4562 address start = __ pc();
4563 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
4564 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
4565 const Register out = rdi;
4566 const Register in = rsi;
4567 const Register offset = r11;
4568 const Register len = rcx;
4569 const Register k = r8;
4570
4571 // Next registers will be saved on stack in mul_add().
4572 const Register tmp1 = r12; 4573 const Register tmp2 = r13; 4574 const Register tmp3 = r14; 4575 const Register tmp4 = r15; 4576 const Register tmp5 = rbx; 4577 4578 BLOCK_COMMENT("Entry:"); 4579 __ enter(); // required for proper stackwalking of RuntimeStub frame 4580 4581 setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx 4582 // len => rcx, k => r8 4583 // r9 and r10 may be used to save non-volatile registers 4584 #ifdef _WIN64 4585 // last argument is on stack on Win64 4586 __ movl(k, Address(rsp, 6 * wordSize)); 4587 #endif 4588 __ movptr(r11, rdx); // move offset in rdx to offset(r11) 4589 __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 4590 4591 restore_arg_regs(); 4592 4593 __ leave(); // required for proper stackwalking of RuntimeStub frame 4594 __ ret(0); 4595 4596 return start; 4597 } 4598 4599 address generate_libmExp() { 4600 StubCodeMark mark(this, "StubRoutines", "libmExp"); 4601 4602 address start = __ pc(); 4603 4604 const XMMRegister x0 = xmm0; 4605 const XMMRegister x1 = xmm1; 4606 const XMMRegister x2 = xmm2; 4607 const XMMRegister x3 = xmm3; 4608 4609 const XMMRegister x4 = xmm4; 4610 const XMMRegister x5 = xmm5; 4611 const XMMRegister x6 = xmm6; 4612 const XMMRegister x7 = xmm7; 4613 4614 const Register tmp = r11; 4615 4616 BLOCK_COMMENT("Entry:"); 4617 __ enter(); // required for proper stackwalking of RuntimeStub frame 4618 4619 __ fast_exp(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 4620 4621 __ leave(); // required for proper stackwalking of RuntimeStub frame 4622 __ ret(0); 4623 4624 return start; 4625 4626 } 4627 4628 address generate_libmLog() { 4629 StubCodeMark mark(this, "StubRoutines", "libmLog"); 4630 4631 address start = __ pc(); 4632 4633 const XMMRegister x0 = xmm0; 4634 const XMMRegister x1 = xmm1; 4635 const XMMRegister x2 = xmm2; 4636 const XMMRegister x3 = xmm3; 4637 4638 const XMMRegister x4 = xmm4; 4639 const XMMRegister x5 = xmm5; 4640 const XMMRegister x6 = xmm6; 4641 const XMMRegister x7 = xmm7; 4642 4643 const Register tmp1 = r11; 4644 const Register tmp2 = r8; 4645 4646 BLOCK_COMMENT("Entry:"); 4647 __ enter(); // required for proper stackwalking of RuntimeStub frame 4648 4649 __ fast_log(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2); 4650 4651 __ leave(); // required for proper stackwalking of RuntimeStub frame 4652 __ ret(0); 4653 4654 return start; 4655 4656 } 4657 4658 address generate_libmLog10() { 4659 StubCodeMark mark(this, "StubRoutines", "libmLog10"); 4660 4661 address start = __ pc(); 4662 4663 const XMMRegister x0 = xmm0; 4664 const XMMRegister x1 = xmm1; 4665 const XMMRegister x2 = xmm2; 4666 const XMMRegister x3 = xmm3; 4667 4668 const XMMRegister x4 = xmm4; 4669 const XMMRegister x5 = xmm5; 4670 const XMMRegister x6 = xmm6; 4671 const XMMRegister x7 = xmm7; 4672 4673 const Register tmp = r11; 4674 4675 BLOCK_COMMENT("Entry:"); 4676 __ enter(); // required for proper stackwalking of RuntimeStub frame 4677 4678 __ fast_log10(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 4679 4680 __ leave(); // required for proper stackwalking of RuntimeStub frame 4681 __ ret(0); 4682 4683 return start; 4684 4685 } 4686 4687 address generate_libmPow() { 4688 StubCodeMark mark(this, "StubRoutines", "libmPow"); 4689 4690 address start = __ pc(); 4691 4692 const XMMRegister x0 = xmm0; 4693 const XMMRegister x1 = xmm1; 4694 const XMMRegister x2 = xmm2; 4695 const XMMRegister x3 = xmm3; 4696 4697 const XMMRegister x4 = xmm4; 4698 const XMMRegister x5 = xmm5; 4699 const XMMRegister 
x6 = xmm6; 4700 const XMMRegister x7 = xmm7; 4701 4702 const Register tmp1 = r8; 4703 const Register tmp2 = r9; 4704 const Register tmp3 = r10; 4705 const Register tmp4 = r11; 4706 4707 BLOCK_COMMENT("Entry:"); 4708 __ enter(); // required for proper stackwalking of RuntimeStub frame 4709 4710 __ fast_pow(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4711 4712 __ leave(); // required for proper stackwalking of RuntimeStub frame 4713 __ ret(0); 4714 4715 return start; 4716 4717 } 4718 4719 address generate_libmSin() { 4720 StubCodeMark mark(this, "StubRoutines", "libmSin"); 4721 4722 address start = __ pc(); 4723 4724 const XMMRegister x0 = xmm0; 4725 const XMMRegister x1 = xmm1; 4726 const XMMRegister x2 = xmm2; 4727 const XMMRegister x3 = xmm3; 4728 4729 const XMMRegister x4 = xmm4; 4730 const XMMRegister x5 = xmm5; 4731 const XMMRegister x6 = xmm6; 4732 const XMMRegister x7 = xmm7; 4733 4734 const Register tmp1 = r8; 4735 const Register tmp2 = r9; 4736 const Register tmp3 = r10; 4737 const Register tmp4 = r11; 4738 4739 BLOCK_COMMENT("Entry:"); 4740 __ enter(); // required for proper stackwalking of RuntimeStub frame 4741 4742 #ifdef _WIN64 4743 __ push(rsi); 4744 __ push(rdi); 4745 #endif 4746 __ fast_sin(x0, x1, x2, x3, x4, x5, x6, x7, rax, rbx, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4747 4748 #ifdef _WIN64 4749 __ pop(rdi); 4750 __ pop(rsi); 4751 #endif 4752 4753 __ leave(); // required for proper stackwalking of RuntimeStub frame 4754 __ ret(0); 4755 4756 return start; 4757 4758 } 4759 4760 address generate_libmCos() { 4761 StubCodeMark mark(this, "StubRoutines", "libmCos"); 4762 4763 address start = __ pc(); 4764 4765 const XMMRegister x0 = xmm0; 4766 const XMMRegister x1 = xmm1; 4767 const XMMRegister x2 = xmm2; 4768 const XMMRegister x3 = xmm3; 4769 4770 const XMMRegister x4 = xmm4; 4771 const XMMRegister x5 = xmm5; 4772 const XMMRegister x6 = xmm6; 4773 const XMMRegister x7 = xmm7; 4774 4775 const Register tmp1 = r8; 4776 const Register tmp2 = r9; 4777 const Register tmp3 = r10; 4778 const Register tmp4 = r11; 4779 4780 BLOCK_COMMENT("Entry:"); 4781 __ enter(); // required for proper stackwalking of RuntimeStub frame 4782 4783 #ifdef _WIN64 4784 __ push(rsi); 4785 __ push(rdi); 4786 #endif 4787 __ fast_cos(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4788 4789 #ifdef _WIN64 4790 __ pop(rdi); 4791 __ pop(rsi); 4792 #endif 4793 4794 __ leave(); // required for proper stackwalking of RuntimeStub frame 4795 __ ret(0); 4796 4797 return start; 4798 4799 } 4800 4801 address generate_libmTan() { 4802 StubCodeMark mark(this, "StubRoutines", "libmTan"); 4803 4804 address start = __ pc(); 4805 4806 const XMMRegister x0 = xmm0; 4807 const XMMRegister x1 = xmm1; 4808 const XMMRegister x2 = xmm2; 4809 const XMMRegister x3 = xmm3; 4810 4811 const XMMRegister x4 = xmm4; 4812 const XMMRegister x5 = xmm5; 4813 const XMMRegister x6 = xmm6; 4814 const XMMRegister x7 = xmm7; 4815 4816 const Register tmp1 = r8; 4817 const Register tmp2 = r9; 4818 const Register tmp3 = r10; 4819 const Register tmp4 = r11; 4820 4821 BLOCK_COMMENT("Entry:"); 4822 __ enter(); // required for proper stackwalking of RuntimeStub frame 4823 4824 #ifdef _WIN64 4825 __ push(rsi); 4826 __ push(rdi); 4827 #endif 4828 __ fast_tan(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4829 4830 #ifdef _WIN64 4831 __ pop(rdi); 4832 __ pop(rsi); 4833 #endif 4834 4835 __ leave(); // required for proper stackwalking of RuntimeStub frame 4836 __ ret(0); 4837 4838 return 

#undef __
#define __ masm->

  // Continuation point for throwing of implicit exceptions that are
  // not handled in the current activation. Fabricates an exception
  // oop and initiates normal exception dispatching in this
  // frame. Since we need to preserve callee-saved values (currently
  // only for C2, but done for C1 as well) we need a callee-saved oop
  // map and therefore have to make these stubs into RuntimeStubs
  // rather than BufferBlobs. If the compiler needs all registers to
  // be preserved between the fault point and the exception handler
  // then it must assume responsibility for that in
  // AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other
  // implicit exceptions (e.g., NullPointerException or
  // AbstractMethodError on entry) are either at call sites or
  // otherwise assume that stack unwinding will be initiated, so
  // caller-saved registers were assumed volatile in the compiler.
  address generate_throw_exception(const char* name,
                                   address runtime_entry,
                                   Register arg1 = noreg,
                                   Register arg2 = noreg) {
    // Information about frame layout at time of blocking runtime call.
    // Note that we only have to preserve callee-saved registers since
    // the compilers are responsible for supplying a continuation point
    // if they expect all registers to be preserved.
    enum layout {
      rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
      rbp_off2,
      return_off,
      return_off2,
      framesize // inclusive of return address
    };

    int insts_size = 512;
    int locs_size  = 64;

    CodeBuffer code(name, insts_size, locs_size);
    OopMapSet* oop_maps  = new OopMapSet();
    MacroAssembler* masm = new MacroAssembler(&code);

    address start = __ pc();

    // This is an inlined and slightly modified version of call_VM
    // which has the ability to fetch the return PC out of
    // thread-local storage and also sets up last_Java_sp slightly
    // differently than the real call_VM.

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    assert(is_even(framesize/2), "sp not 16-byte aligned");

    // return address and rbp are already in place
    __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog

    int frame_complete = __ pc() - start;

    // Set up last_Java_sp and last_Java_fp
    address the_pc = __ pc();
    __ set_last_Java_frame(rsp, rbp, the_pc);
    __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack

    // Call runtime
    if (arg1 != noreg) {
      assert(arg2 != c_rarg1, "clobbered");
      __ movptr(c_rarg1, arg1);
    }
    if (arg2 != noreg) {
      __ movptr(c_rarg2, arg2);
    }
    __ movptr(c_rarg0, r15_thread);
    BLOCK_COMMENT("call runtime_entry");
    __ call(RuntimeAddress(runtime_entry));

    // Generate oop map
    OopMap* map = new OopMap(framesize, 0);

    oop_maps->add_gc_map(the_pc - start, map);

    __ reset_last_Java_frame(true);

    __ leave(); // required for proper stackwalking of RuntimeStub frame

    // check for pending exceptions
#ifdef ASSERT
    Label L;
    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()),
              (int32_t) NULL_WORD);
    __ jcc(Assembler::notEqual, L);
    __ should_not_reach_here();
    __ bind(L);
#endif // ASSERT
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

    // codeBlob framesize is in words (not VMRegImpl::slot_size)
    RuntimeStub* stub =
      RuntimeStub::new_runtime_stub(name,
                                    &code,
                                    frame_complete,
                                    (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                    oop_maps, false);
    return stub->entry_point();
  }
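
  // Illustrative use (see generate_initial() and generate_all() below):
  //
  //   StubRoutines::_throw_StackOverflowError_entry =
  //     generate_throw_exception("StackOverflowError throw_exception",
  //                              CAST_FROM_FN_PTR(address,
  //                                               SharedRuntime::throw_StackOverflowError));
  //
  // The current thread is always passed to the runtime entry in c_rarg0;
  // the optional arg1/arg2 registers are forwarded in c_rarg1/c_rarg2.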

  void create_control_words() {
    // Round to nearest, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_std   = 0x027F;
    // Round to zero, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
    // Round to nearest, 24-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_24    = 0x007F;
    // Round to nearest, 64-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_64    = 0x037F;
    // Round to nearest, all SSE exceptions masked (MXCSR has no precision control)
    StubRoutines::_mxcsr_std           = 0x1F80;
    // Note: the following two constants are 80-bit values;
    // their layout is critical for correct loading by the FPU.
    // Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias1[0] = 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias1[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias1[2] = 0x03ff;
    // Un-bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias2[0] = 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias2[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias2[2] = 0x7bff;
  }
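
  // For reference, the field layout behind the constants above (Intel SDM):
  // in the x87 control word, bits 0-5 are the exception masks, bits 8-9 the
  // precision control (00 = 24-bit, 10 = 53-bit, 11 = 64-bit) and bits 10-11
  // the rounding control (00 = nearest, 11 = toward zero); in MXCSR, bits
  // 7-12 are the exception masks and bits 13-14 the rounding control. So
  // _mxcsr_std == 0x1F80 sets exactly bits 7-12: round to nearest with all
  // SSE exceptions masked, which is also the x86-64 ABI startup value.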

  // Initialization
  void generate_initial() {
    // Generates all stubs and initializes the entry points

    // These platform-specific settings are needed by generate_call_stub()
    create_control_words();

    // Entry points that exist on all platforms. Note: this is code
    // that could be shared among different platforms - however the
    // benefit seems to be smaller than the disadvantage of having a
    // much more complicated generator structure. See also comment in
    // stubRoutines.hpp.

    StubRoutines::_forward_exception_entry = generate_forward_exception();

    StubRoutines::_call_stub_entry =
      generate_call_stub(StubRoutines::_call_stub_return_address);

    // is referenced by megamorphic call
    StubRoutines::_catch_exception_entry = generate_catch_exception();

    // atomic calls
    StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
    StubRoutines::_atomic_xchg_long_entry    = generate_atomic_xchg_long();
    StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
    StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
    StubRoutines::_atomic_add_entry          = generate_atomic_add();
    StubRoutines::_atomic_add_long_entry     = generate_atomic_add_long();
    StubRoutines::_fence_entry               = generate_orderaccess_fence();

    // platform dependent
    StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
    StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();

    StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();

    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::throw_StackOverflowError));
    StubRoutines::_throw_delayed_StackOverflowError_entry =
      generate_throw_exception("delayed StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::throw_delayed_StackOverflowError));
    if (UseCRC32Intrinsics) {
      // set table address before generating the stubs that use it
      StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
    }

    if (UseCRC32CIntrinsics) {
      bool supports_clmul = VM_Version::supports_clmul();
      StubRoutines::x86::generate_CRC32C_table(supports_clmul);
      StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
      StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
    }
    if (VM_Version::supports_sse2() && UseLibmIntrinsic && InlineIntrinsics) {
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::x86::_ONEHALF_adr = (address)StubRoutines::x86::_ONEHALF;
        StubRoutines::x86::_P_2_adr = (address)StubRoutines::x86::_P_2;
        StubRoutines::x86::_SC_4_adr = (address)StubRoutines::x86::_SC_4;
        StubRoutines::x86::_Ctable_adr = (address)StubRoutines::x86::_Ctable;
        StubRoutines::x86::_SC_2_adr = (address)StubRoutines::x86::_SC_2;
        StubRoutines::x86::_SC_3_adr = (address)StubRoutines::x86::_SC_3;
        StubRoutines::x86::_SC_1_adr = (address)StubRoutines::x86::_SC_1;
        StubRoutines::x86::_PI_INV_TABLE_adr = (address)StubRoutines::x86::_PI_INV_TABLE;
        StubRoutines::x86::_PI_4_adr = (address)StubRoutines::x86::_PI_4;
        StubRoutines::x86::_PI32INV_adr = (address)StubRoutines::x86::_PI32INV;
        StubRoutines::x86::_SIGN_MASK_adr = (address)StubRoutines::x86::_SIGN_MASK;
        StubRoutines::x86::_P_1_adr = (address)StubRoutines::x86::_P_1;
        StubRoutines::x86::_P_3_adr = (address)StubRoutines::x86::_P_3;
        StubRoutines::x86::_NEG_ZERO_adr = (address)StubRoutines::x86::_NEG_ZERO;
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dexp)) {
        StubRoutines::_dexp = generate_libmExp();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
        StubRoutines::_dlog = generate_libmLog();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) {
        StubRoutines::_dlog10 = generate_libmLog10();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) {
        StubRoutines::_dpow = generate_libmPow();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
        StubRoutines::_dsin = generate_libmSin();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
        StubRoutines::_dcos = generate_libmCos();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::_dtan = generate_libmTan();
      }
    }
  }

  void generate_all() {
    // Generates all stubs and initializes the entry points

    // These entry points require SharedInfo::stack0 to be set up in
    // non-core builds and need to be relocatable, so they each
    // fabricate a RuntimeStub internally.
    StubRoutines::_throw_AbstractMethodError_entry =
      generate_throw_exception("AbstractMethodError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::throw_AbstractMethodError));

    StubRoutines::_throw_IncompatibleClassChangeError_entry =
      generate_throw_exception("IncompatibleClassChangeError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::throw_IncompatibleClassChangeError));

    StubRoutines::_throw_NullPointerException_at_call_entry =
      generate_throw_exception("NullPointerException at call throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::throw_NullPointerException_at_call));

    // entry points that are platform specific
    StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
    StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
    StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
    StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();

    StubRoutines::x86::_float_sign_mask  = generate_fp_mask("float_sign_mask",  0x7FFFFFFF7FFFFFFF);
    StubRoutines::x86::_float_sign_flip  = generate_fp_mask("float_sign_flip",  0x8000000080000000);
    StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
    StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);
    StubRoutines::x86::_vector_float_sign_mask  = generate_vector_fp_mask("vector_float_sign_mask",  0x7FFFFFFF7FFFFFFF);
    StubRoutines::x86::_vector_float_sign_flip  = generate_vector_fp_mask("vector_float_sign_flip",  0x8000000080000000);
    StubRoutines::x86::_vector_double_sign_mask = generate_vector_fp_mask("vector_double_sign_mask", 0x7FFFFFFFFFFFFFFF);
    StubRoutines::x86::_vector_double_sign_flip = generate_vector_fp_mask("vector_double_sign_flip", 0x8000000000000000);
    StubRoutines::x86::_vector_all_bits_set = generate_vector_fp_mask("vector_all_bits_set", 0xFFFFFFFFFFFFFFFF);
    StubRoutines::x86::_vector_byte_bitset  = generate_vector_fp_mask("vector_byte_bitset",  0x0101010101010101);
    StubRoutines::x86::_vector_long_perm_mask = generate_vector_custom_i32("vector_long_perm_mask", Assembler::AVX_512bit,
                                                                           0, 2, 4, 6, 8, 10, 12, 14);
    StubRoutines::x86::_vector_byte_saturation_mask = generate_vector_fp_mask("vector_byte_saturation_mask", 0x00ff00ff00ff00ff);

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    // don't bother generating these AES intrinsic stubs unless global flag is set
    if (UseAESIntrinsics) {
      StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others
      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
      StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
    }
    if (UseAESCTRIntrinsics) {
      StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask();
      StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
    }

    if (UseSHA1Intrinsics) {
      StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
      StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
      StubRoutines::_sha1_implCompress   = generate_sha1_implCompress(false, "sha1_implCompress");
      StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true,  "sha1_implCompressMB");
    }
    if (UseSHA256Intrinsics) {
      StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
      // Build _k256_W by duplicating each 128-bit group of four round
      // constants from _k256 into both halves of a 256-bit row, so the
      // AVX2 SHA-256 code sees the same constants in both ymm lanes.
      char* dst = (char*)StubRoutines::x86::_k256_W;
      char* src = (char*)StubRoutines::x86::_k256;
      for (int ii = 0; ii < 16; ++ii) {
        memcpy(dst + 32 * ii,      src + 16 * ii, 16);
        memcpy(dst + 32 * ii + 16, src + 16 * ii, 16);
      }
      StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask();
      StubRoutines::_sha256_implCompress   = generate_sha256_implCompress(false, "sha256_implCompress");
      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true,  "sha256_implCompressMB");
    }
    if (UseSHA512Intrinsics) {
      StubRoutines::x86::_k512_W_addr = (address)StubRoutines::x86::_k512_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = generate_pshuffle_byte_flip_mask_sha512();
      StubRoutines::_sha512_implCompress   = generate_sha512_implCompress(false, "sha512_implCompress");
      StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true,  "sha512_implCompressMB");
    }

    // Generate GHASH intrinsics code
    if (UseGHASHIntrinsics) {
      StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();
      StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask();
      StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
    }

    // Safefetch stubs.
    generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
                                                   &StubRoutines::_safefetch32_fault_pc,
                                                   &StubRoutines::_safefetch32_continuation_pc);
    generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                                                       &StubRoutines::_safefetchN_fault_pc,
                                                       &StubRoutines::_safefetchN_continuation_pc);
#ifdef COMPILER2
    if (UseMultiplyToLenIntrinsic) {
      StubRoutines::_multiplyToLen = generate_multiplyToLen();
    }
    if (UseSquareToLenIntrinsic) {
      StubRoutines::_squareToLen = generate_squareToLen();
    }
    if (UseMulAddIntrinsic) {
      StubRoutines::_mulAdd = generate_mulAdd();
    }
#ifndef _WINDOWS
    if (UseMontgomeryMultiplyIntrinsic) {
      StubRoutines::_montgomeryMultiply
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
    }
    if (UseMontgomerySquareIntrinsic) {
      StubRoutines::_montgomerySquare
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
    }
#endif // !_WINDOWS
#endif // COMPILER2

    if (UseVectorizedMismatchIntrinsic) {
      StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch();
    }
  }

 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
}; // end class declaration

void StubGenerator_generate(CodeBuffer* code, bool all) {
  StubGenerator g(code, all);
}
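
// Note: this entry point is invoked twice during startup (see
// StubRoutines::initialize1() and StubRoutines::initialize2() in
// stubRoutines.cpp): first with all == false to create the stubs the
// interpreter needs early (generate_initial()), then with all == true
// for the remaining stubs (generate_all()).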