/*
 * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSetCodeGen.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions

// Stub Code definitions

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    // This can destroy rscratch1 if counter is far from the code cache
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter)           \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif

  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    c_rarg4:   (interpreter) entry point              address
  //    c_rarg5:   parameters                             intptr_t*
  //    16(rbp):   parameter size (in words)              int
  //    24(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -12 [ argument word 1      ]
  // -11 [ saved r15            ] <--- rsp_after_call
  // -10 [ saved r14            ]
  //  -9 [ saved r13            ]
  //  -8 [ saved r12            ]
  //  -7 [ saved rbx            ]
  //  -6 [ call wrapper         ]
  //  -5 [ result               ]
  //  -4 [ result type          ]
  //  -3 [ method               ]
  //  -2 [ entry point          ]
  //  -1 [ parameters           ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ parameter size       ]
  //   3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    48(rbp):   (interpreter) entry point              address
  //    56(rbp):   parameters                             intptr_t*
  //    64(rbp):   parameter size (in words)              int
  //    72(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -60 [ argument word 1      ]
  // -59 [ saved xmm31          ] <--- rsp_after_call
  //     [ saved xmm16-xmm30    ] (EVEX enabled, else the space is blank)
  // -27 [ saved xmm15          ]
  //     [ saved xmm7-xmm14     ]
  //  -9 [ saved xmm6           ] (each xmm register takes 2 slots)
  //  -7 [ saved r15            ]
  //  -6 [ saved r14            ]
  //  -5 [ saved r13            ]
  //  -4 [ saved r12            ]
  //  -3 [ saved rdi            ]
  //  -2 [ saved rsi            ]
  //  -1 [ saved rbx            ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ call wrapper         ]
  //   3 [ result               ]
  //   4 [ result type          ]
  //   5 [ method               ]
  //   6 [ entry point          ]
  //   7 [ parameters           ]
  //   8 [ parameter size       ]
  //   9 [ thread               ]
  //
  // Windows reserves the caller's stack space for arguments 1-4.
  // We spill c_rarg0-c_rarg3 to this space.

  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    xmm_save_first     = 6,  // save from xmm6
    xmm_save_last      = 31, // to xmm31
    xmm_save_base      = -9,
    rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -59
    r15_off            = -7,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            = 0,
    retaddr_off        = 1,
    call_wrapper_off   = 2,
    result_off         = 3,
    result_type_off    = 4,
    method_off         = 5,
    entry_point_off    = 6,
    parameters_off     = 7,
    parameter_size_off = 8,
    thread_off         = 9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            = 0,
    retaddr_off        = 1,
    parameter_size_off = 2,
    thread_off         = 3
#endif
  };

#ifdef _WIN64
  Address xmm_save(int reg) {
    assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
    return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
  }
#endif

  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();
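
    // How this stub is reached (a sketch, not literal code): the VM invokes
    // it through the CallStub function pointer type declared in
    // stubRoutines.hpp, roughly
    //
    //   StubRoutines::call_stub()(
    //       (address)&link,        // call wrapper
    //       result, result_type,   // where and how to store the result
    //       method, entry_point,   // what to run
    //       parameters, parameter_size,
    //       thread);
    //
    // The six register and two stack arguments documented above correspond
    // to this parameter list (on Windows only the first four arrive in
    // registers; the rest are read off the caller's frame).
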
    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,   c_rarg5); // parameters
    __ movptr(entry_point,  c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);
    if (UseAVX > 2) {
      __ movl(rbx, 0xffff);
      __ kmovwl(k1, rbx);
    }
#ifdef _WIN64
    int last_reg = 15;
    if (UseAVX > 2) {
      last_reg = 31;
    }
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vextractf32x4(xmm_save(i), as_XMMRegister(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(xmm_save(i), as_XMMRegister(i));
      }
    }

    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK); // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);      // parameter pointer
    __ movl(c_rarg1, c_rarg3);           // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0)); // get parameter
    __ addptr(c_rarg2, wordSize);        // advance to next parameter
    __ decrementl(c_rarg1);              // decrement counter
    __ push(rax);                        // pass parameter
    __ jcc(Assembler::notZero, loop);

    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);          // get Method*
    __ movptr(c_rarg1, entry_point); // get entry_point
    __ mov(r13, rsp);                // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::call_stub: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::call_stub: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L3);
    }
#endif

    // restore regs belonging to calling function
#ifdef _WIN64
    // emit the restores for xmm regs
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vinsertf32x4(as_XMMRegister(i), as_XMMRegister(i), xmm_save(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(as_XMMRegister(i), xmm_save(i));
      }
    }
#endif
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }

  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to set up the
  // rsp.
  //
  // rax: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    address start = __ pc();

    // same as in generate_call_stub():
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
    const Address thread        (rbp, thread_off         * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::catch_exception: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::catch_exception: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L3);
    }
#endif

    // set pending exception
    __ verify_oop(rax);

    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
    __ lea(rscratch1, ExternalAddress((address)__FILE__));
    __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
    __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL,
           "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }

  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }

  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
    address start = __ pc();

    __ movptr(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if (compare_value == *dest) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jbyte atomic::atomic_cmpxchg(jbyte exchange_value, volatile jbyte* dest,
  //                                          jbyte compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if (compare_value == *dest) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_byte() {
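    // Descriptive note (based on the code below): cmpxchgb compares AL with
    // the memory operand, so the jbyte compare_value is sign-extended into
    // rax with movsbq; the old value returned in al then carries the correct
    // sign extension for the jbyte return value.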
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
    address start = __ pc();

    __ movsbq(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jlong atomic::atomic_cmpxchg(jlong exchange_value,
  //                                          volatile jlong* dest,
  //                                          jlong compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if (compare_value == *dest) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jint atomic::add(jint add_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    if ( os::is_MP() ) __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);
    __ addl(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
    if ( os::is_MP() ) __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
    __ addptr(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess). It is used as part of debugging
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp(rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // caller's fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_sp()
  //
  // This routine is used to find the previous stack pointer for the
  // caller.
  address generate_get_previous_sp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_sp");
    address start = __ pc();

    __ movptr(rax, rsp);
    __ addptr(rax, 8); // return address is at the top of the stack.
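    // (Descriptive note: this stub builds no frame, so on entry rsp points
    // at our own return address; the caller's sp just before the call was
    // therefore rsp + 8, which is the value computed in rax above.)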
    __ ret(0);

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize); // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK); // Only check control and mask bits
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }

  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64( mask, relocInfo::none );
    __ emit_data64( mask, relocInfo::none );

    return start;
  }

  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,
      saved_r10     = 8 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr   = 16 * wordSize,
      error_msg     = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit); // if obj is NULL it is OK
    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // set r12 to heapbase for load_klass()
    __ reinit_heapbase();

    // make sure klass is 'reasonable' (i.e., not zero)
    __ load_klass(rax, rax); // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // restore c_rarg3
    __ pop(c_rarg2);                               // restore c_rarg2
    __ pop(r12);                                   // restore r12
    __ popf();                                     // restore flags
    __ ret(4 * wordSize);                          // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // get saved c_rarg3 back
    __ pop(c_rarg2);                               // get saved c_rarg2 back
    __ pop(r12);                                   // get saved r12 back
    __ popf();                                     // get saved flags off stack --
                                                   // will be ignored

    __ pusha();                                    // push registers
                                                   // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //     [tos +  0] 16 saved registers
    //     [tos + 16] return address
    //   * [tos + 17] error message (char*)
    //   * [tos + 18] object to verify (oop)
    //   * [tos + 19] saved rax - saved by caller and bashed
    //   * [tos + 20] saved r10 (rscratch1) - saved by caller
    //   * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ mov(rsp, r12);                               // restore rsp
    __ popa();                                      // pop registers (includes r12)
    __ ret(4 * wordSize);                           // pop caller saved stuff

    return start;
  }

  //
  // Verify that a register contains a clean 32-bit positive value
  // (high 32 bits are 0) so it can be used in 64-bit shifts.
  //
  //  Input:
  //    Rint  - 32-bit value
  //    Rtmp  - scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#ifdef ASSERT
    Label L;
    assert_different_registers(Rtmp, Rint);
    __ movslq(Rtmp, Rint);
    __ cmpq(Rtmp, Rint);
    __ jcc(Assembler::equal, L);
    __ stop("high 32-bits of int value are not 0");
    __ bind(L);
#endif
  }

  // Generate overlap test for array copy stubs
  //
  //  Input:
  //     c_rarg0 - from
  //     c_rarg1 - to
  //     c_rarg2 - element count
  //
  //  Output:
  //     rax   - &from[element count]
  //
  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(NULL, &L_no_overlap, sf);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
    const Register from     = c_rarg0;
    const Register to       = c_rarg1;
    const Register count    = c_rarg2;
    const Register end_from = rax;

    __ cmpptr(to, from);
    __ lea(end_from, Address(from, count, sf, 0));
    if (NOLp == NULL) {
      ExternalAddress no_overlap(no_overlap_target);
      __ jump_cc(Assembler::belowEqual, no_overlap);
      __ cmpptr(to, end_from);
      __ jump_cc(Assembler::aboveEqual, no_overlap);
    } else {
      __ jcc(Assembler::belowEqual, (*NOLp));
      __ cmpptr(to, end_from);
      __ jcc(Assembler::aboveEqual, (*NOLp));
    }
  }
1158 // 1159 void setup_arg_regs(int nargs = 3) { 1160 const Register saved_rdi = r9; 1161 const Register saved_rsi = r10; 1162 assert(nargs == 3 || nargs == 4, "else fix"); 1163 #ifdef _WIN64 1164 assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9, 1165 "unexpected argument registers"); 1166 if (nargs >= 4) 1167 __ mov(rax, r9); // r9 is also saved_rdi 1168 __ movptr(saved_rdi, rdi); 1169 __ movptr(saved_rsi, rsi); 1170 __ mov(rdi, rcx); // c_rarg0 1171 __ mov(rsi, rdx); // c_rarg1 1172 __ mov(rdx, r8); // c_rarg2 1173 if (nargs >= 4) 1174 __ mov(rcx, rax); // c_rarg3 (via rax) 1175 #else 1176 assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx, 1177 "unexpected argument registers"); 1178 #endif 1179 } 1180 1181 void restore_arg_regs() { 1182 const Register saved_rdi = r9; 1183 const Register saved_rsi = r10; 1184 #ifdef _WIN64 1185 __ movptr(rdi, saved_rdi); 1186 __ movptr(rsi, saved_rsi); 1187 #endif 1188 } 1189 1190 // Copy big chunks forward 1191 // 1192 // Inputs: 1193 // end_from - source arrays end address 1194 // end_to - destination array end address 1195 // qword_count - 64-bits element count, negative 1196 // to - scratch 1197 // L_copy_bytes - entry label 1198 // L_copy_8_bytes - exit label 1199 // 1200 void copy_bytes_forward(Register end_from, Register end_to, 1201 Register qword_count, Register to, 1202 Label& L_copy_bytes, Label& L_copy_8_bytes) { 1203 DEBUG_ONLY(__ stop("enter at entry label, not here")); 1204 Label L_loop; 1205 __ align(OptoLoopAlignment); 1206 if (UseUnalignedLoadStores) { 1207 Label L_end; 1208 if (UseAVX > 2) { 1209 __ movl(to, 0xffff); 1210 __ kmovwl(k1, to); 1211 } 1212 // Copy 64-bytes per iteration 1213 __ BIND(L_loop); 1214 if (UseAVX > 2) { 1215 __ evmovdqul(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit); 1216 __ evmovdqul(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit); 1217 } else if (UseAVX == 2) { 1218 __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56)); 1219 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0); 1220 __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24)); 1221 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1); 1222 } else { 1223 __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56)); 1224 __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0); 1225 __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40)); 1226 __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1); 1227 __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24)); 1228 __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2); 1229 __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8)); 1230 __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3); 1231 } 1232 __ BIND(L_copy_bytes); 1233 __ addptr(qword_count, 8); 1234 __ jcc(Assembler::lessEqual, L_loop); 1235 __ subptr(qword_count, 4); // sub(8) and add(4) 1236 __ jccb(Assembler::greater, L_end); 1237 // Copy trailing 32 bytes 1238 if (UseAVX >= 2) { 1239 __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24)); 1240 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0); 1241 } else { 1242 __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24)); 1243 __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0); 1244 __ movdqu(xmm1, Address(end_from, 
qword_count, Address::times_8, - 8)); 1245 __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1); 1246 } 1247 __ addptr(qword_count, 4); 1248 __ BIND(L_end); 1249 if (UseAVX >= 2) { 1250 // clean upper bits of YMM registers 1251 __ vpxor(xmm0, xmm0); 1252 __ vpxor(xmm1, xmm1); 1253 } 1254 } else { 1255 // Copy 32-bytes per iteration 1256 __ BIND(L_loop); 1257 __ movq(to, Address(end_from, qword_count, Address::times_8, -24)); 1258 __ movq(Address(end_to, qword_count, Address::times_8, -24), to); 1259 __ movq(to, Address(end_from, qword_count, Address::times_8, -16)); 1260 __ movq(Address(end_to, qword_count, Address::times_8, -16), to); 1261 __ movq(to, Address(end_from, qword_count, Address::times_8, - 8)); 1262 __ movq(Address(end_to, qword_count, Address::times_8, - 8), to); 1263 __ movq(to, Address(end_from, qword_count, Address::times_8, - 0)); 1264 __ movq(Address(end_to, qword_count, Address::times_8, - 0), to); 1265 1266 __ BIND(L_copy_bytes); 1267 __ addptr(qword_count, 4); 1268 __ jcc(Assembler::lessEqual, L_loop); 1269 } 1270 __ subptr(qword_count, 4); 1271 __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords 1272 } 1273 1274 // Copy big chunks backward 1275 // 1276 // Inputs: 1277 // from - source arrays address 1278 // dest - destination array address 1279 // qword_count - 64-bits element count 1280 // to - scratch 1281 // L_copy_bytes - entry label 1282 // L_copy_8_bytes - exit label 1283 // 1284 void copy_bytes_backward(Register from, Register dest, 1285 Register qword_count, Register to, 1286 Label& L_copy_bytes, Label& L_copy_8_bytes) { 1287 DEBUG_ONLY(__ stop("enter at entry label, not here")); 1288 Label L_loop; 1289 __ align(OptoLoopAlignment); 1290 if (UseUnalignedLoadStores) { 1291 Label L_end; 1292 if (UseAVX > 2) { 1293 __ movl(to, 0xffff); 1294 __ kmovwl(k1, to); 1295 } 1296 // Copy 64-bytes per iteration 1297 __ BIND(L_loop); 1298 if (UseAVX > 2) { 1299 __ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 0), Assembler::AVX_512bit); 1300 __ evmovdqul(Address(dest, qword_count, Address::times_8, 0), xmm0, Assembler::AVX_512bit); 1301 } else if (UseAVX == 2) { 1302 __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32)); 1303 __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0); 1304 __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0)); 1305 __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1); 1306 } else { 1307 __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48)); 1308 __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0); 1309 __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32)); 1310 __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1); 1311 __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16)); 1312 __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2); 1313 __ movdqu(xmm3, Address(from, qword_count, Address::times_8, 0)); 1314 __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm3); 1315 } 1316 __ BIND(L_copy_bytes); 1317 __ subptr(qword_count, 8); 1318 __ jcc(Assembler::greaterEqual, L_loop); 1319 1320 __ addptr(qword_count, 4); // add(8) and sub(4) 1321 __ jccb(Assembler::less, L_end); 1322 // Copy trailing 32 bytes 1323 if (UseAVX >= 2) { 1324 __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0)); 1325 __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0); 1326 } else { 1327 __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16)); 1328 __ 
movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0); 1329 __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 0)); 1330 __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm1); 1331 } 1332 __ subptr(qword_count, 4); 1333 __ BIND(L_end); 1334 if (UseAVX >= 2) { 1335 // clean upper bits of YMM registers 1336 __ vpxor(xmm0, xmm0); 1337 __ vpxor(xmm1, xmm1); 1338 } 1339 } else { 1340 // Copy 32-bytes per iteration 1341 __ BIND(L_loop); 1342 __ movq(to, Address(from, qword_count, Address::times_8, 24)); 1343 __ movq(Address(dest, qword_count, Address::times_8, 24), to); 1344 __ movq(to, Address(from, qword_count, Address::times_8, 16)); 1345 __ movq(Address(dest, qword_count, Address::times_8, 16), to); 1346 __ movq(to, Address(from, qword_count, Address::times_8, 8)); 1347 __ movq(Address(dest, qword_count, Address::times_8, 8), to); 1348 __ movq(to, Address(from, qword_count, Address::times_8, 0)); 1349 __ movq(Address(dest, qword_count, Address::times_8, 0), to); 1350 1351 __ BIND(L_copy_bytes); 1352 __ subptr(qword_count, 4); 1353 __ jcc(Assembler::greaterEqual, L_loop); 1354 } 1355 __ addptr(qword_count, 4); 1356 __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords 1357 } 1358 1359 1360 // Arguments: 1361 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1362 // ignored 1363 // name - stub name string 1364 // 1365 // Inputs: 1366 // c_rarg0 - source array address 1367 // c_rarg1 - destination array address 1368 // c_rarg2 - element count, treated as ssize_t, can be zero 1369 // 1370 // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, 1371 // we let the hardware handle it. The one to eight bytes within words, 1372 // dwords or qwords that span cache line boundaries will still be loaded 1373 // and stored atomically. 1374 // 1375 // Side Effects: 1376 // disjoint_byte_copy_entry is set to the no-overlap entry point 1377 // used by generate_conjoint_byte_copy(). 1378 // 1379 address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) { 1380 __ align(CodeEntryAlignment); 1381 StubCodeMark mark(this, "StubRoutines", name); 1382 address start = __ pc(); 1383 1384 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes; 1385 Label L_copy_byte, L_exit; 1386 const Register from = rdi; // source array address 1387 const Register to = rsi; // destination array address 1388 const Register count = rdx; // elements count 1389 const Register byte_count = rcx; 1390 const Register qword_count = count; 1391 const Register end_from = from; // source array end address 1392 const Register end_to = to; // destination array end address 1393 // End pointers are inclusive, and if count is not zero they point 1394 // to the last unit copied: end_to[0] := end_from[0] 1395 1396 __ enter(); // required for proper stackwalking of RuntimeStub frame 1397 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1398 1399 if (entry != NULL) { 1400 *entry = __ pc(); 1401 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1402 BLOCK_COMMENT("Entry:"); 1403 } 1404 1405 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1406 // r9 and r10 may be used to save non-volatile registers 1407 1408 // 'from', 'to' and 'count' are now valid 1409 __ movptr(byte_count, count); 1410 __ shrptr(count, 3); // count => qword_count 1411 1412 // Copy from low to high addresses. Use 'to' as scratch. 
1413 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1414 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1415 __ negptr(qword_count); // make the count negative 1416 __ jmp(L_copy_bytes); 1417 1418 // Copy trailing qwords 1419 __ BIND(L_copy_8_bytes); 1420 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1421 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1422 __ increment(qword_count); 1423 __ jcc(Assembler::notZero, L_copy_8_bytes); 1424 1425 // Check for and copy trailing dword 1426 __ BIND(L_copy_4_bytes); 1427 __ testl(byte_count, 4); 1428 __ jccb(Assembler::zero, L_copy_2_bytes); 1429 __ movl(rax, Address(end_from, 8)); 1430 __ movl(Address(end_to, 8), rax); 1431 1432 __ addptr(end_from, 4); 1433 __ addptr(end_to, 4); 1434 1435 // Check for and copy trailing word 1436 __ BIND(L_copy_2_bytes); 1437 __ testl(byte_count, 2); 1438 __ jccb(Assembler::zero, L_copy_byte); 1439 __ movw(rax, Address(end_from, 8)); 1440 __ movw(Address(end_to, 8), rax); 1441 1442 __ addptr(end_from, 2); 1443 __ addptr(end_to, 2); 1444 1445 // Check for and copy trailing byte 1446 __ BIND(L_copy_byte); 1447 __ testl(byte_count, 1); 1448 __ jccb(Assembler::zero, L_exit); 1449 __ movb(rax, Address(end_from, 8)); 1450 __ movb(Address(end_to, 8), rax); 1451 1452 __ BIND(L_exit); 1453 restore_arg_regs(); 1454 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free 1455 __ xorptr(rax, rax); // return 0 1456 __ leave(); // required for proper stackwalking of RuntimeStub frame 1457 __ ret(0); 1458 1459 // Copy in multi-bytes chunks 1460 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1461 __ jmp(L_copy_4_bytes); 1462 1463 return start; 1464 } 1465 1466 // Arguments: 1467 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1468 // ignored 1469 // name - stub name string 1470 // 1471 // Inputs: 1472 // c_rarg0 - source array address 1473 // c_rarg1 - destination array address 1474 // c_rarg2 - element count, treated as ssize_t, can be zero 1475 // 1476 // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, 1477 // we let the hardware handle it. The one to eight bytes within words, 1478 // dwords or qwords that span cache line boundaries will still be loaded 1479 // and stored atomically. 1480 // 1481 address generate_conjoint_byte_copy(bool aligned, address nooverlap_target, 1482 address* entry, const char *name) { 1483 __ align(CodeEntryAlignment); 1484 StubCodeMark mark(this, "StubRoutines", name); 1485 address start = __ pc(); 1486 1487 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes; 1488 const Register from = rdi; // source array address 1489 const Register to = rsi; // destination array address 1490 const Register count = rdx; // elements count 1491 const Register byte_count = rcx; 1492 const Register qword_count = count; 1493 1494 __ enter(); // required for proper stackwalking of RuntimeStub frame 1495 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 
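
    // (Descriptive note on the structure below: because source and
    // destination may overlap, this stub copies from high addresses to low.
    // The odd trailing byte, word and dword are peeled off first, at the
    // high end, and the bulk qword loop then walks downward -- the mirror
    // image of the order used in generate_disjoint_byte_copy() above.)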

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from high to low addresses.

    // Check for and copy trailing byte
    __ testl(byte_count, 1);
    __ jcc(Assembler::zero, L_copy_2_bytes);
    __ movb(rax, Address(from, byte_count, Address::times_1, -1));
    __ movb(Address(to, byte_count, Address::times_1, -1), rax);
    __ decrement(byte_count); // Adjust for possible trailing word

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jcc(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, byte_count, Address::times_1, -2));
    __ movw(Address(to, byte_count, Address::times_1, -2), rax);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
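
  // How the disjoint/conjoint pairs connect (a sketch based on the 'entry'
  // out-parameters recorded above, not literal code from this file):
  //
  //   address disjoint_entry;
  //   generate_disjoint_byte_copy(false, &disjoint_entry, "jbyte_disjoint_arraycopy");
  //   generate_conjoint_byte_copy(false, disjoint_entry, NULL, "jbyte_arraycopy");
  //
  // The conjoint stub runs array_overlap_test() against that entry, so calls
  // with non-overlapping ranges are forwarded to the forward-copying
  // disjoint code.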
1578 // 1579 address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) { 1580 __ align(CodeEntryAlignment); 1581 StubCodeMark mark(this, "StubRoutines", name); 1582 address start = __ pc(); 1583 1584 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes,L_copy_2_bytes,L_exit; 1585 const Register from = rdi; // source array address 1586 const Register to = rsi; // destination array address 1587 const Register count = rdx; // elements count 1588 const Register word_count = rcx; 1589 const Register qword_count = count; 1590 const Register end_from = from; // source array end address 1591 const Register end_to = to; // destination array end address 1592 // End pointers are inclusive, and if count is not zero they point 1593 // to the last unit copied: end_to[0] := end_from[0] 1594 1595 __ enter(); // required for proper stackwalking of RuntimeStub frame 1596 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1597 1598 if (entry != NULL) { 1599 *entry = __ pc(); 1600 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1601 BLOCK_COMMENT("Entry:"); 1602 } 1603 1604 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1605 // r9 and r10 may be used to save non-volatile registers 1606 1607 // 'from', 'to' and 'count' are now valid 1608 __ movptr(word_count, count); 1609 __ shrptr(count, 2); // count => qword_count 1610 1611 // Copy from low to high addresses. Use 'to' as scratch. 1612 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1613 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1614 __ negptr(qword_count); 1615 __ jmp(L_copy_bytes); 1616 1617 // Copy trailing qwords 1618 __ BIND(L_copy_8_bytes); 1619 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1620 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1621 __ increment(qword_count); 1622 __ jcc(Assembler::notZero, L_copy_8_bytes); 1623 1624 // Original 'dest' is trashed, so we can't use it as a 1625 // base register for a possible trailing word copy 1626 1627 // Check for and copy trailing dword 1628 __ BIND(L_copy_4_bytes); 1629 __ testl(word_count, 2); 1630 __ jccb(Assembler::zero, L_copy_2_bytes); 1631 __ movl(rax, Address(end_from, 8)); 1632 __ movl(Address(end_to, 8), rax); 1633 1634 __ addptr(end_from, 4); 1635 __ addptr(end_to, 4); 1636 1637 // Check for and copy trailing word 1638 __ BIND(L_copy_2_bytes); 1639 __ testl(word_count, 1); 1640 __ jccb(Assembler::zero, L_exit); 1641 __ movw(rax, Address(end_from, 8)); 1642 __ movw(Address(end_to, 8), rax); 1643 1644 __ BIND(L_exit); 1645 restore_arg_regs(); 1646 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1647 __ xorptr(rax, rax); // return 0 1648 __ leave(); // required for proper stackwalking of RuntimeStub frame 1649 __ ret(0); 1650 1651 // Copy in multi-bytes chunks 1652 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1653 __ jmp(L_copy_4_bytes); 1654 1655 return start; 1656 } 1657 1658 address generate_fill(BasicType t, bool aligned, const char *name) { 1659 __ align(CodeEntryAlignment); 1660 StubCodeMark mark(this, "StubRoutines", name); 1661 address start = __ pc(); 1662 1663 BLOCK_COMMENT("Entry:"); 1664 1665 const Register to = c_rarg0; // source array address 1666 const Register value = c_rarg1; // value 1667 const Register count = c_rarg2; // elements count 1668 1669 __ enter(); // required for proper stackwalking of RuntimeStub frame 1670 1671 __ 
generate_fill(t, aligned, to, value, count, rax, xmm0); 1672 1673 __ leave(); // required for proper stackwalking of RuntimeStub frame 1674 __ ret(0); 1675 return start; 1676 } 1677 1678 // Arguments: 1679 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1680 // ignored 1681 // name - stub name string 1682 // 1683 // Inputs: 1684 // c_rarg0 - source array address 1685 // c_rarg1 - destination array address 1686 // c_rarg2 - element count, treated as ssize_t, can be zero 1687 // 1688 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we 1689 // let the hardware handle it. The two or four words within dwords 1690 // or qwords that span cache line boundaries will still be loaded 1691 // and stored atomically. 1692 // 1693 address generate_conjoint_short_copy(bool aligned, address nooverlap_target, 1694 address *entry, const char *name) { 1695 __ align(CodeEntryAlignment); 1696 StubCodeMark mark(this, "StubRoutines", name); 1697 address start = __ pc(); 1698 1699 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes; 1700 const Register from = rdi; // source array address 1701 const Register to = rsi; // destination array address 1702 const Register count = rdx; // elements count 1703 const Register word_count = rcx; 1704 const Register qword_count = count; 1705 1706 __ enter(); // required for proper stackwalking of RuntimeStub frame 1707 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1708 1709 if (entry != NULL) { 1710 *entry = __ pc(); 1711 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1712 BLOCK_COMMENT("Entry:"); 1713 } 1714 1715 array_overlap_test(nooverlap_target, Address::times_2); 1716 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1717 // r9 and r10 may be used to save non-volatile registers 1718 1719 // 'from', 'to' and 'count' are now valid 1720 __ movptr(word_count, count); 1721 __ shrptr(count, 2); // count => qword_count 1722 1723 // Copy from high to low addresses. Use 'to' as scratch. 
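
    // Worked example (descriptive): for count == 7 jshorts, qword_count
    // becomes 1.  The tests below first copy the odd trailing word
    // (element 6), then the trailing dword (elements 4-5), and the bulk
    // qword loop finally moves elements 0-3, walking from high addresses
    // to low.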
1724 1725 // Check for and copy trailing word 1726 __ testl(word_count, 1); 1727 __ jccb(Assembler::zero, L_copy_4_bytes); 1728 __ movw(rax, Address(from, word_count, Address::times_2, -2)); 1729 __ movw(Address(to, word_count, Address::times_2, -2), rax); 1730 1731 // Check for and copy trailing dword 1732 __ BIND(L_copy_4_bytes); 1733 __ testl(word_count, 2); 1734 __ jcc(Assembler::zero, L_copy_bytes); 1735 __ movl(rax, Address(from, qword_count, Address::times_8)); 1736 __ movl(Address(to, qword_count, Address::times_8), rax); 1737 __ jmp(L_copy_bytes); 1738 1739 // Copy trailing qwords 1740 __ BIND(L_copy_8_bytes); 1741 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1742 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1743 __ decrement(qword_count); 1744 __ jcc(Assembler::notZero, L_copy_8_bytes); 1745 1746 restore_arg_regs(); 1747 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1748 __ xorptr(rax, rax); // return 0 1749 __ leave(); // required for proper stackwalking of RuntimeStub frame 1750 __ ret(0); 1751 1752 // Copy in multi-byte chunks 1753 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1754 1755 restore_arg_regs(); 1756 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1757 __ xorptr(rax, rax); // return 0 1758 __ leave(); // required for proper stackwalking of RuntimeStub frame 1759 __ ret(0); 1760 1761 return start; 1762 } 1763 1764 // Arguments: 1765 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1766 // ignored 1767 // is_oop - true => oop array, so generate store check code 1768 // name - stub name string 1769 // 1770 // Inputs: 1771 // c_rarg0 - source array address 1772 // c_rarg1 - destination array address 1773 // c_rarg2 - element count, treated as ssize_t, can be zero 1774 // 1775 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 1776 // the hardware handle it. The two dwords within qwords that span 1777 // cache line boundaries will still be loaded and stored atomically. 1778 // 1779 // Side Effects: 1780 // disjoint_int_copy_entry is set to the no-overlap entry point 1781 // used by generate_conjoint_int_oop_copy(). 1782 // 1783 address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry, 1784 const char *name, bool dest_uninitialized = false) { 1785 __ align(CodeEntryAlignment); 1786 StubCodeMark mark(this, "StubRoutines", name); 1787 address start = __ pc(); 1788 1789 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit; 1790 const Register from = rdi; // source array address 1791 const Register to = rsi; // destination array address 1792 const Register count = rdx; // elements count 1793 const Register dword_count = rcx; 1794 const Register qword_count = count; 1795 const Register end_from = from; // source array end address 1796 const Register end_to = to; // destination array end address 1797 // End pointers are inclusive, and if count is not zero they point 1798 // to the last unit copied: end_to[0] := end_from[0] 1799 1800 __ enter(); // required for proper stackwalking of RuntimeStub frame 1801 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
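// A "clean" int here means the 64-bit register holds a properly
// sign-extended 32-bit value (the upper half replicates bit 31);
// assert_clean_int() is a debug-build check that the caller has not
// left garbage in the high half of c_rarg2.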
1802 1803 if (entry != NULL) { 1804 *entry = __ pc(); 1805 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1806 BLOCK_COMMENT("Entry:"); 1807 } 1808 1809 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1810 // r9 and r10 may be used to save non-volatile registers 1811 1812 BarrierSetCodeGen *bs = Universe::heap()->barrier_set()->code_gen(); 1813 DecoratorSet decorators = DEST_COVARIANT | DEST_DISJOINT; 1814 BasicType type = is_oop ? T_OBJECT : T_INT; 1815 if (dest_uninitialized) { 1816 decorators |= DEST_NOT_INITIALIZED; 1817 } 1818 if (aligned) { 1819 decorators |= ACCESS_ALIGNED; 1820 } 1821 1822 bs->arraycopy_prologue(_masm, decorators, type, from, to, count); 1823 1824 // 'from', 'to' and 'count' are now valid 1825 __ movptr(dword_count, count); 1826 __ shrptr(count, 1); // count => qword_count 1827 1828 // Copy from low to high addresses. Use 'to' as scratch. 1829 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1830 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1831 __ negptr(qword_count); 1832 __ jmp(L_copy_bytes); 1833 1834 // Copy trailing qwords 1835 __ BIND(L_copy_8_bytes); 1836 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1837 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1838 __ increment(qword_count); 1839 __ jcc(Assembler::notZero, L_copy_8_bytes); 1840 1841 // Check for and copy trailing dword 1842 __ BIND(L_copy_4_bytes); 1843 __ testl(dword_count, 1); // Only byte test since the value is 0 or 1 1844 __ jccb(Assembler::zero, L_exit); 1845 __ movl(rax, Address(end_from, 8)); 1846 __ movl(Address(end_to, 8), rax); 1847 1848 __ BIND(L_exit); 1849 bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count); 1850 restore_arg_regs(); 1851 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 1852 __ xorptr(rax, rax); // return 0 1853 __ leave(); // required for proper stackwalking of RuntimeStub frame 1854 __ ret(0); 1855 1856 // Copy in multi-byte chunks 1857 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1858 __ jmp(L_copy_4_bytes); 1859 1860 return start; 1861 } 1862 1863 // Arguments: 1864 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1865 // ignored 1866 // is_oop - true => oop array, so generate store check code 1867 // name - stub name string 1868 // 1869 // Inputs: 1870 // c_rarg0 - source array address 1871 // c_rarg1 - destination array address 1872 // c_rarg2 - element count, treated as ssize_t, can be zero 1873 // 1874 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 1875 // the hardware handle it. The two dwords within qwords that span 1876 // cache line boundaries will still be loaded and stored atomically.
1877 // 1878 address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target, 1879 address *entry, const char *name, 1880 bool dest_uninitialized = false) { 1881 __ align(CodeEntryAlignment); 1882 StubCodeMark mark(this, "StubRoutines", name); 1883 address start = __ pc(); 1884 1885 Label L_copy_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit; 1886 const Register from = rdi; // source array address 1887 const Register to = rsi; // destination array address 1888 const Register count = rdx; // elements count 1889 const Register dword_count = rcx; 1890 const Register qword_count = count; 1891 1892 __ enter(); // required for proper stackwalking of RuntimeStub frame 1893 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1894 1895 if (entry != NULL) { 1896 *entry = __ pc(); 1897 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1898 BLOCK_COMMENT("Entry:"); 1899 } 1900 1901 array_overlap_test(nooverlap_target, Address::times_4); 1902 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1903 // r9 and r10 may be used to save non-volatile registers 1904 1905 BarrierSetCodeGen *bs = Universe::heap()->barrier_set()->code_gen(); 1906 DecoratorSet decorators = DEST_COVARIANT | DEST_CONJOINT; 1907 BasicType type = is_oop ? T_OBJECT : T_INT; 1908 if (dest_uninitialized) { 1909 decorators |= DEST_NOT_INITIALIZED; 1910 } 1911 if (aligned) { 1912 decorators |= ACCESS_ALIGNED; 1913 } 1914 1915 // no registers are destroyed by this call 1916 bs->arraycopy_prologue(_masm, decorators, type, from, to, count); 1917 1918 assert_clean_int(count, rax); // Make sure 'count' is clean int. 1919 // 'from', 'to' and 'count' are now valid 1920 __ movptr(dword_count, count); 1921 __ shrptr(count, 1); // count => qword_count 1922 1923 // Copy from high to low addresses. Use 'to' as scratch. 
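// The descending order is what makes the conjoint case safe: this path
// is taken only when the ranges may overlap with 'to' above 'from'
// (array_overlap_test already dispatched non-overlapping calls to the
// disjoint stub), so copying high-to-low reads every source element
// before it can be overwritten.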
1924 1925 // Check for and copy trailing dword 1926 __ testl(dword_count, 1); 1927 __ jcc(Assembler::zero, L_copy_bytes); 1928 __ movl(rax, Address(from, dword_count, Address::times_4, -4)); 1929 __ movl(Address(to, dword_count, Address::times_4, -4), rax); 1930 __ jmp(L_copy_bytes); 1931 1932 // Copy trailing qwords 1933 __ BIND(L_copy_8_bytes); 1934 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1935 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1936 __ decrement(qword_count); 1937 __ jcc(Assembler::notZero, L_copy_8_bytes); 1938 1939 if (is_oop) { 1940 __ jmp(L_exit); 1941 } 1942 restore_arg_regs(); 1943 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 1944 __ xorptr(rax, rax); // return 0 1945 __ leave(); // required for proper stackwalking of RuntimeStub frame 1946 __ ret(0); 1947 1948 // Copy in multi-byte chunks 1949 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1950 1951 __ BIND(L_exit); 1952 bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count); 1953 restore_arg_regs(); 1954 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 1955 __ xorptr(rax, rax); // return 0 1956 __ leave(); // required for proper stackwalking of RuntimeStub frame 1957 __ ret(0); 1958 1959 return start; 1960 } 1961 1962 // Arguments: 1963 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 1964 // ignored 1965 // is_oop - true => oop array, so generate store check code 1966 // name - stub name string 1967 // 1968 // Inputs: 1969 // c_rarg0 - source array address 1970 // c_rarg1 - destination array address 1971 // c_rarg2 - element count, treated as ssize_t, can be zero 1972 // 1973 // Side Effects: 1974 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the 1975 // no-overlap entry point used by generate_conjoint_long_oop_copy(). 1976 // 1977 address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry, 1978 const char *name, bool dest_uninitialized = false) { 1979 __ align(CodeEntryAlignment); 1980 StubCodeMark mark(this, "StubRoutines", name); 1981 address start = __ pc(); 1982 1983 Label L_copy_bytes, L_copy_8_bytes, L_exit; 1984 const Register from = rdi; // source array address 1985 const Register to = rsi; // destination array address 1986 const Register qword_count = rdx; // elements count 1987 const Register end_from = from; // source array end address 1988 const Register end_to = rcx; // destination array end address 1989 const Register saved_count = r11; 1990 // End pointers are inclusive, and if count is not zero they point 1991 // to the last unit copied: end_to[0] := end_from[0] 1992 1993 __ enter(); // required for proper stackwalking of RuntimeStub frame 1994 // Save no-overlap entry point for generate_conjoint_long_oop_copy() 1995 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1996 1997 if (entry != NULL) { 1998 *entry = __ pc(); 1999 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2000 BLOCK_COMMENT("Entry:"); 2001 } 2002 2003 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 2004 // r9 and r10 may be used to save non-volatile registers 2005 // 'from', 'to' and 'qword_count' are now valid 2006 2007 BarrierSetCodeGen *bs = Universe::heap()->barrier_set()->code_gen(); 2008 DecoratorSet decorators = DEST_COVARIANT | DEST_DISJOINT; 2009 BasicType type = is_oop ?
T_OBJECT : T_LONG; 2010 if (dest_uninitialized) { 2011 decorators |= DEST_NOT_INITIALIZED; 2012 } 2013 if (aligned) { 2014 decorators |= ACCESS_ALIGNED; 2015 } 2016 2017 bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count); 2018 2019 // Copy from low to high addresses. Use 'to' as scratch. 2020 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 2021 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 2022 __ negptr(qword_count); 2023 __ jmp(L_copy_bytes); 2024 2025 // Copy trailing qwords 2026 __ BIND(L_copy_8_bytes); 2027 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 2028 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 2029 __ increment(qword_count); 2030 __ jcc(Assembler::notZero, L_copy_8_bytes); 2031 2032 if (is_oop) { 2033 __ jmp(L_exit); 2034 } else { 2035 restore_arg_regs(); 2036 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2037 __ xorptr(rax, rax); // return 0 2038 __ leave(); // required for proper stackwalking of RuntimeStub frame 2039 __ ret(0); 2040 } 2041 2042 // Copy in multi-byte chunks 2043 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2044 2045 __ BIND(L_exit); 2046 bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count); 2047 restore_arg_regs(); 2048 if (is_oop) { 2049 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2050 } else { 2051 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2052 } 2053 __ xorptr(rax, rax); // return 0 2054 __ leave(); // required for proper stackwalking of RuntimeStub frame 2055 __ ret(0); 2056 2057 return start; 2058 } 2059 2060 // Arguments: 2061 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2062 // ignored 2063 // is_oop - true => oop array, so generate store check code 2064 // name - stub name string 2065 // 2066 // Inputs: 2067 // c_rarg0 - source array address 2068 // c_rarg1 - destination array address 2069 // c_rarg2 - element count, treated as ssize_t, can be zero 2070 // 2071 address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, 2072 address nooverlap_target, address *entry, 2073 const char *name, bool dest_uninitialized = false) { 2074 __ align(CodeEntryAlignment); 2075 StubCodeMark mark(this, "StubRoutines", name); 2076 address start = __ pc(); 2077 2078 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2079 const Register from = rdi; // source array address 2080 const Register to = rsi; // destination array address 2081 const Register qword_count = rdx; // elements count 2082 const Register saved_count = rcx; 2083 2084 __ enter(); // required for proper stackwalking of RuntimeStub frame 2085 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2086 2087 if (entry != NULL) { 2088 *entry = __ pc(); 2089 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2090 BLOCK_COMMENT("Entry:"); 2091 } 2092 2093 array_overlap_test(nooverlap_target, Address::times_8); 2094 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 2095 // r9 and r10 may be used to save non-volatile registers 2096 // 'from', 'to' and 'qword_count' are now valid 2097 2098 BarrierSetCodeGen *bs = Universe::heap()->barrier_set()->code_gen(); 2099 DecoratorSet decorators = DEST_COVARIANT | DEST_CONJOINT; 2100 BasicType type = is_oop ?
T_OBJECT : T_LONG; 2101 if (dest_uninitialized) { 2102 decorators |= DEST_NOT_INITIALIZED; 2103 } 2104 if (aligned) { 2105 decorators |= ACCESS_ALIGNED; 2106 } 2107 2108 bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count); 2109 2110 __ jmp(L_copy_bytes); 2111 2112 // Copy trailing qwords 2113 __ BIND(L_copy_8_bytes); 2114 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2115 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2116 __ decrement(qword_count); 2117 __ jcc(Assembler::notZero, L_copy_8_bytes); 2118 2119 if (is_oop) { 2120 __ jmp(L_exit); 2121 } else { 2122 restore_arg_regs(); 2123 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2124 __ xorptr(rax, rax); // return 0 2125 __ leave(); // required for proper stackwalking of RuntimeStub frame 2126 __ ret(0); 2127 } 2128 2129 // Copy in multi-byte chunks 2130 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2131 2132 __ BIND(L_exit); 2133 bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count); 2134 restore_arg_regs(); 2135 if (is_oop) { 2136 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2137 } else { 2138 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2139 } 2140 __ xorptr(rax, rax); // return 0 2141 __ leave(); // required for proper stackwalking of RuntimeStub frame 2142 __ ret(0); 2143 2144 return start; 2145 } 2146 2147 2148 // Helper for generating a dynamic type check. 2149 // Smashes no registers. 2150 void generate_type_check(Register sub_klass, 2151 Register super_check_offset, 2152 Register super_klass, 2153 Label& L_success) { 2154 assert_different_registers(sub_klass, super_check_offset, super_klass); 2155 2156 BLOCK_COMMENT("type_check:"); 2157 2158 Label L_miss; 2159 2160 __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL, 2161 super_check_offset); 2162 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL); 2163 2164 // Fall through on failure!
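// Conceptually, the two MacroAssembler helpers above implement something
// like this sketch (illustrative only; field access abbreviated):
//
//   // fast path: one load and compare at the displaced check offset
//   if (*(Klass**)((address)sub_klass + super_check_offset) == super_klass)
//     goto L_success;
//   // slow path: linear scan of sub_klass's secondary-supers array,
//   // caching a hit in the secondary-super cache for next time
//
// Anything that fails both falls through to L_miss.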
2165 __ BIND(L_miss); 2166 } 2167 2168 // 2169 // Generate checkcasting array copy stub 2170 // 2171 // Input: 2172 // c_rarg0 - source array address 2173 // c_rarg1 - destination array address 2174 // c_rarg2 - element count, treated as ssize_t, can be zero 2175 // c_rarg3 - size_t ckoff (super_check_offset) 2176 // not Win64 2177 // c_rarg4 - oop ckval (super_klass) 2178 // Win64 2179 // rsp+40 - oop ckval (super_klass) 2180 // 2181 // Output: 2182 // rax == 0 - success 2183 // rax == -1^K - failure, where K is partial transfer count 2184 // 2185 address generate_checkcast_copy(const char *name, address *entry, 2186 bool dest_uninitialized = false) { 2187 2188 Label L_load_element, L_store_element, L_do_card_marks, L_done; 2189 2190 // Input registers (after setup_arg_regs) 2191 const Register from = rdi; // source array address 2192 const Register to = rsi; // destination array address 2193 const Register length = rdx; // elements count 2194 const Register ckoff = rcx; // super_check_offset 2195 const Register ckval = r8; // super_klass 2196 2197 // Registers used as temps (r13, r14 are save-on-entry) 2198 const Register end_from = from; // source array end address 2199 const Register end_to = r13; // destination array end address 2200 const Register count = rdx; // -(count_remaining) 2201 const Register r14_length = r14; // saved copy of length 2202 // End pointers are inclusive, and if length is not zero they point 2203 // to the last unit copied: end_to[0] := end_from[0] 2204 2205 const Register rax_oop = rax; // actual oop copied 2206 const Register r11_klass = r11; // oop._klass 2207 2208 //--------------------------------------------------------------- 2209 // Assembler stub will be used for this call to arraycopy 2210 // if the two arrays are subtypes of Object[] but the 2211 // destination array type is not equal to or a supertype 2212 // of the source type. Each element must be separately 2213 // checked. 2214 2215 __ align(CodeEntryAlignment); 2216 StubCodeMark mark(this, "StubRoutines", name); 2217 address start = __ pc(); 2218 2219 __ enter(); // required for proper stackwalking of RuntimeStub frame 2220 2221 #ifdef ASSERT 2222 // caller guarantees that the arrays really are different 2223 // otherwise, we would have to make conjoint checks 2224 { Label L; 2225 array_overlap_test(L, TIMES_OOP); 2226 __ stop("checkcast_copy within a single array"); 2227 __ bind(L); 2228 } 2229 #endif //ASSERT 2230 2231 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx 2232 // ckoff => rcx, ckval => r8 2233 // r9 and r10 may be used to save non-volatile registers 2234 #ifdef _WIN64 2235 // last argument (#4) is on stack on Win64 2236 __ movptr(ckval, Address(rsp, 6 * wordSize)); 2237 #endif 2238 2239 // Caller of this entry point must set up the argument registers. 2240 if (entry != NULL) { 2241 *entry = __ pc(); 2242 BLOCK_COMMENT("Entry:"); 2243 } 2244 2245 // allocate spill slots for r13, r14 2246 enum { 2247 saved_r13_offset, 2248 saved_r14_offset, 2249 saved_rbp_offset 2250 }; 2251 __ subptr(rsp, saved_rbp_offset * wordSize); 2252 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13); 2253 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14); 2254 2255 // check that int operands are properly extended to size_t 2256 assert_clean_int(length, rax); 2257 assert_clean_int(ckoff, rax); 2258 2259 #ifdef ASSERT 2260 BLOCK_COMMENT("assert consistent ckoff/ckval"); 2261 // The ckoff and ckval must be mutually consistent, 2262 // even though caller generates both. 
2263 { Label L; 2264 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2265 __ cmpl(ckoff, Address(ckval, sco_offset)); 2266 __ jcc(Assembler::equal, L); 2267 __ stop("super_check_offset inconsistent"); 2268 __ bind(L); 2269 } 2270 #endif //ASSERT 2271 2272 // Loop-invariant addresses. They are exclusive end pointers. 2273 Address end_from_addr(from, length, TIMES_OOP, 0); 2274 Address end_to_addr(to, length, TIMES_OOP, 0); 2275 // Loop-variant addresses. They assume post-incremented count < 0. 2276 Address from_element_addr(end_from, count, TIMES_OOP, 0); 2277 Address to_element_addr(end_to, count, TIMES_OOP, 0); 2278 2279 BarrierSetCodeGen *bs = Universe::heap()->barrier_set()->code_gen(); 2280 DecoratorSet decorators = DEST_CONTRAVARIANT | DEST_DISJOINT; 2281 BasicType type = T_OBJECT; 2282 if (dest_uninitialized) { 2283 decorators |= DEST_NOT_INITIALIZED; 2284 } 2285 2286 bs->arraycopy_prologue(_masm, decorators, type, from, to, count); 2287 2288 // Copy from low to high addresses, indexed from the end of each array. 2289 __ lea(end_from, end_from_addr); 2290 __ lea(end_to, end_to_addr); 2291 __ movptr(r14_length, length); // save a copy of the length 2292 assert(length == count, ""); // else fix next line: 2293 __ negptr(count); // negate and test the length 2294 __ jcc(Assembler::notZero, L_load_element); 2295 2296 // Empty array: Nothing to do. 2297 __ xorptr(rax, rax); // return 0 on (trivial) success 2298 __ jmp(L_done); 2299 2300 // ======== begin loop ======== 2301 // (Loop is rotated; its entry is L_load_element.) 2302 // Loop control: 2303 // for (count = -count; count != 0; count++) 2304 // Base pointers src, dst are biased by 8*(count-1), to last element. 2305 __ align(OptoLoopAlignment); 2306 2307 __ BIND(L_store_element); 2308 __ store_heap_oop(to_element_addr, rax_oop); // store the oop 2309 __ increment(count); // increment the count toward zero 2310 __ jcc(Assembler::zero, L_do_card_marks); 2311 2312 // ======== loop entry is here ======== 2313 __ BIND(L_load_element); 2314 __ load_heap_oop(rax_oop, from_element_addr); // load the oop 2315 __ testptr(rax_oop, rax_oop); 2316 __ jcc(Assembler::zero, L_store_element); 2317 2318 __ load_klass(r11_klass, rax_oop); // query the object klass 2319 generate_type_check(r11_klass, ckoff, ckval, L_store_element); 2320 // ======== end loop ======== 2321 2322 // It was a real error; we must depend on the caller to finish the job. 2323 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops. 2324 // Emit GC store barriers for the oops we have copied (r14 + rdx), 2325 // and report their number to the caller. 2326 assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1); 2327 Label L_post_barrier; 2328 __ addptr(r14_length, count); // K = (original - remaining) oops 2329 __ movptr(rax, r14_length); // save the value 2330 __ notptr(rax); // report (-1^K) to caller (does not affect flags) 2331 __ jccb(Assembler::notZero, L_post_barrier); 2332 __ jmp(L_done); // K == 0, nothing was copied, skip post barrier 2333 2334 // Come here on success only. 2335 __ BIND(L_do_card_marks); 2336 __ xorptr(rax, rax); // return 0 on success 2337 2338 __ BIND(L_post_barrier); 2339 bs->arraycopy_epilogue(_masm, decorators, type, from, to, r14_length); 2340 2341 // Common exit point (success or failure).
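// On the failure path rax was set above to ~K, which in two's complement
// is -K-1 (the "-1^K" of the header comment); the caller recovers the
// number of elements already copied as K = ~rax. On success rax is
// simply 0.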
2342 __ BIND(L_done); 2343 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 2344 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 2345 restore_arg_regs(); 2346 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free 2347 __ leave(); // required for proper stackwalking of RuntimeStub frame 2348 __ ret(0); 2349 2350 return start; 2351 } 2352 2353 // 2354 // Generate 'unsafe' array copy stub 2355 // Though just as safe as the other stubs, it takes an unscaled 2356 // size_t argument instead of an element count. 2357 // 2358 // Input: 2359 // c_rarg0 - source array address 2360 // c_rarg1 - destination array address 2361 // c_rarg2 - byte count, treated as ssize_t, can be zero 2362 // 2363 // Examines the alignment of the operands and dispatches 2364 // to a long, int, short, or byte copy loop. 2365 // 2366 address generate_unsafe_copy(const char *name, 2367 address byte_copy_entry, address short_copy_entry, 2368 address int_copy_entry, address long_copy_entry) { 2369 2370 Label L_long_aligned, L_int_aligned, L_short_aligned; 2371 2372 // Input registers (before setup_arg_regs) 2373 const Register from = c_rarg0; // source array address 2374 const Register to = c_rarg1; // destination array address 2375 const Register size = c_rarg2; // byte count (size_t) 2376 2377 // Register used as a temp 2378 const Register bits = rax; // test copy of low bits 2379 2380 __ align(CodeEntryAlignment); 2381 StubCodeMark mark(this, "StubRoutines", name); 2382 address start = __ pc(); 2383 2384 __ enter(); // required for proper stackwalking of RuntimeStub frame 2385 2386 // bump this on entry, not on exit: 2387 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr); 2388 2389 __ mov(bits, from); 2390 __ orptr(bits, to); 2391 __ orptr(bits, size); 2392 2393 __ testb(bits, BytesPerLong-1); 2394 __ jccb(Assembler::zero, L_long_aligned); 2395 2396 __ testb(bits, BytesPerInt-1); 2397 __ jccb(Assembler::zero, L_int_aligned); 2398 2399 __ testb(bits, BytesPerShort-1); 2400 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry)); 2401 2402 __ BIND(L_short_aligned); 2403 __ shrptr(size, LogBytesPerShort); // size => short_count 2404 __ jump(RuntimeAddress(short_copy_entry)); 2405 2406 __ BIND(L_int_aligned); 2407 __ shrptr(size, LogBytesPerInt); // size => int_count 2408 __ jump(RuntimeAddress(int_copy_entry)); 2409 2410 __ BIND(L_long_aligned); 2411 __ shrptr(size, LogBytesPerLong); // size => qword_count 2412 __ jump(RuntimeAddress(long_copy_entry)); 2413 2414 return start; 2415 } 2416 2417 // Perform range checks on the proposed arraycopy. 2418 // Kills temp, but nothing else. 2419 // Also, clean the sign bits of src_pos and dst_pos. 
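// Note that the checks below use 32-bit adds followed by an unsigned
// 'above' comparison: since src_pos, dst_pos and length are each
// non-negative 32-bit values, the sum cannot wrap a 32-bit register,
// and a sum exceeding INT_MAX compares as a huge unsigned value and
// correctly fails against the array length.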
2420 void arraycopy_range_checks(Register src, // source array oop (c_rarg0) 2421 Register src_pos, // source position (c_rarg1) 2422 Register dst, // destination array oop (c_rarg2) 2423 Register dst_pos, // destination position (c_rarg3) 2424 Register length, 2425 Register temp, 2426 Label& L_failed) { 2427 BLOCK_COMMENT("arraycopy_range_checks:"); 2428 2429 // if (src_pos + length > arrayOop(src)->length()) FAIL; 2430 __ movl(temp, length); 2431 __ addl(temp, src_pos); // src_pos + length 2432 __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes())); 2433 __ jcc(Assembler::above, L_failed); 2434 2435 // if (dst_pos + length > arrayOop(dst)->length()) FAIL; 2436 __ movl(temp, length); 2437 __ addl(temp, dst_pos); // dst_pos + length 2438 __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes())); 2439 __ jcc(Assembler::above, L_failed); 2440 2441 // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'. 2442 // Move with sign extension can be used since they are positive. 2443 __ movslq(src_pos, src_pos); 2444 __ movslq(dst_pos, dst_pos); 2445 2446 BLOCK_COMMENT("arraycopy_range_checks done"); 2447 } 2448 2449 // 2450 // Generate generic array copy stubs 2451 // 2452 // Input: 2453 // c_rarg0 - src oop 2454 // c_rarg1 - src_pos (32-bits) 2455 // c_rarg2 - dst oop 2456 // c_rarg3 - dst_pos (32-bits) 2457 // not Win64 2458 // c_rarg4 - element count (32-bits) 2459 // Win64 2460 // rsp+40 - element count (32-bits) 2461 // 2462 // Output: 2463 // rax == 0 - success 2464 // rax == -1^K - failure, where K is partial transfer count 2465 // 2466 address generate_generic_copy(const char *name, 2467 address byte_copy_entry, address short_copy_entry, 2468 address int_copy_entry, address oop_copy_entry, 2469 address long_copy_entry, address checkcast_copy_entry) { 2470 2471 Label L_failed, L_failed_0, L_objArray; 2472 Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs; 2473 2474 // Input registers 2475 const Register src = c_rarg0; // source array oop 2476 const Register src_pos = c_rarg1; // source position 2477 const Register dst = c_rarg2; // destination array oop 2478 const Register dst_pos = c_rarg3; // destination position 2479 #ifndef _WIN64 2480 const Register length = c_rarg4; 2481 #else 2482 const Address length(rsp, 6 * wordSize); // elements count is on stack on Win64 2483 #endif 2484 2485 { int modulus = CodeEntryAlignment; 2486 int target = modulus - 5; // 5 = sizeof jmp(L_failed) 2487 int advance = target - (__ offset() % modulus); 2488 if (advance < 0) advance += modulus; 2489 if (advance > 0) __ nop(advance); 2490 } 2491 StubCodeMark mark(this, "StubRoutines", name); 2492 2493 // Short-hop target to L_failed. Makes for denser prologue code. 2494 __ BIND(L_failed_0); 2495 __ jmp(L_failed); 2496 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed"); 2497 2498 __ align(CodeEntryAlignment); 2499 address start = __ pc(); 2500 2501 __ enter(); // required for proper stackwalking of RuntimeStub frame 2502 2503 // bump this on entry, not on exit: 2504 inc_counter_np(SharedRuntime::_generic_array_copy_ctr); 2505 2506 //----------------------------------------------------------------------- 2507 // Assembler stub will be used for this call to arraycopy 2508 // if the following conditions are met: 2509 // 2510 // (1) src and dst must not be null. 2511 // (2) src_pos must not be negative. 2512 // (3) dst_pos must not be negative. 2513 // (4) length must not be negative. 2514 // (5) src klass and dst klass should be the same and not NULL.
2515 // (6) src and dst should be arrays. 2516 // (7) src_pos + length must not exceed length of src. 2517 // (8) dst_pos + length must not exceed length of dst. 2518 // 2519 2520 // if (src == NULL) return -1; 2521 __ testptr(src, src); // src oop 2522 size_t j1off = __ offset(); 2523 __ jccb(Assembler::zero, L_failed_0); 2524 2525 // if (src_pos < 0) return -1; 2526 __ testl(src_pos, src_pos); // src_pos (32-bits) 2527 __ jccb(Assembler::negative, L_failed_0); 2528 2529 // if (dst == NULL) return -1; 2530 __ testptr(dst, dst); // dst oop 2531 __ jccb(Assembler::zero, L_failed_0); 2532 2533 // if (dst_pos < 0) return -1; 2534 __ testl(dst_pos, dst_pos); // dst_pos (32-bits) 2535 size_t j4off = __ offset(); 2536 __ jccb(Assembler::negative, L_failed_0); 2537 2538 // The first four tests are very dense code, 2539 // but not quite dense enough to put four 2540 // jumps in a 16-byte instruction fetch buffer. 2541 // That's good, because some branch predictors 2542 // do not like jumps so close together. 2543 // Make sure of this. 2544 guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps"); 2545 2546 // registers used as temp 2547 const Register r11_length = r11; // elements count to copy 2548 const Register r10_src_klass = r10; // array klass 2549 2550 // if (length < 0) return -1; 2551 __ movl(r11_length, length); // length (elements count, 32-bits value) 2552 __ testl(r11_length, r11_length); 2553 __ jccb(Assembler::negative, L_failed_0); 2554 2555 __ load_klass(r10_src_klass, src); 2556 #ifdef ASSERT 2557 // assert(src->klass() != NULL); 2558 { 2559 BLOCK_COMMENT("assert klasses not null {"); 2560 Label L1, L2; 2561 __ testptr(r10_src_klass, r10_src_klass); 2562 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL 2563 __ bind(L1); 2564 __ stop("broken null klass"); 2565 __ bind(L2); 2566 __ load_klass(rax, dst); 2567 __ cmpq(rax, 0); 2568 __ jcc(Assembler::equal, L1); // this would be broken also 2569 BLOCK_COMMENT("} assert klasses not null done"); 2570 } 2571 #endif 2572 2573 // Load layout helper (32-bits) 2574 // 2575 // |array_tag| | header_size | element_type | |log2_element_size| 2576 // 32 30 24 16 8 2 0 2577 // 2578 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 2579 // 2580 2581 const int lh_offset = in_bytes(Klass::layout_helper_offset()); 2582 2583 // Handle objArrays completely differently... 2584 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2585 __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh); 2586 __ jcc(Assembler::equal, L_objArray); 2587 2588 // if (src->klass() != dst->klass()) return -1; 2589 __ load_klass(rax, dst); 2590 __ cmpq(r10_src_klass, rax); 2591 __ jcc(Assembler::notEqual, L_failed); 2592 2593 const Register rax_lh = rax; // layout helper 2594 __ movl(rax_lh, Address(r10_src_klass, lh_offset)); 2595 2596 // if (!src->is_Array()) return -1; 2597 __ cmpl(rax_lh, Klass::_lh_neutral_value); 2598 __ jcc(Assembler::greaterEqual, L_failed); 2599 2600 // At this point, it is known to be a typeArray (array_tag 0x3).
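// Worked example of the layout helper encoding above (header size
// assumed for illustration; it varies with the build, e.g. compressed
// oops): a jint array with a 16-byte header would have
//   lh = (0x3 << 30) | (16 << 16) | (T_INT << 8) | 2 = 0xc0100a02
// i.e. array_tag = 0x3, header_size = 16, element_type = T_INT,
// log2_element_size = 2; the code below picks these fields apart.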
2601 #ifdef ASSERT 2602 { 2603 BLOCK_COMMENT("assert primitive array {"); 2604 Label L; 2605 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift)); 2606 __ jcc(Assembler::greaterEqual, L); 2607 __ stop("must be a primitive array"); 2608 __ bind(L); 2609 BLOCK_COMMENT("} assert primitive array done"); 2610 } 2611 #endif 2612 2613 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2614 r10, L_failed); 2615 2616 // TypeArrayKlass 2617 // 2618 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 2619 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 2620 // 2621 2622 const Register r10_offset = r10; // array offset 2623 const Register rax_elsize = rax_lh; // element size 2624 2625 __ movl(r10_offset, rax_lh); 2626 __ shrl(r10_offset, Klass::_lh_header_size_shift); 2627 __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset 2628 __ addptr(src, r10_offset); // src array offset 2629 __ addptr(dst, r10_offset); // dst array offset 2630 BLOCK_COMMENT("choose copy loop based on element size"); 2631 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize 2632 2633 // next registers should be set before the jump to corresponding stub 2634 const Register from = c_rarg0; // source array address 2635 const Register to = c_rarg1; // destination array address 2636 const Register count = c_rarg2; // elements count 2637 2638 // 'from', 'to', 'count' registers should be set in such order 2639 // since they are the same as 'src', 'src_pos', 'dst'. 2640 2641 __ BIND(L_copy_bytes); 2642 __ cmpl(rax_elsize, 0); 2643 __ jccb(Assembler::notEqual, L_copy_shorts); 2644 __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr 2645 __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr 2646 __ movl2ptr(count, r11_length); // length 2647 __ jump(RuntimeAddress(byte_copy_entry)); 2648 2649 __ BIND(L_copy_shorts); 2650 __ cmpl(rax_elsize, LogBytesPerShort); 2651 __ jccb(Assembler::notEqual, L_copy_ints); 2652 __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr 2653 __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr 2654 __ movl2ptr(count, r11_length); // length 2655 __ jump(RuntimeAddress(short_copy_entry)); 2656 2657 __ BIND(L_copy_ints); 2658 __ cmpl(rax_elsize, LogBytesPerInt); 2659 __ jccb(Assembler::notEqual, L_copy_longs); 2660 __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr 2661 __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr 2662 __ movl2ptr(count, r11_length); // length 2663 __ jump(RuntimeAddress(int_copy_entry)); 2664 2665 __ BIND(L_copy_longs); 2666 #ifdef ASSERT 2667 { 2668 BLOCK_COMMENT("assert long copy {"); 2669 Label L; 2670 __ cmpl(rax_elsize, LogBytesPerLong); 2671 __ jcc(Assembler::equal, L); 2672 __ stop("must be long copy, but elsize is wrong"); 2673 __ bind(L); 2674 BLOCK_COMMENT("} assert long copy done"); 2675 } 2676 #endif 2677 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr 2678 __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr 2679 __ movl2ptr(count, r11_length); // length 2680 __ jump(RuntimeAddress(long_copy_entry)); 2681 2682 // ObjArrayKlass 2683 __ BIND(L_objArray); 2684 // live at this point: r10_src_klass, r11_length, src[_pos], dst[_pos] 2685 2686 Label L_plain_copy, L_checkcast_copy; 2687 // test array classes for subtyping 2688 __ load_klass(rax, dst); 2689 __ cmpq(r10_src_klass, rax); // usual case is exact equality 2690 __ 
jcc(Assembler::notEqual, L_checkcast_copy); 2691 2692 // Identically typed arrays can be copied without element-wise checks. 2693 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2694 r10, L_failed); 2695 2696 __ lea(from, Address(src, src_pos, TIMES_OOP, 2697 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr 2698 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2699 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr 2700 __ movl2ptr(count, r11_length); // length 2701 __ BIND(L_plain_copy); 2702 __ jump(RuntimeAddress(oop_copy_entry)); 2703 2704 __ BIND(L_checkcast_copy); 2705 // live at this point: r10_src_klass, r11_length, rax (dst_klass) 2706 { 2707 // Before looking at dst.length, make sure dst is also an objArray. 2708 __ cmpl(Address(rax, lh_offset), objArray_lh); 2709 __ jcc(Assembler::notEqual, L_failed); 2710 2711 // It is safe to examine both src.length and dst.length. 2712 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2713 rax, L_failed); 2714 2715 const Register r11_dst_klass = r11; 2716 __ load_klass(r11_dst_klass, dst); // reload 2717 2718 // Marshal the base address arguments now, freeing registers. 2719 __ lea(from, Address(src, src_pos, TIMES_OOP, 2720 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2721 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2722 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2723 __ movl(count, length); // length (reloaded) 2724 Register sco_temp = c_rarg3; // this register is free now 2725 assert_different_registers(from, to, count, sco_temp, 2726 r11_dst_klass, r10_src_klass); 2727 assert_clean_int(count, sco_temp); 2728 2729 // Generate the type check. 2730 const int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2731 __ movl(sco_temp, Address(r11_dst_klass, sco_offset)); 2732 assert_clean_int(sco_temp, rax); 2733 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy); 2734 2735 // Fetch destination element klass from the ObjArrayKlass header. 2736 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 2737 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset)); 2738 __ movl( sco_temp, Address(r11_dst_klass, sco_offset)); 2739 assert_clean_int(sco_temp, rax); 2740 2741 // the checkcast_copy loop needs two extra arguments: 2742 assert(c_rarg3 == sco_temp, "#3 already in place"); 2743 // Set up arguments for checkcast_copy_entry. 
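// setup_arg_regs(4) shuffles c_rarg0..c_rarg3 into rdi/rsi/rdx/rcx
// (from/to/length/ckoff, as the checkcast stub expects); ckval must
// already be in r8 at the published checkcast entry point, so it is
// installed by hand below (r8 is c_rarg4 on Linux/Solaris, while on
// Win64 a fifth C argument would have been on the stack).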
2744 setup_arg_regs(4); 2745 __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris 2746 __ jump(RuntimeAddress(checkcast_copy_entry)); 2747 } 2748 2749 __ BIND(L_failed); 2750 __ xorptr(rax, rax); 2751 __ notptr(rax); // return -1 2752 __ leave(); // required for proper stackwalking of RuntimeStub frame 2753 __ ret(0); 2754 2755 return start; 2756 } 2757 2758 void generate_arraycopy_stubs() { 2759 address entry; 2760 address entry_jbyte_arraycopy; 2761 address entry_jshort_arraycopy; 2762 address entry_jint_arraycopy; 2763 address entry_oop_arraycopy; 2764 address entry_jlong_arraycopy; 2765 address entry_checkcast_arraycopy; 2766 2767 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, 2768 "jbyte_disjoint_arraycopy"); 2769 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy, 2770 "jbyte_arraycopy"); 2771 2772 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, 2773 "jshort_disjoint_arraycopy"); 2774 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy, 2775 "jshort_arraycopy"); 2776 2777 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry, 2778 "jint_disjoint_arraycopy"); 2779 StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry, 2780 &entry_jint_arraycopy, "jint_arraycopy"); 2781 2782 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry, 2783 "jlong_disjoint_arraycopy"); 2784 StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry, 2785 &entry_jlong_arraycopy, "jlong_arraycopy"); 2786 2787 2788 if (UseCompressedOops) { 2789 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry, 2790 "oop_disjoint_arraycopy"); 2791 StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry, 2792 &entry_oop_arraycopy, "oop_arraycopy"); 2793 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry, 2794 "oop_disjoint_arraycopy_uninit", 2795 /*dest_uninitialized*/true); 2796 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry, 2797 NULL, "oop_arraycopy_uninit", 2798 /*dest_uninitialized*/true); 2799 } else { 2800 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry, 2801 "oop_disjoint_arraycopy"); 2802 StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry, 2803 &entry_oop_arraycopy, "oop_arraycopy"); 2804 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry, 2805 "oop_disjoint_arraycopy_uninit", 2806 /*dest_uninitialized*/true); 2807 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry, 2808 NULL, "oop_arraycopy_uninit", 2809 /*dest_uninitialized*/true); 2810 } 2811 2812 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 2813 StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, 2814 /*dest_uninitialized*/true); 2815 2816 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", 2817 entry_jbyte_arraycopy, 2818 entry_jshort_arraycopy, 2819 entry_jint_arraycopy, 2820 entry_jlong_arraycopy); 2821 StubRoutines::_generic_arraycopy = 
generate_generic_copy("generic_arraycopy", 2822 entry_jbyte_arraycopy, 2823 entry_jshort_arraycopy, 2824 entry_jint_arraycopy, 2825 entry_oop_arraycopy, 2826 entry_jlong_arraycopy, 2827 entry_checkcast_arraycopy); 2828 2829 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 2830 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 2831 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 2832 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); 2833 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 2834 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 2835 2836 // We don't generate specialized code for HeapWord-aligned source 2837 // arrays, so just use the code we've already generated 2838 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy; 2839 StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy; 2840 2841 StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy; 2842 StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy; 2843 2844 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy; 2845 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; 2846 2847 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy; 2848 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; 2849 2850 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; 2851 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; 2852 2853 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit; 2854 StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit; 2855 } 2856 2857 // AES intrinsic stubs 2858 enum {AESBlockSize = 16}; 2859 2860 address generate_key_shuffle_mask() { 2861 __ align(16); 2862 StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask"); 2863 address start = __ pc(); 2864 __ emit_data64( 0x0405060700010203, relocInfo::none ); 2865 __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none ); 2866 return start; 2867 } 2868 2869 address generate_counter_shuffle_mask() { 2870 __ align(16); 2871 StubCodeMark mark(this, "StubRoutines", "counter_shuffle_mask"); 2872 address start = __ pc(); 2873 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 2874 __ emit_data64(0x0001020304050607, relocInfo::none); 2875 return start; 2876 } 2877 2878 // Utility routine for loading a 128-bit key word in little endian format; 2879 // can optionally specify that the shuffle mask is already in an xmm register 2880 void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 2881 __ movdqu(xmmdst, Address(key, offset)); 2882 if (xmm_shuf_mask != NULL) { 2883 __ pshufb(xmmdst, xmm_shuf_mask); 2884 } else { 2885 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 2886 } 2887 } 2888 2889 // Utility routine to increment a 128-bit counter (the IV in CTR mode) 2890 void inc_counter(Register reg, XMMRegister xmmdst, int inc_delta, Label& next_block) { 2891 __ pextrq(reg, xmmdst, 0x0); 2892 __ addq(reg, inc_delta); 2893 __ pinsrq(xmmdst, reg, 0x0); 2894 __ jcc(Assembler::carryClear, next_block); // jump if no carry 2895 __ pextrq(reg, xmmdst, 0x01); // Carry 2896 __ addq(reg, 0x01); 2897 __
pinsrq(xmmdst, reg, 0x01); //Carry end 2898 __ BIND(next_block); // next instruction 2899 } 2900 2901 // Arguments: 2902 // 2903 // Inputs: 2904 // c_rarg0 - source byte array address 2905 // c_rarg1 - destination byte array address 2906 // c_rarg2 - K (key) in little endian int array 2907 // 2908 address generate_aescrypt_encryptBlock() { 2909 assert(UseAES, "need AES instructions and misaligned SSE support"); 2910 __ align(CodeEntryAlignment); 2911 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); 2912 Label L_doLast; 2913 address start = __ pc(); 2914 2915 const Register from = c_rarg0; // source array address 2916 const Register to = c_rarg1; // destination array address 2917 const Register key = c_rarg2; // key array address 2918 const Register keylen = rax; 2919 2920 const XMMRegister xmm_result = xmm0; 2921 const XMMRegister xmm_key_shuf_mask = xmm1; 2922 // On win64 xmm6-xmm15 must be preserved so don't use them. 2923 const XMMRegister xmm_temp1 = xmm2; 2924 const XMMRegister xmm_temp2 = xmm3; 2925 const XMMRegister xmm_temp3 = xmm4; 2926 const XMMRegister xmm_temp4 = xmm5; 2927 2928 __ enter(); // required for proper stackwalking of RuntimeStub frame 2929 2930 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 2931 // context for the registers used, where all instructions below are using 128-bit mode 2932 // On EVEX without VL and BW, these instructions will all be AVX. 2933 if (VM_Version::supports_avx512vlbw()) { 2934 __ movl(rax, 0xffff); 2935 __ kmovql(k1, rax); 2936 } 2937 2938 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 2939 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 2940 2941 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 2942 __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input 2943 2944 // For encryption, the java expanded key ordering is just what we need 2945 // we don't know if the key is aligned, hence not using load-execute form 2946 2947 load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask); 2948 __ pxor(xmm_result, xmm_temp1); 2949 2950 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 2951 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 2952 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 2953 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 2954 2955 __ aesenc(xmm_result, xmm_temp1); 2956 __ aesenc(xmm_result, xmm_temp2); 2957 __ aesenc(xmm_result, xmm_temp3); 2958 __ aesenc(xmm_result, xmm_temp4); 2959 2960 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 2961 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 2962 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 2963 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 2964 2965 __ aesenc(xmm_result, xmm_temp1); 2966 __ aesenc(xmm_result, xmm_temp2); 2967 __ aesenc(xmm_result, xmm_temp3); 2968 __ aesenc(xmm_result, xmm_temp4); 2969 2970 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 2971 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 2972 2973 __ cmpl(keylen, 44); 2974 __ jccb(Assembler::equal, L_doLast); 2975 2976 __ aesenc(xmm_result, xmm_temp1); 2977 __ aesenc(xmm_result, xmm_temp2); 2978 2979 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 2980 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 2981 2982 __ cmpl(keylen, 52); 2983 __ jccb(Assembler::equal, L_doLast); 2984 2985 __ aesenc(xmm_result, xmm_temp1); 2986 __ aesenc(xmm_result, xmm_temp2); 2987 2988 load_key(xmm_temp1, key, 0xd0, 
xmm_key_shuf_mask); 2989 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 2990 2991 __ BIND(L_doLast); 2992 __ aesenc(xmm_result, xmm_temp1); 2993 __ aesenclast(xmm_result, xmm_temp2); 2994 __ movdqu(Address(to, 0), xmm_result); // store the result 2995 __ xorptr(rax, rax); // return 0 2996 __ leave(); // required for proper stackwalking of RuntimeStub frame 2997 __ ret(0); 2998 2999 return start; 3000 } 3001 3002 3003 // Arguments: 3004 // 3005 // Inputs: 3006 // c_rarg0 - source byte array address 3007 // c_rarg1 - destination byte array address 3008 // c_rarg2 - K (key) in little endian int array 3009 // 3010 address generate_aescrypt_decryptBlock() { 3011 assert(UseAES, "need AES instructions and misaligned SSE support"); 3012 __ align(CodeEntryAlignment); 3013 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); 3014 Label L_doLast; 3015 address start = __ pc(); 3016 3017 const Register from = c_rarg0; // source array address 3018 const Register to = c_rarg1; // destination array address 3019 const Register key = c_rarg2; // key array address 3020 const Register keylen = rax; 3021 3022 const XMMRegister xmm_result = xmm0; 3023 const XMMRegister xmm_key_shuf_mask = xmm1; 3024 // On win64 xmm6-xmm15 must be preserved so don't use them. 3025 const XMMRegister xmm_temp1 = xmm2; 3026 const XMMRegister xmm_temp2 = xmm3; 3027 const XMMRegister xmm_temp3 = xmm4; 3028 const XMMRegister xmm_temp4 = xmm5; 3029 3030 __ enter(); // required for proper stackwalking of RuntimeStub frame 3031 3032 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3033 // context for the registers used, where all instructions below are using 128-bit mode 3034 // On EVEX without VL and BW, these instructions will all be AVX. 3035 if (VM_Version::supports_avx512vlbw()) { 3036 __ movl(rax, 0xffff); 3037 __ kmovql(k1, rax); 3038 } 3039 3040 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3041 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3042 3043 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3044 __ movdqu(xmm_result, Address(from, 0)); 3045 3046 // for decryption java expanded key ordering is rotated one position from what we want 3047 // so we start from 0x10 here and hit 0x00 last 3048 // we don't know if the key is aligned, hence not using load-execute form 3049 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3050 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3051 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3052 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3053 3054 __ pxor (xmm_result, xmm_temp1); 3055 __ aesdec(xmm_result, xmm_temp2); 3056 __ aesdec(xmm_result, xmm_temp3); 3057 __ aesdec(xmm_result, xmm_temp4); 3058 3059 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3060 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3061 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3062 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3063 3064 __ aesdec(xmm_result, xmm_temp1); 3065 __ aesdec(xmm_result, xmm_temp2); 3066 __ aesdec(xmm_result, xmm_temp3); 3067 __ aesdec(xmm_result, xmm_temp4); 3068 3069 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3070 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3071 load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask); 3072 3073 __ cmpl(keylen, 44); 3074 __ jccb(Assembler::equal, L_doLast); 3075 3076 __ aesdec(xmm_result, xmm_temp1); 3077 __ aesdec(xmm_result, xmm_temp2); 3078 3079 
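// A keylen of 44/52/60 ints is the 4*(rounds+1) words of an expanded
// AES-128/192/256 key (10/12/14 rounds), so the extra aesdec pairs and
// the 0xb0-0xe0 round-key loads below only run for the longer keys.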
load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3080 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3081 3082 __ cmpl(keylen, 52); 3083 __ jccb(Assembler::equal, L_doLast); 3084 3085 __ aesdec(xmm_result, xmm_temp1); 3086 __ aesdec(xmm_result, xmm_temp2); 3087 3088 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3089 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3090 3091 __ BIND(L_doLast); 3092 __ aesdec(xmm_result, xmm_temp1); 3093 __ aesdec(xmm_result, xmm_temp2); 3094 3095 // for decryption the aesdeclast operation is always on key+0x00 3096 __ aesdeclast(xmm_result, xmm_temp3); 3097 __ movdqu(Address(to, 0), xmm_result); // store the result 3098 __ xorptr(rax, rax); // return 0 3099 __ leave(); // required for proper stackwalking of RuntimeStub frame 3100 __ ret(0); 3101 3102 return start; 3103 } 3104 3105 3106 // Arguments: 3107 // 3108 // Inputs: 3109 // c_rarg0 - source byte array address 3110 // c_rarg1 - destination byte array address 3111 // c_rarg2 - K (key) in little endian int array 3112 // c_rarg3 - r vector byte array address 3113 // c_rarg4 - input length 3114 // 3115 // Output: 3116 // rax - input length 3117 // 3118 address generate_cipherBlockChaining_encryptAESCrypt() { 3119 assert(UseAES, "need AES instructions and misaligned SSE support"); 3120 __ align(CodeEntryAlignment); 3121 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); 3122 address start = __ pc(); 3123 3124 Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256; 3125 const Register from = c_rarg0; // source array address 3126 const Register to = c_rarg1; // destination array address 3127 const Register key = c_rarg2; // key array address 3128 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3129 // and left with the results of the last encryption block 3130 #ifndef _WIN64 3131 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3132 #else 3133 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3134 const Register len_reg = r11; // pick the volatile windows register 3135 #endif 3136 const Register pos = rax; 3137 3138 // xmm register assignments for the loops below 3139 const XMMRegister xmm_result = xmm0; 3140 const XMMRegister xmm_temp = xmm1; 3141 // keys 0-10 preloaded into xmm2-xmm12 3142 const int XMM_REG_NUM_KEY_FIRST = 2; 3143 const int XMM_REG_NUM_KEY_LAST = 15; 3144 const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3145 const XMMRegister xmm_key10 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10); 3146 const XMMRegister xmm_key11 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11); 3147 const XMMRegister xmm_key12 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12); 3148 const XMMRegister xmm_key13 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13); 3149 3150 __ enter(); // required for proper stackwalking of RuntimeStub frame 3151 3152 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3153 // context for the registers used, where all instructions below are using 128-bit mode 3154 // On EVEX without VL and BW, these instructions will all be AVX. 
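// For the per-key-size loops below: CBC encryption is the recurrence
//   C[i] = AES_K(P[i] ^ C[i-1]), with C[-1] = IV (the r vector);
// each 16-byte block is xored with the previous ciphertext and then run
// through the AES rounds. The chaining makes encryption inherently
// serial, unlike the parallelized decrypt stub further down.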
3155 if (VM_Version::supports_avx512vlbw()) { 3156 __ movl(rax, 0xffff); 3157 __ kmovql(k1, rax); 3158 } 3159 3160 #ifdef _WIN64 3161 // on win64, fill len_reg from stack position 3162 __ movl(len_reg, len_mem); 3163 #else 3164 __ push(len_reg); // Save 3165 #endif 3166 3167 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 3168 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3169 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0 3170 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) { 3171 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3172 offset += 0x10; 3173 } 3174 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 3175 3176 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3177 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3178 __ cmpl(rax, 44); 3179 __ jcc(Assembler::notEqual, L_key_192_256); 3180 3181 // 128 bit code follows here 3182 __ movptr(pos, 0); 3183 __ align(OptoLoopAlignment); 3184 3185 __ BIND(L_loopTop_128); 3186 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3187 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3188 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3189 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) { 3190 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3191 } 3192 __ aesenclast(xmm_result, xmm_key10); 3193 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3194 // no need to store r to memory until we exit 3195 __ addptr(pos, AESBlockSize); 3196 __ subptr(len_reg, AESBlockSize); 3197 __ jcc(Assembler::notEqual, L_loopTop_128); 3198 3199 __ BIND(L_exit); 3200 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object 3201 3202 #ifdef _WIN64 3203 __ movl(rax, len_mem); 3204 #else 3205 __ pop(rax); // return length 3206 #endif 3207 __ leave(); // required for proper stackwalking of RuntimeStub frame 3208 __ ret(0); 3209 3210 __ BIND(L_key_192_256); 3211 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 3212 load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask); 3213 load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask); 3214 __ cmpl(rax, 52); 3215 __ jcc(Assembler::notEqual, L_key_256); 3216 3217 // 192-bit code follows here (could be changed to use more xmm registers) 3218 __ movptr(pos, 0); 3219 __ align(OptoLoopAlignment); 3220 3221 __ BIND(L_loopTop_192); 3222 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3223 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3224 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3225 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) { 3226 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3227 } 3228 __ aesenclast(xmm_result, xmm_key12); 3229 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3230 // no need to store r to memory until we exit 3231 __ addptr(pos, AESBlockSize); 3232 __ subptr(len_reg, AESBlockSize); 3233 __ jcc(Assembler::notEqual, L_loopTop_192); 3234 __ jmp(L_exit); 3235 3236 __ BIND(L_key_256); 3237 // 256-bit code follows here (could be 
  // Safefetch stubs.
  void generate_safefetch(const char* name, int size, address* entry,
                          address* fault_pc, address* continuation_pc) {
    // safefetch signatures:
    //   int      SafeFetch32(int*      adr, int      errValue);
    //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
    //
    // arguments:
    //   c_rarg0 = adr
    //   c_rarg1 = errValue
    //
    // result:
    //   rax = *adr or errValue

    StubCodeMark mark(this, "StubRoutines", name);

    // Entry point, pc or function descriptor.
    *entry = __ pc();

    // Load *adr into c_rarg1, may fault.
    *fault_pc = __ pc();
    switch (size) {
      case 4:
        // int32_t
        __ movl(c_rarg1, Address(c_rarg0, 0));
        break;
      case 8:
        // int64_t
        __ movq(c_rarg1, Address(c_rarg0, 0));
        break;
      default:
        ShouldNotReachHere();
    }

    // return errValue or *adr
    *continuation_pc = __ pc();
    __ movq(rax, c_rarg1);
    __ ret(0);
  }
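  // How SafeFetch is used (illustrative): the load at *fault_pc is allowed
  // to fault. If it does, the VM's signal handler resumes execution at
  // *continuation_pc, where c_rarg1 still holds the caller-supplied
  // errValue. The net effect, sketched in C:
  //
  //   int SafeFetch32(int* adr, int errValue) {
  //     return is_readable(adr) ? *adr : errValue;   // never crashes the VM
  //   }
  //
  // where is_readable() stands in for the fault-and-resume mechanism; no
  // such helper exists in the real code path.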
  // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time
  // to hide instruction latency
  //
  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - r vector byte array address
  //   c_rarg4   - input length
  //
  // Output:
  //   rax       - input length
  //
  address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
    address start = __ pc();

    const Register from = c_rarg0;  // source array address
    const Register to   = c_rarg1;  // destination array address
    const Register key  = c_rarg2;  // key array address
    const Register rvec = c_rarg3;  // r byte array initialized from initvector array address
                                    // and left with the results of the last encryption block
#ifndef _WIN64
    const Register len_reg = c_rarg4;  // src len (must be multiple of blocksize 16)
#else
    const Address len_mem(rbp, 6 * wordSize);  // length is on stack on Win64
    const Register len_reg = r11;      // pick the volatile windows register
#endif
    const Register pos = rax;

    const int PARALLEL_FACTOR = 4;
    const int ROUNDS[3] = { 10, 12, 14 };  // aes rounds for key128, key192, key256

    Label L_exit;
    Label L_singleBlock_loopTopHead[3];   // 128, 192, 256
    Label L_singleBlock_loopTopHead2[3];  // 128, 192, 256
    Label L_singleBlock_loopTop[3];       // 128, 192, 256
    Label L_multiBlock_loopTopHead[3];    // 128, 192, 256
    Label L_multiBlock_loopTop[3];        // 128, 192, 256

    // keys 0-10 preloaded into xmm5-xmm15
    const int XMM_REG_NUM_KEY_FIRST = 5;
    const int XMM_REG_NUM_KEY_LAST  = 15;
    const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
    const XMMRegister xmm_key_last  = as_XMMRegister(XMM_REG_NUM_KEY_LAST);

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

#ifdef _WIN64
    // on win64, fill len_reg from stack position
    __ movl(len_reg, len_mem);
#else
    __ push(len_reg); // Save
#endif
    __ push(rbx);
    // the java expanded key ordering is rotated one position from what we want
    // so we start from 0x10 here and hit 0x00 last
    const XMMRegister xmm_key_shuf_mask = xmm1;  // used temporarily to swap key bytes up front
    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00
    for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) {
      load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
      offset += 0x10;
    }
    load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask);

    const XMMRegister xmm_prev_block_cipher = xmm1;  // holds cipher of previous block

    // registers holding the four results in the parallelized loop
    const XMMRegister xmm_result0 = xmm0;
    const XMMRegister xmm_result1 = xmm2;
    const XMMRegister xmm_result2 = xmm3;
    const XMMRegister xmm_result3 = xmm4;

    __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00));  // initialize with initial rvec

    __ xorptr(pos, pos);

    // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256))
    __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
    __ cmpl(rbx, 52);
    __ jcc(Assembler::equal, L_multiBlock_loopTopHead[1]);
    __ cmpl(rbx, 60);
    __ jcc(Assembler::equal, L_multiBlock_loopTopHead[2]);

#define DoFour(opc, src_reg)           \
    __ opc(xmm_result0, src_reg);      \
    __ opc(xmm_result1, src_reg);      \
    __ opc(xmm_result2, src_reg);      \
    __ opc(xmm_result3, src_reg);      \

    for (int k = 0; k < 3; ++k) {
      __ BIND(L_multiBlock_loopTopHead[k]);
      if (k != 0) {
        __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize);  // see if at least 4 blocks left
        __ jcc(Assembler::less, L_singleBlock_loopTopHead2[k]);
      }
      if (k == 1) {
        __ subptr(rsp, 6 * wordSize);
        __ movdqu(Address(rsp, 0), xmm15);  // save last_key from xmm15
        load_key(xmm15, key, 0xb0);         // 0xb0; 192-bit key goes up to 0xc0
        __ movdqu(Address(rsp, 2 * wordSize), xmm15);
        load_key(xmm1, key, 0xc0);          // 0xc0;
        __ movdqu(Address(rsp, 4 * wordSize), xmm1);
      } else if (k == 2) {
        __ subptr(rsp, 10 * wordSize);
        __ movdqu(Address(rsp, 0), xmm15);  // save last_key from xmm15
        load_key(xmm15, key, 0xd0);         // 0xd0; 256-bit key goes up to 0xe0
        __ movdqu(Address(rsp, 6 * wordSize), xmm15);
        load_key(xmm1, key, 0xe0);          // 0xe0;
        __ movdqu(Address(rsp, 8 * wordSize), xmm1);
        load_key(xmm15, key, 0xb0);         // 0xb0;
        __ movdqu(Address(rsp, 2 * wordSize), xmm15);
        load_key(xmm1, key, 0xc0);          // 0xc0;
        __ movdqu(Address(rsp, 4 * wordSize), xmm1);
      }
      __ align(OptoLoopAlignment);
      __ BIND(L_multiBlock_loopTop[k]);
      __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize);  // see if at least 4 blocks left
      __ jcc(Assembler::less, L_singleBlock_loopTopHead[k]);

      if (k != 0) {
        __ movdqu(xmm15, Address(rsp, 2 * wordSize));
        __ movdqu(xmm1, Address(rsp, 4 * wordSize));
      }

      __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize));  // get next 4 blocks into xmmresult registers
      __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize));

      DoFour(pxor, xmm_key_first);
      if (k == 0) {
        for (int rnum = 1; rnum < ROUNDS[k]; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        DoFour(aesdeclast, xmm_key_last);
      } else if (k == 1) {
        for (int rnum = 1; rnum <= ROUNDS[k] - 2; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        __ movdqu(xmm_key_last, Address(rsp, 0));  // xmm15 needs to be loaded again.
        DoFour(aesdec, xmm1);                      // key : 0xc0
        __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00));  // xmm1 needs to be loaded again
        DoFour(aesdeclast, xmm_key_last);
      } else if (k == 2) {
        for (int rnum = 1; rnum <= ROUNDS[k] - 4; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        DoFour(aesdec, xmm1);   // key : 0xc0
        __ movdqu(xmm15, Address(rsp, 6 * wordSize));
        __ movdqu(xmm1, Address(rsp, 8 * wordSize));
        DoFour(aesdec, xmm15);  // key : 0xd0
        __ movdqu(xmm_key_last, Address(rsp, 0));  // xmm15 needs to be loaded again.
        DoFour(aesdec, xmm1);   // key : 0xe0
        __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00));  // xmm1 needs to be loaded again
        DoFour(aesdeclast, xmm_key_last);
      }

      // for each result, xor with the r vector of previous cipher block
      __ pxor(xmm_result0, xmm_prev_block_cipher);
      __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ pxor(xmm_result1, xmm_prev_block_cipher);
      __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ pxor(xmm_result2, xmm_prev_block_cipher);
      __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ pxor(xmm_result3, xmm_prev_block_cipher);
      __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3 * AESBlockSize));  // this will carry over to next set of blocks
      if (k != 0) {
        __ movdqu(Address(rvec, 0x00), xmm_prev_block_cipher);
      }

      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);  // store 4 results into the next 64 bytes of output
      __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1);
      __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2);
      __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3);

      __ addptr(pos, PARALLEL_FACTOR * AESBlockSize);
      __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize);
      __ jmp(L_multiBlock_loopTop[k]);

      // registers used in the non-parallelized loops
      // xmm register assignments for the loops below
      const XMMRegister xmm_result = xmm0;
      const XMMRegister xmm_prev_block_cipher_save = xmm2;
      const XMMRegister xmm_key11 = xmm3;
      const XMMRegister xmm_key12 = xmm4;
      const XMMRegister key_tmp = xmm4;

      __ BIND(L_singleBlock_loopTopHead[k]);
      if (k == 1) {
        __ addptr(rsp, 6 * wordSize);
      } else if (k == 2) {
        __ addptr(rsp, 10 * wordSize);
      }
      __ cmpptr(len_reg, 0);  // any blocks left?
      __ jcc(Assembler::equal, L_exit);
      __ BIND(L_singleBlock_loopTopHead2[k]);
      if (k == 1) {
        load_key(xmm_key11, key, 0xb0);  // 0xb0; 192-bit key goes up to 0xc0
        load_key(xmm_key12, key, 0xc0);  // 0xc0; 192-bit key goes up to 0xc0
      }
      if (k == 2) {
        load_key(xmm_key11, key, 0xb0);  // 0xb0; 256-bit key goes up to 0xe0
      }
      __ align(OptoLoopAlignment);
      __ BIND(L_singleBlock_loopTop[k]);
      __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));  // get next 16 bytes of cipher input
      __ movdqa(xmm_prev_block_cipher_save, xmm_result);  // save for next r vector
      __ pxor(xmm_result, xmm_key_first);                 // do the aes dec rounds
      for (int rnum = 1; rnum <= 9; rnum++) {
        __ aesdec(xmm_result, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
      }
      if (k == 1) {
        __ aesdec(xmm_result, xmm_key11);
        __ aesdec(xmm_result, xmm_key12);
      }
      if (k == 2) {
        __ aesdec(xmm_result, xmm_key11);
        load_key(key_tmp, key, 0xc0);
        __ aesdec(xmm_result, key_tmp);
        load_key(key_tmp, key, 0xd0);
        __ aesdec(xmm_result, key_tmp);
        load_key(key_tmp, key, 0xe0);
        __ aesdec(xmm_result, key_tmp);
      }

      __ aesdeclast(xmm_result, xmm_key_last);     // xmm15 always came from key+0
      __ pxor(xmm_result, xmm_prev_block_cipher);  // xor with the current r vector
      __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);  // store into the next 16 bytes of output
      // no need to store r to memory until we exit
      __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save);  // set up next r vector with cipher input from this block
      __ addptr(pos, AESBlockSize);
      __ subptr(len_reg, AESBlockSize);
      __ jcc(Assembler::notEqual, L_singleBlock_loopTop[k]);
      if (k != 2) {
        __ jmp(L_exit);
      }
    } // for 128/192/256

    __ BIND(L_exit);
    __ movdqu(Address(rvec, 0), xmm_prev_block_cipher);  // final value of r stored in rvec of CipherBlockChaining object
    __ pop(rbx);
#ifdef _WIN64
    __ movl(rax, len_mem);
#else
    __ pop(rax);  // return length
#endif
    __ leave();   // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }
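  // A note on the parallel structure above (illustrative; AES_decrypt is
  // shorthand): CBC decryption computes
  //
  //   plain[i] = AES_decrypt(cipher[i], key) ^ cipher[i-1]   // cipher[-1] = rvec
  //
  // Every output block depends only on *input* blocks, which are all known
  // up front, so four aesdec chains can be kept in flight at once. The
  // DoFour loop exploits exactly that, and the single-block loop mops up
  // the at most three remaining blocks (or the whole input when fewer than
  // four blocks arrive).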
  address generate_upper_word_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "upper_word_mask");
    address start = __ pc();
    __ emit_data64(0x0000000000000000, relocInfo::none);
    __ emit_data64(0xFFFFFFFF00000000, relocInfo::none);
    return start;
  }

  address generate_shuffle_byte_flip_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }

  // ofs and limit are used for multi-block byte array.
  // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
  address generate_sha1_implCompress(bool multi_block, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf   = c_rarg0;
    Register state = c_rarg1;
    Register ofs   = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister abcd = xmm0;
    const XMMRegister e0 = xmm1;
    const XMMRegister e1 = xmm2;
    const XMMRegister msg0 = xmm3;

    const XMMRegister msg1 = xmm4;
    const XMMRegister msg2 = xmm5;
    const XMMRegister msg3 = xmm6;
    const XMMRegister shuf_mask = xmm7;

    __ enter();

    __ subptr(rsp, 4 * wordSize);

    __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask,
                 buf, state, ofs, limit, rsp, multi_block);

    __ addptr(rsp, 4 * wordSize);

    __ leave();
    __ ret(0);
    return start;
  }
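  // Multi-block contract (illustrative; sha1_compress is shorthand, not a
  // real helper): with multi_block == true the stub mirrors the Java loop
  // in DigestBase.implCompressMultiBlock, roughly
  //
  //   while (ofs <= limit) {
  //     sha1_compress(state, buf + ofs);   // one 64-byte input block
  //     ofs += 64;
  //   }
  //   return ofs;                          // handed back to the caller
  //
  // so a single call from compiled code digests a whole byte-array range.
  // With multi_block == false the compression function runs exactly once.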
  address generate_pshuffle_byte_flip_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask");
    address start = __ pc();
    __ emit_data64(0x0405060700010203, relocInfo::none);
    __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);

    if (VM_Version::supports_avx2()) {
      __ emit_data64(0x0405060700010203, relocInfo::none);  // second copy
      __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
      // _SHUF_00BA
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      // _SHUF_DC00
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
    }

    return start;
  }

  // Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
  address generate_pshuffle_byte_flip_mask_sha512() {
    __ align(32);
    StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask_sha512");
    address start = __ pc();
    if (VM_Version::supports_avx2()) {
      __ emit_data64(0x0001020304050607, relocInfo::none);  // PSHUFFLE_BYTE_FLIP_MASK
      __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
      __ emit_data64(0x1011121314151617, relocInfo::none);
      __ emit_data64(0x18191a1b1c1d1e1f, relocInfo::none);
      __ emit_data64(0x0000000000000000, relocInfo::none);  // MASK_YMM_LO
      __ emit_data64(0x0000000000000000, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
    }

    return start;
  }

  // ofs and limit are used for multi-block byte array.
  // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
  address generate_sha256_implCompress(bool multi_block, const char *name) {
    assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), "");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf   = c_rarg0;
    Register state = c_rarg1;
    Register ofs   = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister msg = xmm0;
    const XMMRegister state0 = xmm1;
    const XMMRegister state1 = xmm2;
    const XMMRegister msgtmp0 = xmm3;

    const XMMRegister msgtmp1 = xmm4;
    const XMMRegister msgtmp2 = xmm5;
    const XMMRegister msgtmp3 = xmm6;
    const XMMRegister msgtmp4 = xmm7;

    const XMMRegister shuf_mask = xmm8;

    __ enter();

    __ subptr(rsp, 4 * wordSize);

    if (VM_Version::supports_sha()) {
      __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
                     buf, state, ofs, limit, rsp, multi_block, shuf_mask);
    } else if (VM_Version::supports_avx2()) {
      __ sha256_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
                     buf, state, ofs, limit, rsp, multi_block, shuf_mask);
    }
    __ addptr(rsp, 4 * wordSize);

    __ leave();
    __ ret(0);
    return start;
  }
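  // Note: fast_sha256 (SHA extensions) and sha256_AVX2 are two backends for
  // the same contract; the choice between them is made once, here at stub
  // generation time, based on the VM_Version feature checks, not per call.
  // Both are handed the 4 scratch words reserved on the stack above.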
  address generate_sha512_implCompress(bool multi_block, const char *name) {
    assert(VM_Version::supports_avx2(), "");
    assert(VM_Version::supports_bmi2(), "");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf   = c_rarg0;
    Register state = c_rarg1;
    Register ofs   = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister msg = xmm0;
    const XMMRegister state0 = xmm1;
    const XMMRegister state1 = xmm2;
    const XMMRegister msgtmp0 = xmm3;
    const XMMRegister msgtmp1 = xmm4;
    const XMMRegister msgtmp2 = xmm5;
    const XMMRegister msgtmp3 = xmm6;
    const XMMRegister msgtmp4 = xmm7;

    const XMMRegister shuf_mask = xmm8;

    __ enter();

    __ sha512_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
                   buf, state, ofs, limit, rsp, multi_block, shuf_mask);

    __ leave();
    __ ret(0);
    return start;
  }

  // This is a version of CTR/AES crypt which does 6 blocks in a loop at a time
  // to hide instruction latency
  //
  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - counter vector byte array address
  //   Linux
  //     c_rarg4            - input length
  //     c_rarg5            - saved encryptedCounter start
  //     rbp + 6 * wordSize - saved used length
  //   Windows
  //     rbp + 6 * wordSize - input length
  //     rbp + 7 * wordSize - saved encryptedCounter start
  //     rbp + 8 * wordSize - saved used length
  //
  // Output:
  //   rax       - input length
  //
  address generate_counterMode_AESCrypt_Parallel() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt");
    address start = __ pc();
    const Register from = c_rarg0;     // source array address
    const Register to = c_rarg1;       // destination array address
    const Register key = c_rarg2;      // key array address
    const Register counter = c_rarg3;  // counter byte array initialized from counter array address
                                       // and updated with the incremented counter in the end
#ifndef _WIN64
    const Register len_reg = c_rarg4;
    const Register saved_encCounter_start = c_rarg5;
    const Register used_addr = r10;
    const Address  used_mem(rbp, 2 * wordSize);
    const Register used = r11;
#else
    const Address len_mem(rbp, 6 * wordSize);               // length is on stack on Win64
    const Address saved_encCounter_mem(rbp, 7 * wordSize);  // saved encryptedCounter is on stack on Win64
    const Address used_mem(rbp, 8 * wordSize);              // saved used length is on stack on Win64
    const Register len_reg = r10;      // pick the first volatile windows register
    const Register saved_encCounter_start = r11;
    const Register used_addr = r13;
    const Register used = r14;
#endif
    const Register pos = rax;

    const int PARALLEL_FACTOR = 6;
    const XMMRegister xmm_counter_shuf_mask = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;  // used temporarily to swap key bytes up front
    const XMMRegister xmm_curr_counter = xmm2;

    const XMMRegister xmm_key_tmp0 = xmm3;
    const XMMRegister xmm_key_tmp1 = xmm4;

    // registers holding the six results in the parallelized loop
    const XMMRegister xmm_result0 = xmm5;
    const XMMRegister xmm_result1 = xmm6;
    const XMMRegister xmm_result2 = xmm7;
    const XMMRegister xmm_result3 = xmm8;
    const XMMRegister xmm_result4 = xmm9;
    const XMMRegister xmm_result5 = xmm10;

    const XMMRegister xmm_from0 = xmm11;
    const XMMRegister xmm_from1 = xmm12;
    const XMMRegister xmm_from2 = xmm13;
    const XMMRegister xmm_from3 = xmm14;  // the last one is xmm14. we have to preserve it on WIN64.
    const XMMRegister xmm_from4 = xmm3;   // reuse xmm3~4. Because xmm_key_tmp0~1 are useless when loading input text
    const XMMRegister xmm_from5 = xmm4;

    // for key_128, key_192, key_256
    const int rounds[3] = {10, 12, 14};
    Label L_exit_preLoop, L_preLoop_start;
    Label L_multiBlock_loopTop[3];
    Label L_singleBlockLoopTop[3];
    Label L__incCounter[3][6];       // for 6 blocks
    Label L__incCounter_single[3];   // for single block, key128, key192, key256
    Label L_processTail_insr[3], L_processTail_4_insr[3], L_processTail_2_insr[3], L_processTail_1_insr[3], L_processTail_exit_insr[3];
    Label L_processTail_extr[3], L_processTail_4_extr[3], L_processTail_2_extr[3], L_processTail_1_extr[3], L_processTail_exit_extr[3];

    Label L_exit;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

#ifdef _WIN64
    // allocate spill slots for r13, r14
    enum {
      saved_r13_offset,
      saved_r14_offset
    };
    __ subptr(rsp, 2 * wordSize);
    __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
    __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);

    // on win64, fill len_reg from stack position
    __ movl(len_reg, len_mem);
    __ movptr(saved_encCounter_start, saved_encCounter_mem);
    __ movptr(used_addr, used_mem);
    __ movl(used, Address(used_addr, 0));
#else
    __ push(len_reg); // Save
    __ movptr(used_addr, used_mem);
    __ movl(used, Address(used_addr, 0));
#endif

    __ push(rbx); // Save RBX
    __ movdqu(xmm_curr_counter, Address(counter, 0x00));  // initialize counter with initial counter
    __ movdqu(xmm_counter_shuf_mask, ExternalAddress(StubRoutines::x86::counter_shuffle_mask_addr()), pos); // pos as scratch
    __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask);   // counter is shuffled
    __ movptr(pos, 0);

    // Use the partially used encrypted counter from last invocation
    __ BIND(L_preLoop_start);
    __ cmpptr(used, 16);
    __ jcc(Assembler::aboveEqual, L_exit_preLoop);
    __ cmpptr(len_reg, 0);
    __ jcc(Assembler::lessEqual, L_exit_preLoop);
    __ movb(rbx, Address(saved_encCounter_start, used));
    __ xorb(rbx, Address(from, pos));
    __ movb(Address(to, pos), rbx);
    __ addptr(pos, 1);
    __ addptr(used, 1);
    __ subptr(len_reg, 1);

    __ jmp(L_preLoop_start);

    __ BIND(L_exit_preLoop);
    __ movl(Address(used_addr, 0), used);

    // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()), rbx); // rbx as scratch
    __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
    __ cmpl(rbx, 52);
    __ jcc(Assembler::equal, L_multiBlock_loopTop[1]);
    __ cmpl(rbx, 60);
    __ jcc(Assembler::equal, L_multiBlock_loopTop[2]);

#define CTR_DoSix(opc, src_reg)      \
    __ opc(xmm_result0, src_reg);    \
    __ opc(xmm_result1, src_reg);    \
    __ opc(xmm_result2, src_reg);    \
    __ opc(xmm_result3, src_reg);    \
    __ opc(xmm_result4, src_reg);    \
    __ opc(xmm_result5, src_reg);

    // k == 0 :  generate code for key_128
    // k == 1 :  generate code for key_192
    // k == 2 :  generate code for key_256
    for (int k = 0; k < 3; ++k) {
      // multi blocks starts here
      __ align(OptoLoopAlignment);
      __ BIND(L_multiBlock_loopTop[k]);
      __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize);  // see if at least PARALLEL_FACTOR blocks left
      __ jcc(Assembler::less, L_singleBlockLoopTop[k]);
      load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);

      // load, then increase counters
      CTR_DoSix(movdqa, xmm_curr_counter);
      inc_counter(rbx, xmm_result1, 0x01, L__incCounter[k][0]);
      inc_counter(rbx, xmm_result2, 0x02, L__incCounter[k][1]);
      inc_counter(rbx, xmm_result3, 0x03, L__incCounter[k][2]);
      inc_counter(rbx, xmm_result4, 0x04, L__incCounter[k][3]);
      inc_counter(rbx, xmm_result5, 0x05, L__incCounter[k][4]);
      inc_counter(rbx, xmm_curr_counter, 0x06, L__incCounter[k][5]);
      CTR_DoSix(pshufb, xmm_counter_shuf_mask); // after increased, shuffle counters back for PXOR
      CTR_DoSix(pxor, xmm_key_tmp0);            // PXOR with Round 0 key

      // load two ROUND_KEYs at a time
      for (int i = 1; i < rounds[k]; ) {
        load_key(xmm_key_tmp1, key, (0x10 * i), xmm_key_shuf_mask);
        load_key(xmm_key_tmp0, key, (0x10 * (i+1)), xmm_key_shuf_mask);
        CTR_DoSix(aesenc, xmm_key_tmp1);
        i++;
        if (i != rounds[k]) {
          CTR_DoSix(aesenc, xmm_key_tmp0);
        } else {
          CTR_DoSix(aesenclast, xmm_key_tmp0);
        }
        i++;
      }

      // get next PARALLEL_FACTOR blocks into xmm_result registers
      __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ movdqu(xmm_from1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ movdqu(xmm_from2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ movdqu(xmm_from3, Address(from, pos, Address::times_1, 3 * AESBlockSize));
      __ movdqu(xmm_from4, Address(from, pos, Address::times_1, 4 * AESBlockSize));
      __ movdqu(xmm_from5, Address(from, pos, Address::times_1, 5 * AESBlockSize));

      __ pxor(xmm_result0, xmm_from0);
      __ pxor(xmm_result1, xmm_from1);
      __ pxor(xmm_result2, xmm_from2);
      __ pxor(xmm_result3, xmm_from3);
      __ pxor(xmm_result4, xmm_from4);
      __ pxor(xmm_result5, xmm_from5);

      // store 6 results into the next 96 bytes of output
      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
      __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1);
      __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2);
      __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3);
      __ movdqu(Address(to, pos, Address::times_1, 4 * AESBlockSize), xmm_result4);
      __ movdqu(Address(to, pos, Address::times_1, 5 * AESBlockSize), xmm_result5);

      __ addptr(pos, PARALLEL_FACTOR * AESBlockSize);     // advance position in the text
      __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // decrease the remaining length
      __ jmp(L_multiBlock_loopTop[k]);

      // singleBlock starts here
      __ align(OptoLoopAlignment);
      __ BIND(L_singleBlockLoopTop[k]);
      __ cmpptr(len_reg, 0);
      __ jcc(Assembler::lessEqual, L_exit);
      load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);
      __ movdqa(xmm_result0, xmm_curr_counter);
      inc_counter(rbx, xmm_curr_counter, 0x01, L__incCounter_single[k]);
      __ pshufb(xmm_result0, xmm_counter_shuf_mask);
      __ pxor(xmm_result0, xmm_key_tmp0);
      for (int i = 1; i < rounds[k]; i++) {
        load_key(xmm_key_tmp0, key, (0x10 * i), xmm_key_shuf_mask);
        __ aesenc(xmm_result0, xmm_key_tmp0);
      }
      load_key(xmm_key_tmp0, key, (rounds[k] * 0x10), xmm_key_shuf_mask);
      __ aesenclast(xmm_result0, xmm_key_tmp0);
      __ cmpptr(len_reg, AESBlockSize);
      __ jcc(Assembler::less, L_processTail_insr[k]);
      __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ pxor(xmm_result0, xmm_from0);
      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
      __ addptr(pos, AESBlockSize);
      __ subptr(len_reg, AESBlockSize);
      __ jmp(L_singleBlockLoopTop[k]);
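      // What follows handles a final partial block (0 < len < 16).
      // Step 1 gathers the len tail bytes back-to-front into xmm_from0 with
      // pinsrq/pinsrd/pinsrw/pinsrb, driven by the binary decomposition of
      // len (e.g. len == 13 inserts 8 + 4 + 1 bytes). Step 2 saves the whole
      // encrypted counter to saved_encCounter_start, so its unused keystream
      // bytes can be consumed by the pre-loop on the next invocation, and
      // XORs it into the gathered bytes. Step 3 scatters the result back to
      // the destination with pextrq/pextrd/pextrw/pextrb, and 'used' is set
      // to len.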
      __ BIND(L_processTail_insr[k]);  // Process the tail part of the input array
      __ addptr(pos, len_reg);         // 1. Insert bytes from src array into xmm_from0 register
      __ testptr(len_reg, 8);
      __ jcc(Assembler::zero, L_processTail_4_insr[k]);
      __ subptr(pos, 8);
      __ pinsrq(xmm_from0, Address(from, pos), 0);
      __ BIND(L_processTail_4_insr[k]);
      __ testptr(len_reg, 4);
      __ jcc(Assembler::zero, L_processTail_2_insr[k]);
      __ subptr(pos, 4);
      __ pslldq(xmm_from0, 4);
      __ pinsrd(xmm_from0, Address(from, pos), 0);
      __ BIND(L_processTail_2_insr[k]);
      __ testptr(len_reg, 2);
      __ jcc(Assembler::zero, L_processTail_1_insr[k]);
      __ subptr(pos, 2);
      __ pslldq(xmm_from0, 2);
      __ pinsrw(xmm_from0, Address(from, pos), 0);
      __ BIND(L_processTail_1_insr[k]);
      __ testptr(len_reg, 1);
      __ jcc(Assembler::zero, L_processTail_exit_insr[k]);
      __ subptr(pos, 1);
      __ pslldq(xmm_from0, 1);
      __ pinsrb(xmm_from0, Address(from, pos), 0);
      __ BIND(L_processTail_exit_insr[k]);

      __ movdqu(Address(saved_encCounter_start, 0), xmm_result0);  // 2. Perform pxor of the encrypted counter and plaintext bytes.
      __ pxor(xmm_result0, xmm_from0);                             //    Also the encrypted counter is saved for next invocation.

      __ testptr(len_reg, 8);
      __ jcc(Assembler::zero, L_processTail_4_extr[k]);  // 3. Extract bytes from xmm_result0 into the dest. array
      __ pextrq(Address(to, pos), xmm_result0, 0);
      __ psrldq(xmm_result0, 8);
      __ addptr(pos, 8);
      __ BIND(L_processTail_4_extr[k]);
      __ testptr(len_reg, 4);
      __ jcc(Assembler::zero, L_processTail_2_extr[k]);
      __ pextrd(Address(to, pos), xmm_result0, 0);
      __ psrldq(xmm_result0, 4);
      __ addptr(pos, 4);
      __ BIND(L_processTail_2_extr[k]);
      __ testptr(len_reg, 2);
      __ jcc(Assembler::zero, L_processTail_1_extr[k]);
      __ pextrw(Address(to, pos), xmm_result0, 0);
      __ psrldq(xmm_result0, 2);
      __ addptr(pos, 2);
      __ BIND(L_processTail_1_extr[k]);
      __ testptr(len_reg, 1);
      __ jcc(Assembler::zero, L_processTail_exit_extr[k]);
      __ pextrb(Address(to, pos), xmm_result0, 0);

      __ BIND(L_processTail_exit_extr[k]);
      __ movl(Address(used_addr, 0), len_reg);
      __ jmp(L_exit);
    }
    __ BIND(L_exit);
    __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask);  // counter is shuffled back.
    __ movdqu(Address(counter, 0), xmm_curr_counter);    // save counter back
    __ pop(rbx);  // pop the saved RBX.
#ifdef _WIN64
    __ movl(rax, len_mem);
    __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
    __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
    __ addptr(rsp, 2 * wordSize);
#else
    __ pop(rax);  // return 'len'
#endif
    __ leave();   // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }
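  // A note on the mode (illustrative; AES_encrypt is shorthand): CTR turns
  // AES into a stream cipher. Per 16-byte block the stub computes
  //
  //   out[i] = in[i] ^ AES_encrypt(counter + i, key);
  //
  // so the very same code serves encryption and decryption, XOR being its
  // own inverse. Since every counter value is known in advance, six
  // independent aesenc chains run per iteration of the multi-block loop.
  // The counter is kept byte-swapped in xmm_curr_counter so it can be
  // incremented with ordinary integer arithmetic, and is shuffled back
  // only when it is consumed or stored.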
  // byte swap x86 long
  address generate_ghash_long_swap_mask() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask");
    address start = __ pc();
    __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none);
    __ emit_data64(0x0706050403020100, relocInfo::none);
    return start;
  }

  // byte swap x86 byte array
  address generate_ghash_byte_swap_mask() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }

  /* Single and multi-block ghash operations */
  address generate_ghash_processBlocks() {
    __ align(CodeEntryAlignment);
    Label L_ghash_loop, L_exit;
    StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
    address start = __ pc();

    const Register state   = c_rarg0;
    const Register subkeyH = c_rarg1;
    const Register data    = c_rarg2;
    const Register blocks  = c_rarg3;

    const XMMRegister xmm_temp0 = xmm0;
    const XMMRegister xmm_temp1 = xmm1;
    const XMMRegister xmm_temp2 = xmm2;
    const XMMRegister xmm_temp3 = xmm3;
    const XMMRegister xmm_temp4 = xmm4;
    const XMMRegister xmm_temp5 = xmm5;
    const XMMRegister xmm_temp6 = xmm6;
    const XMMRegister xmm_temp7 = xmm7;
    const XMMRegister xmm_temp8 = xmm8;
    const XMMRegister xmm_temp9 = xmm9;
    const XMMRegister xmm_temp10 = xmm10;

    __ enter();

    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

    __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));

    __ movdqu(xmm_temp0, Address(state, 0));
    __ pshufb(xmm_temp0, xmm_temp10);


    __ BIND(L_ghash_loop);
    __ movdqu(xmm_temp2, Address(data, 0));
    __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr()));

    __ movdqu(xmm_temp1, Address(subkeyH, 0));
    __ pshufb(xmm_temp1, xmm_temp10);

    __ pxor(xmm_temp0, xmm_temp2);

    //
    // Multiply with the hash key
    //
    __ movdqu(xmm_temp3, xmm_temp0);
    __ pclmulqdq(xmm_temp3, xmm_temp1, 0);   // xmm3 holds a0*b0
    __ movdqu(xmm_temp4, xmm_temp0);
    __ pclmulqdq(xmm_temp4, xmm_temp1, 16);  // xmm4 holds a0*b1

    __ movdqu(xmm_temp5, xmm_temp0);
    __ pclmulqdq(xmm_temp5, xmm_temp1, 1);   // xmm5 holds a1*b0
    __ movdqu(xmm_temp6, xmm_temp0);
    __ pclmulqdq(xmm_temp6, xmm_temp1, 17);  // xmm6 holds a1*b1

    __ pxor(xmm_temp4, xmm_temp5);      // xmm4 holds a0*b1 + a1*b0

    __ movdqu(xmm_temp5, xmm_temp4);    // move the contents of xmm4 to xmm5
    __ psrldq(xmm_temp4, 8);            // shift xmm4 by 64 bits to the right
    __ pslldq(xmm_temp5, 8);            // shift xmm5 by 64 bits to the left
    __ pxor(xmm_temp3, xmm_temp5);
    __ pxor(xmm_temp6, xmm_temp4);      // Register pair <xmm6:xmm3> holds the result
                                        // of the carry-less multiplication of
                                        // xmm0 by xmm1.

    // We shift the result of the multiplication by one bit position
    // to the left to account for the fact that the bits are reversed.
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp8, xmm_temp6);
    __ pslld(xmm_temp3, 1);
    __ pslld(xmm_temp6, 1);
    __ psrld(xmm_temp7, 31);
    __ psrld(xmm_temp8, 31);
    __ movdqu(xmm_temp9, xmm_temp7);
    __ pslldq(xmm_temp8, 4);
    __ pslldq(xmm_temp7, 4);
    __ psrldq(xmm_temp9, 12);
    __ por(xmm_temp3, xmm_temp7);
    __ por(xmm_temp6, xmm_temp8);
    __ por(xmm_temp6, xmm_temp9);

    //
    // First phase of the reduction
    //
    // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts
    // independently.
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp8, xmm_temp3);
    __ movdqu(xmm_temp9, xmm_temp3);
    __ pslld(xmm_temp7, 31);        // packed left shift << 31
    __ pslld(xmm_temp8, 30);        // packed left shift << 30
    __ pslld(xmm_temp9, 25);        // packed left shift << 25
    __ pxor(xmm_temp7, xmm_temp8);  // xor the shifted versions
    __ pxor(xmm_temp7, xmm_temp9);
    __ movdqu(xmm_temp8, xmm_temp7);
    __ pslldq(xmm_temp7, 12);
    __ psrldq(xmm_temp8, 4);
    __ pxor(xmm_temp3, xmm_temp7);  // first phase of the reduction complete

    //
    // Second phase of the reduction
    //
    // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these
    // shift operations.
    __ movdqu(xmm_temp2, xmm_temp3);
    __ movdqu(xmm_temp4, xmm_temp3);
    __ movdqu(xmm_temp5, xmm_temp3);
    __ psrld(xmm_temp2, 1);         // packed right shift >> 1
    __ psrld(xmm_temp4, 2);         // packed right shift >> 2
    __ psrld(xmm_temp5, 7);         // packed right shift >> 7
    __ pxor(xmm_temp2, xmm_temp4);  // xor the shifted versions
    __ pxor(xmm_temp2, xmm_temp5);
    __ pxor(xmm_temp2, xmm_temp8);
    __ pxor(xmm_temp3, xmm_temp2);
    __ pxor(xmm_temp6, xmm_temp3);  // the result is in xmm6

    __ decrement(blocks);
    __ jcc(Assembler::zero, L_exit);
    __ movdqu(xmm_temp0, xmm_temp6);
    __ addptr(data, 16);
    __ jmp(L_ghash_loop);

    __ BIND(L_exit);
    __ pshufb(xmm_temp6, xmm_temp10);         // Byte swap 16-byte result
    __ movdqu(Address(state, 0), xmm_temp6);  // store the result

    __ leave();
    __ ret(0);
    return start;
  }
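  // A note on the math above (illustrative): each iteration folds one
  // 16-byte block into the GHASH state by a multiplication in GF(2^128):
  //
  //   state = (state ^ block) * H   mod  x^128 + x^7 + x^2 + x + 1
  //
  // The four pclmulqdq instructions build the 256-bit carry-less product
  // from 64x64-bit halves (schoolbook style), the shift-left-by-one pass
  // compensates for GCM's reversed bit ordering, and the two reduction
  // phases fold the high 128 bits back down modulo the field polynomial.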
  /**
   *  Arguments:
   *
   *  Inputs:
   *    c_rarg0   - int crc
   *    c_rarg1   - byte* buf
   *    c_rarg2   - int length
   *
   *  Output:
   *    rax       - int crc result
   */
  address generate_updateBytesCRC32() {
    assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    // rscratch1: r10
    const Register crc   = c_rarg0;  // crc
    const Register buf   = c_rarg1;  // source java byte array address
    const Register len   = c_rarg2;  // length
    const Register table = c_rarg3;  // crc_table address (reuse register)
    const Register tmp   = r11;
    assert_different_registers(crc, buf, len, table, tmp, rax);

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ kernel_crc32(crc, buf, len, table, tmp);

    __ movl(rax, crc);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  /**
   *  Arguments:
   *
   *  Inputs:
   *    c_rarg0   - int crc
   *    c_rarg1   - byte* buf
   *    c_rarg2   - long length
   *    c_rarg3   - table_start - optional (present only when doing a library_call,
   *                not used by x86 algorithm)
   *
   *  Output:
   *    rax       - int crc result
   */
  address generate_updateBytesCRC32C(bool is_pclmulqdq_supported) {
    assert(UseCRC32CIntrinsics, "need SSE4_2");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C");
    address start = __ pc();
    // reg.arg   int#0  int#1  int#2  int#3  int#4  int#5  float regs
    // Windows   RCX    RDX    R8     R9     none   none   XMM0..XMM3
    // Lin / Sol RDI    RSI    RDX    RCX    R8     R9     XMM0..XMM7
    const Register crc = c_rarg0;  // crc
    const Register buf = c_rarg1;  // source java byte array address
    const Register len = c_rarg2;  // length
    const Register a = rax;
    const Register j = r9;
    const Register k = r10;
    const Register l = r11;
#ifdef _WIN64
    const Register y = rdi;
    const Register z = rsi;
#else
    const Register y = rcx;
    const Register z = r8;
#endif
    assert_different_registers(crc, buf, len, a, j, k, l, y, z);

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame
#ifdef _WIN64
    __ push(y);
    __ push(z);
#endif
    __ crc32c_ipl_alg2_alt2(crc, buf, len,
                            a, j, k,
                            l, y, z,
                            c_farg0, c_farg1, c_farg2,
                            is_pclmulqdq_supported);
    __ movl(rax, crc);
#ifdef _WIN64
    __ pop(z);
    __ pop(y);
#endif
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  /**
   *  Arguments:
   *
   *  Input:
   *    c_rarg0   - x address
   *    c_rarg1   - x length
   *    c_rarg2   - y address
   *    c_rarg3   - y length
   *   not Win64
   *    c_rarg4   - z address
   *    c_rarg5   - z length
   *   Win64
   *    rsp+40    - z address
   *    rsp+48    - z length
   */
  address generate_multiplyToLen() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "multiplyToLen");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register x     = rdi;
    const Register xlen  = rax;
    const Register y     = rsi;
    const Register ylen  = rcx;
    const Register z     = r8;
    const Register zlen  = r11;

    // Next registers will be saved on stack in multiply_to_len().
    const Register tmp1  = r12;
    const Register tmp2  = r13;
    const Register tmp3  = r14;
    const Register tmp4  = r15;
    const Register tmp5  = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifndef _WIN64
    __ movptr(zlen, r9); // Save r9 in r11 - zlen
#endif
    setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx
                       // ylen => rcx, z => r8, zlen => r11
                       // r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
    // last 2 arguments (#4, #5) are on stack on Win64
    __ movptr(z, Address(rsp, 6 * wordSize));
    __ movptr(zlen, Address(rsp, 7 * wordSize));
#endif

    __ movptr(xlen, rsi);
    __ movptr(y, rdx);
    __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5);

    restore_arg_regs();

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
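  // Semantics of multiply_to_len (illustrative): it backs BigInteger's
  // multiplyToLen intrinsic. A C sketch over 32-bit limbs, most significant
  // limb first and z[] zero-initialized (z needs xlen + ylen ints):
  //
  //   for (int i = xlen - 1; i >= 0; i--) {
  //     uint64_t carry = 0;
  //     for (int j = ylen - 1, k = ylen + i; j >= 0; j--, k--) {
  //       uint64_t p = (uint64_t)x[i] * y[j] + z[k] + carry;
  //       z[k] = (uint32_t)p;
  //       carry = p >> 32;
  //     }
  //     z[i] = (uint32_t)carry;
  //   }
  //
  // The stub gets its speed from 64-bit limbs and unrolling, not from a
  // different algorithm.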
  /**
   *  Arguments:
   *
   *  Input:
   *    c_rarg0   - obja     address
   *    c_rarg1   - objb     address
   *    c_rarg2   - length   length
   *    c_rarg3   - scale    log2_array_indxscale
   *
   *  Output:
   *    rax   - int >= mismatched index, < 0 bitwise complement of tail
   */
  address generate_vectorizedMismatch() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "vectorizedMismatch");
    address start = __ pc();

    BLOCK_COMMENT("Entry:");
    __ enter();

#ifdef _WIN64  // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register scale = c_rarg0;   // rcx, will exchange with r9
    const Register objb = c_rarg1;    // rdx
    const Register length = c_rarg2;  // r8
    const Register obja = c_rarg3;    // r9
    __ xchgq(obja, scale);  // now obja and scale contain the correct contents

    const Register tmp1 = r10;
    const Register tmp2 = r11;
#endif
#ifndef _WIN64  // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register obja = c_rarg0;    // U:rdi
    const Register objb = c_rarg1;    // U:rsi
    const Register length = c_rarg2;  // U:rdx
    const Register scale = c_rarg3;   // U:rcx
    const Register tmp1 = r8;
    const Register tmp2 = r9;
#endif
    const Register result = rax;  // return value
    const XMMRegister vec0 = xmm0;
    const XMMRegister vec1 = xmm1;
    const XMMRegister vec2 = xmm2;

    __ vectorized_mismatch(obja, objb, length, scale, result, tmp1, tmp2, vec0, vec1, vec2);

    __ leave();
    __ ret(0);

    return start;
  }
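  // Return-value contract (illustrative), matching
  // ArraysSupport.vectorizedMismatch on the Java side: 'scale' is log2 of
  // the element size, so the stub compares (length << scale) bytes.
  //   result >= 0 : index of the first mismatching element;
  //   result <  0 : no mismatch found in the vectorized portion; ~result is
  //                 the number of tail elements the caller must still
  //                 compare one by one.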
  /**
   *  Arguments:
   *
   *  Input:
   *    c_rarg0   - x address
   *    c_rarg1   - x length
   *    c_rarg2   - z address
   *    c_rarg3   - z length
   */
  address generate_squareToLen() {

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "squareToLen");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...)
    const Register x      = rdi;
    const Register len    = rsi;
    const Register z      = r8;
    const Register zlen   = rcx;

    const Register tmp1   = r12;
    const Register tmp2   = r13;
    const Register tmp3   = r14;
    const Register tmp4   = r15;
    const Register tmp5   = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    setup_arg_regs(4); // x => rdi, len => rsi, z => rdx
                       // zlen => rcx
                       // r9 and r10 may be used to save non-volatile registers
    __ movptr(r8, rdx);
    __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax);

    restore_arg_regs();

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  /**
   *  Arguments:
   *
   *  Input:
   *    c_rarg0   - out address
   *    c_rarg1   - in address
   *    c_rarg2   - offset
   *    c_rarg3   - len
   *   not Win64
   *    c_rarg4   - k
   *   Win64
   *    rsp+40    - k
   */
  address generate_mulAdd() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "mulAdd");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register out     = rdi;
    const Register in      = rsi;
    const Register offset  = r11;
    const Register len     = rcx;
    const Register k       = r8;

    // Next registers will be saved on stack in mul_add().
    const Register tmp1  = r12;
    const Register tmp2  = r13;
    const Register tmp3  = r14;
    const Register tmp4  = r15;
    const Register tmp5  = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx
                       // len => rcx, k => r8
                       // r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
    // last argument is on stack on Win64
    __ movl(k, Address(rsp, 6 * wordSize));
#endif
    __ movptr(r11, rdx);  // move offset in rdx to offset(r11)
    __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax);

    restore_arg_regs();

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
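  // Semantics of mul_add (illustrative): it backs BigInteger's mulAdd
  // intrinsic. Roughly, in C, over 32-bit limbs with 'offset' already
  // adjusted to index from the least significant end, as the Java side does:
  //
  //   uint64_t carry = 0;
  //   for (int j = len - 1; j >= 0; j--) {
  //     uint64_t p = (uint64_t)k * in[j] + out[offset] + carry;
  //     out[offset--] = (uint32_t)p;
  //     carry = p >> 32;
  //   }
  //   return (int)carry;   // in rax
  //
  // i.e. out += in * k over a len-limb window, returning the final carry.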
  address generate_libmExp() {
    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ fast_exp(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp);

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmLog() {
    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r11;
    const Register tmp2 = r8;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ fast_log(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2);

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmLog10() {
    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ fast_log10(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp);

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmPow() {
    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r8;
    const Register tmp2 = r9;
    const Register tmp3 = r10;
    const Register tmp4 = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ fast_pow(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4);

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmSin() {
    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r8;
    const Register tmp2 = r9;
    const Register tmp3 = r10;
    const Register tmp4 = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef _WIN64
    __ push(rsi);
    __ push(rdi);
#endif
    __ fast_sin(x0, x1, x2, x3, x4, x5, x6, x7, rax, rbx, rcx, rdx, tmp1, tmp2, tmp3, tmp4);

#ifdef _WIN64
    __ pop(rdi);
    __ pop(rsi);
#endif

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmCos() {
    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r8;
    const Register tmp2 = r9;
    const Register tmp3 = r10;
    const Register tmp4 = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef _WIN64
    __ push(rsi);
    __ push(rdi);
#endif
    __ fast_cos(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4);

#ifdef _WIN64
    __ pop(rdi);
    __ pop(rsi);
#endif

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmTan() {
    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r8;
    const Register tmp2 = r9;
    const Register tmp3 = r10;
    const Register tmp4 = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef _WIN64
    __ push(rsi);
    __ push(rdi);
#endif
    __ fast_tan(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4);

#ifdef _WIN64
    __ pop(rdi);
    __ pop(rsi);
#endif

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
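  // Note: the sin/cos/tan stubs save and restore rsi and rdi around the
  // fast_* kernels only on Windows: the Win64 ABI treats those two
  // registers as callee-saved, while in the System V ABI used everywhere
  // else they are volatile, and the generated kernels may clobber them.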
Since we need to preserve callee-saved values (currently 4763 // only for C2, but done for C1 as well) we need a callee-saved oop 4764 // map and therefore have to make these stubs into RuntimeStubs 4765 // rather than BufferBlobs. If the compiler needs all registers to 4766 // be preserved between the fault point and the exception handler 4767 // then it must assume responsibility for that in 4768 // AbstractCompiler::continuation_for_implicit_null_exception or 4769 // continuation_for_implicit_division_by_zero_exception. All other 4770 // implicit exceptions (e.g., NullPointerException or 4771 // AbstractMethodError on entry) are either at call sites or 4772 // otherwise assume that stack unwinding will be initiated, so 4773 // caller saved registers were assumed volatile in the compiler. 4774 address generate_throw_exception(const char* name, 4775 address runtime_entry, 4776 Register arg1 = noreg, 4777 Register arg2 = noreg) { 4778 // Information about frame layout at time of blocking runtime call. 4779 // Note that we only have to preserve callee-saved registers since 4780 // the compilers are responsible for supplying a continuation point 4781 // if they expect all registers to be preserved. 4782 enum layout { 4783 rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt, 4784 rbp_off2, 4785 return_off, 4786 return_off2, 4787 framesize // inclusive of return address 4788 }; 4789 4790 int insts_size = 512; 4791 int locs_size = 64; 4792 4793 CodeBuffer code(name, insts_size, locs_size); 4794 OopMapSet* oop_maps = new OopMapSet(); 4795 MacroAssembler* masm = new MacroAssembler(&code); 4796 4797 address start = __ pc(); 4798 4799 // This is an inlined and slightly modified version of call_VM 4800 // which has the ability to fetch the return PC out of 4801 // thread-local storage and also sets up last_Java_sp slightly 4802 // differently than the real call_VM 4803 4804 __ enter(); // required for proper stackwalking of RuntimeStub frame 4805 4806 assert(is_even(framesize/2), "sp not 16-byte aligned"); 4807 4808 // return address and rbp are already in place 4809 __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog 4810 4811 int frame_complete = __ pc() - start; 4812 4813 // Set up last_Java_sp and last_Java_fp 4814 address the_pc = __ pc(); 4815 __ set_last_Java_frame(rsp, rbp, the_pc); 4816 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack 4817 4818 // Call runtime 4819 if (arg1 != noreg) { 4820 assert(arg2 != c_rarg1, "clobbered"); 4821 __ movptr(c_rarg1, arg1); 4822 } 4823 if (arg2 != noreg) { 4824 __ movptr(c_rarg2, arg2); 4825 } 4826 __ movptr(c_rarg0, r15_thread); 4827 BLOCK_COMMENT("call runtime_entry"); 4828 __ call(RuntimeAddress(runtime_entry)); 4829 4830 // Generate oop map 4831 OopMap* map = new OopMap(framesize, 0); 4832 4833 oop_maps->add_gc_map(the_pc - start, map); 4834 4835 __ reset_last_Java_frame(true); 4836 4837 __ leave(); // required for proper stackwalking of RuntimeStub frame 4838 4839 // check for pending exceptions 4840 #ifdef ASSERT 4841 Label L; 4842 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), 4843 (int32_t) NULL_WORD); 4844 __ jcc(Assembler::notEqual, L); 4845 __ should_not_reach_here(); 4846 __ bind(L); 4847 #endif // ASSERT 4848 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 4849 4850 4851 // codeBlob framesize is in words (not VMRegImpl::slot_size) 4852 RuntimeStub* stub = 4853 RuntimeStub::new_runtime_stub(name, 4854 &code, 4855 frame_complete, 4856 (framesize >> (LogBytesPerWord - LogBytesPerInt)), 4857 
                                    oop_maps, false);
    return stub->entry_point();
  }

  void create_control_words() {
    // Round to nearest, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_std   = 0x027F;
    // Round to zero, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
    // Round to nearest, 24-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_24    = 0x007F;
    // Round to nearest, 64-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_64    = 0x037F;
    // Round to nearest, all SSE exceptions masked (MXCSR has no precision control)
    StubRoutines::_mxcsr_std           = 0x1F80;
    // Note: the following two constants are 80-bit values;
    // their layout is critical for correct loading by the FPU.
    // Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias1[0] = 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias1[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias1[2] = 0x03ff;
    // Un-Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias2[0] = 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias2[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias2[2] = 0x7bff;
  }

  // Initialization
  void generate_initial() {
    // Generates all stubs and initializes the entry points

    // These platform-specific settings are needed by generate_call_stub()
    create_control_words();

    // Entry points that exist on all platforms. Note: this is code
    // that could be shared among different platforms - however the
    // benefit seems to be smaller than the disadvantage of having a
    // much more complicated generator structure. See also the comment
    // in stubRoutines.hpp.

    StubRoutines::_forward_exception_entry = generate_forward_exception();

    StubRoutines::_call_stub_entry =
      generate_call_stub(StubRoutines::_call_stub_return_address);

    // is referenced by megamorphic call
    StubRoutines::_catch_exception_entry = generate_catch_exception();

    // atomic calls
    StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
    StubRoutines::_atomic_xchg_ptr_entry     = generate_atomic_xchg_ptr();
    StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
    StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
    StubRoutines::_atomic_add_entry          = generate_atomic_add();
    StubRoutines::_atomic_add_ptr_entry      = generate_atomic_add_ptr();
    StubRoutines::_fence_entry               = generate_orderaccess_fence();

    // platform dependent
    StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
    StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();

    StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
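    // Added note (hedged, not in the original source): generate_verify_mxcsr()
    // checks the live MXCSR against StubRoutines::_mxcsr_std, set to 0x1F80 in
    // create_control_words() above. Per the Intel SDM, that value decodes as:
    //
    //   bits  0-5  : sticky exception flags     = 0 (clear)
    //   bit   6    : DAZ (denormals-are-zero)   = 0 (off)
    //   bits  7-12 : exception masks IM..PM     = all set
    //   bits 13-14 : rounding control           = 00 (round to nearest)
    //   bit  15    : FZ (flush-to-zero)         = 0 (off)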
    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_StackOverflowError));
    StubRoutines::_throw_delayed_StackOverflowError_entry =
      generate_throw_exception("delayed StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_delayed_StackOverflowError));
    if (UseCRC32Intrinsics) {
      // Set the table address before generating the stubs that use it.
      StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
    }

    if (UseCRC32CIntrinsics) {
      bool supports_clmul = VM_Version::supports_clmul();
      StubRoutines::x86::generate_CRC32C_table(supports_clmul);
      StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
      StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
    }
    if (VM_Version::supports_sse2() && UseLibmIntrinsic && InlineIntrinsics) {
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::x86::_ONEHALF_adr = (address)StubRoutines::x86::_ONEHALF;
        StubRoutines::x86::_P_2_adr = (address)StubRoutines::x86::_P_2;
        StubRoutines::x86::_SC_4_adr = (address)StubRoutines::x86::_SC_4;
        StubRoutines::x86::_Ctable_adr = (address)StubRoutines::x86::_Ctable;
        StubRoutines::x86::_SC_2_adr = (address)StubRoutines::x86::_SC_2;
        StubRoutines::x86::_SC_3_adr = (address)StubRoutines::x86::_SC_3;
        StubRoutines::x86::_SC_1_adr = (address)StubRoutines::x86::_SC_1;
        StubRoutines::x86::_PI_INV_TABLE_adr = (address)StubRoutines::x86::_PI_INV_TABLE;
        StubRoutines::x86::_PI_4_adr = (address)StubRoutines::x86::_PI_4;
        StubRoutines::x86::_PI32INV_adr = (address)StubRoutines::x86::_PI32INV;
        StubRoutines::x86::_SIGN_MASK_adr = (address)StubRoutines::x86::_SIGN_MASK;
        StubRoutines::x86::_P_1_adr = (address)StubRoutines::x86::_P_1;
        StubRoutines::x86::_P_3_adr = (address)StubRoutines::x86::_P_3;
        StubRoutines::x86::_NEG_ZERO_adr = (address)StubRoutines::x86::_NEG_ZERO;
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dexp)) {
        StubRoutines::_dexp = generate_libmExp();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
        StubRoutines::_dlog = generate_libmLog();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) {
        StubRoutines::_dlog10 = generate_libmLog10();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) {
        StubRoutines::_dpow = generate_libmPow();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
        StubRoutines::_dsin = generate_libmSin();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
        StubRoutines::_dcos = generate_libmCos();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::_dtan = generate_libmTan();
      }
    }
  }
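  // Added commentary (an assumption about the surrounding runtime, not taken
  // from this file): generate_initial() is expected to run early in bootstrap,
  // since the interpreter needs the call stub and the StackOverflowError
  // entry, while generate_all() runs after universe initialization. A minimal
  // sketch of the driving side under that assumption, with "StubRoutines (1)"
  // and code_size1 as illustrative names:
  //
  //   BufferBlob* blob = BufferBlob::create("StubRoutines (1)", code_size1);
  //   CodeBuffer buffer(blob);
  //   StubGenerator_generate(&buffer, false);   // early: initial stubs only
  //   // ... later, with a second, larger buffer:
  //   StubGenerator_generate(&buffer2, true);   // remaining stubs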
  void generate_all() {
    // Generates all stubs and initializes the entry points

    // These entry points require SharedInfo::stack0 to be set up in
    // non-core builds and need to be relocatable, so they each
    // fabricate a RuntimeStub internally.
    StubRoutines::_throw_AbstractMethodError_entry =
      generate_throw_exception("AbstractMethodError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_AbstractMethodError));

    StubRoutines::_throw_IncompatibleClassChangeError_entry =
      generate_throw_exception("IncompatibleClassChangeError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_IncompatibleClassChangeError));

    StubRoutines::_throw_NullPointerException_at_call_entry =
      generate_throw_exception("NullPointerException at call throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_NullPointerException_at_call));

    // entry points that are platform specific
    StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
    StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
    StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
    StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();

    StubRoutines::x86::_float_sign_mask  = generate_fp_mask("float_sign_mask",  0x7FFFFFFF7FFFFFFF);
    StubRoutines::x86::_float_sign_flip  = generate_fp_mask("float_sign_flip",  0x8000000080000000);
    StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
    StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    // don't bother generating these AES intrinsic stubs unless global flag is set
    if (UseAESIntrinsics) {
      StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others
      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
      StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
    }
    if (UseAESCTRIntrinsics) {
      StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask();
      StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
    }

    if (UseSHA1Intrinsics) {
      StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
      StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
      StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress");
      StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB");
    }
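    // Added note (hedged, not in the original source): the SHA-256 setup below
    // widens the scalar round-constant table _k256 (16 rows of 16 bytes) into
    // _k256_W (16 rows of 32 bytes) by duplicating each row into both 128-bit
    // lanes, presumably so AVX2 code can feed the same four round constants to
    // two interleaved lanes with a single 256-bit load. Worked example, row 0:
    //
    //   _k256   row 0: 428a2f98 71374491 b5c0fbcf e9b5dba5
    //   _k256_W row 0: 428a2f98 71374491 b5c0fbcf e9b5dba5
    //                  428a2f98 71374491 b5c0fbcf e9b5dba5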
    if (UseSHA256Intrinsics) {
      StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
      // Duplicate each 16-byte row of _k256 into both halves of a 32-byte row of _k256_W.
      char* dst = (char*)StubRoutines::x86::_k256_W;
      char* src = (char*)StubRoutines::x86::_k256;
      for (int ii = 0; ii < 16; ++ii) {
        memcpy(dst + 32 * ii,      src + 16 * ii, 16);
        memcpy(dst + 32 * ii + 16, src + 16 * ii, 16);
      }
      StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask();
      StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress");
      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB");
    }
    if (UseSHA512Intrinsics) {
      StubRoutines::x86::_k512_W_addr = (address)StubRoutines::x86::_k512_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = generate_pshuffle_byte_flip_mask_sha512();
      StubRoutines::_sha512_implCompress = generate_sha512_implCompress(false, "sha512_implCompress");
      StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB");
    }

    // Generate GHASH intrinsics code
    if (UseGHASHIntrinsics) {
      StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();
      StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask();
      StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
    }

    // Safefetch stubs.
    generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
                       &StubRoutines::_safefetch32_fault_pc,
                       &StubRoutines::_safefetch32_continuation_pc);
    generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                       &StubRoutines::_safefetchN_fault_pc,
                       &StubRoutines::_safefetchN_continuation_pc);
#ifdef COMPILER2
    if (UseMultiplyToLenIntrinsic) {
      StubRoutines::_multiplyToLen = generate_multiplyToLen();
    }
    if (UseSquareToLenIntrinsic) {
      StubRoutines::_squareToLen = generate_squareToLen();
    }
    if (UseMulAddIntrinsic) {
      StubRoutines::_mulAdd = generate_mulAdd();
    }
#ifndef _WINDOWS
    if (UseMontgomeryMultiplyIntrinsic) {
      StubRoutines::_montgomeryMultiply
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
    }
    if (UseMontgomerySquareIntrinsic) {
      StubRoutines::_montgomerySquare
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
    }
#endif // !_WINDOWS
#endif // COMPILER2

    if (UseVectorizedMismatchIntrinsic) {
      StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch();
    }
  }

 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
}; // end class declaration

void StubGenerator_generate(CodeBuffer* code, bool all) {
  StubGenerator g(code, all);
}
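// Added usage sketch (illustrative only, not part of the original file): a
// libm entry published above, e.g. StubRoutines::_dsin, takes its argument in
// xmm0 and returns it in xmm0, matching the C convention for double(double)
// on System V targets, so there it could in principle be called through a
// plain function pointer. Caveat: on Windows the stubs clobber xmm6/xmm7,
// which a C caller would expect to be preserved, so this sketch assumes a
// System V (e.g. Linux) build:
//
//   typedef double (*unary_math_fn)(double);
//   if (StubRoutines::dsin() != NULL) {
//     unary_math_fn f = CAST_TO_FN_PTR(unary_math_fn, StubRoutines::dsin());
//     double y = (*f)(0.5);   // ~0.479426
//   }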