/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions

// Stub Code definitions

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    // This can destroy rscratch1 if counter is far from the code cache
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter) \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif

  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    c_rarg4:   (interpreter) entry point              address
  //    c_rarg5:   parameters                             intptr_t*
  //    16(rbp):   parameter size (in words)              int
  //    24(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -12 [ argument word 1      ]
  // -11 [ saved r15            ] <--- rsp_after_call
  // -10 [ saved r14            ]
  //  -9 [ saved r13            ]
  //  -8 [ saved r12            ]
  //  -7 [ saved rbx            ]
  //  -6 [ call wrapper         ]
  //  -5 [ result               ]
  //  -4 [ result type          ]
  //  -3 [ method               ]
  //  -2 [ entry point          ]
  //  -1 [ parameters           ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ parameter size       ]
  //   3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    48(rbp):   (interpreter) entry point              address
  //    56(rbp):   parameters                             intptr_t*
  //    64(rbp):   parameter size (in words)              int
  //    72(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -60 [ argument word 1      ]
  // -59 [ saved xmm31          ] <--- rsp after_call
  //     [ saved xmm16-xmm30    ] (EVEX enabled, else the space is blank)
  // -27 [ saved xmm15          ]
  //     [ saved xmm7-xmm14     ]
  //  -9 [ saved xmm6           ] (each xmm register takes 2 slots)
  //  -7 [ saved r15            ]
  //  -6 [ saved r14            ]
  //  -5 [ saved r13            ]
  //  -4 [ saved r12            ]
  //  -3 [ saved rdi            ]
  //  -2 [ saved rsi            ]
  //  -1 [ saved rbx            ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ call wrapper         ]
  //   3 [ result               ]
  //   4 [ result type          ]
  //   5 [ method               ]
  //   6 [ entry point          ]
  //   7 [ parameters           ]
  //   8 [ parameter size       ]
  //   9 [ thread               ]
  //
  //    Windows reserves the caller's stack space for arguments 1-4.
  //    We spill c_rarg0-c_rarg3 to this space.
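  //
  // Hedged cross-reference (added for orientation; see stubRoutines.hpp for
  // the authoritative declaration): the C++ side invokes this stub through
  // the CallStub function-pointer type, roughly
  //
  //   typedef void (*CallStub)(address   link,            // call wrapper
  //                            intptr_t* result,
  //                            BasicType result_type,
  //                            Method*   method,
  //                            address   entry_point,
  //                            intptr_t* parameters,
  //                            int       size_of_parameters,
  //                            TRAPS);
  //
  // which matches the register/stack argument layout documented above.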

  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    xmm_save_first     = 6,  // save from xmm6
    xmm_save_last      = 31, // to xmm31
    xmm_save_base      = -9,
    rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -27
    r15_off            = -7,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            = 0,
    retaddr_off        = 1,
    call_wrapper_off   = 2,
    result_off         = 3,
    result_type_off    = 4,
    method_off         = 5,
    entry_point_off    = 6,
    parameters_off     = 7,
    parameter_size_off = 8,
    thread_off         = 9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            = 0,
    retaddr_off        = 1,
    parameter_size_off = 2,
    thread_off         = 3
#endif
  };

#ifdef _WIN64
  Address xmm_save(int reg) {
    assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
    return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
  }
#endif

  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,   c_rarg5); // parameters
    __ movptr(entry_point,  c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);
    if (UseAVX > 2) {
      __ movl(rbx, 0xffff);
      __ kmovwl(k1, rbx);
    }
#ifdef _WIN64
    int last_reg = 15;
    if (UseAVX > 2) {
      last_reg = 31;
    }
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vextractf32x4(xmm_save(i), as_XMMRegister(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(xmm_save(i), as_XMMRegister(i));
      }
    }

    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);       // parameter pointer
    __ movl(c_rarg1, c_rarg3);            // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0));  // get parameter
    __ addptr(c_rarg2, wordSize);         // advance to next parameter
    __ decrementl(c_rarg1);               // decrement counter
    __ push(rax);                         // pass parameter
    __ jcc(Assembler::notZero, loop);

    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);             // get Method*
    __ movptr(c_rarg1, entry_point);    // get entry_point
    __ mov(r13, rsp);                   // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::call_stub: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::call_stub: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L3);
    }
#endif

    // restore regs belonging to calling function
#ifdef _WIN64
    // emit the restores for xmm regs
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vinsertf32x4(as_XMMRegister(i), as_XMMRegister(i), xmm_save(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(as_XMMRegister(i), xmm_save(i));
      }
    }
#endif
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ vzeroupper();
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }

  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to setup the
  // rsp.
441 // 442 // rax: exception oop 443 444 address generate_catch_exception() { 445 StubCodeMark mark(this, "StubRoutines", "catch_exception"); 446 address start = __ pc(); 447 448 // same as in generate_call_stub(): 449 const Address rsp_after_call(rbp, rsp_after_call_off * wordSize); 450 const Address thread (rbp, thread_off * wordSize); 451 452 #ifdef ASSERT 453 // verify that threads correspond 454 { 455 Label L1, L2, L3; 456 __ cmpptr(r15_thread, thread); 457 __ jcc(Assembler::equal, L1); 458 __ stop("StubRoutines::catch_exception: r15_thread is corrupted"); 459 __ bind(L1); 460 __ get_thread(rbx); 461 __ cmpptr(r15_thread, thread); 462 __ jcc(Assembler::equal, L2); 463 __ stop("StubRoutines::catch_exception: r15_thread is modified by call"); 464 __ bind(L2); 465 __ cmpptr(r15_thread, rbx); 466 __ jcc(Assembler::equal, L3); 467 __ stop("StubRoutines::catch_exception: threads must correspond"); 468 __ bind(L3); 469 } 470 #endif 471 472 // set pending exception 473 __ verify_oop(rax); 474 475 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax); 476 __ lea(rscratch1, ExternalAddress((address)__FILE__)); 477 __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1); 478 __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__); 479 480 // complete return to VM 481 assert(StubRoutines::_call_stub_return_address != NULL, 482 "_call_stub_return_address must have been generated before"); 483 __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address)); 484 485 return start; 486 } 487 488 // Continuation point for runtime calls returning with a pending 489 // exception. The pending exception check happened in the runtime 490 // or native call stub. The pending exception in Thread is 491 // converted into a Java-level exception. 492 // 493 // Contract with Java-level exception handlers: 494 // rax: exception 495 // rdx: throwing pc 496 // 497 // NOTE: At entry of this stub, exception-pc must be on stack !! 498 499 address generate_forward_exception() { 500 StubCodeMark mark(this, "StubRoutines", "forward exception"); 501 address start = __ pc(); 502 503 // Upon entry, the sp points to the return address returning into 504 // Java (interpreted or compiled) code; i.e., the return address 505 // becomes the throwing pc. 506 // 507 // Arguments pushed before the runtime call are still on the stack 508 // but the exception handler will reset the stack pointer -> 509 // ignore them. A potential result in registers can be ignored as 510 // well. 

  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }

  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::xchg_long(jlong exchange_value, volatile jlong* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }
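
  // Hedged, illustrative-only restatement of the cmpxchg stub contract in
  // C++ (the GCC builtin is used purely as notation; it is not what HotSpot
  // calls):
  //
  //   jint atomic_cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
  //     // returns the prior value of *dest; it equals compare_value
  //     // exactly when the swap succeeded
  //     return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
  //   }
  //
  // compare_value is loaded into rax because the x86 CMPXCHG instruction
  // implicitly compares against, and returns the old value through, rax.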

  // Support for int8_t atomic::atomic_cmpxchg(int8_t exchange_value, volatile int8_t* dest,
  //                                           int8_t compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_byte() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
    address start = __ pc();

    __ movsbq(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for int64_t atomic::atomic_cmpxchg(int64_t exchange_value,
  //                                            volatile int64_t* dest,
  //                                            int64_t compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jint atomic::add(jint add_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    if ( os::is_MP() ) __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);
    __ addl(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
    if ( os::is_MP() ) __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
    __ addptr(rax, c_rarg0);
    __ ret(0);

    return start;
  }
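
  // Note on the add stubs above (hedged, illustrative-only C++; the builtins
  // are notation, not what HotSpot calls): XADD leaves the *old* value of
  // *dest in the source register, so the trailing add reconstructs the new
  // value that the contract returns:
  //
  //   jint atomic_add(jint add_value, volatile jint* dest) {
  //     jint old = __atomic_fetch_add(dest, add_value, __ATOMIC_SEQ_CST);
  //     return old + add_value;  // i.e. the updated *dest
  //   }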

  // Support for intptr_t OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess). This is used as part of debugging
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp(rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // caller's fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_sp()
  //
  // This routine is used to find the previous stack pointer for the
  // caller.
  address generate_get_previous_sp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_sp");
    address start = __ pc();

    __ movptr(rax, rsp);
    __ addptr(rax, 8); // return address is at the top of the stack.
    __ ret(0);

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize);    // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }
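
  // Note on MXCSR_MASK (0xFFC0), added for clarity: the low six MXCSR bits
  // are the sticky exception *flag* bits (IE, DE, ZE, OE, UE, PE), which
  // legitimately change as FP code runs. Masking them out means the checks
  // above compare only the rounding-control and exception-mask bits against
  // the VM's expected standard value.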

  address generate_shenandoah_wb(bool c_abi, bool do_cset_test) {
    StubCodeMark mark(this, "StubRoutines", "shenandoah_wb");
    address start = __ pc();

    Label not_done;

    // We use RDI, which also serves as argument register for slow call.
    // RAX always holds the src object ptr, except after the slow call and
    // the cmpxchg, then it holds the result.
    // R8 and RCX are used as temporary registers.
    if (!c_abi) {
      __ push(rdi);
      __ push(r8);
    }

    // Check for object being in the collection set.
    // TODO: Can we use only 1 register here?
    // The source object arrives here in rax.
    // live: rax
    // live: rdi
    if (!c_abi) {
      __ mov(rdi, rax);
    } else {
      if (rax != c_rarg0) {
        __ mov(rax, c_rarg0);
      }
    }
    if (do_cset_test) {
      __ shrptr(rdi, ShenandoahHeapRegion::region_size_bytes_shift_jint());
      // live: r8
      __ movptr(r8, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
      __ movbool(r8, Address(r8, rdi, Address::times_1));
      // unlive: rdi
      __ testbool(r8);
      // unlive: r8
      __ jccb(Assembler::notZero, not_done);

      if (!c_abi) {
        __ pop(r8);
        __ pop(rdi);
      }
      __ ret(0);

      __ bind(not_done);
    }

    if (!c_abi) {
      __ push(rcx);
    }

    if (!c_abi) {
      __ push(rdx);
      __ push(rdi);
      __ push(rsi);
      __ push(r8);
      __ push(r9);
      __ push(r10);
      __ push(r11);
      __ push(r12);
      __ push(r13);
      __ push(r14);
      __ push(r15);
    }
    __ save_vector_registers();
    __ movptr(rdi, rax);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahBarrierSet::write_barrier_JRT), rdi);
    __ restore_vector_registers();
    if (!c_abi) {
      __ pop(r15);
      __ pop(r14);
      __ pop(r13);
      __ pop(r12);
      __ pop(r11);
      __ pop(r10);
      __ pop(r9);
      __ pop(r8);
      __ pop(rsi);
      __ pop(rdi);
      __ pop(rdx);

      __ pop(rcx);
      __ pop(r8);
      __ pop(rdi);
    }
    __ ret(0);

    return start;
  }
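
  // Hedged, illustrative-only sketch of the fast-path test emitted above.
  // in_cset_fast_test_addr() points at a byte map indexed by heap-region
  // number, so the stub effectively computes:
  //
  //   bool maybe_in_cset(void* obj) {
  //     jbyte* map = (jbyte*) ShenandoahHeap::in_cset_fast_test_addr();
  //     return map[(uintptr_t)obj >> ShenandoahHeapRegion::region_size_bytes_shift_jint()] != 0;
  //   }
  //
  // Only objects whose region is in the collection set fall through to the
  // slow call into ShenandoahBarrierSet::write_barrier_JRT.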

  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testptr(c_rarg0, c_rarg0);   // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0);     // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
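
  // Hedged, illustrative-only summary of the four *_fixup stubs above. They
  // run only after CVTTSS2SI/CVTTSD2SI produced the "integer indefinite"
  // value (min_jint/min_jlong), i.e. for NaN and out-of-range inputs, and
  // patch the result to match Java semantics:
  //
  //   jint f2i_fixup_semantics(float x) {
  //     if (x != x) return 0;                    // NaN -> 0
  //     return x < 0.0f ? min_jint : max_jint;   // clamp by sign
  //   }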

  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64( mask, relocInfo::none );
    __ emit_data64( mask, relocInfo::none );

    return start;
  }

  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,
      saved_r10     = 8 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr   = 16 * wordSize,
      error_msg     = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit); // if obj is NULL it is OK
    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // set r12 to heapbase for load_klass()
    __ reinit_heapbase();

    // make sure klass is 'reasonable', which is not zero.
    __ load_klass(rax, rax);  // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // restore c_rarg3
    __ pop(c_rarg2);                               // restore c_rarg2
    __ pop(r12);                                   // restore r12
    __ popf();                                     // restore flags
    __ ret(4 * wordSize);                          // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // get saved c_rarg3 back
    __ pop(c_rarg2);                               // get saved c_rarg2 back
    __ pop(r12);                                   // get saved r12 back
    __ popf();                                     // get saved flags off stack --
                                                   // will be ignored

    __ pusha();                                    // push registers
                                                   // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //     [tos +  0] 16 saved registers
    //     [tos + 16] return address
    //   * [tos + 17] error message (char*)
    //   * [tos + 18] object to verify (oop)
    //   * [tos + 19] saved rax - saved by caller and bashed
    //   * [tos + 20] saved r10 (rscratch1) - saved by caller
    //   * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ mov(rsp, r12);                               // restore rsp
    __ popa();                                      // pop registers (includes r12)
    __ ret(4 * wordSize);                           // pop caller saved stuff

    return start;
  }
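
  // Hedged note on the plausibility test above: Universe::verify_oop_mask()
  // and Universe::verify_oop_bits() define a bit pattern every well-formed
  // oop must match, so conceptually the fast check is
  //
  //   obj == NULL ||
  //   (((uintptr_t)obj & Universe::verify_oop_mask()) == Universe::verify_oop_bits())
  //
  // followed by a non-NULL klass check via load_klass().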
1190 // 1191 // Input: 1192 // Rint - 32-bits value 1193 // Rtmp - scratch 1194 // 1195 void assert_clean_int(Register Rint, Register Rtmp) { 1196 #ifdef ASSERT 1197 Label L; 1198 assert_different_registers(Rtmp, Rint); 1199 __ movslq(Rtmp, Rint); 1200 __ cmpq(Rtmp, Rint); 1201 __ jcc(Assembler::equal, L); 1202 __ stop("high 32-bits of int value are not 0"); 1203 __ bind(L); 1204 #endif 1205 } 1206 1207 // Generate overlap test for array copy stubs 1208 // 1209 // Input: 1210 // c_rarg0 - from 1211 // c_rarg1 - to 1212 // c_rarg2 - element count 1213 // 1214 // Output: 1215 // rax - &from[element count - 1] 1216 // 1217 void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) { 1218 assert(no_overlap_target != NULL, "must be generated"); 1219 array_overlap_test(no_overlap_target, NULL, sf); 1220 } 1221 void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) { 1222 array_overlap_test(NULL, &L_no_overlap, sf); 1223 } 1224 void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) { 1225 const Register from = c_rarg0; 1226 const Register to = c_rarg1; 1227 const Register count = c_rarg2; 1228 const Register end_from = rax; 1229 1230 __ cmpptr(to, from); 1231 __ lea(end_from, Address(from, count, sf, 0)); 1232 if (NOLp == NULL) { 1233 ExternalAddress no_overlap(no_overlap_target); 1234 __ jump_cc(Assembler::belowEqual, no_overlap); 1235 __ cmpptr(to, end_from); 1236 __ jump_cc(Assembler::aboveEqual, no_overlap); 1237 } else { 1238 __ jcc(Assembler::belowEqual, (*NOLp)); 1239 __ cmpptr(to, end_from); 1240 __ jcc(Assembler::aboveEqual, (*NOLp)); 1241 } 1242 } 1243 1244 // Shuffle first three arg regs on Windows into Linux/Solaris locations. 1245 // 1246 // Outputs: 1247 // rdi - rcx 1248 // rsi - rdx 1249 // rdx - r8 1250 // rcx - r9 1251 // 1252 // Registers r9 and r10 are used to save rdi and rsi on Windows, which latter 1253 // are non-volatile. r9 and r10 should not be used by the caller. 
1254 // 1255 void setup_arg_regs(int nargs = 3) { 1256 const Register saved_rdi = r9; 1257 const Register saved_rsi = r10; 1258 assert(nargs == 3 || nargs == 4, "else fix"); 1259 #ifdef _WIN64 1260 assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9, 1261 "unexpected argument registers"); 1262 if (nargs >= 4) 1263 __ mov(rax, r9); // r9 is also saved_rdi 1264 __ movptr(saved_rdi, rdi); 1265 __ movptr(saved_rsi, rsi); 1266 __ mov(rdi, rcx); // c_rarg0 1267 __ mov(rsi, rdx); // c_rarg1 1268 __ mov(rdx, r8); // c_rarg2 1269 if (nargs >= 4) 1270 __ mov(rcx, rax); // c_rarg3 (via rax) 1271 #else 1272 assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx, 1273 "unexpected argument registers"); 1274 #endif 1275 } 1276 1277 void restore_arg_regs() { 1278 const Register saved_rdi = r9; 1279 const Register saved_rsi = r10; 1280 #ifdef _WIN64 1281 __ movptr(rdi, saved_rdi); 1282 __ movptr(rsi, saved_rsi); 1283 #endif 1284 } 1285 1286 1287 // Copy big chunks forward 1288 // 1289 // Inputs: 1290 // end_from - source arrays end address 1291 // end_to - destination array end address 1292 // qword_count - 64-bits element count, negative 1293 // to - scratch 1294 // L_copy_bytes - entry label 1295 // L_copy_8_bytes - exit label 1296 // 1297 void copy_bytes_forward(Register end_from, Register end_to, 1298 Register qword_count, Register to, 1299 Label& L_copy_bytes, Label& L_copy_8_bytes) { 1300 DEBUG_ONLY(__ stop("enter at entry label, not here")); 1301 Label L_loop; 1302 __ align(OptoLoopAlignment); 1303 if (UseUnalignedLoadStores) { 1304 Label L_end; 1305 if (UseAVX > 2) { 1306 __ movl(to, 0xffff); 1307 __ kmovwl(k1, to); 1308 } 1309 // Copy 64-bytes per iteration 1310 __ BIND(L_loop); 1311 if (UseAVX > 2) { 1312 __ evmovdqul(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit); 1313 __ evmovdqul(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit); 1314 } else if (UseAVX == 2) { 1315 __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56)); 1316 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0); 1317 __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24)); 1318 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1); 1319 } else { 1320 __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56)); 1321 __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0); 1322 __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40)); 1323 __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1); 1324 __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24)); 1325 __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2); 1326 __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8)); 1327 __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3); 1328 } 1329 __ BIND(L_copy_bytes); 1330 __ addptr(qword_count, 8); 1331 __ jcc(Assembler::lessEqual, L_loop); 1332 __ subptr(qword_count, 4); // sub(8) and add(4) 1333 __ jccb(Assembler::greater, L_end); 1334 // Copy trailing 32 bytes 1335 if (UseAVX >= 2) { 1336 __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24)); 1337 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0); 1338 } else { 1339 __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24)); 1340 __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0); 1341 __ movdqu(xmm1, Address(end_from, 
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
      }
      __ addptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32-bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
      __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
      __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
      __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
      __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);

      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 4);
      __ jcc(Assembler::lessEqual, L_loop);
    }
    __ subptr(qword_count, 4);
    __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
  }

  // Copy big chunks backward
  //
  // Inputs:
  //   from         - source arrays address
  //   dest         - destination array address
  //   qword_count  - 64-bits element count
  //   to           - scratch
  //   L_copy_bytes - entry label
  //   L_copy_8_bytes - exit label
  //
  void copy_bytes_backward(Register from, Register dest,
                           Register qword_count, Register to,
                           Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      if (UseAVX > 2) {
        __ movl(to, 0xffff);
        __ kmovwl(k1, to);
      }
      // Copy 64-bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 0), Assembler::AVX_512bit);
        __ evmovdqul(Address(dest, qword_count, Address::times_8, 0), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
        __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
        __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
        __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
        __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
        __ movdqu(xmm3, Address(from, qword_count, Address::times_8, 0));
        __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 8);
      __ jcc(Assembler::greaterEqual, L_loop);

      __ addptr(qword_count, 4);  // add(8) and sub(4)
      __ jccb(Assembler::less, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 0));
        __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm1);
      }
      __ subptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32-bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(from, qword_count, Address::times_8, 24));
      __ movq(Address(dest, qword_count, Address::times_8, 24), to);
      __ movq(to, Address(from, qword_count, Address::times_8, 16));
      __ movq(Address(dest, qword_count, Address::times_8, 16), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  8));
      __ movq(Address(dest, qword_count, Address::times_8,  8), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  0));
      __ movq(Address(dest, qword_count, Address::times_8,  0), to);

      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 4);
      __ jcc(Assembler::greaterEqual, L_loop);
    }
    __ addptr(qword_count, 4);
    __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
  }
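
  // Hedged note on the indexing scheme shared by the two bulk-copy helpers
  // above: the forward variant biases end_from/end_to to the last qword and
  // runs a negative qword index up toward zero, so the trailing-qword loop
  // in the callers is conceptually
  //
  //   while (qword_count != 0) {            // qword_count < 0 on entry
  //     end_to[qword_count + 1] = end_from[qword_count + 1];
  //     qword_count++;
  //   }
  //
  // while the backward variant counts a positive index down to zero.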

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_byte_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_byte_copy().
  //
  address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    Label L_copy_byte, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count); // make the count negative
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jccb(Assembler::zero, L_copy_byte);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ addptr(end_from, 2);
    __ addptr(end_to, 2);

    // Check for and copy trailing byte
    __ BIND(L_copy_byte);
    __ testl(byte_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movb(rax, Address(end_from, 8));
    __ movb(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-bytes chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }
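
  // Hedged, illustrative-only sketch of the tail handling above: after the
  // bulk qword copy, the low bits of the original byte count select the
  // remaining moves, conceptually
  //
  //   if (byte_count & 4) { /* copy one dword */ }
  //   if (byte_count & 2) { /* copy one word  */ }
  //   if (byte_count & 1) { /* copy one byte  */ }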

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from high to low addresses.

    // Check for and copy trailing byte
    __ testl(byte_count, 1);
    __ jcc(Assembler::zero, L_copy_2_bytes);
    __ movb(rax, Address(from, byte_count, Address::times_1, -1));
    __ movb(Address(to, byte_count, Address::times_1, -1), rax);
    __ decrement(byte_count); // Adjust for possible trailing word

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jcc(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, byte_count, Address::times_1, -2));
    __ movw(Address(to, byte_count, Address::times_1, -2), rax);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-bytes chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_short_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_short_copy().
1678 // 1679 address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) { 1680 __ align(CodeEntryAlignment); 1681 StubCodeMark mark(this, "StubRoutines", name); 1682 address start = __ pc(); 1683 1684 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes,L_copy_2_bytes,L_exit; 1685 const Register from = rdi; // source array address 1686 const Register to = rsi; // destination array address 1687 const Register count = rdx; // elements count 1688 const Register word_count = rcx; 1689 const Register qword_count = count; 1690 const Register end_from = from; // source array end address 1691 const Register end_to = to; // destination array end address 1692 // End pointers are inclusive, and if count is not zero they point 1693 // to the last unit copied: end_to[0] := end_from[0] 1694 1695 __ enter(); // required for proper stackwalking of RuntimeStub frame 1696 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1697 1698 if (entry != NULL) { 1699 *entry = __ pc(); 1700 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1701 BLOCK_COMMENT("Entry:"); 1702 } 1703 1704 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1705 // r9 and r10 may be used to save non-volatile registers 1706 1707 // 'from', 'to' and 'count' are now valid 1708 __ movptr(word_count, count); 1709 __ shrptr(count, 2); // count => qword_count 1710 1711 // Copy from low to high addresses. Use 'to' as scratch. 1712 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1713 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1714 __ negptr(qword_count); 1715 __ jmp(L_copy_bytes); 1716 1717 // Copy trailing qwords 1718 __ BIND(L_copy_8_bytes); 1719 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1720 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1721 __ increment(qword_count); 1722 __ jcc(Assembler::notZero, L_copy_8_bytes); 1723 1724 // Original 'dest' is trashed, so we can't use it as a 1725 // base register for a possible trailing word copy 1726 1727 // Check for and copy trailing dword 1728 __ BIND(L_copy_4_bytes); 1729 __ testl(word_count, 2); 1730 __ jccb(Assembler::zero, L_copy_2_bytes); 1731 __ movl(rax, Address(end_from, 8)); 1732 __ movl(Address(end_to, 8), rax); 1733 1734 __ addptr(end_from, 4); 1735 __ addptr(end_to, 4); 1736 1737 // Check for and copy trailing word 1738 __ BIND(L_copy_2_bytes); 1739 __ testl(word_count, 1); 1740 __ jccb(Assembler::zero, L_exit); 1741 __ movw(rax, Address(end_from, 8)); 1742 __ movw(Address(end_to, 8), rax); 1743 1744 __ BIND(L_exit); 1745 restore_arg_regs(); 1746 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1747 __ xorptr(rax, rax); // return 0 1748 __ vzeroupper(); 1749 __ leave(); // required for proper stackwalking of RuntimeStub frame 1750 __ ret(0); 1751 1752 // Copy in multi-bytes chunks 1753 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1754 __ jmp(L_copy_4_bytes); 1755 1756 return start; 1757 } 1758 1759 address generate_fill(BasicType t, bool aligned, const char *name) { 1760 __ align(CodeEntryAlignment); 1761 StubCodeMark mark(this, "StubRoutines", name); 1762 address start = __ pc(); 1763 1764 BLOCK_COMMENT("Entry:"); 1765 1766 const Register to = c_rarg0; // source array address 1767 const Register value = c_rarg1; // value 1768 const Register count = c_rarg2; // elements count 1769 1770 __ enter(); // required for proper stackwalking of RuntimeStub frame 
    __ generate_fill(t, aligned, to, value, count, rax, xmm0);

    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0 - source array address
  //   c_rarg1 - destination array address
  //   c_rarg2 - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
                                       address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_2);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2); // count => qword_count

    // Copy from high to low addresses.
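    // Why backward: the regions may overlap with 'to' above 'from', so a
    // forward copy could overwrite source elements before they are read.
    // Illustrative order (comment only, not the generated code):
    //
    //   copy trailing word, then trailing dword (highest addresses first);
    //   for (i = qword_count; i != 0; i--)  to_q[i-1] = from_q[i-1];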
    // Check for and copy trailing word
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, word_count, Address::times_2, -2));
    __ movw(Address(to, word_count, Address::times_2, -2), rax);

    // Check for and copy trailing dword
  __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0 - source array address
  //   c_rarg1 - destination array address
  //   c_rarg2 - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  // Side Effects:
  //   disjoint_int_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_int_oop_copy().
  //
  address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry,
                                         const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    DecoratorSet decorators = ARRAYCOPY_DISJOINT;
    if (dest_uninitialized) {
      decorators |= AS_DEST_NOT_INITIALIZED;
    }
    if (aligned) {
      decorators |= ARRAYCOPY_ALIGNED;
    }

    BasicType type = is_oop ? T_OBJECT : T_INT;
    BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->arraycopy_prologue(_masm, decorators, type, from, to, count);

    // 'from', 'to' and 'count' are now valid
    __ movptr(dword_count, count);
    __ shrptr(count, 1); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
  __ BIND(L_copy_4_bytes);
    __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
    __ jccb(Assembler::zero, L_exit);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

  __ BIND(L_exit);
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ vzeroupper();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0 - source array address
  //   c_rarg1 - destination array address
  //   c_rarg2 - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
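  //
  // Illustrative tail handling (comment only): with dword_count ints the
  // stub moves qword_count = dword_count / 2 eight-byte chunks; if
  // dword_count is odd, the single trailing dword is copied first, since
  // this version copies backward.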
  //
  address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
                                         address *entry, const char *name,
                                         bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_4);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    DecoratorSet decorators = 0;
    if (dest_uninitialized) {
      decorators |= AS_DEST_NOT_INITIALIZED;
    }
    if (aligned) {
      decorators |= ARRAYCOPY_ALIGNED;
    }

    BasicType type = is_oop ? T_OBJECT : T_INT;
    BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
    // no registers are destroyed by this call
    bs->arraycopy_prologue(_masm, decorators, type, from, to, count);

    assert_clean_int(count, rax); // Make sure 'count' is clean int.
    // 'from', 'to' and 'count' are now valid
    __ movptr(dword_count, count);
    __ shrptr(count, 1); // count => qword_count

    // Copy from high to low addresses.
    // Check for and copy trailing dword
    __ testl(dword_count, 1);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, dword_count, Address::times_4, -4));
    __ movl(Address(to, dword_count, Address::times_4, -4), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

  __ BIND(L_exit);
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0 - source array address
  //   c_rarg1 - destination array address
  //   c_rarg2 - element count, treated as ssize_t, can be zero
  //
  // Side Effects:
  //   disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
  //   no-overlap entry point used by generate_conjoint_long_oop_copy().
  //
  address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry,
                                          const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register qword_count = rdx;  // elements count
    const Register end_from    = from; // source array end address
    const Register end_to      = rcx;  // destination array end address
    const Register saved_count = r11;
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    // Save no-overlap entry point for generate_conjoint_long_oop_copy()
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
    // 'from', 'to' and 'qword_count' are now valid

    DecoratorSet decorators = ARRAYCOPY_DISJOINT;
    if (dest_uninitialized) {
      decorators |= AS_DEST_NOT_INITIALIZED;
    }
    if (aligned) {
      decorators |= ARRAYCOPY_ALIGNED;
    }

    BasicType type = is_oop ? T_OBJECT : T_LONG;
    BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count);

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    } else {
      restore_arg_regs();
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
      __ xorptr(rax, rax); // return 0
      __ vzeroupper();
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

  __ BIND(L_exit);
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count);
    restore_arg_regs();
    if (is_oop) {
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
    }
    __ vzeroupper();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0 - source array address
  //   c_rarg1 - destination array address
  //   c_rarg2 - element count, treated as ssize_t, can be zero
  //
  address generate_conjoint_long_oop_copy(bool aligned, bool is_oop,
                                          address nooverlap_target, address *entry,
                                          const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register qword_count = rdx;  // elements count
    const Register saved_count = rcx;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_8);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
    // 'from', 'to' and 'qword_count' are now valid

    // This is a conjoint copy, so the regions may overlap and
    // ARRAYCOPY_DISJOINT must not be claimed here.
    DecoratorSet decorators = 0;
    if (dest_uninitialized) {
      decorators |= AS_DEST_NOT_INITIALIZED;
    }
    if (aligned) {
      decorators |= ARRAYCOPY_ALIGNED;
    }

    BasicType type = is_oop ? T_OBJECT : T_LONG;
    BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count);

    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    } else {
      restore_arg_regs();
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
      __ xorptr(rax, rax); // return 0
      __ vzeroupper();
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

  __ BIND(L_exit);
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count);
    restore_arg_regs();
    if (is_oop) {
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
    }
    __ vzeroupper();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }


  // Helper for generating a dynamic type check.
  // Smashes no registers.
  void generate_type_check(Register sub_klass,
                           Register super_check_offset,
                           Register super_klass,
                           Label& L_success) {
    assert_different_registers(sub_klass, super_check_offset, super_klass);

    BLOCK_COMMENT("type_check:");

    Label L_miss;

    __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL,
                                     super_check_offset);
    __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL);

    // Fall through on failure!
  __ BIND(L_miss);
  }

  //
  // Generate checkcasting array copy stub
  //
  // Input:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //   c_rarg3   - size_t ckoff (super_check_offset)
  // not Win64
  //   c_rarg4   - oop ckval (super_klass)
  // Win64
  //   rsp+40    - oop ckval (super_klass)
  //
  // Output:
  //   rax ==  0  -  success
  //   rax == -1^K - failure, where K is the partial transfer count
  //                 (that is, rax = ~K, so the caller recovers K as ~rax)
  //
  address generate_checkcast_copy(const char *name, address *entry,
                                  bool dest_uninitialized = false) {

    Label L_load_element, L_store_element, L_do_card_marks, L_done;

    // Input registers (after setup_arg_regs)
    const Register from       = rdi; // source array address
    const Register to         = rsi; // destination array address
    const Register length     = rdx; // elements count
    const Register ckoff      = rcx; // super_check_offset
    const Register ckval      = r8;  // super_klass

    // Registers used as temps (r13, r14 are save-on-entry)
    const Register end_from   = from; // source array end address
    const Register end_to     = r13;  // destination array end address
    const Register count      = rdx;  // -(count_remaining)
    const Register r14_length = r14;  // saved copy of length
    // End pointers are inclusive, and if length is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    const Register rax_oop    = rax; // actual oop copied
    const Register r11_klass  = r11; // oop._klass

    //---------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the two arrays are subtypes of Object[] but the
    // destination array type is not equal to or a supertype
    // of the source type.  Each element must be separately
    // checked.

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef ASSERT
    // caller guarantees that the arrays really are different
    // otherwise, we would have to make conjoint checks
    { Label L;
      array_overlap_test(L, TIMES_OOP);
      __ stop("checkcast_copy within a single array");
      __ bind(L);
    }
#endif //ASSERT

    setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
                       // ckoff => rcx, ckval => r8
                       // r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
    // last argument (#4) is on stack on Win64
    __ movptr(ckval, Address(rsp, 6 * wordSize));
#endif

    // Caller of this entry point must set up the argument registers.
    if (entry != NULL) {
      *entry = __ pc();
      BLOCK_COMMENT("Entry:");
    }

    // allocate spill slots for r13, r14
    enum {
      saved_r13_offset,
      saved_r14_offset,
      saved_rbp_offset
    };
    __ subptr(rsp, saved_rbp_offset * wordSize);
    __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
    __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);

    // check that int operands are properly extended to size_t
    assert_clean_int(length, rax);
    assert_clean_int(ckoff, rax);

#ifdef ASSERT
    BLOCK_COMMENT("assert consistent ckoff/ckval");
    // The ckoff and ckval must be mutually consistent,
    // even though caller generates both.
    { Label L;
      int sco_offset = in_bytes(Klass::super_check_offset_offset());
      __ cmpl(ckoff, Address(ckval, sco_offset));
      __ jcc(Assembler::equal, L);
      __ stop("super_check_offset inconsistent");
      __ bind(L);
    }
#endif //ASSERT

    // Loop-invariant addresses.  They are exclusive end pointers.
    Address end_from_addr(from, length, TIMES_OOP, 0);
    Address   end_to_addr(to,   length, TIMES_OOP, 0);
    // Loop-variant addresses.  They assume post-incremented count < 0.
    Address from_element_addr(end_from, count, TIMES_OOP, 0);
    Address   to_element_addr(end_to,   count, TIMES_OOP, 0);

    DecoratorSet decorators = ARRAYCOPY_CHECKCAST;
    if (dest_uninitialized) {
      decorators |= AS_DEST_NOT_INITIALIZED;
    }

    BasicType type = T_OBJECT;
    BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->arraycopy_prologue(_masm, decorators, type, from, to, count);

    // Copy from low to high addresses, indexed from the end of each array.
    __ lea(end_from, end_from_addr);
    __ lea(end_to, end_to_addr);
    __ movptr(r14_length, length); // save a copy of the length
    assert(length == count, "");   // else fix next line:
    __ negptr(count); // negate and test the length
    __ jcc(Assembler::notZero, L_load_element);

    // Empty array:  Nothing to do.
    __ xorptr(rax, rax); // return 0 on (trivial) success
    __ jmp(L_done);

    // ======== begin loop ========
    // (Loop is rotated; its entry is L_load_element.)
    // Loop control:
    //   for (count = -count; count != 0; count++)
    // Base pointers src, dst are biased by 8*(count-1), to last element.
    __ align(OptoLoopAlignment);

  __ BIND(L_store_element);
    __ store_heap_oop(to_element_addr, rax_oop, noreg, noreg, AS_RAW); // store the oop
    __ increment(count); // increment the count toward zero
    __ jcc(Assembler::zero, L_do_card_marks);

    // ======== loop entry is here ========
  __ BIND(L_load_element);
    __ load_heap_oop(rax_oop, from_element_addr, noreg, noreg, AS_RAW); // load the oop
    __ testptr(rax_oop, rax_oop);
    __ jcc(Assembler::zero, L_store_element);

    __ load_klass(r11_klass, rax_oop); // query the object klass
    generate_type_check(r11_klass, ckoff, ckval, L_store_element);
    // ======== end loop ========

    // It was a real error; we must depend on the caller to finish the job.
    // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
    // Emit GC store barriers for the oops we have copied (r14 + rdx),
    // and report their number to the caller.
    assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1);
    Label L_post_barrier;
    __ addptr(r14_length, count); // K = (original - remaining) oops
    __ movptr(rax, r14_length);   // save the value
    __ notptr(rax);               // report (-1^K) to caller (does not affect flags)
    __ jccb(Assembler::notZero, L_post_barrier);
    __ jmp(L_done); // K == 0, nothing was copied, skip post barrier

    // Come here on success only.
  __ BIND(L_do_card_marks);
    __ xorptr(rax, rax); // return 0 on success

  __ BIND(L_post_barrier);
    bs->arraycopy_epilogue(_masm, decorators, type, from, to, r14_length);

    // Common exit point (success or failure).
  __ BIND(L_done);
    __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
    __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  //
  // Generate 'unsafe' array copy stub
  // Though just as safe as the other stubs, it takes an unscaled
  // size_t argument instead of an element count.
  //
  // Input:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - byte count, treated as ssize_t, can be zero
  //
  // Examines the alignment of the operands and dispatches
  // to a long, int, short, or byte copy loop.
  //
  address generate_unsafe_copy(const char *name,
                               address byte_copy_entry, address short_copy_entry,
                               address int_copy_entry, address long_copy_entry) {

    Label L_long_aligned, L_int_aligned, L_short_aligned;

    // Input registers (before setup_arg_regs)
    const Register from = c_rarg0; // source array address
    const Register to   = c_rarg1; // destination array address
    const Register size = c_rarg2; // byte count (size_t)

    // Register used as a temp
    const Register bits = rax;     // test copy of low bits

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);

    __ mov(bits, from);
    __ orptr(bits, to);
    __ orptr(bits, size);

    __ testb(bits, BytesPerLong-1);
    __ jccb(Assembler::zero, L_long_aligned);

    __ testb(bits, BytesPerInt-1);
    __ jccb(Assembler::zero, L_int_aligned);

    __ testb(bits, BytesPerShort-1);
    __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));

  __ BIND(L_short_aligned);
    __ shrptr(size, LogBytesPerShort); // size => short_count
    __ jump(RuntimeAddress(short_copy_entry));

  __ BIND(L_int_aligned);
    __ shrptr(size, LogBytesPerInt); // size => int_count
    __ jump(RuntimeAddress(int_copy_entry));

  __ BIND(L_long_aligned);
    __ shrptr(size, LogBytesPerLong); // size => qword_count
    __ jump(RuntimeAddress(long_copy_entry));

    return start;
  }

  // Perform range checks on the proposed arraycopy.
  // Kills temp, but nothing else.
  // Also, clean the sign bits of src_pos and dst_pos.
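  //
  // Roughly equivalent C (illustrative comment only; the compares are
  // unsigned, which also catches any negative sums):
  //
  //   if ((juint)(src_pos + length) > (juint)arrayOop(src)->length()) goto fail;
  //   if ((juint)(dst_pos + length) > (juint)arrayOop(dst)->length()) goto fail;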
  void arraycopy_range_checks(Register src,     // source array oop (c_rarg0)
                              Register src_pos, // source position (c_rarg1)
                              Register dst,     // destination array oop (c_rarg2)
                              Register dst_pos, // destination position (c_rarg3)
                              Register length,
                              Register temp,
                              Label& L_failed) {
    BLOCK_COMMENT("arraycopy_range_checks:");

    //  if (src_pos + length > arrayOop(src)->length())  FAIL;
    __ movl(temp, length);
    __ addl(temp, src_pos); // src_pos + length
    __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    //  if (dst_pos + length > arrayOop(dst)->length())  FAIL;
    __ movl(temp, length);
    __ addl(temp, dst_pos); // dst_pos + length
    __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
    // Move with sign extension can be used since they are positive.
    __ movslq(src_pos, src_pos);
    __ movslq(dst_pos, dst_pos);

    BLOCK_COMMENT("arraycopy_range_checks done");
  }

  //
  // Generate generic array copy stubs
  //
  // Input:
  //   c_rarg0    - src oop
  //   c_rarg1    - src_pos (32-bits)
  //   c_rarg2    - dst oop
  //   c_rarg3    - dst_pos (32-bits)
  // not Win64
  //   c_rarg4    - element count (32-bits)
  // Win64
  //   rsp+40     - element count (32-bits)
  //
  // Output:
  //   rax ==  0  -  success
  //   rax == -1^K - failure, where K is partial transfer count
  //
  address generate_generic_copy(const char *name,
                                address byte_copy_entry, address short_copy_entry,
                                address int_copy_entry, address oop_copy_entry,
                                address long_copy_entry, address checkcast_copy_entry) {

    Label L_failed, L_failed_0, L_objArray;
    Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;

    // Input registers
    const Register src     = c_rarg0; // source array oop
    const Register src_pos = c_rarg1; // source position
    const Register dst     = c_rarg2; // destination array oop
    const Register dst_pos = c_rarg3; // destination position
#ifndef _WIN64
    const Register length  = c_rarg4;
#else
    const Address  length(rsp, 6 * wordSize); // elements count is on stack on Win64
#endif

    { int modulus = CodeEntryAlignment;
      int target  = modulus - 5; // 5 = sizeof jmp(L_failed)
      int advance = target - (__ offset() % modulus);
      if (advance < 0)  advance += modulus;
      if (advance > 0)  __ nop(advance);
    }
    StubCodeMark mark(this, "StubRoutines", name);

    // Short-hop target to L_failed.  Makes for denser prologue code.
  __ BIND(L_failed_0);
    __ jmp(L_failed);
    assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");

    __ align(CodeEntryAlignment);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_generic_array_copy_ctr);

    //-----------------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the following conditions are met:
    //
    // (1) src and dst must not be null.
    // (2) src_pos must not be negative.
    // (3) dst_pos must not be negative.
    // (4) length  must not be negative.
    // (5) src klass and dst klass should be the same and not NULL.
    // (6) src and dst should be arrays.
    // (7) src_pos + length must not exceed length of src.
    // (8) dst_pos + length must not exceed length of dst.
    //

    //  if (src == NULL) return -1;
    __ testptr(src, src); // src oop
    size_t j1off = __ offset();
    __ jccb(Assembler::zero, L_failed_0);

    //  if (src_pos < 0) return -1;
    __ testl(src_pos, src_pos); // src_pos (32-bits)
    __ jccb(Assembler::negative, L_failed_0);

    //  if (dst == NULL) return -1;
    __ testptr(dst, dst); // dst oop
    __ jccb(Assembler::zero, L_failed_0);

    //  if (dst_pos < 0) return -1;
    __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
    size_t j4off = __ offset();
    __ jccb(Assembler::negative, L_failed_0);

    // The first four tests are very dense code,
    // but not quite dense enough to put four
    // jumps in a 16-byte instruction fetch buffer.
    // That's good, because some branch predictors
    // do not like jumps so close together.
    // Make sure of this.
    guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");

    // registers used as temp
    const Register r11_length    = r11; // elements count to copy
    const Register r10_src_klass = r10; // array klass

    //  if (length < 0) return -1;
    __ movl(r11_length, length); // length (elements count, 32-bits value)
    __ testl(r11_length, r11_length);
    __ jccb(Assembler::negative, L_failed_0);

    __ load_klass(r10_src_klass, src);
#ifdef ASSERT
    //  assert(src->klass() != NULL);
    {
      BLOCK_COMMENT("assert klasses not null {");
      Label L1, L2;
      __ testptr(r10_src_klass, r10_src_klass);
      __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL
      __ bind(L1);
      __ stop("broken null klass");
      __ bind(L2);
      __ load_klass(rax, dst);
      __ cmpq(rax, 0);
      __ jcc(Assembler::equal, L1);   // this would be broken also
      BLOCK_COMMENT("} assert klasses not null done");
    }
#endif

    // Load layout helper (32-bits)
    //
    //  |array_tag|     | header_size | element_type |     |log2_element_size|
    // 32        30    24            16              8     2                 0
    //
    //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
    //

    const int lh_offset = in_bytes(Klass::layout_helper_offset());

    // Handle objArrays completely differently...
    const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
    __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh);
    __ jcc(Assembler::equal, L_objArray);

    //  if (src->klass() != dst->klass()) return -1;
    __ load_klass(rax, dst);
    __ cmpq(r10_src_klass, rax);
    __ jcc(Assembler::notEqual, L_failed);

    const Register rax_lh = rax; // layout helper
    __ movl(rax_lh, Address(r10_src_klass, lh_offset));

    //  if (!src->is_Array()) return -1;
    __ cmpl(rax_lh, Klass::_lh_neutral_value);
    __ jcc(Assembler::greaterEqual, L_failed);

    // At this point, it is known to be a typeArray (array_tag 0x3).
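    // The layout helper fields are decoded further below, roughly as follows
    // (illustrative comment only; the bit layout is defined in klass.hpp):
    //
    //   header_size = (lh >> _lh_header_size_shift) & _lh_header_size_mask;
    //   log2_elsize =  lh & _lh_log2_element_size_mask;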
#ifdef ASSERT
    {
      BLOCK_COMMENT("assert primitive array {");
      Label L;
      __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
      __ jcc(Assembler::greaterEqual, L);
      __ stop("must be a primitive array");
      __ bind(L);
      BLOCK_COMMENT("} assert primitive array done");
    }
#endif

    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                           r10, L_failed);

    // TypeArrayKlass
    //
    // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
    // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
    //

    const Register r10_offset = r10;    // array offset
    const Register rax_elsize = rax_lh; // element size

    __ movl(r10_offset, rax_lh);
    __ shrl(r10_offset, Klass::_lh_header_size_shift);
    __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset
    __ addptr(src, r10_offset);                         // src array offset
    __ addptr(dst, r10_offset);                         // dst array offset
    BLOCK_COMMENT("choose copy loop based on element size");
    __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize

    // the next registers should be set before the jump to the corresponding stub
    const Register from  = c_rarg0; // source array address
    const Register to    = c_rarg1; // destination array address
    const Register count = c_rarg2; // elements count

    // 'from', 'to' and 'count' must be set in this order,
    // since they share registers with 'src', 'src_pos' and 'dst'.

  __ BIND(L_copy_bytes);
    __ cmpl(rax_elsize, 0);
    __ jccb(Assembler::notEqual, L_copy_shorts);
    __ lea(from, Address(src, src_pos, Address::times_1, 0)); // src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_1, 0)); // dst_addr
    __ movl2ptr(count, r11_length);                           // length
    __ jump(RuntimeAddress(byte_copy_entry));

  __ BIND(L_copy_shorts);
    __ cmpl(rax_elsize, LogBytesPerShort);
    __ jccb(Assembler::notEqual, L_copy_ints);
    __ lea(from, Address(src, src_pos, Address::times_2, 0)); // src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_2, 0)); // dst_addr
    __ movl2ptr(count, r11_length);                           // length
    __ jump(RuntimeAddress(short_copy_entry));

  __ BIND(L_copy_ints);
    __ cmpl(rax_elsize, LogBytesPerInt);
    __ jccb(Assembler::notEqual, L_copy_longs);
    __ lea(from, Address(src, src_pos, Address::times_4, 0)); // src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_4, 0)); // dst_addr
    __ movl2ptr(count, r11_length);                           // length
    __ jump(RuntimeAddress(int_copy_entry));

  __ BIND(L_copy_longs);
#ifdef ASSERT
    {
      BLOCK_COMMENT("assert long copy {");
      Label L;
      __ cmpl(rax_elsize, LogBytesPerLong);
      __ jcc(Assembler::equal, L);
      __ stop("must be long copy, but elsize is wrong");
      __ bind(L);
      BLOCK_COMMENT("} assert long copy done");
    }
#endif
    __ lea(from, Address(src, src_pos, Address::times_8, 0)); // src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_8, 0)); // dst_addr
    __ movl2ptr(count, r11_length);                           // length
    __ jump(RuntimeAddress(long_copy_entry));

    // ObjArrayKlass
  __ BIND(L_objArray);
    // live at this point:  r10_src_klass, r11_length, src[_pos], dst[_pos]

    Label L_plain_copy, L_checkcast_copy;
    // test array classes for subtyping
    __ load_klass(rax, dst);
    __ cmpq(r10_src_klass, rax); // usual case is exact equality
    __ jcc(Assembler::notEqual, L_checkcast_copy);
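    // Illustrative control flow for the objArray cases (comment only):
    //
    //   if (src_klass == dst_klass)   goto L_plain_copy;      // no per-element checks
    //   else if (dst is an objArray)  goto L_checkcast_copy;  // per-element subtype check
    //   else                          return -1;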
    // Identically typed arrays can be copied without element-wise checks.
    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                           r10, L_failed);

    __ lea(from, Address(src, src_pos, TIMES_OOP,
                         arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
    __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
                         arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
    __ movl2ptr(count, r11_length); // length
  __ BIND(L_plain_copy);
    __ jump(RuntimeAddress(oop_copy_entry));

  __ BIND(L_checkcast_copy);
    // live at this point:  r10_src_klass, r11_length, rax (dst_klass)
    {
      // Before looking at dst.length, make sure dst is also an objArray.
      __ cmpl(Address(rax, lh_offset), objArray_lh);
      __ jcc(Assembler::notEqual, L_failed);

      // It is safe to examine both src.length and dst.length.
      arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                             rax, L_failed);

      const Register r11_dst_klass = r11;
      __ load_klass(r11_dst_klass, dst); // reload

      // Marshal the base address arguments now, freeing registers.
      __ lea(from, Address(src, src_pos, TIMES_OOP,
                           arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
                           arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ movl(count, length); // length (reloaded)
      Register sco_temp = c_rarg3; // this register is free now
      assert_different_registers(from, to, count, sco_temp,
                                 r11_dst_klass, r10_src_klass);
      assert_clean_int(count, sco_temp);

      // Generate the type check.
      const int sco_offset = in_bytes(Klass::super_check_offset_offset());
      __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
      assert_clean_int(sco_temp, rax);
      generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);

      // Fetch destination element klass from the ObjArrayKlass header.
      int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
      __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
      __ movl(  sco_temp,      Address(r11_dst_klass, sco_offset));
      assert_clean_int(sco_temp, rax);

      // the checkcast_copy loop needs two extra arguments:
      assert(c_rarg3 == sco_temp, "#3 already in place");
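      // Note: checkcast_copy_entry points past the checkcast stub's own
      // argument marshalling (including the Win64 stack load of ckval), so
      // the destination element klass is handed over directly in r8 on all
      // platforms.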
      // Set up arguments for checkcast_copy_entry.
      setup_arg_regs(4);
      __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris
      __ jump(RuntimeAddress(checkcast_copy_entry));
    }

  __ BIND(L_failed);
    __ xorptr(rax, rax);
    __ notptr(rax); // return -1
    __ leave();     // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  void generate_arraycopy_stubs() {
    address entry;
    address entry_jbyte_arraycopy;
    address entry_jshort_arraycopy;
    address entry_jint_arraycopy;
    address entry_oop_arraycopy;
    address entry_jlong_arraycopy;
    address entry_checkcast_arraycopy;

    StubRoutines::_jbyte_disjoint_arraycopy  = generate_disjoint_byte_copy(false, &entry,
                                                                           "jbyte_disjoint_arraycopy");
    StubRoutines::_jbyte_arraycopy           = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy,
                                                                           "jbyte_arraycopy");

    StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry,
                                                                            "jshort_disjoint_arraycopy");
    StubRoutines::_jshort_arraycopy          = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy,
                                                                            "jshort_arraycopy");

    StubRoutines::_jint_disjoint_arraycopy   = generate_disjoint_int_oop_copy(false, false, &entry,
                                                                              "jint_disjoint_arraycopy");
    StubRoutines::_jint_arraycopy            = generate_conjoint_int_oop_copy(false, false, entry,
                                                                              &entry_jint_arraycopy, "jint_arraycopy");

    StubRoutines::_jlong_disjoint_arraycopy  = generate_disjoint_long_oop_copy(false, false, &entry,
                                                                               "jlong_disjoint_arraycopy");
    StubRoutines::_jlong_arraycopy           = generate_conjoint_long_oop_copy(false, false, entry,
                                                                               &entry_jlong_arraycopy, "jlong_arraycopy");


    if (UseCompressedOops) {
      StubRoutines::_oop_disjoint_arraycopy        = generate_disjoint_int_oop_copy(false, true, &entry,
                                                                                    "oop_disjoint_arraycopy");
      StubRoutines::_oop_arraycopy                 = generate_conjoint_int_oop_copy(false, true, entry,
                                                                                    &entry_oop_arraycopy, "oop_arraycopy");
      StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry,
                                                                                    "oop_disjoint_arraycopy_uninit",
                                                                                    /*dest_uninitialized*/true);
      StubRoutines::_oop_arraycopy_uninit          = generate_conjoint_int_oop_copy(false, true, entry,
                                                                                    NULL, "oop_arraycopy_uninit",
                                                                                    /*dest_uninitialized*/true);
    } else {
      StubRoutines::_oop_disjoint_arraycopy        = generate_disjoint_long_oop_copy(false, true, &entry,
                                                                                     "oop_disjoint_arraycopy");
      StubRoutines::_oop_arraycopy                 = generate_conjoint_long_oop_copy(false, true, entry,
                                                                                     &entry_oop_arraycopy, "oop_arraycopy");
      StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry,
                                                                                     "oop_disjoint_arraycopy_uninit",
                                                                                     /*dest_uninitialized*/true);
      StubRoutines::_oop_arraycopy_uninit          = generate_conjoint_long_oop_copy(false, true, entry,
                                                                                     NULL, "oop_arraycopy_uninit",
                                                                                     /*dest_uninitialized*/true);
    }

    StubRoutines::_checkcast_arraycopy        = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
    StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
                                                                        /*dest_uninitialized*/true);

    StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy",
                                                           entry_jbyte_arraycopy,
                                                           entry_jshort_arraycopy,
                                                           entry_jint_arraycopy,
                                                           entry_jlong_arraycopy);
    StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
                                                             entry_jbyte_arraycopy,
                                                             entry_jshort_arraycopy,
                                                             entry_jint_arraycopy,
                                                             entry_oop_arraycopy,
                                                             entry_jlong_arraycopy,
                                                             entry_checkcast_arraycopy);

    StubRoutines::_jbyte_fill          = generate_fill(T_BYTE,  false, "jbyte_fill");
    StubRoutines::_jshort_fill         = generate_fill(T_SHORT, false, "jshort_fill");
    StubRoutines::_jint_fill           = generate_fill(T_INT,   false, "jint_fill");
    StubRoutines::_arrayof_jbyte_fill  = generate_fill(T_BYTE,  true,  "arrayof_jbyte_fill");
    StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true,  "arrayof_jshort_fill");
    StubRoutines::_arrayof_jint_fill   = generate_fill(T_INT,   true,  "arrayof_jint_fill");

    // We don't generate specialized code for HeapWord-aligned source
    // arrays, so just use the code we've already generated
    StubRoutines::_arrayof_jbyte_disjoint_arraycopy  = StubRoutines::_jbyte_disjoint_arraycopy;
    StubRoutines::_arrayof_jbyte_arraycopy           = StubRoutines::_jbyte_arraycopy;

    StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy;
    StubRoutines::_arrayof_jshort_arraycopy          = StubRoutines::_jshort_arraycopy;

    StubRoutines::_arrayof_jint_disjoint_arraycopy   = StubRoutines::_jint_disjoint_arraycopy;
    StubRoutines::_arrayof_jint_arraycopy            = StubRoutines::_jint_arraycopy;

    StubRoutines::_arrayof_jlong_disjoint_arraycopy  = StubRoutines::_jlong_disjoint_arraycopy;
    StubRoutines::_arrayof_jlong_arraycopy           = StubRoutines::_jlong_arraycopy;

    StubRoutines::_arrayof_oop_disjoint_arraycopy    = StubRoutines::_oop_disjoint_arraycopy;
    StubRoutines::_arrayof_oop_arraycopy             = StubRoutines::_oop_arraycopy;

    StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit;
    StubRoutines::_arrayof_oop_arraycopy_uninit          = StubRoutines::_oop_arraycopy_uninit;
  }

  // AES intrinsic stubs
  enum {AESBlockSize = 16};

  address generate_key_shuffle_mask() {
    __ align(16);
    StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask");
    address start = __ pc();
    __ emit_data64( 0x0405060700010203, relocInfo::none );
    __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none );
    return start;
  }

  address generate_counter_shuffle_mask() {
    __ align(16);
    StubCodeMark mark(this, "StubRoutines", "counter_shuffle_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }

  // Utility routine for loading a 128-bit key word in little endian format;
  // the shuffle mask may optionally be supplied in an xmm register already.
  void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
    __ movdqu(xmmdst, Address(key, offset));
    if (xmm_shuf_mask != NULL) {
      __ pshufb(xmmdst, xmm_shuf_mask);
    } else {
      __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    }
  }

  // Utility routine for increasing the 128-bit counter (the iv in CTR mode)
  void inc_counter(Register reg, XMMRegister xmmdst, int inc_delta, Label& next_block) {
    __ pextrq(reg, xmmdst, 0x0);
    __ addq(reg, inc_delta);
    __ pinsrq(xmmdst, reg, 0x0);
    __ jcc(Assembler::carryClear, next_block); // jump if no carry
    __ pextrq(reg, xmmdst, 0x01); // carry
    __ addq(reg, 0x01);
    __ pinsrq(xmmdst, reg, 0x01); // carry end
  __ BIND(next_block);            // next instruction
  }
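  // Illustrative view of the increment above (comment only): the counter is a
  // 128-bit little-endian value held in an xmm register as two 64-bit lanes;
  //
  //   lo += inc_delta;      // pextrq/addq/pinsrq on lane 0
  //   if (carry) hi += 1;   // propagate into lane 1 only when lane 0 wraps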

  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //
  address generate_aescrypt_encryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
    Label L_doLast;
    address start = __ pc();

    const Register from   = c_rarg0; // source array address
    const Register to     = c_rarg1; // destination array address
    const Register key    = c_rarg2; // key array address
    const Register keylen = rax;

    const XMMRegister xmm_result        = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;
    // On win64 xmm6-xmm15 must be preserved so don't use them.
    const XMMRegister xmm_temp1         = xmm2;
    const XMMRegister xmm_temp2         = xmm3;
    const XMMRegister xmm_temp3         = xmm4;
    const XMMRegister xmm_temp4         = xmm5;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input

    // For encryption, the Java expanded key ordering is just what we need;
    // we don't know if the key is aligned, hence not using load-execute form.

    load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask);
    __ pxor(xmm_result, xmm_temp1);

    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);
    __ aesenc(xmm_result, xmm_temp3);
    __ aesenc(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);
    __ aesenc(xmm_result, xmm_temp3);
    __ aesenc(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);

    __ cmpl(keylen, 44);
    __ jccb(Assembler::equal, L_doLast);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);

    __ cmpl(keylen, 52);
    __ jccb(Assembler::equal, L_doLast);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);

  __ BIND(L_doLast);
    __ aesenc(xmm_result, xmm_temp1);
    __ aesenclast(xmm_result, xmm_temp2);
    __ movdqu(Address(to, 0), xmm_result); // store the result
    __ xorptr(rax, rax);                   // return 0
    __ leave();                            // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
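  // Note on the keylen checks above and below: the expanded key holds
  // 4*(rounds+1) ints, so lengths 44/52/60 correspond to AES-128/192/256
  // with 10/12/14 rounds; the cmpl(keylen, 44/52) tests select how many
  // extra aesenc/aesdec rounds run before the final aesenclast/aesdeclast.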

  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //
  address generate_aescrypt_decryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
    Label L_doLast;
    address start = __ pc();

    const Register from   = c_rarg0; // source array address
    const Register to     = c_rarg1; // destination array address
    const Register key    = c_rarg2; // key array address
    const Register keylen = rax;

    const XMMRegister xmm_result        = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;
    // On win64 xmm6-xmm15 must be preserved so don't use them.
    const XMMRegister xmm_temp1         = xmm2;
    const XMMRegister xmm_temp2         = xmm3;
    const XMMRegister xmm_temp3         = xmm4;
    const XMMRegister xmm_temp4         = xmm5;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    __ movdqu(xmm_result, Address(from, 0));

    // For decryption, the Java expanded key ordering is rotated one position
    // from what we want, so we start from 0x10 here and hit 0x00 last.
    // We don't know if the key is aligned, hence not using load-execute form.
    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);

    __ pxor  (xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);
    __ aesdec(xmm_result, xmm_temp3);
    __ aesdec(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);
    __ aesdec(xmm_result, xmm_temp3);
    __ aesdec(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask);

    __ cmpl(keylen, 44);
    __ jccb(Assembler::equal, L_doLast);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);
    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);

    __ cmpl(keylen, 52);
    __ jccb(Assembler::equal, L_doLast);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);

  __ BIND(L_doLast);
    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    // for decryption the aesdeclast operation is always on key+0x00
    __ aesdeclast(xmm_result, xmm_temp3);
    __ movdqu(Address(to, 0), xmm_result); // store the result
    __ xorptr(rax, rax);                   // return 0
    __ leave();                            // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }


  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - r vector byte array address
  //   c_rarg4   - input length
  //
  // Output:
  //   rax       - input length
  //
  address generate_cipherBlockChaining_encryptAESCrypt() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
    address start = __ pc();

    Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256;
    const Register from = c_rarg0; // source array address
    const Register to   = c_rarg1; // destination array address
    const Register key  = c_rarg2; // key array address
    const Register rvec = c_rarg3; // r byte array initialized from initvector array address
                                   // and left with the results of the last encryption block
#ifndef _WIN64
    const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16)
#else
    const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64
    const Register len_reg = r11;             // pick the volatile windows register
#endif
    const Register pos = rax;

    // xmm register assignments for the loops below
    const XMMRegister xmm_result = xmm0;
    const XMMRegister xmm_temp   = xmm1;
    // keys 0-10 preloaded into xmm2-xmm12
    const int XMM_REG_NUM_KEY_FIRST = 2;
    const int XMM_REG_NUM_KEY_LAST  = 15;
    const XMMRegister xmm_key0  = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
    const XMMRegister xmm_key10 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10);
    const XMMRegister xmm_key11 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11);
    const XMMRegister xmm_key12 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12);
    const XMMRegister xmm_key13 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13);

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode
    // On EVEX without VL and BW, these instructions will all be AVX.
3266 if (VM_Version::supports_avx512vlbw()) { 3267 __ movl(rax, 0xffff); 3268 __ kmovql(k1, rax); 3269 } 3270 3271 #ifdef _WIN64 3272 // on win64, fill len_reg from stack position 3273 __ movl(len_reg, len_mem); 3274 #else 3275 __ push(len_reg); // Save 3276 #endif 3277 3278 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 3279 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3280 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0 3281 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) { 3282 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3283 offset += 0x10; 3284 } 3285 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 3286 3287 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3288 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3289 __ cmpl(rax, 44); 3290 __ jcc(Assembler::notEqual, L_key_192_256); 3291 3292 // 128 bit code follows here 3293 __ movptr(pos, 0); 3294 __ align(OptoLoopAlignment); 3295 3296 __ BIND(L_loopTop_128); 3297 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3298 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3299 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3300 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) { 3301 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3302 } 3303 __ aesenclast(xmm_result, xmm_key10); 3304 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3305 // no need to store r to memory until we exit 3306 __ addptr(pos, AESBlockSize); 3307 __ subptr(len_reg, AESBlockSize); 3308 __ jcc(Assembler::notEqual, L_loopTop_128); 3309 3310 __ BIND(L_exit); 3311 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object 3312 3313 #ifdef _WIN64 3314 __ movl(rax, len_mem); 3315 #else 3316 __ pop(rax); // return length 3317 #endif 3318 __ leave(); // required for proper stackwalking of RuntimeStub frame 3319 __ ret(0); 3320 3321 __ BIND(L_key_192_256); 3322 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 3323 load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask); 3324 load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask); 3325 __ cmpl(rax, 52); 3326 __ jcc(Assembler::notEqual, L_key_256); 3327 3328 // 192-bit code follows here (could be changed to use more xmm registers) 3329 __ movptr(pos, 0); 3330 __ align(OptoLoopAlignment); 3331 3332 __ BIND(L_loopTop_192); 3333 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3334 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3335 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3336 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) { 3337 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3338 } 3339 __ aesenclast(xmm_result, xmm_key12); 3340 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3341 // no need to store r to memory until we exit 3342 __ addptr(pos, AESBlockSize); 3343 __ subptr(len_reg, AESBlockSize); 3344 __ jcc(Assembler::notEqual, L_loopTop_192); 3345 __ jmp(L_exit); 3346 3347 __ BIND(L_key_256); 3348 // 256-bit code follows here (could be 
changed to use more xmm registers)
    load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask);
    __ movptr(pos, 0);
    __ align(OptoLoopAlignment);

    __ BIND(L_loopTop_256);
    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));  // get next 16 bytes of input
    __ pxor  (xmm_result, xmm_temp);    // xor with the current r vector
    __ pxor  (xmm_result, xmm_key0);    // do the aes rounds
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) {
      __ aesenc(xmm_result, as_XMMRegister(rnum));
    }
    load_key(xmm_temp, key, 0xe0);
    __ aesenclast(xmm_result, xmm_temp);
    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);  // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_loopTop_256);
    __ jmp(L_exit);

    return start;
  }

  // Safefetch stubs.
  void generate_safefetch(const char* name, int size, address* entry,
                          address* fault_pc, address* continuation_pc) {
    // safefetch signatures:
    //   int      SafeFetch32(int*      adr, int      errValue);
    //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
    //
    // arguments:
    //   c_rarg0 = adr
    //   c_rarg1 = errValue
    //
    // result:
    //   rax = *adr or errValue

    StubCodeMark mark(this, "StubRoutines", name);

    // Entry point, pc or function descriptor.
    *entry = __ pc();

    // Load *adr into c_rarg1, may fault.
    *fault_pc = __ pc();
    switch (size) {
      case 4:
        // int32_t
        __ movl(c_rarg1, Address(c_rarg0, 0));
        break;
      case 8:
        // int64_t
        __ movq(c_rarg1, Address(c_rarg0, 0));
        break;
      default:
        ShouldNotReachHere();
    }

    // return errValue or *adr
    *continuation_pc = __ pc();
    __ movq(rax, c_rarg1);
    __ ret(0);
  }

  // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time
  // to hide instruction latency. Unlike CBC encryption, CBC decryption
  // parallelizes, because each plaintext block depends only on two ciphertext
  // blocks: p[i] = D_k(c[i]) ^ c[i-1].
  //
  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - r vector byte array address
  //   c_rarg4   - input length
  //
  // Output:
  //   rax       - input length
  //
  address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
    address start = __ pc();

    const Register from    = c_rarg0;  // source array address
    const Register to      = c_rarg1;  // destination array address
    const Register key     = c_rarg2;  // key array address
    const Register rvec    = c_rarg3;  // r byte array initialized from initvector array address
                                       // and left with the results of the last encryption block
#ifndef _WIN64
    const Register len_reg = c_rarg4;  // src len (must be multiple of blocksize 16)
#else
    const Address  len_mem(rbp, 6 * wordSize);  // length is on stack on Win64
    const Register len_reg = r11;      // pick the volatile windows register
#endif
    const Register pos     = rax;

    const int PARALLEL_FACTOR = 4;
    const int ROUNDS[3] = { 10, 12, 14 };  // aes rounds for key128, key192, key256

    Label L_exit;
    Label
L_singleBlock_loopTopHead[3]; // 128, 192, 256 3451 Label L_singleBlock_loopTopHead2[3]; // 128, 192, 256 3452 Label L_singleBlock_loopTop[3]; // 128, 192, 256 3453 Label L_multiBlock_loopTopHead[3]; // 128, 192, 256 3454 Label L_multiBlock_loopTop[3]; // 128, 192, 256 3455 3456 // keys 0-10 preloaded into xmm5-xmm15 3457 const int XMM_REG_NUM_KEY_FIRST = 5; 3458 const int XMM_REG_NUM_KEY_LAST = 15; 3459 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3460 const XMMRegister xmm_key_last = as_XMMRegister(XMM_REG_NUM_KEY_LAST); 3461 3462 __ enter(); // required for proper stackwalking of RuntimeStub frame 3463 3464 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3465 // context for the registers used, where all instructions below are using 128-bit mode 3466 // On EVEX without VL and BW, these instructions will all be AVX. 3467 if (VM_Version::supports_avx512vlbw()) { 3468 __ movl(rax, 0xffff); 3469 __ kmovql(k1, rax); 3470 } 3471 3472 #ifdef _WIN64 3473 // on win64, fill len_reg from stack position 3474 __ movl(len_reg, len_mem); 3475 #else 3476 __ push(len_reg); // Save 3477 #endif 3478 __ push(rbx); 3479 // the java expanded key ordering is rotated one position from what we want 3480 // so we start from 0x10 here and hit 0x00 last 3481 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 3482 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3483 // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00 3484 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) { 3485 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3486 offset += 0x10; 3487 } 3488 load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask); 3489 3490 const XMMRegister xmm_prev_block_cipher = xmm1; // holds cipher of previous block 3491 3492 // registers holding the four results in the parallelized loop 3493 const XMMRegister xmm_result0 = xmm0; 3494 const XMMRegister xmm_result1 = xmm2; 3495 const XMMRegister xmm_result2 = xmm3; 3496 const XMMRegister xmm_result3 = xmm4; 3497 3498 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec 3499 3500 __ xorptr(pos, pos); 3501 3502 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3503 __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3504 __ cmpl(rbx, 52); 3505 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[1]); 3506 __ cmpl(rbx, 60); 3507 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[2]); 3508 3509 #define DoFour(opc, src_reg) \ 3510 __ opc(xmm_result0, src_reg); \ 3511 __ opc(xmm_result1, src_reg); \ 3512 __ opc(xmm_result2, src_reg); \ 3513 __ opc(xmm_result3, src_reg); \ 3514 3515 for (int k = 0; k < 3; ++k) { 3516 __ BIND(L_multiBlock_loopTopHead[k]); 3517 if (k != 0) { 3518 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left 3519 __ jcc(Assembler::less, L_singleBlock_loopTopHead2[k]); 3520 } 3521 if (k == 1) { 3522 __ subptr(rsp, 6 * wordSize); 3523 __ movdqu(Address(rsp, 0), xmm15); //save last_key from xmm15 3524 load_key(xmm15, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0 3525 __ movdqu(Address(rsp, 2 * wordSize), xmm15); 3526 load_key(xmm1, key, 0xc0); // 0xc0; 3527 __ movdqu(Address(rsp, 4 * wordSize), xmm1); 3528 } else if (k == 2) { 3529 __ subptr(rsp, 10 * wordSize); 3530 __ 
movdqu(Address(rsp, 0), xmm15);  // save last_key from xmm15
        load_key(xmm15, key, 0xd0); // 0xd0; 256-bit key goes up to 0xe0
        __ movdqu(Address(rsp, 6 * wordSize), xmm15);
        load_key(xmm1, key, 0xe0);  // 0xe0;
        __ movdqu(Address(rsp, 8 * wordSize), xmm1);
        load_key(xmm15, key, 0xb0); // 0xb0;
        __ movdqu(Address(rsp, 2 * wordSize), xmm15);
        load_key(xmm1, key, 0xc0);  // 0xc0;
        __ movdqu(Address(rsp, 4 * wordSize), xmm1);
      }
      __ align(OptoLoopAlignment);
      __ BIND(L_multiBlock_loopTop[k]);
      __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize);  // see if at least 4 blocks left
      __ jcc(Assembler::less, L_singleBlock_loopTopHead[k]);

      if (k != 0) {
        __ movdqu(xmm15, Address(rsp, 2 * wordSize));
        __ movdqu(xmm1, Address(rsp, 4 * wordSize));
      }

      __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize));  // get next 4 blocks into xmm_result registers
      __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize));

      DoFour(pxor, xmm_key_first);
      if (k == 0) {
        for (int rnum = 1; rnum < ROUNDS[k]; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        DoFour(aesdeclast, xmm_key_last);
      } else if (k == 1) {
        for (int rnum = 1; rnum <= ROUNDS[k] - 2; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        __ movdqu(xmm_key_last, Address(rsp, 0));  // xmm15 needs to be loaded again.
        DoFour(aesdec, xmm1);  // key : 0xc0
        __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00));  // xmm1 needs to be loaded again
        DoFour(aesdeclast, xmm_key_last);
      } else if (k == 2) {
        for (int rnum = 1; rnum <= ROUNDS[k] - 4; rnum++) {
          DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
        }
        DoFour(aesdec, xmm1);  // key : 0xc0
        __ movdqu(xmm15, Address(rsp, 6 * wordSize));
        __ movdqu(xmm1, Address(rsp, 8 * wordSize));
        DoFour(aesdec, xmm15);  // key : 0xd0
        __ movdqu(xmm_key_last, Address(rsp, 0));  // xmm15 needs to be loaded again.
3578 DoFour(aesdec, xmm1); // key : 0xe0 3579 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again 3580 DoFour(aesdeclast, xmm_key_last); 3581 } 3582 3583 // for each result, xor with the r vector of previous cipher block 3584 __ pxor(xmm_result0, xmm_prev_block_cipher); 3585 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 3586 __ pxor(xmm_result1, xmm_prev_block_cipher); 3587 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 3588 __ pxor(xmm_result2, xmm_prev_block_cipher); 3589 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 3590 __ pxor(xmm_result3, xmm_prev_block_cipher); 3591 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3 * AESBlockSize)); // this will carry over to next set of blocks 3592 if (k != 0) { 3593 __ movdqu(Address(rvec, 0x00), xmm_prev_block_cipher); 3594 } 3595 3596 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output 3597 __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1); 3598 __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2); 3599 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3); 3600 3601 __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); 3602 __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); 3603 __ jmp(L_multiBlock_loopTop[k]); 3604 3605 // registers used in the non-parallelized loops 3606 // xmm register assignments for the loops below 3607 const XMMRegister xmm_result = xmm0; 3608 const XMMRegister xmm_prev_block_cipher_save = xmm2; 3609 const XMMRegister xmm_key11 = xmm3; 3610 const XMMRegister xmm_key12 = xmm4; 3611 const XMMRegister key_tmp = xmm4; 3612 3613 __ BIND(L_singleBlock_loopTopHead[k]); 3614 if (k == 1) { 3615 __ addptr(rsp, 6 * wordSize); 3616 } else if (k == 2) { 3617 __ addptr(rsp, 10 * wordSize); 3618 } 3619 __ cmpptr(len_reg, 0); // any blocks left?? 
__ jcc(Assembler::equal, L_exit);
      __ BIND(L_singleBlock_loopTopHead2[k]);
      if (k == 1) {
        load_key(xmm_key11, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0
        load_key(xmm_key12, key, 0xc0); // 0xc0; 192-bit key goes up to 0xc0
      }
      if (k == 2) {
        load_key(xmm_key11, key, 0xb0); // 0xb0; 256-bit key goes up to 0xe0
      }
      __ align(OptoLoopAlignment);
      __ BIND(L_singleBlock_loopTop[k]);
      __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));  // get next 16 bytes of cipher input
      __ movdqa(xmm_prev_block_cipher_save, xmm_result);  // save for next r vector
      __ pxor(xmm_result, xmm_key_first);  // do the aes dec rounds
      for (int rnum = 1; rnum <= 9; rnum++) {
        __ aesdec(xmm_result, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
      }
      if (k == 1) {
        __ aesdec(xmm_result, xmm_key11);
        __ aesdec(xmm_result, xmm_key12);
      }
      if (k == 2) {
        __ aesdec(xmm_result, xmm_key11);
        load_key(key_tmp, key, 0xc0);
        __ aesdec(xmm_result, key_tmp);
        load_key(key_tmp, key, 0xd0);
        __ aesdec(xmm_result, key_tmp);
        load_key(key_tmp, key, 0xe0);
        __ aesdec(xmm_result, key_tmp);
      }

      __ aesdeclast(xmm_result, xmm_key_last);           // xmm15 always came from key+0
      __ pxor(xmm_result, xmm_prev_block_cipher);        // xor with the current r vector
      __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);  // store into the next 16 bytes of output
      // no need to store r to memory until we exit
      __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save);  // set up next r vector with cipher input from this block
      __ addptr(pos, AESBlockSize);
      __ subptr(len_reg, AESBlockSize);
      __ jcc(Assembler::notEqual, L_singleBlock_loopTop[k]);
      if (k != 2) {
        __ jmp(L_exit);
      }
    } // for 128/192/256

    __ BIND(L_exit);
    __ movdqu(Address(rvec, 0), xmm_prev_block_cipher);  // final value of r stored in rvec of CipherBlockChaining object
    __ pop(rbx);
#ifdef _WIN64
    __ movl(rax, len_mem);
#else
    __ pop(rax);  // return length
#endif
    __ leave();   // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  address generate_upper_word_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "upper_word_mask");
    address start = __ pc();
    __ emit_data64(0x0000000000000000, relocInfo::none);
    __ emit_data64(0xFFFFFFFF00000000, relocInfo::none);
    return start;
  }

  address generate_shuffle_byte_flip_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }

  // ofs and limit are used for multi-block byte array.
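  // The multi-block contract, sketched (an assumption matching the
  // DigestBase.implCompressMultiBlock signature quoted below): process
  // 64-byte blocks starting at ofs while ofs <= limit, and return the
  // updated ofs in rax, i.e. roughly
  //   do { sha1_block(state, b + ofs); ofs += 64; } while (ofs <= limit);
  //   return ofs;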
// int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
  address generate_sha1_implCompress(bool multi_block, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf   = c_rarg0;
    Register state = c_rarg1;
    Register ofs   = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister abcd = xmm0;
    const XMMRegister e0 = xmm1;
    const XMMRegister e1 = xmm2;
    const XMMRegister msg0 = xmm3;

    const XMMRegister msg1 = xmm4;
    const XMMRegister msg2 = xmm5;
    const XMMRegister msg3 = xmm6;
    const XMMRegister shuf_mask = xmm7;

    __ enter();

    __ subptr(rsp, 4 * wordSize);

    __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask,
                 buf, state, ofs, limit, rsp, multi_block);

    __ addptr(rsp, 4 * wordSize);

    __ leave();
    __ ret(0);
    return start;
  }

  address generate_pshuffle_byte_flip_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask");
    address start = __ pc();
    __ emit_data64(0x0405060700010203, relocInfo::none);
    __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);

    if (VM_Version::supports_avx2()) {
      __ emit_data64(0x0405060700010203, relocInfo::none);  // second copy
      __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
      // _SHUF_00BA
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      // _SHUF_DC00
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
    }

    return start;
  }

  // Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
  address generate_pshuffle_byte_flip_mask_sha512() {
    __ align(32);
    StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask_sha512");
    address start = __ pc();
    if (VM_Version::supports_avx2()) {
      __ emit_data64(0x0001020304050607, relocInfo::none);  // PSHUFFLE_BYTE_FLIP_MASK
      __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
      __ emit_data64(0x1011121314151617, relocInfo::none);
      __ emit_data64(0x18191a1b1c1d1e1f, relocInfo::none);
      __ emit_data64(0x0000000000000000, relocInfo::none);  // MASK_YMM_LO
      __ emit_data64(0x0000000000000000, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
    }

    return start;
  }

  // ofs and limit are used for multi-block byte array.
3776 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit) 3777 address generate_sha256_implCompress(bool multi_block, const char *name) { 3778 assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), ""); 3779 __ align(CodeEntryAlignment); 3780 StubCodeMark mark(this, "StubRoutines", name); 3781 address start = __ pc(); 3782 3783 Register buf = c_rarg0; 3784 Register state = c_rarg1; 3785 Register ofs = c_rarg2; 3786 Register limit = c_rarg3; 3787 3788 const XMMRegister msg = xmm0; 3789 const XMMRegister state0 = xmm1; 3790 const XMMRegister state1 = xmm2; 3791 const XMMRegister msgtmp0 = xmm3; 3792 3793 const XMMRegister msgtmp1 = xmm4; 3794 const XMMRegister msgtmp2 = xmm5; 3795 const XMMRegister msgtmp3 = xmm6; 3796 const XMMRegister msgtmp4 = xmm7; 3797 3798 const XMMRegister shuf_mask = xmm8; 3799 3800 __ enter(); 3801 3802 __ subptr(rsp, 4 * wordSize); 3803 3804 if (VM_Version::supports_sha()) { 3805 __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3806 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3807 } else if (VM_Version::supports_avx2()) { 3808 __ sha256_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3809 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3810 } 3811 __ addptr(rsp, 4 * wordSize); 3812 __ vzeroupper(); 3813 __ leave(); 3814 __ ret(0); 3815 return start; 3816 } 3817 3818 address generate_sha512_implCompress(bool multi_block, const char *name) { 3819 assert(VM_Version::supports_avx2(), ""); 3820 assert(VM_Version::supports_bmi2(), ""); 3821 __ align(CodeEntryAlignment); 3822 StubCodeMark mark(this, "StubRoutines", name); 3823 address start = __ pc(); 3824 3825 Register buf = c_rarg0; 3826 Register state = c_rarg1; 3827 Register ofs = c_rarg2; 3828 Register limit = c_rarg3; 3829 3830 const XMMRegister msg = xmm0; 3831 const XMMRegister state0 = xmm1; 3832 const XMMRegister state1 = xmm2; 3833 const XMMRegister msgtmp0 = xmm3; 3834 const XMMRegister msgtmp1 = xmm4; 3835 const XMMRegister msgtmp2 = xmm5; 3836 const XMMRegister msgtmp3 = xmm6; 3837 const XMMRegister msgtmp4 = xmm7; 3838 3839 const XMMRegister shuf_mask = xmm8; 3840 3841 __ enter(); 3842 3843 __ sha512_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3844 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3845 3846 __ vzeroupper(); 3847 __ leave(); 3848 __ ret(0); 3849 return start; 3850 } 3851 3852 // This is a version of CTR/AES crypt which does 6 blocks in a loop at a time 3853 // to hide instruction latency 3854 // 3855 // Arguments: 3856 // 3857 // Inputs: 3858 // c_rarg0 - source byte array address 3859 // c_rarg1 - destination byte array address 3860 // c_rarg2 - K (key) in little endian int array 3861 // c_rarg3 - counter vector byte array address 3862 // Linux 3863 // c_rarg4 - input length 3864 // c_rarg5 - saved encryptedCounter start 3865 // rbp + 6 * wordSize - saved used length 3866 // Windows 3867 // rbp + 6 * wordSize - input length 3868 // rbp + 7 * wordSize - saved encryptedCounter start 3869 // rbp + 8 * wordSize - saved used length 3870 // 3871 // Output: 3872 // rax - input length 3873 // 3874 address generate_counterMode_AESCrypt_Parallel() { 3875 assert(UseAES, "need AES instructions and misaligned SSE support"); 3876 __ align(CodeEntryAlignment); 3877 StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt"); 3878 address start = __ pc(); 3879 const Register from = c_rarg0; // source array address 3880 const Register to = c_rarg1; 
// destination array address
    const Register key     = c_rarg2;  // key array address
    const Register counter = c_rarg3;  // counter byte array initialized from counter array address
                                       // and updated with the incremented counter in the end
#ifndef _WIN64
    const Register len_reg = c_rarg4;
    const Register saved_encCounter_start = c_rarg5;
    const Register used_addr = r10;
    const Address  used_mem(rbp, 2 * wordSize);
    const Register used = r11;
#else
    const Address len_mem(rbp, 6 * wordSize);               // length is on stack on Win64
    const Address saved_encCounter_mem(rbp, 7 * wordSize);  // saved encrypted counter is on stack on Win64
    const Address used_mem(rbp, 8 * wordSize);              // used length is on stack on Win64
    const Register len_reg = r10;  // pick the first volatile windows register
    const Register saved_encCounter_start = r11;
    const Register used_addr = r13;
    const Register used = r14;
#endif
    const Register pos = rax;

    const int PARALLEL_FACTOR = 6;
    const XMMRegister xmm_counter_shuf_mask = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;  // used temporarily to swap key bytes up front
    const XMMRegister xmm_curr_counter = xmm2;

    const XMMRegister xmm_key_tmp0 = xmm3;
    const XMMRegister xmm_key_tmp1 = xmm4;

    // registers holding the six results in the parallelized loop
    const XMMRegister xmm_result0 = xmm5;
    const XMMRegister xmm_result1 = xmm6;
    const XMMRegister xmm_result2 = xmm7;
    const XMMRegister xmm_result3 = xmm8;
    const XMMRegister xmm_result4 = xmm9;
    const XMMRegister xmm_result5 = xmm10;

    const XMMRegister xmm_from0 = xmm11;
    const XMMRegister xmm_from1 = xmm12;
    const XMMRegister xmm_from2 = xmm13;
    const XMMRegister xmm_from3 = xmm14;  // the last one is xmm14; we have to preserve it on WIN64.
    const XMMRegister xmm_from4 = xmm3;   // reuse xmm3/xmm4; xmm_key_tmp0/1 are no longer needed when the input text is loaded
    const XMMRegister xmm_from5 = xmm4;

    // for key_128, key_192, key_256
    const int rounds[3] = {10, 12, 14};
    Label L_exit_preLoop, L_preLoop_start;
    Label L_multiBlock_loopTop[3];
    Label L_singleBlockLoopTop[3];
    Label L__incCounter[3][6];      // for 6 blocks
    Label L__incCounter_single[3];  // for single block, key128, key192, key256
    Label L_processTail_insr[3], L_processTail_4_insr[3], L_processTail_2_insr[3], L_processTail_1_insr[3], L_processTail_exit_insr[3];
    Label L_processTail_extr[3], L_processTail_4_extr[3], L_processTail_2_extr[3], L_processTail_1_extr[3], L_processTail_exit_extr[3];

    Label L_exit;

    __ enter();  // required for proper stackwalking of RuntimeStub frame

    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode
    // On EVEX without VL and BW, these instructions will all be AVX.
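
    // What this stub computes, as a rough C-style reference sketch
    // (illustrative helper names, not real functions; the real code keeps
    // six keystream blocks in flight and special-cases the tail bytes):
    //
    //   for (int i = 0; i < len; i += 16) {
    //     uint8_t ks[16];
    //     aes_encrypt_block(counter, key, ks);  // keystream = E_k(counter)
    //     xor_block(to + i, from + i, ks);      // output = input ^ keystream
    //     increment_counter(counter);           // big-endian 128-bit increment
    //   }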
if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

#ifdef _WIN64
    // allocate spill slots for r13, r14
    enum {
      saved_r13_offset,
      saved_r14_offset
    };
    __ subptr(rsp, 2 * wordSize);
    __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
    __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);

    // on win64, fill len_reg from stack position
    __ movl(len_reg, len_mem);
    __ movptr(saved_encCounter_start, saved_encCounter_mem);
    __ movptr(used_addr, used_mem);
    __ movl(used, Address(used_addr, 0));
#else
    __ push(len_reg);  // Save
    __ movptr(used_addr, used_mem);
    __ movl(used, Address(used_addr, 0));
#endif

    __ push(rbx);  // Save RBX
    __ movdqu(xmm_curr_counter, Address(counter, 0x00));  // initialize counter with initial counter
    __ movdqu(xmm_counter_shuf_mask, ExternalAddress(StubRoutines::x86::counter_shuffle_mask_addr()), pos);  // pos as scratch
    __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask);   // counter is shuffled
    __ movptr(pos, 0);

    // Use the partially used encrypted counter from the last invocation
    __ BIND(L_preLoop_start);
    __ cmpptr(used, 16);
    __ jcc(Assembler::aboveEqual, L_exit_preLoop);
    __ cmpptr(len_reg, 0);
    __ jcc(Assembler::lessEqual, L_exit_preLoop);
    __ movb(rbx, Address(saved_encCounter_start, used));
    __ xorb(rbx, Address(from, pos));
    __ movb(Address(to, pos), rbx);
    __ addptr(pos, 1);
    __ addptr(used, 1);
    __ subptr(len_reg, 1);

    __ jmp(L_preLoop_start);

    __ BIND(L_exit_preLoop);
    __ movl(Address(used_addr, 0), used);

    // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()), rbx);  // rbx as scratch
    __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
    __ cmpl(rbx, 52);
    __ jcc(Assembler::equal, L_multiBlock_loopTop[1]);
    __ cmpl(rbx, 60);
    __ jcc(Assembler::equal, L_multiBlock_loopTop[2]);

#define CTR_DoSix(opc, src_reg)        \
    __ opc(xmm_result0, src_reg);      \
    __ opc(xmm_result1, src_reg);      \
    __ opc(xmm_result2, src_reg);      \
    __ opc(xmm_result3, src_reg);      \
    __ opc(xmm_result4, src_reg);      \
    __ opc(xmm_result5, src_reg);

    // k == 0 :  generate code for key_128
    // k == 1 :  generate code for key_192
    // k == 2 :  generate code for key_256
    for (int k = 0; k < 3; ++k) {
      // multi blocks starts here
      __ align(OptoLoopAlignment);
      __ BIND(L_multiBlock_loopTop[k]);
      __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize);  // see if at least PARALLEL_FACTOR blocks left
      __ jcc(Assembler::less, L_singleBlockLoopTop[k]);
      load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);

      // load, then increase counters
      CTR_DoSix(movdqa, xmm_curr_counter);
      inc_counter(rbx, xmm_result1, 0x01, L__incCounter[k][0]);
      inc_counter(rbx, xmm_result2, 0x02, L__incCounter[k][1]);
      inc_counter(rbx, xmm_result3, 0x03, L__incCounter[k][2]);
      inc_counter(rbx, xmm_result4, 0x04, L__incCounter[k][3]);
      inc_counter(rbx, xmm_result5, 0x05, L__incCounter[k][4]);
      inc_counter(rbx, xmm_curr_counter, 0x06, L__incCounter[k][5]);
      CTR_DoSix(pshufb, xmm_counter_shuf_mask);  // after the increment, shuffle counters back for PXOR
      CTR_DoSix(pxor, xmm_key_tmp0);             // PXOR with Round 0 key
      // load two ROUND_KEYs at a time
      for (int i = 1; i < rounds[k]; ) {
        load_key(xmm_key_tmp1, key, (0x10 * i), xmm_key_shuf_mask);
        load_key(xmm_key_tmp0, key, (0x10 * (i+1)), xmm_key_shuf_mask);
        CTR_DoSix(aesenc, xmm_key_tmp1);
        i++;
        if (i != rounds[k]) {
          CTR_DoSix(aesenc, xmm_key_tmp0);
        } else {
          CTR_DoSix(aesenclast, xmm_key_tmp0);
        }
        i++;
      }

      // get next PARALLEL_FACTOR blocks into xmm_result registers
      __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ movdqu(xmm_from1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ movdqu(xmm_from2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ movdqu(xmm_from3, Address(from, pos, Address::times_1, 3 * AESBlockSize));
      __ movdqu(xmm_from4, Address(from, pos, Address::times_1, 4 * AESBlockSize));
      __ movdqu(xmm_from5, Address(from, pos, Address::times_1, 5 * AESBlockSize));

      __ pxor(xmm_result0, xmm_from0);
      __ pxor(xmm_result1, xmm_from1);
      __ pxor(xmm_result2, xmm_from2);
      __ pxor(xmm_result3, xmm_from3);
      __ pxor(xmm_result4, xmm_from4);
      __ pxor(xmm_result5, xmm_from5);

      // store 6 results into the next 96 bytes of output
      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
      __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1);
      __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2);
      __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3);
      __ movdqu(Address(to, pos, Address::times_1, 4 * AESBlockSize), xmm_result4);
      __ movdqu(Address(to, pos, Address::times_1, 5 * AESBlockSize), xmm_result5);

      __ addptr(pos, PARALLEL_FACTOR * AESBlockSize);      // increase the length of crypt text
      __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize);  // decrease the remaining length
      __ jmp(L_multiBlock_loopTop[k]);

      // singleBlock starts here
      __ align(OptoLoopAlignment);
      __ BIND(L_singleBlockLoopTop[k]);
      __ cmpptr(len_reg, 0);
      __ jcc(Assembler::lessEqual, L_exit);
      load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);
      __ movdqa(xmm_result0, xmm_curr_counter);
      inc_counter(rbx, xmm_curr_counter, 0x01, L__incCounter_single[k]);
      __ pshufb(xmm_result0, xmm_counter_shuf_mask);
      __ pxor(xmm_result0, xmm_key_tmp0);
      for (int i = 1; i < rounds[k]; i++) {
        load_key(xmm_key_tmp0, key, (0x10 * i), xmm_key_shuf_mask);
        __ aesenc(xmm_result0, xmm_key_tmp0);
      }
      load_key(xmm_key_tmp0, key, (rounds[k] * 0x10), xmm_key_shuf_mask);
      __ aesenclast(xmm_result0, xmm_key_tmp0);
      __ cmpptr(len_reg, AESBlockSize);
      __ jcc(Assembler::less, L_processTail_insr[k]);
      __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ pxor(xmm_result0, xmm_from0);
      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
      __ addptr(pos, AESBlockSize);
      __ subptr(len_reg, AESBlockSize);
      __ jmp(L_singleBlockLoopTop[k]);
      __ BIND(L_processTail_insr[k]);  // Process the tail part of the input array
      __ addptr(pos, len_reg);         // 1.
Insert bytes from src array into xmm_from0 register 4096 __ testptr(len_reg, 8); 4097 __ jcc(Assembler::zero, L_processTail_4_insr[k]); 4098 __ subptr(pos,8); 4099 __ pinsrq(xmm_from0, Address(from, pos), 0); 4100 __ BIND(L_processTail_4_insr[k]); 4101 __ testptr(len_reg, 4); 4102 __ jcc(Assembler::zero, L_processTail_2_insr[k]); 4103 __ subptr(pos,4); 4104 __ pslldq(xmm_from0, 4); 4105 __ pinsrd(xmm_from0, Address(from, pos), 0); 4106 __ BIND(L_processTail_2_insr[k]); 4107 __ testptr(len_reg, 2); 4108 __ jcc(Assembler::zero, L_processTail_1_insr[k]); 4109 __ subptr(pos, 2); 4110 __ pslldq(xmm_from0, 2); 4111 __ pinsrw(xmm_from0, Address(from, pos), 0); 4112 __ BIND(L_processTail_1_insr[k]); 4113 __ testptr(len_reg, 1); 4114 __ jcc(Assembler::zero, L_processTail_exit_insr[k]); 4115 __ subptr(pos, 1); 4116 __ pslldq(xmm_from0, 1); 4117 __ pinsrb(xmm_from0, Address(from, pos), 0); 4118 __ BIND(L_processTail_exit_insr[k]); 4119 4120 __ movdqu(Address(saved_encCounter_start, 0), xmm_result0); // 2. Perform pxor of the encrypted counter and plaintext Bytes. 4121 __ pxor(xmm_result0, xmm_from0); // Also the encrypted counter is saved for next invocation. 4122 4123 __ testptr(len_reg, 8); 4124 __ jcc(Assembler::zero, L_processTail_4_extr[k]); // 3. Extract bytes from xmm_result0 into the dest. array 4125 __ pextrq(Address(to, pos), xmm_result0, 0); 4126 __ psrldq(xmm_result0, 8); 4127 __ addptr(pos, 8); 4128 __ BIND(L_processTail_4_extr[k]); 4129 __ testptr(len_reg, 4); 4130 __ jcc(Assembler::zero, L_processTail_2_extr[k]); 4131 __ pextrd(Address(to, pos), xmm_result0, 0); 4132 __ psrldq(xmm_result0, 4); 4133 __ addptr(pos, 4); 4134 __ BIND(L_processTail_2_extr[k]); 4135 __ testptr(len_reg, 2); 4136 __ jcc(Assembler::zero, L_processTail_1_extr[k]); 4137 __ pextrw(Address(to, pos), xmm_result0, 0); 4138 __ psrldq(xmm_result0, 2); 4139 __ addptr(pos, 2); 4140 __ BIND(L_processTail_1_extr[k]); 4141 __ testptr(len_reg, 1); 4142 __ jcc(Assembler::zero, L_processTail_exit_extr[k]); 4143 __ pextrb(Address(to, pos), xmm_result0, 0); 4144 4145 __ BIND(L_processTail_exit_extr[k]); 4146 __ movl(Address(used_addr, 0), len_reg); 4147 __ jmp(L_exit); 4148 4149 } 4150 4151 __ BIND(L_exit); 4152 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); //counter is shuffled back. 4153 __ movdqu(Address(counter, 0), xmm_curr_counter); //save counter back 4154 __ pop(rbx); // pop the saved RBX. 
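    // On exit rax must hold the original input length: on Win64 it is
    // reloaded from the incoming stack slot, on Linux it is recovered by
    // popping the len_reg value pushed in the prologue.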
4155 #ifdef _WIN64 4156 __ movl(rax, len_mem); 4157 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 4158 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 4159 __ addptr(rsp, 2 * wordSize); 4160 #else 4161 __ pop(rax); // return 'len' 4162 #endif 4163 __ leave(); // required for proper stackwalking of RuntimeStub frame 4164 __ ret(0); 4165 return start; 4166 } 4167 4168 // byte swap x86 long 4169 address generate_ghash_long_swap_mask() { 4170 __ align(CodeEntryAlignment); 4171 StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask"); 4172 address start = __ pc(); 4173 __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none ); 4174 __ emit_data64(0x0706050403020100, relocInfo::none ); 4175 return start; 4176 } 4177 4178 // byte swap x86 byte array 4179 address generate_ghash_byte_swap_mask() { 4180 __ align(CodeEntryAlignment); 4181 StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask"); 4182 address start = __ pc(); 4183 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none ); 4184 __ emit_data64(0x0001020304050607, relocInfo::none ); 4185 return start; 4186 } 4187 4188 /* Single and multi-block ghash operations */ 4189 address generate_ghash_processBlocks() { 4190 __ align(CodeEntryAlignment); 4191 Label L_ghash_loop, L_exit; 4192 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 4193 address start = __ pc(); 4194 4195 const Register state = c_rarg0; 4196 const Register subkeyH = c_rarg1; 4197 const Register data = c_rarg2; 4198 const Register blocks = c_rarg3; 4199 4200 const XMMRegister xmm_temp0 = xmm0; 4201 const XMMRegister xmm_temp1 = xmm1; 4202 const XMMRegister xmm_temp2 = xmm2; 4203 const XMMRegister xmm_temp3 = xmm3; 4204 const XMMRegister xmm_temp4 = xmm4; 4205 const XMMRegister xmm_temp5 = xmm5; 4206 const XMMRegister xmm_temp6 = xmm6; 4207 const XMMRegister xmm_temp7 = xmm7; 4208 const XMMRegister xmm_temp8 = xmm8; 4209 const XMMRegister xmm_temp9 = xmm9; 4210 const XMMRegister xmm_temp10 = xmm10; 4211 4212 __ enter(); 4213 4214 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 4215 // context for the registers used, where all instructions below are using 128-bit mode 4216 // On EVEX without VL and BW, these instructions will all be AVX. 
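
    // One loop iteration below computes, as a sketch (GHASH over GF(2^128),
    // reduction polynomial x^128 + x^7 + x^2 + x + 1; byte/bit reflection is
    // handled by the swap masks and the one-bit shift after the multiply):
    //
    //   state = gf128_mul(state ^ data_block, subkeyH);  // per 16-byte block
    //
    // realized as four pclmulqdq partial products plus a two-phase reduction.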
if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

    __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));

    __ movdqu(xmm_temp0, Address(state, 0));
    __ pshufb(xmm_temp0, xmm_temp10);


    __ BIND(L_ghash_loop);
    __ movdqu(xmm_temp2, Address(data, 0));
    __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr()));

    __ movdqu(xmm_temp1, Address(subkeyH, 0));
    __ pshufb(xmm_temp1, xmm_temp10);

    __ pxor(xmm_temp0, xmm_temp2);

    //
    // Multiply with the hash key
    //
    __ movdqu(xmm_temp3, xmm_temp0);
    __ pclmulqdq(xmm_temp3, xmm_temp1, 0);    // xmm3 holds a0*b0
    __ movdqu(xmm_temp4, xmm_temp0);
    __ pclmulqdq(xmm_temp4, xmm_temp1, 16);   // xmm4 holds a0*b1

    __ movdqu(xmm_temp5, xmm_temp0);
    __ pclmulqdq(xmm_temp5, xmm_temp1, 1);    // xmm5 holds a1*b0
    __ movdqu(xmm_temp6, xmm_temp0);
    __ pclmulqdq(xmm_temp6, xmm_temp1, 17);   // xmm6 holds a1*b1

    __ pxor(xmm_temp4, xmm_temp5);    // xmm4 holds a0*b1 + a1*b0

    __ movdqu(xmm_temp5, xmm_temp4);  // move the contents of xmm4 to xmm5
    __ psrldq(xmm_temp4, 8);          // shift xmm4 64 bits to the right
    __ pslldq(xmm_temp5, 8);          // shift xmm5 64 bits to the left
    __ pxor(xmm_temp3, xmm_temp5);
    __ pxor(xmm_temp6, xmm_temp4);    // Register pair <xmm6:xmm3> holds the result
                                      // of the carry-less multiplication of
                                      // xmm0 by xmm1.

    // We shift the result of the multiplication by one bit position
    // to the left to cope with the fact that the bits are reversed.
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp8, xmm_temp6);
    __ pslld(xmm_temp3, 1);
    __ pslld(xmm_temp6, 1);
    __ psrld(xmm_temp7, 31);
    __ psrld(xmm_temp8, 31);
    __ movdqu(xmm_temp9, xmm_temp7);
    __ pslldq(xmm_temp8, 4);
    __ pslldq(xmm_temp7, 4);
    __ psrldq(xmm_temp9, 12);
    __ por(xmm_temp3, xmm_temp7);
    __ por(xmm_temp6, xmm_temp8);
    __ por(xmm_temp6, xmm_temp9);

    //
    // First phase of the reduction
    //
    // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts
    // independently.
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp8, xmm_temp3);
    __ movdqu(xmm_temp9, xmm_temp3);
    __ pslld(xmm_temp7, 31);          // packed left shift by 31
    __ pslld(xmm_temp8, 30);          // packed left shift by 30
    __ pslld(xmm_temp9, 25);          // packed left shift by 25
    __ pxor(xmm_temp7, xmm_temp8);    // xor the shifted versions
    __ pxor(xmm_temp7, xmm_temp9);
    __ movdqu(xmm_temp8, xmm_temp7);
    __ pslldq(xmm_temp7, 12);
    __ psrldq(xmm_temp8, 4);
    __ pxor(xmm_temp3, xmm_temp7);    // first phase of the reduction complete

    //
    // Second phase of the reduction
    //
    // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these
    // shift operations.
__ movdqu(xmm_temp2, xmm_temp3);
    __ movdqu(xmm_temp4, xmm_temp3);
    __ movdqu(xmm_temp5, xmm_temp3);
    __ psrld(xmm_temp2, 1);           // packed right shift by 1
    __ psrld(xmm_temp4, 2);           // packed right shift by 2
    __ psrld(xmm_temp5, 7);           // packed right shift by 7
    __ pxor(xmm_temp2, xmm_temp4);    // xor the shifted versions
    __ pxor(xmm_temp2, xmm_temp5);
    __ pxor(xmm_temp2, xmm_temp8);
    __ pxor(xmm_temp3, xmm_temp2);
    __ pxor(xmm_temp6, xmm_temp3);    // the result is in xmm6

    __ decrement(blocks);
    __ jcc(Assembler::zero, L_exit);
    __ movdqu(xmm_temp0, xmm_temp6);
    __ addptr(data, 16);
    __ jmp(L_ghash_loop);

    __ BIND(L_exit);
    __ pshufb(xmm_temp6, xmm_temp10);          // Byte swap 16-byte result
    __ movdqu(Address(state, 0), xmm_temp6);   // store the result
    __ leave();
    __ ret(0);
    return start;
  }

  /**
   *  Arguments:
   *
   *  Inputs:
   *    c_rarg0   - int crc
   *    c_rarg1   - byte* buf
   *    c_rarg2   - int length
   *
   *  Output:
   *    rax       - int crc result
   */
  address generate_updateBytesCRC32() {
    assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    // rscratch1: r10
    const Register crc   = c_rarg0;  // crc
    const Register buf   = c_rarg1;  // source java byte array address
    const Register len   = c_rarg2;  // length
    const Register table = c_rarg3;  // crc_table address (reuse register)
    const Register tmp   = r11;
    assert_different_registers(crc, buf, len, table, tmp, rax);

    BLOCK_COMMENT("Entry:");
    __ enter();  // required for proper stackwalking of RuntimeStub frame

    __ kernel_crc32(crc, buf, len, table, tmp);

    __ movl(rax, crc);
    __ vzeroupper();
    __ leave();  // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  /**
   *  Arguments:
   *
   *  Inputs:
   *    c_rarg0   - int crc
   *    c_rarg1   - byte* buf
   *    c_rarg2   - long length
   *    c_rarg3   - table_start - optional (present only when doing a library_call,
   *                not used by x86 algorithm)
   *
   *  Output:
   *    rax       - int crc result
   */
  address generate_updateBytesCRC32C(bool is_pclmulqdq_supported) {
    assert(UseCRC32CIntrinsics, "need SSE4_2");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C");
    address start = __ pc();
    // reg.arg    int#0   int#1   int#2   int#3   int#4   int#5   float regs
    // Windows    RCX     RDX     R8      R9      none    none    XMM0..XMM3
    // Lin / Sol  RDI     RSI     RDX     RCX     R8      R9      XMM0..XMM7
    const Register crc = c_rarg0;  // crc
    const Register buf = c_rarg1;  // source java byte array address
    const Register len = c_rarg2;  // length
    const Register a = rax;
    const Register j = r9;
    const Register k = r10;
    const Register l = r11;
#ifdef _WIN64
    const Register y = rdi;
    const Register z = rsi;
#else
    const Register y = rcx;
    const Register z = r8;
#endif
    assert_different_registers(crc, buf, len, a, j, k, l, y, z);

    BLOCK_COMMENT("Entry:");
    __ enter();  // required for proper stackwalking of RuntimeStub frame
#ifdef _WIN64
    __ push(y);
    __ push(z);
#endif
    __ crc32c_ipl_alg2_alt2(crc, buf, len,
                            a, j, k,
                            l, y, z,
                            c_farg0, c_farg1, c_farg2,
                            is_pclmulqdq_supported);
    __ movl(rax, crc);
#ifdef _WIN64
    __ pop(z);
    __ pop(y);
#endif
    __ vzeroupper();
    __ leave();  // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  /**
   *  Arguments:
   *
   *  Input:
   *    c_rarg0   - x address
   *    c_rarg1   - x length
   *    c_rarg2   - y address
   *    c_rarg3   - y length
   *  not Win64
   *    c_rarg4   - z address
   *    c_rarg5   - z length
   *  Win64
   *    rsp+40    - z address
   *    rsp+48    - z length
   */
  address generate_multiplyToLen() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "multiplyToLen");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register x    = rdi;
    const Register xlen = rax;
    const Register y    = rsi;
    const Register ylen = rcx;
    const Register z    = r8;
    const Register zlen = r11;

    // Next registers will be saved on stack in multiply_to_len().
    const Register tmp1 = r12;
    const Register tmp2 = r13;
    const Register tmp3 = r14;
    const Register tmp4 = r15;
    const Register tmp5 = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter();  // required for proper stackwalking of RuntimeStub frame

#ifndef _WIN64
    __ movptr(zlen, r9);  // Save r9 in r11 - zlen
#endif
    setup_arg_regs(4);  // x => rdi, xlen => rsi, y => rdx
                        // ylen => rcx, z => r8, zlen => r11
                        // r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
    // last 2 arguments (#4, #5) are on stack on Win64
    __ movptr(z, Address(rsp, 6 * wordSize));
    __ movptr(zlen, Address(rsp, 7 * wordSize));
#endif

    __ movptr(xlen, rsi);
    __ movptr(y, rdx);
    __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5);

    restore_arg_regs();

    __ leave();  // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  /**
   *  Arguments:
   *
   *  Input:
   *    c_rarg0   - obja     address
   *    c_rarg1   - objb     address
   *    c_rarg2   - length   length
   *    c_rarg3   - scale    log2_array_indxscale
   *
   *  Output:
   *    rax       - int >= mismatched index, < 0 bitwise complement of tail
   */
  address generate_vectorizedMismatch() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "vectorizedMismatch");
    address start = __ pc();

    BLOCK_COMMENT("Entry:");
    __ enter();

#ifdef _WIN64  // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register scale = c_rarg0;   // rcx, will exchange with r9
    const Register objb = c_rarg1;    // rdx
    const Register length = c_rarg2;  // r8
    const Register obja = c_rarg3;    // r9
    __ xchgq(obja, scale);  // now obja and scale contain the correct contents

    const Register tmp1 = r10;
    const Register tmp2 = r11;
#endif
#ifndef _WIN64  // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register obja = c_rarg0;    // U:rdi
    const Register objb = c_rarg1;    // U:rsi
    const Register length = c_rarg2;  // U:rdx
    const Register scale = c_rarg3;   // U:rcx
    const Register tmp1 = r8;
    const Register tmp2 = r9;
#endif
    const Register result = rax;  // return value
    const XMMRegister vec0 = xmm0;
    const XMMRegister vec1 = xmm1;
    const XMMRegister vec2 = xmm2;

    __ vectorized_mismatch(obja, objb, length, scale, result, tmp1, tmp2, vec0, vec1, vec2);

    __ vzeroupper();
    __ leave();
    __ ret(0);

    return start;
  }

  /**
   *  Arguments:
   *
   *  Input:
   *    c_rarg0   - x address
   *    c_rarg1   - x length
   *    c_rarg2   - z address
   *    c_rarg3   - z length
   */
  address generate_squareToLen() {

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "squareToLen");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...)
    const Register x    = rdi;
    const Register len  = rsi;
    const Register z    = r8;
    const Register zlen = rcx;

    const Register tmp1 = r12;
    const Register tmp2 = r13;
    const Register tmp3 = r14;
    const Register tmp4 = r15;
    const Register tmp5 = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter();  // required for proper stackwalking of RuntimeStub frame

    setup_arg_regs(4);  // x => rdi, len => rsi, z => rdx
                        // zlen => rcx
                        // r9 and r10 may be used to save non-volatile registers
    __ movptr(r8, rdx);
    __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax);

    restore_arg_regs();

    __ leave();  // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  /**
   *  Arguments:
   *
   *  Input:
   *    c_rarg0   - out address
   *    c_rarg1   - in address
   *    c_rarg2   - offset
   *    c_rarg3   - len
   *  not Win64
   *    c_rarg4   - k
   *  Win64
   *    rsp+40    - k
   */
  address generate_mulAdd() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "mulAdd");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register out    = rdi;
    const Register in     = rsi;
    const Register offset = r11;
    const Register len    = rcx;
    const Register k      = r8;

    // Next registers will be saved on stack in mul_add().
4614 const Register tmp1 = r12; 4615 const Register tmp2 = r13; 4616 const Register tmp3 = r14; 4617 const Register tmp4 = r15; 4618 const Register tmp5 = rbx; 4619 4620 BLOCK_COMMENT("Entry:"); 4621 __ enter(); // required for proper stackwalking of RuntimeStub frame 4622 4623 setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx 4624 // len => rcx, k => r8 4625 // r9 and r10 may be used to save non-volatile registers 4626 #ifdef _WIN64 4627 // last argument is on stack on Win64 4628 __ movl(k, Address(rsp, 6 * wordSize)); 4629 #endif 4630 __ movptr(r11, rdx); // move offset in rdx to offset(r11) 4631 __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 4632 4633 restore_arg_regs(); 4634 4635 __ leave(); // required for proper stackwalking of RuntimeStub frame 4636 __ ret(0); 4637 4638 return start; 4639 } 4640 4641 address generate_libmExp() { 4642 StubCodeMark mark(this, "StubRoutines", "libmExp"); 4643 4644 address start = __ pc(); 4645 4646 const XMMRegister x0 = xmm0; 4647 const XMMRegister x1 = xmm1; 4648 const XMMRegister x2 = xmm2; 4649 const XMMRegister x3 = xmm3; 4650 4651 const XMMRegister x4 = xmm4; 4652 const XMMRegister x5 = xmm5; 4653 const XMMRegister x6 = xmm6; 4654 const XMMRegister x7 = xmm7; 4655 4656 const Register tmp = r11; 4657 4658 BLOCK_COMMENT("Entry:"); 4659 __ enter(); // required for proper stackwalking of RuntimeStub frame 4660 4661 __ fast_exp(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 4662 4663 __ leave(); // required for proper stackwalking of RuntimeStub frame 4664 __ ret(0); 4665 4666 return start; 4667 4668 } 4669 4670 address generate_libmLog() { 4671 StubCodeMark mark(this, "StubRoutines", "libmLog"); 4672 4673 address start = __ pc(); 4674 4675 const XMMRegister x0 = xmm0; 4676 const XMMRegister x1 = xmm1; 4677 const XMMRegister x2 = xmm2; 4678 const XMMRegister x3 = xmm3; 4679 4680 const XMMRegister x4 = xmm4; 4681 const XMMRegister x5 = xmm5; 4682 const XMMRegister x6 = xmm6; 4683 const XMMRegister x7 = xmm7; 4684 4685 const Register tmp1 = r11; 4686 const Register tmp2 = r8; 4687 4688 BLOCK_COMMENT("Entry:"); 4689 __ enter(); // required for proper stackwalking of RuntimeStub frame 4690 4691 __ fast_log(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2); 4692 4693 __ leave(); // required for proper stackwalking of RuntimeStub frame 4694 __ ret(0); 4695 4696 return start; 4697 4698 } 4699 4700 address generate_libmLog10() { 4701 StubCodeMark mark(this, "StubRoutines", "libmLog10"); 4702 4703 address start = __ pc(); 4704 4705 const XMMRegister x0 = xmm0; 4706 const XMMRegister x1 = xmm1; 4707 const XMMRegister x2 = xmm2; 4708 const XMMRegister x3 = xmm3; 4709 4710 const XMMRegister x4 = xmm4; 4711 const XMMRegister x5 = xmm5; 4712 const XMMRegister x6 = xmm6; 4713 const XMMRegister x7 = xmm7; 4714 4715 const Register tmp = r11; 4716 4717 BLOCK_COMMENT("Entry:"); 4718 __ enter(); // required for proper stackwalking of RuntimeStub frame 4719 4720 __ fast_log10(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 4721 4722 __ leave(); // required for proper stackwalking of RuntimeStub frame 4723 __ ret(0); 4724 4725 return start; 4726 4727 } 4728 4729 address generate_libmPow() { 4730 StubCodeMark mark(this, "StubRoutines", "libmPow"); 4731 4732 address start = __ pc(); 4733 4734 const XMMRegister x0 = xmm0; 4735 const XMMRegister x1 = xmm1; 4736 const XMMRegister x2 = xmm2; 4737 const XMMRegister x3 = xmm3; 4738 4739 const XMMRegister x4 = xmm4; 4740 const XMMRegister x5 = xmm5; 4741 const XMMRegister 
x6 = xmm6; 4742 const XMMRegister x7 = xmm7; 4743 4744 const Register tmp1 = r8; 4745 const Register tmp2 = r9; 4746 const Register tmp3 = r10; 4747 const Register tmp4 = r11; 4748 4749 BLOCK_COMMENT("Entry:"); 4750 __ enter(); // required for proper stackwalking of RuntimeStub frame 4751 4752 __ fast_pow(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4753 4754 __ leave(); // required for proper stackwalking of RuntimeStub frame 4755 __ ret(0); 4756 4757 return start; 4758 4759 } 4760 4761 address generate_libmSin() { 4762 StubCodeMark mark(this, "StubRoutines", "libmSin"); 4763 4764 address start = __ pc(); 4765 4766 const XMMRegister x0 = xmm0; 4767 const XMMRegister x1 = xmm1; 4768 const XMMRegister x2 = xmm2; 4769 const XMMRegister x3 = xmm3; 4770 4771 const XMMRegister x4 = xmm4; 4772 const XMMRegister x5 = xmm5; 4773 const XMMRegister x6 = xmm6; 4774 const XMMRegister x7 = xmm7; 4775 4776 const Register tmp1 = r8; 4777 const Register tmp2 = r9; 4778 const Register tmp3 = r10; 4779 const Register tmp4 = r11; 4780 4781 BLOCK_COMMENT("Entry:"); 4782 __ enter(); // required for proper stackwalking of RuntimeStub frame 4783 4784 #ifdef _WIN64 4785 __ push(rsi); 4786 __ push(rdi); 4787 #endif 4788 __ fast_sin(x0, x1, x2, x3, x4, x5, x6, x7, rax, rbx, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4789 4790 #ifdef _WIN64 4791 __ pop(rdi); 4792 __ pop(rsi); 4793 #endif 4794 4795 __ leave(); // required for proper stackwalking of RuntimeStub frame 4796 __ ret(0); 4797 4798 return start; 4799 4800 } 4801 4802 address generate_libmCos() { 4803 StubCodeMark mark(this, "StubRoutines", "libmCos"); 4804 4805 address start = __ pc(); 4806 4807 const XMMRegister x0 = xmm0; 4808 const XMMRegister x1 = xmm1; 4809 const XMMRegister x2 = xmm2; 4810 const XMMRegister x3 = xmm3; 4811 4812 const XMMRegister x4 = xmm4; 4813 const XMMRegister x5 = xmm5; 4814 const XMMRegister x6 = xmm6; 4815 const XMMRegister x7 = xmm7; 4816 4817 const Register tmp1 = r8; 4818 const Register tmp2 = r9; 4819 const Register tmp3 = r10; 4820 const Register tmp4 = r11; 4821 4822 BLOCK_COMMENT("Entry:"); 4823 __ enter(); // required for proper stackwalking of RuntimeStub frame 4824 4825 #ifdef _WIN64 4826 __ push(rsi); 4827 __ push(rdi); 4828 #endif 4829 __ fast_cos(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4830 4831 #ifdef _WIN64 4832 __ pop(rdi); 4833 __ pop(rsi); 4834 #endif 4835 4836 __ leave(); // required for proper stackwalking of RuntimeStub frame 4837 __ ret(0); 4838 4839 return start; 4840 4841 } 4842 4843 address generate_libmTan() { 4844 StubCodeMark mark(this, "StubRoutines", "libmTan"); 4845 4846 address start = __ pc(); 4847 4848 const XMMRegister x0 = xmm0; 4849 const XMMRegister x1 = xmm1; 4850 const XMMRegister x2 = xmm2; 4851 const XMMRegister x3 = xmm3; 4852 4853 const XMMRegister x4 = xmm4; 4854 const XMMRegister x5 = xmm5; 4855 const XMMRegister x6 = xmm6; 4856 const XMMRegister x7 = xmm7; 4857 4858 const Register tmp1 = r8; 4859 const Register tmp2 = r9; 4860 const Register tmp3 = r10; 4861 const Register tmp4 = r11; 4862 4863 BLOCK_COMMENT("Entry:"); 4864 __ enter(); // required for proper stackwalking of RuntimeStub frame 4865 4866 #ifdef _WIN64 4867 __ push(rsi); 4868 __ push(rdi); 4869 #endif 4870 __ fast_tan(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4871 4872 #ifdef _WIN64 4873 __ pop(rdi); 4874 __ pop(rsi); 4875 #endif 4876 4877 __ leave(); // required for proper stackwalking of RuntimeStub frame 4878 __ ret(0); 4879 4880 return 
#undef __
#define __ masm->

  // Continuation point for throwing of implicit exceptions that are
  // not handled in the current activation. Fabricates an exception
  // oop and initiates normal exception dispatching in this
  // frame. Since we need to preserve callee-saved values (currently
  // only for C2, but done for C1 as well) we need a callee-saved oop
  // map and therefore have to make these stubs into RuntimeStubs
  // rather than BufferBlobs.  If the compiler needs all registers to
  // be preserved between the fault point and the exception handler
  // then it must assume responsibility for that in
  // AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception.  All other
  // implicit exceptions (e.g., NullPointerException or
  // AbstractMethodError on entry) are either at call sites or
  // otherwise assume that stack unwinding will be initiated, so
  // caller saved registers were assumed volatile in the compiler.
  address generate_throw_exception(const char* name,
                                   address runtime_entry,
                                   Register arg1 = noreg,
                                   Register arg2 = noreg) {
    // Information about frame layout at time of blocking runtime call.
    // Note that we only have to preserve callee-saved registers since
    // the compilers are responsible for supplying a continuation point
    // if they expect all registers to be preserved.
    enum layout {
      rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
      rbp_off2,
      return_off,
      return_off2,
      framesize // inclusive of return address
    };

    int insts_size = 512;
    int locs_size  = 64;

    CodeBuffer code(name, insts_size, locs_size);
    OopMapSet* oop_maps  = new OopMapSet();
    MacroAssembler* masm = new MacroAssembler(&code);

    address start = __ pc();

    // This is an inlined and slightly modified version of call_VM
    // which has the ability to fetch the return PC out of
    // thread-local storage and also sets up last_Java_sp slightly
    // differently than the real call_VM

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    assert(is_even(framesize/2), "sp not 16-byte aligned");

    // return address and rbp are already in place
    __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog

    int frame_complete = __ pc() - start;

    // Set up last_Java_sp and last_Java_fp
    address the_pc = __ pc();
    __ set_last_Java_frame(rsp, rbp, the_pc);
    __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack

    // Call runtime
    if (arg1 != noreg) {
      assert(arg2 != c_rarg1, "clobbered");
      __ movptr(c_rarg1, arg1);
    }
    if (arg2 != noreg) {
      __ movptr(c_rarg2, arg2);
    }
    __ movptr(c_rarg0, r15_thread);
    BLOCK_COMMENT("call runtime_entry");
    __ call(RuntimeAddress(runtime_entry));

    // Generate oop map
    OopMap* map = new OopMap(framesize, 0);

    oop_maps->add_gc_map(the_pc - start, map);

    __ reset_last_Java_frame(true);

    __ leave(); // required for proper stackwalking of RuntimeStub frame

    // check for pending exceptions
#ifdef ASSERT
    Label L;
    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()),
              (int32_t) NULL_WORD);
    __ jcc(Assembler::notEqual, L);
    __ should_not_reach_here();
    __ bind(L);
#endif // ASSERT
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

    // codeBlob framesize is in words (not VMRegImpl::slot_size)
    RuntimeStub* stub =
      RuntimeStub::new_runtime_stub(name,
                                    &code,
                                    frame_complete,
                                    (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                    oop_maps, false);
    return stub->entry_point();
  }
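  // Annotation (not in the original source): runtime_entry is expected to be
  // a SharedRuntime-style throw helper whose first argument is the current
  // thread, e.g.
  //
  //   void SharedRuntime::throw_StackOverflowError(JavaThread* thread);
  //
  // which is why c_rarg0 is loaded from r15_thread above. arg1/arg2, when
  // supplied, are forwarded in c_rarg1/c_rarg2 as additional arguments.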
  void create_control_words() {
    // Round to nearest, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_std   = 0x027F;
    // Round to zero, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
    // Round to nearest, 24-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_24    = 0x007F;
    // Round to nearest, 64-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_64    = 0x037F;
    // Round to nearest, exceptions masked (MXCSR has no precision control)
    StubRoutines::_mxcsr_std           = 0x1F80;
    // Note: the following two constants are 80-bit values;
    // their layout is critical for correct loading by the FPU.
    // Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias1[0] = 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias1[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias1[2] = 0x03ff;
    // Un-Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias2[0] = 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias2[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias2[2] = 0x7bff;
  }
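  // Annotation (not in the original source): a decoding aid for the values
  // above. In the x87 control word, bits 0-5 mask the six exception classes,
  // bits 8-9 select precision (00 = 24-bit, 10 = 53-bit, 11 = 64-bit) and
  // bits 10-11 the rounding mode (00 = nearest, 11 = toward zero). For
  // example:
  //
  //   0x027F = 0000 0010 0111 1111b
  //            -> all exceptions masked, 53-bit precision, round to nearest
  //
  // In MXCSR the exception masks occupy bits 7-12 and rounding control bits
  // 13-14, so 0x1F80 is "all exceptions masked, round to nearest" with both
  // flush-to-zero and denormals-are-zero disabled.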
  // Initialization
  void generate_initial() {
    // Generates all stubs and initializes the entry points

    // These platform-specific settings are needed by generate_call_stub()
    create_control_words();

    // Entry points that exist on all platforms. Note: this is code
    // that could be shared among different platforms - however the
    // benefit seems to be smaller than the disadvantage of having a
    // much more complicated generator structure. See also comment in
    // stubRoutines.hpp.

    StubRoutines::_forward_exception_entry = generate_forward_exception();

    StubRoutines::_call_stub_entry =
      generate_call_stub(StubRoutines::_call_stub_return_address);

    // is referenced by megamorphic call
    StubRoutines::_catch_exception_entry = generate_catch_exception();

    // atomic calls
    StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
    StubRoutines::_atomic_xchg_long_entry    = generate_atomic_xchg_long();
    StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
    StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
    StubRoutines::_atomic_add_entry          = generate_atomic_add();
    StubRoutines::_atomic_add_long_entry     = generate_atomic_add_long();
    StubRoutines::_fence_entry               = generate_orderaccess_fence();

    // platform dependent
    StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
    StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();

    StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();

    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_StackOverflowError));
    StubRoutines::_throw_delayed_StackOverflowError_entry =
      generate_throw_exception("delayed StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_delayed_StackOverflowError));
    if (UseCRC32Intrinsics) {
      // Set the table address before generating stubs that use it.
      StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
    }

    if (UseCRC32CIntrinsics) {
      bool supports_clmul = VM_Version::supports_clmul();
      StubRoutines::x86::generate_CRC32C_table(supports_clmul);
      StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
      StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
    }
    if (VM_Version::supports_sse2() && UseLibmIntrinsic && InlineIntrinsics) {
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::x86::_ONEHALF_adr = (address)StubRoutines::x86::_ONEHALF;
        StubRoutines::x86::_P_2_adr = (address)StubRoutines::x86::_P_2;
        StubRoutines::x86::_SC_4_adr = (address)StubRoutines::x86::_SC_4;
        StubRoutines::x86::_Ctable_adr = (address)StubRoutines::x86::_Ctable;
        StubRoutines::x86::_SC_2_adr = (address)StubRoutines::x86::_SC_2;
        StubRoutines::x86::_SC_3_adr = (address)StubRoutines::x86::_SC_3;
        StubRoutines::x86::_SC_1_adr = (address)StubRoutines::x86::_SC_1;
        StubRoutines::x86::_PI_INV_TABLE_adr = (address)StubRoutines::x86::_PI_INV_TABLE;
        StubRoutines::x86::_PI_4_adr = (address)StubRoutines::x86::_PI_4;
        StubRoutines::x86::_PI32INV_adr = (address)StubRoutines::x86::_PI32INV;
        StubRoutines::x86::_SIGN_MASK_adr = (address)StubRoutines::x86::_SIGN_MASK;
        StubRoutines::x86::_P_1_adr = (address)StubRoutines::x86::_P_1;
        StubRoutines::x86::_P_3_adr = (address)StubRoutines::x86::_P_3;
        StubRoutines::x86::_NEG_ZERO_adr = (address)StubRoutines::x86::_NEG_ZERO;
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dexp)) {
        StubRoutines::_dexp = generate_libmExp();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
        StubRoutines::_dlog = generate_libmLog();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) {
        StubRoutines::_dlog10 = generate_libmLog10();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) {
        StubRoutines::_dpow = generate_libmPow();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
        StubRoutines::_dsin = generate_libmSin();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
        StubRoutines::_dcos = generate_libmCos();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::_dtan = generate_libmTan();
      }
    }
  }
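  // Annotation (not in the original source): this generator runs in two
  // passes. generate_initial() above is invoked early in VM startup - before
  // the interpreter exists, which is why the StackOverflowError stub is
  // built there - while generate_all() below runs after universe
  // initialization. The constructor's 'all' flag at the end of this file
  // selects the pass.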
  void generate_all() {
    // Generates all stubs and initializes the entry points

    // These entry points require SharedInfo::stack0 to be set up in
    // non-core builds and need to be relocatable, so they each
    // fabricate a RuntimeStub internally.
    StubRoutines::_throw_AbstractMethodError_entry =
      generate_throw_exception("AbstractMethodError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_AbstractMethodError));

    StubRoutines::_throw_IncompatibleClassChangeError_entry =
      generate_throw_exception("IncompatibleClassChangeError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_IncompatibleClassChangeError));

    StubRoutines::_throw_NullPointerException_at_call_entry =
      generate_throw_exception("NullPointerException at call throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_NullPointerException_at_call));

    // entry points that are platform specific
    if (UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValWriteBarrier || ShenandoahStoreValEnqueueBarrier)) {
      StubRoutines::x86::_shenandoah_wb = generate_shenandoah_wb(false, true);
      StubRoutines::_shenandoah_wb_C = generate_shenandoah_wb(true, !ShenandoahWriteBarrierCsetTestInIR);
    }
    StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
    StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
    StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
    StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();

    StubRoutines::x86::_float_sign_mask  = generate_fp_mask("float_sign_mask",  0x7FFFFFFF7FFFFFFF);
    StubRoutines::x86::_float_sign_flip  = generate_fp_mask("float_sign_flip",  0x8000000080000000);
    StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
    StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    // don't bother generating these AES intrinsic stubs unless global flag is set
    if (UseAESIntrinsics) {
      StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask();  // needed by the others
      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
      StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
    }
    if (UseAESCTRIntrinsics) {
      StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask();
      StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
    }

    if (UseSHA1Intrinsics) {
      StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
      StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
      StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress");
      StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB");
    }
    if (UseSHA256Intrinsics) {
      StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
      // Build the doubled round-constant table: each 16-byte group of k256
      // is written twice in a row (presumably so SIMD code can load the same
      // constants into both 128-bit lanes of a 256-bit register).
      char* dst = (char*)StubRoutines::x86::_k256_W;
      char* src = (char*)StubRoutines::x86::_k256;
      for (int ii = 0; ii < 16; ++ii) {
        memcpy(dst + 32 * ii,      src + 16 * ii, 16);
        memcpy(dst + 32 * ii + 16, src + 16 * ii, 16);
      }
      StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask();
      StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress");
      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB");
    }
    if (UseSHA512Intrinsics) {
      StubRoutines::x86::_k512_W_addr = (address)StubRoutines::x86::_k512_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = generate_pshuffle_byte_flip_mask_sha512();
      StubRoutines::_sha512_implCompress = generate_sha512_implCompress(false, "sha512_implCompress");
      StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB");
    }

    // Generate GHASH intrinsics code
    if (UseGHASHIntrinsics) {
      StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();
      StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask();
      StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
    }

    // Safefetch stubs.
    generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
                                                       &StubRoutines::_safefetch32_fault_pc,
                                                       &StubRoutines::_safefetch32_continuation_pc);
    generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                                                       &StubRoutines::_safefetchN_fault_pc,
                                                       &StubRoutines::_safefetchN_continuation_pc);
#ifdef COMPILER2
    if (UseMultiplyToLenIntrinsic) {
      StubRoutines::_multiplyToLen = generate_multiplyToLen();
    }
    if (UseSquareToLenIntrinsic) {
      StubRoutines::_squareToLen = generate_squareToLen();
    }
    if (UseMulAddIntrinsic) {
      StubRoutines::_mulAdd = generate_mulAdd();
    }
#ifndef _WINDOWS
    if (UseMontgomeryMultiplyIntrinsic) {
      StubRoutines::_montgomeryMultiply
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
    }
    if (UseMontgomerySquareIntrinsic) {
      StubRoutines::_montgomerySquare
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
    }
#endif // !_WINDOWS
#endif // COMPILER2

    if (UseVectorizedMismatchIntrinsic) {
      StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch();
    }
  }

 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
}; // end class declaration

void StubGenerator_generate(CodeBuffer* code, bool all) {
  StubGenerator g(code, all);
}
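// Annotation (not in the original source): the shared StubRoutines
// initialization code is expected to call this entry point once per pass,
// along the lines of
//
//   StubGenerator_generate(&buffer1, false);  // early stubs (generate_initial)
//   StubGenerator_generate(&buffer2, true);   // later stubs (generate_all)
//
// See the comment in stubRoutines.hpp referenced at the top of this file.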