/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions

// Stub Code definitions

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    // This can destroy rscratch1 if counter is far from the code cache
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter) \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif

  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    c_rarg4:   (interpreter) entry point              address
  //    c_rarg5:   parameters                             intptr_t*
  //    16(rbp):   parameter size (in words)              int
  //    24(rbp):   thread                                 Thread*
  //
  //      [ return_from_Java     ] <--- rsp
  //      [ argument word n      ]
  //       ...
  //  -12 [ argument word 1      ]
  //  -11 [ saved r15            ] <--- rsp_after_call
  //  -10 [ saved r14            ]
  //   -9 [ saved r13            ]
  //   -8 [ saved r12            ]
  //   -7 [ saved rbx            ]
  //   -6 [ call wrapper         ]
  //   -5 [ result               ]
  //   -4 [ result type          ]
  //   -3 [ method               ]
  //   -2 [ entry point          ]
  //   -1 [ parameters           ]
  //    0 [ saved rbp            ] <--- rbp
  //    1 [ return address       ]
  //    2 [ parameter size       ]
  //    3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    48(rbp):   (interpreter) entry point              address
  //    56(rbp):   parameters                             intptr_t*
  //    64(rbp):   parameter size (in words)              int
  //    72(rbp):   thread                                 Thread*
  //
  //      [ return_from_Java     ] <--- rsp
  //      [ argument word n      ]
  //       ...
  //  -60 [ argument word 1      ]
  //  -59 [ saved xmm31          ] <--- rsp_after_call
  //      [ saved xmm16-xmm30    ] (EVEX enabled, else the space is blank)
  //  -27 [ saved xmm15          ]
  //      [ saved xmm7-xmm14     ]
  //   -9 [ saved xmm6           ] (each xmm register takes 2 slots)
  //   -7 [ saved r15            ]
  //   -6 [ saved r14            ]
  //   -5 [ saved r13            ]
  //   -4 [ saved r12            ]
  //   -3 [ saved rdi            ]
  //   -2 [ saved rsi            ]
  //   -1 [ saved rbx            ]
  //    0 [ saved rbp            ] <--- rbp
  //    1 [ return address       ]
  //    2 [ call wrapper         ]
  //    3 [ result               ]
  //    4 [ result type          ]
  //    5 [ method               ]
  //    6 [ entry point          ]
  //    7 [ parameters           ]
  //    8 [ parameter size       ]
  //    9 [ thread               ]
  //
  // Windows reserves the caller's stack space for arguments 1-4.
  // We spill c_rarg0-c_rarg3 to this space.

  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    xmm_save_first     = 6,  // save from xmm6
    xmm_save_last      = 31, // to xmm31
    xmm_save_base      = -9,
    rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -59
    r15_off            = -7,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    call_wrapper_off   =  2,
    result_off         =  3,
    result_type_off    =  4,
    method_off         =  5,
    entry_point_off    =  6,
    parameters_off     =  7,
    parameter_size_off =  8,
    thread_off         =  9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    parameter_size_off =  2,
    thread_off         =  3
#endif
  };

#ifdef _WIN64
  Address xmm_save(int reg) {
    assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
    return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
  }
#endif
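
  // Illustrative arithmetic (comment only): the 26 saved registers
  // xmm6..xmm31 take two word slots each, so
  //   rsp_after_call_off = xmm_save_base - 2 * (31 - 6) = -9 - 50 = -59
  // matching the "-59 [ saved xmm31 ]" line in the layout above, and
  //   xmm_save(6) == Address(rbp,  -9 * wordSize)
  //   xmm_save(7) == Address(rbp, -11 * wordSize)  // and so on downwards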

  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,   c_rarg5); // parameters
    __ movptr(entry_point,  c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);
    if (UseAVX > 2) {
      __ movl(rbx, 0xffff);
      __ kmovwl(k1, rbx);
    }
#ifdef _WIN64
    int last_reg = 15;
    if (UseAVX > 2) {
      last_reg = 31;
    }
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vextractf32x4(xmm_save(i), as_XMMRegister(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(xmm_save(i), as_XMMRegister(i));
      }
    }

    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);       // parameter pointer
    __ movl(c_rarg1, c_rarg3);            // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0));  // get parameter
    __ addptr(c_rarg2, wordSize);         // advance to next parameter
    __ decrementl(c_rarg1);               // decrement counter
    __ push(rax);                         // pass parameter
    __ jcc(Assembler::notZero, loop);
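
    // In effect, the loop above performs (illustrative sketch, comment only):
    //   for (int i = parameter_size; i > 0; i--)
    //     push(*parameters++);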

    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);             // get Method*
    __ movptr(c_rarg1, entry_point);    // get entry_point
    __ mov(r13, rsp);                   // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::call_stub: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::call_stub: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L3);
    }
#endif

    // restore regs belonging to calling function
#ifdef _WIN64
    // emit the restores for xmm regs
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vinsertf32x4(as_XMMRegister(i), as_XMMRegister(i), xmm_save(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(as_XMMRegister(i), xmm_save(i));
      }
    }
#endif
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ vzeroupper();
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }
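
  // Result handling in the call stub, summarized (illustrative sketch,
  // comment only): anything that is not T_OBJECT, T_LONG, T_FLOAT or
  // T_DOUBLE is stored with a 32-bit move (*(jint*)result = rax);
  // T_OBJECT shares the 64-bit store with T_LONG, and the floating-point
  // types come back in xmm0.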

  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to set up the
  // rsp.
  //
  // rax: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    address start = __ pc();

    // same as in generate_call_stub():
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
    const Address thread        (rbp, thread_off         * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::catch_exception: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::catch_exception: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L3);
    }
#endif

    // set pending exception
    __ verify_oop(rax);

    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
    __ lea(rscratch1, ExternalAddress((address)__FILE__));
    __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
    __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL,
           "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }

  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.
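
    // In effect, the stub performs (illustrative sketch, comment only):
    //   rbx = SharedRuntime::exception_handler_for_return_address(thread, *rsp);
    //   rdx = pop();                        // return address == throwing pc
    //   rax = thread->pending_exception();
    //   thread->clear_pending_exception();
    //   jump to rbx;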

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }

  // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for jlong Atomic::xchg_long(jlong exchange_value, volatile jlong* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest,
  //                                  jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }
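
  // Note (illustrative sketch, comment only): LOCK CMPXCHG leaves the
  // previous memory value in rax whether or not the exchange happened, so
  // loading compare_value into rax first satisfies the contract above with
  // a single instruction; roughly:
  //   jint cmpxchg(jint x, volatile jint* dest, jint cmp) {
  //     jint old = *dest;          // atomically, under LOCK
  //     if (old == cmp) *dest = x;
  //     return old;
  //   }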

  // Support for int8_t Atomic::cmpxchg(int8_t exchange_value, volatile int8_t* dest,
  //                                    int8_t compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_byte() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
    address start = __ pc();

    __ movsbq(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for int64_t Atomic::cmpxchg(int64_t exchange_value,
  //                                     volatile int64_t* dest,
  //                                     int64_t compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jint Atomic::add(jint add_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    if ( os::is_MP() ) __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);
    __ addl(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_long");
    address start = __ pc();

    __ movptr(rax, c_rarg0);  // Copy to eax; we need a return value anyhow
    if ( os::is_MP() ) __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
    __ addptr(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for void OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess). This is used as part of debugging,
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp(rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // caller's fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }
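
  // The two loads in generate_get_previous_fp() walk the frame chain;
  // roughly (illustrative sketch, comment only):
  //   rax = *(intptr_t*)rbp;  // caller's saved rbp
  //   rax = *(intptr_t*)rax;  // the frame ps() is looking for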

  // Support for intptr_t get_previous_sp()
  //
  // This routine is used to find the previous stack pointer for the
  // caller.
  address generate_get_previous_sp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_sp");
    address start = __ pc();

    __ movptr(rax, rsp);
    __ addptr(rax, 8); // return address is at the top of the stack.
    __ ret(0);

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize);    // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }
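
  // The fixup stubs below correct an x86 float->integer conversion result.
  // (Background, presumed from the checks the stubs perform: cvttss2si and
  // cvttsd2si produce the "integer indefinite" value on NaN or overflow,
  // while Java requires NaN -> 0 and saturation to min_jint/max_jint or
  // min_jlong/max_jlong.)  Each stub re-examines the original operand in
  // the 'inout' stack slot and stores the corrected result back into it.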

  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64( mask, relocInfo::none );
    __ emit_data64( mask, relocInfo::none );

    return start;
  }
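
  // Note (comment only): generate_fp_mask() emits no instructions; it lays
  // down the same 64-bit mask twice, yielding an aligned 16-byte constant
  // that can serve as a memory operand for packed sign-bit manipulation of
  // floats and doubles (an assumption about its intended use, not shown here).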

  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,
      saved_r10     = 8 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr   = 16 * wordSize,
      error_msg     = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit); // if obj is NULL it is OK
    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // set r12 to heapbase for load_klass()
    __ reinit_heapbase();
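
    // In effect, the mask/bits test above is (illustrative sketch, comment only):
    //   if ((obj & Universe::verify_oop_mask()) != Universe::verify_oop_bits())
    //     goto error;  // oop lies outside the expected address range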

    // make sure klass is 'reasonable' (i.e., not zero)
    __ load_klass(rax, rax);  // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // restore c_rarg3
    __ pop(c_rarg2);                               // restore c_rarg2
    __ pop(r12);                                   // restore r12
    __ popf();                                     // restore flags
    __ ret(4 * wordSize);                          // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // get saved c_rarg3 back
    __ pop(c_rarg2);                               // get saved c_rarg2 back
    __ pop(r12);                                   // get saved r12 back
    __ popf();                                     // get saved flags off stack --
                                                   // will be ignored

    __ pusha();                                    // push registers
                                                   // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //     [tos +  0] 16 saved registers
    //     [tos + 16] return address
    //   * [tos + 17] error message (char*)
    //   * [tos + 18] object to verify (oop)
    //   * [tos + 19] saved rax - saved by caller and bashed
    //   * [tos + 20] saved r10 (rscratch1) - saved by caller
    //   * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ mov(rsp, r12);                               // restore rsp
    __ popa();                                      // pop registers (includes r12)
    __ ret(4 * wordSize);                           // pop caller saved stuff

    return start;
  }

  //
  // Verify that a register contains a clean 32-bit positive value
  // (high 32 bits are 0) so it can be used in 64-bit shifts.
  //
  //  Input:
  //    Rint  - 32-bit value
  //    Rtmp  - scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#ifdef ASSERT
    Label L;
    assert_different_registers(Rtmp, Rint);
    __ movslq(Rtmp, Rint);
    __ cmpq(Rtmp, Rint);
    __ jcc(Assembler::equal, L);
    __ stop("high 32-bits of int value are not 0");
    __ bind(L);
#endif
  }

  //  Generate overlap test for array copy stubs
  //
  //  Input:
  //     c_rarg0 - from
  //     c_rarg1 - to
  //     c_rarg2 - element count
  //
  //  Output:
  //     rax   - &from[element count]
  //
  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(NULL, &L_no_overlap, sf);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
    const Register from     = c_rarg0;
    const Register to       = c_rarg1;
    const Register count    = c_rarg2;
    const Register end_from = rax;

    __ cmpptr(to, from);
    __ lea(end_from, Address(from, count, sf, 0));
    if (NOLp == NULL) {
      ExternalAddress no_overlap(no_overlap_target);
      __ jump_cc(Assembler::belowEqual, no_overlap);
      __ cmpptr(to, end_from);
      __ jump_cc(Assembler::aboveEqual, no_overlap);
    } else {
      __ jcc(Assembler::belowEqual, (*NOLp));
      __ cmpptr(to, end_from);
      __ jcc(Assembler::aboveEqual, (*NOLp));
    }
  }

  // Shuffle first three arg regs on Windows into Linux/Solaris locations.
  //
  // Outputs:
  //    rdi <- rcx  (c_rarg0)
  //    rsi <- rdx  (c_rarg1)
  //    rdx <- r8   (c_rarg2)
  //    rcx <- r9   (c_rarg3)
  //
  // On Windows, registers r9 and r10 are used to save rdi and rsi, which
  // are non-volatile there.  r9 and r10 should not be used by the caller.
  //
  void setup_arg_regs(int nargs = 3) {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
    assert(nargs == 3 || nargs == 4, "else fix");
#ifdef _WIN64
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    if (nargs >= 4)
      __ mov(rax, r9);  // r9 is also saved_rdi
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
    if (nargs >= 4)
      __ mov(rcx, rax); // c_rarg3 (via rax)
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
  }

  void restore_arg_regs() {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
#ifdef _WIN64
    __ movptr(rdi, saved_rdi);
    __ movptr(rsi, saved_rsi);
#endif
  }
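
  // Overlap-test semantics (illustrative sketch, comment only): a conjoint
  // copy may safely run forwards exactly when
  //   to <= from || to >= from + count * element_size
  // which is what the two unsigned compares in array_overlap_test() check
  // before branching to the no-overlap (disjoint) entry point.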

  // Generate code for an array write pre barrier
  //
  //     addr               - starting address
  //     count              - element count
  //     dest_uninitialized - true if the destination is known to be uninitialized
  //
  //     Destroys no registers!
  //
  void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCTLogging:
        // With G1, don't generate the call if we statically know that the target is uninitialized
        if (!dest_uninitialized) {
          Label filtered;
          Address in_progress(r15_thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                                   SATBMarkQueue::byte_offset_of_active()));
          // Is marking active?
          if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
            __ cmpl(in_progress, 0);
          } else {
            assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
            __ cmpb(in_progress, 0);
          }
          __ jcc(Assembler::equal, filtered);

          __ pusha();                      // push registers
          if (count == c_rarg0) {
            if (addr == c_rarg1) {
              // exactly backwards!!
              __ xchgptr(c_rarg1, c_rarg0);
            } else {
              __ movptr(c_rarg1, count);
              __ movptr(c_rarg0, addr);
            }
          } else {
            __ movptr(c_rarg0, addr);
            __ movptr(c_rarg1, count);
          }
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
          __ popa();

          __ bind(filtered);
        }
        break;
      case BarrierSet::CardTableModRef:
        break;
      default:
        ShouldNotReachHere();

    }
  }

  //
  // Generate code for an array write post barrier
  //
  //  Input:
  //     start    - register containing starting address of destination array
  //     count    - elements count
  //     scratch  - scratch register
  //
  //  The input registers are overwritten.
  //
  void gen_write_ref_array_post_barrier(Register start, Register count, Register scratch) {
    assert_different_registers(start, count, scratch);
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCTLogging:
        {
          __ pusha();             // push registers (overkill)
          if (c_rarg0 == count) { // On win64 c_rarg0 == rcx
            assert_different_registers(c_rarg1, start);
            __ mov(c_rarg1, count);
            __ mov(c_rarg0, start);
          } else {
            assert_different_registers(c_rarg0, count);
            __ mov(c_rarg0, start);
            __ mov(c_rarg1, count);
          }
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
          __ popa();
        }
        break;
      case BarrierSet::CardTableModRef:
        {
          Label L_loop, L_done;
          const Register end = count;

          __ testl(count, count);
          __ jcc(Assembler::zero, L_done); // zero count - nothing to do

          __ leaq(end, Address(start, count, TIMES_OOP, 0)); // end == start+count*oop_size
          __ subptr(end, BytesPerHeapOop); // end - 1 to make inclusive
          __ shrptr(start, CardTable::card_shift);
          __ shrptr(end, CardTable::card_shift);
          __ subptr(end, start); // end --> cards count

          int64_t disp = ci_card_table_address_as<int64_t>();
          __ mov64(scratch, disp);
          __ addptr(start, scratch);
          __ BIND(L_loop);
          __ movb(Address(start, count, Address::times_1), 0);
          __ decrement(count);
          __ jcc(Assembler::greaterEqual, L_loop);
          __ BIND(L_done);
        }
        break;
      default:
        ShouldNotReachHere();

    }
  }
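
  // Card-marking arithmetic used above (illustrative sketch, comment only):
  // for a store region [start, start + count * oopSize), the
  // CardTableModRef case dirties the inclusive card range
  //   [start >> card_shift, (start + count * oopSize - 1) >> card_shift]
  // by writing one zero byte per card, walking backwards from the last card.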

  // Copy big chunks forward
  //
  // Inputs:
  //   end_from       - source arrays end address
  //   end_to         - destination array end address
  //   qword_count    - 64-bit element count, negative
  //   to             - scratch
  //   L_copy_bytes   - entry label
  //   L_copy_8_bytes - exit label
  //
  void copy_bytes_forward(Register end_from, Register end_to,
                          Register qword_count, Register to,
                          Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      if (UseAVX > 2) {
        __ movl(to, 0xffff);
        __ kmovwl(k1, to);
      }
      // Copy 64 bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit);
        __ evmovdqul(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
        __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
        __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 8);
      __ jcc(Assembler::lessEqual, L_loop);
      __ subptr(qword_count, 4);  // sub(8) and add(4)
      __ jccb(Assembler::greater, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
      }
      __ addptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32 bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
      __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
      __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
      __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
      __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);

      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 4);
      __ jcc(Assembler::lessEqual, L_loop);
    }
    __ subptr(qword_count, 4);
    __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
  }
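
  // Loop shape of the chunk copiers (illustrative sketch, comment only):
  // the forward copier is entered at L_copy_bytes with a negative qword
  // index climbing toward zero, so the end pointers stay fixed and only
  // the index moves; the unaligned 64-byte loop is essentially
  //   while ((qword_count += 8) <= 0)
  //     copy the 64 bytes ending at end_from[8 * qword_count + 8];
  // The backward copier below mirrors this with a positive index counting
  // down to zero.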

  // Copy big chunks backward
  //
  // Inputs:
  //   from           - source arrays address
  //   dest           - destination array address
  //   qword_count    - 64-bit element count
  //   to             - scratch
  //   L_copy_bytes   - entry label
  //   L_copy_8_bytes - exit label
  //
  void copy_bytes_backward(Register from, Register dest,
                           Register qword_count, Register to,
                           Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      if (UseAVX > 2) {
        __ movl(to, 0xffff);
        __ kmovwl(k1, to);
      }
      // Copy 64 bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 0), Assembler::AVX_512bit);
        __ evmovdqul(Address(dest, qword_count, Address::times_8, 0), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
        __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8,  0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8,  0), xmm1);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
        __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
        __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
        __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
        __ movdqu(xmm3, Address(from, qword_count, Address::times_8,  0));
        __ movdqu(Address(dest, qword_count, Address::times_8,  0), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 8);
      __ jcc(Assembler::greaterEqual, L_loop);

      __ addptr(qword_count, 4);  // add(8) and sub(4)
      __ jccb(Assembler::less, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8,  0));
        __ movdqu(Address(dest, qword_count, Address::times_8,  0), xmm1);
      }
      __ subptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32 bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(from, qword_count, Address::times_8, 24));
      __ movq(Address(dest, qword_count, Address::times_8, 24), to);
      __ movq(to, Address(from, qword_count, Address::times_8, 16));
      __ movq(Address(dest, qword_count, Address::times_8, 16), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  8));
      __ movq(Address(dest, qword_count, Address::times_8,  8), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  0));
      __ movq(Address(dest, qword_count, Address::times_8,  0), to);

      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 4);
      __ jcc(Assembler::greaterEqual, L_loop);
    }
    __ addptr(qword_count, 4);
    __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
  }
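
  // Tail handling in both chunk copiers (illustrative sketch, comment
  // only): when the 64-byte loop overshoots, the index is pulled back by
  // 4 qwords (the "sub(8) and add(4)" / "add(8) and sub(4)" adjustments)
  // so that a single 32-byte copy can be attempted before falling back to
  // the caller's 8-byte L_copy_8_bytes loop.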

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_byte_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_byte_copy().
  //
  address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    Label L_copy_byte, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
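    // Index trick used below (illustrative sketch, comment only):
    // end_from/end_to are set to the last qword of each array and
    // qword_count is negated, so that
    //   end_from[8 * qword_count + 8]
    // starts at the first qword and walks upward as qword_count rises to 0.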
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count); // make the count negative
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jccb(Assembler::zero, L_copy_byte);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ addptr(end_from, 2);
    __ addptr(end_to, 2);

    // Check for and copy trailing byte
    __ BIND(L_copy_byte);
    __ testl(byte_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movb(rax, Address(end_from, 8));
    __ movb(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }
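
  // Dispatch note (comment only): the conjoint stub below first runs
  // array_overlap_test(); if the regions do not overlap it branches to the
  // disjoint (forward) stub via nooverlap_target, otherwise it copies from
  // high to low addresses so source bytes are read before being overwritten.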

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from high to low addresses.

    // Check for and copy trailing byte
    __ testl(byte_count, 1);
    __ jcc(Assembler::zero, L_copy_2_bytes);
    __ movb(rax, Address(from, byte_count, Address::times_1, -1));
    __ movb(Address(to, byte_count, Address::times_1, -1), rax);
    __ decrement(byte_count); // Adjust for possible trailing word

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jcc(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, byte_count, Address::times_1, -2));
    __ movw(Address(to, byte_count, Address::times_1, -2), rax);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
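
  // Tail arithmetic for the short copies below (comment only): with 2-byte
  // elements, count >> 2 is the number of whole qwords; bit 1 of the
  // element count selects a trailing dword (two shorts) and bit 0 selects
  // a trailing word (one short).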

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_short_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_short_copy().
  //
  address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Original 'dest' is trashed, so we can't use it as a
    // base register for a possible trailing word copy

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }
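
  // Note (comment only): the fill stub below is only a frame wrapper; the
  // actual fill loop is emitted by MacroAssembler::generate_fill(), which
  // receives rax and xmm0 as scratch registers.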
  address generate_fill(BasicType t, bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    BLOCK_COMMENT("Entry:");

    const Register to    = c_rarg0;  // destination array address
    const Register value = c_rarg1;  // value
    const Register count = c_rarg2;  // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ generate_fill(t, aligned, to, value, count, rax, xmm0);

    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
                                       address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_2);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2); // count => qword_count

    // Copy from high to low addresses.  Use 'to' as scratch.
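    // Illustrative sketch of the backward copy below (hypothetical C,
    // not the stub itself): trailing elements that do not fill a qword
    // are peeled off the high end first, then whole qwords are copied
    // downward:
    //
    //   if (count & 1) to[count-1] = from[count-1];       // trailing word
    //   if (count & 2) copy_dword(from, to, 8*qwords);    // trailing dword
    //   while (qwords != 0) { --qwords; copy_qword(from, to, 8*qwords); }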

    // Check for and copy trailing word
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, word_count, Address::times_2, -2));
    __ movw(Address(to, word_count, Address::times_2, -2), rax);

    // Check for and copy trailing dword
  __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  // Side Effects:
  //   disjoint_int_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_int_oop_copy().
  //
  address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry,
                                         const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    const Register saved_to    = r11;  // saved destination array address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

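    // For the is_oop variants below the copy is bracketed by GC write
    // barriers, conceptually (illustrative sketch only; the real work is
    // done by gen_write_ref_array_pre/post_barrier):
    //
    //   pre_barrier(to, count);    // e.g. SATB pre-write info for G1
    //   ... copy elements ...
    //   post_barrier(to, count);   // card-mark the destination range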

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
    if (is_oop) {
      __ movq(saved_to, to);
      gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
    }

    // 'from', 'to' and 'count' are now valid
    __ movptr(dword_count, count);
    __ shrptr(count, 1); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
  __ BIND(L_copy_4_bytes);
    __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
    __ jccb(Assembler::zero, L_exit);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

  __ BIND(L_exit);
    if (is_oop) {
      gen_write_ref_array_post_barrier(saved_to, dword_count, rax);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ vzeroupper();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
                                         address *entry, const char *name,
                                         bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

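    // array_overlap_test (used just below) tail-jumps to the matching
    // disjoint stub when a forward copy cannot stomp on unread source
    // data, in effect (illustrative sketch):
    //
    //   if (to <= from || to >= from + count * elem_size)
    //     goto nooverlap_target;   // forward copy is safe
    //   // otherwise fall through and copy backward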

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_4);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    if (is_oop) {
      // no registers are destroyed by this call
      gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
    }

    assert_clean_int(count, rax); // Make sure 'count' is clean int.
    // 'from', 'to' and 'count' are now valid
    __ movptr(dword_count, count);
    __ shrptr(count, 1); // count => qword_count

    // Copy from high to low addresses.  Use 'to' as scratch.

    // Check for and copy trailing dword
    __ testl(dword_count, 1);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, dword_count, Address::times_4, -4));
    __ movl(Address(to, dword_count, Address::times_4, -4), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

  __ BIND(L_exit);
    if (is_oop) {
      gen_write_ref_array_post_barrier(to, dword_count, rax);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

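  // Element-count-to-qword conversion across the copy stubs, for
  // reference (from the shrptr shifts above; jlong/oop elements need no
  // shift and no trailing-element handling):
  //
  //   bytes:  qwords = count >> 3;  // up to 7 trailing bytes
  //   shorts: qwords = count >> 2;  // up to 3 trailing shorts
  //   ints:   qwords = count >> 1;  // up to 1 trailing int
  //   longs:  qwords = count;       // no tail
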
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // Side Effects:
  //   disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
  //   no-overlap entry point used by generate_conjoint_long_oop_copy().
  //
  address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry,
                                          const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register qword_count = rdx;  // elements count
    const Register end_from    = from; // source array end address
    const Register end_to      = rcx;  // destination array end address
    const Register saved_to    = to;
    const Register saved_count = r11;
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    // Save no-overlap entry point for generate_conjoint_long_oop_copy()
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
    // 'from', 'to' and 'qword_count' are now valid
    if (is_oop) {
      // Save to and count for store barrier
      __ movptr(saved_count, qword_count);
      // no registers are destroyed by this call
      gen_write_ref_array_pre_barrier(to, qword_count, dest_uninitialized);
    }

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    } else {
      restore_arg_regs();
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
      __ xorptr(rax, rax); // return 0
      __ vzeroupper();
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    if (is_oop) {
    __ BIND(L_exit);
      gen_write_ref_array_post_barrier(saved_to, saved_count, rax);
    }
    restore_arg_regs();
    if (is_oop) {
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
    }
    __ vzeroupper();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  address generate_conjoint_long_oop_copy(bool aligned, bool is_oop,
                                          address nooverlap_target, address *entry,
                                          const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register qword_count = rdx;  // elements count
    const Register saved_count = rcx;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_8);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
    // 'from', 'to' and 'qword_count' are now valid
    if (is_oop) {
      // Save to and count for store barrier
      __ movptr(saved_count, qword_count);
      // No registers are destroyed by this call
      gen_write_ref_array_pre_barrier(to, saved_count, dest_uninitialized);
    }

    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    } else {
      restore_arg_regs();
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
      __ xorptr(rax, rax); // return 0
      __ vzeroupper();
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    if (is_oop) {
    __ BIND(L_exit);
      gen_write_ref_array_post_barrier(to, saved_count, rax);
    }
    restore_arg_regs();
    if (is_oop) {
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
    }
    __ vzeroupper();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }


  // Helper for generating a dynamic type check.
  // Smashes no registers.
  void generate_type_check(Register sub_klass,
                           Register super_check_offset,
                           Register super_klass,
                           Label& L_success) {
    assert_different_registers(sub_klass, super_check_offset, super_klass);

    BLOCK_COMMENT("type_check:");

    Label L_miss;

    __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL,
                                     super_check_offset);
    __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL);

    // Fall through on failure!
  __ BIND(L_miss);
  }

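  // Conceptually the generated subtype check is (illustrative sketch; the
  // real logic lives in MacroAssembler::check_klass_subtype_fast_path and
  // check_klass_subtype_slow_path):
  //
  //   // fast path: K is a subtype of S if *(K + S->super_check_offset) == S
  //   // (primary supertypes sit at fixed offsets; otherwise the offset
  //   //  points at the secondary-supers cache)
  //   // slow path: linear scan of K->secondary_supers looking for S
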
  //
  //  Generate checkcasting array copy stub
  //
  //  Input:
  //    c_rarg0   - source array address
  //    c_rarg1   - destination array address
  //    c_rarg2   - element count, treated as ssize_t, can be zero
  //    c_rarg3   - size_t ckoff (super_check_offset)
  // not Win64
  //    c_rarg4   - oop ckval (super_klass)
  // Win64
  //    rsp+40    - oop ckval (super_klass)
  //
  //  Output:
  //    rax ==  0  -  success
  //    rax == -1^K - failure, where K is partial transfer count
  //
  address generate_checkcast_copy(const char *name, address *entry,
                                  bool dest_uninitialized = false) {

    Label L_load_element, L_store_element, L_do_card_marks, L_done;

    // Input registers (after setup_arg_regs)
    const Register from        = rdi;   // source array address
    const Register to          = rsi;   // destination array address
    const Register length      = rdx;   // elements count
    const Register ckoff       = rcx;   // super_check_offset
    const Register ckval       = r8;    // super_klass

    // Registers used as temps (r13, r14 are save-on-entry)
    const Register end_from    = from;  // source array end address
    const Register end_to      = r13;   // destination array end address
    const Register count       = rdx;   // -(count_remaining)
    const Register r14_length  = r14;   // saved copy of length
    // End pointers are inclusive, and if length is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    const Register rax_oop    = rax;    // actual oop copied
    const Register r11_klass  = r11;    // oop._klass

    //---------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the two arrays are subtypes of Object[] but the
    // destination array type is not equal to or a supertype
    // of the source type.  Each element must be separately
    // checked.

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef ASSERT
    // caller guarantees that the arrays really are different
    // otherwise, we would have to make conjoint checks
    { Label L;
      array_overlap_test(L, TIMES_OOP);
      __ stop("checkcast_copy within a single array");
      __ bind(L);
    }
#endif //ASSERT

    setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
                       // ckoff => rcx, ckval => r8
                       // r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
    // last argument (#4) is on stack on Win64
    __ movptr(ckval, Address(rsp, 6 * wordSize));
#endif

    // Caller of this entry point must set up the argument registers.
    if (entry != NULL) {
      *entry = __ pc();
      BLOCK_COMMENT("Entry:");
    }

    // allocate spill slots for r13, r14
    enum {
      saved_r13_offset,
      saved_r14_offset,
      saved_rbp_offset
    };
    __ subptr(rsp, saved_rbp_offset * wordSize);
    __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
    __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);

    // check that int operands are properly extended to size_t
    assert_clean_int(length, rax);
    assert_clean_int(ckoff, rax);

#ifdef ASSERT
    BLOCK_COMMENT("assert consistent ckoff/ckval");
    // The ckoff and ckval must be mutually consistent,
    // even though caller generates both.
    { Label L;
      int sco_offset = in_bytes(Klass::super_check_offset_offset());
      __ cmpl(ckoff, Address(ckval, sco_offset));
      __ jcc(Assembler::equal, L);
      __ stop("super_check_offset inconsistent");
      __ bind(L);
    }
#endif //ASSERT

    // Loop-invariant addresses.  They are exclusive end pointers.
    Address end_from_addr(from, length, TIMES_OOP, 0);
    Address   end_to_addr(to,   length, TIMES_OOP, 0);
    // Loop-variant addresses.  They assume post-incremented count < 0.
    Address from_element_addr(end_from, count, TIMES_OOP, 0);
    Address   to_element_addr(end_to,   count, TIMES_OOP, 0);

    gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);

    // Copy from low to high addresses, indexed from the end of each array.
    __ lea(end_from, end_from_addr);
    __ lea(end_to, end_to_addr);
    __ movptr(r14_length, length);        // save a copy of the length
    assert(length == count, "");          // else fix next line:
    __ negptr(count);                     // negate and test the length
    __ jcc(Assembler::notZero, L_load_element);

    // Empty array:  Nothing to do.
    __ xorptr(rax, rax);                  // return 0 on (trivial) success
    __ jmp(L_done);

    // ======== begin loop ========
    // (Loop is rotated; its entry is L_load_element.)
    // Loop control:
    //   for (count = -count; count != 0; count++)
    // Base pointers src, dst are biased by 8*(count-1), to last element.
    __ align(OptoLoopAlignment);

  __ BIND(L_store_element);
    __ store_heap_oop(to_element_addr, rax_oop);  // store the oop
    __ increment(count);                          // increment the count toward zero
    __ jcc(Assembler::zero, L_do_card_marks);

    // ======== loop entry is here ========
  __ BIND(L_load_element);
    __ load_heap_oop(rax_oop, from_element_addr); // load the oop
    __ testptr(rax_oop, rax_oop);
    __ jcc(Assembler::zero, L_store_element);

    __ load_klass(r11_klass, rax_oop);            // query the object klass
    generate_type_check(r11_klass, ckoff, ckval, L_store_element);
    // ======== end loop ========

    // It was a real error; we must depend on the caller to finish the job.
    // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
    // Emit GC store barriers for the oops we have copied (r14 + rdx),
    // and report their number to the caller.
    assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1);
    Label L_post_barrier;
    __ addptr(r14_length, count);     // K = (original - remaining) oops
    __ movptr(rax, r14_length);       // save the value
    __ notptr(rax);                   // report (-1^K) to caller (does not affect flags)
    __ jccb(Assembler::notZero, L_post_barrier);
    __ jmp(L_done); // K == 0, nothing was copied, skip post barrier

    // Come here on success only.
  __ BIND(L_do_card_marks);
    __ xorptr(rax, rax);              // return 0 on success

  __ BIND(L_post_barrier);
    gen_write_ref_array_post_barrier(to, r14_length, rscratch1);

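    // What the rotated loop above computes, in effect (illustrative
    // sketch, ignoring barriers and register details):
    //
    //   for (i = 0; i < length; i++) {
    //     oop o = from[i];
    //     if (o != NULL && !klass_of(o)->is_subtype_of(ckval))
    //       return ~i;          // -1 ^ (number of elements copied)
    //     to[i] = o;
    //   }
    //   return 0;
    //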
    // Common exit point (success or failure).
  __ BIND(L_done);
    __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
    __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  //
  //  Generate 'unsafe' array copy stub
  //  Though just as safe as the other stubs, it takes an unscaled
  //  size_t argument instead of an element count.
  //
  //  Input:
  //    c_rarg0   - source array address
  //    c_rarg1   - destination array address
  //    c_rarg2   - byte count, treated as ssize_t, can be zero
  //
  // Examines the alignment of the operands and dispatches
  // to a long, int, short, or byte copy loop.
  //
  address generate_unsafe_copy(const char *name,
                               address byte_copy_entry, address short_copy_entry,
                               address int_copy_entry, address long_copy_entry) {

    Label L_long_aligned, L_int_aligned, L_short_aligned;

    // Input registers (before setup_arg_regs)
    const Register from        = c_rarg0;  // source array address
    const Register to          = c_rarg1;  // destination array address
    const Register size        = c_rarg2;  // byte count (size_t)

    // Register used as a temp
    const Register bits        = rax;      // test copy of low bits

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);

    __ mov(bits, from);
    __ orptr(bits, to);
    __ orptr(bits, size);

    __ testb(bits, BytesPerLong-1);
    __ jccb(Assembler::zero, L_long_aligned);

    __ testb(bits, BytesPerInt-1);
    __ jccb(Assembler::zero, L_int_aligned);

    __ testb(bits, BytesPerShort-1);
    __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));

  __ BIND(L_short_aligned);
    __ shrptr(size, LogBytesPerShort); // size => short_count
    __ jump(RuntimeAddress(short_copy_entry));

  __ BIND(L_int_aligned);
    __ shrptr(size, LogBytesPerInt); // size => int_count
    __ jump(RuntimeAddress(int_copy_entry));

  __ BIND(L_long_aligned);
    __ shrptr(size, LogBytesPerLong); // size => qword_count
    __ jump(RuntimeAddress(long_copy_entry));

    return start;
  }

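  // The alignment dispatch above is, in effect (illustrative sketch):
  //
  //   bits = (uintptr_t)from | (uintptr_t)to | size;
  //   if      ((bits & 7) == 0) long_copy (from, to, size >> 3);
  //   else if ((bits & 3) == 0) int_copy  (from, to, size >> 2);
  //   else if ((bits & 1) == 0) short_copy(from, to, size >> 1);
  //   else                      byte_copy (from, to, size);
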
  // Perform range checks on the proposed arraycopy.
  // Kills temp, but nothing else.
  // Also, clean the sign bits of src_pos and dst_pos.
  void arraycopy_range_checks(Register src,     // source array oop (c_rarg0)
                              Register src_pos, // source position (c_rarg1)
                              Register dst,     // destination array oop (c_rarg2)
                              Register dst_pos, // destination position (c_rarg3)
                              Register length,
                              Register temp,
                              Label& L_failed) {
    BLOCK_COMMENT("arraycopy_range_checks:");

    //  if (src_pos + length > arrayOop(src)->length())  FAIL;
    __ movl(temp, length);
    __ addl(temp, src_pos);               // src_pos + length
    __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    //  if (dst_pos + length > arrayOop(dst)->length())  FAIL;
    __ movl(temp, length);
    __ addl(temp, dst_pos);               // dst_pos + length
    __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
    // Move with sign extension can be used since they are positive.
    __ movslq(src_pos, src_pos);
    __ movslq(dst_pos, dst_pos);

    BLOCK_COMMENT("arraycopy_range_checks done");
  }

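  // The checks above rely on unsigned comparison (jcc above), so a 32-bit
  // overflow of pos + length also fails; in effect (illustrative sketch):
  //
  //   if ((juint)(src_pos + length) > (juint)src->length()) goto fail;
  //   if ((juint)(dst_pos + length) > (juint)dst->length()) goto fail;
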
  //
  //  Generate generic array copy stubs
  //
  //  Input:
  //    c_rarg0    -  src oop
  //    c_rarg1    -  src_pos (32-bits)
  //    c_rarg2    -  dst oop
  //    c_rarg3    -  dst_pos (32-bits)
  // not Win64
  //    c_rarg4    -  element count (32-bits)
  // Win64
  //    rsp+40     -  element count (32-bits)
  //
  //  Output:
  //    rax ==  0  -  success
  //    rax == -1^K - failure, where K is partial transfer count
  //
  address generate_generic_copy(const char *name,
                                address byte_copy_entry, address short_copy_entry,
                                address int_copy_entry, address oop_copy_entry,
                                address long_copy_entry, address checkcast_copy_entry) {

    Label L_failed, L_failed_0, L_objArray;
    Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;

    // Input registers
    const Register src        = c_rarg0;  // source array oop
    const Register src_pos    = c_rarg1;  // source position
    const Register dst        = c_rarg2;  // destination array oop
    const Register dst_pos    = c_rarg3;  // destination position
#ifndef _WIN64
    const Register length     = c_rarg4;
#else
    const Address  length(rsp, 6 * wordSize); // elements count is on stack on Win64
#endif

    { int modulus = CodeEntryAlignment;
      int target  = modulus - 5; // 5 = sizeof jmp(L_failed)
      int advance = target - (__ offset() % modulus);
      if (advance < 0)  advance += modulus;
      if (advance > 0)  __ nop(advance);
    }
    StubCodeMark mark(this, "StubRoutines", name);

    // Short-hop target to L_failed.  Makes for denser prologue code.
  __ BIND(L_failed_0);
    __ jmp(L_failed);
    assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");

    __ align(CodeEntryAlignment);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_generic_array_copy_ctr);

    //-----------------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the following conditions are met:
    //
    // (1) src and dst must not be null.
    // (2) src_pos must not be negative.
    // (3) dst_pos must not be negative.
    // (4) length  must not be negative.
    // (5) src klass and dst klass should be the same and not NULL.
    // (6) src and dst should be arrays.
    // (7) src_pos + length must not exceed length of src.
    // (8) dst_pos + length must not exceed length of dst.
    //

    //  if (src == NULL) return -1;
    __ testptr(src, src);         // src oop
    size_t j1off = __ offset();
    __ jccb(Assembler::zero, L_failed_0);

    //  if (src_pos < 0) return -1;
    __ testl(src_pos, src_pos); // src_pos (32-bits)
    __ jccb(Assembler::negative, L_failed_0);

    //  if (dst == NULL) return -1;
    __ testptr(dst, dst);         // dst oop
    __ jccb(Assembler::zero, L_failed_0);

    //  if (dst_pos < 0) return -1;
    __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
    size_t j4off = __ offset();
    __ jccb(Assembler::negative, L_failed_0);

    // The first four tests are very dense code,
    // but not quite dense enough to put four
    // jumps in a 16-byte instruction fetch buffer.
    // That's good, because some branch predictors
    // do not like jumps so close together.
    // Make sure of this.
    guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");

    // registers used as temp
    const Register r11_length    = r11; // elements count to copy
    const Register r10_src_klass = r10; // array klass

    //  if (length < 0) return -1;
    __ movl(r11_length, length);        // length (elements count, 32-bits value)
    __ testl(r11_length, r11_length);
    __ jccb(Assembler::negative, L_failed_0);

    __ load_klass(r10_src_klass, src);
#ifdef ASSERT
    //  assert(src->klass() != NULL);
    {
      BLOCK_COMMENT("assert klasses not null {");
      Label L1, L2;
      __ testptr(r10_src_klass, r10_src_klass);
      __ jcc(Assembler::notZero, L2);   // it is broken if klass is NULL
      __ bind(L1);
      __ stop("broken null klass");
      __ bind(L2);
      __ load_klass(rax, dst);
      __ cmpq(rax, 0);
      __ jcc(Assembler::equal, L1);     // this would be broken also
      BLOCK_COMMENT("} assert klasses not null done");
    }
#endif

    // Load layout helper (32-bits)
    //
    //  |array_tag|     | header_size | element_type |     |log2_element_size|
    // 32        30    24            16              8     2                 0
    //
    //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
    //

    const int lh_offset = in_bytes(Klass::layout_helper_offset());

    // Handle objArrays completely differently...
    const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
    __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh);
    __ jcc(Assembler::equal, L_objArray);

    //  if (src->klass() != dst->klass()) return -1;
    __ load_klass(rax, dst);
    __ cmpq(r10_src_klass, rax);
    __ jcc(Assembler::notEqual, L_failed);

    const Register rax_lh = rax;  // layout helper
    __ movl(rax_lh, Address(r10_src_klass, lh_offset));

    //  if (!src->is_Array()) return -1;
    __ cmpl(rax_lh, Klass::_lh_neutral_value);
    __ jcc(Assembler::greaterEqual, L_failed);

    // At this point, it is known to be a typeArray (array_tag 0x3).
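    // Decoding the layout helper for the primitive copy below, in effect
    // (illustrative sketch; see the bit layout pictured above):
    //
    //   header = (lh >> _lh_header_size_shift) & _lh_header_size_mask;
    //   elsize = lh & _lh_log2_element_size_mask;   // log2(element size)
    //   src_addr = (char*)src + header + (src_pos << elsize);
    //   dst_addr = (char*)dst + header + (dst_pos << elsize);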
#ifdef ASSERT
    {
      BLOCK_COMMENT("assert primitive array {");
      Label L;
      __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
      __ jcc(Assembler::greaterEqual, L);
      __ stop("must be a primitive array");
      __ bind(L);
      BLOCK_COMMENT("} assert primitive array done");
    }
#endif

    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                           r10, L_failed);

    // TypeArrayKlass
    //
    // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
    // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
    //

    const Register r10_offset = r10;    // array offset
    const Register rax_elsize = rax_lh; // element size

    __ movl(r10_offset, rax_lh);
    __ shrl(r10_offset, Klass::_lh_header_size_shift);
    __ andptr(r10_offset, Klass::_lh_header_size_mask);   // array_offset
    __ addptr(src, r10_offset);           // src array offset
    __ addptr(dst, r10_offset);           // dst array offset
    BLOCK_COMMENT("choose copy loop based on element size");
    __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize

    // next registers should be set before the jump to corresponding stub
    const Register from     = c_rarg0;  // source array address
    const Register to       = c_rarg1;  // destination array address
    const Register count    = c_rarg2;  // elements count

    // 'from', 'to', 'count' registers should be set in such order
    // since they are the same as 'src', 'src_pos', 'dst'.

  __ BIND(L_copy_bytes);
    __ cmpl(rax_elsize, 0);
    __ jccb(Assembler::notEqual, L_copy_shorts);
    __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_1, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(byte_copy_entry));

  __ BIND(L_copy_shorts);
    __ cmpl(rax_elsize, LogBytesPerShort);
    __ jccb(Assembler::notEqual, L_copy_ints);
    __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_2, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(short_copy_entry));

  __ BIND(L_copy_ints);
    __ cmpl(rax_elsize, LogBytesPerInt);
    __ jccb(Assembler::notEqual, L_copy_longs);
    __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_4, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(int_copy_entry));

  __ BIND(L_copy_longs);
#ifdef ASSERT
    {
      BLOCK_COMMENT("assert long copy {");
      Label L;
      __ cmpl(rax_elsize, LogBytesPerLong);
      __ jcc(Assembler::equal, L);
      __ stop("must be long copy, but elsize is wrong");
      __ bind(L);
      BLOCK_COMMENT("} assert long copy done");
    }
#endif
    __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_8, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(long_copy_entry));

    // ObjArrayKlass
  __ BIND(L_objArray);
    // live at this point:  r10_src_klass, r11_length, src[_pos], dst[_pos]

    Label L_plain_copy, L_checkcast_copy;
    //  test array classes for subtyping
    __ load_klass(rax, dst);
    __ cmpq(r10_src_klass, rax);     // usual case is exact equality
    __ jcc(Assembler::notEqual, L_checkcast_copy);

    // Identically typed arrays can be copied without element-wise checks.
    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                           r10, L_failed);

    __ lea(from, Address(src, src_pos, TIMES_OOP,
                 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
    __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
                 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
    __ movl2ptr(count, r11_length); // length
  __ BIND(L_plain_copy);
    __ jump(RuntimeAddress(oop_copy_entry));

  __ BIND(L_checkcast_copy);
    // live at this point:  r10_src_klass, r11_length, rax (dst_klass)
    {
      // Before looking at dst.length, make sure dst is also an objArray.
      __ cmpl(Address(rax, lh_offset), objArray_lh);
      __ jcc(Assembler::notEqual, L_failed);

      // It is safe to examine both src.length and dst.length.
      arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                             rax, L_failed);

      const Register r11_dst_klass = r11;
      __ load_klass(r11_dst_klass, dst); // reload

      // Marshal the base address arguments now, freeing registers.
      __ lea(from, Address(src, src_pos, TIMES_OOP,
                   arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
                   arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ movl(count, length);           // length (reloaded)
      Register sco_temp = c_rarg3;      // this register is free now
      assert_different_registers(from, to, count, sco_temp,
                                 r11_dst_klass, r10_src_klass);
      assert_clean_int(count, sco_temp);

      // Generate the type check.
      const int sco_offset = in_bytes(Klass::super_check_offset_offset());
      __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
      assert_clean_int(sco_temp, rax);
      generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);

      // Fetch destination element klass from the ObjArrayKlass header.
      int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
      __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
      __ movl(  sco_temp,      Address(r11_dst_klass, sco_offset));
      assert_clean_int(sco_temp, rax);

      // the checkcast_copy loop needs two extra arguments:
      assert(c_rarg3 == sco_temp, "#3 already in place");
      // Set up arguments for checkcast_copy_entry.
      setup_arg_regs(4);
      __ movptr(r8, r11_dst_klass);  // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris
      __ jump(RuntimeAddress(checkcast_copy_entry));
    }

  __ BIND(L_failed);
    __ xorptr(rax, rax);
    __ notptr(rax); // return -1
    __ leave();   // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  void generate_arraycopy_stubs() {
    address entry;
    address entry_jbyte_arraycopy;
    address entry_jshort_arraycopy;
    address entry_jint_arraycopy;
    address entry_oop_arraycopy;
    address entry_jlong_arraycopy;
    address entry_checkcast_arraycopy;

    StubRoutines::_jbyte_disjoint_arraycopy  = generate_disjoint_byte_copy(false, &entry,
                                                                           "jbyte_disjoint_arraycopy");
    StubRoutines::_jbyte_arraycopy           = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy,
                                                                           "jbyte_arraycopy");

    StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry,
                                                                            "jshort_disjoint_arraycopy");
    StubRoutines::_jshort_arraycopy          = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy,
                                                                            "jshort_arraycopy");

    StubRoutines::_jint_disjoint_arraycopy   = generate_disjoint_int_oop_copy(false, false, &entry,
                                                                              "jint_disjoint_arraycopy");
    StubRoutines::_jint_arraycopy            = generate_conjoint_int_oop_copy(false, false, entry,
                                                                              &entry_jint_arraycopy, "jint_arraycopy");

    StubRoutines::_jlong_disjoint_arraycopy  = generate_disjoint_long_oop_copy(false, false, &entry,
                                                                               "jlong_disjoint_arraycopy");
    StubRoutines::_jlong_arraycopy           = generate_conjoint_long_oop_copy(false, false, entry,
                                                                               &entry_jlong_arraycopy, "jlong_arraycopy");


    if (UseCompressedOops) {
      StubRoutines::_oop_disjoint_arraycopy        = generate_disjoint_int_oop_copy(false, true, &entry,
                                                                                    "oop_disjoint_arraycopy");
      StubRoutines::_oop_arraycopy                 = generate_conjoint_int_oop_copy(false, true, entry,
                                                                                    &entry_oop_arraycopy, "oop_arraycopy");
      StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry,
                                                                                    "oop_disjoint_arraycopy_uninit",
                                                                                    /*dest_uninitialized*/true);
      StubRoutines::_oop_arraycopy_uninit          = generate_conjoint_int_oop_copy(false, true, entry,
                                                                                    NULL, "oop_arraycopy_uninit",
                                                                                    /*dest_uninitialized*/true);
    } else {
      StubRoutines::_oop_disjoint_arraycopy        = generate_disjoint_long_oop_copy(false, true, &entry,
                                                                                     "oop_disjoint_arraycopy");
      StubRoutines::_oop_arraycopy                 = generate_conjoint_long_oop_copy(false, true, entry,
                                                                                     &entry_oop_arraycopy, "oop_arraycopy");
      StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry,
                                                                                     "oop_disjoint_arraycopy_uninit",
                                                                                     /*dest_uninitialized*/true);
      StubRoutines::_oop_arraycopy_uninit          = generate_conjoint_long_oop_copy(false, true, entry,
                                                                                     NULL, "oop_arraycopy_uninit",
                                                                                     /*dest_uninitialized*/true);
    }

    StubRoutines::_checkcast_arraycopy        = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
    StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
                                                                        /*dest_uninitialized*/true);

    StubRoutines::_unsafe_arraycopy   = generate_unsafe_copy("unsafe_arraycopy",
                                                             entry_jbyte_arraycopy,
                                                             entry_jshort_arraycopy,
                                                             entry_jint_arraycopy,
                                                             entry_jlong_arraycopy);
    StubRoutines::_generic_arraycopy  = generate_generic_copy("generic_arraycopy",
                                                              entry_jbyte_arraycopy,
                                                              entry_jshort_arraycopy,
                                                              entry_jint_arraycopy,
                                                              entry_oop_arraycopy,
                                                              entry_jlong_arraycopy,
                                                              entry_checkcast_arraycopy);

    StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
    StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
    StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
    StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
    StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
    StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");

    // We don't generate specialized code for HeapWord-aligned source
    // arrays, so just use the code we've already generated
    StubRoutines::_arrayof_jbyte_disjoint_arraycopy  = StubRoutines::_jbyte_disjoint_arraycopy;
    StubRoutines::_arrayof_jbyte_arraycopy           = StubRoutines::_jbyte_arraycopy;

    StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy;
    StubRoutines::_arrayof_jshort_arraycopy          = StubRoutines::_jshort_arraycopy;

    StubRoutines::_arrayof_jint_disjoint_arraycopy   = StubRoutines::_jint_disjoint_arraycopy;
    StubRoutines::_arrayof_jint_arraycopy            = StubRoutines::_jint_arraycopy;

    StubRoutines::_arrayof_jlong_disjoint_arraycopy  = StubRoutines::_jlong_disjoint_arraycopy;
    StubRoutines::_arrayof_jlong_arraycopy           = StubRoutines::_jlong_arraycopy;

    StubRoutines::_arrayof_oop_disjoint_arraycopy    = StubRoutines::_oop_disjoint_arraycopy;
    StubRoutines::_arrayof_oop_arraycopy             = StubRoutines::_oop_arraycopy;

    StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit;
    StubRoutines::_arrayof_oop_arraycopy_uninit          = StubRoutines::_oop_arraycopy_uninit;
  }

  // AES intrinsic stubs
  enum {AESBlockSize = 16};

  address generate_key_shuffle_mask() {
    __ align(16);
    StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask");
    address start = __ pc();
    __ emit_data64( 0x0405060700010203, relocInfo::none );
    __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none );
    return start;
  }

  address generate_counter_shuffle_mask() {
    __ align(16);
    StubCodeMark mark(this, "StubRoutines", "counter_shuffle_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }

  // Utility routine for loading a 128-bit key word in little endian format.
  // Can optionally specify that the shuffle mask is already in an xmm register.
  void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
    __ movdqu(xmmdst, Address(key, offset));
    if (xmm_shuf_mask != NULL) {
      __ pshufb(xmmdst, xmm_shuf_mask);
    } else {
      __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    }
  }

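  // The key shuffle mask applied above byte-swaps each 32-bit word, so a
  // Java int[] key (big-endian ints) comes out in the byte order AES-NI
  // expects; in effect (illustrative sketch):
  //
  //   for (int i = 0; i < 4; i++)
  //     dst.int[i] = bswap32(((jint*)(key + offset))[i]);
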
  // Utility routine for increasing a 128-bit counter (the iv in CTR mode).
  void inc_counter(Register reg, XMMRegister xmmdst, int inc_delta, Label& next_block) {
    __ pextrq(reg, xmmdst, 0x0);
    __ addq(reg, inc_delta);
    __ pinsrq(xmmdst, reg, 0x0);
    __ jcc(Assembler::carryClear, next_block); // jump if no carry
    __ pextrq(reg, xmmdst, 0x01); // Carry
    __ addq(reg, 0x01);
    __ pinsrq(xmmdst, reg, 0x01); // Carry end
  __ BIND(next_block);            // next instruction
  }

  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //
  address generate_aescrypt_encryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
    Label L_doLast;
    address start = __ pc();

    const Register from        = c_rarg0;  // source array address
    const Register to          = c_rarg1;  // destination array address
    const Register key         = c_rarg2;  // key array address
    const Register keylen      = rax;

    const XMMRegister xmm_result = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;
    // On win64 xmm6-xmm15 must be preserved so don't use them.
    const XMMRegister xmm_temp1  = xmm2;
    const XMMRegister xmm_temp2  = xmm3;
    const XMMRegister xmm_temp3  = xmm4;
    const XMMRegister xmm_temp4  = xmm5;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    __ movdqu(xmm_result, Address(from, 0));  // get 16 bytes of input

    // For encryption, the java expanded key ordering is just what we need;
    // we don't know if the key is aligned, hence not using load-execute form

    load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask);
    __ pxor(xmm_result, xmm_temp1);

    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);
    __ aesenc(xmm_result, xmm_temp3);
    __ aesenc(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);
    __ aesenc(xmm_result, xmm_temp3);
    __ aesenc(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);

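    // Round count depends on key size: keylen is 44/52/60 ints for
    // 128/192/256-bit keys, i.e. 10/12/14 rounds.  The tail below is,
    // in effect (illustrative sketch):
    //
    //   if (keylen == 44) { round(0x90); last_round(0xa0); done; }
    //   round(0x90); round(0xa0);
    //   if (keylen == 52) { round(0xb0); last_round(0xc0); done; }
    //   round(0xb0); round(0xc0);
    //   round(0xd0); last_round(0xe0);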
    __ cmpl(keylen, 44);
    __ jccb(Assembler::equal, L_doLast);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);

    __ cmpl(keylen, 52);
    __ jccb(Assembler::equal, L_doLast);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);

  __ BIND(L_doLast);
    __ aesenc(xmm_result, xmm_temp1);
    __ aesenclast(xmm_result, xmm_temp2);
    __ movdqu(Address(to, 0), xmm_result);  // store the result
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }


  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //
  address generate_aescrypt_decryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
    Label L_doLast;
    address start = __ pc();

    const Register from        = c_rarg0;  // source array address
    const Register to          = c_rarg1;  // destination array address
    const Register key         = c_rarg2;  // key array address
    const Register keylen      = rax;

    const XMMRegister xmm_result = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;
    // On win64 xmm6-xmm15 must be preserved so don't use them.
    const XMMRegister xmm_temp1  = xmm2;
    const XMMRegister xmm_temp2  = xmm3;
    const XMMRegister xmm_temp3  = xmm4;
    const XMMRegister xmm_temp4  = xmm5;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    __ movdqu(xmm_result, Address(from, 0));

    // for decryption java expanded key ordering is rotated one position from what we want
    // so we start from 0x10 here and hit 0x00 last
    // we don't know if the key is aligned, hence not using load-execute form
    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);

    __ pxor  (xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);
    __ aesdec(xmm_result, xmm_temp3);
    __ aesdec(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);
    __ aesdec(xmm_result, xmm_temp3);
    __ aesdec(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask);

    __ cmpl(keylen, 44);
    __ jccb(Assembler::equal, L_doLast);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);

    __ cmpl(keylen, 52);
    __ jccb(Assembler::equal, L_doLast);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);

  __ BIND(L_doLast);
    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    // for decryption the aesdeclast operation is always on key+0x00
    __ aesdeclast(xmm_result, xmm_temp3);
    __ movdqu(Address(to, 0), xmm_result);  // store the result
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }


  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - r vector byte array address
  //   c_rarg4   - input length
  //
  // Output:
  //   rax       - input length
  //
  address generate_cipherBlockChaining_encryptAESCrypt() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
    address start = __ pc();

    Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256;
    const Register from        = c_rarg0;  // source array address
    const Register to          = c_rarg1;  // destination array address
    const Register key         = c_rarg2;  // key array address
    const Register rvec        = c_rarg3;  // r byte array initialized from initvector array address
                                           // and left with the results of the last encryption block
#ifndef _WIN64
    const Register len_reg     = c_rarg4;  // src len (must be multiple of blocksize 16)
#else
    const Address  len_mem(rbp, 6 * wordSize);  // length is on stack on Win64
    const Register len_reg     = r11;      // pick the volatile windows register
#endif
    const Register pos         = rax;

    // xmm register assignments for the loops below
    const XMMRegister xmm_result = xmm0;
    const XMMRegister xmm_temp   = xmm1;
    // keys 0-10 preloaded into xmm2-xmm12
    const int XMM_REG_NUM_KEY_FIRST = 2;
    const int XMM_REG_NUM_KEY_LAST  = 15;
    const XMMRegister xmm_key0   = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
    const XMMRegister xmm_key10  = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10);
    const XMMRegister xmm_key11  = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11);
    const XMMRegister xmm_key12  = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12);
    const XMMRegister xmm_key13  = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13);

    __ enter(); // required for proper stackwalking of RuntimeStub frame

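    // The 128-bit CBC loop below computes, in effect (illustrative
    // sketch; the 192/256-bit paths differ only in round count):
    //
    //   r = load16(rvec);
    //   for (pos = 0; pos < len; pos += 16) {
    //     r = AES_encrypt(load16(from + pos) ^ r, key);
    //     store16(to + pos, r);
    //   }
    //   store16(rvec, r);   // chaining value survives the call
    //   return len;
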
3261 if (VM_Version::supports_avx512vlbw()) { 3262 __ movl(rax, 0xffff); 3263 __ kmovql(k1, rax); 3264 } 3265 3266 #ifdef _WIN64 3267 // on win64, fill len_reg from stack position 3268 __ movl(len_reg, len_mem); 3269 #else 3270 __ push(len_reg); // Save 3271 #endif 3272 3273 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 3274 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3275 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0 3276 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) { 3277 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3278 offset += 0x10; 3279 } 3280 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 3281 3282 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3283 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3284 __ cmpl(rax, 44); 3285 __ jcc(Assembler::notEqual, L_key_192_256); 3286 3287 // 128 bit code follows here 3288 __ movptr(pos, 0); 3289 __ align(OptoLoopAlignment); 3290 3291 __ BIND(L_loopTop_128); 3292 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3293 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3294 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3295 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) { 3296 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3297 } 3298 __ aesenclast(xmm_result, xmm_key10); 3299 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3300 // no need to store r to memory until we exit 3301 __ addptr(pos, AESBlockSize); 3302 __ subptr(len_reg, AESBlockSize); 3303 __ jcc(Assembler::notEqual, L_loopTop_128); 3304 3305 __ BIND(L_exit); 3306 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object 3307 3308 #ifdef _WIN64 3309 __ movl(rax, len_mem); 3310 #else 3311 __ pop(rax); // return length 3312 #endif 3313 __ leave(); // required for proper stackwalking of RuntimeStub frame 3314 __ ret(0); 3315 3316 __ BIND(L_key_192_256); 3317 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 3318 load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask); 3319 load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask); 3320 __ cmpl(rax, 52); 3321 __ jcc(Assembler::notEqual, L_key_256); 3322 3323 // 192-bit code follows here (could be changed to use more xmm registers) 3324 __ movptr(pos, 0); 3325 __ align(OptoLoopAlignment); 3326 3327 __ BIND(L_loopTop_192); 3328 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3329 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3330 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3331 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) { 3332 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3333 } 3334 __ aesenclast(xmm_result, xmm_key12); 3335 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3336 // no need to store r to memory until we exit 3337 __ addptr(pos, AESBlockSize); 3338 __ subptr(len_reg, AESBlockSize); 3339 __ jcc(Assembler::notEqual, L_loopTop_192); 3340 __ jmp(L_exit); 3341 3342 __ BIND(L_key_256); 3343 // 256-bit code follows here (could be 
changed to use more xmm registers)
    load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask);
    __ movptr(pos, 0);
    __ align(OptoLoopAlignment);

    __ BIND(L_loopTop_256);
    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input
    __ pxor  (xmm_result, xmm_temp);  // xor with the current r vector
    __ pxor  (xmm_result, xmm_key0);  // do the aes rounds
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) {
      __ aesenc(xmm_result, as_XMMRegister(rnum));
    }
    load_key(xmm_temp, key, 0xe0);
    __ aesenclast(xmm_result, xmm_temp);
    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_loopTop_256);
    __ jmp(L_exit);

    return start;
  }

  // Safefetch stubs.
  void generate_safefetch(const char* name, int size, address* entry,
                          address* fault_pc, address* continuation_pc) {
    // safefetch signatures:
    //   int      SafeFetch32(int*      adr, int      errValue);
    //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
    //
    // arguments:
    //   c_rarg0 = adr
    //   c_rarg1 = errValue
    //
    // result:
    //   rax = *adr or errValue

    StubCodeMark mark(this, "StubRoutines", name);

    // Entry point, pc or function descriptor.
    *entry = __ pc();

    // Load *adr into c_rarg1, may fault.
    *fault_pc = __ pc();
    switch (size) {
      case 4:
        // int32_t
        __ movl(c_rarg1, Address(c_rarg0, 0));
        break;
      case 8:
        // int64_t
        __ movq(c_rarg1, Address(c_rarg0, 0));
        break;
      default:
        ShouldNotReachHere();
    }

    // return errValue or *adr
    *continuation_pc = __ pc();
    __ movq(rax, c_rarg1);
    __ ret(0);
  }

  // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time
  // to hide instruction latency
  //
  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - r vector byte array address
  //   c_rarg4   - input length
  //
  // Output:
  //   rax       - input length
  //
  address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
    address start = __ pc();

    const Register from    = c_rarg0;  // source array address
    const Register to      = c_rarg1;  // destination array address
    const Register key     = c_rarg2;  // key array address
    const Register rvec    = c_rarg3;  // r byte array initialized from initvector array address
                                       // and left with the results of the last encryption block
#ifndef _WIN64
    const Register len_reg = c_rarg4;  // src len (must be multiple of blocksize 16)
#else
    const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64
    const Register len_reg = r11;      // pick the volatile windows register
#endif
    const Register pos = rax;

    const int PARALLEL_FACTOR = 4;
    const int ROUNDS[3] = { 10, 12, 14 }; // aes rounds for key128, key192, key256

    Label L_exit;
    Label
L_singleBlock_loopTopHead[3]; // 128, 192, 256 3446 Label L_singleBlock_loopTopHead2[3]; // 128, 192, 256 3447 Label L_singleBlock_loopTop[3]; // 128, 192, 256 3448 Label L_multiBlock_loopTopHead[3]; // 128, 192, 256 3449 Label L_multiBlock_loopTop[3]; // 128, 192, 256 3450 3451 // keys 0-10 preloaded into xmm5-xmm15 3452 const int XMM_REG_NUM_KEY_FIRST = 5; 3453 const int XMM_REG_NUM_KEY_LAST = 15; 3454 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3455 const XMMRegister xmm_key_last = as_XMMRegister(XMM_REG_NUM_KEY_LAST); 3456 3457 __ enter(); // required for proper stackwalking of RuntimeStub frame 3458 3459 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3460 // context for the registers used, where all instructions below are using 128-bit mode 3461 // On EVEX without VL and BW, these instructions will all be AVX. 3462 if (VM_Version::supports_avx512vlbw()) { 3463 __ movl(rax, 0xffff); 3464 __ kmovql(k1, rax); 3465 } 3466 3467 #ifdef _WIN64 3468 // on win64, fill len_reg from stack position 3469 __ movl(len_reg, len_mem); 3470 #else 3471 __ push(len_reg); // Save 3472 #endif 3473 __ push(rbx); 3474 // the java expanded key ordering is rotated one position from what we want 3475 // so we start from 0x10 here and hit 0x00 last 3476 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 3477 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3478 // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00 3479 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) { 3480 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3481 offset += 0x10; 3482 } 3483 load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask); 3484 3485 const XMMRegister xmm_prev_block_cipher = xmm1; // holds cipher of previous block 3486 3487 // registers holding the four results in the parallelized loop 3488 const XMMRegister xmm_result0 = xmm0; 3489 const XMMRegister xmm_result1 = xmm2; 3490 const XMMRegister xmm_result2 = xmm3; 3491 const XMMRegister xmm_result3 = xmm4; 3492 3493 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec 3494 3495 __ xorptr(pos, pos); 3496 3497 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3498 __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3499 __ cmpl(rbx, 52); 3500 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[1]); 3501 __ cmpl(rbx, 60); 3502 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[2]); 3503 3504 #define DoFour(opc, src_reg) \ 3505 __ opc(xmm_result0, src_reg); \ 3506 __ opc(xmm_result1, src_reg); \ 3507 __ opc(xmm_result2, src_reg); \ 3508 __ opc(xmm_result3, src_reg); \ 3509 3510 for (int k = 0; k < 3; ++k) { 3511 __ BIND(L_multiBlock_loopTopHead[k]); 3512 if (k != 0) { 3513 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left 3514 __ jcc(Assembler::less, L_singleBlock_loopTopHead2[k]); 3515 } 3516 if (k == 1) { 3517 __ subptr(rsp, 6 * wordSize); 3518 __ movdqu(Address(rsp, 0), xmm15); //save last_key from xmm15 3519 load_key(xmm15, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0 3520 __ movdqu(Address(rsp, 2 * wordSize), xmm15); 3521 load_key(xmm1, key, 0xc0); // 0xc0; 3522 __ movdqu(Address(rsp, 4 * wordSize), xmm1); 3523 } else if (k == 2) { 3524 __ subptr(rsp, 10 * wordSize); 3525 __ 
movdqu(Address(rsp, 0), xmm15); // save last_key from xmm15
          load_key(xmm15, key, 0xd0); // 0xd0; 256-bit key goes up to 0xe0
          __ movdqu(Address(rsp, 6 * wordSize), xmm15);
          load_key(xmm1, key, 0xe0);  // 0xe0;
          __ movdqu(Address(rsp, 8 * wordSize), xmm1);
          load_key(xmm15, key, 0xb0); // 0xb0;
          __ movdqu(Address(rsp, 2 * wordSize), xmm15);
          load_key(xmm1, key, 0xc0);  // 0xc0;
          __ movdqu(Address(rsp, 4 * wordSize), xmm1);
        }
        __ align(OptoLoopAlignment);
        __ BIND(L_multiBlock_loopTop[k]);
        __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left
        __ jcc(Assembler::less, L_singleBlock_loopTopHead[k]);

        if (k != 0) {
          __ movdqu(xmm15, Address(rsp, 2 * wordSize));
          __ movdqu(xmm1, Address(rsp, 4 * wordSize));
        }

        __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); // get next 4 blocks into xmm_result registers
        __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
        __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
        __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize));

        DoFour(pxor, xmm_key_first);
        if (k == 0) {
          for (int rnum = 1; rnum < ROUNDS[k]; rnum++) {
            DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
          }
          DoFour(aesdeclast, xmm_key_last);
        } else if (k == 1) {
          for (int rnum = 1; rnum <= ROUNDS[k]-2; rnum++) {
            DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
          }
          __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again.
          DoFour(aesdec, xmm1);  // key : 0xc0
          __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again
          DoFour(aesdeclast, xmm_key_last);
        } else if (k == 2) {
          for (int rnum = 1; rnum <= ROUNDS[k] - 4; rnum++) {
            DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
          }
          DoFour(aesdec, xmm1);  // key : 0xc0
          __ movdqu(xmm15, Address(rsp, 6 * wordSize));
          __ movdqu(xmm1, Address(rsp, 8 * wordSize));
          DoFour(aesdec, xmm15);  // key : 0xd0
          __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again.
3573 DoFour(aesdec, xmm1); // key : 0xe0 3574 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again 3575 DoFour(aesdeclast, xmm_key_last); 3576 } 3577 3578 // for each result, xor with the r vector of previous cipher block 3579 __ pxor(xmm_result0, xmm_prev_block_cipher); 3580 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 3581 __ pxor(xmm_result1, xmm_prev_block_cipher); 3582 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 3583 __ pxor(xmm_result2, xmm_prev_block_cipher); 3584 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 3585 __ pxor(xmm_result3, xmm_prev_block_cipher); 3586 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3 * AESBlockSize)); // this will carry over to next set of blocks 3587 if (k != 0) { 3588 __ movdqu(Address(rvec, 0x00), xmm_prev_block_cipher); 3589 } 3590 3591 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output 3592 __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1); 3593 __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2); 3594 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3); 3595 3596 __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); 3597 __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); 3598 __ jmp(L_multiBlock_loopTop[k]); 3599 3600 // registers used in the non-parallelized loops 3601 // xmm register assignments for the loops below 3602 const XMMRegister xmm_result = xmm0; 3603 const XMMRegister xmm_prev_block_cipher_save = xmm2; 3604 const XMMRegister xmm_key11 = xmm3; 3605 const XMMRegister xmm_key12 = xmm4; 3606 const XMMRegister key_tmp = xmm4; 3607 3608 __ BIND(L_singleBlock_loopTopHead[k]); 3609 if (k == 1) { 3610 __ addptr(rsp, 6 * wordSize); 3611 } else if (k == 2) { 3612 __ addptr(rsp, 10 * wordSize); 3613 } 3614 __ cmpptr(len_reg, 0); // any blocks left?? 
      __ jcc(Assembler::equal, L_exit);
      __ BIND(L_singleBlock_loopTopHead2[k]);
      if (k == 1) {
        load_key(xmm_key11, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0
        load_key(xmm_key12, key, 0xc0); // 0xc0; 192-bit key goes up to 0xc0
      }
      if (k == 2) {
        load_key(xmm_key11, key, 0xb0); // 0xb0; 256-bit key goes up to 0xe0
      }
      __ align(OptoLoopAlignment);
      __ BIND(L_singleBlock_loopTop[k]);
      __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
      __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector
      __ pxor(xmm_result, xmm_key_first); // do the aes dec rounds
      for (int rnum = 1; rnum <= 9; rnum++) {
        __ aesdec(xmm_result, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
      }
      if (k == 1) {
        __ aesdec(xmm_result, xmm_key11);
        __ aesdec(xmm_result, xmm_key12);
      }
      if (k == 2) {
        __ aesdec(xmm_result, xmm_key11);
        load_key(key_tmp, key, 0xc0);
        __ aesdec(xmm_result, key_tmp);
        load_key(key_tmp, key, 0xd0);
        __ aesdec(xmm_result, key_tmp);
        load_key(key_tmp, key, 0xe0);
        __ aesdec(xmm_result, key_tmp);
      }

      __ aesdeclast(xmm_result, xmm_key_last); // xmm15 always came from key+0
      __ pxor(xmm_result, xmm_prev_block_cipher); // xor with the current r vector
      __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
      // no need to store r to memory until we exit
      __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block
      __ addptr(pos, AESBlockSize);
      __ subptr(len_reg, AESBlockSize);
      __ jcc(Assembler::notEqual, L_singleBlock_loopTop[k]);
      if (k != 2) {
        __ jmp(L_exit);
      }
    } // for 128/192/256

    __ BIND(L_exit);
    __ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object
    __ pop(rbx);
#ifdef _WIN64
    __ movl(rax, len_mem);
#else
    __ pop(rax); // return length
#endif
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  address generate_upper_word_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "upper_word_mask");
    address start = __ pc();
    __ emit_data64(0x0000000000000000, relocInfo::none);
    __ emit_data64(0xFFFFFFFF00000000, relocInfo::none);
    return start;
  }

  address generate_shuffle_byte_flip_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }
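  // Used as a (v)pshufb control, the 16 bytes emitted above reverse all 16
  // bytes of an XMM register, turning the little-endian input into the
  // big-endian byte order the SHA-1 code expects. A scalar sketch of the
  // same transform (illustrative only, not part of the stub):
  //
  //   static void byte_flip_128(uint8_t b[16]) {
  //     for (int i = 0; i < 8; i++) {  // mirror the 16-byte lane in place
  //       uint8_t t = b[i]; b[i] = b[15 - i]; b[15 - i] = t;
  //     }
  //   }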
  // ofs and limit are used for multi-block byte array.
  // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
  address generate_sha1_implCompress(bool multi_block, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf   = c_rarg0;
    Register state = c_rarg1;
    Register ofs   = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister abcd = xmm0;
    const XMMRegister e0 = xmm1;
    const XMMRegister e1 = xmm2;
    const XMMRegister msg0 = xmm3;

    const XMMRegister msg1 = xmm4;
    const XMMRegister msg2 = xmm5;
    const XMMRegister msg3 = xmm6;
    const XMMRegister shuf_mask = xmm7;

    __ enter();

    __ subptr(rsp, 4 * wordSize);

    __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask,
      buf, state, ofs, limit, rsp, multi_block);

    __ addptr(rsp, 4 * wordSize);

    __ leave();
    __ ret(0);
    return start;
  }

  address generate_pshuffle_byte_flip_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask");
    address start = __ pc();
    __ emit_data64(0x0405060700010203, relocInfo::none);
    __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);

    if (VM_Version::supports_avx2()) {
      __ emit_data64(0x0405060700010203, relocInfo::none); // second copy
      __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
      // _SHUF_00BA
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      // _SHUF_DC00
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0x0b0a090803020100, relocInfo::none);
    }

    return start;
  }

  // Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
  address generate_pshuffle_byte_flip_mask_sha512() {
    __ align(32);
    StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask_sha512");
    address start = __ pc();
    if (VM_Version::supports_avx2()) {
      __ emit_data64(0x0001020304050607, relocInfo::none); // PSHUFFLE_BYTE_FLIP_MASK
      __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
      __ emit_data64(0x1011121314151617, relocInfo::none);
      __ emit_data64(0x18191a1b1c1d1e1f, relocInfo::none);
      __ emit_data64(0x0000000000000000, relocInfo::none); // MASK_YMM_LO
      __ emit_data64(0x0000000000000000, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
      __ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
    }

    return start;
  }

  // ofs and limit are used for multi-block byte array.
3771 // int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit) 3772 address generate_sha256_implCompress(bool multi_block, const char *name) { 3773 assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), ""); 3774 __ align(CodeEntryAlignment); 3775 StubCodeMark mark(this, "StubRoutines", name); 3776 address start = __ pc(); 3777 3778 Register buf = c_rarg0; 3779 Register state = c_rarg1; 3780 Register ofs = c_rarg2; 3781 Register limit = c_rarg3; 3782 3783 const XMMRegister msg = xmm0; 3784 const XMMRegister state0 = xmm1; 3785 const XMMRegister state1 = xmm2; 3786 const XMMRegister msgtmp0 = xmm3; 3787 3788 const XMMRegister msgtmp1 = xmm4; 3789 const XMMRegister msgtmp2 = xmm5; 3790 const XMMRegister msgtmp3 = xmm6; 3791 const XMMRegister msgtmp4 = xmm7; 3792 3793 const XMMRegister shuf_mask = xmm8; 3794 3795 __ enter(); 3796 3797 __ subptr(rsp, 4 * wordSize); 3798 3799 if (VM_Version::supports_sha()) { 3800 __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3801 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3802 } else if (VM_Version::supports_avx2()) { 3803 __ sha256_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3804 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3805 } 3806 __ addptr(rsp, 4 * wordSize); 3807 __ vzeroupper(); 3808 __ leave(); 3809 __ ret(0); 3810 return start; 3811 } 3812 3813 address generate_sha512_implCompress(bool multi_block, const char *name) { 3814 assert(VM_Version::supports_avx2(), ""); 3815 assert(VM_Version::supports_bmi2(), ""); 3816 __ align(CodeEntryAlignment); 3817 StubCodeMark mark(this, "StubRoutines", name); 3818 address start = __ pc(); 3819 3820 Register buf = c_rarg0; 3821 Register state = c_rarg1; 3822 Register ofs = c_rarg2; 3823 Register limit = c_rarg3; 3824 3825 const XMMRegister msg = xmm0; 3826 const XMMRegister state0 = xmm1; 3827 const XMMRegister state1 = xmm2; 3828 const XMMRegister msgtmp0 = xmm3; 3829 const XMMRegister msgtmp1 = xmm4; 3830 const XMMRegister msgtmp2 = xmm5; 3831 const XMMRegister msgtmp3 = xmm6; 3832 const XMMRegister msgtmp4 = xmm7; 3833 3834 const XMMRegister shuf_mask = xmm8; 3835 3836 __ enter(); 3837 3838 __ sha512_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4, 3839 buf, state, ofs, limit, rsp, multi_block, shuf_mask); 3840 3841 __ vzeroupper(); 3842 __ leave(); 3843 __ ret(0); 3844 return start; 3845 } 3846 3847 // This is a version of CTR/AES crypt which does 6 blocks in a loop at a time 3848 // to hide instruction latency 3849 // 3850 // Arguments: 3851 // 3852 // Inputs: 3853 // c_rarg0 - source byte array address 3854 // c_rarg1 - destination byte array address 3855 // c_rarg2 - K (key) in little endian int array 3856 // c_rarg3 - counter vector byte array address 3857 // Linux 3858 // c_rarg4 - input length 3859 // c_rarg5 - saved encryptedCounter start 3860 // rbp + 6 * wordSize - saved used length 3861 // Windows 3862 // rbp + 6 * wordSize - input length 3863 // rbp + 7 * wordSize - saved encryptedCounter start 3864 // rbp + 8 * wordSize - saved used length 3865 // 3866 // Output: 3867 // rax - input length 3868 // 3869 address generate_counterMode_AESCrypt_Parallel() { 3870 assert(UseAES, "need AES instructions and misaligned SSE support"); 3871 __ align(CodeEntryAlignment); 3872 StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt"); 3873 address start = __ pc(); 3874 const Register from = c_rarg0; // source array address 3875 const Register to = c_rarg1; 
// destination array address 3876 const Register key = c_rarg2; // key array address 3877 const Register counter = c_rarg3; // counter byte array initialized from counter array address 3878 // and updated with the incremented counter in the end 3879 #ifndef _WIN64 3880 const Register len_reg = c_rarg4; 3881 const Register saved_encCounter_start = c_rarg5; 3882 const Register used_addr = r10; 3883 const Address used_mem(rbp, 2 * wordSize); 3884 const Register used = r11; 3885 #else 3886 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3887 const Address saved_encCounter_mem(rbp, 7 * wordSize); // length is on stack on Win64 3888 const Address used_mem(rbp, 8 * wordSize); // length is on stack on Win64 3889 const Register len_reg = r10; // pick the first volatile windows register 3890 const Register saved_encCounter_start = r11; 3891 const Register used_addr = r13; 3892 const Register used = r14; 3893 #endif 3894 const Register pos = rax; 3895 3896 const int PARALLEL_FACTOR = 6; 3897 const XMMRegister xmm_counter_shuf_mask = xmm0; 3898 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 3899 const XMMRegister xmm_curr_counter = xmm2; 3900 3901 const XMMRegister xmm_key_tmp0 = xmm3; 3902 const XMMRegister xmm_key_tmp1 = xmm4; 3903 3904 // registers holding the four results in the parallelized loop 3905 const XMMRegister xmm_result0 = xmm5; 3906 const XMMRegister xmm_result1 = xmm6; 3907 const XMMRegister xmm_result2 = xmm7; 3908 const XMMRegister xmm_result3 = xmm8; 3909 const XMMRegister xmm_result4 = xmm9; 3910 const XMMRegister xmm_result5 = xmm10; 3911 3912 const XMMRegister xmm_from0 = xmm11; 3913 const XMMRegister xmm_from1 = xmm12; 3914 const XMMRegister xmm_from2 = xmm13; 3915 const XMMRegister xmm_from3 = xmm14; //the last one is xmm14. we have to preserve it on WIN64. 3916 const XMMRegister xmm_from4 = xmm3; //reuse xmm3~4. Because xmm_key_tmp0~1 are useless when loading input text 3917 const XMMRegister xmm_from5 = xmm4; 3918 3919 //for key_128, key_192, key_256 3920 const int rounds[3] = {10, 12, 14}; 3921 Label L_exit_preLoop, L_preLoop_start; 3922 Label L_multiBlock_loopTop[3]; 3923 Label L_singleBlockLoopTop[3]; 3924 Label L__incCounter[3][6]; //for 6 blocks 3925 Label L__incCounter_single[3]; //for single block, key128, key192, key256 3926 Label L_processTail_insr[3], L_processTail_4_insr[3], L_processTail_2_insr[3], L_processTail_1_insr[3], L_processTail_exit_insr[3]; 3927 Label L_processTail_extr[3], L_processTail_4_extr[3], L_processTail_2_extr[3], L_processTail_1_extr[3], L_processTail_exit_extr[3]; 3928 3929 Label L_exit; 3930 3931 __ enter(); // required for proper stackwalking of RuntimeStub frame 3932 3933 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3934 // context for the registers used, where all instructions below are using 128-bit mode 3935 // On EVEX without VL and BW, these instructions will all be AVX. 
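    // Recap of the CTR construction implemented below: out[i] = in[i] XOR
    // AES_encrypt(counter), with the counter incremented once per 16-byte
    // block, so encryption and decryption are the same operation. A byte
    // level sketch (helper names hypothetical, for illustration only):
    //
    //   for (size_t i = 0; i < len; i++) {
    //     if (used == 16) {                      // refill keystream block
    //       aes_encrypt_block(key, counter, encCounter);
    //       increment_counter(counter);
    //       used = 0;
    //     }
    //     out[i] = in[i] ^ encCounter[used++];
    //   }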
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

#ifdef _WIN64
    // allocate spill slots for r13, r14
    enum {
      saved_r13_offset,
      saved_r14_offset
    };
    __ subptr(rsp, 2 * wordSize);
    __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
    __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);

    // on win64, fill len_reg from stack position
    __ movl(len_reg, len_mem);
    __ movptr(saved_encCounter_start, saved_encCounter_mem);
    __ movptr(used_addr, used_mem);
    __ movl(used, Address(used_addr, 0));
#else
    __ push(len_reg); // Save
    __ movptr(used_addr, used_mem);
    __ movl(used, Address(used_addr, 0));
#endif

    __ push(rbx); // Save RBX
    __ movdqu(xmm_curr_counter, Address(counter, 0x00)); // initialize counter with initial counter
    __ movdqu(xmm_counter_shuf_mask, ExternalAddress(StubRoutines::x86::counter_shuffle_mask_addr()), pos); // pos as scratch
    __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); // counter is shuffled
    __ movptr(pos, 0);

    // Use the partially used encrypted counter from last invocation
    __ BIND(L_preLoop_start);
    __ cmpptr(used, 16);
    __ jcc(Assembler::aboveEqual, L_exit_preLoop);
    __ cmpptr(len_reg, 0);
    __ jcc(Assembler::lessEqual, L_exit_preLoop);
    __ movb(rbx, Address(saved_encCounter_start, used));
    __ xorb(rbx, Address(from, pos));
    __ movb(Address(to, pos), rbx);
    __ addptr(pos, 1);
    __ addptr(used, 1);
    __ subptr(len_reg, 1);

    __ jmp(L_preLoop_start);

    __ BIND(L_exit_preLoop);
    __ movl(Address(used_addr, 0), used);

    // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()), rbx); // rbx as scratch
    __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
    __ cmpl(rbx, 52);
    __ jcc(Assembler::equal, L_multiBlock_loopTop[1]);
    __ cmpl(rbx, 60);
    __ jcc(Assembler::equal, L_multiBlock_loopTop[2]);

#define CTR_DoSix(opc, src_reg)        \
    __ opc(xmm_result0, src_reg);      \
    __ opc(xmm_result1, src_reg);      \
    __ opc(xmm_result2, src_reg);      \
    __ opc(xmm_result3, src_reg);      \
    __ opc(xmm_result4, src_reg);      \
    __ opc(xmm_result5, src_reg);

    // k == 0 :  generate code for key_128
    // k == 1 :  generate code for key_192
    // k == 2 :  generate code for key_256
    for (int k = 0; k < 3; ++k) {
      // multi blocks starts here
      __ align(OptoLoopAlignment);
      __ BIND(L_multiBlock_loopTop[k]);
      __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least PARALLEL_FACTOR blocks left
      __ jcc(Assembler::less, L_singleBlockLoopTop[k]);
      load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);

      // load, then increase counters
      CTR_DoSix(movdqa, xmm_curr_counter);
      inc_counter(rbx, xmm_result1, 0x01, L__incCounter[k][0]);
      inc_counter(rbx, xmm_result2, 0x02, L__incCounter[k][1]);
      inc_counter(rbx, xmm_result3, 0x03, L__incCounter[k][2]);
      inc_counter(rbx, xmm_result4, 0x04, L__incCounter[k][3]);
      inc_counter(rbx, xmm_result5, 0x05, L__incCounter[k][4]);
      inc_counter(rbx, xmm_curr_counter, 0x06, L__incCounter[k][5]);
      CTR_DoSix(pshufb, xmm_counter_shuf_mask); // after increased, shuffled counters back for PXOR
      CTR_DoSix(pxor, xmm_key_tmp0);            // PXOR with Round 0 key
      // load two ROUND_KEYs at a time
      for (int i = 1; i < rounds[k]; ) {
        load_key(xmm_key_tmp1, key, (0x10 * i), xmm_key_shuf_mask);
        load_key(xmm_key_tmp0, key, (0x10 * (i+1)), xmm_key_shuf_mask);
        CTR_DoSix(aesenc, xmm_key_tmp1);
        i++;
        if (i != rounds[k]) {
          CTR_DoSix(aesenc, xmm_key_tmp0);
        } else {
          CTR_DoSix(aesenclast, xmm_key_tmp0);
        }
        i++;
      }

      // get next PARALLEL_FACTOR blocks into xmm_result registers
      __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ movdqu(xmm_from1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ movdqu(xmm_from2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ movdqu(xmm_from3, Address(from, pos, Address::times_1, 3 * AESBlockSize));
      __ movdqu(xmm_from4, Address(from, pos, Address::times_1, 4 * AESBlockSize));
      __ movdqu(xmm_from5, Address(from, pos, Address::times_1, 5 * AESBlockSize));

      __ pxor(xmm_result0, xmm_from0);
      __ pxor(xmm_result1, xmm_from1);
      __ pxor(xmm_result2, xmm_from2);
      __ pxor(xmm_result3, xmm_from3);
      __ pxor(xmm_result4, xmm_from4);
      __ pxor(xmm_result5, xmm_from5);

      // store 6 results into the next 96 bytes of output
      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
      __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1);
      __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2);
      __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3);
      __ movdqu(Address(to, pos, Address::times_1, 4 * AESBlockSize), xmm_result4);
      __ movdqu(Address(to, pos, Address::times_1, 5 * AESBlockSize), xmm_result5);

      __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); // increase the length of crypt text
      __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // decrease the remaining length
      __ jmp(L_multiBlock_loopTop[k]);

      // singleBlock starts here
      __ align(OptoLoopAlignment);
      __ BIND(L_singleBlockLoopTop[k]);
      __ cmpptr(len_reg, 0);
      __ jcc(Assembler::lessEqual, L_exit);
      load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);
      __ movdqa(xmm_result0, xmm_curr_counter);
      inc_counter(rbx, xmm_curr_counter, 0x01, L__incCounter_single[k]);
      __ pshufb(xmm_result0, xmm_counter_shuf_mask);
      __ pxor(xmm_result0, xmm_key_tmp0);
      for (int i = 1; i < rounds[k]; i++) {
        load_key(xmm_key_tmp0, key, (0x10 * i), xmm_key_shuf_mask);
        __ aesenc(xmm_result0, xmm_key_tmp0);
      }
      load_key(xmm_key_tmp0, key, (rounds[k] * 0x10), xmm_key_shuf_mask);
      __ aesenclast(xmm_result0, xmm_key_tmp0);
      __ cmpptr(len_reg, AESBlockSize);
      __ jcc(Assembler::less, L_processTail_insr[k]);
      __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ pxor(xmm_result0, xmm_from0);
      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
      __ addptr(pos, AESBlockSize);
      __ subptr(len_reg, AESBlockSize);
      __ jmp(L_singleBlockLoopTop[k]);
      __ BIND(L_processTail_insr[k]); // Process the tail part of the input array
      __ addptr(pos, len_reg); // 1.
Insert bytes from src array into xmm_from0 register 4091 __ testptr(len_reg, 8); 4092 __ jcc(Assembler::zero, L_processTail_4_insr[k]); 4093 __ subptr(pos,8); 4094 __ pinsrq(xmm_from0, Address(from, pos), 0); 4095 __ BIND(L_processTail_4_insr[k]); 4096 __ testptr(len_reg, 4); 4097 __ jcc(Assembler::zero, L_processTail_2_insr[k]); 4098 __ subptr(pos,4); 4099 __ pslldq(xmm_from0, 4); 4100 __ pinsrd(xmm_from0, Address(from, pos), 0); 4101 __ BIND(L_processTail_2_insr[k]); 4102 __ testptr(len_reg, 2); 4103 __ jcc(Assembler::zero, L_processTail_1_insr[k]); 4104 __ subptr(pos, 2); 4105 __ pslldq(xmm_from0, 2); 4106 __ pinsrw(xmm_from0, Address(from, pos), 0); 4107 __ BIND(L_processTail_1_insr[k]); 4108 __ testptr(len_reg, 1); 4109 __ jcc(Assembler::zero, L_processTail_exit_insr[k]); 4110 __ subptr(pos, 1); 4111 __ pslldq(xmm_from0, 1); 4112 __ pinsrb(xmm_from0, Address(from, pos), 0); 4113 __ BIND(L_processTail_exit_insr[k]); 4114 4115 __ movdqu(Address(saved_encCounter_start, 0), xmm_result0); // 2. Perform pxor of the encrypted counter and plaintext Bytes. 4116 __ pxor(xmm_result0, xmm_from0); // Also the encrypted counter is saved for next invocation. 4117 4118 __ testptr(len_reg, 8); 4119 __ jcc(Assembler::zero, L_processTail_4_extr[k]); // 3. Extract bytes from xmm_result0 into the dest. array 4120 __ pextrq(Address(to, pos), xmm_result0, 0); 4121 __ psrldq(xmm_result0, 8); 4122 __ addptr(pos, 8); 4123 __ BIND(L_processTail_4_extr[k]); 4124 __ testptr(len_reg, 4); 4125 __ jcc(Assembler::zero, L_processTail_2_extr[k]); 4126 __ pextrd(Address(to, pos), xmm_result0, 0); 4127 __ psrldq(xmm_result0, 4); 4128 __ addptr(pos, 4); 4129 __ BIND(L_processTail_2_extr[k]); 4130 __ testptr(len_reg, 2); 4131 __ jcc(Assembler::zero, L_processTail_1_extr[k]); 4132 __ pextrw(Address(to, pos), xmm_result0, 0); 4133 __ psrldq(xmm_result0, 2); 4134 __ addptr(pos, 2); 4135 __ BIND(L_processTail_1_extr[k]); 4136 __ testptr(len_reg, 1); 4137 __ jcc(Assembler::zero, L_processTail_exit_extr[k]); 4138 __ pextrb(Address(to, pos), xmm_result0, 0); 4139 4140 __ BIND(L_processTail_exit_extr[k]); 4141 __ movl(Address(used_addr, 0), len_reg); 4142 __ jmp(L_exit); 4143 4144 } 4145 4146 __ BIND(L_exit); 4147 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); //counter is shuffled back. 4148 __ movdqu(Address(counter, 0), xmm_curr_counter); //save counter back 4149 __ pop(rbx); // pop the saved RBX. 
4150 #ifdef _WIN64 4151 __ movl(rax, len_mem); 4152 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 4153 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 4154 __ addptr(rsp, 2 * wordSize); 4155 #else 4156 __ pop(rax); // return 'len' 4157 #endif 4158 __ leave(); // required for proper stackwalking of RuntimeStub frame 4159 __ ret(0); 4160 return start; 4161 } 4162 4163 // byte swap x86 long 4164 address generate_ghash_long_swap_mask() { 4165 __ align(CodeEntryAlignment); 4166 StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask"); 4167 address start = __ pc(); 4168 __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none ); 4169 __ emit_data64(0x0706050403020100, relocInfo::none ); 4170 return start; 4171 } 4172 4173 // byte swap x86 byte array 4174 address generate_ghash_byte_swap_mask() { 4175 __ align(CodeEntryAlignment); 4176 StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask"); 4177 address start = __ pc(); 4178 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none ); 4179 __ emit_data64(0x0001020304050607, relocInfo::none ); 4180 return start; 4181 } 4182 4183 /* Single and multi-block ghash operations */ 4184 address generate_ghash_processBlocks() { 4185 __ align(CodeEntryAlignment); 4186 Label L_ghash_loop, L_exit; 4187 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 4188 address start = __ pc(); 4189 4190 const Register state = c_rarg0; 4191 const Register subkeyH = c_rarg1; 4192 const Register data = c_rarg2; 4193 const Register blocks = c_rarg3; 4194 4195 const XMMRegister xmm_temp0 = xmm0; 4196 const XMMRegister xmm_temp1 = xmm1; 4197 const XMMRegister xmm_temp2 = xmm2; 4198 const XMMRegister xmm_temp3 = xmm3; 4199 const XMMRegister xmm_temp4 = xmm4; 4200 const XMMRegister xmm_temp5 = xmm5; 4201 const XMMRegister xmm_temp6 = xmm6; 4202 const XMMRegister xmm_temp7 = xmm7; 4203 const XMMRegister xmm_temp8 = xmm8; 4204 const XMMRegister xmm_temp9 = xmm9; 4205 const XMMRegister xmm_temp10 = xmm10; 4206 4207 __ enter(); 4208 4209 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 4210 // context for the registers used, where all instructions below are using 128-bit mode 4211 // On EVEX without VL and BW, these instructions will all be AVX. 
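    // Recap of the GHASH computation done below: for each 16-byte block,
    // state <- (state ^ block) * H in GF(2^128), where the multiply is
    // built from PCLMULQDQ plus a polynomial reduction. Loop-structure
    // sketch (helper names hypothetical, for illustration only):
    //
    //   while (blocks-- > 0) {
    //     xor_128(state, data);              // state ^= next data block
    //     gf128_mul(state, subkeyH);         // carry-less mul + reduction
    //     data += 16;
    //   }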
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

    __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));

    __ movdqu(xmm_temp0, Address(state, 0));
    __ pshufb(xmm_temp0, xmm_temp10);


    __ BIND(L_ghash_loop);
    __ movdqu(xmm_temp2, Address(data, 0));
    __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr()));

    __ movdqu(xmm_temp1, Address(subkeyH, 0));
    __ pshufb(xmm_temp1, xmm_temp10);

    __ pxor(xmm_temp0, xmm_temp2);

    //
    // Multiply with the hash key
    //
    __ movdqu(xmm_temp3, xmm_temp0);
    __ pclmulqdq(xmm_temp3, xmm_temp1, 0);      // xmm3 holds a0*b0
    __ movdqu(xmm_temp4, xmm_temp0);
    __ pclmulqdq(xmm_temp4, xmm_temp1, 16);     // xmm4 holds a0*b1

    __ movdqu(xmm_temp5, xmm_temp0);
    __ pclmulqdq(xmm_temp5, xmm_temp1, 1);      // xmm5 holds a1*b0
    __ movdqu(xmm_temp6, xmm_temp0);
    __ pclmulqdq(xmm_temp6, xmm_temp1, 17);     // xmm6 holds a1*b1

    __ pxor(xmm_temp4, xmm_temp5);              // xmm4 holds a0*b1 + a1*b0

    __ movdqu(xmm_temp5, xmm_temp4);            // move the contents of xmm4 to xmm5
    __ psrldq(xmm_temp4, 8);                    // shift xmm4 by 64 bits to the right
    __ pslldq(xmm_temp5, 8);                    // shift xmm5 by 64 bits to the left
    __ pxor(xmm_temp3, xmm_temp5);
    __ pxor(xmm_temp6, xmm_temp4);              // Register pair <xmm6:xmm3> holds the result
                                                // of the carry-less multiplication of
                                                // xmm0 by xmm1.

    // We shift the result of the multiplication by one bit position
    // to the left to cope with the fact that the bits are reversed.
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp8, xmm_temp6);
    __ pslld(xmm_temp3, 1);
    __ pslld(xmm_temp6, 1);
    __ psrld(xmm_temp7, 31);
    __ psrld(xmm_temp8, 31);
    __ movdqu(xmm_temp9, xmm_temp7);
    __ pslldq(xmm_temp8, 4);
    __ pslldq(xmm_temp7, 4);
    __ psrldq(xmm_temp9, 12);
    __ por(xmm_temp3, xmm_temp7);
    __ por(xmm_temp6, xmm_temp8);
    __ por(xmm_temp6, xmm_temp9);

    //
    // First phase of the reduction
    //
    // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts
    // independently.
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp8, xmm_temp3);
    __ movdqu(xmm_temp9, xmm_temp3);
    __ pslld(xmm_temp7, 31);    // packed left shift, << 31
    __ pslld(xmm_temp8, 30);    // packed left shift, << 30
    __ pslld(xmm_temp9, 25);    // packed left shift, << 25
    __ pxor(xmm_temp7, xmm_temp8); // xor the shifted versions
    __ pxor(xmm_temp7, xmm_temp9);
    __ movdqu(xmm_temp8, xmm_temp7);
    __ pslldq(xmm_temp7, 12);
    __ psrldq(xmm_temp8, 4);
    __ pxor(xmm_temp3, xmm_temp7); // first phase of the reduction complete
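    // Recap before the second phase: the four PCLMULQDQ products above form
    // the 256-bit carry-less product schoolbook-style,
    //   (a1:a0)*(b1:b0) = a1*b1 << 128  ^  (a0*b1 ^ a1*b0) << 64  ^  a0*b0.
    // An 8-bit reference model of carry-less multiplication (illustrative
    // only):
    //
    //   static uint16_t clmul8(uint8_t a, uint8_t b) {
    //     uint16_t r = 0;
    //     for (int i = 0; i < 8; i++) {
    //       if ((b >> i) & 1) r ^= (uint16_t)a << i;  // XOR instead of add
    //     }
    //     return r;
    //   }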
    //
    // Second phase of the reduction
    //
    // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these
    // shift operations.
    __ movdqu(xmm_temp2, xmm_temp3);
    __ movdqu(xmm_temp4, xmm_temp3);
    __ movdqu(xmm_temp5, xmm_temp3);
    __ psrld(xmm_temp2, 1);     // packed right shift, >> 1
    __ psrld(xmm_temp4, 2);     // packed right shift, >> 2
    __ psrld(xmm_temp5, 7);     // packed right shift, >> 7
    __ pxor(xmm_temp2, xmm_temp4); // xor the shifted versions
    __ pxor(xmm_temp2, xmm_temp5);
    __ pxor(xmm_temp2, xmm_temp8);
    __ pxor(xmm_temp3, xmm_temp2);
    __ pxor(xmm_temp6, xmm_temp3); // the result is in xmm6

    __ decrement(blocks);
    __ jcc(Assembler::zero, L_exit);
    __ movdqu(xmm_temp0, xmm_temp6);
    __ addptr(data, 16);
    __ jmp(L_ghash_loop);

    __ BIND(L_exit);
    __ pshufb(xmm_temp6, xmm_temp10);          // Byte swap 16-byte result
    __ movdqu(Address(state, 0), xmm_temp6);   // store the result
    __ leave();
    __ ret(0);
    return start;
  }

  /**
   *  Arguments:
   *
   * Inputs:
   *   c_rarg0   - int crc
   *   c_rarg1   - byte* buf
   *   c_rarg2   - int length
   *
   * Output:
   *       rax   - int crc result
   */
  address generate_updateBytesCRC32() {
    assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    // rscratch1: r10
    const Register crc   = c_rarg0;  // crc
    const Register buf   = c_rarg1;  // source java byte array address
    const Register len   = c_rarg2;  // length
    const Register table = c_rarg3;  // crc_table address (reuse register)
    const Register tmp   = r11;
    assert_different_registers(crc, buf, len, table, tmp, rax);

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ kernel_crc32(crc, buf, len, table, tmp);

    __ movl(rax, crc);
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
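  // For reference, a bit-at-a-time model of the CRC32 this stub accelerates
  // (reflected polynomial 0xEDB88320, as used by java.util.zip.CRC32);
  // kernel_crc32 computes the same function using CLMUL folding. Sketch
  // only, not part of the stub:
  //
  //   static uint32_t crc32_bitwise(uint32_t crc, const uint8_t* buf, size_t len) {
  //     crc = ~crc;
  //     while (len-- > 0) {
  //       crc ^= *buf++;
  //       for (int k = 0; k < 8; k++)
  //         crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1u)));
  //     }
  //     return ~crc;
  //   }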
  /**
   *  Arguments:
   *
   * Inputs:
   *   c_rarg0   - int crc
   *   c_rarg1   - byte* buf
   *   c_rarg2   - long length
   *   c_rarg3   - table_start - optional (present only when doing a library_call,
   *               not used by x86 algorithm)
   *
   * Output:
   *       rax   - int crc result
   */
  address generate_updateBytesCRC32C(bool is_pclmulqdq_supported) {
    assert(UseCRC32CIntrinsics, "need SSE4_2");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C");
    address start = __ pc();
    // reg.arg   int#0        int#1        int#2        int#3        int#4        int#5        float regs
    // Windows   RCX          RDX          R8           R9           none         none         XMM0..XMM3
    // Lin / Sol RDI          RSI          RDX          RCX          R8           R9           XMM0..XMM7
    const Register crc = c_rarg0;  // crc
    const Register buf = c_rarg1;  // source java byte array address
    const Register len = c_rarg2;  // length
    const Register a = rax;
    const Register j = r9;
    const Register k = r10;
    const Register l = r11;
#ifdef _WIN64
    const Register y = rdi;
    const Register z = rsi;
#else
    const Register y = rcx;
    const Register z = r8;
#endif
    assert_different_registers(crc, buf, len, a, j, k, l, y, z);

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame
#ifdef _WIN64
    __ push(y);
    __ push(z);
#endif
    __ crc32c_ipl_alg2_alt2(crc, buf, len,
                            a, j, k,
                            l, y, z,
                            c_farg0, c_farg1, c_farg2,
                            is_pclmulqdq_supported);
    __ movl(rax, crc);
#ifdef _WIN64
    __ pop(z);
    __ pop(y);
#endif
    __ vzeroupper();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  /**
   *  Arguments:
   *
   *  Input:
   *    c_rarg0   - x address
   *    c_rarg1   - x length
   *    c_rarg2   - y address
   *    c_rarg3   - y length
   *   not Win64
   *    c_rarg4   - z address
   *    c_rarg5   - z length
   *   Win64
   *    rsp+40    - z address
   *    rsp+48    - z length
   */
  address generate_multiplyToLen() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "multiplyToLen");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register x    = rdi;
    const Register xlen = rax;
    const Register y    = rsi;
    const Register ylen = rcx;
    const Register z    = r8;
    const Register zlen = r11;

    // Next registers will be saved on stack in multiply_to_len().
    const Register tmp1 = r12;
    const Register tmp2 = r13;
    const Register tmp3 = r14;
    const Register tmp4 = r15;
    const Register tmp5 = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifndef _WIN64
    __ movptr(zlen, r9); // Save r9 in r11 - zlen
#endif
    setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx
                       // ylen => rcx, z => r8, zlen => r11
                       // r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
    // last 2 arguments (#4, #5) are on stack on Win64
    __ movptr(z, Address(rsp, 6 * wordSize));
    __ movptr(zlen, Address(rsp, 7 * wordSize));
#endif

    __ movptr(xlen, rsi);
    __ movptr(y, rdx);
    __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5);

    restore_arg_regs();

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
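  // multiply_to_len computes z = x * y over 32-bit limbs stored most
  // significant limb first (BigInteger layout). A schoolbook reference of
  // what the assembler implements, assuming z is zero-initialized
  // (illustrative only):
  //
  //   for (int i = xlen - 1; i >= 0; i--) {
  //     uint64_t carry = 0;
  //     for (int j = ylen - 1, k = ylen + i; j >= 0; j--, k--) {
  //       uint64_t p = (uint64_t)x[i] * y[j] + z[k] + carry;
  //       z[k] = (uint32_t)p;
  //       carry = p >> 32;
  //     }
  //     z[i] = (uint32_t)carry;
  //   }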
  /**
   * Arguments:
   *
   * Input:
   *   c_rarg0   - obja     address
   *   c_rarg1   - objb     address
   *   c_rarg2   - length   length
   *   c_rarg3   - scale    log2_array_indxscale
   *
   * Output:
   *   rax   - int >= mismatched index, < 0 bitwise complement of tail
   */
  address generate_vectorizedMismatch() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "vectorizedMismatch");
    address start = __ pc();

    BLOCK_COMMENT("Entry:");
    __ enter();

#ifdef _WIN64  // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register scale = c_rarg0;  // rcx, will exchange with r9
    const Register objb = c_rarg1;   // rdx
    const Register length = c_rarg2; // r8
    const Register obja = c_rarg3;   // r9
    __ xchgq(obja, scale);  // now obja and scale contain the correct contents

    const Register tmp1 = r10;
    const Register tmp2 = r11;
#endif
#ifndef _WIN64 // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register obja = c_rarg0;   // U:rdi
    const Register objb = c_rarg1;   // U:rsi
    const Register length = c_rarg2; // U:rdx
    const Register scale = c_rarg3;  // U:rcx
    const Register tmp1 = r8;
    const Register tmp2 = r9;
#endif
    const Register result = rax; // return value
    const XMMRegister vec0 = xmm0;
    const XMMRegister vec1 = xmm1;
    const XMMRegister vec2 = xmm2;

    __ vectorized_mismatch(obja, objb, length, scale, result, tmp1, tmp2, vec0, vec1, vec2);

    __ vzeroupper();
    __ leave();
    __ ret(0);

    return start;
  }

  /**
   * Arguments:
   *
   * Input:
   *   c_rarg0   - x address
   *   c_rarg1   - x length
   *   c_rarg2   - z address
   *   c_rarg3   - z length
   *
   */
  address generate_squareToLen() {

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "squareToLen");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...)
    const Register x    = rdi;
    const Register len  = rsi;
    const Register z    = r8;
    const Register zlen = rcx;

    const Register tmp1 = r12;
    const Register tmp2 = r13;
    const Register tmp3 = r14;
    const Register tmp4 = r15;
    const Register tmp5 = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    setup_arg_regs(4); // x => rdi, len => rsi, z => rdx
                       // zlen => rcx
                       // r9 and r10 may be used to save non-volatile registers
    __ movptr(r8, rdx);
    __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax);

    restore_arg_regs();

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  /**
   * Arguments:
   *
   * Input:
   *   c_rarg0   - out address
   *   c_rarg1   - in address
   *   c_rarg2   - offset
   *   c_rarg3   - len
   *  not Win64
   *   c_rarg4   - k
   *  Win64
   *   rsp+40    - k
   */
  address generate_mulAdd() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "mulAdd");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register out    = rdi;
    const Register in     = rsi;
    const Register offset = r11;
    const Register len    = rcx;
    const Register k      = r8;

    // Next registers will be saved on stack in mul_add().
4609 const Register tmp1 = r12; 4610 const Register tmp2 = r13; 4611 const Register tmp3 = r14; 4612 const Register tmp4 = r15; 4613 const Register tmp5 = rbx; 4614 4615 BLOCK_COMMENT("Entry:"); 4616 __ enter(); // required for proper stackwalking of RuntimeStub frame 4617 4618 setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx 4619 // len => rcx, k => r8 4620 // r9 and r10 may be used to save non-volatile registers 4621 #ifdef _WIN64 4622 // last argument is on stack on Win64 4623 __ movl(k, Address(rsp, 6 * wordSize)); 4624 #endif 4625 __ movptr(r11, rdx); // move offset in rdx to offset(r11) 4626 __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 4627 4628 restore_arg_regs(); 4629 4630 __ leave(); // required for proper stackwalking of RuntimeStub frame 4631 __ ret(0); 4632 4633 return start; 4634 } 4635 4636 address generate_libmExp() { 4637 StubCodeMark mark(this, "StubRoutines", "libmExp"); 4638 4639 address start = __ pc(); 4640 4641 const XMMRegister x0 = xmm0; 4642 const XMMRegister x1 = xmm1; 4643 const XMMRegister x2 = xmm2; 4644 const XMMRegister x3 = xmm3; 4645 4646 const XMMRegister x4 = xmm4; 4647 const XMMRegister x5 = xmm5; 4648 const XMMRegister x6 = xmm6; 4649 const XMMRegister x7 = xmm7; 4650 4651 const Register tmp = r11; 4652 4653 BLOCK_COMMENT("Entry:"); 4654 __ enter(); // required for proper stackwalking of RuntimeStub frame 4655 4656 __ fast_exp(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 4657 4658 __ leave(); // required for proper stackwalking of RuntimeStub frame 4659 __ ret(0); 4660 4661 return start; 4662 4663 } 4664 4665 address generate_libmLog() { 4666 StubCodeMark mark(this, "StubRoutines", "libmLog"); 4667 4668 address start = __ pc(); 4669 4670 const XMMRegister x0 = xmm0; 4671 const XMMRegister x1 = xmm1; 4672 const XMMRegister x2 = xmm2; 4673 const XMMRegister x3 = xmm3; 4674 4675 const XMMRegister x4 = xmm4; 4676 const XMMRegister x5 = xmm5; 4677 const XMMRegister x6 = xmm6; 4678 const XMMRegister x7 = xmm7; 4679 4680 const Register tmp1 = r11; 4681 const Register tmp2 = r8; 4682 4683 BLOCK_COMMENT("Entry:"); 4684 __ enter(); // required for proper stackwalking of RuntimeStub frame 4685 4686 __ fast_log(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2); 4687 4688 __ leave(); // required for proper stackwalking of RuntimeStub frame 4689 __ ret(0); 4690 4691 return start; 4692 4693 } 4694 4695 address generate_libmLog10() { 4696 StubCodeMark mark(this, "StubRoutines", "libmLog10"); 4697 4698 address start = __ pc(); 4699 4700 const XMMRegister x0 = xmm0; 4701 const XMMRegister x1 = xmm1; 4702 const XMMRegister x2 = xmm2; 4703 const XMMRegister x3 = xmm3; 4704 4705 const XMMRegister x4 = xmm4; 4706 const XMMRegister x5 = xmm5; 4707 const XMMRegister x6 = xmm6; 4708 const XMMRegister x7 = xmm7; 4709 4710 const Register tmp = r11; 4711 4712 BLOCK_COMMENT("Entry:"); 4713 __ enter(); // required for proper stackwalking of RuntimeStub frame 4714 4715 __ fast_log10(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 4716 4717 __ leave(); // required for proper stackwalking of RuntimeStub frame 4718 __ ret(0); 4719 4720 return start; 4721 4722 } 4723 4724 address generate_libmPow() { 4725 StubCodeMark mark(this, "StubRoutines", "libmPow"); 4726 4727 address start = __ pc(); 4728 4729 const XMMRegister x0 = xmm0; 4730 const XMMRegister x1 = xmm1; 4731 const XMMRegister x2 = xmm2; 4732 const XMMRegister x3 = xmm3; 4733 4734 const XMMRegister x4 = xmm4; 4735 const XMMRegister x5 = xmm5; 4736 const XMMRegister 
x6 = xmm6; 4737 const XMMRegister x7 = xmm7; 4738 4739 const Register tmp1 = r8; 4740 const Register tmp2 = r9; 4741 const Register tmp3 = r10; 4742 const Register tmp4 = r11; 4743 4744 BLOCK_COMMENT("Entry:"); 4745 __ enter(); // required for proper stackwalking of RuntimeStub frame 4746 4747 __ fast_pow(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4748 4749 __ leave(); // required for proper stackwalking of RuntimeStub frame 4750 __ ret(0); 4751 4752 return start; 4753 4754 } 4755 4756 address generate_libmSin() { 4757 StubCodeMark mark(this, "StubRoutines", "libmSin"); 4758 4759 address start = __ pc(); 4760 4761 const XMMRegister x0 = xmm0; 4762 const XMMRegister x1 = xmm1; 4763 const XMMRegister x2 = xmm2; 4764 const XMMRegister x3 = xmm3; 4765 4766 const XMMRegister x4 = xmm4; 4767 const XMMRegister x5 = xmm5; 4768 const XMMRegister x6 = xmm6; 4769 const XMMRegister x7 = xmm7; 4770 4771 const Register tmp1 = r8; 4772 const Register tmp2 = r9; 4773 const Register tmp3 = r10; 4774 const Register tmp4 = r11; 4775 4776 BLOCK_COMMENT("Entry:"); 4777 __ enter(); // required for proper stackwalking of RuntimeStub frame 4778 4779 #ifdef _WIN64 4780 __ push(rsi); 4781 __ push(rdi); 4782 #endif 4783 __ fast_sin(x0, x1, x2, x3, x4, x5, x6, x7, rax, rbx, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4784 4785 #ifdef _WIN64 4786 __ pop(rdi); 4787 __ pop(rsi); 4788 #endif 4789 4790 __ leave(); // required for proper stackwalking of RuntimeStub frame 4791 __ ret(0); 4792 4793 return start; 4794 4795 } 4796 4797 address generate_libmCos() { 4798 StubCodeMark mark(this, "StubRoutines", "libmCos"); 4799 4800 address start = __ pc(); 4801 4802 const XMMRegister x0 = xmm0; 4803 const XMMRegister x1 = xmm1; 4804 const XMMRegister x2 = xmm2; 4805 const XMMRegister x3 = xmm3; 4806 4807 const XMMRegister x4 = xmm4; 4808 const XMMRegister x5 = xmm5; 4809 const XMMRegister x6 = xmm6; 4810 const XMMRegister x7 = xmm7; 4811 4812 const Register tmp1 = r8; 4813 const Register tmp2 = r9; 4814 const Register tmp3 = r10; 4815 const Register tmp4 = r11; 4816 4817 BLOCK_COMMENT("Entry:"); 4818 __ enter(); // required for proper stackwalking of RuntimeStub frame 4819 4820 #ifdef _WIN64 4821 __ push(rsi); 4822 __ push(rdi); 4823 #endif 4824 __ fast_cos(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4825 4826 #ifdef _WIN64 4827 __ pop(rdi); 4828 __ pop(rsi); 4829 #endif 4830 4831 __ leave(); // required for proper stackwalking of RuntimeStub frame 4832 __ ret(0); 4833 4834 return start; 4835 4836 } 4837 4838 address generate_libmTan() { 4839 StubCodeMark mark(this, "StubRoutines", "libmTan"); 4840 4841 address start = __ pc(); 4842 4843 const XMMRegister x0 = xmm0; 4844 const XMMRegister x1 = xmm1; 4845 const XMMRegister x2 = xmm2; 4846 const XMMRegister x3 = xmm3; 4847 4848 const XMMRegister x4 = xmm4; 4849 const XMMRegister x5 = xmm5; 4850 const XMMRegister x6 = xmm6; 4851 const XMMRegister x7 = xmm7; 4852 4853 const Register tmp1 = r8; 4854 const Register tmp2 = r9; 4855 const Register tmp3 = r10; 4856 const Register tmp4 = r11; 4857 4858 BLOCK_COMMENT("Entry:"); 4859 __ enter(); // required for proper stackwalking of RuntimeStub frame 4860 4861 #ifdef _WIN64 4862 __ push(rsi); 4863 __ push(rdi); 4864 #endif 4865 __ fast_tan(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4866 4867 #ifdef _WIN64 4868 __ pop(rdi); 4869 __ pop(rsi); 4870 #endif 4871 4872 __ leave(); // required for proper stackwalking of RuntimeStub frame 4873 __ ret(0); 4874 4875 return 
#undef __
#define __ masm->

  // Continuation point for throwing of implicit exceptions that are
  // not handled in the current activation. Fabricates an exception
  // oop and initiates normal exception dispatching in this
  // frame. Since we need to preserve callee-saved values (currently
  // only for C2, but done for C1 as well) we need a callee-saved oop
  // map and therefore have to make these stubs into RuntimeStubs
  // rather than BufferBlobs. If the compiler needs all registers to
  // be preserved between the fault point and the exception handler
  // then it must assume responsibility for that in
  // AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other
  // implicit exceptions (e.g., NullPointerException or
  // AbstractMethodError on entry) are either at call sites or
  // otherwise assume that stack unwinding will be initiated, so
  // caller-saved registers were assumed volatile in the compiler.
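  // In outline, each stub produced by generate_throw_exception() behaves
  // like the following pseudocode (illustrative only, not the emitted
  // assembly):
  //
  //   throw_stub:
  //     enter                                // build RuntimeStub frame
  //     set_last_Java_frame(rsp, rbp, pc)    // so the runtime can walk the stack
  //     runtime_entry(thread, arg1, arg2)    // fabricates the exception oop
  //     reset_last_Java_frame
  //     leave
  //     jmp StubRoutines::forward_exception_entry()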
  address generate_throw_exception(const char* name,
                                   address runtime_entry,
                                   Register arg1 = noreg,
                                   Register arg2 = noreg) {
    // Information about frame layout at time of blocking runtime call.
    // Note that we only have to preserve callee-saved registers since
    // the compilers are responsible for supplying a continuation point
    // if they expect all registers to be preserved.
    enum layout {
      rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
      rbp_off2,
      return_off,
      return_off2,
      framesize // inclusive of return address
    };

    int insts_size = 512;
    int locs_size  = 64;

    CodeBuffer code(name, insts_size, locs_size);
    OopMapSet* oop_maps  = new OopMapSet();
    MacroAssembler* masm = new MacroAssembler(&code);

    address start = __ pc();

    // This is an inlined and slightly modified version of call_VM
    // which has the ability to fetch the return PC out of
    // thread-local storage and also sets up last_Java_sp slightly
    // differently than the real call_VM

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    assert(is_even(framesize/2), "sp not 16-byte aligned");

    // return address and rbp are already in place
    __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog

    int frame_complete = __ pc() - start;

    // Set up last_Java_sp and last_Java_fp
    address the_pc = __ pc();
    __ set_last_Java_frame(rsp, rbp, the_pc);
    __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack

    // Call runtime
    if (arg1 != noreg) {
      assert(arg2 != c_rarg1, "clobbered");
      __ movptr(c_rarg1, arg1);
    }
    if (arg2 != noreg) {
      __ movptr(c_rarg2, arg2);
    }
    __ movptr(c_rarg0, r15_thread);
    BLOCK_COMMENT("call runtime_entry");
    __ call(RuntimeAddress(runtime_entry));

    // Generate oop map
    OopMap* map = new OopMap(framesize, 0);

    oop_maps->add_gc_map(the_pc - start, map);

    __ reset_last_Java_frame(true);

    __ leave(); // required for proper stackwalking of RuntimeStub frame

    // check for pending exceptions
#ifdef ASSERT
    Label L;
    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()),
              (int32_t) NULL_WORD);
    __ jcc(Assembler::notEqual, L);
    __ should_not_reach_here();
    __ bind(L);
#endif // ASSERT
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

    // codeBlob framesize is in words (not VMRegImpl::slot_size)
    RuntimeStub* stub =
      RuntimeStub::new_runtime_stub(name,
                                    &code,
                                    frame_complete,
                                    (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                    oop_maps, false);
    return stub->entry_point();
  }

  void create_control_words() {
    // Round to nearest, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_std   = 0x027F;
    // Round to zero, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
    // Round to nearest, 24-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_24    = 0x007F;
    // Round to nearest, 64-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_64    = 0x037F;
    // Round to nearest, all exceptions masked (MXCSR default)
    StubRoutines::_mxcsr_std           = 0x1F80;
    // Note: the following two constants are 80-bit values;
    // their layout is critical for correct loading by the FPU.
    // Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias1[0] = 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias1[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias1[2] = 0x03ff;
    // Un-bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias2[0] = 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias2[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias2[2] = 0x7bff;
  }
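  // For reference, the two most commonly used control words above decode
  // as follows (bit layouts per the Intel SDM; a sketch, not normative):
  //   x87 0x027F:  exception mask bits 0-5 set, precision control
  //                (bits 8-9) == 10 (53-bit double), rounding control
  //                (bits 10-11) == 00 (round to nearest).
  //   MXCSR 0x1F80: exception mask bits 7-12 set (IM/DM/ZM/OM/UM/PM),
  //                rounding control (bits 13-14) == 00 (round to
  //                nearest) - the processor's power-on default.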
  // Initialization
  void generate_initial() {
    // Generates all stubs and initializes the entry points

    // These platform-specific settings are needed by generate_call_stub()
    create_control_words();

    // Entry points that exist on all platforms. Note: this is code
    // that could be shared among different platforms - however the
    // benefit seems to be smaller than the disadvantage of having a
    // much more complicated generator structure. See also comment in
    // stubRoutines.hpp.

    StubRoutines::_forward_exception_entry = generate_forward_exception();

    StubRoutines::_call_stub_entry =
      generate_call_stub(StubRoutines::_call_stub_return_address);

    // is referenced by megamorphic call
    StubRoutines::_catch_exception_entry = generate_catch_exception();

    // atomic calls
    StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
    StubRoutines::_atomic_xchg_long_entry    = generate_atomic_xchg_long();
    StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
    StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
    StubRoutines::_atomic_add_entry          = generate_atomic_add();
    StubRoutines::_atomic_add_long_entry     = generate_atomic_add_long();
    StubRoutines::_fence_entry               = generate_orderaccess_fence();

    // platform dependent
    StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
    StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();

    StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();

    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_StackOverflowError));
    StubRoutines::_throw_delayed_StackOverflowError_entry =
      generate_throw_exception("delayed StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_delayed_StackOverflowError));
    if (UseCRC32Intrinsics) {
      // set table address before generating the stubs that use it
      StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
    }

    if (UseCRC32CIntrinsics) {
      bool supports_clmul = VM_Version::supports_clmul();
      StubRoutines::x86::generate_CRC32C_table(supports_clmul);
      StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
      StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
    }
    if (VM_Version::supports_sse2() && UseLibmIntrinsic && InlineIntrinsics) {
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::x86::_ONEHALF_adr = (address)StubRoutines::x86::_ONEHALF;
        StubRoutines::x86::_P_2_adr = (address)StubRoutines::x86::_P_2;
        StubRoutines::x86::_SC_4_adr = (address)StubRoutines::x86::_SC_4;
        StubRoutines::x86::_Ctable_adr = (address)StubRoutines::x86::_Ctable;
        StubRoutines::x86::_SC_2_adr = (address)StubRoutines::x86::_SC_2;
        StubRoutines::x86::_SC_3_adr = (address)StubRoutines::x86::_SC_3;
        StubRoutines::x86::_SC_1_adr = (address)StubRoutines::x86::_SC_1;
        StubRoutines::x86::_PI_INV_TABLE_adr = (address)StubRoutines::x86::_PI_INV_TABLE;
        StubRoutines::x86::_PI_4_adr = (address)StubRoutines::x86::_PI_4;
        StubRoutines::x86::_PI32INV_adr = (address)StubRoutines::x86::_PI32INV;
        StubRoutines::x86::_SIGN_MASK_adr = (address)StubRoutines::x86::_SIGN_MASK;
        StubRoutines::x86::_P_1_adr = (address)StubRoutines::x86::_P_1;
        StubRoutines::x86::_P_3_adr = (address)StubRoutines::x86::_P_3;
        StubRoutines::x86::_NEG_ZERO_adr = (address)StubRoutines::x86::_NEG_ZERO;
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dexp)) {
        StubRoutines::_dexp = generate_libmExp();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
        StubRoutines::_dlog = generate_libmLog();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) {
        StubRoutines::_dlog10 = generate_libmLog10();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) {
        StubRoutines::_dpow = generate_libmPow();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
        StubRoutines::_dsin = generate_libmSin();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
        StubRoutines::_dcos = generate_libmCos();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::_dtan = generate_libmTan();
      }
    }
  }
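  // Each libm stub above is guarded by vmIntrinsics::is_intrinsic_available(),
  // so individual routines can be switched off from the command line (for
  // example with -XX:DisableIntrinsic=_dexp under
  // -XX:+UnlockDiagnosticVMOptions - a usage sketch; see the intrinsic
  // control flags for the authoritative mechanism).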
  void generate_all() {
    // Generates all stubs and initializes the entry points

    // These entry points require SharedInfo::stack0 to be set up in
    // non-core builds and need to be relocatable, so they each
    // fabricate a RuntimeStub internally.
    StubRoutines::_throw_AbstractMethodError_entry =
      generate_throw_exception("AbstractMethodError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_AbstractMethodError));

    StubRoutines::_throw_IncompatibleClassChangeError_entry =
      generate_throw_exception("IncompatibleClassChangeError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_IncompatibleClassChangeError));

    StubRoutines::_throw_NullPointerException_at_call_entry =
      generate_throw_exception("NullPointerException at call throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_NullPointerException_at_call));

    // entry points that are platform specific
    StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
    StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
    StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
    StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();

    StubRoutines::x86::_float_sign_mask  = generate_fp_mask("float_sign_mask",  0x7FFFFFFF7FFFFFFF);
    StubRoutines::x86::_float_sign_flip  = generate_fp_mask("float_sign_flip",  0x8000000080000000);
    StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
    StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    // don't bother generating these AES intrinsic stubs unless global flag is set
    if (UseAESIntrinsics) {
      StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others
      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
      StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
    }
    if (UseAESCTRIntrinsics) {
      StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask();
      StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
    }

    if (UseSHA1Intrinsics) {
      StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
      StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
      StubRoutines::_sha1_implCompress   = generate_sha1_implCompress(false, "sha1_implCompress");
      StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true,  "sha1_implCompressMB");
    }
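    // The SHA-256 setup below first expands the _k256 round-constant table
    // into _k256_W by storing each 16-byte group of constants twice in
    // adjacent 16-byte slots. A sketch of the intent (our reading, not
    // stated in the original): vectorized compression code can then fetch
    // round constants with 256-bit loads in which both 128-bit lanes hold
    // the same four constants.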
    if (UseSHA256Intrinsics) {
      StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
      char* dst = (char*)StubRoutines::x86::_k256_W;
      char* src = (char*)StubRoutines::x86::_k256;
      for (int ii = 0; ii < 16; ++ii) {
        memcpy(dst + 32 * ii,      src + 16 * ii, 16);
        memcpy(dst + 32 * ii + 16, src + 16 * ii, 16);
      }
      StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask();
      StubRoutines::_sha256_implCompress   = generate_sha256_implCompress(false, "sha256_implCompress");
      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true,  "sha256_implCompressMB");
    }
    if (UseSHA512Intrinsics) {
      StubRoutines::x86::_k512_W_addr = (address)StubRoutines::x86::_k512_W;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr_sha512 = generate_pshuffle_byte_flip_mask_sha512();
      StubRoutines::_sha512_implCompress   = generate_sha512_implCompress(false, "sha512_implCompress");
      StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true,  "sha512_implCompressMB");
    }

    // Generate GHASH intrinsics code
    if (UseGHASHIntrinsics) {
      StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();
      StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask();
      StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
    }

    // Safefetch stubs.
    generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
                       &StubRoutines::_safefetch32_fault_pc,
                       &StubRoutines::_safefetch32_continuation_pc);
    generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                       &StubRoutines::_safefetchN_fault_pc,
                       &StubRoutines::_safefetchN_continuation_pc);
#ifdef COMPILER2
    if (UseMultiplyToLenIntrinsic) {
      StubRoutines::_multiplyToLen = generate_multiplyToLen();
    }
    if (UseSquareToLenIntrinsic) {
      StubRoutines::_squareToLen = generate_squareToLen();
    }
    if (UseMulAddIntrinsic) {
      StubRoutines::_mulAdd = generate_mulAdd();
    }
#ifndef _WINDOWS
    // These two entries call into C++ code (SharedRuntime) rather than
    // generated assembly.
    if (UseMontgomeryMultiplyIntrinsic) {
      StubRoutines::_montgomeryMultiply
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
    }
    if (UseMontgomerySquareIntrinsic) {
      StubRoutines::_montgomerySquare
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
    }
#endif // !_WINDOWS
#endif // COMPILER2

    if (UseVectorizedMismatchIntrinsic) {
      StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch();
    }
  }

 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
}; // end class declaration

void StubGenerator_generate(CodeBuffer* code, bool all) {
  StubGenerator g(code, all);
}
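// StubGenerator_generate() is invoked twice during VM startup: once with
// all == false for the early entry points the interpreter needs (see
// StubRoutines::initialize1() in stubRoutines.cpp), and once with
// all == true after universe initialization (StubRoutines::initialize2()).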