/*
 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/top.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions

// Stub Code definitions

static address handle_unsafe_access() {
  JavaThread* thread = JavaThread::current();
  address pc = thread->saved_exception_pc();
  // pc is the instruction which we must emulate
  // doing a no-op is fine:  return garbage from the load
  // therefore, compute npc
  address npc = Assembler::locate_next_instruction(pc);

  // request an async exception
  thread->set_pending_unsafe_access_error();

  // return address of next instruction to execute
  return npc;
}

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    // This can destroy rscratch1 if counter is far from the code cache
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter)                   \
  BLOCK_COMMENT("inc_counter " #counter);         \
  inc_counter_np_(counter);
#endif

  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    c_rarg4:   (interpreter) entry point              address
  //    c_rarg5:   parameters                             intptr_t*
  //    16(rbp):   parameter size (in words)              int
  //    24(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -12 [ argument word 1      ]
  // -11 [ saved r15            ] <--- rsp_after_call
  // -10 [ saved r14            ]
  //  -9 [ saved r13            ]
  //  -8 [ saved r12            ]
  //  -7 [ saved rbx            ]
  //  -6 [ call wrapper         ]
  //  -5 [ result               ]
  //  -4 [ result type          ]
  //  -3 [ method               ]
  //  -2 [ entry point          ]
  //  -1 [ parameters           ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ parameter size       ]
  //   3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 Method*
  //    48(rbp):   (interpreter) entry point              address
  //    56(rbp):   parameters                             intptr_t*
  //    64(rbp):   parameter size (in words)              int
  //    72(rbp):   thread                                 Thread*
  //
  //     [ return_from_Java     ] <--- rsp
  //     [ argument word n      ]
  //      ...
  // -60 [ argument word 1      ]
  // -59 [ saved xmm31          ] <--- rsp_after_call
  //     [ saved xmm16-xmm30    ] (EVEX enabled, else the space is blank)
  // -27 [ saved xmm15          ]
  //     [ saved xmm7-xmm14     ]
  //  -9 [ saved xmm6           ] (each xmm register takes 2 slots)
  //  -7 [ saved r15            ]
  //  -6 [ saved r14            ]
  //  -5 [ saved r13            ]
  //  -4 [ saved r12            ]
  //  -3 [ saved rdi            ]
  //  -2 [ saved rsi            ]
  //  -1 [ saved rbx            ]
  //   0 [ saved rbp            ] <--- rbp
  //   1 [ return address       ]
  //   2 [ call wrapper         ]
  //   3 [ result               ]
  //   4 [ result type          ]
  //   5 [ method               ]
  //   6 [ entry point          ]
  //   7 [ parameters           ]
  //   8 [ parameter size       ]
  //   9 [ thread               ]
  //
  //    Windows reserves the caller's stack space for arguments 1-4.
  //    We spill c_rarg0-c_rarg3 to this space.

  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    xmm_save_first     = 6,  // save from xmm6
    xmm_save_last      = 31, // to xmm31
    xmm_save_base      = -9,
    rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -59
    r15_off            = -7,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    call_wrapper_off   =  2,
    result_off         =  3,
    result_type_off    =  4,
    method_off         =  5,
    entry_point_off    =  6,
    parameters_off     =  7,
    parameter_size_off =  8,
    thread_off         =  9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    parameter_size_off =  2,
    thread_off         =  3
#endif
  };

#ifdef _WIN64
  Address xmm_save(int reg) {
    assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
    return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
  }
#endif

  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,   c_rarg5); // parameters
    __ movptr(entry_point,  c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);
    if (UseAVX > 2) {
      __ movl(rbx, 0xffff);
      __ kmovwl(k1, rbx);
    }
#ifdef _WIN64
    int last_reg = 15;
    if (UseAVX > 2) {
      last_reg = 31;
    }
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vextractf32x4(xmm_save(i), as_XMMRegister(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(xmm_save(i), as_XMMRegister(i));
      }
    }

    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);      // parameter pointer
    __ movl(c_rarg1, c_rarg3);           // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0)); // get parameter
    __ addptr(c_rarg2, wordSize);        // advance to next parameter
    __ decrementl(c_rarg1);              // decrement counter
    __ push(rax);                        // pass parameter
    __ jcc(Assembler::notZero, loop);

    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);              // get Method*
    __ movptr(c_rarg1, entry_point);     // get entry_point
    __ mov(r13, rsp);                    // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::call_stub: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::call_stub: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L3);
    }
#endif

    // restore regs belonging to calling function
#ifdef _WIN64
    // emit the restores for xmm regs
    if (VM_Version::supports_evex()) {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ vinsertf32x4(as_XMMRegister(i), as_XMMRegister(i), xmm_save(i), 0);
      }
    } else {
      for (int i = xmm_save_first; i <= last_reg; i++) {
        __ movdqu(as_XMMRegister(i), xmm_save(i));
      }
    }
#endif
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }
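
  // Illustrative only -- the frame laid out above is what the C++ side
  // sees through the CallStub function pointer (see stubRoutines.hpp for
  // the authoritative typedef). Roughly:
  //
  //   typedef void (*CallStub)(address   link,              // call wrapper
  //                            intptr_t* result,
  //                            BasicType result_type,
  //                            Method*   method,
  //                            address   entry_point,
  //                            intptr_t* parameters,
  //                            int       size_of_parameters,
  //                            TRAPS);                      // thread
  //
  // JavaCalls::call_helper() enters Java through this pointer; the first
  // four arguments arrive in c_rarg0-c_rarg3 and the remainder in
  // registers or on the stack, per the platform ABI described above.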

  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to setup the
  // rsp.
  //
  // rax: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    address start = __ pc();

    // same as in generate_call_stub():
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
    const Address thread        (rbp, thread_off         * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::catch_exception: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::catch_exception: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L3);
    }
#endif

    // set pending exception
    __ verify_oop(rax);

    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
    __ lea(rscratch1, ExternalAddress((address)__FILE__));
    __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
    __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL,
           "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }

  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }

  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
    address start = __ pc();

    __ movptr(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }
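
  // Illustrative only -- the compare-and-swap contract above in C form.
  // LOCK CMPXCHG leaves the previous memory value in rax either way,
  // which is exactly what the comment asks for:
  //
  //   jint atomic_cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
  //     jint old = *dest;                          // all of this happens atomically
  //     if (old == compare_value) *dest = exchange_value;
  //     return old;
  //   }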

  // Support for jbyte atomic::atomic_cmpxchg(jbyte exchange_value, volatile jbyte* dest,
  //                                          jbyte compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_byte() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
    address start = __ pc();

    __ movsbq(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jlong atomic::atomic_cmpxchg(jlong exchange_value,
  //                                          volatile jlong* dest,
  //                                          jlong compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value;
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jint atomic::add(jint add_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    if ( os::is_MP() ) __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);
    __ addl(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
    if ( os::is_MP() ) __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
    __ addptr(rax, c_rarg0);
    __ ret(0);

    return start;
  }
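
  // Illustrative only -- both add stubs are fetch-and-add via LOCK XADD,
  // with the returned (new) value recomputed in rax afterwards:
  //
  //   jint atomic_add(jint add_value, volatile jint* dest) {
  //     jint old = *dest;              // xaddl: atomically *dest += add_value,
  //     *dest = old + add_value;       //        old value left in c_rarg0
  //     return old + add_value;        // addl:  rax = add_value + old
  //   }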

  // Support for intptr_t OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess).  This is used as part of debugging
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp  (rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // callers fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_sp()
  //
  // This routine is used to find the previous stack pointer for the
  // caller.
  address generate_get_previous_sp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_sp");
    address start = __ pc();

    __ movptr(rax, rsp);
    __ addptr(rax, 8); // return address is at the top of the stack.
    __ ret(0);

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize);      // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);      // Only check control and mask bits
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }

  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1);          // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2);     // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
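
  // Illustrative only -- the fixup stubs patch up the cases where the
  // hardware conversion (CVTTSS2SI and friends) reports "invalid" and
  // produces the integer-indefinite value. The Java semantics they
  // restore (JLS 5.1.3) are, for f2i:
  //
  //   jint f2i(jfloat x) {
  //     if (x != x) return 0;                       // NaN -> 0
  //     if (x >= (jfloat) max_jint) return max_jint;
  //     if (x <= (jfloat) min_jint) return min_jint;
  //     return (jint) x;
  //   }
  //
  // The f2l/d2i/d2l variants below do the same with the matching bounds.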

  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1);          // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2);     // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testptr(c_rarg0, c_rarg0);   // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0);     // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64( mask, relocInfo::none );
    __ emit_data64( mask, relocInfo::none );

    return start;
  }
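
  // Illustrative only -- generate_fp_mask emits a 16-byte,
  // CodeEntryAlignment-aligned constant whose two 64-bit halves both hold
  // 'mask'. Such constants typically serve as packed sign-bit masks, e.g.
  // ANDPS/ANDPD against 0x7fffffff7fffffff clears float sign bits to
  // implement abs(), and XORPS against the complementary mask flips them
  // for neg().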

  // The following routine generates a subroutine to throw an
  // asynchronous UnknownError when an unsafe access gets a fault that
  // could not be reasonably prevented by the programmer.  (Example:
  // SIGBUS/OBJERR.)
  address generate_handler_for_unsafe_access() {
    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
    address start = __ pc();

    __ push(0);                   // hole for return address-to-be
    __ pusha();                   // push registers
    Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);

    // FIXME: this probably needs alignment logic

    __ subptr(rsp, frame::arg_reg_save_area_bytes);
    BLOCK_COMMENT("call handle_unsafe_access");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
    __ addptr(rsp, frame::arg_reg_save_area_bytes);

    __ movptr(next_pc, rax);      // stuff next address
    __ popa();
    __ ret(0);                    // jump to next address

    return start;
  }
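
  // Illustrative only -- how the handler above resumes execution: push(0)
  // reserves a return slot, handle_unsafe_access() computes the address of
  // the instruction after the faulting one, movptr(next_pc, rax) writes
  // that address into the reserved slot (which sits just above the pusha
  // save area), and the final ret(0) "returns" to it, skipping the
  // faulting access entirely.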

  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,
      saved_r10     = 8 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr   = 16 * wordSize,
      error_msg     = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit); // if obj is NULL it is OK
    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // set r12 to heapbase for load_klass()
    __ reinit_heapbase();

    // make sure klass is 'reasonable', i.e., not zero.
    __ load_klass(rax, rax);  // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // restore c_rarg3
    __ pop(c_rarg2);                               // restore c_rarg2
    __ pop(r12);                                   // restore r12
    __ popf();                                     // restore flags
    __ ret(4 * wordSize);                          // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // get saved c_rarg3 back
    __ pop(c_rarg2);                               // get saved c_rarg2 back
    __ pop(r12);                                   // get saved r12 back
    __ popf();                                     // get saved flags off stack --
                                                   // will be ignored

    __ pusha();                                    // push registers
                                                   // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //   [tos +  0] 16 saved registers
    //   [tos + 16] return address
    // * [tos + 17] error message (char*)
    // * [tos + 18] object to verify (oop)
    // * [tos + 19] saved rax - saved by caller and bashed
    // * [tos + 20] saved r10 (rscratch1) - saved by caller
    // * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ mov(rsp, r12);                               // restore rsp
    __ popa();                                      // pop registers (includes r12)
    __ ret(4 * wordSize);                           // pop caller saved stuff

    return start;
  }

  //
  // Verify that a register contains a clean 32-bit positive value
  // (high 32 bits are 0) so it can be used in 64-bit shifts.
  //
  //  Input:
  //    Rint  - 32-bit value
  //    Rtmp  - scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#ifdef ASSERT
    Label L;
    assert_different_registers(Rtmp, Rint);
    __ movslq(Rtmp, Rint);
    __ cmpq(Rtmp, Rint);
    __ jcc(Assembler::equal, L);
    __ stop("high 32-bits of int value are not 0");
    __ bind(L);
#endif
  }

  //  Generate overlap test for array copy stubs
  //
  //  Input:
  //     c_rarg0 - from
  //     c_rarg1 - to
  //     c_rarg2 - element count
  //
  //  Output:
  //     rax   - &from[element count - 1]
  //
  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(NULL, &L_no_overlap, sf);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
    const Register from     = c_rarg0;
    const Register to       = c_rarg1;
    const Register count    = c_rarg2;
    const Register end_from = rax;

    __ cmpptr(to, from);
    __ lea(end_from, Address(from, count, sf, 0));
    if (NOLp == NULL) {
      ExternalAddress no_overlap(no_overlap_target);
      __ jump_cc(Assembler::belowEqual, no_overlap);
      __ cmpptr(to, end_from);
      __ jump_cc(Assembler::aboveEqual, no_overlap);
    } else {
      __ jcc(Assembler::belowEqual, (*NOLp));
      __ cmpptr(to, end_from);
      __ jcc(Assembler::aboveEqual, (*NOLp));
    }
  }

  // Shuffle first three arg regs on Windows into Linux/Solaris locations.
  //
  // Outputs:
  //    rdi - rcx
  //    rsi - rdx
  //    rdx - r8
  //    rcx - r9
  //
  // Registers r9 and r10 are used to save rdi and rsi on Windows, where
  // the latter are non-volatile.  r9 and r10 should not be used by the
  // caller.
  //
  void setup_arg_regs(int nargs = 3) {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
    assert(nargs == 3 || nargs == 4, "else fix");
#ifdef _WIN64
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    if (nargs >= 4)
      __ mov(rax, r9);  // r9 is also saved_rdi
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
    if (nargs >= 4)
      __ mov(rcx, rax); // c_rarg3 (via rax)
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
  }

  void restore_arg_regs() {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
#ifdef _WIN64
    __ movptr(rdi, saved_rdi);
    __ movptr(rsi, saved_rsi);
#endif
  }
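
  // Illustrative only -- the condition array_overlap_test establishes
  // before falling through to the conjoint (backward) copy, in C form
  // (byte_count is the element count scaled by sf; the comparisons are
  // unsigned, matching belowEqual/aboveEqual above):
  //
  //   bool forward_copy_is_safe(char* from, char* to, size_t byte_count) {
  //     return to <= from                 // dest at or below source
  //         || to >= from + byte_count;   // or ranges do not overlap at all
  //   }
  //
  // Only when neither holds (the ranges overlap with the destination
  // above the source) does control fall through to the backward copy.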

  // Generate code for an array write pre barrier
  //
  //     addr    - starting address
  //     count   - element count
  //     tmp     - scratch register
  //
  //     Destroys no registers!
  //
  void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCTLogging:
        // With G1, don't generate the call if we statically know that the target is uninitialized
        if (!dest_uninitialized) {
          __ pusha();                      // push registers
          if (count == c_rarg0) {
            if (addr == c_rarg1) {
              // exactly backwards!!
              __ xchgptr(c_rarg1, c_rarg0);
            } else {
              __ movptr(c_rarg1, count);
              __ movptr(c_rarg0, addr);
            }
          } else {
            __ movptr(c_rarg0, addr);
            __ movptr(c_rarg1, count);
          }
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
          __ popa();
        }
        break;
      case BarrierSet::CardTableForRS:
      case BarrierSet::CardTableExtension:
      case BarrierSet::ModRef:
        break;
      default:
        ShouldNotReachHere();

    }
  }

  //
  // Generate code for an array write post barrier
  //
  //  Input:
  //     start    - register containing starting address of destination array
  //     count    - elements count
  //     scratch  - scratch register
  //
  //  The input registers are overwritten.
  //
  void gen_write_ref_array_post_barrier(Register start, Register count, Register scratch) {
    assert_different_registers(start, count, scratch);
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCTLogging:
        {
          __ pusha();             // push registers (overkill)
          if (c_rarg0 == count) { // On win64 c_rarg0 == rcx
            assert_different_registers(c_rarg1, start);
            __ mov(c_rarg1, count);
            __ mov(c_rarg0, start);
          } else {
            assert_different_registers(c_rarg0, count);
            __ mov(c_rarg0, start);
            __ mov(c_rarg1, count);
          }
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
          __ popa();
        }
        break;
      case BarrierSet::CardTableForRS:
      case BarrierSet::CardTableExtension:
        {
          CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

          Label L_loop;
          const Register end = count;

          __ leaq(end, Address(start, count, TIMES_OOP, 0)); // end == start+count*oop_size
          __ subptr(end, BytesPerHeapOop);                   // end - 1 to make inclusive
          __ shrptr(start, CardTableModRefBS::card_shift);
          __ shrptr(end,   CardTableModRefBS::card_shift);
          __ subptr(end, start);                             // end --> cards count

          int64_t disp = (int64_t) ct->byte_map_base;
          __ mov64(scratch, disp);
          __ addptr(start, scratch);
          __ BIND(L_loop);
          __ movb(Address(start, count, Address::times_1), 0);
          __ decrement(count);
          __ jcc(Assembler::greaterEqual, L_loop);
        }
        break;
      default:
        ShouldNotReachHere();

    }
  }
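
  // Illustrative only -- what the card-table arm of the post barrier does,
  // expressed in C. The stub writes 0 (the dirty-card value), walking the
  // cards backward from the last one; oop_size and card_shift stand in for
  // the values used above:
  //
  //   void dirty_cards(char* dst, size_t count_oops, jbyte* byte_map_base) {
  //     jbyte* first = byte_map_base + ((uintptr_t)dst >> card_shift);
  //     jbyte* last  = byte_map_base +
  //         (((uintptr_t)dst + count_oops * oop_size - oop_size) >> card_shift);
  //     for (jbyte* p = last; p >= first; p--) *p = 0;   // 0 == dirty
  //   }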

  // Copy big chunks forward
  //
  // Inputs:
  //   end_from     - source array's end address
  //   end_to       - destination array's end address
  //   qword_count  - 64-bit element count, negative
  //   to           - scratch
  //   L_copy_bytes - entry label
  //   L_copy_8_bytes - exit label
  //
  void copy_bytes_forward(Register end_from, Register end_to,
                          Register qword_count, Register to,
                          Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      if (UseAVX > 2) {
        __ movl(to, 0xffff);
        __ kmovwl(k1, to);
      }
      // Copy 64 bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit);
        __ evmovdqul(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
        __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
        __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 8);
      __ jcc(Assembler::lessEqual, L_loop);
      __ subptr(qword_count, 4);  // sub(8) and add(4)
      __ jccb(Assembler::greater, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
      }
      __ addptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32 bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
      __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
      __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
      __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
      __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);

      __ BIND(L_copy_bytes);
      __ addptr(qword_count, 4);
      __ jcc(Assembler::lessEqual, L_loop);
    }
    __ subptr(qword_count, 4);
    __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
  }
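
  // Illustrative only -- the indexing scheme shared by both bulk-copy
  // helpers. For the forward copy the caller negates qword_count and
  // points end_from/end_to at the last qword, so
  //
  //   Address(end_from, qword_count, times_8, disp)
  //     == end_from + 8*qword_count + disp
  //
  // starts at the low end of the array, and the loop simply adds to the
  // negative index until it crosses zero -- the add doubles as both the
  // induction step and the termination test, with no separate limit
  // register. The backward copy below runs the same trick with a positive
  // count descending toward zero.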

  // Copy big chunks backward
  //
  // Inputs:
  //   from         - source array address
  //   dest         - destination array address
  //   qword_count  - 64-bit element count
  //   to           - scratch
  //   L_copy_bytes - entry label
  //   L_copy_8_bytes - exit label
  //
  void copy_bytes_backward(Register from, Register dest,
                           Register qword_count, Register to,
                           Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      if (UseAVX > 2) {
        __ movl(to, 0xffff);
        __ kmovwl(k1, to);
      }
      // Copy 64 bytes per iteration
      __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqul(xmm0, Address(from, qword_count, Address::times_8, 0), Assembler::AVX_512bit);
        __ evmovdqul(Address(dest, qword_count, Address::times_8, 0), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
        __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8,  0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8,  0), xmm1);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
        __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
        __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
        __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
        __ movdqu(xmm3, Address(from, qword_count, Address::times_8,  0));
        __ movdqu(Address(dest, qword_count, Address::times_8,  0), xmm3);
      }
      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 8);
      __ jcc(Assembler::greaterEqual, L_loop);

      __ addptr(qword_count, 4);  // add(8) and sub(4)
      __ jccb(Assembler::less, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8,  0));
        __ movdqu(Address(dest, qword_count, Address::times_8,  0), xmm1);
      }
      __ subptr(qword_count, 4);
      __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32 bytes per iteration
      __ BIND(L_loop);
      __ movq(to, Address(from, qword_count, Address::times_8, 24));
      __ movq(Address(dest, qword_count, Address::times_8, 24), to);
      __ movq(to, Address(from, qword_count, Address::times_8, 16));
      __ movq(Address(dest, qword_count, Address::times_8, 16), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  8));
      __ movq(Address(dest, qword_count, Address::times_8,  8), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  0));
      __ movq(Address(dest, qword_count, Address::times_8,  0), to);

      __ BIND(L_copy_bytes);
      __ subptr(qword_count, 4);
      __ jcc(Assembler::greaterEqual, L_loop);
    }
    __ addptr(qword_count, 4);
    __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_byte_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_byte_copy().
  //
  address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    Label L_copy_byte, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count); // make the count negative
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jccb(Assembler::zero, L_copy_byte);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ addptr(end_from, 2);
    __ addptr(end_to, 2);

    // Check for and copy trailing byte
    __ BIND(L_copy_byte);
    __ testl(byte_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movb(rax, Address(end_from, 8));
    __ movb(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from high to low addresses.

    // Check for and copy trailing byte
    __ testl(byte_count, 1);
    __ jcc(Assembler::zero, L_copy_2_bytes);
    __ movb(rax, Address(from, byte_count, Address::times_1, -1));
    __ movb(Address(to, byte_count, Address::times_1, -1), rax);
    __ decrement(byte_count); // Adjust for possible trailing word

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jcc(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, byte_count, Address::times_1, -2));
    __ movw(Address(to, byte_count, Address::times_1, -2), rax);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_short_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_short_copy().
  //
  address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Original 'dest' is trashed, so we can't use it as a
    // base register for a possible trailing word copy

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }

  address generate_fill(BasicType t, bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    BLOCK_COMMENT("Entry:");

    const Register to    = c_rarg0;  // destination array address
    const Register value = c_rarg1;  // value
    const Register count = c_rarg2;  // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ generate_fill(t, aligned, to, value, count, rax, xmm0);

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }
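
  // Illustrative only -- the stub above is a thin RuntimeStub wrapper: the
  // actual fill loop is emitted by MacroAssembler::generate_fill(), which
  // stores 'value' into each of 'count' elements of type 't' starting at
  // 'to', using rax and xmm0 as scratch and vectorizing where the CPU
  // allows.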
  address generate_fill(BasicType t, bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    BLOCK_COMMENT("Entry:");

    const Register to    = c_rarg0;  // destination array address
    const Register value = c_rarg1;  // value
    const Register count = c_rarg2;  // elements count

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ generate_fill(t, aligned, to, value, count, rax, xmm0);

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
                                       address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_2);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2);   // count => qword_count

    // Copy from high to low addresses.  Use 'to' as scratch.
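
    // Note (illustrative): the backward direction is what makes the
    // conjoint copy safe.  array_overlap_test above only falls through
    // when from < to < from + count*2, i.e. exactly when a forward copy
    // would overwrite source elements before reading them; copying from
    // high to low addresses reads each element before the copy clobbers
    // it.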
    // Check for and copy trailing word
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, word_count, Address::times_2, -2));
    __ movw(Address(to, word_count, Address::times_2, -2), rax);

    // Check for and copy trailing dword
  __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
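
  // Note (illustrative): assert_clean_int is debug-only checking of a
  // calling-convention invariant.  The element count arrives as a 32-bit
  // int in a 64-bit register; the register must equal the sign extension
  // of its own low 32 bits, so the stubs can use it directly in 64-bit
  // address arithmetic (movptr/shrptr above) without re-extending it.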
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  // Side Effects:
  //   disjoint_int_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_int_oop_copy().
  //
  address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry,
                                         const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    const Register saved_to    = r11;  // saved destination array address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
    if (is_oop) {
      __ movq(saved_to, to);
      gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
    }

    // 'from', 'to' and 'count' are now valid
    __ movptr(dword_count, count);
    __ shrptr(count, 1);   // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
  __ BIND(L_copy_4_bytes);
    __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
    __ jccb(Assembler::zero, L_exit);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

  __ BIND(L_exit);
    if (is_oop) {
      gen_write_ref_array_post_barrier(saved_to, dword_count, rax);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }
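
  // Note (illustrative; helper names below are hypothetical): for the
  // is_oop flavors the copy is bracketed by GC barriers.  The pre barrier
  // matters for collectors with a SATB write barrier (G1), which must
  // observe the overwritten destination oops unless the destination is
  // known to be uninitialized; for the card-table collectors the post
  // barrier conceptually dirties every card covering the stored-to range:
  //
  //   for (jbyte* c = card_for(to); c <= card_for(to + count*oopSize - 1); c++)
  //     *c = dirty_card_val;   // card_for/dirty_card_val are illustrative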
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
                                         address *entry, const char *name,
                                         bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_4);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    if (is_oop) {
      // no registers are destroyed by this call
      gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
    }

    assert_clean_int(count, rax); // Make sure 'count' is clean int.
    // 'from', 'to' and 'count' are now valid
    __ movptr(dword_count, count);
    __ shrptr(count, 1);   // count => qword_count

    // Copy from high to low addresses.  Use 'to' as scratch.

    // Check for and copy trailing dword
    __ testl(dword_count, 1);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, dword_count, Address::times_4, -4));
    __ movl(Address(to, dword_count, Address::times_4, -4), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

  __ BIND(L_exit);
    if (is_oop) {
      gen_write_ref_array_post_barrier(to, dword_count, rax);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
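
  // Note (illustrative): the is_oop variants of the int copy are only
  // wired up when UseCompressedOops is set, where a heap reference is a
  // 32-bit narrowOop; with uncompressed oops the long copy stubs serve
  // oop arrays instead (see the dispatch in generate_arraycopy_stubs()
  // below).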
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // Side Effects:
  //   disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
  //   no-overlap entry point used by generate_conjoint_long_oop_copy().
  //
  address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry,
                                          const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register qword_count = rdx;  // elements count
    const Register end_from    = from; // source array end address
    const Register end_to      = rcx;  // destination array end address
    const Register saved_to    = to;
    const Register saved_count = r11;
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    // Save no-overlap entry point for generate_conjoint_long_oop_copy()
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
    // 'from', 'to' and 'qword_count' are now valid
    if (is_oop) {
      // Save to and count for store barrier
      __ movptr(saved_count, qword_count);
      // no registers are destroyed by this call
      gen_write_ref_array_pre_barrier(to, qword_count, dest_uninitialized);
    }

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    } else {
      restore_arg_regs();
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
      __ xorptr(rax, rax); // return 0
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    if (is_oop) {
    __ BIND(L_exit);
      gen_write_ref_array_post_barrier(saved_to, saved_count, rax);
    }
    restore_arg_regs();
    if (is_oop) {
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
    }
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
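
  // Note (illustrative): unlike the byte/short/int flavors, the long
  // copies need no trailing-element fixup: each element is already one
  // qword, so qword_count is the element count itself and the qword loop
  // plus copy_bytes_forward/backward cover every case, including
  // count == 0.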
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  address generate_conjoint_long_oop_copy(bool aligned, bool is_oop,
                                          address nooverlap_target, address *entry,
                                          const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register qword_count = rdx;  // elements count
    const Register saved_count = rcx;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_8);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
    // 'from', 'to' and 'qword_count' are now valid
    if (is_oop) {
      // Save to and count for store barrier
      __ movptr(saved_count, qword_count);
      // No registers are destroyed by this call
      gen_write_ref_array_pre_barrier(to, saved_count, dest_uninitialized);
    }

    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    } else {
      restore_arg_regs();
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
      __ xorptr(rax, rax); // return 0
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    if (is_oop) {
    __ BIND(L_exit);
      gen_write_ref_array_post_barrier(to, saved_count, rax);
    }
    restore_arg_regs();
    if (is_oop) {
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
    }
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }


  // Helper for generating a dynamic type check.
  // Smashes no registers.
  void generate_type_check(Register sub_klass,
                           Register super_check_offset,
                           Register super_klass,
                           Label& L_success) {
    assert_different_registers(sub_klass, super_check_offset, super_klass);

    BLOCK_COMMENT("type_check:");

    Label L_miss;

    __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL,
                                     super_check_offset);
    __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL);

    // Fall through on failure!
  __ BIND(L_miss);
  }
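
  // Note (rough sketch of the protocol above, in C-like pseudocode; the
  // real fast path has an extra wrinkle when super_check_offset aliases
  // the secondary-super-cache slot):
  //
  //   bool is_subtype(Klass* sub, Klass* super, int super_check_offset) {
  //     if (sub == super) return true;                        // trivial hit
  //     if (*(Klass**)((address)sub + super_check_offset) == super)
  //       return true;                                        // displaced hit
  //     // slow path: linear scan of sub's secondary supers array,
  //     // caching a hit so the next fast-path query succeeds
  //     ...
  //   }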
  //
  //  Generate checkcasting array copy stub
  //
  //  Input:
  //    c_rarg0   - source array address
  //    c_rarg1   - destination array address
  //    c_rarg2   - element count, treated as ssize_t, can be zero
  //    c_rarg3   - size_t ckoff (super_check_offset)
  // not Win64
  //    c_rarg4   - oop ckval (super_klass)
  // Win64
  //    rsp+40    - oop ckval (super_klass)
  //
  //  Output:
  //    rax ==  0  -  success
  //    rax == -1^K - failure, where K is partial transfer count
  //
  address generate_checkcast_copy(const char *name, address *entry,
                                  bool dest_uninitialized = false) {

    Label L_load_element, L_store_element, L_do_card_marks, L_done;

    // Input registers (after setup_arg_regs)
    const Register from        = rdi;   // source array address
    const Register to          = rsi;   // destination array address
    const Register length      = rdx;   // elements count
    const Register ckoff       = rcx;   // super_check_offset
    const Register ckval       = r8;    // super_klass

    // Registers used as temps (r13, r14 are save-on-entry)
    const Register end_from    = from;  // source array end address
    const Register end_to      = r13;   // destination array end address
    const Register count       = rdx;   // -(count_remaining)
    const Register r14_length  = r14;   // saved copy of length
    // End pointers are inclusive, and if length is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    const Register rax_oop    = rax;    // actual oop copied
    const Register r11_klass  = r11;    // oop._klass

    //---------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the two arrays are subtypes of Object[] but the
    // destination array type is not equal to or a supertype
    // of the source type.  Each element must be separately
    // checked.

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef ASSERT
    // caller guarantees that the arrays really are different
    // otherwise, we would have to make conjoint checks
    { Label L;
      array_overlap_test(L, TIMES_OOP);
      __ stop("checkcast_copy within a single array");
      __ bind(L);
    }
#endif //ASSERT

    setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
                       // ckoff => rcx, ckval => r8
                       // r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
    // last argument (#4) is on stack on Win64
    __ movptr(ckval, Address(rsp, 6 * wordSize));
#endif
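
    // Note (illustrative): the 6 * wordSize offset is Win64 frame
    // arithmetic.  After enter() the stack is
    //   [rsp + 0*wordSize]      saved rbp
    //   [rsp + 1*wordSize]      return address
    //   [rsp + 2..5*wordSize]   shadow (home) space for c_rarg0..c_rarg3
    //   [rsp + 6*wordSize]      fifth argument: ckval
    // which matches the "rsp+40 - oop ckval" line in the header comment
    // (40 bytes above rsp as seen at the call, before rbp was pushed).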
    // Caller of this entry point must set up the argument registers.
    if (entry != NULL) {
      *entry = __ pc();
      BLOCK_COMMENT("Entry:");
    }

    // allocate spill slots for r13, r14
    enum {
      saved_r13_offset,
      saved_r14_offset,
      saved_rbp_offset
    };
    __ subptr(rsp, saved_rbp_offset * wordSize);
    __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
    __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);

    // check that int operands are properly extended to size_t
    assert_clean_int(length, rax);
    assert_clean_int(ckoff, rax);

#ifdef ASSERT
    BLOCK_COMMENT("assert consistent ckoff/ckval");
    // The ckoff and ckval must be mutually consistent,
    // even though caller generates both.
    { Label L;
      int sco_offset = in_bytes(Klass::super_check_offset_offset());
      __ cmpl(ckoff, Address(ckval, sco_offset));
      __ jcc(Assembler::equal, L);
      __ stop("super_check_offset inconsistent");
      __ bind(L);
    }
#endif //ASSERT

    // Loop-invariant addresses.  They are exclusive end pointers.
    Address end_from_addr(from, length, TIMES_OOP, 0);
    Address   end_to_addr(to,   length, TIMES_OOP, 0);
    // Loop-variant addresses.  They assume post-incremented count < 0.
    Address from_element_addr(end_from, count, TIMES_OOP, 0);
    Address   to_element_addr(end_to,   count, TIMES_OOP, 0);

    gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);

    // Copy from low to high addresses, indexed from the end of each array.
    __ lea(end_from, end_from_addr);
    __ lea(end_to,   end_to_addr);
    __ movptr(r14_length, length);        // save a copy of the length
    assert(length == count, "");          // else fix next line:
    __ negptr(count);                     // negate and test the length
    __ jcc(Assembler::notZero, L_load_element);

    // Empty array:  Nothing to do.
    __ xorptr(rax, rax);                  // return 0 on (trivial) success
    __ jmp(L_done);

    // ======== begin loop ========
    // (Loop is rotated; its entry is L_load_element.)
    // Loop control:
    //   for (count = -count; count != 0; count++)
    // Base pointers src, dst are biased by 8*(count-1), to last element.
    __ align(OptoLoopAlignment);

  __ BIND(L_store_element);
    __ store_heap_oop(to_element_addr, rax_oop);  // store the oop
    __ increment(count);                          // increment the count toward zero
    __ jcc(Assembler::zero, L_do_card_marks);

    // ======== loop entry is here ========
  __ BIND(L_load_element);
    __ load_heap_oop(rax_oop, from_element_addr); // load the oop
    __ testptr(rax_oop, rax_oop);
    __ jcc(Assembler::zero, L_store_element);

    __ load_klass(r11_klass, rax_oop);            // query the object klass
    generate_type_check(r11_klass, ckoff, ckval, L_store_element);
    // ======== end loop ========

    // It was a real error; we must depend on the caller to finish the job.
    // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
    // Emit GC store barriers for the oops we have copied (r14 + rdx),
    // and report their number to the caller.
    assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1);
    Label L_post_barrier;
    __ addptr(r14_length, count);     // K = (original - remaining) oops
    __ movptr(rax, r14_length);       // save the value
    __ notptr(rax);                   // report (-1^K) to caller (does not affect flags)
    __ jccb(Assembler::notZero, L_post_barrier);
    __ jmp(L_done); // K == 0, nothing was copied, skip post barrier

    // Come here on success only.
  __ BIND(L_do_card_marks);
    __ xorptr(rax, rax);              // return 0 on success

  __ BIND(L_post_barrier);
    gen_write_ref_array_post_barrier(to, r14_length, rscratch1);
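
    // Note (illustrative): the failure encoding is rax = ~K (i.e. -1^K),
    // where K is the number of elements copied before the type check
    // failed: K == 0 gives rax == -1, K == 3 gives rax == -4.  The caller
    // recovers K as ~rax; a complete copy is reported as rax == 0 instead.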
    // Common exit point (success or failure).
  __ BIND(L_done);
    __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
    __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  //
  //  Generate 'unsafe' array copy stub
  //  Though just as safe as the other stubs, it takes an unscaled
  //  size_t argument instead of an element count.
  //
  //  Input:
  //    c_rarg0   - source array address
  //    c_rarg1   - destination array address
  //    c_rarg2   - byte count, treated as ssize_t, can be zero
  //
  // Examines the alignment of the operands and dispatches
  // to a long, int, short, or byte copy loop.
  //
  address generate_unsafe_copy(const char *name,
                               address byte_copy_entry, address short_copy_entry,
                               address int_copy_entry, address long_copy_entry) {

    Label L_long_aligned, L_int_aligned, L_short_aligned;

    // Input registers (before setup_arg_regs)
    const Register from        = c_rarg0;  // source array address
    const Register to          = c_rarg1;  // destination array address
    const Register size        = c_rarg2;  // byte count (size_t)

    // Register used as a temp
    const Register bits        = rax;      // test copy of low bits

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);

    __ mov(bits, from);
    __ orptr(bits, to);
    __ orptr(bits, size);

    __ testb(bits, BytesPerLong-1);
    __ jccb(Assembler::zero, L_long_aligned);

    __ testb(bits, BytesPerInt-1);
    __ jccb(Assembler::zero, L_int_aligned);

    __ testb(bits, BytesPerShort-1);
    __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));

  __ BIND(L_short_aligned);
    __ shrptr(size, LogBytesPerShort); // size => short_count
    __ jump(RuntimeAddress(short_copy_entry));

  __ BIND(L_int_aligned);
    __ shrptr(size, LogBytesPerInt); // size => int_count
    __ jump(RuntimeAddress(int_copy_entry));

  __ BIND(L_long_aligned);
    __ shrptr(size, LogBytesPerLong); // size => qword_count
    __ jump(RuntimeAddress(long_copy_entry));

    return start;
  }
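
  // Note (illustrative): or-ing from, to and size lets one test cover
  // all three operands.  E.g. from = 0x1000, to = 0x2008, size = 0x30
  // gives bits = 0x3038; bits & (BytesPerLong-1) == 0, so the qword loop
  // is safe.  Had any of the three been odd, every test would fail and
  // the stub would fall back to the byte copy entry.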
  // Perform range checks on the proposed arraycopy.
  // Kills temp, but nothing else.
  // Also, clean the sign bits of src_pos and dst_pos.
  void arraycopy_range_checks(Register src,     // source array oop (c_rarg0)
                              Register src_pos, // source position (c_rarg1)
                              Register dst,     // destination array oop (c_rarg2)
                              Register dst_pos, // destination position (c_rarg3)
                              Register length,
                              Register temp,
                              Label& L_failed) {
    BLOCK_COMMENT("arraycopy_range_checks:");

    //  if (src_pos + length > arrayOop(src)->length())  FAIL;
    __ movl(temp, length);
    __ addl(temp, src_pos);               // src_pos + length
    __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    //  if (dst_pos + length > arrayOop(dst)->length())  FAIL;
    __ movl(temp, length);
    __ addl(temp, dst_pos);               // dst_pos + length
    __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
    // Move with sign extension can be used since they are positive.
    __ movslq(src_pos, src_pos);
    __ movslq(dst_pos, dst_pos);

    BLOCK_COMMENT("arraycopy_range_checks done");
  }
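
  // Note (illustrative): with src_pos == 2, length == 5 and
  // src->length() == 6, temp == 7 > 6 and control goes to L_failed.  The
  // compares are unsigned (Assembler::above), so even a sum that sets
  // the sign bit (two values near 2^31) still reads as larger than any
  // valid array length rather than as a negative number.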
  //
  //  Generate generic array copy stubs
  //
  //  Input:
  //    c_rarg0    -  src oop
  //    c_rarg1    -  src_pos (32-bits)
  //    c_rarg2    -  dst oop
  //    c_rarg3    -  dst_pos (32-bits)
  // not Win64
  //    c_rarg4    -  element count (32-bits)
  // Win64
  //    rsp+40     -  element count (32-bits)
  //
  //  Output:
  //    rax ==  0  -  success
  //    rax == -1^K - failure, where K is partial transfer count
  //
  address generate_generic_copy(const char *name,
                                address byte_copy_entry, address short_copy_entry,
                                address int_copy_entry, address oop_copy_entry,
                                address long_copy_entry, address checkcast_copy_entry) {

    Label L_failed, L_failed_0, L_objArray;
    Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;

    // Input registers
    const Register src        = c_rarg0;  // source array oop
    const Register src_pos    = c_rarg1;  // source position
    const Register dst        = c_rarg2;  // destination array oop
    const Register dst_pos    = c_rarg3;  // destination position
#ifndef _WIN64
    const Register length     = c_rarg4;
#else
    const Address  length(rsp, 6 * wordSize); // elements count is on stack on Win64
#endif

    { int modulus = CodeEntryAlignment;
      int target  = modulus - 5; // 5 = sizeof jmp(L_failed)
      int advance = target - (__ offset() % modulus);
      if (advance < 0)  advance += modulus;
      if (advance > 0)  __ nop(advance);
    }
    StubCodeMark mark(this, "StubRoutines", name);

    // Short-hop target to L_failed.  Makes for denser prologue code.
  __ BIND(L_failed_0);
    __ jmp(L_failed);
    assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");

    __ align(CodeEntryAlignment);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_generic_array_copy_ctr);

    //-----------------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the following conditions are met:
    //
    // (1) src and dst must not be null.
    // (2) src_pos must not be negative.
    // (3) dst_pos must not be negative.
    // (4) length  must not be negative.
    // (5) src klass and dst klass should be the same and not NULL.
    // (6) src and dst should be arrays.
    // (7) src_pos + length must not exceed length of src.
    // (8) dst_pos + length must not exceed length of dst.
    //

    //  if (src == NULL) return -1;
    __ testptr(src, src);         // src oop
    size_t j1off = __ offset();
    __ jccb(Assembler::zero, L_failed_0);

    //  if (src_pos < 0) return -1;
    __ testl(src_pos, src_pos); // src_pos (32-bits)
    __ jccb(Assembler::negative, L_failed_0);

    //  if (dst == NULL) return -1;
    __ testptr(dst, dst);         // dst oop
    __ jccb(Assembler::zero, L_failed_0);

    //  if (dst_pos < 0) return -1;
    __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
    size_t j4off = __ offset();
    __ jccb(Assembler::negative, L_failed_0);

    // The first four tests are very dense code,
    // but not quite dense enough to put four
    // jumps in a 16-byte instruction fetch buffer.
    // That's good, because some branch predictors
    // do not like jumps so close together.
    // Make sure of this.
    guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");

    // registers used as temp
    const Register r11_length    = r11; // elements count to copy
    const Register r10_src_klass = r10; // array klass

    //  if (length < 0) return -1;
    __ movl(r11_length, length);        // length (elements count, 32-bits value)
    __ testl(r11_length, r11_length);
    __ jccb(Assembler::negative, L_failed_0);

    __ load_klass(r10_src_klass, src);
#ifdef ASSERT
    //  assert(src->klass() != NULL);
    {
      BLOCK_COMMENT("assert klasses not null {");
      Label L1, L2;
      __ testptr(r10_src_klass, r10_src_klass);
      __ jcc(Assembler::notZero, L2);   // it is broken if klass is NULL
      __ bind(L1);
      __ stop("broken null klass");
      __ bind(L2);
      __ load_klass(rax, dst);
      __ cmpq(rax, 0);
      __ jcc(Assembler::equal, L1);     // this would be broken also
      BLOCK_COMMENT("} assert klasses not null done");
    }
#endif

    // Load layout helper (32-bits)
    //
    //  |array_tag|     | header_size | element_type |     |log2_element_size|
    // 32        30    24            16              8     2                 0
    //
    //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
    //

    const int lh_offset = in_bytes(Klass::layout_helper_offset());

    // Handle objArrays completely differently...
    const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
    __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh);
    __ jcc(Assembler::equal, L_objArray);

    //  if (src->klass() != dst->klass()) return -1;
    __ load_klass(rax, dst);
    __ cmpq(r10_src_klass, rax);
    __ jcc(Assembler::notEqual, L_failed);

    const Register rax_lh = rax;  // layout helper
    __ movl(rax_lh, Address(r10_src_klass, lh_offset));

    //  if (!src->is_Array()) return -1;
    __ cmpl(rax_lh, Klass::_lh_neutral_value);
    __ jcc(Assembler::greaterEqual, L_failed);

    // At this point, it is known to be a typeArray (array_tag 0x3).
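
    // Note (illustrative example; the header size is hypothetical): for
    // an int[] with a 16-byte header the layout helper would be
    //   (0x3 << 30) | (16 << 16) | (T_INT << 8) | 2
    // i.e. tag = typeArray, header_size = 16, element type T_INT,
    // log2_element_size = 2; the code below unpacks exactly these fields.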
#ifdef ASSERT
    {
      BLOCK_COMMENT("assert primitive array {");
      Label L;
      __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
      __ jcc(Assembler::greaterEqual, L);
      __ stop("must be a primitive array");
      __ bind(L);
      BLOCK_COMMENT("} assert primitive array done");
    }
#endif

    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                           r10, L_failed);

    // TypeArrayKlass
    //
    // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
    // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
    //

    const Register r10_offset = r10;    // array offset
    const Register rax_elsize = rax_lh; // element size

    __ movl(r10_offset, rax_lh);
    __ shrl(r10_offset, Klass::_lh_header_size_shift);
    __ andptr(r10_offset, Klass::_lh_header_size_mask);   // array_offset
    __ addptr(src, r10_offset);           // src array offset
    __ addptr(dst, r10_offset);           // dst array offset
    BLOCK_COMMENT("choose copy loop based on element size");
    __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize

    // next registers should be set before the jump to corresponding stub
    const Register from     = c_rarg0;  // source array address
    const Register to       = c_rarg1;  // destination array address
    const Register count    = c_rarg2;  // elements count

    // 'from', 'to', 'count' registers should be set in such order
    // since they are the same as 'src', 'src_pos', 'dst'.

  __ BIND(L_copy_bytes);
    __ cmpl(rax_elsize, 0);
    __ jccb(Assembler::notEqual, L_copy_shorts);
    __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_1, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(byte_copy_entry));

  __ BIND(L_copy_shorts);
    __ cmpl(rax_elsize, LogBytesPerShort);
    __ jccb(Assembler::notEqual, L_copy_ints);
    __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_2, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(short_copy_entry));

  __ BIND(L_copy_ints);
    __ cmpl(rax_elsize, LogBytesPerInt);
    __ jccb(Assembler::notEqual, L_copy_longs);
    __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_4, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(int_copy_entry));

  __ BIND(L_copy_longs);
#ifdef ASSERT
    {
      BLOCK_COMMENT("assert long copy {");
      Label L;
      __ cmpl(rax_elsize, LogBytesPerLong);
      __ jcc(Assembler::equal, L);
      __ stop("must be long copy, but elsize is wrong");
      __ bind(L);
      BLOCK_COMMENT("} assert long copy done");
    }
#endif
    __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_8, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(long_copy_entry));
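
    // Note (illustrative): rax_elsize is log2 of the element size, so the
    // dispatch above is a four-way decision tree:
    //   0 -> byte copy, 1 -> short copy, 2 -> int copy, 3 -> long copy.
    // For an int[], for instance, src_addr ends up as
    // src + header_size + (src_pos << 2), and count stays the raw element
    // count; the target stub does the rest.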
    // ObjArrayKlass
  __ BIND(L_objArray);
    // live at this point:  r10_src_klass, r11_length, src[_pos], dst[_pos]

    Label L_plain_copy, L_checkcast_copy;
    //  test array classes for subtyping
    __ load_klass(rax, dst);
    __ cmpq(r10_src_klass, rax); // usual case is exact equality
    __ jcc(Assembler::notEqual, L_checkcast_copy);

    // Identically typed arrays can be copied without element-wise checks.
    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                           r10, L_failed);

    __ lea(from, Address(src, src_pos, TIMES_OOP,
                 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
    __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
                 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
    __ movl2ptr(count, r11_length); // length
  __ BIND(L_plain_copy);
    __ jump(RuntimeAddress(oop_copy_entry));

  __ BIND(L_checkcast_copy);
    // live at this point:  r10_src_klass, r11_length, rax (dst_klass)
    {
      // Before looking at dst.length, make sure dst is also an objArray.
      __ cmpl(Address(rax, lh_offset), objArray_lh);
      __ jcc(Assembler::notEqual, L_failed);

      // It is safe to examine both src.length and dst.length.
      arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                             rax, L_failed);

      const Register r11_dst_klass = r11;
      __ load_klass(r11_dst_klass, dst); // reload

      // Marshal the base address arguments now, freeing registers.
      __ lea(from, Address(src, src_pos, TIMES_OOP,
                   arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
                   arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ movl(count, length);           // length (reloaded)
      Register sco_temp = c_rarg3;      // this register is free now
      assert_different_registers(from, to, count, sco_temp,
                                 r11_dst_klass, r10_src_klass);
      assert_clean_int(count, sco_temp);

      // Generate the type check.
      const int sco_offset = in_bytes(Klass::super_check_offset_offset());
      __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
      assert_clean_int(sco_temp, rax);
      generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);

      // Fetch destination element klass from the ObjArrayKlass header.
      int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
      __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
      __ movl(  sco_temp,      Address(r11_dst_klass, sco_offset));
      assert_clean_int(sco_temp, rax);

      // the checkcast_copy loop needs two extra arguments:
      assert(c_rarg3 == sco_temp, "#3 already in place");
      // Set up arguments for checkcast_copy_entry.
      setup_arg_regs(4);
      __ movptr(r8, r11_dst_klass);  // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris
      __ jump(RuntimeAddress(checkcast_copy_entry));
    }
  __ BIND(L_failed);
    __ xorptr(rax, rax);
    __ notptr(rax); // return -1
    __ leave();   // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  void generate_arraycopy_stubs() {
    address entry;
    address entry_jbyte_arraycopy;
    address entry_jshort_arraycopy;
    address entry_jint_arraycopy;
    address entry_oop_arraycopy;
    address entry_jlong_arraycopy;
    address entry_checkcast_arraycopy;

    StubRoutines::_jbyte_disjoint_arraycopy  = generate_disjoint_byte_copy(false, &entry,
                                                                           "jbyte_disjoint_arraycopy");
    StubRoutines::_jbyte_arraycopy           = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy,
                                                                           "jbyte_arraycopy");

    StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry,
                                                                            "jshort_disjoint_arraycopy");
    StubRoutines::_jshort_arraycopy          = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy,
                                                                            "jshort_arraycopy");

    StubRoutines::_jint_disjoint_arraycopy   = generate_disjoint_int_oop_copy(false, false, &entry,
                                                                              "jint_disjoint_arraycopy");
    StubRoutines::_jint_arraycopy            = generate_conjoint_int_oop_copy(false, false, entry,
                                                                              &entry_jint_arraycopy, "jint_arraycopy");

    StubRoutines::_jlong_disjoint_arraycopy  = generate_disjoint_long_oop_copy(false, false, &entry,
                                                                               "jlong_disjoint_arraycopy");
    StubRoutines::_jlong_arraycopy           = generate_conjoint_long_oop_copy(false, false, entry,
                                                                               &entry_jlong_arraycopy, "jlong_arraycopy");
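
    // Note (illustrative): each disjoint generator publishes the address
    // of its post-frame-setup entry through 'entry'; the matching
    // conjoint stub receives that address as nooverlap_target, so its
    // array_overlap_test can tail-jump straight into the disjoint code
    // whenever the operands turn out not to overlap dangerously.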
generate_generic_copy("generic_arraycopy", 2940 entry_jbyte_arraycopy, 2941 entry_jshort_arraycopy, 2942 entry_jint_arraycopy, 2943 entry_oop_arraycopy, 2944 entry_jlong_arraycopy, 2945 entry_checkcast_arraycopy); 2946 2947 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 2948 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 2949 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 2950 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); 2951 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 2952 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 2953 2954 // We don't generate specialized code for HeapWord-aligned source 2955 // arrays, so just use the code we've already generated 2956 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy; 2957 StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy; 2958 2959 StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy; 2960 StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy; 2961 2962 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy; 2963 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; 2964 2965 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy; 2966 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; 2967 2968 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; 2969 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; 2970 2971 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit; 2972 StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit; 2973 } 2974 2975 // AES intrinsic stubs 2976 enum {AESBlockSize = 16}; 2977 2978 address generate_key_shuffle_mask() { 2979 __ align(16); 2980 StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask"); 2981 address start = __ pc(); 2982 __ emit_data64( 0x0405060700010203, relocInfo::none ); 2983 __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none ); 2984 return start; 2985 } 2986 2987 address generate_counter_shuffle_mask() { 2988 __ align(16); 2989 StubCodeMark mark(this, "StubRoutines", "counter_shuffle_mask"); 2990 address start = __ pc(); 2991 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none); 2992 __ emit_data64(0x0001020304050607, relocInfo::none); 2993 return start; 2994 } 2995 2996 // Utility routine for loading a 128-bit key word in little endian format 2997 // can optionally specify that the shuffle mask is already in an xmmregister 2998 void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 2999 __ movdqu(xmmdst, Address(key, offset)); 3000 if (xmm_shuf_mask != NULL) { 3001 __ pshufb(xmmdst, xmm_shuf_mask); 3002 } else { 3003 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3004 } 3005 } 3006 3007 // Utility routine for increase 128bit counter (iv in CTR mode) 3008 void inc_counter(Register reg, XMMRegister xmmdst, int inc_delta, Label& next_block) { 3009 __ pextrq(reg, xmmdst, 0x0); 3010 __ addq(reg, inc_delta); 3011 __ pinsrq(xmmdst, reg, 0x0); 3012 __ jcc(Assembler::carryClear, next_block); // jump if no carry 3013 __ pextrq(reg, xmmdst, 0x01); // Carry 3014 __ addq(reg, 0x01); 3015 __ 
  // Utility routine for incrementing the 128-bit counter (the iv in CTR mode)
  void inc_counter(Register reg, XMMRegister xmmdst, int inc_delta, Label& next_block) {
    __ pextrq(reg, xmmdst, 0x0);
    __ addq(reg, inc_delta);
    __ pinsrq(xmmdst, reg, 0x0);
    __ jcc(Assembler::carryClear, next_block); // jump if no carry
    __ pextrq(reg, xmmdst, 0x01); // Carry
    __ addq(reg, 0x01);
    __ pinsrq(xmmdst, reg, 0x01); // Carry end
  __ BIND(next_block);            // next instruction
  }

  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //
  address generate_aescrypt_encryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
    Label L_doLast;
    address start = __ pc();

    const Register from        = c_rarg0;  // source array address
    const Register to          = c_rarg1;  // destination array address
    const Register key         = c_rarg2;  // key array address
    const Register keylen      = rax;

    const XMMRegister xmm_result = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;
    // On win64 xmm6-xmm15 must be preserved so don't use them.
    const XMMRegister xmm_temp1  = xmm2;
    const XMMRegister xmm_temp2  = xmm3;
    const XMMRegister xmm_temp3  = xmm4;
    const XMMRegister xmm_temp4  = xmm5;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    __ movdqu(xmm_result, Address(from, 0));  // get 16 bytes of input

    // For encryption, the java expanded key ordering is just what we need
    // we don't know if the key is aligned, hence not using load-execute form

    load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask);
    __ pxor(xmm_result, xmm_temp1);

    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);
    __ aesenc(xmm_result, xmm_temp3);
    __ aesenc(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);
    __ aesenc(xmm_result, xmm_temp3);
    __ aesenc(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);

    __ cmpl(keylen, 44);
    __ jccb(Assembler::equal, L_doLast);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);

    __ cmpl(keylen, 52);
    __ jccb(Assembler::equal, L_doLast);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);

  __ BIND(L_doLast);
    __ aesenc(xmm_result, xmm_temp1);
    __ aesenclast(xmm_result, xmm_temp2);
    __ movdqu(Address(to, 0), xmm_result);  // store the result
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
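
  // Note (illustrative): the keylen compares encode the AES round count.
  // An expanded key has 4 * (rounds + 1) ints: 44 -> 10 rounds (AES-128),
  // 52 -> 12 (AES-192), 60 -> 14 (AES-256).  Hence the two early exits
  // above: AES-128 finishes with keys 0x90/0xa0, AES-192 with 0xb0/0xc0,
  // and AES-256 falls through to 0xd0/0xe0 before L_doLast.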

  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //
  address generate_aescrypt_decryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
    Label L_doLast;
    address start = __ pc();

    const Register from        = c_rarg0;  // source array address
    const Register to          = c_rarg1;  // destination array address
    const Register key         = c_rarg2;  // key array address
    const Register keylen      = rax;

    const XMMRegister xmm_result = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;
    // On win64 xmm6-xmm15 must be preserved so don't use them.
    const XMMRegister xmm_temp1  = xmm2;
    const XMMRegister xmm_temp2  = xmm3;
    const XMMRegister xmm_temp3  = xmm4;
    const XMMRegister xmm_temp4  = xmm5;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode
    // On EVEX without VL and BW, these instructions will all be AVX.
    if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    __ movdqu(xmm_result, Address(from, 0));

    // for decryption, the java expanded key ordering is rotated one position from what we want
    // so we start from 0x10 here and hit 0x00 last
    // we don't know if the key is aligned, hence not using load-execute form
    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);

    __ pxor  (xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);
    __ aesdec(xmm_result, xmm_temp3);
    __ aesdec(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);
    __ aesdec(xmm_result, xmm_temp3);
    __ aesdec(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask);

    __ cmpl(keylen, 44);
    __ jccb(Assembler::equal, L_doLast);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);

    __ cmpl(keylen, 52);
    __ jccb(Assembler::equal, L_doLast);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);

  __ BIND(L_doLast);
    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    // for decryption the aesdeclast operation is always on key+0x00
    __ aesdeclast(xmm_result, xmm_temp3);
    __ movdqu(Address(to, 0), xmm_result);  // store the result
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - r vector byte array address
  //   c_rarg4   - input length
  //
  // Output:
  //   rax       - input length
  //
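  // Note (illustrative): this is standard CBC encryption,
  //   C_0 = IV,  C_i = AES_encrypt(K, P_i ^ C_{i-1}),
  // which is why xmm_result doubles as the running r vector and the
  // current ciphertext block, and why the loops below are inherently
  // serial: each block's input depends on the previous block's output.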
3273 if (VM_Version::supports_avx512vlbw()) { 3274 __ movl(rax, 0xffff); 3275 __ kmovql(k1, rax); 3276 } 3277 3278 #ifdef _WIN64 3279 // on win64, fill len_reg from stack position 3280 __ movl(len_reg, len_mem); 3281 // save the xmm registers which must be preserved 6-15 3282 __ subptr(rsp, -rsp_after_call_off * wordSize); 3283 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3284 __ movdqu(xmm_save(i), as_XMMRegister(i)); 3285 } 3286 #else 3287 __ push(len_reg); // Save 3288 #endif 3289 3290 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 3291 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3292 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0 3293 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) { 3294 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3295 offset += 0x10; 3296 } 3297 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 3298 3299 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3300 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3301 __ cmpl(rax, 44); 3302 __ jcc(Assembler::notEqual, L_key_192_256); 3303 3304 // 128 bit code follows here 3305 __ movptr(pos, 0); 3306 __ align(OptoLoopAlignment); 3307 3308 __ BIND(L_loopTop_128); 3309 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3310 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3311 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3312 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) { 3313 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3314 } 3315 __ aesenclast(xmm_result, xmm_key10); 3316 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3317 // no need to store r to memory until we exit 3318 __ addptr(pos, AESBlockSize); 3319 __ subptr(len_reg, AESBlockSize); 3320 __ jcc(Assembler::notEqual, L_loopTop_128); 3321 3322 __ BIND(L_exit); 3323 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object 3324 3325 #ifdef _WIN64 3326 // restore xmm regs belonging to calling function 3327 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3328 __ movdqu(as_XMMRegister(i), xmm_save(i)); 3329 } 3330 __ movl(rax, len_mem); 3331 #else 3332 __ pop(rax); // return length 3333 #endif 3334 __ leave(); // required for proper stackwalking of RuntimeStub frame 3335 __ ret(0); 3336 3337 __ BIND(L_key_192_256); 3338 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 3339 load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask); 3340 load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask); 3341 __ cmpl(rax, 52); 3342 __ jcc(Assembler::notEqual, L_key_256); 3343 3344 // 192-bit code follows here (could be changed to use more xmm registers) 3345 __ movptr(pos, 0); 3346 __ align(OptoLoopAlignment); 3347 3348 __ BIND(L_loopTop_192); 3349 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3350 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3351 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3352 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) { 3353 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3354 } 3355 __ aesenclast(xmm_result, 
xmm_key12);
    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);   // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_loopTop_192);
    __ jmp(L_exit);

    __ BIND(L_key_256);
    // 256-bit code follows here (could be changed to use more xmm registers)
    load_key(xmm_key13, key, 0xd0);
    __ movptr(pos, 0);
    __ align(OptoLoopAlignment);

    __ BIND(L_loopTop_256);
    __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
    __ pxor  (xmm_result, xmm_temp);                                // xor with the current r vector
    __ pxor  (xmm_result, xmm_key0);                                // do the aes rounds
    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) {
      __ aesenc(xmm_result, as_XMMRegister(rnum));
    }
    load_key(xmm_temp, key, 0xe0);
    __ aesenclast(xmm_result, xmm_temp);
    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);   // store into the next 16 bytes of output
    // no need to store r to memory until we exit
    __ addptr(pos, AESBlockSize);
    __ subptr(len_reg, AESBlockSize);
    __ jcc(Assembler::notEqual, L_loopTop_256);
    __ jmp(L_exit);

    return start;
  }

  // Safefetch stubs.
  void generate_safefetch(const char* name, int size, address* entry,
                          address* fault_pc, address* continuation_pc) {
    // safefetch signatures:
    //   int      SafeFetch32(int*      adr, int      errValue);
    //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
    //
    // arguments:
    //   c_rarg0 = adr
    //   c_rarg1 = errValue
    //
    // result:
    //   rax = *adr or errValue

    StubCodeMark mark(this, "StubRoutines", name);

    // Entry point, pc or function descriptor.
    *entry = __ pc();

    // Load *adr into c_rarg1, may fault.
3408 *fault_pc = __ pc(); 3409 switch (size) { 3410 case 4: 3411 // int32_t 3412 __ movl(c_rarg1, Address(c_rarg0, 0)); 3413 break; 3414 case 8: 3415 // int64_t 3416 __ movq(c_rarg1, Address(c_rarg0, 0)); 3417 break; 3418 default: 3419 ShouldNotReachHere(); 3420 } 3421 3422 // return errValue or *adr 3423 *continuation_pc = __ pc(); 3424 __ movq(rax, c_rarg1); 3425 __ ret(0); 3426 } 3427 3428 // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time 3429 // to hide instruction latency 3430 // 3431 // Arguments: 3432 // 3433 // Inputs: 3434 // c_rarg0 - source byte array address 3435 // c_rarg1 - destination byte array address 3436 // c_rarg2 - K (key) in little endian int array 3437 // c_rarg3 - r vector byte array address 3438 // c_rarg4 - input length 3439 // 3440 // Output: 3441 // rax - input length 3442 // 3443 address generate_cipherBlockChaining_decryptAESCrypt_Parallel() { 3444 assert(UseAES, "need AES instructions and misaligned SSE support"); 3445 __ align(CodeEntryAlignment); 3446 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 3447 address start = __ pc(); 3448 3449 const Register from = c_rarg0; // source array address 3450 const Register to = c_rarg1; // destination array address 3451 const Register key = c_rarg2; // key array address 3452 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3453 // and left with the results of the last encryption block 3454 #ifndef _WIN64 3455 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3456 #else 3457 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3458 const Register len_reg = r10; // pick the first volatile windows register 3459 #endif 3460 const Register pos = rax; 3461 3462 const int PARALLEL_FACTOR = 4; 3463 const int ROUNDS[3] = { 10, 12, 14 }; // aes rounds for key128, key192, key256 3464 3465 Label L_exit; 3466 Label L_singleBlock_loopTopHead[3]; // 128, 192, 256 3467 Label L_singleBlock_loopTopHead2[3]; // 128, 192, 256 3468 Label L_singleBlock_loopTop[3]; // 128, 192, 256 3469 Label L_multiBlock_loopTopHead[3]; // 128, 192, 256 3470 Label L_multiBlock_loopTop[3]; // 128, 192, 256 3471 3472 // keys 0-10 preloaded into xmm5-xmm15 3473 const int XMM_REG_NUM_KEY_FIRST = 5; 3474 const int XMM_REG_NUM_KEY_LAST = 15; 3475 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3476 const XMMRegister xmm_key_last = as_XMMRegister(XMM_REG_NUM_KEY_LAST); 3477 3478 __ enter(); // required for proper stackwalking of RuntimeStub frame 3479 3480 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 3481 // context for the registers used, where all instructions below are using 128-bit mode 3482 // On EVEX without VL and BW, these instructions will all be AVX. 
3483 if (VM_Version::supports_avx512vlbw()) { 3484 __ movl(rax, 0xffff); 3485 __ kmovql(k1, rax); 3486 } 3487 3488 #ifdef _WIN64 3489 // on win64, fill len_reg from stack position 3490 __ movl(len_reg, len_mem); 3491 // save the xmm registers which must be preserved 6-15 3492 __ subptr(rsp, -rsp_after_call_off * wordSize); 3493 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3494 __ movdqu(xmm_save(i), as_XMMRegister(i)); 3495 } 3496 #else 3497 __ push(len_reg); // Save 3498 #endif 3499 __ push(rbx); 3500 // the java expanded key ordering is rotated one position from what we want 3501 // so we start from 0x10 here and hit 0x00 last 3502 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 3503 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3504 // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00 3505 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) { 3506 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3507 offset += 0x10; 3508 } 3509 load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask); 3510 3511 const XMMRegister xmm_prev_block_cipher = xmm1; // holds cipher of previous block 3512 3513 // registers holding the four results in the parallelized loop 3514 const XMMRegister xmm_result0 = xmm0; 3515 const XMMRegister xmm_result1 = xmm2; 3516 const XMMRegister xmm_result2 = xmm3; 3517 const XMMRegister xmm_result3 = xmm4; 3518 3519 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec 3520 3521 __ xorptr(pos, pos); 3522 3523 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3524 __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3525 __ cmpl(rbx, 52); 3526 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[1]); 3527 __ cmpl(rbx, 60); 3528 __ jcc(Assembler::equal, L_multiBlock_loopTopHead[2]); 3529 3530 #define DoFour(opc, src_reg) \ 3531 __ opc(xmm_result0, src_reg); \ 3532 __ opc(xmm_result1, src_reg); \ 3533 __ opc(xmm_result2, src_reg); \ 3534 __ opc(xmm_result3, src_reg); \ 3535 3536 for (int k = 0; k < 3; ++k) { 3537 __ BIND(L_multiBlock_loopTopHead[k]); 3538 if (k != 0) { 3539 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left 3540 __ jcc(Assembler::less, L_singleBlock_loopTopHead2[k]); 3541 } 3542 if (k == 1) { 3543 __ subptr(rsp, 6 * wordSize); 3544 __ movdqu(Address(rsp, 0), xmm15); //save last_key from xmm15 3545 load_key(xmm15, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0 3546 __ movdqu(Address(rsp, 2 * wordSize), xmm15); 3547 load_key(xmm1, key, 0xc0); // 0xc0; 3548 __ movdqu(Address(rsp, 4 * wordSize), xmm1); 3549 } else if (k == 2) { 3550 __ subptr(rsp, 10 * wordSize); 3551 __ movdqu(Address(rsp, 0), xmm15); //save last_key from xmm15 3552 load_key(xmm15, key, 0xd0); // 0xd0; 256-bit key goes upto 0xe0 3553 __ movdqu(Address(rsp, 6 * wordSize), xmm15); 3554 load_key(xmm1, key, 0xe0); // 0xe0; 3555 __ movdqu(Address(rsp, 8 * wordSize), xmm1); 3556 load_key(xmm15, key, 0xb0); // 0xb0; 3557 __ movdqu(Address(rsp, 2 * wordSize), xmm15); 3558 load_key(xmm1, key, 0xc0); // 0xc0; 3559 __ movdqu(Address(rsp, 4 * wordSize), xmm1); 3560 } 3561 __ align(OptoLoopAlignment); 3562 __ BIND(L_multiBlock_loopTop[k]); 3563 __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least 4 blocks left 3564 __ jcc(Assembler::less, 
L_singleBlock_loopTopHead[k]); 3565 3566 if (k != 0) { 3567 __ movdqu(xmm15, Address(rsp, 2 * wordSize)); 3568 __ movdqu(xmm1, Address(rsp, 4 * wordSize)); 3569 } 3570 3571 __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0 * AESBlockSize)); // get next 4 blocks into xmmresult registers 3572 __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 3573 __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 3574 __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3 * AESBlockSize)); 3575 3576 DoFour(pxor, xmm_key_first); 3577 if (k == 0) { 3578 for (int rnum = 1; rnum < ROUNDS[k]; rnum++) { 3579 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3580 } 3581 DoFour(aesdeclast, xmm_key_last); 3582 } else if (k == 1) { 3583 for (int rnum = 1; rnum <= ROUNDS[k]-2; rnum++) { 3584 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3585 } 3586 __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again. 3587 DoFour(aesdec, xmm1); // key : 0xc0 3588 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again 3589 DoFour(aesdeclast, xmm_key_last); 3590 } else if (k == 2) { 3591 for (int rnum = 1; rnum <= ROUNDS[k] - 4; rnum++) { 3592 DoFour(aesdec, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST)); 3593 } 3594 DoFour(aesdec, xmm1); // key : 0xc0 3595 __ movdqu(xmm15, Address(rsp, 6 * wordSize)); 3596 __ movdqu(xmm1, Address(rsp, 8 * wordSize)); 3597 DoFour(aesdec, xmm15); // key : 0xd0 3598 __ movdqu(xmm_key_last, Address(rsp, 0)); // xmm15 needs to be loaded again. 3599 DoFour(aesdec, xmm1); // key : 0xe0 3600 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // xmm1 needs to be loaded again 3601 DoFour(aesdeclast, xmm_key_last); 3602 } 3603 3604 // for each result, xor with the r vector of previous cipher block 3605 __ pxor(xmm_result0, xmm_prev_block_cipher); 3606 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0 * AESBlockSize)); 3607 __ pxor(xmm_result1, xmm_prev_block_cipher); 3608 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1 * AESBlockSize)); 3609 __ pxor(xmm_result2, xmm_prev_block_cipher); 3610 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2 * AESBlockSize)); 3611 __ pxor(xmm_result3, xmm_prev_block_cipher); 3612 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3 * AESBlockSize)); // this will carry over to next set of blocks 3613 if (k != 0) { 3614 __ movdqu(Address(rvec, 0x00), xmm_prev_block_cipher); 3615 } 3616 3617 __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output 3618 __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1); 3619 __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2); 3620 __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3); 3621 3622 __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); 3623 __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); 3624 __ jmp(L_multiBlock_loopTop[k]); 3625 3626 // registers used in the non-parallelized loops 3627 // xmm register assignments for the loops below 3628 const XMMRegister xmm_result = xmm0; 3629 const XMMRegister xmm_prev_block_cipher_save = xmm2; 3630 const XMMRegister xmm_key11 = xmm3; 3631 const XMMRegister xmm_key12 = xmm4; 3632 const XMMRegister key_tmp = xmm4; 3633 3634 __ BIND(L_singleBlock_loopTopHead[k]); 3635 if (k == 1) { 3636 __ addptr(rsp, 6 * 
wordSize);
      } else if (k == 2) {
        __ addptr(rsp, 10 * wordSize);
      }
      __ cmpptr(len_reg, 0); // any blocks left?
      __ jcc(Assembler::equal, L_exit);
      __ BIND(L_singleBlock_loopTopHead2[k]);
      if (k == 1) {
        load_key(xmm_key11, key, 0xb0); // 0xb0; 192-bit key goes up to 0xc0
        load_key(xmm_key12, key, 0xc0); // 0xc0; 192-bit key goes up to 0xc0
      }
      if (k == 2) {
        load_key(xmm_key11, key, 0xb0); // 0xb0; 256-bit key goes up to 0xe0
      }
      __ align(OptoLoopAlignment);
      __ BIND(L_singleBlock_loopTop[k]);
      __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
      __ movdqa(xmm_prev_block_cipher_save, xmm_result);              // save for next r vector
      __ pxor(xmm_result, xmm_key_first);                             // do the aes dec rounds
      for (int rnum = 1; rnum <= 9; rnum++) {
        __ aesdec(xmm_result, as_XMMRegister(rnum + XMM_REG_NUM_KEY_FIRST));
      }
      if (k == 1) {
        __ aesdec(xmm_result, xmm_key11);
        __ aesdec(xmm_result, xmm_key12);
      }
      if (k == 2) {
        __ aesdec(xmm_result, xmm_key11);
        load_key(key_tmp, key, 0xc0);
        __ aesdec(xmm_result, key_tmp);
        load_key(key_tmp, key, 0xd0);
        __ aesdec(xmm_result, key_tmp);
        load_key(key_tmp, key, 0xe0);
        __ aesdec(xmm_result, key_tmp);
      }

      __ aesdeclast(xmm_result, xmm_key_last);           // xmm15 always came from key+0
      __ pxor(xmm_result, xmm_prev_block_cipher);        // xor with the current r vector
      __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output
      // no need to store r to memory until we exit
      __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block
      __ addptr(pos, AESBlockSize);
      __ subptr(len_reg, AESBlockSize);
      __ jcc(Assembler::notEqual, L_singleBlock_loopTop[k]);
      if (k != 2) {
        __ jmp(L_exit);
      }
    } // for 128/192/256

    __ BIND(L_exit);
    __ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object
    __ pop(rbx);
#ifdef _WIN64
    // restore regs belonging to calling function
    for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
      __ movdqu(as_XMMRegister(i), xmm_save(i));
    }
    __ movl(rax, len_mem);
#else
    __ pop(rax); // return length
#endif
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);
    return start;
  }

  address generate_upper_word_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "upper_word_mask");
    address start = __ pc();
    __ emit_data64(0x0000000000000000, relocInfo::none);
    __ emit_data64(0xFFFFFFFF00000000, relocInfo::none);
    return start;
  }

  address generate_shuffle_byte_flip_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "shuffle_byte_flip_mask");
    address start = __ pc();
    __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none);
    __ emit_data64(0x0001020304050607, relocInfo::none);
    return start;
  }

  // ofs and limit are used for multi-block byte array.
// int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
  address generate_sha1_implCompress(bool multi_block, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf   = c_rarg0;
    Register state = c_rarg1;
    Register ofs   = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister abcd = xmm0;
    const XMMRegister e0   = xmm1;
    const XMMRegister e1   = xmm2;
    const XMMRegister msg0 = xmm3;

    const XMMRegister msg1 = xmm4;
    const XMMRegister msg2 = xmm5;
    const XMMRegister msg3 = xmm6;
    const XMMRegister shuf_mask = xmm7;

    __ enter();

#ifdef _WIN64
    // save the xmm registers which must be preserved 6-7
    __ subptr(rsp, 4 * wordSize);
    __ movdqu(Address(rsp, 0), xmm6);
    __ movdqu(Address(rsp, 2 * wordSize), xmm7);
#endif

    __ subptr(rsp, 4 * wordSize);

    __ fast_sha1(abcd, e0, e1, msg0, msg1, msg2, msg3, shuf_mask,
                 buf, state, ofs, limit, rsp, multi_block);

    __ addptr(rsp, 4 * wordSize);
#ifdef _WIN64
    // restore xmm regs belonging to calling function
    __ movdqu(xmm6, Address(rsp, 0));
    __ movdqu(xmm7, Address(rsp, 2 * wordSize));
    __ addptr(rsp, 4 * wordSize);
#endif

    __ leave();
    __ ret(0);
    return start;
  }

  address generate_pshuffle_byte_flip_mask() {
    __ align(64);
    StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask");
    address start = __ pc();
    __ emit_data64(0x0405060700010203, relocInfo::none);
    __ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
    return start;
  }

  // ofs and limit are used for multi-block byte array.
// int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
  address generate_sha256_implCompress(bool multi_block, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Register buf   = c_rarg0;
    Register state = c_rarg1;
    Register ofs   = c_rarg2;
    Register limit = c_rarg3;

    const XMMRegister msg     = xmm0;
    const XMMRegister state0  = xmm1;
    const XMMRegister state1  = xmm2;
    const XMMRegister msgtmp0 = xmm3;

    const XMMRegister msgtmp1 = xmm4;
    const XMMRegister msgtmp2 = xmm5;
    const XMMRegister msgtmp3 = xmm6;
    const XMMRegister msgtmp4 = xmm7;

    const XMMRegister shuf_mask = xmm8;

    __ enter();
#ifdef _WIN64
    // save the xmm registers which must be preserved 6-8
    __ subptr(rsp, 6 * wordSize);
    __ movdqu(Address(rsp, 0), xmm6);
    __ movdqu(Address(rsp, 2 * wordSize), xmm7);
    __ movdqu(Address(rsp, 4 * wordSize), xmm8);
#endif

    __ subptr(rsp, 4 * wordSize);

    __ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
                   buf, state, ofs, limit, rsp, multi_block, shuf_mask);

    __ addptr(rsp, 4 * wordSize);
#ifdef _WIN64
    // restore xmm regs belonging to calling function
    __ movdqu(xmm6, Address(rsp, 0));
    __ movdqu(xmm7, Address(rsp, 2 * wordSize));
    __ movdqu(xmm8, Address(rsp, 4 * wordSize));
    __ addptr(rsp, 6 * wordSize);
#endif
    __ leave();
    __ ret(0);
    return start;
  }

  // This is a version of CTR/AES crypt which does 6 blocks in a loop at a time
  // to hide instruction latency
  //
  // Arguments:
  //
  // Inputs:
  //   c_rarg0   - source byte array address
  //   c_rarg1   - destination byte array address
  //   c_rarg2   - K (key) in little endian int array
  //   c_rarg3   - counter vector byte array address
  //   Linux
  //     c_rarg4            - input length
  //     c_rarg5            - saved encryptedCounter start
  //     rbp + 6 * wordSize - saved used length
  //   Windows
  //     rbp + 6 * wordSize - input length
  //     rbp + 7 * wordSize - saved encryptedCounter start
  //     rbp + 8 * wordSize - saved used length
  //
  // Output:
  //   rax       - input length
  //
  address generate_counterMode_AESCrypt_Parallel() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt");
    address start = __ pc();
    const Register from    = c_rarg0; // source array address
    const Register to      = c_rarg1; // destination array address
    const Register key     = c_rarg2; // key array address
    const Register counter = c_rarg3; // counter byte array initialized from counter array address
                                      // and updated with the incremented counter in the end
#ifndef _WIN64
    const Register len_reg = c_rarg4;
    const Register saved_encCounter_start = c_rarg5;
    const Register used_addr = r10;
    const Address  used_mem(rbp, 2 * wordSize);
    const Register used = r11;
#else
    const Address len_mem(rbp, 6 * wordSize);              // length is on stack on Win64
    const Address saved_encCounter_mem(rbp, 7 * wordSize); // saved encrypted counter is on stack on Win64
    const Address used_mem(rbp, 8 * wordSize);             // used length is on stack on Win64
    const Register len_reg = r10; // pick the first volatile windows register
    const Register
saved_encCounter_start = r11;
    const Register used_addr = r13;
    const Register used = r14;
#endif
    const Register pos = rax;

    const int PARALLEL_FACTOR = 6;
    const XMMRegister xmm_counter_shuf_mask = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front
    const XMMRegister xmm_curr_counter = xmm2;

    const XMMRegister xmm_key_tmp0 = xmm3;
    const XMMRegister xmm_key_tmp1 = xmm4;

    // registers holding the six results in the parallelized loop
    const XMMRegister xmm_result0 = xmm5;
    const XMMRegister xmm_result1 = xmm6;
    const XMMRegister xmm_result2 = xmm7;
    const XMMRegister xmm_result3 = xmm8;
    const XMMRegister xmm_result4 = xmm9;
    const XMMRegister xmm_result5 = xmm10;

    const XMMRegister xmm_from0 = xmm11;
    const XMMRegister xmm_from1 = xmm12;
    const XMMRegister xmm_from2 = xmm13;
    const XMMRegister xmm_from3 = xmm14; // the last one is xmm14; we have to preserve it on WIN64
    const XMMRegister xmm_from4 = xmm3;  // reuse xmm3-xmm4: xmm_key_tmp0/1 are no longer needed once the input text is loaded
    const XMMRegister xmm_from5 = xmm4;

    // for key_128, key_192, key_256
    const int rounds[3] = {10, 12, 14};
    Label L_exit_preLoop, L_preLoop_start;
    Label L_multiBlock_loopTop[3];
    Label L_singleBlockLoopTop[3];
    Label L__incCounter[3][6];     // for 6 blocks
    Label L__incCounter_single[3]; // for single block, key128, key192, key256
    Label L_processTail_insr[3], L_processTail_4_insr[3], L_processTail_2_insr[3], L_processTail_1_insr[3], L_processTail_exit_insr[3];
    Label L_processTail_extr[3], L_processTail_4_extr[3], L_processTail_2_extr[3], L_processTail_1_extr[3], L_processTail_exit_extr[3];

    Label L_exit;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
    // context for the registers used, where all instructions below are using 128-bit mode
    // On EVEX without VL and BW, these instructions will all be AVX.
if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

#ifdef _WIN64
    // save the xmm registers which must be preserved 6-14
    const int XMM_REG_NUM_KEY_LAST = 14;
    __ subptr(rsp, -rsp_after_call_off * wordSize);
    for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
      __ movdqu(xmm_save(i), as_XMMRegister(i));
    }

    const Address r13_save(rbp, rdi_off * wordSize);
    const Address r14_save(rbp, rsi_off * wordSize);

    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);

    // on win64, fill len_reg from stack position
    __ movl(len_reg, len_mem);
    __ movptr(saved_encCounter_start, saved_encCounter_mem);
    __ movptr(used_addr, used_mem);
    __ movl(used, Address(used_addr, 0));
#else
    __ push(len_reg); // Save
    __ movptr(used_addr, used_mem);
    __ movl(used, Address(used_addr, 0));
#endif

    __ push(rbx); // Save RBX
    __ movdqu(xmm_curr_counter, Address(counter, 0x00)); // initialize counter with initial counter
    __ movdqu(xmm_counter_shuf_mask, ExternalAddress(StubRoutines::x86::counter_shuffle_mask_addr()));
    __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); // counter is shuffled
    __ movptr(pos, 0);

    // Use the partially used encrypted counter from the last invocation
    __ BIND(L_preLoop_start);
    __ cmpptr(used, 16);
    __ jcc(Assembler::aboveEqual, L_exit_preLoop);
    __ cmpptr(len_reg, 0);
    __ jcc(Assembler::lessEqual, L_exit_preLoop);
    __ movb(rbx, Address(saved_encCounter_start, used));
    __ xorb(rbx, Address(from, pos));
    __ movb(Address(to, pos), rbx);
    __ addptr(pos, 1);
    __ addptr(used, 1);
    __ subptr(len_reg, 1);

    __ jmp(L_preLoop_start);

    __ BIND(L_exit_preLoop);
    __ movl(Address(used_addr, 0), used);

    // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    __ movl(rbx, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
    __ cmpl(rbx, 52);
    __ jcc(Assembler::equal, L_multiBlock_loopTop[1]);
    __ cmpl(rbx, 60);
    __ jcc(Assembler::equal, L_multiBlock_loopTop[2]);

#define CTR_DoSix(opc, src_reg)      \
    __ opc(xmm_result0, src_reg);    \
    __ opc(xmm_result1, src_reg);    \
    __ opc(xmm_result2, src_reg);    \
    __ opc(xmm_result3, src_reg);    \
    __ opc(xmm_result4, src_reg);    \
    __ opc(xmm_result5, src_reg);

    // k == 0 : generate code for key_128
    // k == 1 : generate code for key_192
    // k == 2 : generate code for key_256
    for (int k = 0; k < 3; ++k) {
      // the multi-block loop starts here
      __ align(OptoLoopAlignment);
      __ BIND(L_multiBlock_loopTop[k]);
      __ cmpptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // see if at least PARALLEL_FACTOR blocks left
      __ jcc(Assembler::less, L_singleBlockLoopTop[k]);
      load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);

      // load, then increase counters
      CTR_DoSix(movdqa, xmm_curr_counter);
      inc_counter(rbx, xmm_result1, 0x01, L__incCounter[k][0]);
      inc_counter(rbx, xmm_result2, 0x02, L__incCounter[k][1]);
      inc_counter(rbx, xmm_result3, 0x03, L__incCounter[k][2]);
      inc_counter(rbx, xmm_result4, 0x04, L__incCounter[k][3]);
      inc_counter(rbx, xmm_result5, 0x05, L__incCounter[k][4]);
      inc_counter(rbx, xmm_curr_counter, 0x06, L__incCounter[k][5]);
      CTR_DoSix(pshufb,
xmm_counter_shuf_mask); // after the increment, shuffle the counters back for PXOR
      CTR_DoSix(pxor, xmm_key_tmp0);  // PXOR with Round 0 key

      // load two ROUND_KEYs at a time
      for (int i = 1; i < rounds[k]; ) {
        load_key(xmm_key_tmp1, key, (0x10 * i), xmm_key_shuf_mask);
        load_key(xmm_key_tmp0, key, (0x10 * (i+1)), xmm_key_shuf_mask);
        CTR_DoSix(aesenc, xmm_key_tmp1);
        i++;
        if (i != rounds[k]) {
          CTR_DoSix(aesenc, xmm_key_tmp0);
        } else {
          CTR_DoSix(aesenclast, xmm_key_tmp0);
        }
        i++;
      }

      // get next PARALLEL_FACTOR blocks into xmm_result registers
      __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ movdqu(xmm_from1, Address(from, pos, Address::times_1, 1 * AESBlockSize));
      __ movdqu(xmm_from2, Address(from, pos, Address::times_1, 2 * AESBlockSize));
      __ movdqu(xmm_from3, Address(from, pos, Address::times_1, 3 * AESBlockSize));
      __ movdqu(xmm_from4, Address(from, pos, Address::times_1, 4 * AESBlockSize));
      __ movdqu(xmm_from5, Address(from, pos, Address::times_1, 5 * AESBlockSize));

      __ pxor(xmm_result0, xmm_from0);
      __ pxor(xmm_result1, xmm_from1);
      __ pxor(xmm_result2, xmm_from2);
      __ pxor(xmm_result3, xmm_from3);
      __ pxor(xmm_result4, xmm_from4);
      __ pxor(xmm_result5, xmm_from5);

      // store 6 results into the next 96 bytes of output
      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
      __ movdqu(Address(to, pos, Address::times_1, 1 * AESBlockSize), xmm_result1);
      __ movdqu(Address(to, pos, Address::times_1, 2 * AESBlockSize), xmm_result2);
      __ movdqu(Address(to, pos, Address::times_1, 3 * AESBlockSize), xmm_result3);
      __ movdqu(Address(to, pos, Address::times_1, 4 * AESBlockSize), xmm_result4);
      __ movdqu(Address(to, pos, Address::times_1, 5 * AESBlockSize), xmm_result5);

      __ addptr(pos, PARALLEL_FACTOR * AESBlockSize); // advance past the 6 blocks just processed
      __ subptr(len_reg, PARALLEL_FACTOR * AESBlockSize); // decrease the remaining length
      __ jmp(L_multiBlock_loopTop[k]);

      // singleBlock starts here
      __ align(OptoLoopAlignment);
      __ BIND(L_singleBlockLoopTop[k]);
      __ cmpptr(len_reg, 0);
      __ jcc(Assembler::lessEqual, L_exit);
      load_key(xmm_key_tmp0, key, 0x00, xmm_key_shuf_mask);
      __ movdqa(xmm_result0, xmm_curr_counter);
      inc_counter(rbx, xmm_curr_counter, 0x01, L__incCounter_single[k]);
      __ pshufb(xmm_result0, xmm_counter_shuf_mask);
      __ pxor(xmm_result0, xmm_key_tmp0);
      for (int i = 1; i < rounds[k]; i++) {
        load_key(xmm_key_tmp0, key, (0x10 * i), xmm_key_shuf_mask);
        __ aesenc(xmm_result0, xmm_key_tmp0);
      }
      load_key(xmm_key_tmp0, key, (rounds[k] * 0x10), xmm_key_shuf_mask);
      __ aesenclast(xmm_result0, xmm_key_tmp0);
      __ cmpptr(len_reg, AESBlockSize);
      __ jcc(Assembler::less, L_processTail_insr[k]);
      __ movdqu(xmm_from0, Address(from, pos, Address::times_1, 0 * AESBlockSize));
      __ pxor(xmm_result0, xmm_from0);
      __ movdqu(Address(to, pos, Address::times_1, 0 * AESBlockSize), xmm_result0);
      __ addptr(pos, AESBlockSize);
      __ subptr(len_reg, AESBlockSize);
      __ jmp(L_singleBlockLoopTop[k]);
      __ BIND(L_processTail_insr[k]); // Process the tail part of the input array
      __ addptr(pos, len_reg); // 1.
Insert bytes from src array into xmm_from0 register 4077 __ testptr(len_reg, 8); 4078 __ jcc(Assembler::zero, L_processTail_4_insr[k]); 4079 __ subptr(pos,8); 4080 __ pinsrq(xmm_from0, Address(from, pos), 0); 4081 __ BIND(L_processTail_4_insr[k]); 4082 __ testptr(len_reg, 4); 4083 __ jcc(Assembler::zero, L_processTail_2_insr[k]); 4084 __ subptr(pos,4); 4085 __ pslldq(xmm_from0, 4); 4086 __ pinsrd(xmm_from0, Address(from, pos), 0); 4087 __ BIND(L_processTail_2_insr[k]); 4088 __ testptr(len_reg, 2); 4089 __ jcc(Assembler::zero, L_processTail_1_insr[k]); 4090 __ subptr(pos, 2); 4091 __ pslldq(xmm_from0, 2); 4092 __ pinsrw(xmm_from0, Address(from, pos), 0); 4093 __ BIND(L_processTail_1_insr[k]); 4094 __ testptr(len_reg, 1); 4095 __ jcc(Assembler::zero, L_processTail_exit_insr[k]); 4096 __ subptr(pos, 1); 4097 __ pslldq(xmm_from0, 1); 4098 __ pinsrb(xmm_from0, Address(from, pos), 0); 4099 __ BIND(L_processTail_exit_insr[k]); 4100 4101 __ movdqu(Address(saved_encCounter_start, 0), xmm_result0); // 2. Perform pxor of the encrypted counter and plaintext Bytes. 4102 __ pxor(xmm_result0, xmm_from0); // Also the encrypted counter is saved for next invocation. 4103 4104 __ testptr(len_reg, 8); 4105 __ jcc(Assembler::zero, L_processTail_4_extr[k]); // 3. Extract bytes from xmm_result0 into the dest. array 4106 __ pextrq(Address(to, pos), xmm_result0, 0); 4107 __ psrldq(xmm_result0, 8); 4108 __ addptr(pos, 8); 4109 __ BIND(L_processTail_4_extr[k]); 4110 __ testptr(len_reg, 4); 4111 __ jcc(Assembler::zero, L_processTail_2_extr[k]); 4112 __ pextrd(Address(to, pos), xmm_result0, 0); 4113 __ psrldq(xmm_result0, 4); 4114 __ addptr(pos, 4); 4115 __ BIND(L_processTail_2_extr[k]); 4116 __ testptr(len_reg, 2); 4117 __ jcc(Assembler::zero, L_processTail_1_extr[k]); 4118 __ pextrw(Address(to, pos), xmm_result0, 0); 4119 __ psrldq(xmm_result0, 2); 4120 __ addptr(pos, 2); 4121 __ BIND(L_processTail_1_extr[k]); 4122 __ testptr(len_reg, 1); 4123 __ jcc(Assembler::zero, L_processTail_exit_extr[k]); 4124 __ pextrb(Address(to, pos), xmm_result0, 0); 4125 4126 __ BIND(L_processTail_exit_extr[k]); 4127 __ movl(Address(used_addr, 0), len_reg); 4128 __ jmp(L_exit); 4129 4130 } 4131 4132 __ BIND(L_exit); 4133 __ pshufb(xmm_curr_counter, xmm_counter_shuf_mask); //counter is shuffled back. 4134 __ movdqu(Address(counter, 0), xmm_curr_counter); //save counter back 4135 __ pop(rbx); // pop the saved RBX. 
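
    // The counter and 'used' state written back above let the next invocation
    // resume the keystream mid-block. A sketch of the Java-level semantics
    // this stub implements (illustrative only, not the exact CounterMode.java
    // code):
    //
    //   for (int i = 0; i < len; i++) {
    //     if (used == 16) { encCounter = AES_encrypt(K, counter); counter++; used = 0; }
    //     out[i] = (byte)(in[i] ^ encCounter[used++]);
    //   }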
4136 #ifdef _WIN64 4137 // restore regs belonging to calling function 4138 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 4139 __ movdqu(as_XMMRegister(i), xmm_save(i)); 4140 } 4141 __ movl(rax, len_mem); 4142 __ movptr(r13, r13_save); 4143 __ movptr(r14, r14_save); 4144 #else 4145 __ pop(rax); // return 'len' 4146 #endif 4147 __ leave(); // required for proper stackwalking of RuntimeStub frame 4148 __ ret(0); 4149 return start; 4150 } 4151 4152 // byte swap x86 long 4153 address generate_ghash_long_swap_mask() { 4154 __ align(CodeEntryAlignment); 4155 StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask"); 4156 address start = __ pc(); 4157 __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none ); 4158 __ emit_data64(0x0706050403020100, relocInfo::none ); 4159 return start; 4160 } 4161 4162 // byte swap x86 byte array 4163 address generate_ghash_byte_swap_mask() { 4164 __ align(CodeEntryAlignment); 4165 StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask"); 4166 address start = __ pc(); 4167 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none ); 4168 __ emit_data64(0x0001020304050607, relocInfo::none ); 4169 return start; 4170 } 4171 4172 /* Single and multi-block ghash operations */ 4173 address generate_ghash_processBlocks() { 4174 __ align(CodeEntryAlignment); 4175 Label L_ghash_loop, L_exit; 4176 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 4177 address start = __ pc(); 4178 4179 const Register state = c_rarg0; 4180 const Register subkeyH = c_rarg1; 4181 const Register data = c_rarg2; 4182 const Register blocks = c_rarg3; 4183 4184 #ifdef _WIN64 4185 const int XMM_REG_LAST = 10; 4186 #endif 4187 4188 const XMMRegister xmm_temp0 = xmm0; 4189 const XMMRegister xmm_temp1 = xmm1; 4190 const XMMRegister xmm_temp2 = xmm2; 4191 const XMMRegister xmm_temp3 = xmm3; 4192 const XMMRegister xmm_temp4 = xmm4; 4193 const XMMRegister xmm_temp5 = xmm5; 4194 const XMMRegister xmm_temp6 = xmm6; 4195 const XMMRegister xmm_temp7 = xmm7; 4196 const XMMRegister xmm_temp8 = xmm8; 4197 const XMMRegister xmm_temp9 = xmm9; 4198 const XMMRegister xmm_temp10 = xmm10; 4199 4200 __ enter(); 4201 4202 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 4203 // context for the registers used, where all instructions below are using 128-bit mode 4204 // On EVEX without VL and BW, these instructions will all be AVX. 
if (VM_Version::supports_avx512vlbw()) {
      __ movl(rax, 0xffff);
      __ kmovql(k1, rax);
    }

#ifdef _WIN64
    // save the xmm registers which must be preserved 6-10
    __ subptr(rsp, -rsp_after_call_off * wordSize);
    for (int i = 6; i <= XMM_REG_LAST; i++) {
      __ movdqu(xmm_save(i), as_XMMRegister(i));
    }
#endif

    __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));

    __ movdqu(xmm_temp0, Address(state, 0));
    __ pshufb(xmm_temp0, xmm_temp10);


    __ BIND(L_ghash_loop);
    __ movdqu(xmm_temp2, Address(data, 0));
    __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr()));

    __ movdqu(xmm_temp1, Address(subkeyH, 0));
    __ pshufb(xmm_temp1, xmm_temp10);

    __ pxor(xmm_temp0, xmm_temp2);

    //
    // Multiply with the hash key
    //
    __ movdqu(xmm_temp3, xmm_temp0);
    __ pclmulqdq(xmm_temp3, xmm_temp1, 0);      // xmm3 holds a0*b0
    __ movdqu(xmm_temp4, xmm_temp0);
    __ pclmulqdq(xmm_temp4, xmm_temp1, 16);     // xmm4 holds a0*b1

    __ movdqu(xmm_temp5, xmm_temp0);
    __ pclmulqdq(xmm_temp5, xmm_temp1, 1);      // xmm5 holds a1*b0
    __ movdqu(xmm_temp6, xmm_temp0);
    __ pclmulqdq(xmm_temp6, xmm_temp1, 17);     // xmm6 holds a1*b1

    __ pxor(xmm_temp4, xmm_temp5);      // xmm4 holds a0*b1 + a1*b0

    __ movdqu(xmm_temp5, xmm_temp4);    // move the contents of xmm4 to xmm5
    __ psrldq(xmm_temp4, 8);            // shift xmm4 by 64 bits to the right
    __ pslldq(xmm_temp5, 8);            // shift xmm5 by 64 bits to the left
    __ pxor(xmm_temp3, xmm_temp5);
    __ pxor(xmm_temp6, xmm_temp4);      // Register pair <xmm6:xmm3> holds the result
                                        // of the carry-less multiplication of
                                        // xmm0 by xmm1.

    // We shift the result of the multiplication by one bit position
    // to the left to compensate for the fact that the bits are reversed.
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp8, xmm_temp6);
    __ pslld(xmm_temp3, 1);
    __ pslld(xmm_temp6, 1);
    __ psrld(xmm_temp7, 31);
    __ psrld(xmm_temp8, 31);
    __ movdqu(xmm_temp9, xmm_temp7);
    __ pslldq(xmm_temp8, 4);
    __ pslldq(xmm_temp7, 4);
    __ psrldq(xmm_temp9, 12);
    __ por(xmm_temp3, xmm_temp7);
    __ por(xmm_temp6, xmm_temp8);
    __ por(xmm_temp6, xmm_temp9);

    //
    // First phase of the reduction
    //
    // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts
    // independently.
    __ movdqu(xmm_temp7, xmm_temp3);
    __ movdqu(xmm_temp8, xmm_temp3);
    __ movdqu(xmm_temp9, xmm_temp3);
    __ pslld(xmm_temp7, 31);    // packed left shift by 31
    __ pslld(xmm_temp8, 30);    // packed left shift by 30
    __ pslld(xmm_temp9, 25);    // packed left shift by 25
    __ pxor(xmm_temp7, xmm_temp8);      // xor the shifted versions
    __ pxor(xmm_temp7, xmm_temp9);
    __ movdqu(xmm_temp8, xmm_temp7);
    __ pslldq(xmm_temp7, 12);
    __ psrldq(xmm_temp8, 4);
    __ pxor(xmm_temp3, xmm_temp7);      // first phase of the reduction complete

    //
    // Second phase of the reduction
    //
    // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these
    // shift operations.
__ movdqu(xmm_temp2, xmm_temp3);
    __ movdqu(xmm_temp4, xmm_temp3);
    __ movdqu(xmm_temp5, xmm_temp3);
    __ psrld(xmm_temp2, 1);     // packed right shift by 1
    __ psrld(xmm_temp4, 2);     // packed right shift by 2
    __ psrld(xmm_temp5, 7);     // packed right shift by 7
    __ pxor(xmm_temp2, xmm_temp4);      // xor the shifted versions
    __ pxor(xmm_temp2, xmm_temp5);
    __ pxor(xmm_temp2, xmm_temp8);
    __ pxor(xmm_temp3, xmm_temp2);
    __ pxor(xmm_temp6, xmm_temp3);      // the result is in xmm6

    __ decrement(blocks);
    __ jcc(Assembler::zero, L_exit);
    __ movdqu(xmm_temp0, xmm_temp6);
    __ addptr(data, 16);
    __ jmp(L_ghash_loop);

    __ BIND(L_exit);
    __ pshufb(xmm_temp6, xmm_temp10);          // Byte swap 16-byte result
    __ movdqu(Address(state, 0), xmm_temp6);   // store the result

#ifdef _WIN64
    // restore xmm regs belonging to calling function
    for (int i = 6; i <= XMM_REG_LAST; i++) {
      __ movdqu(as_XMMRegister(i), xmm_save(i));
    }
#endif
    __ leave();
    __ ret(0);
    return start;
  }

  /**
   *  Arguments:
   *
   *  Inputs:
   *    c_rarg0   - int crc
   *    c_rarg1   - byte* buf
   *    c_rarg2   - int length
   *
   *  Output:
   *    rax       - int crc result
   */
  address generate_updateBytesCRC32() {
    assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    // rscratch1: r10
    const Register crc   = c_rarg0;  // crc
    const Register buf   = c_rarg1;  // source java byte array address
    const Register len   = c_rarg2;  // length
    const Register table = c_rarg3;  // crc_table address (reuse register)
    const Register tmp   = r11;
    assert_different_registers(crc, buf, len, table, tmp, rax);

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    __ kernel_crc32(crc, buf, len, table, tmp);

    __ movl(rax, crc);
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  /**
   *  Arguments:
   *
   *  Inputs:
   *    c_rarg0   - int crc
   *    c_rarg1   - byte* buf
   *    c_rarg2   - long length
   *    c_rarg3   - table_start - optional (present only when doing a library_call,
   *                not used by x86 algorithm)
   *
   *  Output:
   *    rax       - int crc result
   */
  address generate_updateBytesCRC32C(bool is_pclmulqdq_supported) {
    assert(UseCRC32CIntrinsics, "need SSE4_2");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32C");
    address start = __ pc();
    // reg.arg   int#0   int#1   int#2   int#3   int#4   int#5   float regs
    // Windows   RCX     RDX     R8      R9      none    none    XMM0..XMM3
    // Lin/Sol   RDI     RSI     RDX     RCX     R8      R9      XMM0..XMM7
    const Register crc = c_rarg0;  // crc
    const Register buf = c_rarg1;  // source java byte array address
    const Register len = c_rarg2;  // length
    const Register a = rax;
    const Register j = r9;
    const Register k = r10;
    const Register l = r11;
#ifdef _WIN64
    const Register y = rdi;
    const Register z = rsi;
#else
    const Register y = rcx;
    const Register z = r8;
#endif
    assert_different_registers(crc, buf, len, a, j, k, l, y, z);

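    // For reference: CRC32C uses the Castagnoli polynomial, which SSE4_2
    // supports directly via the crc32 instruction. A scalar sketch of the
    // computation this stub accelerates (illustrative only; the bit-inversion
    // convention at entry and exit is the caller's contract):
    //
    //   uint32_t crc32c_scalar(uint32_t crc, const uint8_t* buf, size_t len) {
    //     while (len >= 8) { crc = (uint32_t)_mm_crc32_u64(crc, *(const uint64_t*)buf); buf += 8; len -= 8; }
    //     while (len--)    { crc = _mm_crc32_u8(crc, *buf++); }
    //     return crc;
    //   }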
BLOCK_COMMENT("Entry:"); 4406 __ enter(); // required for proper stackwalking of RuntimeStub frame 4407 #ifdef _WIN64 4408 __ push(y); 4409 __ push(z); 4410 #endif 4411 __ crc32c_ipl_alg2_alt2(crc, buf, len, 4412 a, j, k, 4413 l, y, z, 4414 c_farg0, c_farg1, c_farg2, 4415 is_pclmulqdq_supported); 4416 __ movl(rax, crc); 4417 #ifdef _WIN64 4418 __ pop(z); 4419 __ pop(y); 4420 #endif 4421 __ leave(); // required for proper stackwalking of RuntimeStub frame 4422 __ ret(0); 4423 4424 return start; 4425 } 4426 4427 /** 4428 * Arguments: 4429 * 4430 * Input: 4431 * c_rarg0 - x address 4432 * c_rarg1 - x length 4433 * c_rarg2 - y address 4434 * c_rarg3 - y lenth 4435 * not Win64 4436 * c_rarg4 - z address 4437 * c_rarg5 - z length 4438 * Win64 4439 * rsp+40 - z address 4440 * rsp+48 - z length 4441 */ 4442 address generate_multiplyToLen() { 4443 __ align(CodeEntryAlignment); 4444 StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); 4445 4446 address start = __ pc(); 4447 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 4448 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 4449 const Register x = rdi; 4450 const Register xlen = rax; 4451 const Register y = rsi; 4452 const Register ylen = rcx; 4453 const Register z = r8; 4454 const Register zlen = r11; 4455 4456 // Next registers will be saved on stack in multiply_to_len(). 4457 const Register tmp1 = r12; 4458 const Register tmp2 = r13; 4459 const Register tmp3 = r14; 4460 const Register tmp4 = r15; 4461 const Register tmp5 = rbx; 4462 4463 BLOCK_COMMENT("Entry:"); 4464 __ enter(); // required for proper stackwalking of RuntimeStub frame 4465 4466 #ifndef _WIN64 4467 __ movptr(zlen, r9); // Save r9 in r11 - zlen 4468 #endif 4469 setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx 4470 // ylen => rcx, z => r8, zlen => r11 4471 // r9 and r10 may be used to save non-volatile registers 4472 #ifdef _WIN64 4473 // last 2 arguments (#4, #5) are on stack on Win64 4474 __ movptr(z, Address(rsp, 6 * wordSize)); 4475 __ movptr(zlen, Address(rsp, 7 * wordSize)); 4476 #endif 4477 4478 __ movptr(xlen, rsi); 4479 __ movptr(y, rdx); 4480 __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5); 4481 4482 restore_arg_regs(); 4483 4484 __ leave(); // required for proper stackwalking of RuntimeStub frame 4485 __ ret(0); 4486 4487 return start; 4488 } 4489 4490 /** 4491 * Arguments: 4492 * 4493 * Input: 4494 * c_rarg0 - obja address 4495 * c_rarg1 - objb address 4496 * c_rarg3 - length length 4497 * c_rarg4 - scale log2_array_indxscale 4498 */ 4499 address generate_vectorizedMismatch() { 4500 __ align(CodeEntryAlignment); 4501 StubCodeMark mark(this, "StubRoutines", "vectorizedMismatch"); 4502 address start = __ pc(); 4503 4504 BLOCK_COMMENT("Entry:"); 4505 __ enter(); 4506 4507 #ifdef _WIN64 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 4508 const Register scale = c_rarg0; //rcx, will exchange with r9 4509 const Register objb = c_rarg1; //rdx 4510 const Register length = c_rarg2; //r8 4511 const Register obja = c_rarg3; //r9 4512 __ xchgq(obja, scale); //now obja and scale contains the correct contents 4513 4514 const Register tmp1 = r10; 4515 const Register tmp2 = r11; 4516 #endif 4517 #ifndef _WIN64 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 
const Register obja   = c_rarg0; // U:rdi
    const Register objb   = c_rarg1; // U:rsi
    const Register length = c_rarg2; // U:rdx
    const Register scale  = c_rarg3; // U:rcx
    const Register tmp1 = r8;
    const Register tmp2 = r9;
#endif
    const Register result = rax; // return value
    const XMMRegister vec0 = xmm0;
    const XMMRegister vec1 = xmm1;
    const XMMRegister vec2 = xmm2;

    __ vectorized_mismatch(obja, objb, length, scale, result, tmp1, tmp2, vec0, vec1, vec2);

    __ leave();
    __ ret(0);

    return start;
  }

  /**
   *  Arguments:
   *
   *  Input:
   *    c_rarg0   - x address
   *    c_rarg1   - x length
   *    c_rarg2   - z address
   *    c_rarg3   - z length
   */
  address generate_squareToLen() {

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "squareToLen");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...)
    const Register x      = rdi;
    const Register len    = rsi;
    const Register z      = r8;
    const Register zlen   = rcx;

    const Register tmp1   = r12;
    const Register tmp2   = r13;
    const Register tmp3   = r14;
    const Register tmp4   = r15;
    const Register tmp5   = rbx;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

    setup_arg_regs(4); // x => rdi, len => rsi, z => rdx
                       // zlen => rcx
                       // r9 and r10 may be used to save non-volatile registers
    __ movptr(r8, rdx);
    __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax);

    restore_arg_regs();

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  /**
   *  Arguments:
   *
   *  Input:
   *    c_rarg0   - out address
   *    c_rarg1   - in address
   *    c_rarg2   - offset
   *    c_rarg3   - len
   *   not Win64
   *    c_rarg4   - k
   *   Win64
   *    rsp+40    - k
   */
  address generate_mulAdd() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "mulAdd");

    address start = __ pc();
    // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
    // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
    const Register out     = rdi;
    const Register in      = rsi;
    const Register offset  = r11;
    const Register len     = rcx;
    const Register k       = r8;

    // Next registers will be saved on stack in mul_add().
4611 const Register tmp1 = r12; 4612 const Register tmp2 = r13; 4613 const Register tmp3 = r14; 4614 const Register tmp4 = r15; 4615 const Register tmp5 = rbx; 4616 4617 BLOCK_COMMENT("Entry:"); 4618 __ enter(); // required for proper stackwalking of RuntimeStub frame 4619 4620 setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx 4621 // len => rcx, k => r8 4622 // r9 and r10 may be used to save non-volatile registers 4623 #ifdef _WIN64 4624 // last argument is on stack on Win64 4625 __ movl(k, Address(rsp, 6 * wordSize)); 4626 #endif 4627 __ movptr(r11, rdx); // move offset in rdx to offset(r11) 4628 __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 4629 4630 restore_arg_regs(); 4631 4632 __ leave(); // required for proper stackwalking of RuntimeStub frame 4633 __ ret(0); 4634 4635 return start; 4636 } 4637 4638 address generate_libmExp() { 4639 address start = __ pc(); 4640 4641 const XMMRegister x0 = xmm0; 4642 const XMMRegister x1 = xmm1; 4643 const XMMRegister x2 = xmm2; 4644 const XMMRegister x3 = xmm3; 4645 4646 const XMMRegister x4 = xmm4; 4647 const XMMRegister x5 = xmm5; 4648 const XMMRegister x6 = xmm6; 4649 const XMMRegister x7 = xmm7; 4650 4651 const Register tmp = r11; 4652 4653 BLOCK_COMMENT("Entry:"); 4654 __ enter(); // required for proper stackwalking of RuntimeStub frame 4655 4656 #ifdef _WIN64 4657 // save the xmm registers which must be preserved 6-7 4658 __ subptr(rsp, 4 * wordSize); 4659 __ movdqu(Address(rsp, 0), xmm6); 4660 __ movdqu(Address(rsp, 2 * wordSize), xmm7); 4661 #endif 4662 __ fast_exp(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 4663 4664 #ifdef _WIN64 4665 // restore xmm regs belonging to calling function 4666 __ movdqu(xmm6, Address(rsp, 0)); 4667 __ movdqu(xmm7, Address(rsp, 2 * wordSize)); 4668 __ addptr(rsp, 4 * wordSize); 4669 #endif 4670 4671 __ leave(); // required for proper stackwalking of RuntimeStub frame 4672 __ ret(0); 4673 4674 return start; 4675 4676 } 4677 4678 address generate_libmLog() { 4679 address start = __ pc(); 4680 4681 const XMMRegister x0 = xmm0; 4682 const XMMRegister x1 = xmm1; 4683 const XMMRegister x2 = xmm2; 4684 const XMMRegister x3 = xmm3; 4685 4686 const XMMRegister x4 = xmm4; 4687 const XMMRegister x5 = xmm5; 4688 const XMMRegister x6 = xmm6; 4689 const XMMRegister x7 = xmm7; 4690 4691 const Register tmp1 = r11; 4692 const Register tmp2 = r8; 4693 4694 BLOCK_COMMENT("Entry:"); 4695 __ enter(); // required for proper stackwalking of RuntimeStub frame 4696 4697 #ifdef _WIN64 4698 // save the xmm registers which must be preserved 6-7 4699 __ subptr(rsp, 4 * wordSize); 4700 __ movdqu(Address(rsp, 0), xmm6); 4701 __ movdqu(Address(rsp, 2 * wordSize), xmm7); 4702 #endif 4703 __ fast_log(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2); 4704 4705 #ifdef _WIN64 4706 // restore xmm regs belonging to calling function 4707 __ movdqu(xmm6, Address(rsp, 0)); 4708 __ movdqu(xmm7, Address(rsp, 2 * wordSize)); 4709 __ addptr(rsp, 4 * wordSize); 4710 #endif 4711 4712 __ leave(); // required for proper stackwalking of RuntimeStub frame 4713 __ ret(0); 4714 4715 return start; 4716 4717 } 4718 4719 address generate_libmLog10() { 4720 address start = __ pc(); 4721 4722 const XMMRegister x0 = xmm0; 4723 const XMMRegister x1 = xmm1; 4724 const XMMRegister x2 = xmm2; 4725 const XMMRegister x3 = xmm3; 4726 4727 const XMMRegister x4 = xmm4; 4728 const XMMRegister x5 = xmm5; 4729 const XMMRegister x6 = xmm6; 4730 const XMMRegister x7 = xmm7; 4731 4732 const Register tmp = r11; 4733 4734 
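
    // Note: all of these libm stubs share one calling pattern: the double
    // argument arrives in xmm0 and the result is returned in xmm0, per the
    // native floating-point ABI, so no argument shuffling is needed. Only the
    // Win64 callee-saved xmm6/xmm7 (plus rsi/rdi where they are used as
    // scratch) are spilled around the MacroAssembler fast_* call.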
BLOCK_COMMENT("Entry:"); 4735 __ enter(); // required for proper stackwalking of RuntimeStub frame 4736 4737 #ifdef _WIN64 4738 // save the xmm registers which must be preserved 6-7 4739 __ subptr(rsp, 4 * wordSize); 4740 __ movdqu(Address(rsp, 0), xmm6); 4741 __ movdqu(Address(rsp, 2 * wordSize), xmm7); 4742 #endif 4743 __ fast_log10(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp); 4744 4745 #ifdef _WIN64 4746 // restore xmm regs belonging to calling function 4747 __ movdqu(xmm6, Address(rsp, 0)); 4748 __ movdqu(xmm7, Address(rsp, 2 * wordSize)); 4749 __ addptr(rsp, 4 * wordSize); 4750 #endif 4751 4752 __ leave(); // required for proper stackwalking of RuntimeStub frame 4753 __ ret(0); 4754 4755 return start; 4756 4757 } 4758 4759 address generate_libmPow() { 4760 address start = __ pc(); 4761 4762 const XMMRegister x0 = xmm0; 4763 const XMMRegister x1 = xmm1; 4764 const XMMRegister x2 = xmm2; 4765 const XMMRegister x3 = xmm3; 4766 4767 const XMMRegister x4 = xmm4; 4768 const XMMRegister x5 = xmm5; 4769 const XMMRegister x6 = xmm6; 4770 const XMMRegister x7 = xmm7; 4771 4772 const Register tmp1 = r8; 4773 const Register tmp2 = r9; 4774 const Register tmp3 = r10; 4775 const Register tmp4 = r11; 4776 4777 BLOCK_COMMENT("Entry:"); 4778 __ enter(); // required for proper stackwalking of RuntimeStub frame 4779 4780 #ifdef _WIN64 4781 // save the xmm registers which must be preserved 6-7 4782 __ subptr(rsp, 4 * wordSize); 4783 __ movdqu(Address(rsp, 0), xmm6); 4784 __ movdqu(Address(rsp, 2 * wordSize), xmm7); 4785 #endif 4786 __ fast_pow(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4787 4788 #ifdef _WIN64 4789 // restore xmm regs belonging to calling function 4790 __ movdqu(xmm6, Address(rsp, 0)); 4791 __ movdqu(xmm7, Address(rsp, 2 * wordSize)); 4792 __ addptr(rsp, 4 * wordSize); 4793 #endif 4794 4795 __ leave(); // required for proper stackwalking of RuntimeStub frame 4796 __ ret(0); 4797 4798 return start; 4799 4800 } 4801 4802 address generate_libmSin() { 4803 address start = __ pc(); 4804 4805 const XMMRegister x0 = xmm0; 4806 const XMMRegister x1 = xmm1; 4807 const XMMRegister x2 = xmm2; 4808 const XMMRegister x3 = xmm3; 4809 4810 const XMMRegister x4 = xmm4; 4811 const XMMRegister x5 = xmm5; 4812 const XMMRegister x6 = xmm6; 4813 const XMMRegister x7 = xmm7; 4814 4815 const Register tmp1 = r8; 4816 const Register tmp2 = r9; 4817 const Register tmp3 = r10; 4818 const Register tmp4 = r11; 4819 4820 BLOCK_COMMENT("Entry:"); 4821 __ enter(); // required for proper stackwalking of RuntimeStub frame 4822 4823 #ifdef _WIN64 4824 __ push(rsi); 4825 __ push(rdi); 4826 // save the xmm registers which must be preserved 6-7 4827 __ subptr(rsp, 4 * wordSize); 4828 __ movdqu(Address(rsp, 0), xmm6); 4829 __ movdqu(Address(rsp, 2 * wordSize), xmm7); 4830 #endif 4831 __ fast_sin(x0, x1, x2, x3, x4, x5, x6, x7, rax, rbx, rcx, rdx, tmp1, tmp2, tmp3, tmp4); 4832 4833 #ifdef _WIN64 4834 // restore xmm regs belonging to calling function 4835 __ movdqu(xmm6, Address(rsp, 0)); 4836 __ movdqu(xmm7, Address(rsp, 2 * wordSize)); 4837 __ addptr(rsp, 4 * wordSize); 4838 __ pop(rdi); 4839 __ pop(rsi); 4840 #endif 4841 4842 __ leave(); // required for proper stackwalking of RuntimeStub frame 4843 __ ret(0); 4844 4845 return start; 4846 4847 } 4848 4849 address generate_libmCos() { 4850 address start = __ pc(); 4851 4852 const XMMRegister x0 = xmm0; 4853 const XMMRegister x1 = xmm1; 4854 const XMMRegister x2 = xmm2; 4855 const XMMRegister x3 = xmm3; 4856 4857 const XMMRegister x4 = xmm4; 
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r8;
    const Register tmp2 = r9;
    const Register tmp3 = r10;
    const Register tmp4 = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef _WIN64
    __ push(rsi);
    __ push(rdi);
    // save the xmm registers which must be preserved 6-7
    __ subptr(rsp, 4 * wordSize);
    __ movdqu(Address(rsp, 0), xmm6);
    __ movdqu(Address(rsp, 2 * wordSize), xmm7);
#endif
    __ fast_cos(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4);

#ifdef _WIN64
    // restore xmm regs belonging to calling function
    __ movdqu(xmm6, Address(rsp, 0));
    __ movdqu(xmm7, Address(rsp, 2 * wordSize));
    __ addptr(rsp, 4 * wordSize);
    __ pop(rdi);
    __ pop(rsi);
#endif

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  address generate_libmTan() {
    address start = __ pc();

    const XMMRegister x0 = xmm0;
    const XMMRegister x1 = xmm1;
    const XMMRegister x2 = xmm2;
    const XMMRegister x3 = xmm3;

    const XMMRegister x4 = xmm4;
    const XMMRegister x5 = xmm5;
    const XMMRegister x6 = xmm6;
    const XMMRegister x7 = xmm7;

    const Register tmp1 = r8;
    const Register tmp2 = r9;
    const Register tmp3 = r10;
    const Register tmp4 = r11;

    BLOCK_COMMENT("Entry:");
    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef _WIN64
    __ push(rsi);
    __ push(rdi);
    // save the xmm registers which must be preserved 6-7
    __ subptr(rsp, 4 * wordSize);
    __ movdqu(Address(rsp, 0), xmm6);
    __ movdqu(Address(rsp, 2 * wordSize), xmm7);
#endif
    __ fast_tan(x0, x1, x2, x3, x4, x5, x6, x7, rax, rcx, rdx, tmp1, tmp2, tmp3, tmp4);

#ifdef _WIN64
    // restore xmm regs belonging to calling function
    __ movdqu(xmm6, Address(rsp, 0));
    __ movdqu(xmm7, Address(rsp, 2 * wordSize));
    __ addptr(rsp, 4 * wordSize);
    __ pop(rdi);
    __ pop(rsi);
#endif

    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

#undef __
#define __ masm->

  // Continuation point for throwing of implicit exceptions that are
  // not handled in the current activation. Fabricates an exception
  // oop and initiates normal exception dispatching in this
  // frame. Since we need to preserve callee-saved values (currently
  // only for C2, but done for C1 as well) we need a callee-saved oop
  // map and therefore have to make these stubs into RuntimeStubs
  // rather than BufferBlobs.  If the compiler needs all registers to
  // be preserved between the fault point and the exception handler
  // then it must assume responsibility for that in
  // AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception.  All other
  // implicit exceptions (e.g., NullPointerException or
  // AbstractMethodError on entry) are either at call sites or
  // otherwise assume that stack unwinding will be initiated, so
  // caller saved registers were assumed volatile in the compiler.
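  //
  // In sketch (not the literal emitted code; see the body below for
  // the exact sequence), each stub produced by generate_throw_exception()
  // does:
  //
  //   enter                                    // push rbp; mov rbp, rsp
  //   sub rsp, ...                             // reserve arg_reg_save_area, keep sp 16-byte aligned
  //   set_last_Java_frame(rsp, rbp, pc)        // so the runtime can walk this frame
  //   call runtime_entry(thread, arg1, arg2)   // e.g. SharedRuntime::throw_StackOverflowError
  //   reset_last_Java_frame
  //   leave
  //   jmp forward_exception_entry              // dispatch the now-pending exception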
  address generate_throw_exception(const char* name,
                                   address runtime_entry,
                                   Register arg1 = noreg,
                                   Register arg2 = noreg) {
    // Information about frame layout at time of blocking runtime call.
    // Note that we only have to preserve callee-saved registers since
    // the compilers are responsible for supplying a continuation point
    // if they expect all registers to be preserved.
    enum layout {
      rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
      rbp_off2,
      return_off,
      return_off2,
      framesize // inclusive of return address
    };

    int insts_size = 512;
    int locs_size  = 64;

    CodeBuffer code(name, insts_size, locs_size);
    OopMapSet* oop_maps  = new OopMapSet();
    MacroAssembler* masm = new MacroAssembler(&code);

    address start = __ pc();

    // This is an inlined and slightly modified version of call_VM
    // which has the ability to fetch the return PC out of
    // thread-local storage and also sets up last_Java_sp slightly
    // differently than the real call_VM

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    assert(is_even(framesize/2), "sp not 16-byte aligned");

    // return address and rbp are already in place
    __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog

    int frame_complete = __ pc() - start;

    // Set up last_Java_sp and last_Java_fp
    address the_pc = __ pc();
    __ set_last_Java_frame(rsp, rbp, the_pc);
    __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack

    // Call runtime
    if (arg1 != noreg) {
      assert(arg2 != c_rarg1, "clobbered");
      __ movptr(c_rarg1, arg1);
    }
    if (arg2 != noreg) {
      __ movptr(c_rarg2, arg2);
    }
    __ movptr(c_rarg0, r15_thread);
    BLOCK_COMMENT("call runtime_entry");
    __ call(RuntimeAddress(runtime_entry));

    // Generate oop map
    OopMap* map = new OopMap(framesize, 0);

    oop_maps->add_gc_map(the_pc - start, map);

    __ reset_last_Java_frame(true, true);

    __ leave(); // required for proper stackwalking of RuntimeStub frame

    // check for pending exceptions
#ifdef ASSERT
    Label L;
    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()),
              (int32_t) NULL_WORD);
    __ jcc(Assembler::notEqual, L);
    __ should_not_reach_here();
    __ bind(L);
#endif // ASSERT
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

    // codeBlob framesize is in words (not VMRegImpl::slot_size)
    RuntimeStub* stub =
      RuntimeStub::new_runtime_stub(name,
                                    &code,
                                    frame_complete,
                                    (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                    oop_maps, false);
    return stub->entry_point();
  }

  void create_control_words() {
    // Round to nearest, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_std   = 0x027F;
    // Round to zero, 53-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
    // Round to nearest, 24-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_24    = 0x007F;
    // Round to nearest, 64-bit mode, exceptions masked
    StubRoutines::_fpu_cntrl_wrd_64    = 0x037F;
    // MXCSR: round to nearest (MXCSR has no precision control), all exceptions masked
    StubRoutines::_mxcsr_std           = 0x1F80;
    // Note: the following two constants are 80-bit values
    // layout is critical for correct loading by FPU.
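    // Each 80-bit constant is stored as three 32-bit words, little-endian:
    // words [0..1] hold the 64-bit significand with the x87 explicit
    // integer bit set (0x8000000000000000 represents 1.0), and the low
    // 16 bits of word [2] hold the sign and 15-bit biased exponent
    // (bias 16383).  Hence 0x03ff encodes 2^(1023-16383) == 2^(-15360)
    // and 0x7bff encodes 2^(31743-16383) == 2^(+15360).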
    // Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias1[0] = 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias1[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias1[2] = 0x03ff;
    // Un-Bias for strict fp multiply/divide
    StubRoutines::_fpu_subnormal_bias2[0] = 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
    StubRoutines::_fpu_subnormal_bias2[1] = 0x80000000;
    StubRoutines::_fpu_subnormal_bias2[2] = 0x7bff;
  }

  // Initialization
  void generate_initial() {
    // Generates all stubs and initializes the entry points

    // These platform-specific settings are needed by generate_call_stub()
    create_control_words();

    // Entry points that exist on all platforms.  Note: This is code
    // that could be shared among different platforms - however the
    // benefit seems to be smaller than the disadvantage of having a
    // much more complicated generator structure. See also comment in
    // stubRoutines.hpp.

    StubRoutines::_forward_exception_entry = generate_forward_exception();

    StubRoutines::_call_stub_entry =
      generate_call_stub(StubRoutines::_call_stub_return_address);

    // is referenced by megamorphic call
    StubRoutines::_catch_exception_entry = generate_catch_exception();

    // atomic calls
    StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
    StubRoutines::_atomic_xchg_ptr_entry     = generate_atomic_xchg_ptr();
    StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
    StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
    StubRoutines::_atomic_add_entry          = generate_atomic_add();
    StubRoutines::_atomic_add_ptr_entry      = generate_atomic_add_ptr();
    StubRoutines::_fence_entry               = generate_orderaccess_fence();

    StubRoutines::_handler_for_unsafe_access_entry =
      generate_handler_for_unsafe_access();

    // platform dependent
    StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
    StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();

    StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();

    // Build this early so it's available for the interpreter.
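    // (The interpreter's stack-banging overflow check jumps to this
    // entry when the bang faults, so it must exist before the
    // interpreter entry points are generated.)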
    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_StackOverflowError));
    StubRoutines::_throw_delayed_StackOverflowError_entry =
      generate_throw_exception("delayed StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_delayed_StackOverflowError));
    if (UseCRC32Intrinsics) {
      // set table address before stub generation which uses it
      StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
    }

    if (UseCRC32CIntrinsics) {
      bool supports_clmul = VM_Version::supports_clmul();
      StubRoutines::x86::generate_CRC32C_table(supports_clmul);
      StubRoutines::_crc32c_table_addr = (address)StubRoutines::x86::_crc32c_table;
      StubRoutines::_updateBytesCRC32C = generate_updateBytesCRC32C(supports_clmul);
    }
    if (VM_Version::supports_sse2() && UseLibmIntrinsic) {
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos) ||
          vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::x86::_ONEHALF_adr = (address)StubRoutines::x86::_ONEHALF;
        StubRoutines::x86::_P_2_adr = (address)StubRoutines::x86::_P_2;
        StubRoutines::x86::_SC_4_adr = (address)StubRoutines::x86::_SC_4;
        StubRoutines::x86::_Ctable_adr = (address)StubRoutines::x86::_Ctable;
        StubRoutines::x86::_SC_2_adr = (address)StubRoutines::x86::_SC_2;
        StubRoutines::x86::_SC_3_adr = (address)StubRoutines::x86::_SC_3;
        StubRoutines::x86::_SC_1_adr = (address)StubRoutines::x86::_SC_1;
        StubRoutines::x86::_PI_INV_TABLE_adr = (address)StubRoutines::x86::_PI_INV_TABLE;
        StubRoutines::x86::_PI_4_adr = (address)StubRoutines::x86::_PI_4;
        StubRoutines::x86::_PI32INV_adr = (address)StubRoutines::x86::_PI32INV;
        StubRoutines::x86::_SIGN_MASK_adr = (address)StubRoutines::x86::_SIGN_MASK;
        StubRoutines::x86::_P_1_adr = (address)StubRoutines::x86::_P_1;
        StubRoutines::x86::_P_3_adr = (address)StubRoutines::x86::_P_3;
        StubRoutines::x86::_NEG_ZERO_adr = (address)StubRoutines::x86::_NEG_ZERO;
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dexp)) {
        StubRoutines::_dexp = generate_libmExp();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog)) {
        StubRoutines::_dlog = generate_libmLog();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dlog10)) {
        StubRoutines::_dlog10 = generate_libmLog10();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dpow)) {
        StubRoutines::_dpow = generate_libmPow();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dsin)) {
        StubRoutines::_dsin = generate_libmSin();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dcos)) {
        StubRoutines::_dcos = generate_libmCos();
      }
      if (vmIntrinsics::is_intrinsic_available(vmIntrinsics::_dtan)) {
        StubRoutines::_dtan = generate_libmTan();
      }
    }
  }

  void generate_all() {
    // Generates all stubs and initializes the entry points

    // These entry points require SharedInfo::stack0 to be set up in
    // non-core builds and need to be relocatable, so they each
    // fabricate a RuntimeStub internally.
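    //
    // (A sketch of the control flow, not code emitted here: compiled
    // code, or e.g. a vtable/itable stub, jumps to one of these
    // entries; the RuntimeStub calls the corresponding SharedRuntime
    // thrower with r15_thread as the first argument and then unwinds
    // via forward_exception_entry.  See generate_throw_exception()
    // above.)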
    StubRoutines::_throw_AbstractMethodError_entry =
      generate_throw_exception("AbstractMethodError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_AbstractMethodError));

    StubRoutines::_throw_IncompatibleClassChangeError_entry =
      generate_throw_exception("IncompatibleClassChangeError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_IncompatibleClassChangeError));

    StubRoutines::_throw_NullPointerException_at_call_entry =
      generate_throw_exception("NullPointerException at call throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_NullPointerException_at_call));

    // entry points that are platform specific
    StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
    StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
    StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
    StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();

    StubRoutines::x86::_float_sign_mask  = generate_fp_mask("float_sign_mask",  0x7FFFFFFF7FFFFFFF);
    StubRoutines::x86::_float_sign_flip  = generate_fp_mask("float_sign_flip",  0x8000000080000000);
    StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
    StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    // don't bother generating these AES intrinsic stubs unless global flag is set
    if (UseAESIntrinsics) {
      StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others
      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
      StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
    }
    if (UseAESCTRIntrinsics) {
      StubRoutines::x86::_counter_shuffle_mask_addr = generate_counter_shuffle_mask();
      StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt_Parallel();
    }

    if (UseSHA1Intrinsics) {
      StubRoutines::x86::_upper_word_mask_addr = generate_upper_word_mask();
      StubRoutines::x86::_shuffle_byte_flip_mask_addr = generate_shuffle_byte_flip_mask();
      StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress");
      StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB");
    }
    if (UseSHA256Intrinsics) {
      StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
      StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask();
      StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress");
      StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB");
    }

    // Generate GHASH intrinsics code
    if (UseGHASHIntrinsics) {
      StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask();
      StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask();
      StubRoutines::_ghash_processBlocks =
        generate_ghash_processBlocks();
    }

    // Safefetch stubs.
    generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
                                                       &StubRoutines::_safefetch32_fault_pc,
                                                       &StubRoutines::_safefetch32_continuation_pc);
    generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                                                       &StubRoutines::_safefetchN_fault_pc,
                                                       &StubRoutines::_safefetchN_continuation_pc);
#ifdef COMPILER2
    if (UseMultiplyToLenIntrinsic) {
      StubRoutines::_multiplyToLen = generate_multiplyToLen();
    }
    if (UseSquareToLenIntrinsic) {
      StubRoutines::_squareToLen = generate_squareToLen();
    }
    if (UseMulAddIntrinsic) {
      StubRoutines::_mulAdd = generate_mulAdd();
    }
    if (UseVectorizedMismatchIntrinsic) {
      StubRoutines::_vectorizedMismatch = generate_vectorizedMismatch();
    }
#ifndef _WINDOWS
    if (UseMontgomeryMultiplyIntrinsic) {
      StubRoutines::_montgomeryMultiply
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
    }
    if (UseMontgomerySquareIntrinsic) {
      StubRoutines::_montgomerySquare
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
    }
#endif // !_WINDOWS
#endif // COMPILER2
  }

 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
}; // end class declaration

void StubGenerator_generate(CodeBuffer* code, bool all) {
  StubGenerator g(code, all);
}
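
// Usage sketch (for orientation only; the real call sites live in
// stubRoutines.cpp): StubGenerator_generate() is invoked twice during
// VM startup, first with all == false so that the initial stubs
// (call stub, atomics, control words, ...) exist before the
// interpreter is generated, then with all == true for the rest.
// Roughly:
//
//   BufferBlob* blob = BufferBlob::create("StubRoutines (1)", code_size1);
//   CodeBuffer buffer(blob);
//   StubGenerator_generate(&buffer, false);   // generate_initial()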