1 /* 2 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "asm/macroAssembler.hpp" 27 #include "asm/macroAssembler.inline.hpp" 28 #include "interpreter/interpreter.hpp" 29 #include "nativeInst_x86.hpp" 30 #include "oops/instanceOop.hpp" 31 #include "oops/method.hpp" 32 #include "oops/objArrayKlass.hpp" 33 #include "oops/oop.inline.hpp" 34 #include "prims/methodHandles.hpp" 35 #include "runtime/frame.inline.hpp" 36 #include "runtime/handles.inline.hpp" 37 #include "runtime/sharedRuntime.hpp" 38 #include "runtime/stubCodeGenerator.hpp" 39 #include "runtime/stubRoutines.hpp" 40 #include "runtime/thread.inline.hpp" 41 #include "utilities/top.hpp" 42 #ifdef COMPILER2 43 #include "opto/runtime.hpp" 44 #endif 45 46 // Declaration and definition of StubGenerator (no .hpp file). 47 // For a more detailed description of the stub routine structure 48 // see the comment in stubRoutines.hpp 49 50 #define __ _masm-> 51 #define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8) 52 #define a__ ((Assembler*)_masm)-> 53 54 #ifdef PRODUCT 55 #define BLOCK_COMMENT(str) /* nothing */ 56 #else 57 #define BLOCK_COMMENT(str) __ block_comment(str) 58 #endif 59 60 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 61 const int MXCSR_MASK = 0xFFC0; // Mask out any pending exceptions 62 63 // Stub Code definitions 64 65 static address handle_unsafe_access() { 66 JavaThread* thread = JavaThread::current(); 67 address pc = thread->saved_exception_pc(); 68 // pc is the instruction which we must emulate 69 // doing a no-op is fine: return garbage from the load 70 // therefore, compute npc 71 address npc = Assembler::locate_next_instruction(pc); 72 73 // request an async exception 74 thread->set_pending_unsafe_access_error(); 75 76 // return address of next instruction to execute 77 return npc; 78 } 79 80 class StubGenerator: public StubCodeGenerator { 81 private: 82 83 #ifdef PRODUCT 84 #define inc_counter_np(counter) ((void)0) 85 #else 86 void inc_counter_np_(int& counter) { 87 // This can destroy rscratch1 if counter is far from the code cache 88 __ incrementl(ExternalAddress((address)&counter)); 89 } 90 #define inc_counter_np(counter) \ 91 BLOCK_COMMENT("inc_counter " #counter); \ 92 inc_counter_np_(counter); 93 #endif 94 95 // Call stubs are used to call Java from C 96 // 97 // Linux Arguments: 98 // c_rarg0: call wrapper address address 99 // c_rarg1: result address 100 // c_rarg2: result type BasicType 101 // c_rarg3: method Method* 102 // c_rarg4: (interpreter) entry point address 103 // c_rarg5: parameters intptr_t* 104 // 16(rbp): parameter size (in words) int 105 // 24(rbp): thread Thread* 106 // 107 // [ return_from_Java ] <--- rsp 108 // [ argument word n ] 109 // ... 110 // -12 [ argument word 1 ] 111 // -11 [ saved r15 ] <--- rsp_after_call 112 // -10 [ saved r14 ] 113 // -9 [ saved r13 ] 114 // -8 [ saved r12 ] 115 // -7 [ saved rbx ] 116 // -6 [ call wrapper ] 117 // -5 [ result ] 118 // -4 [ result type ] 119 // -3 [ method ] 120 // -2 [ entry point ] 121 // -1 [ parameters ] 122 // 0 [ saved rbp ] <--- rbp 123 // 1 [ return address ] 124 // 2 [ parameter size ] 125 // 3 [ thread ] 126 // 127 // Windows Arguments: 128 // c_rarg0: call wrapper address address 129 // c_rarg1: result address 130 // c_rarg2: result type BasicType 131 // c_rarg3: method Method* 132 // 48(rbp): (interpreter) entry point address 133 // 56(rbp): parameters intptr_t* 134 // 64(rbp): parameter size (in words) int 135 // 72(rbp): thread Thread* 136 // 137 // [ return_from_Java ] <--- rsp 138 // [ argument word n ] 139 // ... 140 // -28 [ argument word 1 ] 141 // -27 [ saved xmm15 ] <--- rsp_after_call 142 // [ saved xmm7-xmm14 ] 143 // -9 [ saved xmm6 ] (each xmm register takes 2 slots) 144 // -7 [ saved r15 ] 145 // -6 [ saved r14 ] 146 // -5 [ saved r13 ] 147 // -4 [ saved r12 ] 148 // -3 [ saved rdi ] 149 // -2 [ saved rsi ] 150 // -1 [ saved rbx ] 151 // 0 [ saved rbp ] <--- rbp 152 // 1 [ return address ] 153 // 2 [ call wrapper ] 154 // 3 [ result ] 155 // 4 [ result type ] 156 // 5 [ method ] 157 // 6 [ entry point ] 158 // 7 [ parameters ] 159 // 8 [ parameter size ] 160 // 9 [ thread ] 161 // 162 // Windows reserves the callers stack space for arguments 1-4. 163 // We spill c_rarg0-c_rarg3 to this space. 164 165 // Call stub stack layout word offsets from rbp 166 enum call_stub_layout { 167 #ifdef _WIN64 168 xmm_save_first = 6, // save from xmm6 169 xmm_save_last = 15, // to xmm15 170 xmm_save_base = -9, 171 rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -27 172 r15_off = -7, 173 r14_off = -6, 174 r13_off = -5, 175 r12_off = -4, 176 rdi_off = -3, 177 rsi_off = -2, 178 rbx_off = -1, 179 rbp_off = 0, 180 retaddr_off = 1, 181 call_wrapper_off = 2, 182 result_off = 3, 183 result_type_off = 4, 184 method_off = 5, 185 entry_point_off = 6, 186 parameters_off = 7, 187 parameter_size_off = 8, 188 thread_off = 9 189 #else 190 rsp_after_call_off = -12, 191 mxcsr_off = rsp_after_call_off, 192 r15_off = -11, 193 r14_off = -10, 194 r13_off = -9, 195 r12_off = -8, 196 rbx_off = -7, 197 call_wrapper_off = -6, 198 result_off = -5, 199 result_type_off = -4, 200 method_off = -3, 201 entry_point_off = -2, 202 parameters_off = -1, 203 rbp_off = 0, 204 retaddr_off = 1, 205 parameter_size_off = 2, 206 thread_off = 3 207 #endif 208 }; 209 210 #ifdef _WIN64 211 Address xmm_save(int reg) { 212 assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range"); 213 return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize); 214 } 215 #endif 216 217 address generate_call_stub(address& return_address) { 218 assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 && 219 (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off, 220 "adjust this code"); 221 StubCodeMark mark(this, "StubRoutines", "call_stub"); 222 address start = __ pc(); 223 224 // same as in generate_catch_exception()! 225 const Address rsp_after_call(rbp, rsp_after_call_off * wordSize); 226 227 const Address call_wrapper (rbp, call_wrapper_off * wordSize); 228 const Address result (rbp, result_off * wordSize); 229 const Address result_type (rbp, result_type_off * wordSize); 230 const Address method (rbp, method_off * wordSize); 231 const Address entry_point (rbp, entry_point_off * wordSize); 232 const Address parameters (rbp, parameters_off * wordSize); 233 const Address parameter_size(rbp, parameter_size_off * wordSize); 234 235 // same as in generate_catch_exception()! 236 const Address thread (rbp, thread_off * wordSize); 237 238 const Address r15_save(rbp, r15_off * wordSize); 239 const Address r14_save(rbp, r14_off * wordSize); 240 const Address r13_save(rbp, r13_off * wordSize); 241 const Address r12_save(rbp, r12_off * wordSize); 242 const Address rbx_save(rbp, rbx_off * wordSize); 243 244 // stub code 245 __ enter(); 246 __ subptr(rsp, -rsp_after_call_off * wordSize); 247 248 // save register parameters 249 #ifndef _WIN64 250 __ movptr(parameters, c_rarg5); // parameters 251 __ movptr(entry_point, c_rarg4); // entry_point 252 #endif 253 254 __ movptr(method, c_rarg3); // method 255 __ movl(result_type, c_rarg2); // result type 256 __ movptr(result, c_rarg1); // result 257 __ movptr(call_wrapper, c_rarg0); // call wrapper 258 259 // save regs belonging to calling function 260 __ movptr(rbx_save, rbx); 261 __ movptr(r12_save, r12); 262 __ movptr(r13_save, r13); 263 __ movptr(r14_save, r14); 264 __ movptr(r15_save, r15); 265 #ifdef _WIN64 266 for (int i = 6; i <= 15; i++) { 267 __ movdqu(xmm_save(i), as_XMMRegister(i)); 268 } 269 270 const Address rdi_save(rbp, rdi_off * wordSize); 271 const Address rsi_save(rbp, rsi_off * wordSize); 272 273 __ movptr(rsi_save, rsi); 274 __ movptr(rdi_save, rdi); 275 #else 276 const Address mxcsr_save(rbp, mxcsr_off * wordSize); 277 { 278 Label skip_ldmx; 279 __ stmxcsr(mxcsr_save); 280 __ movl(rax, mxcsr_save); 281 __ andl(rax, MXCSR_MASK); // Only check control and mask bits 282 ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std()); 283 __ cmp32(rax, mxcsr_std); 284 __ jcc(Assembler::equal, skip_ldmx); 285 __ ldmxcsr(mxcsr_std); 286 __ bind(skip_ldmx); 287 } 288 #endif 289 290 // Load up thread register 291 __ movptr(r15_thread, thread); 292 __ reinit_heapbase(); 293 294 #ifdef ASSERT 295 // make sure we have no pending exceptions 296 { 297 Label L; 298 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 299 __ jcc(Assembler::equal, L); 300 __ stop("StubRoutines::call_stub: entered with pending exception"); 301 __ bind(L); 302 } 303 #endif 304 305 // pass parameters if any 306 BLOCK_COMMENT("pass parameters if any"); 307 Label parameters_done; 308 __ movl(c_rarg3, parameter_size); 309 __ testl(c_rarg3, c_rarg3); 310 __ jcc(Assembler::zero, parameters_done); 311 312 Label loop; 313 __ movptr(c_rarg2, parameters); // parameter pointer 314 __ movl(c_rarg1, c_rarg3); // parameter counter is in c_rarg1 315 __ BIND(loop); 316 __ movptr(rax, Address(c_rarg2, 0));// get parameter 317 __ addptr(c_rarg2, wordSize); // advance to next parameter 318 __ decrementl(c_rarg1); // decrement counter 319 __ push(rax); // pass parameter 320 __ jcc(Assembler::notZero, loop); 321 322 // call Java function 323 __ BIND(parameters_done); 324 __ movptr(rbx, method); // get Method* 325 __ movptr(c_rarg1, entry_point); // get entry_point 326 __ mov(r13, rsp); // set sender sp 327 BLOCK_COMMENT("call Java function"); 328 __ call(c_rarg1); 329 330 BLOCK_COMMENT("call_stub_return_address:"); 331 return_address = __ pc(); 332 333 // store result depending on type (everything that is not 334 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT) 335 __ movptr(c_rarg0, result); 336 Label is_long, is_float, is_double, exit; 337 __ movl(c_rarg1, result_type); 338 __ cmpl(c_rarg1, T_OBJECT); 339 __ jcc(Assembler::equal, is_long); 340 __ cmpl(c_rarg1, T_LONG); 341 __ jcc(Assembler::equal, is_long); 342 __ cmpl(c_rarg1, T_FLOAT); 343 __ jcc(Assembler::equal, is_float); 344 __ cmpl(c_rarg1, T_DOUBLE); 345 __ jcc(Assembler::equal, is_double); 346 347 // handle T_INT case 348 __ movl(Address(c_rarg0, 0), rax); 349 350 __ BIND(exit); 351 352 // pop parameters 353 __ lea(rsp, rsp_after_call); 354 355 #ifdef ASSERT 356 // verify that threads correspond 357 { 358 Label L, S; 359 __ cmpptr(r15_thread, thread); 360 __ jcc(Assembler::notEqual, S); 361 __ get_thread(rbx); 362 __ cmpptr(r15_thread, rbx); 363 __ jcc(Assembler::equal, L); 364 __ bind(S); 365 __ jcc(Assembler::equal, L); 366 __ stop("StubRoutines::call_stub: threads must correspond"); 367 __ bind(L); 368 } 369 #endif 370 371 // restore regs belonging to calling function 372 #ifdef _WIN64 373 for (int i = 15; i >= 6; i--) { 374 __ movdqu(as_XMMRegister(i), xmm_save(i)); 375 } 376 #endif 377 __ movptr(r15, r15_save); 378 __ movptr(r14, r14_save); 379 __ movptr(r13, r13_save); 380 __ movptr(r12, r12_save); 381 __ movptr(rbx, rbx_save); 382 383 #ifdef _WIN64 384 __ movptr(rdi, rdi_save); 385 __ movptr(rsi, rsi_save); 386 #else 387 __ ldmxcsr(mxcsr_save); 388 #endif 389 390 // restore rsp 391 __ addptr(rsp, -rsp_after_call_off * wordSize); 392 393 // return 394 __ pop(rbp); 395 __ ret(0); 396 397 // handle return types different from T_INT 398 __ BIND(is_long); 399 __ movq(Address(c_rarg0, 0), rax); 400 __ jmp(exit); 401 402 __ BIND(is_float); 403 __ movflt(Address(c_rarg0, 0), xmm0); 404 __ jmp(exit); 405 406 __ BIND(is_double); 407 __ movdbl(Address(c_rarg0, 0), xmm0); 408 __ jmp(exit); 409 410 return start; 411 } 412 413 // Return point for a Java call if there's an exception thrown in 414 // Java code. The exception is caught and transformed into a 415 // pending exception stored in JavaThread that can be tested from 416 // within the VM. 417 // 418 // Note: Usually the parameters are removed by the callee. In case 419 // of an exception crossing an activation frame boundary, that is 420 // not the case if the callee is compiled code => need to setup the 421 // rsp. 422 // 423 // rax: exception oop 424 425 address generate_catch_exception() { 426 StubCodeMark mark(this, "StubRoutines", "catch_exception"); 427 address start = __ pc(); 428 429 // same as in generate_call_stub(): 430 const Address rsp_after_call(rbp, rsp_after_call_off * wordSize); 431 const Address thread (rbp, thread_off * wordSize); 432 433 #ifdef ASSERT 434 // verify that threads correspond 435 { 436 Label L, S; 437 __ cmpptr(r15_thread, thread); 438 __ jcc(Assembler::notEqual, S); 439 __ get_thread(rbx); 440 __ cmpptr(r15_thread, rbx); 441 __ jcc(Assembler::equal, L); 442 __ bind(S); 443 __ stop("StubRoutines::catch_exception: threads must correspond"); 444 __ bind(L); 445 } 446 #endif 447 448 // set pending exception 449 __ verify_oop(rax); 450 451 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax); 452 __ lea(rscratch1, ExternalAddress((address)__FILE__)); 453 __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1); 454 __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__); 455 456 // complete return to VM 457 assert(StubRoutines::_call_stub_return_address != NULL, 458 "_call_stub_return_address must have been generated before"); 459 __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address)); 460 461 return start; 462 } 463 464 // Continuation point for runtime calls returning with a pending 465 // exception. The pending exception check happened in the runtime 466 // or native call stub. The pending exception in Thread is 467 // converted into a Java-level exception. 468 // 469 // Contract with Java-level exception handlers: 470 // rax: exception 471 // rdx: throwing pc 472 // 473 // NOTE: At entry of this stub, exception-pc must be on stack !! 474 475 address generate_forward_exception() { 476 StubCodeMark mark(this, "StubRoutines", "forward exception"); 477 address start = __ pc(); 478 479 // Upon entry, the sp points to the return address returning into 480 // Java (interpreted or compiled) code; i.e., the return address 481 // becomes the throwing pc. 482 // 483 // Arguments pushed before the runtime call are still on the stack 484 // but the exception handler will reset the stack pointer -> 485 // ignore them. A potential result in registers can be ignored as 486 // well. 487 488 #ifdef ASSERT 489 // make sure this code is only executed if there is a pending exception 490 { 491 Label L; 492 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL); 493 __ jcc(Assembler::notEqual, L); 494 __ stop("StubRoutines::forward exception: no pending exception (1)"); 495 __ bind(L); 496 } 497 #endif 498 499 // compute exception handler into rbx 500 __ movptr(c_rarg0, Address(rsp, 0)); 501 BLOCK_COMMENT("call exception_handler_for_return_address"); 502 __ call_VM_leaf(CAST_FROM_FN_PTR(address, 503 SharedRuntime::exception_handler_for_return_address), 504 r15_thread, c_rarg0); 505 __ mov(rbx, rax); 506 507 // setup rax & rdx, remove return address & clear pending exception 508 __ pop(rdx); 509 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset())); 510 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 511 512 #ifdef ASSERT 513 // make sure exception is set 514 { 515 Label L; 516 __ testptr(rax, rax); 517 __ jcc(Assembler::notEqual, L); 518 __ stop("StubRoutines::forward exception: no pending exception (2)"); 519 __ bind(L); 520 } 521 #endif 522 523 // continue at exception handler (return address removed) 524 // rax: exception 525 // rbx: exception handler 526 // rdx: throwing pc 527 __ verify_oop(rax); 528 __ jmp(rbx); 529 530 return start; 531 } 532 533 // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest) 534 // 535 // Arguments : 536 // c_rarg0: exchange_value 537 // c_rarg0: dest 538 // 539 // Result: 540 // *dest <- ex, return (orig *dest) 541 address generate_atomic_xchg() { 542 StubCodeMark mark(this, "StubRoutines", "atomic_xchg"); 543 address start = __ pc(); 544 545 __ movl(rax, c_rarg0); // Copy to eax we need a return value anyhow 546 __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK 547 __ ret(0); 548 549 return start; 550 } 551 552 // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) 553 // 554 // Arguments : 555 // c_rarg0: exchange_value 556 // c_rarg1: dest 557 // 558 // Result: 559 // *dest <- ex, return (orig *dest) 560 address generate_atomic_xchg_ptr() { 561 StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr"); 562 address start = __ pc(); 563 564 __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow 565 __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK 566 __ ret(0); 567 568 return start; 569 } 570 571 // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest, 572 // jint compare_value) 573 // 574 // Arguments : 575 // c_rarg0: exchange_value 576 // c_rarg1: dest 577 // c_rarg2: compare_value 578 // 579 // Result: 580 // if ( compare_value == *dest ) { 581 // *dest = exchange_value 582 // return compare_value; 583 // else 584 // return *dest; 585 address generate_atomic_cmpxchg() { 586 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg"); 587 address start = __ pc(); 588 589 __ movl(rax, c_rarg2); 590 if ( os::is_MP() ) __ lock(); 591 __ cmpxchgl(c_rarg0, Address(c_rarg1, 0)); 592 __ ret(0); 593 594 return start; 595 } 596 597 // Support for jbyte atomic::atomic_cmpxchg(jbyte exchange_value, volatile jbyte* dest, 598 // jbyte compare_value) 599 // 600 // Arguments : 601 // c_rarg0: exchange_value 602 // c_rarg1: dest 603 // c_rarg2: compare_value 604 // 605 // Result: 606 // if ( compare_value == *dest ) { 607 // *dest = exchange_value 608 // return compare_value; 609 // else 610 // return *dest; 611 address generate_atomic_cmpxchg_byte() { 612 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte"); 613 address start = __ pc(); 614 615 __ movsbq(rax, c_rarg2); 616 if ( os::is_MP() ) __ lock(); 617 __ cmpxchgb(c_rarg0, Address(c_rarg1, 0)); 618 __ ret(0); 619 620 return start; 621 } 622 623 // Support for jlong atomic::atomic_cmpxchg(jlong exchange_value, 624 // volatile jlong* dest, 625 // jlong compare_value) 626 // Arguments : 627 // c_rarg0: exchange_value 628 // c_rarg1: dest 629 // c_rarg2: compare_value 630 // 631 // Result: 632 // if ( compare_value == *dest ) { 633 // *dest = exchange_value 634 // return compare_value; 635 // else 636 // return *dest; 637 address generate_atomic_cmpxchg_long() { 638 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long"); 639 address start = __ pc(); 640 641 __ movq(rax, c_rarg2); 642 if ( os::is_MP() ) __ lock(); 643 __ cmpxchgq(c_rarg0, Address(c_rarg1, 0)); 644 __ ret(0); 645 646 return start; 647 } 648 649 // Support for jint atomic::add(jint add_value, volatile jint* dest) 650 // 651 // Arguments : 652 // c_rarg0: add_value 653 // c_rarg1: dest 654 // 655 // Result: 656 // *dest += add_value 657 // return *dest; 658 address generate_atomic_add() { 659 StubCodeMark mark(this, "StubRoutines", "atomic_add"); 660 address start = __ pc(); 661 662 __ movl(rax, c_rarg0); 663 if ( os::is_MP() ) __ lock(); 664 __ xaddl(Address(c_rarg1, 0), c_rarg0); 665 __ addl(rax, c_rarg0); 666 __ ret(0); 667 668 return start; 669 } 670 671 // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) 672 // 673 // Arguments : 674 // c_rarg0: add_value 675 // c_rarg1: dest 676 // 677 // Result: 678 // *dest += add_value 679 // return *dest; 680 address generate_atomic_add_ptr() { 681 StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr"); 682 address start = __ pc(); 683 684 __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow 685 if ( os::is_MP() ) __ lock(); 686 __ xaddptr(Address(c_rarg1, 0), c_rarg0); 687 __ addptr(rax, c_rarg0); 688 __ ret(0); 689 690 return start; 691 } 692 693 // Support for intptr_t OrderAccess::fence() 694 // 695 // Arguments : 696 // 697 // Result: 698 address generate_orderaccess_fence() { 699 StubCodeMark mark(this, "StubRoutines", "orderaccess_fence"); 700 address start = __ pc(); 701 __ membar(Assembler::StoreLoad); 702 __ ret(0); 703 704 return start; 705 } 706 707 // Support for intptr_t get_previous_fp() 708 // 709 // This routine is used to find the previous frame pointer for the 710 // caller (current_frame_guess). This is used as part of debugging 711 // ps() is seemingly lost trying to find frames. 712 // This code assumes that caller current_frame_guess) has a frame. 713 address generate_get_previous_fp() { 714 StubCodeMark mark(this, "StubRoutines", "get_previous_fp"); 715 const Address old_fp(rbp, 0); 716 const Address older_fp(rax, 0); 717 address start = __ pc(); 718 719 __ enter(); 720 __ movptr(rax, old_fp); // callers fp 721 __ movptr(rax, older_fp); // the frame for ps() 722 __ pop(rbp); 723 __ ret(0); 724 725 return start; 726 } 727 728 // Support for intptr_t get_previous_sp() 729 // 730 // This routine is used to find the previous stack pointer for the 731 // caller. 732 address generate_get_previous_sp() { 733 StubCodeMark mark(this, "StubRoutines", "get_previous_sp"); 734 address start = __ pc(); 735 736 __ movptr(rax, rsp); 737 __ addptr(rax, 8); // return address is at the top of the stack. 738 __ ret(0); 739 740 return start; 741 } 742 743 //---------------------------------------------------------------------------------------------------- 744 // Support for void verify_mxcsr() 745 // 746 // This routine is used with -Xcheck:jni to verify that native 747 // JNI code does not return to Java code without restoring the 748 // MXCSR register to our expected state. 749 750 address generate_verify_mxcsr() { 751 StubCodeMark mark(this, "StubRoutines", "verify_mxcsr"); 752 address start = __ pc(); 753 754 const Address mxcsr_save(rsp, 0); 755 756 if (CheckJNICalls) { 757 Label ok_ret; 758 ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std()); 759 __ push(rax); 760 __ subptr(rsp, wordSize); // allocate a temp location 761 __ stmxcsr(mxcsr_save); 762 __ movl(rax, mxcsr_save); 763 __ andl(rax, MXCSR_MASK); // Only check control and mask bits 764 __ cmp32(rax, mxcsr_std); 765 __ jcc(Assembler::equal, ok_ret); 766 767 __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall"); 768 769 __ ldmxcsr(mxcsr_std); 770 771 __ bind(ok_ret); 772 __ addptr(rsp, wordSize); 773 __ pop(rax); 774 } 775 776 __ ret(0); 777 778 return start; 779 } 780 781 address generate_f2i_fixup() { 782 StubCodeMark mark(this, "StubRoutines", "f2i_fixup"); 783 Address inout(rsp, 5 * wordSize); // return address + 4 saves 784 785 address start = __ pc(); 786 787 Label L; 788 789 __ push(rax); 790 __ push(c_rarg3); 791 __ push(c_rarg2); 792 __ push(c_rarg1); 793 794 __ movl(rax, 0x7f800000); 795 __ xorl(c_rarg3, c_rarg3); 796 __ movl(c_rarg2, inout); 797 __ movl(c_rarg1, c_rarg2); 798 __ andl(c_rarg1, 0x7fffffff); 799 __ cmpl(rax, c_rarg1); // NaN? -> 0 800 __ jcc(Assembler::negative, L); 801 __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint 802 __ movl(c_rarg3, 0x80000000); 803 __ movl(rax, 0x7fffffff); 804 __ cmovl(Assembler::positive, c_rarg3, rax); 805 806 __ bind(L); 807 __ movptr(inout, c_rarg3); 808 809 __ pop(c_rarg1); 810 __ pop(c_rarg2); 811 __ pop(c_rarg3); 812 __ pop(rax); 813 814 __ ret(0); 815 816 return start; 817 } 818 819 address generate_f2l_fixup() { 820 StubCodeMark mark(this, "StubRoutines", "f2l_fixup"); 821 Address inout(rsp, 5 * wordSize); // return address + 4 saves 822 address start = __ pc(); 823 824 Label L; 825 826 __ push(rax); 827 __ push(c_rarg3); 828 __ push(c_rarg2); 829 __ push(c_rarg1); 830 831 __ movl(rax, 0x7f800000); 832 __ xorl(c_rarg3, c_rarg3); 833 __ movl(c_rarg2, inout); 834 __ movl(c_rarg1, c_rarg2); 835 __ andl(c_rarg1, 0x7fffffff); 836 __ cmpl(rax, c_rarg1); // NaN? -> 0 837 __ jcc(Assembler::negative, L); 838 __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong 839 __ mov64(c_rarg3, 0x8000000000000000); 840 __ mov64(rax, 0x7fffffffffffffff); 841 __ cmov(Assembler::positive, c_rarg3, rax); 842 843 __ bind(L); 844 __ movptr(inout, c_rarg3); 845 846 __ pop(c_rarg1); 847 __ pop(c_rarg2); 848 __ pop(c_rarg3); 849 __ pop(rax); 850 851 __ ret(0); 852 853 return start; 854 } 855 856 address generate_d2i_fixup() { 857 StubCodeMark mark(this, "StubRoutines", "d2i_fixup"); 858 Address inout(rsp, 6 * wordSize); // return address + 5 saves 859 860 address start = __ pc(); 861 862 Label L; 863 864 __ push(rax); 865 __ push(c_rarg3); 866 __ push(c_rarg2); 867 __ push(c_rarg1); 868 __ push(c_rarg0); 869 870 __ movl(rax, 0x7ff00000); 871 __ movq(c_rarg2, inout); 872 __ movl(c_rarg3, c_rarg2); 873 __ mov(c_rarg1, c_rarg2); 874 __ mov(c_rarg0, c_rarg2); 875 __ negl(c_rarg3); 876 __ shrptr(c_rarg1, 0x20); 877 __ orl(c_rarg3, c_rarg2); 878 __ andl(c_rarg1, 0x7fffffff); 879 __ xorl(c_rarg2, c_rarg2); 880 __ shrl(c_rarg3, 0x1f); 881 __ orl(c_rarg1, c_rarg3); 882 __ cmpl(rax, c_rarg1); 883 __ jcc(Assembler::negative, L); // NaN -> 0 884 __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint 885 __ movl(c_rarg2, 0x80000000); 886 __ movl(rax, 0x7fffffff); 887 __ cmov(Assembler::positive, c_rarg2, rax); 888 889 __ bind(L); 890 __ movptr(inout, c_rarg2); 891 892 __ pop(c_rarg0); 893 __ pop(c_rarg1); 894 __ pop(c_rarg2); 895 __ pop(c_rarg3); 896 __ pop(rax); 897 898 __ ret(0); 899 900 return start; 901 } 902 903 address generate_d2l_fixup() { 904 StubCodeMark mark(this, "StubRoutines", "d2l_fixup"); 905 Address inout(rsp, 6 * wordSize); // return address + 5 saves 906 907 address start = __ pc(); 908 909 Label L; 910 911 __ push(rax); 912 __ push(c_rarg3); 913 __ push(c_rarg2); 914 __ push(c_rarg1); 915 __ push(c_rarg0); 916 917 __ movl(rax, 0x7ff00000); 918 __ movq(c_rarg2, inout); 919 __ movl(c_rarg3, c_rarg2); 920 __ mov(c_rarg1, c_rarg2); 921 __ mov(c_rarg0, c_rarg2); 922 __ negl(c_rarg3); 923 __ shrptr(c_rarg1, 0x20); 924 __ orl(c_rarg3, c_rarg2); 925 __ andl(c_rarg1, 0x7fffffff); 926 __ xorl(c_rarg2, c_rarg2); 927 __ shrl(c_rarg3, 0x1f); 928 __ orl(c_rarg1, c_rarg3); 929 __ cmpl(rax, c_rarg1); 930 __ jcc(Assembler::negative, L); // NaN -> 0 931 __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong 932 __ mov64(c_rarg2, 0x8000000000000000); 933 __ mov64(rax, 0x7fffffffffffffff); 934 __ cmovq(Assembler::positive, c_rarg2, rax); 935 936 __ bind(L); 937 __ movq(inout, c_rarg2); 938 939 __ pop(c_rarg0); 940 __ pop(c_rarg1); 941 __ pop(c_rarg2); 942 __ pop(c_rarg3); 943 __ pop(rax); 944 945 __ ret(0); 946 947 return start; 948 } 949 950 address generate_fp_mask(const char *stub_name, int64_t mask) { 951 __ align(CodeEntryAlignment); 952 StubCodeMark mark(this, "StubRoutines", stub_name); 953 address start = __ pc(); 954 955 __ emit_data64( mask, relocInfo::none ); 956 __ emit_data64( mask, relocInfo::none ); 957 958 return start; 959 } 960 961 // The following routine generates a subroutine to throw an 962 // asynchronous UnknownError when an unsafe access gets a fault that 963 // could not be reasonably prevented by the programmer. (Example: 964 // SIGBUS/OBJERR.) 965 address generate_handler_for_unsafe_access() { 966 StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access"); 967 address start = __ pc(); 968 969 __ push(0); // hole for return address-to-be 970 __ pusha(); // push registers 971 Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord); 972 973 // FIXME: this probably needs alignment logic 974 975 __ subptr(rsp, frame::arg_reg_save_area_bytes); 976 BLOCK_COMMENT("call handle_unsafe_access"); 977 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access))); 978 __ addptr(rsp, frame::arg_reg_save_area_bytes); 979 980 __ movptr(next_pc, rax); // stuff next address 981 __ popa(); 982 __ ret(0); // jump to next address 983 984 return start; 985 } 986 987 // Non-destructive plausibility checks for oops 988 // 989 // Arguments: 990 // all args on stack! 991 // 992 // Stack after saving c_rarg3: 993 // [tos + 0]: saved c_rarg3 994 // [tos + 1]: saved c_rarg2 995 // [tos + 2]: saved r12 (several TemplateTable methods use it) 996 // [tos + 3]: saved flags 997 // [tos + 4]: return address 998 // * [tos + 5]: error message (char*) 999 // * [tos + 6]: object to verify (oop) 1000 // * [tos + 7]: saved rax - saved by caller and bashed 1001 // * [tos + 8]: saved r10 (rscratch1) - saved by caller 1002 // * = popped on exit 1003 address generate_verify_oop() { 1004 StubCodeMark mark(this, "StubRoutines", "verify_oop"); 1005 address start = __ pc(); 1006 1007 Label exit, error; 1008 1009 __ pushf(); 1010 __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr())); 1011 1012 __ push(r12); 1013 1014 // save c_rarg2 and c_rarg3 1015 __ push(c_rarg2); 1016 __ push(c_rarg3); 1017 1018 enum { 1019 // After previous pushes. 1020 oop_to_verify = 6 * wordSize, 1021 saved_rax = 7 * wordSize, 1022 saved_r10 = 8 * wordSize, 1023 1024 // Before the call to MacroAssembler::debug(), see below. 1025 return_addr = 16 * wordSize, 1026 error_msg = 17 * wordSize 1027 }; 1028 1029 // get object 1030 __ movptr(rax, Address(rsp, oop_to_verify)); 1031 1032 // make sure object is 'reasonable' 1033 __ testptr(rax, rax); 1034 __ jcc(Assembler::zero, exit); // if obj is NULL it is OK 1035 // Check if the oop is in the right area of memory 1036 __ movptr(c_rarg2, rax); 1037 __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask()); 1038 __ andptr(c_rarg2, c_rarg3); 1039 __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits()); 1040 __ cmpptr(c_rarg2, c_rarg3); 1041 __ jcc(Assembler::notZero, error); 1042 1043 // set r12 to heapbase for load_klass() 1044 __ reinit_heapbase(); 1045 1046 // make sure klass is 'reasonable', which is not zero. 1047 __ load_klass(rax, rax); // get klass 1048 __ testptr(rax, rax); 1049 __ jcc(Assembler::zero, error); // if klass is NULL it is broken 1050 1051 // return if everything seems ok 1052 __ bind(exit); 1053 __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back 1054 __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back 1055 __ pop(c_rarg3); // restore c_rarg3 1056 __ pop(c_rarg2); // restore c_rarg2 1057 __ pop(r12); // restore r12 1058 __ popf(); // restore flags 1059 __ ret(4 * wordSize); // pop caller saved stuff 1060 1061 // handle errors 1062 __ bind(error); 1063 __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back 1064 __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back 1065 __ pop(c_rarg3); // get saved c_rarg3 back 1066 __ pop(c_rarg2); // get saved c_rarg2 back 1067 __ pop(r12); // get saved r12 back 1068 __ popf(); // get saved flags off stack -- 1069 // will be ignored 1070 1071 __ pusha(); // push registers 1072 // (rip is already 1073 // already pushed) 1074 // debug(char* msg, int64_t pc, int64_t regs[]) 1075 // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and 1076 // pushed all the registers, so now the stack looks like: 1077 // [tos + 0] 16 saved registers 1078 // [tos + 16] return address 1079 // * [tos + 17] error message (char*) 1080 // * [tos + 18] object to verify (oop) 1081 // * [tos + 19] saved rax - saved by caller and bashed 1082 // * [tos + 20] saved r10 (rscratch1) - saved by caller 1083 // * = popped on exit 1084 1085 __ movptr(c_rarg0, Address(rsp, error_msg)); // pass address of error message 1086 __ movptr(c_rarg1, Address(rsp, return_addr)); // pass return address 1087 __ movq(c_rarg2, rsp); // pass address of regs on stack 1088 __ mov(r12, rsp); // remember rsp 1089 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 1090 __ andptr(rsp, -16); // align stack as required by ABI 1091 BLOCK_COMMENT("call MacroAssembler::debug"); 1092 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64))); 1093 __ mov(rsp, r12); // restore rsp 1094 __ popa(); // pop registers (includes r12) 1095 __ ret(4 * wordSize); // pop caller saved stuff 1096 1097 return start; 1098 } 1099 1100 // 1101 // Verify that a register contains clean 32-bits positive value 1102 // (high 32-bits are 0) so it could be used in 64-bits shifts. 1103 // 1104 // Input: 1105 // Rint - 32-bits value 1106 // Rtmp - scratch 1107 // 1108 void assert_clean_int(Register Rint, Register Rtmp) { 1109 #ifdef ASSERT 1110 Label L; 1111 assert_different_registers(Rtmp, Rint); 1112 __ movslq(Rtmp, Rint); 1113 __ cmpq(Rtmp, Rint); 1114 __ jcc(Assembler::equal, L); 1115 __ stop("high 32-bits of int value are not 0"); 1116 __ bind(L); 1117 #endif 1118 } 1119 1120 // Generate overlap test for array copy stubs 1121 // 1122 // Input: 1123 // c_rarg0 - from 1124 // c_rarg1 - to 1125 // c_rarg2 - element count 1126 // 1127 // Output: 1128 // rax - &from[element count - 1] 1129 // 1130 void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) { 1131 assert(no_overlap_target != NULL, "must be generated"); 1132 array_overlap_test(no_overlap_target, NULL, sf); 1133 } 1134 void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) { 1135 array_overlap_test(NULL, &L_no_overlap, sf); 1136 } 1137 void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) { 1138 const Register from = c_rarg0; 1139 const Register to = c_rarg1; 1140 const Register count = c_rarg2; 1141 const Register end_from = rax; 1142 1143 __ cmpptr(to, from); 1144 __ lea(end_from, Address(from, count, sf, 0)); 1145 if (NOLp == NULL) { 1146 ExternalAddress no_overlap(no_overlap_target); 1147 __ jump_cc(Assembler::belowEqual, no_overlap); 1148 __ cmpptr(to, end_from); 1149 __ jump_cc(Assembler::aboveEqual, no_overlap); 1150 } else { 1151 __ jcc(Assembler::belowEqual, (*NOLp)); 1152 __ cmpptr(to, end_from); 1153 __ jcc(Assembler::aboveEqual, (*NOLp)); 1154 } 1155 } 1156 1157 // Shuffle first three arg regs on Windows into Linux/Solaris locations. 1158 // 1159 // Outputs: 1160 // rdi - rcx 1161 // rsi - rdx 1162 // rdx - r8 1163 // rcx - r9 1164 // 1165 // Registers r9 and r10 are used to save rdi and rsi on Windows, which latter 1166 // are non-volatile. r9 and r10 should not be used by the caller. 1167 // 1168 void setup_arg_regs(int nargs = 3) { 1169 const Register saved_rdi = r9; 1170 const Register saved_rsi = r10; 1171 assert(nargs == 3 || nargs == 4, "else fix"); 1172 #ifdef _WIN64 1173 assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9, 1174 "unexpected argument registers"); 1175 if (nargs >= 4) 1176 __ mov(rax, r9); // r9 is also saved_rdi 1177 __ movptr(saved_rdi, rdi); 1178 __ movptr(saved_rsi, rsi); 1179 __ mov(rdi, rcx); // c_rarg0 1180 __ mov(rsi, rdx); // c_rarg1 1181 __ mov(rdx, r8); // c_rarg2 1182 if (nargs >= 4) 1183 __ mov(rcx, rax); // c_rarg3 (via rax) 1184 #else 1185 assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx, 1186 "unexpected argument registers"); 1187 #endif 1188 } 1189 1190 void restore_arg_regs() { 1191 const Register saved_rdi = r9; 1192 const Register saved_rsi = r10; 1193 #ifdef _WIN64 1194 __ movptr(rdi, saved_rdi); 1195 __ movptr(rsi, saved_rsi); 1196 #endif 1197 } 1198 1199 // Generate code for an array write pre barrier 1200 // 1201 // addr - starting address 1202 // count - element count 1203 // tmp - scratch register 1204 // 1205 // Destroy no registers! 1206 // 1207 void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) { 1208 BarrierSet* bs = Universe::heap()->barrier_set(); 1209 switch (bs->kind()) { 1210 case BarrierSet::G1SATBCT: 1211 case BarrierSet::G1SATBCTLogging: 1212 // With G1, don't generate the call if we statically know that the target in uninitialized 1213 if (!dest_uninitialized) { 1214 __ pusha(); // push registers 1215 if (count == c_rarg0) { 1216 if (addr == c_rarg1) { 1217 // exactly backwards!! 1218 __ xchgptr(c_rarg1, c_rarg0); 1219 } else { 1220 __ movptr(c_rarg1, count); 1221 __ movptr(c_rarg0, addr); 1222 } 1223 } else { 1224 __ movptr(c_rarg0, addr); 1225 __ movptr(c_rarg1, count); 1226 } 1227 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2); 1228 __ popa(); 1229 } 1230 break; 1231 case BarrierSet::CardTableModRef: 1232 case BarrierSet::CardTableExtension: 1233 case BarrierSet::ModRef: 1234 break; 1235 default: 1236 ShouldNotReachHere(); 1237 1238 } 1239 } 1240 1241 // 1242 // Generate code for an array write post barrier 1243 // 1244 // Input: 1245 // start - register containing starting address of destination array 1246 // count - elements count 1247 // scratch - scratch register 1248 // 1249 // The input registers are overwritten. 1250 // 1251 void gen_write_ref_array_post_barrier(Register start, Register count, Register scratch) { 1252 assert_different_registers(start, count, scratch); 1253 BarrierSet* bs = Universe::heap()->barrier_set(); 1254 switch (bs->kind()) { 1255 case BarrierSet::G1SATBCT: 1256 case BarrierSet::G1SATBCTLogging: 1257 { 1258 __ pusha(); // push registers (overkill) 1259 if (c_rarg0 == count) { // On win64 c_rarg0 == rcx 1260 assert_different_registers(c_rarg1, start); 1261 __ mov(c_rarg1, count); 1262 __ mov(c_rarg0, start); 1263 } else { 1264 assert_different_registers(c_rarg0, count); 1265 __ mov(c_rarg0, start); 1266 __ mov(c_rarg1, count); 1267 } 1268 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2); 1269 __ popa(); 1270 } 1271 break; 1272 case BarrierSet::CardTableModRef: 1273 case BarrierSet::CardTableExtension: 1274 { 1275 CardTableModRefBS* ct = (CardTableModRefBS*)bs; 1276 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); 1277 1278 Label L_loop; 1279 const Register end = count; 1280 1281 __ leaq(end, Address(start, count, TIMES_OOP, 0)); // end == start+count*oop_size 1282 __ subptr(end, BytesPerHeapOop); // end - 1 to make inclusive 1283 __ shrptr(start, CardTableModRefBS::card_shift); 1284 __ shrptr(end, CardTableModRefBS::card_shift); 1285 __ subptr(end, start); // end --> cards count 1286 1287 int64_t disp = (int64_t) ct->byte_map_base; 1288 __ mov64(scratch, disp); 1289 __ addptr(start, scratch); 1290 __ BIND(L_loop); 1291 __ movb(Address(start, count, Address::times_1), 0); 1292 __ decrement(count); 1293 __ jcc(Assembler::greaterEqual, L_loop); 1294 } 1295 break; 1296 default: 1297 ShouldNotReachHere(); 1298 1299 } 1300 } 1301 1302 1303 // Copy big chunks forward 1304 // 1305 // Inputs: 1306 // end_from - source arrays end address 1307 // end_to - destination array end address 1308 // qword_count - 64-bits element count, negative 1309 // to - scratch 1310 // L_copy_bytes - entry label 1311 // L_copy_8_bytes - exit label 1312 // 1313 void copy_bytes_forward(Register end_from, Register end_to, 1314 Register qword_count, Register to, 1315 Label& L_copy_bytes, Label& L_copy_8_bytes) { 1316 DEBUG_ONLY(__ stop("enter at entry label, not here")); 1317 Label L_loop; 1318 __ align(OptoLoopAlignment); 1319 if (UseUnalignedLoadStores) { 1320 Label L_end; 1321 // Copy 64-bytes per iteration 1322 __ BIND(L_loop); 1323 if (UseAVX >= 2) { 1324 __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56)); 1325 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0); 1326 __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24)); 1327 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1); 1328 } else { 1329 __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56)); 1330 __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0); 1331 __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40)); 1332 __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1); 1333 __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24)); 1334 __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2); 1335 __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8)); 1336 __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3); 1337 } 1338 __ BIND(L_copy_bytes); 1339 __ addptr(qword_count, 8); 1340 __ jcc(Assembler::lessEqual, L_loop); 1341 __ subptr(qword_count, 4); // sub(8) and add(4) 1342 __ jccb(Assembler::greater, L_end); 1343 // Copy trailing 32 bytes 1344 if (UseAVX >= 2) { 1345 __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24)); 1346 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0); 1347 } else { 1348 __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24)); 1349 __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0); 1350 __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8)); 1351 __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1); 1352 } 1353 __ addptr(qword_count, 4); 1354 __ BIND(L_end); 1355 if (UseAVX >= 2) { 1356 // clean upper bits of YMM registers 1357 __ vzeroupper(); 1358 } 1359 } else { 1360 // Copy 32-bytes per iteration 1361 __ BIND(L_loop); 1362 __ movq(to, Address(end_from, qword_count, Address::times_8, -24)); 1363 __ movq(Address(end_to, qword_count, Address::times_8, -24), to); 1364 __ movq(to, Address(end_from, qword_count, Address::times_8, -16)); 1365 __ movq(Address(end_to, qword_count, Address::times_8, -16), to); 1366 __ movq(to, Address(end_from, qword_count, Address::times_8, - 8)); 1367 __ movq(Address(end_to, qword_count, Address::times_8, - 8), to); 1368 __ movq(to, Address(end_from, qword_count, Address::times_8, - 0)); 1369 __ movq(Address(end_to, qword_count, Address::times_8, - 0), to); 1370 1371 __ BIND(L_copy_bytes); 1372 __ addptr(qword_count, 4); 1373 __ jcc(Assembler::lessEqual, L_loop); 1374 } 1375 __ subptr(qword_count, 4); 1376 __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords 1377 } 1378 1379 // Copy big chunks backward 1380 // 1381 // Inputs: 1382 // from - source arrays address 1383 // dest - destination array address 1384 // qword_count - 64-bits element count 1385 // to - scratch 1386 // L_copy_bytes - entry label 1387 // L_copy_8_bytes - exit label 1388 // 1389 void copy_bytes_backward(Register from, Register dest, 1390 Register qword_count, Register to, 1391 Label& L_copy_bytes, Label& L_copy_8_bytes) { 1392 DEBUG_ONLY(__ stop("enter at entry label, not here")); 1393 Label L_loop; 1394 __ align(OptoLoopAlignment); 1395 if (UseUnalignedLoadStores) { 1396 Label L_end; 1397 // Copy 64-bytes per iteration 1398 __ BIND(L_loop); 1399 if (UseAVX >= 2) { 1400 __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32)); 1401 __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0); 1402 __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0)); 1403 __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1); 1404 } else { 1405 __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48)); 1406 __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0); 1407 __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32)); 1408 __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1); 1409 __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16)); 1410 __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2); 1411 __ movdqu(xmm3, Address(from, qword_count, Address::times_8, 0)); 1412 __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm3); 1413 } 1414 __ BIND(L_copy_bytes); 1415 __ subptr(qword_count, 8); 1416 __ jcc(Assembler::greaterEqual, L_loop); 1417 1418 __ addptr(qword_count, 4); // add(8) and sub(4) 1419 __ jccb(Assembler::less, L_end); 1420 // Copy trailing 32 bytes 1421 if (UseAVX >= 2) { 1422 __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0)); 1423 __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0); 1424 } else { 1425 __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16)); 1426 __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0); 1427 __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 0)); 1428 __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm1); 1429 } 1430 __ subptr(qword_count, 4); 1431 __ BIND(L_end); 1432 if (UseAVX >= 2) { 1433 // clean upper bits of YMM registers 1434 __ vzeroupper(); 1435 } 1436 } else { 1437 // Copy 32-bytes per iteration 1438 __ BIND(L_loop); 1439 __ movq(to, Address(from, qword_count, Address::times_8, 24)); 1440 __ movq(Address(dest, qword_count, Address::times_8, 24), to); 1441 __ movq(to, Address(from, qword_count, Address::times_8, 16)); 1442 __ movq(Address(dest, qword_count, Address::times_8, 16), to); 1443 __ movq(to, Address(from, qword_count, Address::times_8, 8)); 1444 __ movq(Address(dest, qword_count, Address::times_8, 8), to); 1445 __ movq(to, Address(from, qword_count, Address::times_8, 0)); 1446 __ movq(Address(dest, qword_count, Address::times_8, 0), to); 1447 1448 __ BIND(L_copy_bytes); 1449 __ subptr(qword_count, 4); 1450 __ jcc(Assembler::greaterEqual, L_loop); 1451 } 1452 __ addptr(qword_count, 4); 1453 __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords 1454 } 1455 1456 1457 // Arguments: 1458 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1459 // ignored 1460 // name - stub name string 1461 // 1462 // Inputs: 1463 // c_rarg0 - source array address 1464 // c_rarg1 - destination array address 1465 // c_rarg2 - element count, treated as ssize_t, can be zero 1466 // 1467 // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, 1468 // we let the hardware handle it. The one to eight bytes within words, 1469 // dwords or qwords that span cache line boundaries will still be loaded 1470 // and stored atomically. 1471 // 1472 // Side Effects: 1473 // disjoint_byte_copy_entry is set to the no-overlap entry point 1474 // used by generate_conjoint_byte_copy(). 1475 // 1476 address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) { 1477 __ align(CodeEntryAlignment); 1478 StubCodeMark mark(this, "StubRoutines", name); 1479 address start = __ pc(); 1480 1481 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes; 1482 Label L_copy_byte, L_exit; 1483 const Register from = rdi; // source array address 1484 const Register to = rsi; // destination array address 1485 const Register count = rdx; // elements count 1486 const Register byte_count = rcx; 1487 const Register qword_count = count; 1488 const Register end_from = from; // source array end address 1489 const Register end_to = to; // destination array end address 1490 // End pointers are inclusive, and if count is not zero they point 1491 // to the last unit copied: end_to[0] := end_from[0] 1492 1493 __ enter(); // required for proper stackwalking of RuntimeStub frame 1494 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1495 1496 if (entry != NULL) { 1497 *entry = __ pc(); 1498 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1499 BLOCK_COMMENT("Entry:"); 1500 } 1501 1502 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1503 // r9 and r10 may be used to save non-volatile registers 1504 1505 // 'from', 'to' and 'count' are now valid 1506 __ movptr(byte_count, count); 1507 __ shrptr(count, 3); // count => qword_count 1508 1509 // Copy from low to high addresses. Use 'to' as scratch. 1510 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1511 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1512 __ negptr(qword_count); // make the count negative 1513 __ jmp(L_copy_bytes); 1514 1515 // Copy trailing qwords 1516 __ BIND(L_copy_8_bytes); 1517 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1518 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1519 __ increment(qword_count); 1520 __ jcc(Assembler::notZero, L_copy_8_bytes); 1521 1522 // Check for and copy trailing dword 1523 __ BIND(L_copy_4_bytes); 1524 __ testl(byte_count, 4); 1525 __ jccb(Assembler::zero, L_copy_2_bytes); 1526 __ movl(rax, Address(end_from, 8)); 1527 __ movl(Address(end_to, 8), rax); 1528 1529 __ addptr(end_from, 4); 1530 __ addptr(end_to, 4); 1531 1532 // Check for and copy trailing word 1533 __ BIND(L_copy_2_bytes); 1534 __ testl(byte_count, 2); 1535 __ jccb(Assembler::zero, L_copy_byte); 1536 __ movw(rax, Address(end_from, 8)); 1537 __ movw(Address(end_to, 8), rax); 1538 1539 __ addptr(end_from, 2); 1540 __ addptr(end_to, 2); 1541 1542 // Check for and copy trailing byte 1543 __ BIND(L_copy_byte); 1544 __ testl(byte_count, 1); 1545 __ jccb(Assembler::zero, L_exit); 1546 __ movb(rax, Address(end_from, 8)); 1547 __ movb(Address(end_to, 8), rax); 1548 1549 __ BIND(L_exit); 1550 restore_arg_regs(); 1551 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free 1552 __ xorptr(rax, rax); // return 0 1553 __ leave(); // required for proper stackwalking of RuntimeStub frame 1554 __ ret(0); 1555 1556 // Copy in multi-bytes chunks 1557 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1558 __ jmp(L_copy_4_bytes); 1559 1560 return start; 1561 } 1562 1563 // Arguments: 1564 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1565 // ignored 1566 // name - stub name string 1567 // 1568 // Inputs: 1569 // c_rarg0 - source array address 1570 // c_rarg1 - destination array address 1571 // c_rarg2 - element count, treated as ssize_t, can be zero 1572 // 1573 // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, 1574 // we let the hardware handle it. The one to eight bytes within words, 1575 // dwords or qwords that span cache line boundaries will still be loaded 1576 // and stored atomically. 1577 // 1578 address generate_conjoint_byte_copy(bool aligned, address nooverlap_target, 1579 address* entry, const char *name) { 1580 __ align(CodeEntryAlignment); 1581 StubCodeMark mark(this, "StubRoutines", name); 1582 address start = __ pc(); 1583 1584 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes; 1585 const Register from = rdi; // source array address 1586 const Register to = rsi; // destination array address 1587 const Register count = rdx; // elements count 1588 const Register byte_count = rcx; 1589 const Register qword_count = count; 1590 1591 __ enter(); // required for proper stackwalking of RuntimeStub frame 1592 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1593 1594 if (entry != NULL) { 1595 *entry = __ pc(); 1596 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1597 BLOCK_COMMENT("Entry:"); 1598 } 1599 1600 array_overlap_test(nooverlap_target, Address::times_1); 1601 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1602 // r9 and r10 may be used to save non-volatile registers 1603 1604 // 'from', 'to' and 'count' are now valid 1605 __ movptr(byte_count, count); 1606 __ shrptr(count, 3); // count => qword_count 1607 1608 // Copy from high to low addresses. 1609 1610 // Check for and copy trailing byte 1611 __ testl(byte_count, 1); 1612 __ jcc(Assembler::zero, L_copy_2_bytes); 1613 __ movb(rax, Address(from, byte_count, Address::times_1, -1)); 1614 __ movb(Address(to, byte_count, Address::times_1, -1), rax); 1615 __ decrement(byte_count); // Adjust for possible trailing word 1616 1617 // Check for and copy trailing word 1618 __ BIND(L_copy_2_bytes); 1619 __ testl(byte_count, 2); 1620 __ jcc(Assembler::zero, L_copy_4_bytes); 1621 __ movw(rax, Address(from, byte_count, Address::times_1, -2)); 1622 __ movw(Address(to, byte_count, Address::times_1, -2), rax); 1623 1624 // Check for and copy trailing dword 1625 __ BIND(L_copy_4_bytes); 1626 __ testl(byte_count, 4); 1627 __ jcc(Assembler::zero, L_copy_bytes); 1628 __ movl(rax, Address(from, qword_count, Address::times_8)); 1629 __ movl(Address(to, qword_count, Address::times_8), rax); 1630 __ jmp(L_copy_bytes); 1631 1632 // Copy trailing qwords 1633 __ BIND(L_copy_8_bytes); 1634 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1635 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1636 __ decrement(qword_count); 1637 __ jcc(Assembler::notZero, L_copy_8_bytes); 1638 1639 restore_arg_regs(); 1640 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free 1641 __ xorptr(rax, rax); // return 0 1642 __ leave(); // required for proper stackwalking of RuntimeStub frame 1643 __ ret(0); 1644 1645 // Copy in multi-bytes chunks 1646 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1647 1648 restore_arg_regs(); 1649 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free 1650 __ xorptr(rax, rax); // return 0 1651 __ leave(); // required for proper stackwalking of RuntimeStub frame 1652 __ ret(0); 1653 1654 return start; 1655 } 1656 1657 // Arguments: 1658 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1659 // ignored 1660 // name - stub name string 1661 // 1662 // Inputs: 1663 // c_rarg0 - source array address 1664 // c_rarg1 - destination array address 1665 // c_rarg2 - element count, treated as ssize_t, can be zero 1666 // 1667 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we 1668 // let the hardware handle it. The two or four words within dwords 1669 // or qwords that span cache line boundaries will still be loaded 1670 // and stored atomically. 1671 // 1672 // Side Effects: 1673 // disjoint_short_copy_entry is set to the no-overlap entry point 1674 // used by generate_conjoint_short_copy(). 1675 // 1676 address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) { 1677 __ align(CodeEntryAlignment); 1678 StubCodeMark mark(this, "StubRoutines", name); 1679 address start = __ pc(); 1680 1681 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes,L_copy_2_bytes,L_exit; 1682 const Register from = rdi; // source array address 1683 const Register to = rsi; // destination array address 1684 const Register count = rdx; // elements count 1685 const Register word_count = rcx; 1686 const Register qword_count = count; 1687 const Register end_from = from; // source array end address 1688 const Register end_to = to; // destination array end address 1689 // End pointers are inclusive, and if count is not zero they point 1690 // to the last unit copied: end_to[0] := end_from[0] 1691 1692 __ enter(); // required for proper stackwalking of RuntimeStub frame 1693 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1694 1695 if (entry != NULL) { 1696 *entry = __ pc(); 1697 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1698 BLOCK_COMMENT("Entry:"); 1699 } 1700 1701 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1702 // r9 and r10 may be used to save non-volatile registers 1703 1704 // 'from', 'to' and 'count' are now valid 1705 __ movptr(word_count, count); 1706 __ shrptr(count, 2); // count => qword_count 1707 1708 // Copy from low to high addresses. Use 'to' as scratch. 1709 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1710 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1711 __ negptr(qword_count); 1712 __ jmp(L_copy_bytes); 1713 1714 // Copy trailing qwords 1715 __ BIND(L_copy_8_bytes); 1716 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1717 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1718 __ increment(qword_count); 1719 __ jcc(Assembler::notZero, L_copy_8_bytes); 1720 1721 // Original 'dest' is trashed, so we can't use it as a 1722 // base register for a possible trailing word copy 1723 1724 // Check for and copy trailing dword 1725 __ BIND(L_copy_4_bytes); 1726 __ testl(word_count, 2); 1727 __ jccb(Assembler::zero, L_copy_2_bytes); 1728 __ movl(rax, Address(end_from, 8)); 1729 __ movl(Address(end_to, 8), rax); 1730 1731 __ addptr(end_from, 4); 1732 __ addptr(end_to, 4); 1733 1734 // Check for and copy trailing word 1735 __ BIND(L_copy_2_bytes); 1736 __ testl(word_count, 1); 1737 __ jccb(Assembler::zero, L_exit); 1738 __ movw(rax, Address(end_from, 8)); 1739 __ movw(Address(end_to, 8), rax); 1740 1741 __ BIND(L_exit); 1742 restore_arg_regs(); 1743 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1744 __ xorptr(rax, rax); // return 0 1745 __ leave(); // required for proper stackwalking of RuntimeStub frame 1746 __ ret(0); 1747 1748 // Copy in multi-bytes chunks 1749 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1750 __ jmp(L_copy_4_bytes); 1751 1752 return start; 1753 } 1754 1755 address generate_fill(BasicType t, bool aligned, const char *name) { 1756 __ align(CodeEntryAlignment); 1757 StubCodeMark mark(this, "StubRoutines", name); 1758 address start = __ pc(); 1759 1760 BLOCK_COMMENT("Entry:"); 1761 1762 const Register to = c_rarg0; // source array address 1763 const Register value = c_rarg1; // value 1764 const Register count = c_rarg2; // elements count 1765 1766 __ enter(); // required for proper stackwalking of RuntimeStub frame 1767 1768 __ generate_fill(t, aligned, to, value, count, rax, xmm0); 1769 1770 __ leave(); // required for proper stackwalking of RuntimeStub frame 1771 __ ret(0); 1772 return start; 1773 } 1774 1775 // Arguments: 1776 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1777 // ignored 1778 // name - stub name string 1779 // 1780 // Inputs: 1781 // c_rarg0 - source array address 1782 // c_rarg1 - destination array address 1783 // c_rarg2 - element count, treated as ssize_t, can be zero 1784 // 1785 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we 1786 // let the hardware handle it. The two or four words within dwords 1787 // or qwords that span cache line boundaries will still be loaded 1788 // and stored atomically. 1789 // 1790 address generate_conjoint_short_copy(bool aligned, address nooverlap_target, 1791 address *entry, const char *name) { 1792 __ align(CodeEntryAlignment); 1793 StubCodeMark mark(this, "StubRoutines", name); 1794 address start = __ pc(); 1795 1796 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes; 1797 const Register from = rdi; // source array address 1798 const Register to = rsi; // destination array address 1799 const Register count = rdx; // elements count 1800 const Register word_count = rcx; 1801 const Register qword_count = count; 1802 1803 __ enter(); // required for proper stackwalking of RuntimeStub frame 1804 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1805 1806 if (entry != NULL) { 1807 *entry = __ pc(); 1808 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1809 BLOCK_COMMENT("Entry:"); 1810 } 1811 1812 array_overlap_test(nooverlap_target, Address::times_2); 1813 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1814 // r9 and r10 may be used to save non-volatile registers 1815 1816 // 'from', 'to' and 'count' are now valid 1817 __ movptr(word_count, count); 1818 __ shrptr(count, 2); // count => qword_count 1819 1820 // Copy from high to low addresses. Use 'to' as scratch. 1821 1822 // Check for and copy trailing word 1823 __ testl(word_count, 1); 1824 __ jccb(Assembler::zero, L_copy_4_bytes); 1825 __ movw(rax, Address(from, word_count, Address::times_2, -2)); 1826 __ movw(Address(to, word_count, Address::times_2, -2), rax); 1827 1828 // Check for and copy trailing dword 1829 __ BIND(L_copy_4_bytes); 1830 __ testl(word_count, 2); 1831 __ jcc(Assembler::zero, L_copy_bytes); 1832 __ movl(rax, Address(from, qword_count, Address::times_8)); 1833 __ movl(Address(to, qword_count, Address::times_8), rax); 1834 __ jmp(L_copy_bytes); 1835 1836 // Copy trailing qwords 1837 __ BIND(L_copy_8_bytes); 1838 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1839 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1840 __ decrement(qword_count); 1841 __ jcc(Assembler::notZero, L_copy_8_bytes); 1842 1843 restore_arg_regs(); 1844 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1845 __ xorptr(rax, rax); // return 0 1846 __ leave(); // required for proper stackwalking of RuntimeStub frame 1847 __ ret(0); 1848 1849 // Copy in multi-bytes chunks 1850 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1851 1852 restore_arg_regs(); 1853 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1854 __ xorptr(rax, rax); // return 0 1855 __ leave(); // required for proper stackwalking of RuntimeStub frame 1856 __ ret(0); 1857 1858 return start; 1859 } 1860 1861 // Arguments: 1862 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1863 // ignored 1864 // is_oop - true => oop array, so generate store check code 1865 // name - stub name string 1866 // 1867 // Inputs: 1868 // c_rarg0 - source array address 1869 // c_rarg1 - destination array address 1870 // c_rarg2 - element count, treated as ssize_t, can be zero 1871 // 1872 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 1873 // the hardware handle it. The two dwords within qwords that span 1874 // cache line boundaries will still be loaded and stored atomicly. 1875 // 1876 // Side Effects: 1877 // disjoint_int_copy_entry is set to the no-overlap entry point 1878 // used by generate_conjoint_int_oop_copy(). 1879 // 1880 address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry, 1881 const char *name, bool dest_uninitialized = false) { 1882 __ align(CodeEntryAlignment); 1883 StubCodeMark mark(this, "StubRoutines", name); 1884 address start = __ pc(); 1885 1886 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit; 1887 const Register from = rdi; // source array address 1888 const Register to = rsi; // destination array address 1889 const Register count = rdx; // elements count 1890 const Register dword_count = rcx; 1891 const Register qword_count = count; 1892 const Register end_from = from; // source array end address 1893 const Register end_to = to; // destination array end address 1894 const Register saved_to = r11; // saved destination array address 1895 // End pointers are inclusive, and if count is not zero they point 1896 // to the last unit copied: end_to[0] := end_from[0] 1897 1898 __ enter(); // required for proper stackwalking of RuntimeStub frame 1899 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1900 1901 if (entry != NULL) { 1902 *entry = __ pc(); 1903 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1904 BLOCK_COMMENT("Entry:"); 1905 } 1906 1907 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1908 // r9 and r10 may be used to save non-volatile registers 1909 if (is_oop) { 1910 __ movq(saved_to, to); 1911 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); 1912 } 1913 1914 // 'from', 'to' and 'count' are now valid 1915 __ movptr(dword_count, count); 1916 __ shrptr(count, 1); // count => qword_count 1917 1918 // Copy from low to high addresses. Use 'to' as scratch. 1919 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1920 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1921 __ negptr(qword_count); 1922 __ jmp(L_copy_bytes); 1923 1924 // Copy trailing qwords 1925 __ BIND(L_copy_8_bytes); 1926 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1927 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1928 __ increment(qword_count); 1929 __ jcc(Assembler::notZero, L_copy_8_bytes); 1930 1931 // Check for and copy trailing dword 1932 __ BIND(L_copy_4_bytes); 1933 __ testl(dword_count, 1); // Only byte test since the value is 0 or 1 1934 __ jccb(Assembler::zero, L_exit); 1935 __ movl(rax, Address(end_from, 8)); 1936 __ movl(Address(end_to, 8), rax); 1937 1938 __ BIND(L_exit); 1939 if (is_oop) { 1940 gen_write_ref_array_post_barrier(saved_to, dword_count, rax); 1941 } 1942 restore_arg_regs(); 1943 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 1944 __ xorptr(rax, rax); // return 0 1945 __ leave(); // required for proper stackwalking of RuntimeStub frame 1946 __ ret(0); 1947 1948 // Copy in multi-bytes chunks 1949 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1950 __ jmp(L_copy_4_bytes); 1951 1952 return start; 1953 } 1954 1955 // Arguments: 1956 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1957 // ignored 1958 // is_oop - true => oop array, so generate store check code 1959 // name - stub name string 1960 // 1961 // Inputs: 1962 // c_rarg0 - source array address 1963 // c_rarg1 - destination array address 1964 // c_rarg2 - element count, treated as ssize_t, can be zero 1965 // 1966 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 1967 // the hardware handle it. The two dwords within qwords that span 1968 // cache line boundaries will still be loaded and stored atomicly. 1969 // 1970 address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target, 1971 address *entry, const char *name, 1972 bool dest_uninitialized = false) { 1973 __ align(CodeEntryAlignment); 1974 StubCodeMark mark(this, "StubRoutines", name); 1975 address start = __ pc(); 1976 1977 Label L_copy_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit; 1978 const Register from = rdi; // source array address 1979 const Register to = rsi; // destination array address 1980 const Register count = rdx; // elements count 1981 const Register dword_count = rcx; 1982 const Register qword_count = count; 1983 1984 __ enter(); // required for proper stackwalking of RuntimeStub frame 1985 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1986 1987 if (entry != NULL) { 1988 *entry = __ pc(); 1989 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1990 BLOCK_COMMENT("Entry:"); 1991 } 1992 1993 array_overlap_test(nooverlap_target, Address::times_4); 1994 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1995 // r9 and r10 may be used to save non-volatile registers 1996 1997 if (is_oop) { 1998 // no registers are destroyed by this call 1999 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); 2000 } 2001 2002 assert_clean_int(count, rax); // Make sure 'count' is clean int. 2003 // 'from', 'to' and 'count' are now valid 2004 __ movptr(dword_count, count); 2005 __ shrptr(count, 1); // count => qword_count 2006 2007 // Copy from high to low addresses. Use 'to' as scratch. 2008 2009 // Check for and copy trailing dword 2010 __ testl(dword_count, 1); 2011 __ jcc(Assembler::zero, L_copy_bytes); 2012 __ movl(rax, Address(from, dword_count, Address::times_4, -4)); 2013 __ movl(Address(to, dword_count, Address::times_4, -4), rax); 2014 __ jmp(L_copy_bytes); 2015 2016 // Copy trailing qwords 2017 __ BIND(L_copy_8_bytes); 2018 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2019 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2020 __ decrement(qword_count); 2021 __ jcc(Assembler::notZero, L_copy_8_bytes); 2022 2023 if (is_oop) { 2024 __ jmp(L_exit); 2025 } 2026 restore_arg_regs(); 2027 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2028 __ xorptr(rax, rax); // return 0 2029 __ leave(); // required for proper stackwalking of RuntimeStub frame 2030 __ ret(0); 2031 2032 // Copy in multi-bytes chunks 2033 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2034 2035 __ BIND(L_exit); 2036 if (is_oop) { 2037 gen_write_ref_array_post_barrier(to, dword_count, rax); 2038 } 2039 restore_arg_regs(); 2040 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2041 __ xorptr(rax, rax); // return 0 2042 __ leave(); // required for proper stackwalking of RuntimeStub frame 2043 __ ret(0); 2044 2045 return start; 2046 } 2047 2048 // Arguments: 2049 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2050 // ignored 2051 // is_oop - true => oop array, so generate store check code 2052 // name - stub name string 2053 // 2054 // Inputs: 2055 // c_rarg0 - source array address 2056 // c_rarg1 - destination array address 2057 // c_rarg2 - element count, treated as ssize_t, can be zero 2058 // 2059 // Side Effects: 2060 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the 2061 // no-overlap entry point used by generate_conjoint_long_oop_copy(). 2062 // 2063 address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry, 2064 const char *name, bool dest_uninitialized = false) { 2065 __ align(CodeEntryAlignment); 2066 StubCodeMark mark(this, "StubRoutines", name); 2067 address start = __ pc(); 2068 2069 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2070 const Register from = rdi; // source array address 2071 const Register to = rsi; // destination array address 2072 const Register qword_count = rdx; // elements count 2073 const Register end_from = from; // source array end address 2074 const Register end_to = rcx; // destination array end address 2075 const Register saved_to = to; 2076 const Register saved_count = r11; 2077 // End pointers are inclusive, and if count is not zero they point 2078 // to the last unit copied: end_to[0] := end_from[0] 2079 2080 __ enter(); // required for proper stackwalking of RuntimeStub frame 2081 // Save no-overlap entry point for generate_conjoint_long_oop_copy() 2082 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2083 2084 if (entry != NULL) { 2085 *entry = __ pc(); 2086 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2087 BLOCK_COMMENT("Entry:"); 2088 } 2089 2090 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 2091 // r9 and r10 may be used to save non-volatile registers 2092 // 'from', 'to' and 'qword_count' are now valid 2093 if (is_oop) { 2094 // Save to and count for store barrier 2095 __ movptr(saved_count, qword_count); 2096 // no registers are destroyed by this call 2097 gen_write_ref_array_pre_barrier(to, qword_count, dest_uninitialized); 2098 } 2099 2100 // Copy from low to high addresses. Use 'to' as scratch. 2101 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 2102 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 2103 __ negptr(qword_count); 2104 __ jmp(L_copy_bytes); 2105 2106 // Copy trailing qwords 2107 __ BIND(L_copy_8_bytes); 2108 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 2109 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 2110 __ increment(qword_count); 2111 __ jcc(Assembler::notZero, L_copy_8_bytes); 2112 2113 if (is_oop) { 2114 __ jmp(L_exit); 2115 } else { 2116 restore_arg_regs(); 2117 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2118 __ xorptr(rax, rax); // return 0 2119 __ leave(); // required for proper stackwalking of RuntimeStub frame 2120 __ ret(0); 2121 } 2122 2123 // Copy in multi-bytes chunks 2124 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2125 2126 if (is_oop) { 2127 __ BIND(L_exit); 2128 gen_write_ref_array_post_barrier(saved_to, saved_count, rax); 2129 } 2130 restore_arg_regs(); 2131 if (is_oop) { 2132 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2133 } else { 2134 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2135 } 2136 __ xorptr(rax, rax); // return 0 2137 __ leave(); // required for proper stackwalking of RuntimeStub frame 2138 __ ret(0); 2139 2140 return start; 2141 } 2142 2143 // Arguments: 2144 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2145 // ignored 2146 // is_oop - true => oop array, so generate store check code 2147 // name - stub name string 2148 // 2149 // Inputs: 2150 // c_rarg0 - source array address 2151 // c_rarg1 - destination array address 2152 // c_rarg2 - element count, treated as ssize_t, can be zero 2153 // 2154 address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, 2155 address nooverlap_target, address *entry, 2156 const char *name, bool dest_uninitialized = false) { 2157 __ align(CodeEntryAlignment); 2158 StubCodeMark mark(this, "StubRoutines", name); 2159 address start = __ pc(); 2160 2161 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2162 const Register from = rdi; // source array address 2163 const Register to = rsi; // destination array address 2164 const Register qword_count = rdx; // elements count 2165 const Register saved_count = rcx; 2166 2167 __ enter(); // required for proper stackwalking of RuntimeStub frame 2168 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2169 2170 if (entry != NULL) { 2171 *entry = __ pc(); 2172 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2173 BLOCK_COMMENT("Entry:"); 2174 } 2175 2176 array_overlap_test(nooverlap_target, Address::times_8); 2177 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 2178 // r9 and r10 may be used to save non-volatile registers 2179 // 'from', 'to' and 'qword_count' are now valid 2180 if (is_oop) { 2181 // Save to and count for store barrier 2182 __ movptr(saved_count, qword_count); 2183 // No registers are destroyed by this call 2184 gen_write_ref_array_pre_barrier(to, saved_count, dest_uninitialized); 2185 } 2186 2187 __ jmp(L_copy_bytes); 2188 2189 // Copy trailing qwords 2190 __ BIND(L_copy_8_bytes); 2191 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2192 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2193 __ decrement(qword_count); 2194 __ jcc(Assembler::notZero, L_copy_8_bytes); 2195 2196 if (is_oop) { 2197 __ jmp(L_exit); 2198 } else { 2199 restore_arg_regs(); 2200 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2201 __ xorptr(rax, rax); // return 0 2202 __ leave(); // required for proper stackwalking of RuntimeStub frame 2203 __ ret(0); 2204 } 2205 2206 // Copy in multi-bytes chunks 2207 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2208 2209 if (is_oop) { 2210 __ BIND(L_exit); 2211 gen_write_ref_array_post_barrier(to, saved_count, rax); 2212 } 2213 restore_arg_regs(); 2214 if (is_oop) { 2215 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2216 } else { 2217 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2218 } 2219 __ xorptr(rax, rax); // return 0 2220 __ leave(); // required for proper stackwalking of RuntimeStub frame 2221 __ ret(0); 2222 2223 return start; 2224 } 2225 2226 2227 // Helper for generating a dynamic type check. 2228 // Smashes no registers. 2229 void generate_type_check(Register sub_klass, 2230 Register super_check_offset, 2231 Register super_klass, 2232 Label& L_success) { 2233 assert_different_registers(sub_klass, super_check_offset, super_klass); 2234 2235 BLOCK_COMMENT("type_check:"); 2236 2237 Label L_miss; 2238 2239 __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL, 2240 super_check_offset); 2241 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL); 2242 2243 // Fall through on failure! 2244 __ BIND(L_miss); 2245 } 2246 2247 // 2248 // Generate checkcasting array copy stub 2249 // 2250 // Input: 2251 // c_rarg0 - source array address 2252 // c_rarg1 - destination array address 2253 // c_rarg2 - element count, treated as ssize_t, can be zero 2254 // c_rarg3 - size_t ckoff (super_check_offset) 2255 // not Win64 2256 // c_rarg4 - oop ckval (super_klass) 2257 // Win64 2258 // rsp+40 - oop ckval (super_klass) 2259 // 2260 // Output: 2261 // rax == 0 - success 2262 // rax == -1^K - failure, where K is partial transfer count 2263 // 2264 address generate_checkcast_copy(const char *name, address *entry, 2265 bool dest_uninitialized = false) { 2266 2267 Label L_load_element, L_store_element, L_do_card_marks, L_done; 2268 2269 // Input registers (after setup_arg_regs) 2270 const Register from = rdi; // source array address 2271 const Register to = rsi; // destination array address 2272 const Register length = rdx; // elements count 2273 const Register ckoff = rcx; // super_check_offset 2274 const Register ckval = r8; // super_klass 2275 2276 // Registers used as temps (r13, r14 are save-on-entry) 2277 const Register end_from = from; // source array end address 2278 const Register end_to = r13; // destination array end address 2279 const Register count = rdx; // -(count_remaining) 2280 const Register r14_length = r14; // saved copy of length 2281 // End pointers are inclusive, and if length is not zero they point 2282 // to the last unit copied: end_to[0] := end_from[0] 2283 2284 const Register rax_oop = rax; // actual oop copied 2285 const Register r11_klass = r11; // oop._klass 2286 2287 //--------------------------------------------------------------- 2288 // Assembler stub will be used for this call to arraycopy 2289 // if the two arrays are subtypes of Object[] but the 2290 // destination array type is not equal to or a supertype 2291 // of the source type. Each element must be separately 2292 // checked. 2293 2294 __ align(CodeEntryAlignment); 2295 StubCodeMark mark(this, "StubRoutines", name); 2296 address start = __ pc(); 2297 2298 __ enter(); // required for proper stackwalking of RuntimeStub frame 2299 2300 #ifdef ASSERT 2301 // caller guarantees that the arrays really are different 2302 // otherwise, we would have to make conjoint checks 2303 { Label L; 2304 array_overlap_test(L, TIMES_OOP); 2305 __ stop("checkcast_copy within a single array"); 2306 __ bind(L); 2307 } 2308 #endif //ASSERT 2309 2310 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx 2311 // ckoff => rcx, ckval => r8 2312 // r9 and r10 may be used to save non-volatile registers 2313 #ifdef _WIN64 2314 // last argument (#4) is on stack on Win64 2315 __ movptr(ckval, Address(rsp, 6 * wordSize)); 2316 #endif 2317 2318 // Caller of this entry point must set up the argument registers. 2319 if (entry != NULL) { 2320 *entry = __ pc(); 2321 BLOCK_COMMENT("Entry:"); 2322 } 2323 2324 // allocate spill slots for r13, r14 2325 enum { 2326 saved_r13_offset, 2327 saved_r14_offset, 2328 saved_rbp_offset 2329 }; 2330 __ subptr(rsp, saved_rbp_offset * wordSize); 2331 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13); 2332 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14); 2333 2334 // check that int operands are properly extended to size_t 2335 assert_clean_int(length, rax); 2336 assert_clean_int(ckoff, rax); 2337 2338 #ifdef ASSERT 2339 BLOCK_COMMENT("assert consistent ckoff/ckval"); 2340 // The ckoff and ckval must be mutually consistent, 2341 // even though caller generates both. 2342 { Label L; 2343 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2344 __ cmpl(ckoff, Address(ckval, sco_offset)); 2345 __ jcc(Assembler::equal, L); 2346 __ stop("super_check_offset inconsistent"); 2347 __ bind(L); 2348 } 2349 #endif //ASSERT 2350 2351 // Loop-invariant addresses. They are exclusive end pointers. 2352 Address end_from_addr(from, length, TIMES_OOP, 0); 2353 Address end_to_addr(to, length, TIMES_OOP, 0); 2354 // Loop-variant addresses. They assume post-incremented count < 0. 2355 Address from_element_addr(end_from, count, TIMES_OOP, 0); 2356 Address to_element_addr(end_to, count, TIMES_OOP, 0); 2357 2358 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); 2359 2360 // Copy from low to high addresses, indexed from the end of each array. 2361 __ lea(end_from, end_from_addr); 2362 __ lea(end_to, end_to_addr); 2363 __ movptr(r14_length, length); // save a copy of the length 2364 assert(length == count, ""); // else fix next line: 2365 __ negptr(count); // negate and test the length 2366 __ jcc(Assembler::notZero, L_load_element); 2367 2368 // Empty array: Nothing to do. 2369 __ xorptr(rax, rax); // return 0 on (trivial) success 2370 __ jmp(L_done); 2371 2372 // ======== begin loop ======== 2373 // (Loop is rotated; its entry is L_load_element.) 2374 // Loop control: 2375 // for (count = -count; count != 0; count++) 2376 // Base pointers src, dst are biased by 8*(count-1),to last element. 2377 __ align(OptoLoopAlignment); 2378 2379 __ BIND(L_store_element); 2380 __ store_heap_oop(to_element_addr, rax_oop); // store the oop 2381 __ increment(count); // increment the count toward zero 2382 __ jcc(Assembler::zero, L_do_card_marks); 2383 2384 // ======== loop entry is here ======== 2385 __ BIND(L_load_element); 2386 __ load_heap_oop(rax_oop, from_element_addr); // load the oop 2387 __ testptr(rax_oop, rax_oop); 2388 __ jcc(Assembler::zero, L_store_element); 2389 2390 __ load_klass(r11_klass, rax_oop);// query the object klass 2391 generate_type_check(r11_klass, ckoff, ckval, L_store_element); 2392 // ======== end loop ======== 2393 2394 // It was a real error; we must depend on the caller to finish the job. 2395 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops. 2396 // Emit GC store barriers for the oops we have copied (r14 + rdx), 2397 // and report their number to the caller. 2398 assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1); 2399 Label L_post_barrier; 2400 __ addptr(r14_length, count); // K = (original - remaining) oops 2401 __ movptr(rax, r14_length); // save the value 2402 __ notptr(rax); // report (-1^K) to caller (does not affect flags) 2403 __ jccb(Assembler::notZero, L_post_barrier); 2404 __ jmp(L_done); // K == 0, nothing was copied, skip post barrier 2405 2406 // Come here on success only. 2407 __ BIND(L_do_card_marks); 2408 __ xorptr(rax, rax); // return 0 on success 2409 2410 __ BIND(L_post_barrier); 2411 gen_write_ref_array_post_barrier(to, r14_length, rscratch1); 2412 2413 // Common exit point (success or failure). 2414 __ BIND(L_done); 2415 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 2416 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 2417 restore_arg_regs(); 2418 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free 2419 __ leave(); // required for proper stackwalking of RuntimeStub frame 2420 __ ret(0); 2421 2422 return start; 2423 } 2424 2425 // 2426 // Generate 'unsafe' array copy stub 2427 // Though just as safe as the other stubs, it takes an unscaled 2428 // size_t argument instead of an element count. 2429 // 2430 // Input: 2431 // c_rarg0 - source array address 2432 // c_rarg1 - destination array address 2433 // c_rarg2 - byte count, treated as ssize_t, can be zero 2434 // 2435 // Examines the alignment of the operands and dispatches 2436 // to a long, int, short, or byte copy loop. 2437 // 2438 address generate_unsafe_copy(const char *name, 2439 address byte_copy_entry, address short_copy_entry, 2440 address int_copy_entry, address long_copy_entry) { 2441 2442 Label L_long_aligned, L_int_aligned, L_short_aligned; 2443 2444 // Input registers (before setup_arg_regs) 2445 const Register from = c_rarg0; // source array address 2446 const Register to = c_rarg1; // destination array address 2447 const Register size = c_rarg2; // byte count (size_t) 2448 2449 // Register used as a temp 2450 const Register bits = rax; // test copy of low bits 2451 2452 __ align(CodeEntryAlignment); 2453 StubCodeMark mark(this, "StubRoutines", name); 2454 address start = __ pc(); 2455 2456 __ enter(); // required for proper stackwalking of RuntimeStub frame 2457 2458 // bump this on entry, not on exit: 2459 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr); 2460 2461 __ mov(bits, from); 2462 __ orptr(bits, to); 2463 __ orptr(bits, size); 2464 2465 __ testb(bits, BytesPerLong-1); 2466 __ jccb(Assembler::zero, L_long_aligned); 2467 2468 __ testb(bits, BytesPerInt-1); 2469 __ jccb(Assembler::zero, L_int_aligned); 2470 2471 __ testb(bits, BytesPerShort-1); 2472 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry)); 2473 2474 __ BIND(L_short_aligned); 2475 __ shrptr(size, LogBytesPerShort); // size => short_count 2476 __ jump(RuntimeAddress(short_copy_entry)); 2477 2478 __ BIND(L_int_aligned); 2479 __ shrptr(size, LogBytesPerInt); // size => int_count 2480 __ jump(RuntimeAddress(int_copy_entry)); 2481 2482 __ BIND(L_long_aligned); 2483 __ shrptr(size, LogBytesPerLong); // size => qword_count 2484 __ jump(RuntimeAddress(long_copy_entry)); 2485 2486 return start; 2487 } 2488 2489 // Perform range checks on the proposed arraycopy. 2490 // Kills temp, but nothing else. 2491 // Also, clean the sign bits of src_pos and dst_pos. 2492 void arraycopy_range_checks(Register src, // source array oop (c_rarg0) 2493 Register src_pos, // source position (c_rarg1) 2494 Register dst, // destination array oo (c_rarg2) 2495 Register dst_pos, // destination position (c_rarg3) 2496 Register length, 2497 Register temp, 2498 Label& L_failed) { 2499 BLOCK_COMMENT("arraycopy_range_checks:"); 2500 2501 // if (src_pos + length > arrayOop(src)->length()) FAIL; 2502 __ movl(temp, length); 2503 __ addl(temp, src_pos); // src_pos + length 2504 __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes())); 2505 __ jcc(Assembler::above, L_failed); 2506 2507 // if (dst_pos + length > arrayOop(dst)->length()) FAIL; 2508 __ movl(temp, length); 2509 __ addl(temp, dst_pos); // dst_pos + length 2510 __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes())); 2511 __ jcc(Assembler::above, L_failed); 2512 2513 // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'. 2514 // Move with sign extension can be used since they are positive. 2515 __ movslq(src_pos, src_pos); 2516 __ movslq(dst_pos, dst_pos); 2517 2518 BLOCK_COMMENT("arraycopy_range_checks done"); 2519 } 2520 2521 // 2522 // Generate generic array copy stubs 2523 // 2524 // Input: 2525 // c_rarg0 - src oop 2526 // c_rarg1 - src_pos (32-bits) 2527 // c_rarg2 - dst oop 2528 // c_rarg3 - dst_pos (32-bits) 2529 // not Win64 2530 // c_rarg4 - element count (32-bits) 2531 // Win64 2532 // rsp+40 - element count (32-bits) 2533 // 2534 // Output: 2535 // rax == 0 - success 2536 // rax == -1^K - failure, where K is partial transfer count 2537 // 2538 address generate_generic_copy(const char *name, 2539 address byte_copy_entry, address short_copy_entry, 2540 address int_copy_entry, address oop_copy_entry, 2541 address long_copy_entry, address checkcast_copy_entry) { 2542 2543 Label L_failed, L_failed_0, L_objArray; 2544 Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs; 2545 2546 // Input registers 2547 const Register src = c_rarg0; // source array oop 2548 const Register src_pos = c_rarg1; // source position 2549 const Register dst = c_rarg2; // destination array oop 2550 const Register dst_pos = c_rarg3; // destination position 2551 #ifndef _WIN64 2552 const Register length = c_rarg4; 2553 #else 2554 const Address length(rsp, 6 * wordSize); // elements count is on stack on Win64 2555 #endif 2556 2557 { int modulus = CodeEntryAlignment; 2558 int target = modulus - 5; // 5 = sizeof jmp(L_failed) 2559 int advance = target - (__ offset() % modulus); 2560 if (advance < 0) advance += modulus; 2561 if (advance > 0) __ nop(advance); 2562 } 2563 StubCodeMark mark(this, "StubRoutines", name); 2564 2565 // Short-hop target to L_failed. Makes for denser prologue code. 2566 __ BIND(L_failed_0); 2567 __ jmp(L_failed); 2568 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed"); 2569 2570 __ align(CodeEntryAlignment); 2571 address start = __ pc(); 2572 2573 __ enter(); // required for proper stackwalking of RuntimeStub frame 2574 2575 // bump this on entry, not on exit: 2576 inc_counter_np(SharedRuntime::_generic_array_copy_ctr); 2577 2578 //----------------------------------------------------------------------- 2579 // Assembler stub will be used for this call to arraycopy 2580 // if the following conditions are met: 2581 // 2582 // (1) src and dst must not be null. 2583 // (2) src_pos must not be negative. 2584 // (3) dst_pos must not be negative. 2585 // (4) length must not be negative. 2586 // (5) src klass and dst klass should be the same and not NULL. 2587 // (6) src and dst should be arrays. 2588 // (7) src_pos + length must not exceed length of src. 2589 // (8) dst_pos + length must not exceed length of dst. 2590 // 2591 2592 // if (src == NULL) return -1; 2593 __ testptr(src, src); // src oop 2594 size_t j1off = __ offset(); 2595 __ jccb(Assembler::zero, L_failed_0); 2596 2597 // if (src_pos < 0) return -1; 2598 __ testl(src_pos, src_pos); // src_pos (32-bits) 2599 __ jccb(Assembler::negative, L_failed_0); 2600 2601 // if (dst == NULL) return -1; 2602 __ testptr(dst, dst); // dst oop 2603 __ jccb(Assembler::zero, L_failed_0); 2604 2605 // if (dst_pos < 0) return -1; 2606 __ testl(dst_pos, dst_pos); // dst_pos (32-bits) 2607 size_t j4off = __ offset(); 2608 __ jccb(Assembler::negative, L_failed_0); 2609 2610 // The first four tests are very dense code, 2611 // but not quite dense enough to put four 2612 // jumps in a 16-byte instruction fetch buffer. 2613 // That's good, because some branch predicters 2614 // do not like jumps so close together. 2615 // Make sure of this. 2616 guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps"); 2617 2618 // registers used as temp 2619 const Register r11_length = r11; // elements count to copy 2620 const Register r10_src_klass = r10; // array klass 2621 2622 // if (length < 0) return -1; 2623 __ movl(r11_length, length); // length (elements count, 32-bits value) 2624 __ testl(r11_length, r11_length); 2625 __ jccb(Assembler::negative, L_failed_0); 2626 2627 __ load_klass(r10_src_klass, src); 2628 #ifdef ASSERT 2629 // assert(src->klass() != NULL); 2630 { 2631 BLOCK_COMMENT("assert klasses not null {"); 2632 Label L1, L2; 2633 __ testptr(r10_src_klass, r10_src_klass); 2634 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL 2635 __ bind(L1); 2636 __ stop("broken null klass"); 2637 __ bind(L2); 2638 __ load_klass(rax, dst); 2639 __ cmpq(rax, 0); 2640 __ jcc(Assembler::equal, L1); // this would be broken also 2641 BLOCK_COMMENT("} assert klasses not null done"); 2642 } 2643 #endif 2644 2645 // Load layout helper (32-bits) 2646 // 2647 // |array_tag| | header_size | element_type | |log2_element_size| 2648 // 32 30 24 16 8 2 0 2649 // 2650 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 2651 // 2652 2653 const int lh_offset = in_bytes(Klass::layout_helper_offset()); 2654 2655 // Handle objArrays completely differently... 2656 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2657 __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh); 2658 __ jcc(Assembler::equal, L_objArray); 2659 2660 // if (src->klass() != dst->klass()) return -1; 2661 __ load_klass(rax, dst); 2662 __ cmpq(r10_src_klass, rax); 2663 __ jcc(Assembler::notEqual, L_failed); 2664 2665 const Register rax_lh = rax; // layout helper 2666 __ movl(rax_lh, Address(r10_src_klass, lh_offset)); 2667 2668 // if (!src->is_Array()) return -1; 2669 __ cmpl(rax_lh, Klass::_lh_neutral_value); 2670 __ jcc(Assembler::greaterEqual, L_failed); 2671 2672 // At this point, it is known to be a typeArray (array_tag 0x3). 2673 #ifdef ASSERT 2674 { 2675 BLOCK_COMMENT("assert primitive array {"); 2676 Label L; 2677 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift)); 2678 __ jcc(Assembler::greaterEqual, L); 2679 __ stop("must be a primitive array"); 2680 __ bind(L); 2681 BLOCK_COMMENT("} assert primitive array done"); 2682 } 2683 #endif 2684 2685 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2686 r10, L_failed); 2687 2688 // TypeArrayKlass 2689 // 2690 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 2691 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 2692 // 2693 2694 const Register r10_offset = r10; // array offset 2695 const Register rax_elsize = rax_lh; // element size 2696 2697 __ movl(r10_offset, rax_lh); 2698 __ shrl(r10_offset, Klass::_lh_header_size_shift); 2699 __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset 2700 __ addptr(src, r10_offset); // src array offset 2701 __ addptr(dst, r10_offset); // dst array offset 2702 BLOCK_COMMENT("choose copy loop based on element size"); 2703 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize 2704 2705 // next registers should be set before the jump to corresponding stub 2706 const Register from = c_rarg0; // source array address 2707 const Register to = c_rarg1; // destination array address 2708 const Register count = c_rarg2; // elements count 2709 2710 // 'from', 'to', 'count' registers should be set in such order 2711 // since they are the same as 'src', 'src_pos', 'dst'. 2712 2713 __ BIND(L_copy_bytes); 2714 __ cmpl(rax_elsize, 0); 2715 __ jccb(Assembler::notEqual, L_copy_shorts); 2716 __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr 2717 __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr 2718 __ movl2ptr(count, r11_length); // length 2719 __ jump(RuntimeAddress(byte_copy_entry)); 2720 2721 __ BIND(L_copy_shorts); 2722 __ cmpl(rax_elsize, LogBytesPerShort); 2723 __ jccb(Assembler::notEqual, L_copy_ints); 2724 __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr 2725 __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr 2726 __ movl2ptr(count, r11_length); // length 2727 __ jump(RuntimeAddress(short_copy_entry)); 2728 2729 __ BIND(L_copy_ints); 2730 __ cmpl(rax_elsize, LogBytesPerInt); 2731 __ jccb(Assembler::notEqual, L_copy_longs); 2732 __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr 2733 __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr 2734 __ movl2ptr(count, r11_length); // length 2735 __ jump(RuntimeAddress(int_copy_entry)); 2736 2737 __ BIND(L_copy_longs); 2738 #ifdef ASSERT 2739 { 2740 BLOCK_COMMENT("assert long copy {"); 2741 Label L; 2742 __ cmpl(rax_elsize, LogBytesPerLong); 2743 __ jcc(Assembler::equal, L); 2744 __ stop("must be long copy, but elsize is wrong"); 2745 __ bind(L); 2746 BLOCK_COMMENT("} assert long copy done"); 2747 } 2748 #endif 2749 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr 2750 __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr 2751 __ movl2ptr(count, r11_length); // length 2752 __ jump(RuntimeAddress(long_copy_entry)); 2753 2754 // ObjArrayKlass 2755 __ BIND(L_objArray); 2756 // live at this point: r10_src_klass, r11_length, src[_pos], dst[_pos] 2757 2758 Label L_plain_copy, L_checkcast_copy; 2759 // test array classes for subtyping 2760 __ load_klass(rax, dst); 2761 __ cmpq(r10_src_klass, rax); // usual case is exact equality 2762 __ jcc(Assembler::notEqual, L_checkcast_copy); 2763 2764 // Identically typed arrays can be copied without element-wise checks. 2765 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2766 r10, L_failed); 2767 2768 __ lea(from, Address(src, src_pos, TIMES_OOP, 2769 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr 2770 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2771 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr 2772 __ movl2ptr(count, r11_length); // length 2773 __ BIND(L_plain_copy); 2774 __ jump(RuntimeAddress(oop_copy_entry)); 2775 2776 __ BIND(L_checkcast_copy); 2777 // live at this point: r10_src_klass, r11_length, rax (dst_klass) 2778 { 2779 // Before looking at dst.length, make sure dst is also an objArray. 2780 __ cmpl(Address(rax, lh_offset), objArray_lh); 2781 __ jcc(Assembler::notEqual, L_failed); 2782 2783 // It is safe to examine both src.length and dst.length. 2784 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2785 rax, L_failed); 2786 2787 const Register r11_dst_klass = r11; 2788 __ load_klass(r11_dst_klass, dst); // reload 2789 2790 // Marshal the base address arguments now, freeing registers. 2791 __ lea(from, Address(src, src_pos, TIMES_OOP, 2792 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2793 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2794 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2795 __ movl(count, length); // length (reloaded) 2796 Register sco_temp = c_rarg3; // this register is free now 2797 assert_different_registers(from, to, count, sco_temp, 2798 r11_dst_klass, r10_src_klass); 2799 assert_clean_int(count, sco_temp); 2800 2801 // Generate the type check. 2802 const int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2803 __ movl(sco_temp, Address(r11_dst_klass, sco_offset)); 2804 assert_clean_int(sco_temp, rax); 2805 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy); 2806 2807 // Fetch destination element klass from the ObjArrayKlass header. 2808 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 2809 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset)); 2810 __ movl( sco_temp, Address(r11_dst_klass, sco_offset)); 2811 assert_clean_int(sco_temp, rax); 2812 2813 // the checkcast_copy loop needs two extra arguments: 2814 assert(c_rarg3 == sco_temp, "#3 already in place"); 2815 // Set up arguments for checkcast_copy_entry. 2816 setup_arg_regs(4); 2817 __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris 2818 __ jump(RuntimeAddress(checkcast_copy_entry)); 2819 } 2820 2821 __ BIND(L_failed); 2822 __ xorptr(rax, rax); 2823 __ notptr(rax); // return -1 2824 __ leave(); // required for proper stackwalking of RuntimeStub frame 2825 __ ret(0); 2826 2827 return start; 2828 } 2829 2830 void generate_arraycopy_stubs() { 2831 address entry; 2832 address entry_jbyte_arraycopy; 2833 address entry_jshort_arraycopy; 2834 address entry_jint_arraycopy; 2835 address entry_oop_arraycopy; 2836 address entry_jlong_arraycopy; 2837 address entry_checkcast_arraycopy; 2838 2839 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, 2840 "jbyte_disjoint_arraycopy"); 2841 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy, 2842 "jbyte_arraycopy"); 2843 2844 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, 2845 "jshort_disjoint_arraycopy"); 2846 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy, 2847 "jshort_arraycopy"); 2848 2849 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry, 2850 "jint_disjoint_arraycopy"); 2851 StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry, 2852 &entry_jint_arraycopy, "jint_arraycopy"); 2853 2854 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry, 2855 "jlong_disjoint_arraycopy"); 2856 StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry, 2857 &entry_jlong_arraycopy, "jlong_arraycopy"); 2858 2859 2860 if (UseCompressedOops) { 2861 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry, 2862 "oop_disjoint_arraycopy"); 2863 StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry, 2864 &entry_oop_arraycopy, "oop_arraycopy"); 2865 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry, 2866 "oop_disjoint_arraycopy_uninit", 2867 /*dest_uninitialized*/true); 2868 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry, 2869 NULL, "oop_arraycopy_uninit", 2870 /*dest_uninitialized*/true); 2871 } else { 2872 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry, 2873 "oop_disjoint_arraycopy"); 2874 StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry, 2875 &entry_oop_arraycopy, "oop_arraycopy"); 2876 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry, 2877 "oop_disjoint_arraycopy_uninit", 2878 /*dest_uninitialized*/true); 2879 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry, 2880 NULL, "oop_arraycopy_uninit", 2881 /*dest_uninitialized*/true); 2882 } 2883 2884 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 2885 StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, 2886 /*dest_uninitialized*/true); 2887 2888 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", 2889 entry_jbyte_arraycopy, 2890 entry_jshort_arraycopy, 2891 entry_jint_arraycopy, 2892 entry_jlong_arraycopy); 2893 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy", 2894 entry_jbyte_arraycopy, 2895 entry_jshort_arraycopy, 2896 entry_jint_arraycopy, 2897 entry_oop_arraycopy, 2898 entry_jlong_arraycopy, 2899 entry_checkcast_arraycopy); 2900 2901 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 2902 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 2903 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 2904 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); 2905 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 2906 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 2907 2908 // We don't generate specialized code for HeapWord-aligned source 2909 // arrays, so just use the code we've already generated 2910 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy; 2911 StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy; 2912 2913 StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy; 2914 StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy; 2915 2916 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy; 2917 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; 2918 2919 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy; 2920 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; 2921 2922 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; 2923 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; 2924 2925 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit; 2926 StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit; 2927 } 2928 2929 void generate_math_stubs() { 2930 { 2931 StubCodeMark mark(this, "StubRoutines", "log"); 2932 StubRoutines::_intrinsic_log = (double (*)(double)) __ pc(); 2933 2934 __ subq(rsp, 8); 2935 __ movdbl(Address(rsp, 0), xmm0); 2936 __ fld_d(Address(rsp, 0)); 2937 __ flog(); 2938 __ fstp_d(Address(rsp, 0)); 2939 __ movdbl(xmm0, Address(rsp, 0)); 2940 __ addq(rsp, 8); 2941 __ ret(0); 2942 } 2943 { 2944 StubCodeMark mark(this, "StubRoutines", "log10"); 2945 StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc(); 2946 2947 __ subq(rsp, 8); 2948 __ movdbl(Address(rsp, 0), xmm0); 2949 __ fld_d(Address(rsp, 0)); 2950 __ flog10(); 2951 __ fstp_d(Address(rsp, 0)); 2952 __ movdbl(xmm0, Address(rsp, 0)); 2953 __ addq(rsp, 8); 2954 __ ret(0); 2955 } 2956 { 2957 StubCodeMark mark(this, "StubRoutines", "sin"); 2958 StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc(); 2959 2960 __ subq(rsp, 8); 2961 __ movdbl(Address(rsp, 0), xmm0); 2962 __ fld_d(Address(rsp, 0)); 2963 __ trigfunc('s'); 2964 __ fstp_d(Address(rsp, 0)); 2965 __ movdbl(xmm0, Address(rsp, 0)); 2966 __ addq(rsp, 8); 2967 __ ret(0); 2968 } 2969 { 2970 StubCodeMark mark(this, "StubRoutines", "cos"); 2971 StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc(); 2972 2973 __ subq(rsp, 8); 2974 __ movdbl(Address(rsp, 0), xmm0); 2975 __ fld_d(Address(rsp, 0)); 2976 __ trigfunc('c'); 2977 __ fstp_d(Address(rsp, 0)); 2978 __ movdbl(xmm0, Address(rsp, 0)); 2979 __ addq(rsp, 8); 2980 __ ret(0); 2981 } 2982 { 2983 StubCodeMark mark(this, "StubRoutines", "tan"); 2984 StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc(); 2985 2986 __ subq(rsp, 8); 2987 __ movdbl(Address(rsp, 0), xmm0); 2988 __ fld_d(Address(rsp, 0)); 2989 __ trigfunc('t'); 2990 __ fstp_d(Address(rsp, 0)); 2991 __ movdbl(xmm0, Address(rsp, 0)); 2992 __ addq(rsp, 8); 2993 __ ret(0); 2994 } 2995 { 2996 StubCodeMark mark(this, "StubRoutines", "exp"); 2997 StubRoutines::_intrinsic_exp = (double (*)(double)) __ pc(); 2998 2999 __ subq(rsp, 8); 3000 __ movdbl(Address(rsp, 0), xmm0); 3001 __ fld_d(Address(rsp, 0)); 3002 __ exp_with_fallback(0); 3003 __ fstp_d(Address(rsp, 0)); 3004 __ movdbl(xmm0, Address(rsp, 0)); 3005 __ addq(rsp, 8); 3006 __ ret(0); 3007 } 3008 { 3009 StubCodeMark mark(this, "StubRoutines", "pow"); 3010 StubRoutines::_intrinsic_pow = (double (*)(double,double)) __ pc(); 3011 3012 __ subq(rsp, 8); 3013 __ movdbl(Address(rsp, 0), xmm1); 3014 __ fld_d(Address(rsp, 0)); 3015 __ movdbl(Address(rsp, 0), xmm0); 3016 __ fld_d(Address(rsp, 0)); 3017 __ pow_with_fallback(0); 3018 __ fstp_d(Address(rsp, 0)); 3019 __ movdbl(xmm0, Address(rsp, 0)); 3020 __ addq(rsp, 8); 3021 __ ret(0); 3022 } 3023 } 3024 3025 // AES intrinsic stubs 3026 enum {AESBlockSize = 16}; 3027 3028 address generate_key_shuffle_mask() { 3029 __ align(16); 3030 StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask"); 3031 address start = __ pc(); 3032 __ emit_data64( 0x0405060700010203, relocInfo::none ); 3033 __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none ); 3034 return start; 3035 } 3036 3037 // Utility routine for loading a 128-bit key word in little endian format 3038 // can optionally specify that the shuffle mask is already in an xmmregister 3039 void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 3040 __ movdqu(xmmdst, Address(key, offset)); 3041 if (xmm_shuf_mask != NULL) { 3042 __ pshufb(xmmdst, xmm_shuf_mask); 3043 } else { 3044 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3045 } 3046 } 3047 3048 // Arguments: 3049 // 3050 // Inputs: 3051 // c_rarg0 - source byte array address 3052 // c_rarg1 - destination byte array address 3053 // c_rarg2 - K (key) in little endian int array 3054 // 3055 address generate_aescrypt_encryptBlock() { 3056 assert(UseAES, "need AES instructions and misaligned SSE support"); 3057 __ align(CodeEntryAlignment); 3058 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); 3059 Label L_doLast; 3060 address start = __ pc(); 3061 3062 const Register from = c_rarg0; // source array address 3063 const Register to = c_rarg1; // destination array address 3064 const Register key = c_rarg2; // key array address 3065 const Register keylen = rax; 3066 3067 const XMMRegister xmm_result = xmm0; 3068 const XMMRegister xmm_key_shuf_mask = xmm1; 3069 // On win64 xmm6-xmm15 must be preserved so don't use them. 3070 const XMMRegister xmm_temp1 = xmm2; 3071 const XMMRegister xmm_temp2 = xmm3; 3072 const XMMRegister xmm_temp3 = xmm4; 3073 const XMMRegister xmm_temp4 = xmm5; 3074 3075 __ enter(); // required for proper stackwalking of RuntimeStub frame 3076 3077 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3078 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3079 3080 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3081 __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input 3082 3083 // For encryption, the java expanded key ordering is just what we need 3084 // we don't know if the key is aligned, hence not using load-execute form 3085 3086 load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask); 3087 __ pxor(xmm_result, xmm_temp1); 3088 3089 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3090 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3091 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3092 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3093 3094 __ aesenc(xmm_result, xmm_temp1); 3095 __ aesenc(xmm_result, xmm_temp2); 3096 __ aesenc(xmm_result, xmm_temp3); 3097 __ aesenc(xmm_result, xmm_temp4); 3098 3099 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3100 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3101 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3102 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3103 3104 __ aesenc(xmm_result, xmm_temp1); 3105 __ aesenc(xmm_result, xmm_temp2); 3106 __ aesenc(xmm_result, xmm_temp3); 3107 __ aesenc(xmm_result, xmm_temp4); 3108 3109 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3110 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3111 3112 __ cmpl(keylen, 44); 3113 __ jccb(Assembler::equal, L_doLast); 3114 3115 __ aesenc(xmm_result, xmm_temp1); 3116 __ aesenc(xmm_result, xmm_temp2); 3117 3118 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3119 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3120 3121 __ cmpl(keylen, 52); 3122 __ jccb(Assembler::equal, L_doLast); 3123 3124 __ aesenc(xmm_result, xmm_temp1); 3125 __ aesenc(xmm_result, xmm_temp2); 3126 3127 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3128 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3129 3130 __ BIND(L_doLast); 3131 __ aesenc(xmm_result, xmm_temp1); 3132 __ aesenclast(xmm_result, xmm_temp2); 3133 __ movdqu(Address(to, 0), xmm_result); // store the result 3134 __ xorptr(rax, rax); // return 0 3135 __ leave(); // required for proper stackwalking of RuntimeStub frame 3136 __ ret(0); 3137 3138 return start; 3139 } 3140 3141 3142 // Arguments: 3143 // 3144 // Inputs: 3145 // c_rarg0 - source byte array address 3146 // c_rarg1 - destination byte array address 3147 // c_rarg2 - K (key) in little endian int array 3148 // 3149 address generate_aescrypt_decryptBlock() { 3150 assert(UseAES, "need AES instructions and misaligned SSE support"); 3151 __ align(CodeEntryAlignment); 3152 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); 3153 Label L_doLast; 3154 address start = __ pc(); 3155 3156 const Register from = c_rarg0; // source array address 3157 const Register to = c_rarg1; // destination array address 3158 const Register key = c_rarg2; // key array address 3159 const Register keylen = rax; 3160 3161 const XMMRegister xmm_result = xmm0; 3162 const XMMRegister xmm_key_shuf_mask = xmm1; 3163 // On win64 xmm6-xmm15 must be preserved so don't use them. 3164 const XMMRegister xmm_temp1 = xmm2; 3165 const XMMRegister xmm_temp2 = xmm3; 3166 const XMMRegister xmm_temp3 = xmm4; 3167 const XMMRegister xmm_temp4 = xmm5; 3168 3169 __ enter(); // required for proper stackwalking of RuntimeStub frame 3170 3171 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3172 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3173 3174 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3175 __ movdqu(xmm_result, Address(from, 0)); 3176 3177 // for decryption java expanded key ordering is rotated one position from what we want 3178 // so we start from 0x10 here and hit 0x00 last 3179 // we don't know if the key is aligned, hence not using load-execute form 3180 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3181 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3182 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3183 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3184 3185 __ pxor (xmm_result, xmm_temp1); 3186 __ aesdec(xmm_result, xmm_temp2); 3187 __ aesdec(xmm_result, xmm_temp3); 3188 __ aesdec(xmm_result, xmm_temp4); 3189 3190 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3191 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3192 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3193 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3194 3195 __ aesdec(xmm_result, xmm_temp1); 3196 __ aesdec(xmm_result, xmm_temp2); 3197 __ aesdec(xmm_result, xmm_temp3); 3198 __ aesdec(xmm_result, xmm_temp4); 3199 3200 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3201 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3202 load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask); 3203 3204 __ cmpl(keylen, 44); 3205 __ jccb(Assembler::equal, L_doLast); 3206 3207 __ aesdec(xmm_result, xmm_temp1); 3208 __ aesdec(xmm_result, xmm_temp2); 3209 3210 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3211 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3212 3213 __ cmpl(keylen, 52); 3214 __ jccb(Assembler::equal, L_doLast); 3215 3216 __ aesdec(xmm_result, xmm_temp1); 3217 __ aesdec(xmm_result, xmm_temp2); 3218 3219 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3220 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3221 3222 __ BIND(L_doLast); 3223 __ aesdec(xmm_result, xmm_temp1); 3224 __ aesdec(xmm_result, xmm_temp2); 3225 3226 // for decryption the aesdeclast operation is always on key+0x00 3227 __ aesdeclast(xmm_result, xmm_temp3); 3228 __ movdqu(Address(to, 0), xmm_result); // store the result 3229 __ xorptr(rax, rax); // return 0 3230 __ leave(); // required for proper stackwalking of RuntimeStub frame 3231 __ ret(0); 3232 3233 return start; 3234 } 3235 3236 3237 // Arguments: 3238 // 3239 // Inputs: 3240 // c_rarg0 - source byte array address 3241 // c_rarg1 - destination byte array address 3242 // c_rarg2 - K (key) in little endian int array 3243 // c_rarg3 - r vector byte array address 3244 // c_rarg4 - input length 3245 // 3246 // Output: 3247 // rax - input length 3248 // 3249 address generate_cipherBlockChaining_encryptAESCrypt() { 3250 assert(UseAES, "need AES instructions and misaligned SSE support"); 3251 __ align(CodeEntryAlignment); 3252 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); 3253 address start = __ pc(); 3254 3255 Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256; 3256 const Register from = c_rarg0; // source array address 3257 const Register to = c_rarg1; // destination array address 3258 const Register key = c_rarg2; // key array address 3259 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3260 // and left with the results of the last encryption block 3261 #ifndef _WIN64 3262 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3263 #else 3264 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3265 const Register len_reg = r10; // pick the first volatile windows register 3266 #endif 3267 const Register pos = rax; 3268 3269 // xmm register assignments for the loops below 3270 const XMMRegister xmm_result = xmm0; 3271 const XMMRegister xmm_temp = xmm1; 3272 // keys 0-10 preloaded into xmm2-xmm12 3273 const int XMM_REG_NUM_KEY_FIRST = 2; 3274 const int XMM_REG_NUM_KEY_LAST = 15; 3275 const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3276 const XMMRegister xmm_key10 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10); 3277 const XMMRegister xmm_key11 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11); 3278 const XMMRegister xmm_key12 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12); 3279 const XMMRegister xmm_key13 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13); 3280 3281 __ enter(); // required for proper stackwalking of RuntimeStub frame 3282 3283 #ifdef _WIN64 3284 // on win64, fill len_reg from stack position 3285 __ movl(len_reg, len_mem); 3286 // save the xmm registers which must be preserved 6-15 3287 __ subptr(rsp, -rsp_after_call_off * wordSize); 3288 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3289 __ movdqu(xmm_save(i), as_XMMRegister(i)); 3290 } 3291 #else 3292 __ push(len_reg); // Save 3293 #endif 3294 3295 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 3296 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3297 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0 3298 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) { 3299 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3300 offset += 0x10; 3301 } 3302 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 3303 3304 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3305 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3306 __ cmpl(rax, 44); 3307 __ jcc(Assembler::notEqual, L_key_192_256); 3308 3309 // 128 bit code follows here 3310 __ movptr(pos, 0); 3311 __ align(OptoLoopAlignment); 3312 3313 __ BIND(L_loopTop_128); 3314 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3315 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3316 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3317 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) { 3318 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3319 } 3320 __ aesenclast(xmm_result, xmm_key10); 3321 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3322 // no need to store r to memory until we exit 3323 __ addptr(pos, AESBlockSize); 3324 __ subptr(len_reg, AESBlockSize); 3325 __ jcc(Assembler::notEqual, L_loopTop_128); 3326 3327 __ BIND(L_exit); 3328 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object 3329 3330 #ifdef _WIN64 3331 // restore xmm regs belonging to calling function 3332 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3333 __ movdqu(as_XMMRegister(i), xmm_save(i)); 3334 } 3335 __ movl(rax, len_mem); 3336 #else 3337 __ pop(rax); // return length 3338 #endif 3339 __ leave(); // required for proper stackwalking of RuntimeStub frame 3340 __ ret(0); 3341 3342 __ BIND(L_key_192_256); 3343 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 3344 load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask); 3345 load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask); 3346 __ cmpl(rax, 52); 3347 __ jcc(Assembler::notEqual, L_key_256); 3348 3349 // 192-bit code follows here (could be changed to use more xmm registers) 3350 __ movptr(pos, 0); 3351 __ align(OptoLoopAlignment); 3352 3353 __ BIND(L_loopTop_192); 3354 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3355 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3356 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3357 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) { 3358 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3359 } 3360 __ aesenclast(xmm_result, xmm_key12); 3361 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3362 // no need to store r to memory until we exit 3363 __ addptr(pos, AESBlockSize); 3364 __ subptr(len_reg, AESBlockSize); 3365 __ jcc(Assembler::notEqual, L_loopTop_192); 3366 __ jmp(L_exit); 3367 3368 __ BIND(L_key_256); 3369 // 256-bit code follows here (could be changed to use more xmm registers) 3370 load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask); 3371 __ movptr(pos, 0); 3372 __ align(OptoLoopAlignment); 3373 3374 __ BIND(L_loopTop_256); 3375 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3376 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3377 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3378 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) { 3379 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3380 } 3381 load_key(xmm_temp, key, 0xe0); 3382 __ aesenclast(xmm_result, xmm_temp); 3383 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3384 // no need to store r to memory until we exit 3385 __ addptr(pos, AESBlockSize); 3386 __ subptr(len_reg, AESBlockSize); 3387 __ jcc(Assembler::notEqual, L_loopTop_256); 3388 __ jmp(L_exit); 3389 3390 return start; 3391 } 3392 3393 // Safefetch stubs. 3394 void generate_safefetch(const char* name, int size, address* entry, 3395 address* fault_pc, address* continuation_pc) { 3396 // safefetch signatures: 3397 // int SafeFetch32(int* adr, int errValue); 3398 // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue); 3399 // 3400 // arguments: 3401 // c_rarg0 = adr 3402 // c_rarg1 = errValue 3403 // 3404 // result: 3405 // PPC_RET = *adr or errValue 3406 3407 StubCodeMark mark(this, "StubRoutines", name); 3408 3409 // Entry point, pc or function descriptor. 3410 *entry = __ pc(); 3411 3412 // Load *adr into c_rarg1, may fault. 3413 *fault_pc = __ pc(); 3414 switch (size) { 3415 case 4: 3416 // int32_t 3417 __ movl(c_rarg1, Address(c_rarg0, 0)); 3418 break; 3419 case 8: 3420 // int64_t 3421 __ movq(c_rarg1, Address(c_rarg0, 0)); 3422 break; 3423 default: 3424 ShouldNotReachHere(); 3425 } 3426 3427 // return errValue or *adr 3428 *continuation_pc = __ pc(); 3429 __ movq(rax, c_rarg1); 3430 __ ret(0); 3431 } 3432 3433 // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time 3434 // to hide instruction latency 3435 // 3436 // Arguments: 3437 // 3438 // Inputs: 3439 // c_rarg0 - source byte array address 3440 // c_rarg1 - destination byte array address 3441 // c_rarg2 - K (key) in little endian int array 3442 // c_rarg3 - r vector byte array address 3443 // c_rarg4 - input length 3444 // 3445 // Output: 3446 // rax - input length 3447 // 3448 3449 address generate_cipherBlockChaining_decryptAESCrypt_Parallel() { 3450 assert(UseAES, "need AES instructions and misaligned SSE support"); 3451 __ align(CodeEntryAlignment); 3452 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 3453 address start = __ pc(); 3454 3455 Label L_exit, L_key_192_256, L_key_256; 3456 Label L_singleBlock_loopTop_128, L_multiBlock_loopTop_128; 3457 Label L_singleBlock_loopTop_192, L_singleBlock_loopTop_256; 3458 const Register from = c_rarg0; // source array address 3459 const Register to = c_rarg1; // destination array address 3460 const Register key = c_rarg2; // key array address 3461 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3462 // and left with the results of the last encryption block 3463 #ifndef _WIN64 3464 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3465 #else 3466 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3467 const Register len_reg = r10; // pick the first volatile windows register 3468 #endif 3469 const Register pos = rax; 3470 3471 // keys 0-10 preloaded into xmm2-xmm12 3472 const int XMM_REG_NUM_KEY_FIRST = 5; 3473 const int XMM_REG_NUM_KEY_LAST = 15; 3474 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3475 const XMMRegister xmm_key_last = as_XMMRegister(XMM_REG_NUM_KEY_LAST); 3476 3477 __ enter(); // required for proper stackwalking of RuntimeStub frame 3478 3479 #ifdef _WIN64 3480 // on win64, fill len_reg from stack position 3481 __ movl(len_reg, len_mem); 3482 // save the xmm registers which must be preserved 6-15 3483 __ subptr(rsp, -rsp_after_call_off * wordSize); 3484 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3485 __ movdqu(xmm_save(i), as_XMMRegister(i)); 3486 } 3487 #else 3488 __ push(len_reg); // Save 3489 #endif 3490 3491 // the java expanded key ordering is rotated one position from what we want 3492 // so we start from 0x10 here and hit 0x00 last 3493 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 3494 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3495 // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00 3496 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) { 3497 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3498 offset += 0x10; 3499 } 3500 load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask); 3501 3502 const XMMRegister xmm_prev_block_cipher = xmm1; // holds cipher of previous block 3503 3504 // registers holding the four results in the parallelized loop 3505 const XMMRegister xmm_result0 = xmm0; 3506 const XMMRegister xmm_result1 = xmm2; 3507 const XMMRegister xmm_result2 = xmm3; 3508 const XMMRegister xmm_result3 = xmm4; 3509 3510 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec 3511 3512 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3513 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3514 __ cmpl(rax, 44); 3515 __ jcc(Assembler::notEqual, L_key_192_256); 3516 3517 3518 // 128-bit code follows here, parallelized 3519 __ movptr(pos, 0); 3520 __ align(OptoLoopAlignment); 3521 __ BIND(L_multiBlock_loopTop_128); 3522 __ cmpptr(len_reg, 4*AESBlockSize); // see if at least 4 blocks left 3523 __ jcc(Assembler::less, L_singleBlock_loopTop_128); 3524 3525 __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0*AESBlockSize)); // get next 4 blocks into xmmresult registers 3526 __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1*AESBlockSize)); 3527 __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2*AESBlockSize)); 3528 __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3*AESBlockSize)); 3529 3530 #define DoFour(opc, src_reg) \ 3531 __ opc(xmm_result0, src_reg); \ 3532 __ opc(xmm_result1, src_reg); \ 3533 __ opc(xmm_result2, src_reg); \ 3534 __ opc(xmm_result3, src_reg); 3535 3536 DoFour(pxor, xmm_key_first); 3537 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) { 3538 DoFour(aesdec, as_XMMRegister(rnum)); 3539 } 3540 DoFour(aesdeclast, xmm_key_last); 3541 // for each result, xor with the r vector of previous cipher block 3542 __ pxor(xmm_result0, xmm_prev_block_cipher); 3543 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0*AESBlockSize)); 3544 __ pxor(xmm_result1, xmm_prev_block_cipher); 3545 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1*AESBlockSize)); 3546 __ pxor(xmm_result2, xmm_prev_block_cipher); 3547 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2*AESBlockSize)); 3548 __ pxor(xmm_result3, xmm_prev_block_cipher); 3549 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3*AESBlockSize)); // this will carry over to next set of blocks 3550 3551 __ movdqu(Address(to, pos, Address::times_1, 0*AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output 3552 __ movdqu(Address(to, pos, Address::times_1, 1*AESBlockSize), xmm_result1); 3553 __ movdqu(Address(to, pos, Address::times_1, 2*AESBlockSize), xmm_result2); 3554 __ movdqu(Address(to, pos, Address::times_1, 3*AESBlockSize), xmm_result3); 3555 3556 __ addptr(pos, 4*AESBlockSize); 3557 __ subptr(len_reg, 4*AESBlockSize); 3558 __ jmp(L_multiBlock_loopTop_128); 3559 3560 // registers used in the non-parallelized loops 3561 // xmm register assignments for the loops below 3562 const XMMRegister xmm_result = xmm0; 3563 const XMMRegister xmm_prev_block_cipher_save = xmm2; 3564 const XMMRegister xmm_key11 = xmm3; 3565 const XMMRegister xmm_key12 = xmm4; 3566 const XMMRegister xmm_temp = xmm4; 3567 3568 __ align(OptoLoopAlignment); 3569 __ BIND(L_singleBlock_loopTop_128); 3570 __ cmpptr(len_reg, 0); // any blocks left?? 3571 __ jcc(Assembler::equal, L_exit); 3572 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 3573 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector 3574 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds 3575 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) { 3576 __ aesdec(xmm_result, as_XMMRegister(rnum)); 3577 } 3578 __ aesdeclast(xmm_result, xmm_key_last); 3579 __ pxor (xmm_result, xmm_prev_block_cipher); // xor with the current r vector 3580 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3581 // no need to store r to memory until we exit 3582 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block 3583 3584 __ addptr(pos, AESBlockSize); 3585 __ subptr(len_reg, AESBlockSize); 3586 __ jmp(L_singleBlock_loopTop_128); 3587 3588 3589 __ BIND(L_exit); 3590 __ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object 3591 #ifdef _WIN64 3592 // restore regs belonging to calling function 3593 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3594 __ movdqu(as_XMMRegister(i), xmm_save(i)); 3595 } 3596 __ movl(rax, len_mem); 3597 #else 3598 __ pop(rax); // return length 3599 #endif 3600 __ leave(); // required for proper stackwalking of RuntimeStub frame 3601 __ ret(0); 3602 3603 3604 __ BIND(L_key_192_256); 3605 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 3606 load_key(xmm_key11, key, 0xb0); 3607 __ cmpl(rax, 52); 3608 __ jcc(Assembler::notEqual, L_key_256); 3609 3610 // 192-bit code follows here (could be optimized to use parallelism) 3611 load_key(xmm_key12, key, 0xc0); // 192-bit key goes up to c0 3612 __ movptr(pos, 0); 3613 __ align(OptoLoopAlignment); 3614 3615 __ BIND(L_singleBlock_loopTop_192); 3616 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 3617 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector 3618 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds 3619 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) { 3620 __ aesdec(xmm_result, as_XMMRegister(rnum)); 3621 } 3622 __ aesdec(xmm_result, xmm_key11); 3623 __ aesdec(xmm_result, xmm_key12); 3624 __ aesdeclast(xmm_result, xmm_key_last); // xmm15 always came from key+0 3625 __ pxor (xmm_result, xmm_prev_block_cipher); // xor with the current r vector 3626 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3627 // no need to store r to memory until we exit 3628 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block 3629 __ addptr(pos, AESBlockSize); 3630 __ subptr(len_reg, AESBlockSize); 3631 __ jcc(Assembler::notEqual,L_singleBlock_loopTop_192); 3632 __ jmp(L_exit); 3633 3634 __ BIND(L_key_256); 3635 // 256-bit code follows here (could be optimized to use parallelism) 3636 __ movptr(pos, 0); 3637 __ align(OptoLoopAlignment); 3638 3639 __ BIND(L_singleBlock_loopTop_256); 3640 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 3641 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector 3642 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds 3643 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) { 3644 __ aesdec(xmm_result, as_XMMRegister(rnum)); 3645 } 3646 __ aesdec(xmm_result, xmm_key11); 3647 load_key(xmm_temp, key, 0xc0); 3648 __ aesdec(xmm_result, xmm_temp); 3649 load_key(xmm_temp, key, 0xd0); 3650 __ aesdec(xmm_result, xmm_temp); 3651 load_key(xmm_temp, key, 0xe0); // 256-bit key goes up to e0 3652 __ aesdec(xmm_result, xmm_temp); 3653 __ aesdeclast(xmm_result, xmm_key_last); // xmm15 came from key+0 3654 __ pxor (xmm_result, xmm_prev_block_cipher); // xor with the current r vector 3655 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3656 // no need to store r to memory until we exit 3657 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block 3658 __ addptr(pos, AESBlockSize); 3659 __ subptr(len_reg, AESBlockSize); 3660 __ jcc(Assembler::notEqual,L_singleBlock_loopTop_256); 3661 __ jmp(L_exit); 3662 3663 return start; 3664 } 3665 3666 3667 // byte swap x86 long 3668 address generate_ghash_long_swap_mask() { 3669 __ align(CodeEntryAlignment); 3670 StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask"); 3671 address start = __ pc(); 3672 __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none ); 3673 __ emit_data64(0x0706050403020100, relocInfo::none ); 3674 return start; 3675 } 3676 3677 // byte swap x86 byte array 3678 address generate_ghash_byte_swap_mask() { 3679 __ align(CodeEntryAlignment); 3680 StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask"); 3681 address start = __ pc(); 3682 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none ); 3683 __ emit_data64(0x0001020304050607, relocInfo::none ); 3684 return start; 3685 } 3686 3687 /* Single and multi-block ghash operations */ 3688 address generate_ghash_processBlocks() { 3689 __ align(CodeEntryAlignment); 3690 Label L_ghash_loop, L_exit; 3691 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 3692 address start = __ pc(); 3693 3694 const Register state = c_rarg0; 3695 const Register subkeyH = c_rarg1; 3696 const Register data = c_rarg2; 3697 const Register blocks = c_rarg3; 3698 3699 #ifdef _WIN64 3700 const int XMM_REG_LAST = 10; 3701 #endif 3702 3703 const XMMRegister xmm_temp0 = xmm0; 3704 const XMMRegister xmm_temp1 = xmm1; 3705 const XMMRegister xmm_temp2 = xmm2; 3706 const XMMRegister xmm_temp3 = xmm3; 3707 const XMMRegister xmm_temp4 = xmm4; 3708 const XMMRegister xmm_temp5 = xmm5; 3709 const XMMRegister xmm_temp6 = xmm6; 3710 const XMMRegister xmm_temp7 = xmm7; 3711 const XMMRegister xmm_temp8 = xmm8; 3712 const XMMRegister xmm_temp9 = xmm9; 3713 const XMMRegister xmm_temp10 = xmm10; 3714 3715 __ enter(); 3716 3717 #ifdef _WIN64 3718 // save the xmm registers which must be preserved 6-10 3719 __ subptr(rsp, -rsp_after_call_off * wordSize); 3720 for (int i = 6; i <= XMM_REG_LAST; i++) { 3721 __ movdqu(xmm_save(i), as_XMMRegister(i)); 3722 } 3723 #endif 3724 3725 __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr())); 3726 3727 __ movdqu(xmm_temp0, Address(state, 0)); 3728 __ pshufb(xmm_temp0, xmm_temp10); 3729 3730 3731 __ BIND(L_ghash_loop); 3732 __ movdqu(xmm_temp2, Address(data, 0)); 3733 __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr())); 3734 3735 __ movdqu(xmm_temp1, Address(subkeyH, 0)); 3736 __ pshufb(xmm_temp1, xmm_temp10); 3737 3738 __ pxor(xmm_temp0, xmm_temp2); 3739 3740 // 3741 // Multiply with the hash key 3742 // 3743 __ movdqu(xmm_temp3, xmm_temp0); 3744 __ pclmulqdq(xmm_temp3, xmm_temp1, 0); // xmm3 holds a0*b0 3745 __ movdqu(xmm_temp4, xmm_temp0); 3746 __ pclmulqdq(xmm_temp4, xmm_temp1, 16); // xmm4 holds a0*b1 3747 3748 __ movdqu(xmm_temp5, xmm_temp0); 3749 __ pclmulqdq(xmm_temp5, xmm_temp1, 1); // xmm5 holds a1*b0 3750 __ movdqu(xmm_temp6, xmm_temp0); 3751 __ pclmulqdq(xmm_temp6, xmm_temp1, 17); // xmm6 holds a1*b1 3752 3753 __ pxor(xmm_temp4, xmm_temp5); // xmm4 holds a0*b1 + a1*b0 3754 3755 __ movdqu(xmm_temp5, xmm_temp4); // move the contents of xmm4 to xmm5 3756 __ psrldq(xmm_temp4, 8); // shift by xmm4 64 bits to the right 3757 __ pslldq(xmm_temp5, 8); // shift by xmm5 64 bits to the left 3758 __ pxor(xmm_temp3, xmm_temp5); 3759 __ pxor(xmm_temp6, xmm_temp4); // Register pair <xmm6:xmm3> holds the result 3760 // of the carry-less multiplication of 3761 // xmm0 by xmm1. 3762 3763 // We shift the result of the multiplication by one bit position 3764 // to the left to cope for the fact that the bits are reversed. 3765 __ movdqu(xmm_temp7, xmm_temp3); 3766 __ movdqu(xmm_temp8, xmm_temp6); 3767 __ pslld(xmm_temp3, 1); 3768 __ pslld(xmm_temp6, 1); 3769 __ psrld(xmm_temp7, 31); 3770 __ psrld(xmm_temp8, 31); 3771 __ movdqu(xmm_temp9, xmm_temp7); 3772 __ pslldq(xmm_temp8, 4); 3773 __ pslldq(xmm_temp7, 4); 3774 __ psrldq(xmm_temp9, 12); 3775 __ por(xmm_temp3, xmm_temp7); 3776 __ por(xmm_temp6, xmm_temp8); 3777 __ por(xmm_temp6, xmm_temp9); 3778 3779 // 3780 // First phase of the reduction 3781 // 3782 // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts 3783 // independently. 3784 __ movdqu(xmm_temp7, xmm_temp3); 3785 __ movdqu(xmm_temp8, xmm_temp3); 3786 __ movdqu(xmm_temp9, xmm_temp3); 3787 __ pslld(xmm_temp7, 31); // packed right shift shifting << 31 3788 __ pslld(xmm_temp8, 30); // packed right shift shifting << 30 3789 __ pslld(xmm_temp9, 25); // packed right shift shifting << 25 3790 __ pxor(xmm_temp7, xmm_temp8); // xor the shifted versions 3791 __ pxor(xmm_temp7, xmm_temp9); 3792 __ movdqu(xmm_temp8, xmm_temp7); 3793 __ pslldq(xmm_temp7, 12); 3794 __ psrldq(xmm_temp8, 4); 3795 __ pxor(xmm_temp3, xmm_temp7); // first phase of the reduction complete 3796 3797 // 3798 // Second phase of the reduction 3799 // 3800 // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these 3801 // shift operations. 3802 __ movdqu(xmm_temp2, xmm_temp3); 3803 __ movdqu(xmm_temp4, xmm_temp3); 3804 __ movdqu(xmm_temp5, xmm_temp3); 3805 __ psrld(xmm_temp2, 1); // packed left shifting >> 1 3806 __ psrld(xmm_temp4, 2); // packed left shifting >> 2 3807 __ psrld(xmm_temp5, 7); // packed left shifting >> 7 3808 __ pxor(xmm_temp2, xmm_temp4); // xor the shifted versions 3809 __ pxor(xmm_temp2, xmm_temp5); 3810 __ pxor(xmm_temp2, xmm_temp8); 3811 __ pxor(xmm_temp3, xmm_temp2); 3812 __ pxor(xmm_temp6, xmm_temp3); // the result is in xmm6 3813 3814 __ decrement(blocks); 3815 __ jcc(Assembler::zero, L_exit); 3816 __ movdqu(xmm_temp0, xmm_temp6); 3817 __ addptr(data, 16); 3818 __ jmp(L_ghash_loop); 3819 3820 __ BIND(L_exit); 3821 __ pshufb(xmm_temp6, xmm_temp10); // Byte swap 16-byte result 3822 __ movdqu(Address(state, 0), xmm_temp6); // store the result 3823 3824 #ifdef _WIN64 3825 // restore xmm regs belonging to calling function 3826 for (int i = 6; i <= XMM_REG_LAST; i++) { 3827 __ movdqu(as_XMMRegister(i), xmm_save(i)); 3828 } 3829 #endif 3830 __ leave(); 3831 __ ret(0); 3832 return start; 3833 } 3834 3835 /** 3836 * Arguments: 3837 * 3838 * Inputs: 3839 * c_rarg0 - int crc 3840 * c_rarg1 - byte* buf 3841 * c_rarg2 - int length 3842 * 3843 * Ouput: 3844 * rax - int crc result 3845 */ 3846 address generate_updateBytesCRC32() { 3847 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions"); 3848 3849 __ align(CodeEntryAlignment); 3850 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32"); 3851 3852 address start = __ pc(); 3853 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 3854 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 3855 // rscratch1: r10 3856 const Register crc = c_rarg0; // crc 3857 const Register buf = c_rarg1; // source java byte array address 3858 const Register len = c_rarg2; // length 3859 const Register table = c_rarg3; // crc_table address (reuse register) 3860 const Register tmp = r11; 3861 assert_different_registers(crc, buf, len, table, tmp, rax); 3862 3863 BLOCK_COMMENT("Entry:"); 3864 __ enter(); // required for proper stackwalking of RuntimeStub frame 3865 3866 __ kernel_crc32(crc, buf, len, table, tmp); 3867 3868 __ movl(rax, crc); 3869 __ leave(); // required for proper stackwalking of RuntimeStub frame 3870 __ ret(0); 3871 3872 return start; 3873 } 3874 3875 3876 /** 3877 * Arguments: 3878 * 3879 * Input: 3880 * c_rarg0 - x address 3881 * c_rarg1 - x length 3882 * c_rarg2 - y address 3883 * c_rarg3 - y lenth 3884 * not Win64 3885 * c_rarg4 - z address 3886 * c_rarg5 - z length 3887 * Win64 3888 * rsp+40 - z address 3889 * rsp+48 - z length 3890 */ 3891 address generate_multiplyToLen() { 3892 __ align(CodeEntryAlignment); 3893 StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); 3894 3895 address start = __ pc(); 3896 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 3897 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 3898 const Register x = rdi; 3899 const Register xlen = rax; 3900 const Register y = rsi; 3901 const Register ylen = rcx; 3902 const Register z = r8; 3903 const Register zlen = r11; 3904 3905 // Next registers will be saved on stack in multiply_to_len(). 3906 const Register tmp1 = r12; 3907 const Register tmp2 = r13; 3908 const Register tmp3 = r14; 3909 const Register tmp4 = r15; 3910 const Register tmp5 = rbx; 3911 3912 BLOCK_COMMENT("Entry:"); 3913 __ enter(); // required for proper stackwalking of RuntimeStub frame 3914 3915 #ifndef _WIN64 3916 __ movptr(zlen, r9); // Save r9 in r11 - zlen 3917 #endif 3918 setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx 3919 // ylen => rcx, z => r8, zlen => r11 3920 // r9 and r10 may be used to save non-volatile registers 3921 #ifdef _WIN64 3922 // last 2 arguments (#4, #5) are on stack on Win64 3923 __ movptr(z, Address(rsp, 6 * wordSize)); 3924 __ movptr(zlen, Address(rsp, 7 * wordSize)); 3925 #endif 3926 3927 __ movptr(xlen, rsi); 3928 __ movptr(y, rdx); 3929 __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5); 3930 3931 restore_arg_regs(); 3932 3933 __ leave(); // required for proper stackwalking of RuntimeStub frame 3934 __ ret(0); 3935 3936 return start; 3937 } 3938 3939 #undef __ 3940 #define __ masm-> 3941 3942 // Continuation point for throwing of implicit exceptions that are 3943 // not handled in the current activation. Fabricates an exception 3944 // oop and initiates normal exception dispatching in this 3945 // frame. Since we need to preserve callee-saved values (currently 3946 // only for C2, but done for C1 as well) we need a callee-saved oop 3947 // map and therefore have to make these stubs into RuntimeStubs 3948 // rather than BufferBlobs. If the compiler needs all registers to 3949 // be preserved between the fault point and the exception handler 3950 // then it must assume responsibility for that in 3951 // AbstractCompiler::continuation_for_implicit_null_exception or 3952 // continuation_for_implicit_division_by_zero_exception. All other 3953 // implicit exceptions (e.g., NullPointerException or 3954 // AbstractMethodError on entry) are either at call sites or 3955 // otherwise assume that stack unwinding will be initiated, so 3956 // caller saved registers were assumed volatile in the compiler. 3957 address generate_throw_exception(const char* name, 3958 address runtime_entry, 3959 Register arg1 = noreg, 3960 Register arg2 = noreg) { 3961 // Information about frame layout at time of blocking runtime call. 3962 // Note that we only have to preserve callee-saved registers since 3963 // the compilers are responsible for supplying a continuation point 3964 // if they expect all registers to be preserved. 3965 enum layout { 3966 rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt, 3967 rbp_off2, 3968 return_off, 3969 return_off2, 3970 framesize // inclusive of return address 3971 }; 3972 3973 int insts_size = 512; 3974 int locs_size = 64; 3975 3976 CodeBuffer code(name, insts_size, locs_size); 3977 OopMapSet* oop_maps = new OopMapSet(); 3978 MacroAssembler* masm = new MacroAssembler(&code); 3979 3980 address start = __ pc(); 3981 3982 // This is an inlined and slightly modified version of call_VM 3983 // which has the ability to fetch the return PC out of 3984 // thread-local storage and also sets up last_Java_sp slightly 3985 // differently than the real call_VM 3986 3987 __ enter(); // required for proper stackwalking of RuntimeStub frame 3988 3989 assert(is_even(framesize/2), "sp not 16-byte aligned"); 3990 3991 // return address and rbp are already in place 3992 __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog 3993 3994 int frame_complete = __ pc() - start; 3995 3996 // Set up last_Java_sp and last_Java_fp 3997 address the_pc = __ pc(); 3998 __ set_last_Java_frame(rsp, rbp, the_pc); 3999 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack 4000 4001 // Call runtime 4002 if (arg1 != noreg) { 4003 assert(arg2 != c_rarg1, "clobbered"); 4004 __ movptr(c_rarg1, arg1); 4005 } 4006 if (arg2 != noreg) { 4007 __ movptr(c_rarg2, arg2); 4008 } 4009 __ movptr(c_rarg0, r15_thread); 4010 BLOCK_COMMENT("call runtime_entry"); 4011 __ call(RuntimeAddress(runtime_entry)); 4012 4013 // Generate oop map 4014 OopMap* map = new OopMap(framesize, 0); 4015 4016 oop_maps->add_gc_map(the_pc - start, map); 4017 4018 __ reset_last_Java_frame(true, true); 4019 4020 __ leave(); // required for proper stackwalking of RuntimeStub frame 4021 4022 // check for pending exceptions 4023 #ifdef ASSERT 4024 Label L; 4025 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), 4026 (int32_t) NULL_WORD); 4027 __ jcc(Assembler::notEqual, L); 4028 __ should_not_reach_here(); 4029 __ bind(L); 4030 #endif // ASSERT 4031 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 4032 4033 4034 // codeBlob framesize is in words (not VMRegImpl::slot_size) 4035 RuntimeStub* stub = 4036 RuntimeStub::new_runtime_stub(name, 4037 &code, 4038 frame_complete, 4039 (framesize >> (LogBytesPerWord - LogBytesPerInt)), 4040 oop_maps, false); 4041 return stub->entry_point(); 4042 } 4043 4044 void create_control_words() { 4045 // Round to nearest, 53-bit mode, exceptions masked 4046 StubRoutines::_fpu_cntrl_wrd_std = 0x027F; 4047 // Round to zero, 53-bit mode, exception mased 4048 StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F; 4049 // Round to nearest, 24-bit mode, exceptions masked 4050 StubRoutines::_fpu_cntrl_wrd_24 = 0x007F; 4051 // Round to nearest, 64-bit mode, exceptions masked 4052 StubRoutines::_fpu_cntrl_wrd_64 = 0x037F; 4053 // Round to nearest, 64-bit mode, exceptions masked 4054 StubRoutines::_mxcsr_std = 0x1F80; 4055 // Note: the following two constants are 80-bit values 4056 // layout is critical for correct loading by FPU. 4057 // Bias for strict fp multiply/divide 4058 StubRoutines::_fpu_subnormal_bias1[0]= 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000 4059 StubRoutines::_fpu_subnormal_bias1[1]= 0x80000000; 4060 StubRoutines::_fpu_subnormal_bias1[2]= 0x03ff; 4061 // Un-Bias for strict fp multiply/divide 4062 StubRoutines::_fpu_subnormal_bias2[0]= 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000 4063 StubRoutines::_fpu_subnormal_bias2[1]= 0x80000000; 4064 StubRoutines::_fpu_subnormal_bias2[2]= 0x7bff; 4065 } 4066 4067 // Initialization 4068 void generate_initial() { 4069 // Generates all stubs and initializes the entry points 4070 4071 // This platform-specific settings are needed by generate_call_stub() 4072 create_control_words(); 4073 4074 // entry points that exist in all platforms Note: This is code 4075 // that could be shared among different platforms - however the 4076 // benefit seems to be smaller than the disadvantage of having a 4077 // much more complicated generator structure. See also comment in 4078 // stubRoutines.hpp. 4079 4080 StubRoutines::_forward_exception_entry = generate_forward_exception(); 4081 4082 StubRoutines::_call_stub_entry = 4083 generate_call_stub(StubRoutines::_call_stub_return_address); 4084 4085 // is referenced by megamorphic call 4086 StubRoutines::_catch_exception_entry = generate_catch_exception(); 4087 4088 // atomic calls 4089 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg(); 4090 StubRoutines::_atomic_xchg_ptr_entry = generate_atomic_xchg_ptr(); 4091 StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg(); 4092 StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte(); 4093 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long(); 4094 StubRoutines::_atomic_add_entry = generate_atomic_add(); 4095 StubRoutines::_atomic_add_ptr_entry = generate_atomic_add_ptr(); 4096 StubRoutines::_fence_entry = generate_orderaccess_fence(); 4097 4098 StubRoutines::_handler_for_unsafe_access_entry = 4099 generate_handler_for_unsafe_access(); 4100 4101 // platform dependent 4102 StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp(); 4103 StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp(); 4104 4105 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr(); 4106 4107 // Build this early so it's available for the interpreter. 4108 StubRoutines::_throw_StackOverflowError_entry = 4109 generate_throw_exception("StackOverflowError throw_exception", 4110 CAST_FROM_FN_PTR(address, 4111 SharedRuntime:: 4112 throw_StackOverflowError)); 4113 if (UseCRC32Intrinsics) { 4114 // set table address before stub generation which use it 4115 StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table; 4116 StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32(); 4117 } 4118 } 4119 4120 void generate_all() { 4121 // Generates all stubs and initializes the entry points 4122 4123 // These entry points require SharedInfo::stack0 to be set up in 4124 // non-core builds and need to be relocatable, so they each 4125 // fabricate a RuntimeStub internally. 4126 StubRoutines::_throw_AbstractMethodError_entry = 4127 generate_throw_exception("AbstractMethodError throw_exception", 4128 CAST_FROM_FN_PTR(address, 4129 SharedRuntime:: 4130 throw_AbstractMethodError)); 4131 4132 StubRoutines::_throw_IncompatibleClassChangeError_entry = 4133 generate_throw_exception("IncompatibleClassChangeError throw_exception", 4134 CAST_FROM_FN_PTR(address, 4135 SharedRuntime:: 4136 throw_IncompatibleClassChangeError)); 4137 4138 StubRoutines::_throw_NullPointerException_at_call_entry = 4139 generate_throw_exception("NullPointerException at call throw_exception", 4140 CAST_FROM_FN_PTR(address, 4141 SharedRuntime:: 4142 throw_NullPointerException_at_call)); 4143 4144 // entry points that are platform specific 4145 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup(); 4146 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup(); 4147 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup(); 4148 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup(); 4149 4150 StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF); 4151 StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000); 4152 StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF); 4153 StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000); 4154 4155 // support for verify_oop (must happen after universe_init) 4156 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); 4157 4158 // arraycopy stubs used by compilers 4159 generate_arraycopy_stubs(); 4160 4161 generate_math_stubs(); 4162 4163 // don't bother generating these AES intrinsic stubs unless global flag is set 4164 if (UseAESIntrinsics) { 4165 StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others 4166 4167 StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock(); 4168 StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock(); 4169 StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt(); 4170 StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel(); 4171 } 4172 4173 // Generate GHASH intrinsics code 4174 if (UseGHASHIntrinsics) { 4175 StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask(); 4176 StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask(); 4177 StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks(); 4178 } 4179 4180 // Safefetch stubs. 4181 generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry, 4182 &StubRoutines::_safefetch32_fault_pc, 4183 &StubRoutines::_safefetch32_continuation_pc); 4184 generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry, 4185 &StubRoutines::_safefetchN_fault_pc, 4186 &StubRoutines::_safefetchN_continuation_pc); 4187 #ifdef COMPILER2 4188 if (UseMultiplyToLenIntrinsic) { 4189 StubRoutines::_multiplyToLen = generate_multiplyToLen(); 4190 } 4191 #endif 4192 } 4193 4194 public: 4195 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { 4196 if (all) { 4197 generate_all(); 4198 } else { 4199 generate_initial(); 4200 } 4201 } 4202 }; // end class declaration 4203 4204 void StubGenerator_generate(CodeBuffer* code, bool all) { 4205 StubGenerator g(code, all); 4206 }