/*
 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/top.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions

// Stub Code definitions

static address handle_unsafe_access() {
  JavaThread* thread = JavaThread::current();
  address pc = thread->saved_exception_pc();
  // pc is the instruction which we must emulate
  // doing a no-op is fine:  return garbage from the load
  // therefore, compute npc
  address npc = Assembler::locate_next_instruction(pc);

  // request an async exception
  thread->set_pending_unsafe_access_error();

  // return address of next instruction to execute
  return npc;
}

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) ((void)0)
#else
  void inc_counter_np_(int& counter) {
    // This can destroy rscratch1 if counter is far from the code cache
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter)           \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif

  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0: call wrapper address              address
  //    c_rarg1: result                            address
  //    c_rarg2: result type                       BasicType
  //    c_rarg3: method                            Method*
  //    c_rarg4: (interpreter) entry point         address
  //    c_rarg5: parameters                        intptr_t*
  //    16(rbp): parameter size (in words)         int
  //    24(rbp): thread                            Thread*
  //
  //      [ return_from_Java     ] <--- rsp
  //      [ argument word n      ]
  //      ...
  //  -12 [ argument word 1      ]
  //  -11 [ saved r15            ] <--- rsp_after_call
  //  -10 [ saved r14            ]
  //   -9 [ saved r13            ]
  //   -8 [ saved r12            ]
  //   -7 [ saved rbx            ]
  //   -6 [ call wrapper         ]
  //   -5 [ result               ]
  //   -4 [ result type          ]
  //   -3 [ method               ]
  //   -2 [ entry point          ]
  //   -1 [ parameters           ]
  //    0 [ saved rbp            ] <--- rbp
  //    1 [ return address       ]
  //    2 [ parameter size       ]
  //    3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0: call wrapper address              address
  //    c_rarg1: result                            address
  //    c_rarg2: result type                       BasicType
  //    c_rarg3: method                            Method*
  //    48(rbp): (interpreter) entry point         address
  //    56(rbp): parameters                        intptr_t*
  //    64(rbp): parameter size (in words)         int
  //    72(rbp): thread                            Thread*
  //
  //      [ return_from_Java     ] <--- rsp
  //      [ argument word n      ]
  //      ...
  //  -60 [ argument word 1      ]
  //  -59 [ saved xmm31          ] <--- rsp_after_call
  //      [ saved xmm16-xmm30    ] (EVEX enabled, else the space is blank)
  //  -27 [ saved xmm15          ]
  //      [ saved xmm7-xmm14     ]
  //   -9 [ saved xmm6           ] (each xmm register takes 2 slots)
  //   -7 [ saved r15            ]
  //   -6 [ saved r14            ]
  //   -5 [ saved r13            ]
  //   -4 [ saved r12            ]
  //   -3 [ saved rdi            ]
  //   -2 [ saved rsi            ]
  //   -1 [ saved rbx            ]
  //    0 [ saved rbp            ] <--- rbp
  //    1 [ return address       ]
  //    2 [ call wrapper         ]
  //    3 [ result               ]
  //    4 [ result type          ]
  //    5 [ method               ]
  //    6 [ entry point          ]
  //    7 [ parameters           ]
  //    8 [ parameter size       ]
  //    9 [ thread               ]
  //
  //    Windows reserves the caller's stack space for arguments 1-4.
  //    We spill c_rarg0-c_rarg3 to this space.
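
  // For reference, an illustrative sketch of how the C++ side reaches this
  // stub: the generated code is published as a function pointer (the
  // CallStub typedef in stubRoutines.hpp) and invoked from JavaCalls.
  // Variable names below are hypothetical:
  //
  //   typedef void (*CallStub)(address   link,            // call wrapper
  //                            intptr_t* result,
  //                            BasicType result_type,
  //                            Method*   method,
  //                            address   entry_point,
  //                            intptr_t* parameters,
  //                            int       size_of_parameters,
  //                            TRAPS);
  //
  //   StubRoutines::call_stub()(wrapper_addr, result, result_type, method,
  //                             entry_point, parameters, parameter_size,
  //                             thread);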

  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    xmm_save_first     = 6,  // save from xmm6
    xmm_save_last      = 31, // to xmm31
    xmm_save_base      = -9,
    rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -59
    r15_off            = -7,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            = 0,
    retaddr_off        = 1,
    call_wrapper_off   = 2,
    result_off         = 3,
    result_type_off    = 4,
    method_off         = 5,
    entry_point_off    = 6,
    parameters_off     = 7,
    parameter_size_off = 8,
    thread_off         = 9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            = 0,
    retaddr_off        = 1,
    parameter_size_off = 2,
    thread_off         = 3
#endif
  };

#ifdef _WIN64
  Address xmm_save(int reg) {
    assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range");
    return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize);
  }
#endif

  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,   c_rarg5); // parameters
    __ movptr(entry_point,  c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);
    if (UseAVX > 2) {
      __ movl(rbx, 0xffff);
      __ kmovql(k1, rbx);
    }
#ifdef _WIN64
    if (UseAVX > 2) {
      for (int i = 6; i <= 31; i++) {
        __ movdqu(xmm_save(i), as_XMMRegister(i));
      }
    } else {
      for (int i = 6; i <= 15; i++) {
        __ movdqu(xmm_save(i), as_XMMRegister(i));
      }
    }

    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);      // parameter pointer
    __ movl(c_rarg1, c_rarg3);           // parameter counter is in c_rarg1
  __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0)); // get parameter
    __ addptr(c_rarg2, wordSize);        // advance to next parameter
    __ decrementl(c_rarg1);              // decrement counter
    __ push(rax);                        // pass parameter
    __ jcc(Assembler::notZero, loop);

    // call Java function
  __ BIND(parameters_done);
    __ movptr(rbx, method);              // get Method*
    __ movptr(c_rarg1, entry_point);     // get entry_point
    __ mov(r13, rsp);                    // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

  __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::call_stub: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::call_stub: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L3);
    }
#endif

    // restore regs belonging to calling function
#ifdef _WIN64
    int xmm_ub = 15;
    if (UseAVX > 2) {
      xmm_ub = 31;
    }
    // emit the restores for xmm regs
    for (int i = 6; i <= xmm_ub; i++) {
      __ movdqu(as_XMMRegister(i), xmm_save(i));
    }
#endif
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
  __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

  __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

  __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }

  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to setup the
  // rsp.
  //
  // rax: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    address start = __ pc();

    // same as in generate_call_stub():
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
    const Address thread        (rbp, thread_off         * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L1, L2, L3;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L1);
      __ stop("StubRoutines::catch_exception: r15_thread is corrupted");
      __ bind(L1);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::equal, L2);
      __ stop("StubRoutines::catch_exception: r15_thread is modified by call");
      __ bind(L2);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L3);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L3);
    }
#endif

    // set pending exception
    __ verify_oop(rax);

    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
    __ lea(rscratch1, ExternalAddress((address)__FILE__));
    __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
    __ movl(Address(r15_thread, Thread::exception_line_offset()), (int)__LINE__);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL,
           "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }

  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }

  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0); // Copy to eax we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }
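
  // Illustrative sketch only (hypothetical helper, not used in this file):
  // cmpxchgl compares rax with *dest and leaves the old value in rax
  // either way, which is exactly the return register, so no fix-up is
  // needed. Equivalent semantics via a GCC builtin:
  //
  //   jint atomic_cmpxchg_sketch(jint exchange_value, volatile jint* dest,
  //                              jint compare_value) {
  //     jint old = compare_value;
  //     __atomic_compare_exchange_n(dest, &old, exchange_value,
  //                                 false /*weak*/,
  //                                 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  //     return old; // compare_value on success, original *dest on failure
  //   }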

  // Support for jbyte atomic::atomic_cmpxchg(jbyte exchange_value, volatile jbyte* dest,
  //                                          jbyte compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_byte() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
    address start = __ pc();

    __ movsbq(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jlong atomic::atomic_cmpxchg(jlong exchange_value,
  //                                          volatile jlong* dest,
  //                                          jlong compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jint atomic::add(jint add_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    if ( os::is_MP() ) __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);
    __ addl(rax, c_rarg0);
    __ ret(0);

    return start;
  }
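
  // Illustrative sketch only (hypothetical helper, not used in this file):
  // xaddl swaps the old value of *dest into c_rarg0, so rax (preloaded
  // with add_value) plus c_rarg0 yields the *new* value the contract
  // requires:
  //
  //   jint atomic_add_sketch(jint add_value, volatile jint* dest) {
  //     jint old = __atomic_fetch_add(dest, add_value, __ATOMIC_SEQ_CST);
  //     return old + add_value;   // the xaddl + addl pair above
  //   }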

  // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
    if ( os::is_MP() ) __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
    __ addptr(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess). It is used as part of debugging,
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp(rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // callers fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_sp()
  //
  // This routine is used to find the previous stack pointer for the
  // caller.
  address generate_get_previous_sp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_sp");
    address start = __ pc();

    __ movptr(rax, rsp);
    __ addptr(rax, 8); // return address is at the top of the stack.
    __ ret(0);

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
      __ push(rax);
      __ subptr(rsp, wordSize);  // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK); // Only check control and mask bits
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");

      __ ldmxcsr(mxcsr_std);

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }

  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }
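
  // Illustrative sketch only (hypothetical helper, not used in this file):
  // this fixup runs after cvttss2si has already produced the "integer
  // indefinite" value, so the input is known to be NaN or out of range,
  // and the stub patches the result to Java's float-to-int semantics:
  //
  //   jint f2i_fixup_sketch(jfloat x) {
  //     if (x != x)   return 0;         // NaN -> 0
  //     if (x > 0.0f) return max_jint;  // positive overflow
  //     return min_jint;                // negative overflow
  //   }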

  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64( mask, relocInfo::none );
    __ emit_data64( mask, relocInfo::none );

    return start;
  }

  // The following routine generates a subroutine to throw an
  // asynchronous UnknownError when an unsafe access gets a fault that
  // could not be reasonably prevented by the programmer.  (Example:
  // SIGBUS/OBJERR.)
  address generate_handler_for_unsafe_access() {
    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
    address start = __ pc();

    __ push(0);                       // hole for return address-to-be
    __ pusha();                       // push registers
    Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);

    // FIXME: this probably needs alignment logic

    __ subptr(rsp, frame::arg_reg_save_area_bytes);
    BLOCK_COMMENT("call handle_unsafe_access");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
    __ addptr(rsp, frame::arg_reg_save_area_bytes);

    __ movptr(next_pc, rax);          // stuff next address
    __ popa();
    __ ret(0);                        // jump to next address

    return start;
  }

  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * [tos + 8]: saved r10 (rscratch1) - saved by caller
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,
      saved_r10     = 8 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr   = 16 * wordSize,
      error_msg     = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit); // if obj is NULL it is OK
    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // set r12 to heapbase for load_klass()
    __ reinit_heapbase();

    // make sure klass is 'reasonable', which is not zero.
    __ load_klass(rax, rax);  // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // restore c_rarg3
    __ pop(c_rarg2);                               // restore c_rarg2
    __ pop(r12);                                   // restore r12
    __ popf();                                     // restore flags
    __ ret(4 * wordSize);                          // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax));       // get saved rax back
    __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back
    __ pop(c_rarg3);                               // get saved c_rarg3 back
    __ pop(c_rarg2);                               // get saved c_rarg2 back
    __ pop(r12);                                   // get saved r12 back
    __ popf();                                     // get saved flags off stack --
                                                   // will be ignored

    __ pusha();                                    // push registers
                                                   // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //     [tos +  0] 16 saved registers
    //     [tos + 16] return address
    //   * [tos + 17] error message (char*)
    //   * [tos + 18] object to verify (oop)
    //   * [tos + 19] saved rax - saved by caller and bashed
    //   * [tos + 20] saved r10 (rscratch1) - saved by caller
    //   * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ mov(rsp, r12);                               // restore rsp
    __ popa();                                      // pop registers (includes r12)
    __ ret(4 * wordSize);                           // pop caller saved stuff

    return start;
  }
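
  // Illustrative sketch only (hypothetical helper, not used in this file):
  // the address test above is roughly this predicate, with the mask and
  // bits supplied by Universe:
  //
  //   bool oop_looks_reasonable_sketch(oopDesc* obj) {
  //     return ((uintptr_t)obj & Universe::verify_oop_mask())
  //            == Universe::verify_oop_bits();
  //   }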

  //
  // Verify that a register contains clean 32-bits positive value
  // (high 32-bits are 0) so it could be used in 64-bits shifts.
  //
  //  Input:
  //    Rint  -  32-bits value
  //    Rtmp  -  scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#ifdef ASSERT
    Label L;
    assert_different_registers(Rtmp, Rint);
    __ movslq(Rtmp, Rint);
    __ cmpq(Rtmp, Rint);
    __ jcc(Assembler::equal, L);
    __ stop("high 32-bits of int value are not 0");
    __ bind(L);
#endif
  }

  //  Generate overlap test for array copy stubs
  //
  //  Input:
  //     c_rarg0 - from
  //     c_rarg1 - to
  //     c_rarg2 - element count
  //
  //  Output:
  //     rax   - &from[element count - 1]
  //
  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(NULL, &L_no_overlap, sf);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
    const Register from     = c_rarg0;
    const Register to       = c_rarg1;
    const Register count    = c_rarg2;
    const Register end_from = rax;

    __ cmpptr(to, from);
    __ lea(end_from, Address(from, count, sf, 0));
    if (NOLp == NULL) {
      ExternalAddress no_overlap(no_overlap_target);
      __ jump_cc(Assembler::belowEqual, no_overlap);
      __ cmpptr(to, end_from);
      __ jump_cc(Assembler::aboveEqual, no_overlap);
    } else {
      __ jcc(Assembler::belowEqual, (*NOLp));
      __ cmpptr(to, end_from);
      __ jcc(Assembler::aboveEqual, (*NOLp));
    }
  }

  // Shuffle first three arg regs on Windows into Linux/Solaris locations.
  //
  // Outputs:
  //    rdi - rcx
  //    rsi - rdx
  //    rdx - r8
  //    rcx - r9
  //
  // Registers r9 and r10 are used to save rdi and rsi, which are
  // non-volatile on Windows.  r9 and r10 should not be used by the caller.
  //
  void setup_arg_regs(int nargs = 3) {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
    assert(nargs == 3 || nargs == 4, "else fix");
#ifdef _WIN64
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    if (nargs >= 4)
      __ mov(rax, r9);  // r9 is also saved_rdi
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ mov(rdi, rcx); // c_rarg0
    __ mov(rsi, rdx); // c_rarg1
    __ mov(rdx, r8);  // c_rarg2
    if (nargs >= 4)
      __ mov(rcx, rax); // c_rarg3 (via rax)
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
  }

  void restore_arg_regs() {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
#ifdef _WIN64
    __ movptr(rdi, saved_rdi);
    __ movptr(rsi, saved_rsi);
#endif
  }
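
  // Illustrative sketch only: the overlap test above takes the no-overlap
  // branch exactly when a forward (disjoint) copy is safe, roughly:
  //
  //   if (to <= from || to >= from + count * elem_size)
  //     goto no_overlap;  // forward copy cannot clobber unread source
  //   // else fall through to the conjoint (backward) copy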

  // Generate code for an array write pre barrier
  //
  //     addr    -  starting address
  //     count   -  element count
  //     tmp     -  scratch register
  //
  //     Destroy no registers!
  //
  void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCTLogging:
        // With G1, don't generate the call if we statically know that the target is uninitialized
        if (!dest_uninitialized) {
          __ pusha();                      // push registers
          if (count == c_rarg0) {
            if (addr == c_rarg1) {
              // exactly backwards!!
              __ xchgptr(c_rarg1, c_rarg0);
            } else {
              __ movptr(c_rarg1, count);
              __ movptr(c_rarg0, addr);
            }
          } else {
            __ movptr(c_rarg0, addr);
            __ movptr(c_rarg1, count);
          }
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
          __ popa();
        }
        break;
      case BarrierSet::CardTableForRS:
      case BarrierSet::CardTableExtension:
      case BarrierSet::ModRef:
        break;
      default:
        ShouldNotReachHere();

    }
  }

  //
  // Generate code for an array write post barrier
  //
  //  Input:
  //     start    - register containing starting address of destination array
  //     count    - elements count
  //     scratch  - scratch register
  //
  //  The input registers are overwritten.
  //
  void gen_write_ref_array_post_barrier(Register start, Register count, Register scratch) {
    assert_different_registers(start, count, scratch);
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCTLogging:
        {
          __ pusha();             // push registers (overkill)
          if (c_rarg0 == count) { // On win64 c_rarg0 == rcx
            assert_different_registers(c_rarg1, start);
            __ mov(c_rarg1, count);
            __ mov(c_rarg0, start);
          } else {
            assert_different_registers(c_rarg0, count);
            __ mov(c_rarg0, start);
            __ mov(c_rarg1, count);
          }
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
          __ popa();
        }
        break;
      case BarrierSet::CardTableForRS:
      case BarrierSet::CardTableExtension:
        {
          CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs);
          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

          Label L_loop;
          const Register end = count;

          __ leaq(end, Address(start, count, TIMES_OOP, 0)); // end == start+count*oop_size
          __ subptr(end, BytesPerHeapOop);                   // end - 1 to make inclusive
          __ shrptr(start, CardTableModRefBS::card_shift);
          __ shrptr(end, CardTableModRefBS::card_shift);
          __ subptr(end, start);                             // end --> cards count

          int64_t disp = (int64_t) ct->byte_map_base;
          __ mov64(scratch, disp);
          __ addptr(start, scratch);
        __ BIND(L_loop);
          __ movb(Address(start, count, Address::times_1), 0);
          __ decrement(count);
          __ jcc(Assembler::greaterEqual, L_loop);
        }
        break;
      default:
        ShouldNotReachHere();

    }
  }
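
  // Illustrative sketch only: the card-table loop above dirties one byte
  // per card spanned by the stored oop range, approximately:
  //
  //   jbyte*    base  = ct->byte_map_base;
  //   uintptr_t first = (uintptr_t)start_addr >> card_shift;
  //   uintptr_t last  = ((uintptr_t)start_addr + count * heapOopSize
  //                      - BytesPerHeapOop) >> card_shift;
  //   for (uintptr_t c = first; c <= last; c++)
  //     base[c] = 0;   // 0 == dirty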

  // Copy big chunks forward
  //
  // Inputs:
  //   end_from     - source arrays end address
  //   end_to       - destination array end address
  //   qword_count  - 64-bits element count, negative
  //   to           - scratch
  //   L_copy_bytes - entry label
  //   L_copy_8_bytes  - exit label
  //
  void copy_bytes_forward(Register end_from, Register end_to,
                          Register qword_count, Register to,
                          Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      // Copy 64-bytes per iteration
    __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit);
        __ evmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1);
        __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2);
        __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3);
      }
    __ BIND(L_copy_bytes);
      __ addptr(qword_count, 8);
      __ jcc(Assembler::lessEqual, L_loop);
      __ subptr(qword_count, 4);  // sub(8) and add(4)
      __ jccb(Assembler::greater, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
      } else {
        __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
        __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
        __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
        __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
      }
      __ addptr(qword_count, 4);
    __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32-bytes per iteration
    __ BIND(L_loop);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
      __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
      __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
      __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
      __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);

    __ BIND(L_copy_bytes);
      __ addptr(qword_count, 4);
      __ jcc(Assembler::lessEqual, L_loop);
    }
    __ subptr(qword_count, 4);
    __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
  }
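
  // Illustrative sketch only: the forward copier indexes off the "end"
  // pointers with a negative qword count that climbs toward zero, so one
  // register serves as both induction variable and termination test,
  // roughly (modulo a fixed one-qword bias in the base addresses):
  //
  //   for (intptr_t i = -(intptr_t)qwords; i != 0; i++)
  //     end_to[i] = end_from[i];   // Address(end_to, i, times_8, ...)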

  // Copy big chunks backward
  //
  // Inputs:
  //   from         - source arrays address
  //   dest         - destination array address
  //   qword_count  - 64-bits element count
  //   to           - scratch
  //   L_copy_bytes - entry label
  //   L_copy_8_bytes  - exit label
  //
  void copy_bytes_backward(Register from, Register dest,
                           Register qword_count, Register to,
                           Label& L_copy_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    if (UseUnalignedLoadStores) {
      Label L_end;
      // Copy 64-bytes per iteration
    __ BIND(L_loop);
      if (UseAVX > 2) {
        __ evmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32), Assembler::AVX_512bit);
        __ evmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0, Assembler::AVX_512bit);
      } else if (UseAVX == 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0);
        __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8,  0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8,  0), xmm1);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48));
        __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32));
        __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1);
        __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2);
        __ movdqu(xmm3, Address(from, qword_count, Address::times_8,  0));
        __ movdqu(Address(dest, qword_count, Address::times_8,  0), xmm3);
      }
    __ BIND(L_copy_bytes);
      __ subptr(qword_count, 8);
      __ jcc(Assembler::greaterEqual, L_loop);

      __ addptr(qword_count, 4);  // add(8) and sub(4)
      __ jccb(Assembler::less, L_end);
      // Copy trailing 32 bytes
      if (UseAVX >= 2) {
        __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0));
        __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0);
      } else {
        __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
        __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
        __ movdqu(xmm1, Address(from, qword_count, Address::times_8,  0));
        __ movdqu(Address(dest, qword_count, Address::times_8,  0), xmm1);
      }
      __ subptr(qword_count, 4);
    __ BIND(L_end);
      if (UseAVX >= 2) {
        // clean upper bits of YMM registers
        __ vpxor(xmm0, xmm0);
        __ vpxor(xmm1, xmm1);
      }
    } else {
      // Copy 32-bytes per iteration
    __ BIND(L_loop);
      __ movq(to, Address(from, qword_count, Address::times_8, 24));
      __ movq(Address(dest, qword_count, Address::times_8, 24), to);
      __ movq(to, Address(from, qword_count, Address::times_8, 16));
      __ movq(Address(dest, qword_count, Address::times_8, 16), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  8));
      __ movq(Address(dest, qword_count, Address::times_8,  8), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  0));
      __ movq(Address(dest, qword_count, Address::times_8,  0), to);

    __ BIND(L_copy_bytes);
      __ subptr(qword_count, 4);
      __ jcc(Assembler::greaterEqual, L_loop);
    }
    __ addptr(qword_count, 4);
    __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_byte_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_byte_copy().
  //
  address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    Label L_copy_byte, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count); // make the count negative
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
  __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
  __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jccb(Assembler::zero, L_copy_byte);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ addptr(end_from, 2);
    __ addptr(end_to, 2);

    // Check for and copy trailing byte
  __ BIND(L_copy_byte);
    __ testl(byte_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movb(rax, Address(end_from, 8));
    __ movb(Address(end_to, 8), rax);

  __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-bytes chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }
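
  // Illustrative sketch only: the tail handling above decomposes a byte
  // count n as n = 8*q + 4*d + 2*w + b, copying q qwords in bulk and then
  // testing bits 2, 1 and 0 of the original count; e.g. n == 15 copies
  // one qword, then a dword, a word and a byte.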

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
                                      address* entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3);   // count => qword_count

    // Copy from high to low addresses.

    // Check for and copy trailing byte
    __ testl(byte_count, 1);
    __ jcc(Assembler::zero, L_copy_2_bytes);
    __ movb(rax, Address(from, byte_count, Address::times_1, -1));
    __ movb(Address(to, byte_count, Address::times_1, -1), rax);
    __ decrement(byte_count); // Adjust for possible trailing word

    // Check for and copy trailing word
  __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jcc(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, byte_count, Address::times_1, -2));
    __ movw(Address(to, byte_count, Address::times_1, -2), rax);

    // Check for and copy trailing dword
  __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-bytes chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
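
  // Illustrative sketch only: a conjoint copy must have memmove rather
  // than memcpy semantics; copying from high to low addresses is what
  // makes an overlapping (to > from) copy safe:
  //
  //   for (size_t i = n; i-- > 0; )
  //     to[i] = from[i];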

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //   disjoint_short_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_short_copy().
  //
  address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Original 'dest' is trashed, so we can't use it as a
    // base register for a possible trailing word copy

    // Check for and copy trailing dword
  __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
  __ BIND(L_copy_2_bytes);
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

  __ BIND(L_exit);
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-bytes chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0 - source array address
  //   c_rarg1 - destination array address
  //   c_rarg2 - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
                                       address *entry, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes;
    const Register from = rdi;  // source array address
    const Register to = rsi;    // destination array address
    const Register count = rdx; // elements count
    const Register word_count = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_2);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2); // count => qword_count

    // Copy from high to low addresses.  Use 'to' as scratch.

    // Check for and copy trailing word
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, word_count, Address::times_2, -2));
    __ movw(Address(to, word_count, Address::times_2, -2), rax);

    // Check for and copy trailing dword
  __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
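
  // Editor's note: array_overlap_test (used by the conjoint stubs) branches
  // to the disjoint "no-overlap" entry whenever a plain forward copy is
  // safe, i.e. when the destination does not start inside the source range.
  // A sketch of the condition, under that reading (illustrative only):
  //
  //   bool forward_copy_is_safe(char* from, char* to, size_t byte_count) {
  //     return to <= from || to >= from + byte_count;
  //   }
  //
  // Only when this is false does the backward (high-to-low) code above run.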

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0 - source array address
  //   c_rarg1 - destination array address
  //   c_rarg2 - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  // Side Effects:
  //   disjoint_int_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_int_oop_copy().
  //
  address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry,
                                         const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
    const Register from = rdi;     // source array address
    const Register to = rsi;       // destination array address
    const Register count = rdx;    // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;
    const Register end_from = from; // source array end address
    const Register end_to = to;     // destination array end address
    const Register saved_to = r11;  // saved destination array address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied: end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
    if (is_oop) {
      __ movq(saved_to, to);
      gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
    }

    // 'from', 'to' and 'count' are now valid
    __ movptr(dword_count, count);
    __ shrptr(count, 1); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
  __ BIND(L_copy_4_bytes);
    __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
    __ jccb(Assembler::zero, L_exit);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

  __ BIND(L_exit);
    if (is_oop) {
      gen_write_ref_array_post_barrier(saved_to, dword_count, rax);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }
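
  // Editor's note: the disjoint stubs all use the same forward-copy idiom:
  // bias both end pointers to the last qword, negate the count, and run the
  // index up toward zero, so one increment advances both streams and serves
  // as the loop condition.  Sketch (illustrative only):
  //
  //   void forward_qword_copy(jlong* from, jlong* to, ptrdiff_t qwords) {
  //     jlong* end_from = from + qwords - 1;   // inclusive end pointers
  //     jlong* end_to   = to   + qwords - 1;
  //     for (ptrdiff_t i = -qwords; i != 0; i++) {
  //       end_to[i + 1] = end_from[i + 1];     // from[0] first, then upward
  //     }
  //   }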

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0 - source array address
  //   c_rarg1 - destination array address
  //   c_rarg2 - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
                                         address *entry, const char *name,
                                         bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit;
    const Register from = rdi;  // source array address
    const Register to = rsi;    // destination array address
    const Register count = rdx; // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_4);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    if (is_oop) {
      // no registers are destroyed by this call
      gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
    }

    assert_clean_int(count, rax); // Make sure 'count' is clean int.
    // 'from', 'to' and 'count' are now valid
    __ movptr(dword_count, count);
    __ shrptr(count, 1); // count => qword_count

    // Copy from high to low addresses.  Use 'to' as scratch.

    // Check for and copy trailing dword
    __ testl(dword_count, 1);
    __ jcc(Assembler::zero, L_copy_bytes);
    __ movl(rax, Address(from, dword_count, Address::times_4, -4));
    __ movl(Address(to, dword_count, Address::times_4, -4), rax);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

  __ BIND(L_exit);
    if (is_oop) {
      gen_write_ref_array_post_barrier(to, dword_count, rax);
    }
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0 - source array address
  //   c_rarg1 - destination array address
  //   c_rarg2 - element count, treated as ssize_t, can be zero
  //
  // Side Effects:
  //   disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
  //   no-overlap entry point used by generate_conjoint_long_oop_copy().
  //
  address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry,
                                          const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_exit;
    const Register from = rdi;        // source array address
    const Register to = rsi;          // destination array address
    const Register qword_count = rdx; // elements count
    const Register end_from = from;   // source array end address
    const Register end_to = rcx;      // destination array end address
    const Register saved_to = to;
    const Register saved_count = r11;
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied: end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    // Save no-overlap entry point for generate_conjoint_long_oop_copy()
    assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
    // 'from', 'to' and 'qword_count' are now valid
    if (is_oop) {
      // Save to and count for store barrier
      __ movptr(saved_count, qword_count);
      // no registers are destroyed by this call
      gen_write_ref_array_pre_barrier(to, qword_count, dest_uninitialized);
    }

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    } else {
      restore_arg_regs();
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
      __ xorptr(rax, rax); // return 0
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }

    // Copy in multi-byte chunks
    copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    if (is_oop) {
    __ BIND(L_exit);
      gen_write_ref_array_post_barrier(saved_to, saved_count, rax);
    }
    restore_arg_regs();
    if (is_oop) {
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
    }
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
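
  // Editor's note: for the oop variants, gen_write_ref_array_post_barrier
  // dirties every card spanned by the destination range.  Assuming the
  // default CardTableModRefBS layout (512-byte cards, dirty value 0), the
  // effect is roughly (sketch, names illustrative; other collectors differ):
  //
  //   void post_barrier(oop* dst, size_t count, jbyte* byte_map_base) {
  //     uintptr_t first = (uintptr_t) dst;
  //     uintptr_t last  = (uintptr_t)(dst + count) - 1;
  //     for (uintptr_t card = first >> 9; card <= last >> 9; card++) {
  //       byte_map_base[card] = 0;   // dirty_card_val()
  //     }
  //   }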

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0 - source array address
  //   c_rarg1 - destination array address
  //   c_rarg2 - element count, treated as ssize_t, can be zero
  //
  address generate_conjoint_long_oop_copy(bool aligned, bool is_oop,
                                          address nooverlap_target, address *entry,
                                          const char *name, bool dest_uninitialized = false) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_bytes, L_copy_8_bytes, L_exit;
    const Register from = rdi;        // source array address
    const Register to = rsi;          // destination array address
    const Register qword_count = rdx; // elements count
    const Register saved_count = rcx;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.

    if (entry != NULL) {
      *entry = __ pc();
      // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
      BLOCK_COMMENT("Entry:");
    }

    array_overlap_test(nooverlap_target, Address::times_8);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers
    // 'from', 'to' and 'qword_count' are now valid
    if (is_oop) {
      // Save to and count for store barrier
      __ movptr(saved_count, qword_count);
      // No registers are destroyed by this call
      gen_write_ref_array_pre_barrier(to, saved_count, dest_uninitialized);
    }

    __ jmp(L_copy_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    } else {
      restore_arg_regs();
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
      __ xorptr(rax, rax); // return 0
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }

    // Copy in multi-byte chunks
    copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);

    if (is_oop) {
    __ BIND(L_exit);
      gen_write_ref_array_post_barrier(to, saved_count, rax);
    }
    restore_arg_regs();
    if (is_oop) {
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free
    }
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }


  // Helper for generating a dynamic type check.
  // Smashes no registers.
  void generate_type_check(Register sub_klass,
                           Register super_check_offset,
                           Register super_klass,
                           Label& L_success) {
    assert_different_registers(sub_klass, super_check_offset, super_klass);

    BLOCK_COMMENT("type_check:");

    Label L_miss;

    __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL,
                                     super_check_offset);
    __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL);

    // Fall through on failure!
  __ BIND(L_miss);
  }
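
  // Editor's note: the fast/slow pair above is the standard HotSpot subtype
  // check (the real logic lives in MacroAssembler::check_klass_subtype_*).
  // A simplified sketch of what it decides (illustrative only; the real
  // code also special-cases the secondary-super cache update):
  //
  //   bool is_subtype_of(Klass* sub, Klass* super, juint sco) {
  //     if (*(Klass**)((address)sub + sco) == super)
  //       return true;                               // fast path: one probe
  //     if (sco != in_bytes(Klass::secondary_super_cache_offset()))
  //       return false;                              // primary supers decide
  //     Array<Klass*>* ss = sub->secondary_supers(); // slow path: linear scan
  //     for (int i = 0; i < ss->length(); i++) {
  //       if (ss->at(i) == super) return true;       // (and cache the hit)
  //     }
  //     return false;
  //   }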

  //
  //  Generate checkcasting array copy stub
  //
  //  Input:
  //    c_rarg0   - source array address
  //    c_rarg1   - destination array address
  //    c_rarg2   - element count, treated as ssize_t, can be zero
  //    c_rarg3   - size_t ckoff (super_check_offset)
  // not Win64
  //    c_rarg4   - oop ckval (super_klass)
  // Win64
  //    rsp+40    - oop ckval (super_klass)
  //
  //  Output:
  //    rax ==  0  -  success
  //    rax == -1^K - failure, where K is partial transfer count
  //
  address generate_checkcast_copy(const char *name, address *entry,
                                  bool dest_uninitialized = false) {

    Label L_load_element, L_store_element, L_do_card_marks, L_done;

    // Input registers (after setup_arg_regs)
    const Register from = rdi;   // source array address
    const Register to = rsi;     // destination array address
    const Register length = rdx; // elements count
    const Register ckoff = rcx;  // super_check_offset
    const Register ckval = r8;   // super_klass

    // Registers used as temps (r13, r14 are save-on-entry)
    const Register end_from = from;  // source array end address
    const Register end_to = r13;     // destination array end address
    const Register count = rdx;      // -(count_remaining)
    const Register r14_length = r14; // saved copy of length
    // End pointers are inclusive, and if length is not zero they point
    // to the last unit copied: end_to[0] := end_from[0]

    const Register rax_oop = rax;   // actual oop copied
    const Register r11_klass = r11; // oop._klass

    //---------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the two arrays are subtypes of Object[] but the
    // destination array type is not equal to or a supertype
    // of the source type.  Each element must be separately
    // checked.

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

#ifdef ASSERT
    // caller guarantees that the arrays really are different
    // otherwise, we would have to make conjoint checks
    { Label L;
      array_overlap_test(L, TIMES_OOP);
      __ stop("checkcast_copy within a single array");
      __ bind(L);
    }
#endif //ASSERT

    setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
                       // ckoff => rcx, ckval => r8
                       // r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
    // last argument (#4) is on stack on Win64
    __ movptr(ckval, Address(rsp, 6 * wordSize));
#endif

    // Caller of this entry point must set up the argument registers.
    if (entry != NULL) {
      *entry = __ pc();
      BLOCK_COMMENT("Entry:");
    }

    // allocate spill slots for r13, r14
    enum {
      saved_r13_offset,
      saved_r14_offset,
      saved_rbp_offset
    };
    __ subptr(rsp, saved_rbp_offset * wordSize);
    __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
    __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);

    // check that int operands are properly extended to size_t
    assert_clean_int(length, rax);
    assert_clean_int(ckoff, rax);

#ifdef ASSERT
    BLOCK_COMMENT("assert consistent ckoff/ckval");
    // The ckoff and ckval must be mutually consistent,
    // even though caller generates both.
    { Label L;
      int sco_offset = in_bytes(Klass::super_check_offset_offset());
      __ cmpl(ckoff, Address(ckval, sco_offset));
      __ jcc(Assembler::equal, L);
      __ stop("super_check_offset inconsistent");
      __ bind(L);
    }
#endif //ASSERT

    // Loop-invariant addresses.  They are exclusive end pointers.
    Address end_from_addr(from, length, TIMES_OOP, 0);
    Address end_to_addr(to, length, TIMES_OOP, 0);
    // Loop-variant addresses.  They assume post-incremented count < 0.
    Address from_element_addr(end_from, count, TIMES_OOP, 0);
    Address to_element_addr(end_to, count, TIMES_OOP, 0);

    gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);

    // Copy from low to high addresses, indexed from the end of each array.
    __ lea(end_from, end_from_addr);
    __ lea(end_to, end_to_addr);
    __ movptr(r14_length, length); // save a copy of the length
    assert(length == count, "");   // else fix next line:
    __ negptr(count);              // negate and test the length
    __ jcc(Assembler::notZero, L_load_element);

    // Empty array: Nothing to do.
    __ xorptr(rax, rax); // return 0 on (trivial) success
    __ jmp(L_done);

    // ======== begin loop ========
    // (Loop is rotated; its entry is L_load_element.)
    // Loop control:
    //   for (count = -count; count != 0; count++)
    // Base pointers src, dst are biased by 8*(count-1), to last element.
    __ align(OptoLoopAlignment);

  __ BIND(L_store_element);
    __ store_heap_oop(to_element_addr, rax_oop); // store the oop
    __ increment(count);                         // increment the count toward zero
    __ jcc(Assembler::zero, L_do_card_marks);

    // ======== loop entry is here ========
  __ BIND(L_load_element);
    __ load_heap_oop(rax_oop, from_element_addr); // load the oop
    __ testptr(rax_oop, rax_oop);
    __ jcc(Assembler::zero, L_store_element);

    __ load_klass(r11_klass, rax_oop); // query the object klass
    generate_type_check(r11_klass, ckoff, ckval, L_store_element);
    // ======== end loop ========

    // It was a real error; we must depend on the caller to finish the job.
    // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
    // Emit GC store barriers for the oops we have copied (r14 + rdx),
    // and report their number to the caller.
    assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1);
    Label L_post_barrier;
    __ addptr(r14_length, count); // K = (original - remaining) oops
    __ movptr(rax, r14_length);   // save the value
    __ notptr(rax);               // report (-1^K) to caller (does not affect flags)
    __ jccb(Assembler::notZero, L_post_barrier);
    __ jmp(L_done); // K == 0, nothing was copied, skip post barrier

    // Come here on success only.
  __ BIND(L_do_card_marks);
    __ xorptr(rax, rax); // return 0 on success

  __ BIND(L_post_barrier);
    gen_write_ref_array_post_barrier(to, r14_length, rscratch1);

    // Common exit point (success or failure).
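
  // Editor's note: at the Java level the rotated loop above behaves like the
  // following, including the ~K failure encoding the caller decodes (sketch,
  // illustrative only; the real stores are also card-marked):
  //
  //   intptr_t checkcast_copy(oop* from, oop* to, size_t length,
  //                           Klass* dst_elem_klass) {
  //     for (size_t K = 0; K < length; K++) {
  //       oop o = from[K];
  //       if (o != NULL && !o->klass()->is_subtype_of(dst_elem_klass)) {
  //         return ~(intptr_t)K;   // i.e. -1^K; K elements were copied
  //       }
  //       to[K] = o;
  //     }
  //     return 0;                  // success
  //   }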
  __ BIND(L_done);
    __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
    __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
    restore_arg_regs();
    inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  //
  //  Generate 'unsafe' array copy stub
  //  Though just as safe as the other stubs, it takes an unscaled
  //  size_t argument instead of an element count.
  //
  //  Input:
  //    c_rarg0   - source array address
  //    c_rarg1   - destination array address
  //    c_rarg2   - byte count, treated as ssize_t, can be zero
  //
  // Examines the alignment of the operands and dispatches
  // to a long, int, short, or byte copy loop.
  //
  address generate_unsafe_copy(const char *name,
                               address byte_copy_entry, address short_copy_entry,
                               address int_copy_entry, address long_copy_entry) {

    Label L_long_aligned, L_int_aligned, L_short_aligned;

    // Input registers (before setup_arg_regs)
    const Register from = c_rarg0; // source array address
    const Register to = c_rarg1;   // destination array address
    const Register size = c_rarg2; // byte count (size_t)

    // Register used as a temp
    const Register bits = rax;     // test copy of low bits

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);

    __ mov(bits, from);
    __ orptr(bits, to);
    __ orptr(bits, size);

    __ testb(bits, BytesPerLong-1);
    __ jccb(Assembler::zero, L_long_aligned);

    __ testb(bits, BytesPerInt-1);
    __ jccb(Assembler::zero, L_int_aligned);

    __ testb(bits, BytesPerShort-1);
    __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));

  __ BIND(L_short_aligned);
    __ shrptr(size, LogBytesPerShort); // size => short_count
    __ jump(RuntimeAddress(short_copy_entry));

  __ BIND(L_int_aligned);
    __ shrptr(size, LogBytesPerInt); // size => int_count
    __ jump(RuntimeAddress(int_copy_entry));

  __ BIND(L_long_aligned);
    __ shrptr(size, LogBytesPerLong); // size => qword_count
    __ jump(RuntimeAddress(long_copy_entry));

    return start;
  }
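
  // Editor's note: or-ing the two addresses and the byte count lets a single
  // test per width discover the widest unit that divides all three, which is
  // what the testb/jcc chain above computes (sketch, illustrative only):
  //
  //   int log2_copy_unit(uintptr_t from, uintptr_t to, size_t size) {
  //     uintptr_t bits = from | to | size;
  //     if ((bits & (BytesPerLong  - 1)) == 0) return LogBytesPerLong;  // 3
  //     if ((bits & (BytesPerInt   - 1)) == 0) return LogBytesPerInt;   // 2
  //     if ((bits & (BytesPerShort - 1)) == 0) return LogBytesPerShort; // 1
  //     return 0;                                      // byte copy
  //   }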

  // Perform range checks on the proposed arraycopy.
  // Kills temp, but nothing else.
  // Also, clean the sign bits of src_pos and dst_pos.
  void arraycopy_range_checks(Register src,     // source array oop (c_rarg0)
                              Register src_pos, // source position (c_rarg1)
                              Register dst,     // destination array oop (c_rarg2)
                              Register dst_pos, // destination position (c_rarg3)
                              Register length,
                              Register temp,
                              Label& L_failed) {
    BLOCK_COMMENT("arraycopy_range_checks:");

    // if (src_pos + length > arrayOop(src)->length()) FAIL;
    __ movl(temp, length);
    __ addl(temp, src_pos); // src_pos + length
    __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    // if (dst_pos + length > arrayOop(dst)->length()) FAIL;
    __ movl(temp, length);
    __ addl(temp, dst_pos); // dst_pos + length
    __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
    // Move with sign extension can be used since they are positive.
    __ movslq(src_pos, src_pos);
    __ movslq(dst_pos, dst_pos);

    BLOCK_COMMENT("arraycopy_range_checks done");
  }
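
  // Editor's note: 'above' is an unsigned compare, so each branch above
  // rejects both pos + length > array.length and a 32-bit overflow of the
  // sum (which shows up as a huge unsigned value).  Equivalently:
  //
  //   bool range_check_ok(jint pos, jint length, jint array_length) {
  //     // pos and length are already known non-negative at this point
  //     return (juint)(pos + length) <= (juint)array_length;
  //   }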

  //
  //  Generate generic array copy stubs
  //
  //  Input:
  //    c_rarg0    -  src oop
  //    c_rarg1    -  src_pos (32-bits)
  //    c_rarg2    -  dst oop
  //    c_rarg3    -  dst_pos (32-bits)
  // not Win64
  //    c_rarg4    -  element count (32-bits)
  // Win64
  //    rsp+40     -  element count (32-bits)
  //
  //  Output:
  //    rax ==  0  -  success
  //    rax == -1^K - failure, where K is partial transfer count
  //
  address generate_generic_copy(const char *name,
                                address byte_copy_entry, address short_copy_entry,
                                address int_copy_entry, address oop_copy_entry,
                                address long_copy_entry, address checkcast_copy_entry) {

    Label L_failed, L_failed_0, L_objArray;
    Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;

    // Input registers
    const Register src = c_rarg0;     // source array oop
    const Register src_pos = c_rarg1; // source position
    const Register dst = c_rarg2;     // destination array oop
    const Register dst_pos = c_rarg3; // destination position
#ifndef _WIN64
    const Register length = c_rarg4;
#else
    const Address length(rsp, 6 * wordSize); // elements count is on stack on Win64
#endif

    { int modulus = CodeEntryAlignment;
      int target = modulus - 5; // 5 = sizeof jmp(L_failed)
      int advance = target - (__ offset() % modulus);
      if (advance < 0) advance += modulus;
      if (advance > 0) __ nop(advance);
    }
    StubCodeMark mark(this, "StubRoutines", name);

    // Short-hop target to L_failed.  Makes for denser prologue code.
  __ BIND(L_failed_0);
    __ jmp(L_failed);
    assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");

    __ align(CodeEntryAlignment);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_generic_array_copy_ctr);

    //-----------------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the following conditions are met:
    //
    // (1) src and dst must not be null.
    // (2) src_pos must not be negative.
    // (3) dst_pos must not be negative.
    // (4) length  must not be negative.
    // (5) src klass and dst klass should be the same and not NULL.
    // (6) src and dst should be arrays.
    // (7) src_pos + length must not exceed length of src.
    // (8) dst_pos + length must not exceed length of dst.
    //

    // if (src == NULL) return -1;
    __ testptr(src, src); // src oop
    size_t j1off = __ offset();
    __ jccb(Assembler::zero, L_failed_0);

    // if (src_pos < 0) return -1;
    __ testl(src_pos, src_pos); // src_pos (32-bits)
    __ jccb(Assembler::negative, L_failed_0);

    // if (dst == NULL) return -1;
    __ testptr(dst, dst); // dst oop
    __ jccb(Assembler::zero, L_failed_0);

    // if (dst_pos < 0) return -1;
    __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
    size_t j4off = __ offset();
    __ jccb(Assembler::negative, L_failed_0);

    // The first four tests are very dense code,
    // but not quite dense enough to put four
    // jumps in a 16-byte instruction fetch buffer.
    // That's good, because some branch predictors
    // do not like jumps so close together.
    // Make sure of this.
    guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");

    // registers used as temp
    const Register r11_length = r11;    // elements count to copy
    const Register r10_src_klass = r10; // array klass

    // if (length < 0) return -1;
    __ movl(r11_length, length); // length (elements count, 32-bits value)
    __ testl(r11_length, r11_length);
    __ jccb(Assembler::negative, L_failed_0);

    __ load_klass(r10_src_klass, src);
#ifdef ASSERT
    // assert(src->klass() != NULL);
    {
      BLOCK_COMMENT("assert klasses not null {");
      Label L1, L2;
      __ testptr(r10_src_klass, r10_src_klass);
      __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL
      __ bind(L1);
      __ stop("broken null klass");
      __ bind(L2);
      __ load_klass(rax, dst);
      __ cmpq(rax, 0);
      __ jcc(Assembler::equal, L1); // this would be broken also
      BLOCK_COMMENT("} assert klasses not null done");
    }
#endif

    // Load layout helper (32-bits)
    //
    //  |array_tag|     | header_size | element_type |     |log2_element_size|
    // 32        30    24            16              8     2                 0
    //
    //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
    //

    const int lh_offset = in_bytes(Klass::layout_helper_offset());

    // Handle objArrays completely differently...
    const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
    __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh);
    __ jcc(Assembler::equal, L_objArray);

    // if (src->klass() != dst->klass()) return -1;
    __ load_klass(rax, dst);
    __ cmpq(r10_src_klass, rax);
    __ jcc(Assembler::notEqual, L_failed);

    const Register rax_lh = rax; // layout helper
    __ movl(rax_lh, Address(r10_src_klass, lh_offset));

    // if (!src->is_Array()) return -1;
    __ cmpl(rax_lh, Klass::_lh_neutral_value);
    __ jcc(Assembler::greaterEqual, L_failed);

    // At this point, it is known to be a typeArray (array_tag 0x3).
#ifdef ASSERT
    {
      BLOCK_COMMENT("assert primitive array {");
      Label L;
      __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
      __ jcc(Assembler::greaterEqual, L);
      __ stop("must be a primitive array");
      __ bind(L);
      BLOCK_COMMENT("} assert primitive array done");
    }
#endif

    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                           r10, L_failed);

    // TypeArrayKlass
    //
    // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
    // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
    //

    const Register r10_offset = r10;    // array offset
    const Register rax_elsize = rax_lh; // element size

    __ movl(r10_offset, rax_lh);
    __ shrl(r10_offset, Klass::_lh_header_size_shift);
    __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset
    __ addptr(src, r10_offset); // src array offset
    __ addptr(dst, r10_offset); // dst array offset
    BLOCK_COMMENT("choose copy loop based on element size");
    __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize

    // next registers should be set before the jump to corresponding stub
    const Register from = c_rarg0;  // source array address
    const Register to = c_rarg1;    // destination array address
    const Register count = c_rarg2; // elements count

    // 'from', 'to', 'count' registers should be set in such order
    // since they are the same as 'src', 'src_pos', 'dst'.

  __ BIND(L_copy_bytes);
    __ cmpl(rax_elsize, 0);
    __ jccb(Assembler::notEqual, L_copy_shorts);
    __ lea(from, Address(src, src_pos, Address::times_1, 0)); // src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_1, 0)); // dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(byte_copy_entry));

  __ BIND(L_copy_shorts);
    __ cmpl(rax_elsize, LogBytesPerShort);
    __ jccb(Assembler::notEqual, L_copy_ints);
    __ lea(from, Address(src, src_pos, Address::times_2, 0)); // src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_2, 0)); // dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(short_copy_entry));

  __ BIND(L_copy_ints);
    __ cmpl(rax_elsize, LogBytesPerInt);
    __ jccb(Assembler::notEqual, L_copy_longs);
    __ lea(from, Address(src, src_pos, Address::times_4, 0)); // src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_4, 0)); // dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(int_copy_entry));

  __ BIND(L_copy_longs);
#ifdef ASSERT
    {
      BLOCK_COMMENT("assert long copy {");
      Label L;
      __ cmpl(rax_elsize, LogBytesPerLong);
      __ jcc(Assembler::equal, L);
      __ stop("must be long copy, but elsize is wrong");
      __ bind(L);
      BLOCK_COMMENT("} assert long copy done");
    }
#endif
    __ lea(from, Address(src, src_pos, Address::times_8, 0)); // src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_8, 0)); // dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(long_copy_entry));

    // ObjArrayKlass
  __ BIND(L_objArray);
    // live at this point:  r10_src_klass, r11_length, src[_pos], dst[_pos]

    Label L_plain_copy, L_checkcast_copy;
    // test array classes for subtyping
    __ load_klass(rax, dst);
    __ cmpq(r10_src_klass, rax); // usual case is exact equality
    __ jcc(Assembler::notEqual, L_checkcast_copy);

    // Identically typed arrays can be copied without element-wise checks.
    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                           r10, L_failed);

    __ lea(from, Address(src, src_pos, TIMES_OOP,
                         arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
    __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
                         arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
    __ movl2ptr(count, r11_length); // length
  __ BIND(L_plain_copy);
    __ jump(RuntimeAddress(oop_copy_entry));

  __ BIND(L_checkcast_copy);
    // live at this point:  r10_src_klass, r11_length, rax (dst_klass)
    {
      // Before looking at dst.length, make sure dst is also an objArray.
      __ cmpl(Address(rax, lh_offset), objArray_lh);
      __ jcc(Assembler::notEqual, L_failed);

      // It is safe to examine both src.length and dst.length.
      arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                             rax, L_failed);

      const Register r11_dst_klass = r11;
      __ load_klass(r11_dst_klass, dst); // reload

      // Marshal the base address arguments now, freeing registers.
      __ lea(from, Address(src, src_pos, TIMES_OOP,
                           arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
                           arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ movl(count, length); // length (reloaded)
      Register sco_temp = c_rarg3; // this register is free now
      assert_different_registers(from, to, count, sco_temp,
                                 r11_dst_klass, r10_src_klass);
      assert_clean_int(count, sco_temp);

      // Generate the type check.
      const int sco_offset = in_bytes(Klass::super_check_offset_offset());
      __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
      assert_clean_int(sco_temp, rax);
      generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);

      // Fetch destination element klass from the ObjArrayKlass header.
      int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
      __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
      __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
      assert_clean_int(sco_temp, rax);

      // the checkcast_copy loop needs two extra arguments:
      assert(c_rarg3 == sco_temp, "#3 already in place");
      // Set up arguments for checkcast_copy_entry.
      setup_arg_regs(4);
      __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris
      __ jump(RuntimeAddress(checkcast_copy_entry));
    }

  __ BIND(L_failed);
    __ xorptr(rax, rax);
    __ notptr(rax); // return -1
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
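
  // Editor's note: the piecemeal shift/and decoding of the layout helper
  // above corresponds to the following, using the same Klass constants
  // (sketch, illustrative only):
  //
  //   void decode_layout_helper(jint lh, int& tag, int& hsize, int& l2es) {
  //     tag   = lh >> Klass::_lh_array_tag_shift;        // 0x3 type, 0x2 obj
  //     hsize = (lh >> Klass::_lh_header_size_shift)
  //             & Klass::_lh_header_size_mask;           // header size, bytes
  //     l2es  = lh & Klass::_lh_log2_element_size_mask;  // log2(element size)
  //   }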

  void generate_arraycopy_stubs() {
    address entry;
    address entry_jbyte_arraycopy;
    address entry_jshort_arraycopy;
    address entry_jint_arraycopy;
    address entry_oop_arraycopy;
    address entry_jlong_arraycopy;
    address entry_checkcast_arraycopy;

    StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry,
                                                                          "jbyte_disjoint_arraycopy");
    StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy,
                                                                 "jbyte_arraycopy");

    StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry,
                                                                            "jshort_disjoint_arraycopy");
    StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy,
                                                                   "jshort_arraycopy");

    StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry,
                                                                            "jint_disjoint_arraycopy");
    StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry,
                                                                   &entry_jint_arraycopy, "jint_arraycopy");

    StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry,
                                                                              "jlong_disjoint_arraycopy");
    StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry,
                                                                     &entry_jlong_arraycopy, "jlong_arraycopy");


    if (UseCompressedOops) {
      StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry,
                                                                             "oop_disjoint_arraycopy");
      StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry,
                                                                    &entry_oop_arraycopy, "oop_arraycopy");
      StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry,
                                                                                    "oop_disjoint_arraycopy_uninit",
                                                                                    /*dest_uninitialized*/true);
      StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry,
                                                                           NULL, "oop_arraycopy_uninit",
                                                                           /*dest_uninitialized*/true);
    } else {
      StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry,
                                                                              "oop_disjoint_arraycopy");
      StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry,
                                                                     &entry_oop_arraycopy, "oop_arraycopy");
      StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry,
                                                                                     "oop_disjoint_arraycopy_uninit",
                                                                                     /*dest_uninitialized*/true);
      StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry,
                                                                            NULL, "oop_arraycopy_uninit",
                                                                            /*dest_uninitialized*/true);
    }

    StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
    StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
                                                                        /*dest_uninitialized*/true);

    StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy",
                                                           entry_jbyte_arraycopy,
                                                           entry_jshort_arraycopy,
                                                           entry_jint_arraycopy,
                                                           entry_jlong_arraycopy);
    StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
                                                             entry_jbyte_arraycopy,
                                                             entry_jshort_arraycopy,
                                                             entry_jint_arraycopy,
                                                             entry_oop_arraycopy,
                                                             entry_jlong_arraycopy,
                                                             entry_checkcast_arraycopy);

    StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
    StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
    StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
    StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
    StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
    StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");

    // We don't generate specialized code for HeapWord-aligned source
    // arrays, so just use the code we've already generated
    StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy;
    StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy;

    StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy;
    StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy;

    StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy;
    StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy;

    StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy;
    StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy;

    StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy;
    StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy;

    StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit;
    StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit;
  }

  void generate_math_stubs() {
    {
      StubCodeMark mark(this, "StubRoutines", "log");
      StubRoutines::_intrinsic_log = (double (*)(double)) __ pc();

      __ subq(rsp, 8);
      __ movdbl(Address(rsp, 0), xmm0);
      __ fld_d(Address(rsp, 0));
      __ flog();
      __ fstp_d(Address(rsp, 0));
      __ movdbl(xmm0, Address(rsp, 0));
      __ addq(rsp, 8);
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "log10");
      StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc();

      __ subq(rsp, 8);
      __ movdbl(Address(rsp, 0), xmm0);
      __ fld_d(Address(rsp, 0));
      __ flog10();
      __ fstp_d(Address(rsp, 0));
      __ movdbl(xmm0, Address(rsp, 0));
      __ addq(rsp, 8);
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "sin");
      StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc();

      __ subq(rsp, 8);
      __ movdbl(Address(rsp, 0), xmm0);
      __ fld_d(Address(rsp, 0));
      __ trigfunc('s');
      __ fstp_d(Address(rsp, 0));
      __ movdbl(xmm0, Address(rsp, 0));
      __ addq(rsp, 8);
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "cos");
      StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc();

      __ subq(rsp, 8);
      __ movdbl(Address(rsp, 0), xmm0);
      __ fld_d(Address(rsp, 0));
      __ trigfunc('c');
      __ fstp_d(Address(rsp, 0));
      __ movdbl(xmm0, Address(rsp, 0));
      __ addq(rsp, 8);
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "tan");
      StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc();

      __ subq(rsp, 8);
      __ movdbl(Address(rsp, 0), xmm0);
      __ fld_d(Address(rsp, 0));
      __ trigfunc('t');
      __ fstp_d(Address(rsp, 0));
      __ movdbl(xmm0, Address(rsp, 0));
      __ addq(rsp, 8);
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "exp");
      StubRoutines::_intrinsic_exp = (double (*)(double)) __ pc();

      __ subq(rsp, 8);
      __ movdbl(Address(rsp, 0), xmm0);
      __ fld_d(Address(rsp, 0));
      __ exp_with_fallback(0);
      __ fstp_d(Address(rsp, 0));
      __ movdbl(xmm0, Address(rsp, 0));
      __ addq(rsp, 8);
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "pow");
      StubRoutines::_intrinsic_pow = (double (*)(double,double)) __ pc();

      __ subq(rsp, 8);
      __ movdbl(Address(rsp, 0), xmm1);
      __ fld_d(Address(rsp, 0));
      __ movdbl(Address(rsp, 0), xmm0);
      __ fld_d(Address(rsp, 0));
      __ pow_with_fallback(0);
      __ fstp_d(Address(rsp, 0));
      __ movdbl(xmm0, Address(rsp, 0));
      __ addq(rsp, 8);
      __ ret(0);
    }
  }

  // AES intrinsic stubs
  enum {AESBlockSize = 16};

  address generate_key_shuffle_mask() {
    __ align(16);
    StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask");
    address start = __ pc();
    __ emit_data64( 0x0405060700010203, relocInfo::none );
    __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none );
    return start;
  }

  // Utility routine for loading a 128-bit key word in little endian format;
  // can optionally specify that the shuffle mask is already in an xmm register
  void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
    __ movdqu(xmmdst, Address(key, offset));
    if (xmm_shuf_mask != NULL) {
      __ pshufb(xmmdst, xmm_shuf_mask);
    } else {
      __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    }
  }
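
  // Editor's note: as we read the mask emitted by generate_key_shuffle_mask,
  // pshufb byte-reverses each 32-bit lane of the key word, i.e. a per-lane
  // bswap that converts the Java expanded key's word order into what the
  // AES instructions expect.  Sketch of the lane effect (illustrative only):
  //
  //   void shuffle_key_lanes(juint w[4]) {
  //     for (int i = 0; i < 4; i++) {
  //       juint v = w[i];
  //       w[i] = (v << 24) | ((v & 0xff00) << 8)
  //            | ((v >> 8) & 0xff00) | (v >> 24);   // bswap(v)
  //     }
  //   }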

  // Arguments:
  //
  // Inputs:
  //   c_rarg0 - source byte array address
  //   c_rarg1 - destination byte array address
  //   c_rarg2 - K (key) in little endian int array
  //
  address generate_aescrypt_encryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
    Label L_doLast;
    address start = __ pc();

    const Register from = c_rarg0; // source array address
    const Register to = c_rarg1;   // destination array address
    const Register key = c_rarg2;  // key array address
    const Register keylen = rax;

    const XMMRegister xmm_result = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;
    // On win64 xmm6-xmm15 must be preserved so don't use them.
    const XMMRegister xmm_temp1 = xmm2;
    const XMMRegister xmm_temp2 = xmm3;
    const XMMRegister xmm_temp3 = xmm4;
    const XMMRegister xmm_temp4 = xmm5;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input

    // For encryption, the Java expanded key ordering is just what we need;
    // we don't know if the key is aligned, hence not using the load-execute form.

    load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask);
    __ pxor(xmm_result, xmm_temp1);

    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);
    __ aesenc(xmm_result, xmm_temp3);
    __ aesenc(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);
    __ aesenc(xmm_result, xmm_temp3);
    __ aesenc(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);

    __ cmpl(keylen, 44);
    __ jccb(Assembler::equal, L_doLast);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);

    __ cmpl(keylen, 52);
    __ jccb(Assembler::equal, L_doLast);

    __ aesenc(xmm_result, xmm_temp1);
    __ aesenc(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);

  __ BIND(L_doLast);
    __ aesenc(xmm_result, xmm_temp1);
    __ aesenclast(xmm_result, xmm_temp2);
    __ movdqu(Address(to, 0), xmm_result); // store the result
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
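
  // Editor's note: the 44/52/60 comparisons above pick the AES round count
  // from the expanded-key length in ints, which the Java layer sizes as
  // 4*(rounds + 1):
  //
  //   int aes_rounds(int keylen_ints) {
  //     return keylen_ints / 4 - 1;   // 44 -> 10 (AES-128),
  //   }                               // 52 -> 12 (AES-192), 60 -> 14 (AES-256)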


  // Arguments:
  //
  // Inputs:
  //   c_rarg0 - source byte array address
  //   c_rarg1 - destination byte array address
  //   c_rarg2 - K (key) in little endian int array
  //
  address generate_aescrypt_decryptBlock() {
    assert(UseAES, "need AES instructions and misaligned SSE support");
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
    Label L_doLast;
    address start = __ pc();

    const Register from = c_rarg0; // source array address
    const Register to = c_rarg1;   // destination array address
    const Register key = c_rarg2;  // key array address
    const Register keylen = rax;

    const XMMRegister xmm_result = xmm0;
    const XMMRegister xmm_key_shuf_mask = xmm1;
    // On win64 xmm6-xmm15 must be preserved so don't use them.
    const XMMRegister xmm_temp1 = xmm2;
    const XMMRegister xmm_temp2 = xmm3;
    const XMMRegister xmm_temp3 = xmm4;
    const XMMRegister xmm_temp4 = xmm5;

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
    __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
    __ movdqu(xmm_result, Address(from, 0));

    // For decryption, the Java expanded key ordering is rotated one position
    // from what we want, so we start from 0x10 here and hit 0x00 last.
    // We don't know if the key is aligned, hence not using the load-execute form.
    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);

    __ pxor  (xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);
    __ aesdec(xmm_result, xmm_temp3);
    __ aesdec(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);
    __ aesdec(xmm_result, xmm_temp3);
    __ aesdec(xmm_result, xmm_temp4);

    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);
    load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask);

    __ cmpl(keylen, 44);
    __ jccb(Assembler::equal, L_doLast);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);

    __ cmpl(keylen, 52);
    __ jccb(Assembler::equal, L_doLast);

    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);

  __ BIND(L_doLast);
    __ aesdec(xmm_result, xmm_temp1);
    __ aesdec(xmm_result, xmm_temp2);

    // for decryption the aesdeclast operation is always on key+0x00
    __ aesdeclast(xmm_result, xmm_temp3);
    __ movdqu(Address(to, 0), xmm_result); // store the result
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
// destination array address
3290     const Register key      = c_rarg2;  // key array address
3291     const Register rvec     = c_rarg3;  // r byte array initialized from initvector array address
3292                                         // and left with the results of the last encryption block
3293 #ifndef _WIN64
3294     const Register len_reg  = c_rarg4;  // src len (must be multiple of blocksize 16)
3295 #else
3296     const Address  len_mem(rbp, 6 * wordSize);  // length is on stack on Win64
3297     const Register len_reg  = r10;      // pick the first volatile windows register
3298 #endif
3299     const Register pos      = rax;
3300
3301     // xmm register assignments for the loops below
3302     const XMMRegister xmm_result = xmm0;
3303     const XMMRegister xmm_temp   = xmm1;
3304     // keys 0-10 preloaded into xmm2-xmm12
3305     const int XMM_REG_NUM_KEY_FIRST = 2;
3306     const int XMM_REG_NUM_KEY_LAST  = 15;
3307     const XMMRegister xmm_key0  = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
3308     const XMMRegister xmm_key10 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10);
3309     const XMMRegister xmm_key11 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11);
3310     const XMMRegister xmm_key12 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12);
3311     const XMMRegister xmm_key13 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13);
3312
3313     __ enter(); // required for proper stackwalking of RuntimeStub frame
3314
3315 #ifdef _WIN64
3316     // on win64, fill len_reg from stack position
3317     __ movl(len_reg, len_mem);
3318     // save the xmm registers which must be preserved (xmm6-xmm15)
3319     __ subptr(rsp, -rsp_after_call_off * wordSize);
3320     for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
3321       __ movdqu(xmm_save(i), as_XMMRegister(i));
3322     }
3323 #else
3324     __ push(len_reg); // Save
3325 #endif
3326
3327     const XMMRegister xmm_key_shuf_mask = xmm_temp;  // used temporarily to swap key bytes up front
3328     __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
3329     // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0
3330     for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) {
3331       load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
3332       offset += 0x10;
3333     }
3334     __ movdqu(xmm_result, Address(rvec, 0x00));   // initialize xmm_result with r vec
3335
3336     // now split into different paths depending on the keylen (length in ints of the AESCrypt.KLE array: 44=128-bit, 52=192-bit, 60=256-bit)
3337     __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
3338     __ cmpl(rax, 44);
3339     __ jcc(Assembler::notEqual, L_key_192_256);
3340
3341     // 128-bit code follows here
3342     __ movptr(pos, 0);
3343     __ align(OptoLoopAlignment);
3344
3345     __ BIND(L_loopTop_128);
3346     __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
3347     __ pxor (xmm_result, xmm_temp);   // xor with the current r vector
3348     __ pxor (xmm_result, xmm_key0);   // do the aes rounds
3349     for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) {
3350       __ aesenc(xmm_result, as_XMMRegister(rnum));
3351     }
3352     __ aesenclast(xmm_result, xmm_key10);
3353     __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);   // store into the next 16 bytes of output
3354     // no need to store r to memory until we exit
3355     __ addptr(pos, AESBlockSize);
3356     __ subptr(len_reg, AESBlockSize);
3357     __ jcc(Assembler::notEqual, L_loopTop_128);
3358
3359     __ BIND(L_exit);
3360     __ movdqu(Address(rvec, 0), xmm_result);   // final value of r stored in rvec of CipherBlockChaining object
3361
3362 #ifdef _WIN64
3363     // restore xmm regs belonging to calling function
3364     for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
3365       __ movdqu(as_XMMRegister(i), xmm_save(i));
3366     }
3367     __ movl(rax, len_mem);
3368 #else
3369     __ pop(rax); // return length
3370 #endif
3371     __ leave(); // required for proper stackwalking of RuntimeStub frame
3372     __ ret(0);
3373
3374     __ BIND(L_key_192_256);
3375     // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
3376     load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask);
3377     load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask);
3378     __ cmpl(rax, 52);
3379     __ jcc(Assembler::notEqual, L_key_256);
3380
3381     // 192-bit code follows here (could be changed to use more xmm registers)
3382     __ movptr(pos, 0);
3383     __ align(OptoLoopAlignment);
3384
3385     __ BIND(L_loopTop_192);
3386     __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
3387     __ pxor (xmm_result, xmm_temp);   // xor with the current r vector
3388     __ pxor (xmm_result, xmm_key0);   // do the aes rounds
3389     for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) {
3390       __ aesenc(xmm_result, as_XMMRegister(rnum));
3391     }
3392     __ aesenclast(xmm_result, xmm_key12);
3393     __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);   // store into the next 16 bytes of output
3394     // no need to store r to memory until we exit
3395     __ addptr(pos, AESBlockSize);
3396     __ subptr(len_reg, AESBlockSize);
3397     __ jcc(Assembler::notEqual, L_loopTop_192);
3398     __ jmp(L_exit);
3399
3400     __ BIND(L_key_256);
3401     // 256-bit code follows here (could be changed to use more xmm registers)
3402     load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask);
3403     __ movptr(pos, 0);
3404     __ align(OptoLoopAlignment);
3405
3406     __ BIND(L_loopTop_256);
3407     __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
3408     __ pxor (xmm_result, xmm_temp);   // xor with the current r vector
3409     __ pxor (xmm_result, xmm_key0);   // do the aes rounds
3410     for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) {
3411       __ aesenc(xmm_result, as_XMMRegister(rnum));
3412     }
3413     load_key(xmm_temp, key, 0xe0);
3414     __ aesenclast(xmm_result, xmm_temp);
3415     __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);   // store into the next 16 bytes of output
3416     // no need to store r to memory until we exit
3417     __ addptr(pos, AESBlockSize);
3418     __ subptr(len_reg, AESBlockSize);
3419     __ jcc(Assembler::notEqual, L_loopTop_256);
3420     __ jmp(L_exit);
3421
3422     return start;
3423   }
3424
3425   // Safefetch stubs.
3426   void generate_safefetch(const char* name, int size, address* entry,
3427                           address* fault_pc, address* continuation_pc) {
3428     // safefetch signatures:
3429     //   int      SafeFetch32(int*      adr, int      errValue);
3430     //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
3431     //
3432     // arguments:
3433     //   c_rarg0 = adr
3434     //   c_rarg1 = errValue
3435     //
3436     // result:
3437     //   rax = *adr or errValue
3438
3439     StubCodeMark mark(this, "StubRoutines", name);
3440
3441     // Entry point, pc or function descriptor.
3442     *entry = __ pc();
3443
3444     // Load *adr into c_rarg1, may fault.
3445     *fault_pc = __ pc();
3446     switch (size) {
3447       case 4:
3448         // int32_t
3449         __ movl(c_rarg1, Address(c_rarg0, 0));
3450         break;
3451       case 8:
3452         // int64_t
3453         __ movq(c_rarg1, Address(c_rarg0, 0));
3454         break;
3455       default:
3456         ShouldNotReachHere();
3457     }
3458
3459     // return errValue or *adr
3460     *continuation_pc = __ pc();
3461     __ movq(rax, c_rarg1);
3462     __ ret(0);
3463   }
3464
3465   // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time
3466   // to hide instruction latency
3467   //
3468   // Arguments:
3469   //
3470   // Inputs:
3471   //   c_rarg0   - source byte array address
3472   //   c_rarg1   - destination byte array address
3473   //   c_rarg2   - K (key) in little endian int array
3474   //   c_rarg3   - r vector byte array address
3475   //   c_rarg4   - input length
3476   //
3477   // Output:
3478   //   rax       - input length
3479   //
3480
3481   address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
3482     assert(UseAES, "need AES instructions and misaligned SSE support");
3483     __ align(CodeEntryAlignment);
3484     StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
3485     address start = __ pc();
3486
3487     Label L_exit, L_key_192_256, L_key_256;
3488     Label L_singleBlock_loopTop_128, L_multiBlock_loopTop_128;
3489     Label L_singleBlock_loopTop_192, L_singleBlock_loopTop_256;
3490     const Register from     = c_rarg0;  // source array address
3491     const Register to       = c_rarg1;  // destination array address
3492     const Register key      = c_rarg2;  // key array address
3493     const Register rvec     = c_rarg3;  // r byte array initialized from initvector array address
3494                                         // and left with the results of the last encryption block
3495 #ifndef _WIN64
3496     const Register len_reg  = c_rarg4;  // src len (must be multiple of blocksize 16)
3497 #else
3498     const Address  len_mem(rbp, 6 * wordSize);  // length is on stack on Win64
3499     const Register len_reg  = r10;      // pick the first volatile windows register
3500 #endif
3501     const Register pos      = rax;
3502
3503     // round keys 0-10 preloaded into xmm5-xmm15
3504     const int XMM_REG_NUM_KEY_FIRST = 5;
3505     const int XMM_REG_NUM_KEY_LAST  = 15;
3506     const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
3507     const XMMRegister xmm_key_last  = as_XMMRegister(XMM_REG_NUM_KEY_LAST);
3508
3509     __ enter(); // required for proper stackwalking of RuntimeStub frame
3510
3511 #ifdef _WIN64
3512     // on win64, fill len_reg from stack position
3513     __ movl(len_reg, len_mem);
3514     // save the xmm registers which must be preserved (xmm6-xmm15)
3515     __ subptr(rsp, -rsp_after_call_off * wordSize);
3516     for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
3517       __ movdqu(xmm_save(i), as_XMMRegister(i));
3518     }
3519 #else
3520     __ push(len_reg); // Save
3521 #endif
3522
3523     // the java expanded key ordering is rotated one position from what we want
3524     // so we start from 0x10 here and hit 0x00 last
3525     const XMMRegister xmm_key_shuf_mask = xmm1;  // used temporarily to swap key bytes up front
3526     __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
3527     // load up xmm regs 5 thru 15 with keys 0x10 - 0xa0, then 0x00 last
3528     for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) {
3529       load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
3530       offset += 0x10;
3531     }
3532     load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask);
3533
3534     const XMMRegister xmm_prev_block_cipher = xmm1;  // holds cipher of previous block
3535
3536     // registers holding the four results in the parallelized loop
3537     const XMMRegister xmm_result0 = xmm0;
3538     const XMMRegister xmm_result1 = xmm2;
3539     const XMMRegister xmm_result2 = xmm3;
3540     const XMMRegister xmm_result3 = xmm4;
3541
3542     __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00));   // initialize with initial rvec
3543
3544     // now split into different paths depending on the keylen (length in ints of the AESCrypt.KLE array: 44=128-bit, 52=192-bit, 60=256-bit)
3545     __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
3546     __ cmpl(rax, 44);
3547     __ jcc(Assembler::notEqual, L_key_192_256);
3548
3549
3550     // 128-bit code follows here, parallelized
3551     __ movptr(pos, 0);
3552     __ align(OptoLoopAlignment);
3553     __ BIND(L_multiBlock_loopTop_128);
3554     __ cmpptr(len_reg, 4*AESBlockSize);   // see if at least 4 blocks left
3555     __ jcc(Assembler::less, L_singleBlock_loopTop_128);
3556
3557     __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0*AESBlockSize));   // get next 4 blocks into xmm result registers
3558     __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1*AESBlockSize));
3559     __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2*AESBlockSize));
3560     __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3*AESBlockSize));
3561
3562 #define DoFour(opc, src_reg)        \
3563     __ opc(xmm_result0, src_reg);   \
3564     __ opc(xmm_result1, src_reg);   \
3565     __ opc(xmm_result2, src_reg);   \
3566     __ opc(xmm_result3, src_reg);
3567
3568     DoFour(pxor, xmm_key_first);
3569     for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
3570       DoFour(aesdec, as_XMMRegister(rnum));
3571     }
3572     DoFour(aesdeclast, xmm_key_last);
3573     // for each result, xor with the r vector of previous cipher block
3574     __ pxor(xmm_result0, xmm_prev_block_cipher);
3575     __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0*AESBlockSize));
3576     __ pxor(xmm_result1, xmm_prev_block_cipher);
3577     __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1*AESBlockSize));
3578     __ pxor(xmm_result2, xmm_prev_block_cipher);
3579     __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2*AESBlockSize));
3580     __ pxor(xmm_result3, xmm_prev_block_cipher);
3581     __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3*AESBlockSize));   // this will carry over to next set of blocks
3582
3583     __ movdqu(Address(to, pos, Address::times_1, 0*AESBlockSize), xmm_result0);   // store 4 results into the next 64 bytes of output
3584     __ movdqu(Address(to, pos, Address::times_1, 1*AESBlockSize), xmm_result1);
3585     __ movdqu(Address(to, pos, Address::times_1, 2*AESBlockSize), xmm_result2);
3586     __ movdqu(Address(to, pos, Address::times_1, 3*AESBlockSize), xmm_result3);
3587
3588     __ addptr(pos, 4*AESBlockSize);
3589     __ subptr(len_reg, 4*AESBlockSize);
3590     __ jmp(L_multiBlock_loopTop_128);
3591
3592     // registers used in the non-parallelized loops
3593     // xmm register assignments for the loops below
3594     const XMMRegister xmm_result = xmm0;
3595     const XMMRegister xmm_prev_block_cipher_save = xmm2;
3596     const XMMRegister xmm_key11 = xmm3;
3597     const XMMRegister xmm_key12 = xmm4;
3598     const XMMRegister xmm_temp  = xmm4;
3599
3600     __ align(OptoLoopAlignment);
3601     __ BIND(L_singleBlock_loopTop_128);
3602     __ cmpptr(len_reg, 0);   // any blocks left?
3603 __ jcc(Assembler::equal, L_exit); 3604 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 3605 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector 3606 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds 3607 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) { 3608 __ aesdec(xmm_result, as_XMMRegister(rnum)); 3609 } 3610 __ aesdeclast(xmm_result, xmm_key_last); 3611 __ pxor (xmm_result, xmm_prev_block_cipher); // xor with the current r vector 3612 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3613 // no need to store r to memory until we exit 3614 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block 3615 3616 __ addptr(pos, AESBlockSize); 3617 __ subptr(len_reg, AESBlockSize); 3618 __ jmp(L_singleBlock_loopTop_128); 3619 3620 3621 __ BIND(L_exit); 3622 __ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object 3623 #ifdef _WIN64 3624 // restore regs belonging to calling function 3625 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3626 __ movdqu(as_XMMRegister(i), xmm_save(i)); 3627 } 3628 __ movl(rax, len_mem); 3629 #else 3630 __ pop(rax); // return length 3631 #endif 3632 __ leave(); // required for proper stackwalking of RuntimeStub frame 3633 __ ret(0); 3634 3635 3636 __ BIND(L_key_192_256); 3637 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 3638 load_key(xmm_key11, key, 0xb0); 3639 __ cmpl(rax, 52); 3640 __ jcc(Assembler::notEqual, L_key_256); 3641 3642 // 192-bit code follows here (could be optimized to use parallelism) 3643 load_key(xmm_key12, key, 0xc0); // 192-bit key goes up to c0 3644 __ movptr(pos, 0); 3645 __ align(OptoLoopAlignment); 3646 3647 __ BIND(L_singleBlock_loopTop_192); 3648 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 3649 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector 3650 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds 3651 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) { 3652 __ aesdec(xmm_result, as_XMMRegister(rnum)); 3653 } 3654 __ aesdec(xmm_result, xmm_key11); 3655 __ aesdec(xmm_result, xmm_key12); 3656 __ aesdeclast(xmm_result, xmm_key_last); // xmm15 always came from key+0 3657 __ pxor (xmm_result, xmm_prev_block_cipher); // xor with the current r vector 3658 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3659 // no need to store r to memory until we exit 3660 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block 3661 __ addptr(pos, AESBlockSize); 3662 __ subptr(len_reg, AESBlockSize); 3663 __ jcc(Assembler::notEqual,L_singleBlock_loopTop_192); 3664 __ jmp(L_exit); 3665 3666 __ BIND(L_key_256); 3667 // 256-bit code follows here (could be optimized to use parallelism) 3668 __ movptr(pos, 0); 3669 __ align(OptoLoopAlignment); 3670 3671 __ BIND(L_singleBlock_loopTop_256); 3672 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 3673 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector 3674 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds 3675 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= 
XMM_REG_NUM_KEY_LAST - 1; rnum++) { 3676 __ aesdec(xmm_result, as_XMMRegister(rnum)); 3677 } 3678 __ aesdec(xmm_result, xmm_key11); 3679 load_key(xmm_temp, key, 0xc0); 3680 __ aesdec(xmm_result, xmm_temp); 3681 load_key(xmm_temp, key, 0xd0); 3682 __ aesdec(xmm_result, xmm_temp); 3683 load_key(xmm_temp, key, 0xe0); // 256-bit key goes up to e0 3684 __ aesdec(xmm_result, xmm_temp); 3685 __ aesdeclast(xmm_result, xmm_key_last); // xmm15 came from key+0 3686 __ pxor (xmm_result, xmm_prev_block_cipher); // xor with the current r vector 3687 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3688 // no need to store r to memory until we exit 3689 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block 3690 __ addptr(pos, AESBlockSize); 3691 __ subptr(len_reg, AESBlockSize); 3692 __ jcc(Assembler::notEqual,L_singleBlock_loopTop_256); 3693 __ jmp(L_exit); 3694 3695 return start; 3696 } 3697 3698 3699 // byte swap x86 long 3700 address generate_ghash_long_swap_mask() { 3701 __ align(CodeEntryAlignment); 3702 StubCodeMark mark(this, "StubRoutines", "ghash_long_swap_mask"); 3703 address start = __ pc(); 3704 __ emit_data64(0x0f0e0d0c0b0a0908, relocInfo::none ); 3705 __ emit_data64(0x0706050403020100, relocInfo::none ); 3706 return start; 3707 } 3708 3709 // byte swap x86 byte array 3710 address generate_ghash_byte_swap_mask() { 3711 __ align(CodeEntryAlignment); 3712 StubCodeMark mark(this, "StubRoutines", "ghash_byte_swap_mask"); 3713 address start = __ pc(); 3714 __ emit_data64(0x08090a0b0c0d0e0f, relocInfo::none ); 3715 __ emit_data64(0x0001020304050607, relocInfo::none ); 3716 return start; 3717 } 3718 3719 /* Single and multi-block ghash operations */ 3720 address generate_ghash_processBlocks() { 3721 __ align(CodeEntryAlignment); 3722 Label L_ghash_loop, L_exit; 3723 StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks"); 3724 address start = __ pc(); 3725 3726 const Register state = c_rarg0; 3727 const Register subkeyH = c_rarg1; 3728 const Register data = c_rarg2; 3729 const Register blocks = c_rarg3; 3730 3731 #ifdef _WIN64 3732 const int XMM_REG_LAST = 10; 3733 #endif 3734 3735 const XMMRegister xmm_temp0 = xmm0; 3736 const XMMRegister xmm_temp1 = xmm1; 3737 const XMMRegister xmm_temp2 = xmm2; 3738 const XMMRegister xmm_temp3 = xmm3; 3739 const XMMRegister xmm_temp4 = xmm4; 3740 const XMMRegister xmm_temp5 = xmm5; 3741 const XMMRegister xmm_temp6 = xmm6; 3742 const XMMRegister xmm_temp7 = xmm7; 3743 const XMMRegister xmm_temp8 = xmm8; 3744 const XMMRegister xmm_temp9 = xmm9; 3745 const XMMRegister xmm_temp10 = xmm10; 3746 3747 __ enter(); 3748 3749 #ifdef _WIN64 3750 // save the xmm registers which must be preserved 6-10 3751 __ subptr(rsp, -rsp_after_call_off * wordSize); 3752 for (int i = 6; i <= XMM_REG_LAST; i++) { 3753 __ movdqu(xmm_save(i), as_XMMRegister(i)); 3754 } 3755 #endif 3756 3757 __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr())); 3758 3759 __ movdqu(xmm_temp0, Address(state, 0)); 3760 __ pshufb(xmm_temp0, xmm_temp10); 3761 3762 3763 __ BIND(L_ghash_loop); 3764 __ movdqu(xmm_temp2, Address(data, 0)); 3765 __ pshufb(xmm_temp2, ExternalAddress(StubRoutines::x86::ghash_byte_swap_mask_addr())); 3766 3767 __ movdqu(xmm_temp1, Address(subkeyH, 0)); 3768 __ pshufb(xmm_temp1, xmm_temp10); 3769 3770 __ pxor(xmm_temp0, xmm_temp2); 3771 3772 // 3773 // Multiply with the hash key 3774 // 3775 __ movdqu(xmm_temp3, xmm_temp0); 3776 
__ pclmulqdq(xmm_temp3, xmm_temp1, 0);      // xmm3 holds a0*b0
3777     __ movdqu(xmm_temp4, xmm_temp0);
3778     __ pclmulqdq(xmm_temp4, xmm_temp1, 16);     // xmm4 holds a0*b1
3779
3780     __ movdqu(xmm_temp5, xmm_temp0);
3781     __ pclmulqdq(xmm_temp5, xmm_temp1, 1);      // xmm5 holds a1*b0
3782     __ movdqu(xmm_temp6, xmm_temp0);
3783     __ pclmulqdq(xmm_temp6, xmm_temp1, 17);     // xmm6 holds a1*b1
3784
3785     __ pxor(xmm_temp4, xmm_temp5);      // xmm4 holds a0*b1 + a1*b0
3786
3787     __ movdqu(xmm_temp5, xmm_temp4);    // move the contents of xmm4 to xmm5
3788     __ psrldq(xmm_temp4, 8);    // shift xmm4 right by 64 bits
3789     __ pslldq(xmm_temp5, 8);    // shift xmm5 left by 64 bits
3790     __ pxor(xmm_temp3, xmm_temp5);
3791     __ pxor(xmm_temp6, xmm_temp4);      // Register pair <xmm6:xmm3> holds the result
3792                                         // of the carry-less multiplication of
3793                                         // xmm0 by xmm1.
3794
3795     // We shift the result of the multiplication by one bit position
3796     // to the left to compensate for the fact that the bits are reversed.
3797     __ movdqu(xmm_temp7, xmm_temp3);
3798     __ movdqu(xmm_temp8, xmm_temp6);
3799     __ pslld(xmm_temp3, 1);
3800     __ pslld(xmm_temp6, 1);
3801     __ psrld(xmm_temp7, 31);
3802     __ psrld(xmm_temp8, 31);
3803     __ movdqu(xmm_temp9, xmm_temp7);
3804     __ pslldq(xmm_temp8, 4);
3805     __ pslldq(xmm_temp7, 4);
3806     __ psrldq(xmm_temp9, 12);
3807     __ por(xmm_temp3, xmm_temp7);
3808     __ por(xmm_temp6, xmm_temp8);
3809     __ por(xmm_temp6, xmm_temp9);
3810
3811     //
3812     // First phase of the reduction
3813     //
3814     // Move xmm3 into xmm7, xmm8, xmm9 in order to perform the shifts
3815     // independently.
3816     __ movdqu(xmm_temp7, xmm_temp3);
3817     __ movdqu(xmm_temp8, xmm_temp3);
3818     __ movdqu(xmm_temp9, xmm_temp3);
3819     __ pslld(xmm_temp7, 31);    // packed left shift by 31
3820     __ pslld(xmm_temp8, 30);    // packed left shift by 30
3821     __ pslld(xmm_temp9, 25);    // packed left shift by 25
3822     __ pxor(xmm_temp7, xmm_temp8);      // xor the shifted versions
3823     __ pxor(xmm_temp7, xmm_temp9);
3824     __ movdqu(xmm_temp8, xmm_temp7);
3825     __ pslldq(xmm_temp7, 12);
3826     __ psrldq(xmm_temp8, 4);
3827     __ pxor(xmm_temp3, xmm_temp7);      // first phase of the reduction complete
3828
3829     //
3830     // Second phase of the reduction
3831     //
3832     // Make 3 copies of xmm3 in xmm2, xmm4, xmm5 for doing these
3833     // shift operations.
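    // The shift counts in both reduction phases come from the GHASH
    // polynomial g(x) = x^128 + x^7 + x^2 + x + 1: 1, 2 and 7 are the
    // exponents of its low-order terms, and 31, 30, 25 are their 32-bit
    // complements (32-1, 32-2, 32-7), needed because pslld/psrld shift
    // each 32-bit lane independently. For reference, the multiply-and-
    // reduce step performed by this loop corresponds to the following
    // bit-level model (a hypothetical plain-C helper, for illustration
    // only -- it uses the non-reflected convention, so the bit-reflection
    // fix-ups done above with the one-bit shifts do not appear):
    //
    //   static void gf128_mul(uint64_t x[2], const uint64_t h[2]) {
    //     uint64_t z[2] = { 0, 0 };
    //     uint64_t v[2] = { h[0], h[1] };         // v = h
    //     for (int i = 0; i < 128; i++) {
    //       if ((x[i / 64] >> (i % 64)) & 1) {    // bit i of x set?
    //         z[0] ^= v[0]; z[1] ^= v[1];         // z ^= v
    //       }
    //       int carry = (int)(v[1] >> 63);        // v = v * x
    //       v[1] = (v[1] << 1) | (v[0] >> 63);
    //       v[0] <<= 1;
    //       if (carry) v[0] ^= 0x87;              // reduce: x^7 + x^2 + x + 1
    //     }
    //     x[0] = z[0]; x[1] = z[1];               // x = x * h mod g(x)
    //   }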
3834     __ movdqu(xmm_temp2, xmm_temp3);
3835     __ movdqu(xmm_temp4, xmm_temp3);
3836     __ movdqu(xmm_temp5, xmm_temp3);
3837     __ psrld(xmm_temp2, 1);     // packed right shift by 1
3838     __ psrld(xmm_temp4, 2);     // packed right shift by 2
3839     __ psrld(xmm_temp5, 7);     // packed right shift by 7
3840     __ pxor(xmm_temp2, xmm_temp4);      // xor the shifted versions
3841     __ pxor(xmm_temp2, xmm_temp5);
3842     __ pxor(xmm_temp2, xmm_temp8);
3843     __ pxor(xmm_temp3, xmm_temp2);
3844     __ pxor(xmm_temp6, xmm_temp3);      // the result is in xmm6
3845
3846     __ decrement(blocks);
3847     __ jcc(Assembler::zero, L_exit);
3848     __ movdqu(xmm_temp0, xmm_temp6);
3849     __ addptr(data, 16);
3850     __ jmp(L_ghash_loop);
3851
3852     __ BIND(L_exit);
3853     __ pshufb(xmm_temp6, xmm_temp10);          // Byte swap 16-byte result
3854     __ movdqu(Address(state, 0), xmm_temp6);   // store the result
3855
3856 #ifdef _WIN64
3857     // restore xmm regs belonging to calling function
3858     for (int i = 6; i <= XMM_REG_LAST; i++) {
3859       __ movdqu(as_XMMRegister(i), xmm_save(i));
3860     }
3861 #endif
3862     __ leave();
3863     __ ret(0);
3864     return start;
3865   }
3866
3867   /**
3868    *  Arguments:
3869    *
3870    *  Inputs:
3871    *    c_rarg0   - int crc
3872    *    c_rarg1   - byte* buf
3873    *    c_rarg2   - int length
3874    *
3875    *  Output:
3876    *    rax       - int crc result
3877    */
3878   address generate_updateBytesCRC32() {
3879     assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions");
3880
3881     __ align(CodeEntryAlignment);
3882     StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32");
3883
3884     address start = __ pc();
3885     // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
3886     // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
3887     // rscratch1: r10
3888     const Register crc   = c_rarg0;  // crc
3889     const Register buf   = c_rarg1;  // source java byte array address
3890     const Register len   = c_rarg2;  // length
3891     const Register table = c_rarg3;  // crc_table address (reuse register)
3892     const Register tmp   = r11;
3893     assert_different_registers(crc, buf, len, table, tmp, rax);
3894
3895     BLOCK_COMMENT("Entry:");
3896     __ enter(); // required for proper stackwalking of RuntimeStub frame
3897
3898     __ kernel_crc32(crc, buf, len, table, tmp);
3899
3900     __ movl(rax, crc);
3901     __ leave(); // required for proper stackwalking of RuntimeStub frame
3902     __ ret(0);
3903
3904     return start;
3905   }
3906
3907
3908   /**
3909    *  Arguments:
3910    *
3911    *  Input:
3912    *    c_rarg0   - x address
3913    *    c_rarg1   - x length
3914    *    c_rarg2   - y address
3915    *    c_rarg3   - y length
3916    *   not Win64
3917    *    c_rarg4   - z address
3918    *    c_rarg5   - z length
3919    *   Win64
3920    *    rsp+40    - z address
3921    *    rsp+48    - z length
3922    */
3923   address generate_multiplyToLen() {
3924     __ align(CodeEntryAlignment);
3925     StubCodeMark mark(this, "StubRoutines", "multiplyToLen");
3926
3927     address start = __ pc();
3928     // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
3929     // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
3930     const Register x     = rdi;
3931     const Register xlen  = rax;
3932     const Register y     = rsi;
3933     const Register ylen  = rcx;
3934     const Register z     = r8;
3935     const Register zlen  = r11;
3936
3937     // The following registers will be saved on the stack in multiply_to_len().
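    // multiply_to_len() itself computes the BigInteger-style schoolbook
    // product z = x * y over 32-bit limbs stored most significant first.
    // As a plain-C reference model (a hypothetical helper, for
    // illustration only -- the real routine works on 64-bit chunks and
    // several limbs per iteration):
    //
    //   static void multiply_to_len_ref(const uint32_t* x, int xlen,
    //                                   const uint32_t* y, int ylen,
    //                                   uint32_t* z) {  // z has xlen+ylen limbs
    //     for (int i = 0; i < xlen + ylen; i++) z[i] = 0;
    //     for (int i = xlen - 1; i >= 0; i--) {
    //       uint64_t carry = 0;
    //       for (int j = ylen - 1, k = i + j + 1; j >= 0; j--, k--) {
    //         uint64_t p = (uint64_t)x[i] * y[j] + z[k] + carry;
    //         z[k] = (uint32_t)p;       // low 32 bits stay in place
    //         carry = p >> 32;          // high 32 bits carry up
    //       }
    //       z[i] = (uint32_t)carry;
    //     }
    //   }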
3938     const Register tmp1  = r12;
3939     const Register tmp2  = r13;
3940     const Register tmp3  = r14;
3941     const Register tmp4  = r15;
3942     const Register tmp5  = rbx;
3943
3944     BLOCK_COMMENT("Entry:");
3945     __ enter(); // required for proper stackwalking of RuntimeStub frame
3946
3947 #ifndef _WIN64
3948     __ movptr(zlen, r9); // Save r9 in r11 - zlen
3949 #endif
3950     setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx
3951                        // ylen => rcx, z => r8, zlen => r11
3952                        // r9 and r10 may be used to save non-volatile registers
3953 #ifdef _WIN64
3954     // last 2 arguments (#4, #5) are on stack on Win64
3955     __ movptr(z, Address(rsp, 6 * wordSize));
3956     __ movptr(zlen, Address(rsp, 7 * wordSize));
3957 #endif
3958
3959     __ movptr(xlen, rsi);
3960     __ movptr(y, rdx);
3961     __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5);
3962
3963     restore_arg_regs();
3964
3965     __ leave(); // required for proper stackwalking of RuntimeStub frame
3966     __ ret(0);
3967
3968     return start;
3969   }
3970
3971   /**
3972    *  Arguments:
3973    *
3974    *  Input:
3975    *    c_rarg0   - x address
3976    *    c_rarg1   - x length
3977    *    c_rarg2   - z address
3978    *    c_rarg3   - z length
3979    *
3980    */
3981   address generate_squareToLen() {
3982
3983     __ align(CodeEntryAlignment);
3984     StubCodeMark mark(this, "StubRoutines", "squareToLen");
3985
3986     address start = __ pc();
3987     // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
3988     // Unix:  rdi, rsi, rdx, rcx (c_rarg0, c_rarg1, ...)
3989     const Register x     = rdi;
3990     const Register len   = rsi;
3991     const Register z     = r8;
3992     const Register zlen  = rcx;
3993
3994     const Register tmp1  = r12;
3995     const Register tmp2  = r13;
3996     const Register tmp3  = r14;
3997     const Register tmp4  = r15;
3998     const Register tmp5  = rbx;
3999
4000     BLOCK_COMMENT("Entry:");
4001     __ enter(); // required for proper stackwalking of RuntimeStub frame
4002
4003     setup_arg_regs(4); // x => rdi, len => rsi, z => rdx
4004                        // zlen => rcx
4005                        // r9 and r10 may be used to save non-volatile registers
4006     __ movptr(r8, rdx);
4007     __ square_to_len(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax);
4008
4009     restore_arg_regs();
4010
4011     __ leave(); // required for proper stackwalking of RuntimeStub frame
4012     __ ret(0);
4013
4014     return start;
4015   }
4016
4017   /**
4018    *  Arguments:
4019    *
4020    *  Input:
4021    *    c_rarg0   - out address
4022    *    c_rarg1   - in address
4023    *    c_rarg2   - offset
4024    *    c_rarg3   - len
4025    *   not Win64
4026    *    c_rarg4   - k
4027    *   Win64
4028    *    rsp+40    - k
4029    */
4030   address generate_mulAdd() {
4031     __ align(CodeEntryAlignment);
4032     StubCodeMark mark(this, "StubRoutines", "mulAdd");
4033
4034     address start = __ pc();
4035     // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
4036     // Unix:  rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
4037     const Register out    = rdi;
4038     const Register in     = rsi;
4039     const Register offset = r11;
4040     const Register len    = rcx;
4041     const Register k      = r8;
4042
4043     // The following registers will be saved on the stack in mul_add().
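    // mul_add() computes out[...] += in[0..len-1] * k over 32-bit limbs
    // (most significant limb first) and returns the final carry. A plain-C
    // reference model (a hypothetical helper, for illustration only --
    // the offset bookkeeping into the out array is omitted):
    //
    //   static uint32_t mul_add_ref(uint32_t* out, const uint32_t* in,
    //                               int len, uint32_t k) {
    //     uint64_t carry = 0;
    //     for (int j = len - 1; j >= 0; j--) {
    //       uint64_t p = (uint64_t)in[j] * k + out[j] + carry;
    //       out[j] = (uint32_t)p;
    //       carry = p >> 32;
    //     }
    //     return (uint32_t)carry;
    //   }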
4044 const Register tmp1 = r12; 4045 const Register tmp2 = r13; 4046 const Register tmp3 = r14; 4047 const Register tmp4 = r15; 4048 const Register tmp5 = rbx; 4049 4050 BLOCK_COMMENT("Entry:"); 4051 __ enter(); // required for proper stackwalking of RuntimeStub frame 4052 4053 setup_arg_regs(4); // out => rdi, in => rsi, offset => rdx 4054 // len => rcx, k => r8 4055 // r9 and r10 may be used to save non-volatile registers 4056 #ifdef _WIN64 4057 // last argument is on stack on Win64 4058 __ movl(k, Address(rsp, 6 * wordSize)); 4059 #endif 4060 __ movptr(r11, rdx); // move offset in rdx to offset(r11) 4061 __ mul_add(out, in, offset, len, k, tmp1, tmp2, tmp3, tmp4, tmp5, rdx, rax); 4062 4063 restore_arg_regs(); 4064 4065 __ leave(); // required for proper stackwalking of RuntimeStub frame 4066 __ ret(0); 4067 4068 return start; 4069 } 4070 4071 4072 #undef __ 4073 #define __ masm-> 4074 4075 // Continuation point for throwing of implicit exceptions that are 4076 // not handled in the current activation. Fabricates an exception 4077 // oop and initiates normal exception dispatching in this 4078 // frame. Since we need to preserve callee-saved values (currently 4079 // only for C2, but done for C1 as well) we need a callee-saved oop 4080 // map and therefore have to make these stubs into RuntimeStubs 4081 // rather than BufferBlobs. If the compiler needs all registers to 4082 // be preserved between the fault point and the exception handler 4083 // then it must assume responsibility for that in 4084 // AbstractCompiler::continuation_for_implicit_null_exception or 4085 // continuation_for_implicit_division_by_zero_exception. All other 4086 // implicit exceptions (e.g., NullPointerException or 4087 // AbstractMethodError on entry) are either at call sites or 4088 // otherwise assume that stack unwinding will be initiated, so 4089 // caller saved registers were assumed volatile in the compiler. 4090 address generate_throw_exception(const char* name, 4091 address runtime_entry, 4092 Register arg1 = noreg, 4093 Register arg2 = noreg) { 4094 // Information about frame layout at time of blocking runtime call. 4095 // Note that we only have to preserve callee-saved registers since 4096 // the compilers are responsible for supplying a continuation point 4097 // if they expect all registers to be preserved. 
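    // A worked example of the layout below: the slots are counted in
    // 32-bit words, so on Linux, where frame::arg_reg_save_area_bytes is 0,
    // rbp_off/rbp_off2 are slots 0-1, return_off/return_off2 are slots 2-3,
    // and framesize is 4 (two 8-byte stack words); on Win64 the 32-byte
    // register parameter area shifts everything up by 8 slots, giving a
    // framesize of 12. The is_even(framesize/2) assert below is the
    // 16-byte stack alignment check: framesize/2 is the frame size in
    // 8-byte words, and an even count of those is a multiple of 16 bytes.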
4098     enum layout {
4099       rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
4100       rbp_off2,
4101       return_off,
4102       return_off2,
4103       framesize // inclusive of return address
4104     };
4105
4106     int insts_size = 512;
4107     int locs_size  = 64;
4108
4109     CodeBuffer code(name, insts_size, locs_size);
4110     OopMapSet* oop_maps  = new OopMapSet();
4111     MacroAssembler* masm = new MacroAssembler(&code);
4112
4113     address start = __ pc();
4114
4115     // This is an inlined and slightly modified version of call_VM
4116     // which has the ability to fetch the return PC out of
4117     // thread-local storage and also sets up last_Java_sp slightly
4118     // differently than the real call_VM
4119
4120     __ enter(); // required for proper stackwalking of RuntimeStub frame
4121
4122     assert(is_even(framesize/2), "sp not 16-byte aligned");
4123
4124     // return address and rbp are already in place
4125     __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog
4126
4127     int frame_complete = __ pc() - start;
4128
4129     // Set up last_Java_sp and last_Java_fp
4130     address the_pc = __ pc();
4131     __ set_last_Java_frame(rsp, rbp, the_pc);
4132     __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack
4133
4134     // Call runtime
4135     if (arg1 != noreg) {
4136       assert(arg2 != c_rarg1, "clobbered");
4137       __ movptr(c_rarg1, arg1);
4138     }
4139     if (arg2 != noreg) {
4140       __ movptr(c_rarg2, arg2);
4141     }
4142     __ movptr(c_rarg0, r15_thread);
4143     BLOCK_COMMENT("call runtime_entry");
4144     __ call(RuntimeAddress(runtime_entry));
4145
4146     // Generate oop map
4147     OopMap* map = new OopMap(framesize, 0);
4148
4149     oop_maps->add_gc_map(the_pc - start, map);
4150
4151     __ reset_last_Java_frame(true, true);
4152
4153     __ leave(); // required for proper stackwalking of RuntimeStub frame
4154
4155     // check for pending exceptions
4156 #ifdef ASSERT
4157     Label L;
4158     __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()),
4159               (int32_t) NULL_WORD);
4160     __ jcc(Assembler::notEqual, L);
4161     __ should_not_reach_here();
4162     __ bind(L);
4163 #endif // ASSERT
4164     __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
4165
4166
4167     // codeBlob framesize is in words (not VMRegImpl::slot_size)
4168     RuntimeStub* stub =
4169       RuntimeStub::new_runtime_stub(name,
4170                                     &code,
4171                                     frame_complete,
4172                                     (framesize >> (LogBytesPerWord - LogBytesPerInt)),
4173                                     oop_maps, false);
4174     return stub->entry_point();
4175   }
4176
4177   void create_control_words() {
4178     // Round to nearest, 53-bit mode, exceptions masked
4179     StubRoutines::_fpu_cntrl_wrd_std   = 0x027F;
4180     // Round to zero, 53-bit mode, exceptions masked
4181     StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F;
4182     // Round to nearest, 24-bit mode, exceptions masked
4183     StubRoutines::_fpu_cntrl_wrd_24    = 0x007F;
4184     // Round to nearest, 64-bit mode, exceptions masked
4185     StubRoutines::_fpu_cntrl_wrd_64    = 0x037F;
4186     // MXCSR default: round to nearest, all exceptions masked
4187     StubRoutines::_mxcsr_std           = 0x1F80;
4188     // Note: the following two constants are 80-bit values;
4189     // the layout is critical for correct loading by the FPU.
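    // Decoding the 80-bit constants below: an x87 extended-precision value
    // is a 15-bit biased exponent followed by a 64-bit mantissa with an
    // explicit integer bit. For _fpu_subnormal_bias1 the exponent word
    // 0x03ff is 1023, and 1023 - 16383 (the bias) = -15360; the mantissa
    // 0x8000000000000000 is exactly 1.0, hence the value 2^(-15360).
    // Likewise for _fpu_subnormal_bias2: 0x7bff = 31743, and
    // 31743 - 16383 = +15360.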
4190     // Bias for strict fp multiply/divide
4191     StubRoutines::_fpu_subnormal_bias1[0]= 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000
4192     StubRoutines::_fpu_subnormal_bias1[1]= 0x80000000;
4193     StubRoutines::_fpu_subnormal_bias1[2]= 0x03ff;
4194     // Un-Bias for strict fp multiply/divide
4195     StubRoutines::_fpu_subnormal_bias2[0]= 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000
4196     StubRoutines::_fpu_subnormal_bias2[1]= 0x80000000;
4197     StubRoutines::_fpu_subnormal_bias2[2]= 0x7bff;
4198   }
4199
4200   // Initialization
4201   void generate_initial() {
4202     // Generates all stubs and initializes the entry points
4203
4204     // These platform-specific settings are needed by generate_call_stub()
4205     create_control_words();
4206
4207     // entry points that exist on all platforms. Note: This is code
4208     // that could be shared among different platforms - however the
4209     // benefit seems to be smaller than the disadvantage of having a
4210     // much more complicated generator structure. See also comment in
4211     // stubRoutines.hpp.
4212
4213     StubRoutines::_forward_exception_entry = generate_forward_exception();
4214
4215     StubRoutines::_call_stub_entry =
4216       generate_call_stub(StubRoutines::_call_stub_return_address);
4217
4218     // is referenced by megamorphic call
4219     StubRoutines::_catch_exception_entry = generate_catch_exception();
4220
4221     // atomic calls
4222     StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
4223     StubRoutines::_atomic_xchg_ptr_entry     = generate_atomic_xchg_ptr();
4224     StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
4225     StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
4226     StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
4227     StubRoutines::_atomic_add_entry          = generate_atomic_add();
4228     StubRoutines::_atomic_add_ptr_entry      = generate_atomic_add_ptr();
4229     StubRoutines::_fence_entry               = generate_orderaccess_fence();
4230
4231     StubRoutines::_handler_for_unsafe_access_entry =
4232       generate_handler_for_unsafe_access();
4233
4234     // platform dependent
4235     StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
4236     StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();
4237
4238     StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
4239
4240     // Build this early so it's available for the interpreter.
4241     StubRoutines::_throw_StackOverflowError_entry =
4242       generate_throw_exception("StackOverflowError throw_exception",
4243                                CAST_FROM_FN_PTR(address,
4244                                                 SharedRuntime::
4245                                                 throw_StackOverflowError));
4246     if (UseCRC32Intrinsics) {
4247       // set table address before generating the stubs that use it
4248       StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
4249       StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
4250     }
4251   }
4252
4253   void generate_all() {
4254     // Generates all stubs and initializes the entry points
4255
4256     // These entry points require SharedInfo::stack0 to be set up in
4257     // non-core builds and need to be relocatable, so they each
4258     // fabricate a RuntimeStub internally.
4259 StubRoutines::_throw_AbstractMethodError_entry = 4260 generate_throw_exception("AbstractMethodError throw_exception", 4261 CAST_FROM_FN_PTR(address, 4262 SharedRuntime:: 4263 throw_AbstractMethodError)); 4264 4265 StubRoutines::_throw_IncompatibleClassChangeError_entry = 4266 generate_throw_exception("IncompatibleClassChangeError throw_exception", 4267 CAST_FROM_FN_PTR(address, 4268 SharedRuntime:: 4269 throw_IncompatibleClassChangeError)); 4270 4271 StubRoutines::_throw_NullPointerException_at_call_entry = 4272 generate_throw_exception("NullPointerException at call throw_exception", 4273 CAST_FROM_FN_PTR(address, 4274 SharedRuntime:: 4275 throw_NullPointerException_at_call)); 4276 4277 // entry points that are platform specific 4278 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup(); 4279 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup(); 4280 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup(); 4281 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup(); 4282 4283 StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF); 4284 StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000); 4285 StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF); 4286 StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000); 4287 4288 // support for verify_oop (must happen after universe_init) 4289 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); 4290 4291 // arraycopy stubs used by compilers 4292 generate_arraycopy_stubs(); 4293 4294 generate_math_stubs(); 4295 4296 // don't bother generating these AES intrinsic stubs unless global flag is set 4297 if (UseAESIntrinsics) { 4298 StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others 4299 4300 StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock(); 4301 StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock(); 4302 StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt(); 4303 StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel(); 4304 } 4305 4306 // Generate GHASH intrinsics code 4307 if (UseGHASHIntrinsics) { 4308 StubRoutines::x86::_ghash_long_swap_mask_addr = generate_ghash_long_swap_mask(); 4309 StubRoutines::x86::_ghash_byte_swap_mask_addr = generate_ghash_byte_swap_mask(); 4310 StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks(); 4311 } 4312 4313 // Safefetch stubs. 
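    // SafeFetch32/SafeFetchN behave like plain loads, except that a fault
    // at the recorded fault_pc is intercepted by the VM's signal handler,
    // which resumes execution at continuation_pc so the caller receives
    // the error value instead of crashing. A sketch of typical caller-side
    // use (an illustrative example, not a call site in this file):
    //
    //   int probe_int(int* p) {
    //     return SafeFetch32(p, -1);   // yields -1 if p is unmapped
    //   }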
generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
4315                                                        &StubRoutines::_safefetch32_fault_pc,
4316                                                        &StubRoutines::_safefetch32_continuation_pc);
4317     generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
4318                                                        &StubRoutines::_safefetchN_fault_pc,
4319                                                        &StubRoutines::_safefetchN_continuation_pc);
4320 #ifdef COMPILER2
4321     if (UseMultiplyToLenIntrinsic) {
4322       StubRoutines::_multiplyToLen = generate_multiplyToLen();
4323     }
4324     if (UseSquareToLenIntrinsic) {
4325       StubRoutines::_squareToLen = generate_squareToLen();
4326     }
4327     if (UseMulAddIntrinsic) {
4328       StubRoutines::_mulAdd = generate_mulAdd();
4329     }
4330
4331 #ifndef _WINDOWS
4332     if (UseMontgomeryMultiplyIntrinsic) {
4333       StubRoutines::_montgomeryMultiply
4334         = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
4335     }
4336     if (UseMontgomerySquareIntrinsic) {
4337       StubRoutines::_montgomerySquare
4338         = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
4339     }
4340 #endif // !_WINDOWS
4341 #endif // COMPILER2
4342   }
4343
4344  public:
4345   StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
4346     if (all) {
4347       generate_all();
4348     } else {
4349       generate_initial();
4350     }
4351   }
4352 }; // end class declaration
4353
4354 void StubGenerator_generate(CodeBuffer* code, bool all) {
4355   StubGenerator g(code, all);
4356 }