1 /* 2 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "asm/macroAssembler.hpp" 27 #include "asm/macroAssembler.inline.hpp" 28 #include "interpreter/interpreter.hpp" 29 #include "nativeInst_x86.hpp" 30 #include "oops/instanceOop.hpp" 31 #include "oops/method.hpp" 32 #include "oops/objArrayKlass.hpp" 33 #include "oops/oop.inline.hpp" 34 #include "prims/methodHandles.hpp" 35 #include "runtime/frame.inline.hpp" 36 #include "runtime/handles.inline.hpp" 37 #include "runtime/sharedRuntime.hpp" 38 #include "runtime/stubCodeGenerator.hpp" 39 #include "runtime/stubRoutines.hpp" 40 #include "runtime/thread.inline.hpp" 41 #include "utilities/top.hpp" 42 #ifdef COMPILER2 43 #include "opto/runtime.hpp" 44 #endif 45 46 // Declaration and definition of StubGenerator (no .hpp file). 47 // For a more detailed description of the stub routine structure 48 // see the comment in stubRoutines.hpp 49 50 #define __ _masm-> 51 #define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8) 52 #define a__ ((Assembler*)_masm)-> 53 54 #ifdef PRODUCT 55 #define BLOCK_COMMENT(str) /* nothing */ 56 #else 57 #define BLOCK_COMMENT(str) __ block_comment(str) 58 #endif 59 60 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 61 const int MXCSR_MASK = 0xFFC0; // Mask out any pending exceptions 62 63 // Stub Code definitions 64 65 static address handle_unsafe_access() { 66 JavaThread* thread = JavaThread::current(); 67 address pc = thread->saved_exception_pc(); 68 // pc is the instruction which we must emulate 69 // doing a no-op is fine: return garbage from the load 70 // therefore, compute npc 71 address npc = Assembler::locate_next_instruction(pc); 72 73 // request an async exception 74 thread->set_pending_unsafe_access_error(); 75 76 // return address of next instruction to execute 77 return npc; 78 } 79 80 class StubGenerator: public StubCodeGenerator { 81 private: 82 83 #ifdef PRODUCT 84 #define inc_counter_np(counter) ((void)0) 85 #else 86 void inc_counter_np_(int& counter) { 87 // This can destroy rscratch1 if counter is far from the code cache 88 __ incrementl(ExternalAddress((address)&counter)); 89 } 90 #define inc_counter_np(counter) \ 91 BLOCK_COMMENT("inc_counter " #counter); \ 92 inc_counter_np_(counter); 93 #endif 94 95 // Call stubs are used to call Java from C 96 // 97 // Linux Arguments: 98 // c_rarg0: call wrapper address address 99 // c_rarg1: result address 100 // c_rarg2: result type BasicType 101 // c_rarg3: method Method* 102 // c_rarg4: (interpreter) entry point address 103 // c_rarg5: parameters intptr_t* 104 // 16(rbp): parameter size (in words) int 105 // 24(rbp): thread Thread* 106 // 107 // [ return_from_Java ] <--- rsp 108 // [ argument word n ] 109 // ... 110 // -12 [ argument word 1 ] 111 // -11 [ saved r15 ] <--- rsp_after_call 112 // -10 [ saved r14 ] 113 // -9 [ saved r13 ] 114 // -8 [ saved r12 ] 115 // -7 [ saved rbx ] 116 // -6 [ call wrapper ] 117 // -5 [ result ] 118 // -4 [ result type ] 119 // -3 [ method ] 120 // -2 [ entry point ] 121 // -1 [ parameters ] 122 // 0 [ saved rbp ] <--- rbp 123 // 1 [ return address ] 124 // 2 [ parameter size ] 125 // 3 [ thread ] 126 // 127 // Windows Arguments: 128 // c_rarg0: call wrapper address address 129 // c_rarg1: result address 130 // c_rarg2: result type BasicType 131 // c_rarg3: method Method* 132 // 48(rbp): (interpreter) entry point address 133 // 56(rbp): parameters intptr_t* 134 // 64(rbp): parameter size (in words) int 135 // 72(rbp): thread Thread* 136 // 137 // [ return_from_Java ] <--- rsp 138 // [ argument word n ] 139 // ... 140 // -60 [ argument word 1 ] 141 // -59 [ saved xmm31 ] <--- rsp after_call 142 // [ saved xmm16-xmm30 ] (EVEX enabled, else the space is blank) 143 // -27 [ saved xmm15 ] 144 // [ saved xmm7-xmm14 ] 145 // -9 [ saved xmm6 ] (each xmm register takes 2 slots) 146 // -7 [ saved r15 ] 147 // -6 [ saved r14 ] 148 // -5 [ saved r13 ] 149 // -4 [ saved r12 ] 150 // -3 [ saved rdi ] 151 // -2 [ saved rsi ] 152 // -1 [ saved rbx ] 153 // 0 [ saved rbp ] <--- rbp 154 // 1 [ return address ] 155 // 2 [ call wrapper ] 156 // 3 [ result ] 157 // 4 [ result type ] 158 // 5 [ method ] 159 // 6 [ entry point ] 160 // 7 [ parameters ] 161 // 8 [ parameter size ] 162 // 9 [ thread ] 163 // 164 // Windows reserves the callers stack space for arguments 1-4. 165 // We spill c_rarg0-c_rarg3 to this space. 166 167 // Call stub stack layout word offsets from rbp 168 enum call_stub_layout { 169 #ifdef _WIN64 170 xmm_save_first = 6, // save from xmm6 171 xmm_save_last = 31, // to xmm31 172 xmm_save_base = -9, 173 rsp_after_call_off = xmm_save_base - 2 * (xmm_save_last - xmm_save_first), // -27 174 r15_off = -7, 175 r14_off = -6, 176 r13_off = -5, 177 r12_off = -4, 178 rdi_off = -3, 179 rsi_off = -2, 180 rbx_off = -1, 181 rbp_off = 0, 182 retaddr_off = 1, 183 call_wrapper_off = 2, 184 result_off = 3, 185 result_type_off = 4, 186 method_off = 5, 187 entry_point_off = 6, 188 parameters_off = 7, 189 parameter_size_off = 8, 190 thread_off = 9 191 #else 192 rsp_after_call_off = -12, 193 mxcsr_off = rsp_after_call_off, 194 r15_off = -11, 195 r14_off = -10, 196 r13_off = -9, 197 r12_off = -8, 198 rbx_off = -7, 199 call_wrapper_off = -6, 200 result_off = -5, 201 result_type_off = -4, 202 method_off = -3, 203 entry_point_off = -2, 204 parameters_off = -1, 205 rbp_off = 0, 206 retaddr_off = 1, 207 parameter_size_off = 2, 208 thread_off = 3 209 #endif 210 }; 211 212 #ifdef _WIN64 213 Address xmm_save(int reg) { 214 assert(reg >= xmm_save_first && reg <= xmm_save_last, "XMM register number out of range"); 215 return Address(rbp, (xmm_save_base - (reg - xmm_save_first) * 2) * wordSize); 216 } 217 #endif 218 219 address generate_call_stub(address& return_address) { 220 assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 && 221 (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off, 222 "adjust this code"); 223 StubCodeMark mark(this, "StubRoutines", "call_stub"); 224 address start = __ pc(); 225 226 // same as in generate_catch_exception()! 227 const Address rsp_after_call(rbp, rsp_after_call_off * wordSize); 228 229 const Address call_wrapper (rbp, call_wrapper_off * wordSize); 230 const Address result (rbp, result_off * wordSize); 231 const Address result_type (rbp, result_type_off * wordSize); 232 const Address method (rbp, method_off * wordSize); 233 const Address entry_point (rbp, entry_point_off * wordSize); 234 const Address parameters (rbp, parameters_off * wordSize); 235 const Address parameter_size(rbp, parameter_size_off * wordSize); 236 237 // same as in generate_catch_exception()! 238 const Address thread (rbp, thread_off * wordSize); 239 240 const Address r15_save(rbp, r15_off * wordSize); 241 const Address r14_save(rbp, r14_off * wordSize); 242 const Address r13_save(rbp, r13_off * wordSize); 243 const Address r12_save(rbp, r12_off * wordSize); 244 const Address rbx_save(rbp, rbx_off * wordSize); 245 246 // stub code 247 __ enter(); 248 __ subptr(rsp, -rsp_after_call_off * wordSize); 249 250 // save register parameters 251 #ifndef _WIN64 252 __ movptr(parameters, c_rarg5); // parameters 253 __ movptr(entry_point, c_rarg4); // entry_point 254 #endif 255 256 __ movptr(method, c_rarg3); // method 257 __ movl(result_type, c_rarg2); // result type 258 __ movptr(result, c_rarg1); // result 259 __ movptr(call_wrapper, c_rarg0); // call wrapper 260 261 // save regs belonging to calling function 262 __ movptr(rbx_save, rbx); 263 __ movptr(r12_save, r12); 264 __ movptr(r13_save, r13); 265 __ movptr(r14_save, r14); 266 __ movptr(r15_save, r15); 267 if (UseAVX > 2) { 268 __ movl(rbx, 0xffff); 269 __ kmovql(k1, rbx); 270 } 271 #ifdef _WIN64 272 if (UseAVX > 2) { 273 for (int i = 6; i <= 31; i++) { 274 __ movdqu(xmm_save(i), as_XMMRegister(i)); 275 } 276 } else { 277 for (int i = 6; i <= 15; i++) { 278 __ movdqu(xmm_save(i), as_XMMRegister(i)); 279 } 280 } 281 282 const Address rdi_save(rbp, rdi_off * wordSize); 283 const Address rsi_save(rbp, rsi_off * wordSize); 284 285 __ movptr(rsi_save, rsi); 286 __ movptr(rdi_save, rdi); 287 #else 288 const Address mxcsr_save(rbp, mxcsr_off * wordSize); 289 { 290 Label skip_ldmx; 291 __ stmxcsr(mxcsr_save); 292 __ movl(rax, mxcsr_save); 293 __ andl(rax, MXCSR_MASK); // Only check control and mask bits 294 ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std()); 295 __ cmp32(rax, mxcsr_std); 296 __ jcc(Assembler::equal, skip_ldmx); 297 __ ldmxcsr(mxcsr_std); 298 __ bind(skip_ldmx); 299 } 300 #endif 301 302 // Load up thread register 303 __ movptr(r15_thread, thread); 304 __ reinit_heapbase(); 305 306 #ifdef ASSERT 307 // make sure we have no pending exceptions 308 { 309 Label L; 310 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 311 __ jcc(Assembler::equal, L); 312 __ stop("StubRoutines::call_stub: entered with pending exception"); 313 __ bind(L); 314 } 315 #endif 316 317 // pass parameters if any 318 BLOCK_COMMENT("pass parameters if any"); 319 Label parameters_done; 320 __ movl(c_rarg3, parameter_size); 321 __ testl(c_rarg3, c_rarg3); 322 __ jcc(Assembler::zero, parameters_done); 323 324 Label loop; 325 __ movptr(c_rarg2, parameters); // parameter pointer 326 __ movl(c_rarg1, c_rarg3); // parameter counter is in c_rarg1 327 __ BIND(loop); 328 __ movptr(rax, Address(c_rarg2, 0));// get parameter 329 __ addptr(c_rarg2, wordSize); // advance to next parameter 330 __ decrementl(c_rarg1); // decrement counter 331 __ push(rax); // pass parameter 332 __ jcc(Assembler::notZero, loop); 333 334 // call Java function 335 __ BIND(parameters_done); 336 __ movptr(rbx, method); // get Method* 337 __ movptr(c_rarg1, entry_point); // get entry_point 338 __ mov(r13, rsp); // set sender sp 339 BLOCK_COMMENT("call Java function"); 340 __ call(c_rarg1); 341 342 BLOCK_COMMENT("call_stub_return_address:"); 343 return_address = __ pc(); 344 345 // store result depending on type (everything that is not 346 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT) 347 __ movptr(c_rarg0, result); 348 Label is_long, is_float, is_double, exit; 349 __ movl(c_rarg1, result_type); 350 __ cmpl(c_rarg1, T_OBJECT); 351 __ jcc(Assembler::equal, is_long); 352 __ cmpl(c_rarg1, T_LONG); 353 __ jcc(Assembler::equal, is_long); 354 __ cmpl(c_rarg1, T_FLOAT); 355 __ jcc(Assembler::equal, is_float); 356 __ cmpl(c_rarg1, T_DOUBLE); 357 __ jcc(Assembler::equal, is_double); 358 359 // handle T_INT case 360 __ movl(Address(c_rarg0, 0), rax); 361 362 __ BIND(exit); 363 364 // pop parameters 365 __ lea(rsp, rsp_after_call); 366 367 #ifdef ASSERT 368 // verify that threads correspond 369 { 370 Label L, S; 371 __ cmpptr(r15_thread, thread); 372 __ jcc(Assembler::notEqual, S); 373 __ get_thread(rbx); 374 __ cmpptr(r15_thread, rbx); 375 __ jcc(Assembler::equal, L); 376 __ bind(S); 377 __ jcc(Assembler::equal, L); 378 __ stop("StubRoutines::call_stub: threads must correspond"); 379 __ bind(L); 380 } 381 #endif 382 383 // restore regs belonging to calling function 384 #ifdef _WIN64 385 for (int i = 15; i >= 6; i--) { 386 __ movdqu(as_XMMRegister(i), xmm_save(i)); 387 } 388 #endif 389 __ movptr(r15, r15_save); 390 __ movptr(r14, r14_save); 391 __ movptr(r13, r13_save); 392 __ movptr(r12, r12_save); 393 __ movptr(rbx, rbx_save); 394 395 #ifdef _WIN64 396 __ movptr(rdi, rdi_save); 397 __ movptr(rsi, rsi_save); 398 #else 399 __ ldmxcsr(mxcsr_save); 400 #endif 401 402 // restore rsp 403 __ addptr(rsp, -rsp_after_call_off * wordSize); 404 405 // return 406 __ pop(rbp); 407 __ ret(0); 408 409 // handle return types different from T_INT 410 __ BIND(is_long); 411 __ movq(Address(c_rarg0, 0), rax); 412 __ jmp(exit); 413 414 __ BIND(is_float); 415 __ movflt(Address(c_rarg0, 0), xmm0); 416 __ jmp(exit); 417 418 __ BIND(is_double); 419 __ movdbl(Address(c_rarg0, 0), xmm0); 420 __ jmp(exit); 421 422 return start; 423 } 424 425 // Return point for a Java call if there's an exception thrown in 426 // Java code. The exception is caught and transformed into a 427 // pending exception stored in JavaThread that can be tested from 428 // within the VM. 429 // 430 // Note: Usually the parameters are removed by the callee. In case 431 // of an exception crossing an activation frame boundary, that is 432 // not the case if the callee is compiled code => need to setup the 433 // rsp. 434 // 435 // rax: exception oop 436 437 address generate_catch_exception() { 438 StubCodeMark mark(this, "StubRoutines", "catch_exception"); 439 address start = __ pc(); 440 441 // same as in generate_call_stub(): 442 const Address rsp_after_call(rbp, rsp_after_call_off * wordSize); 443 const Address thread (rbp, thread_off * wordSize); 444 445 #ifdef ASSERT 446 // verify that threads correspond 447 { 448 Label L, S; 449 __ cmpptr(r15_thread, thread); 450 __ jcc(Assembler::notEqual, S); 451 __ get_thread(rbx); 452 __ cmpptr(r15_thread, rbx); 453 __ jcc(Assembler::equal, L); 454 __ bind(S); 455 __ stop("StubRoutines::catch_exception: threads must correspond"); 456 __ bind(L); 457 } 458 #endif 459 460 // set pending exception 461 __ verify_oop(rax); 462 463 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax); 464 __ lea(rscratch1, ExternalAddress((address)__FILE__)); 465 __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1); 466 __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__); 467 468 // complete return to VM 469 assert(StubRoutines::_call_stub_return_address != NULL, 470 "_call_stub_return_address must have been generated before"); 471 __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address)); 472 473 return start; 474 } 475 476 // Continuation point for runtime calls returning with a pending 477 // exception. The pending exception check happened in the runtime 478 // or native call stub. The pending exception in Thread is 479 // converted into a Java-level exception. 480 // 481 // Contract with Java-level exception handlers: 482 // rax: exception 483 // rdx: throwing pc 484 // 485 // NOTE: At entry of this stub, exception-pc must be on stack !! 486 487 address generate_forward_exception() { 488 StubCodeMark mark(this, "StubRoutines", "forward exception"); 489 address start = __ pc(); 490 491 // Upon entry, the sp points to the return address returning into 492 // Java (interpreted or compiled) code; i.e., the return address 493 // becomes the throwing pc. 494 // 495 // Arguments pushed before the runtime call are still on the stack 496 // but the exception handler will reset the stack pointer -> 497 // ignore them. A potential result in registers can be ignored as 498 // well. 499 500 #ifdef ASSERT 501 // make sure this code is only executed if there is a pending exception 502 { 503 Label L; 504 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL); 505 __ jcc(Assembler::notEqual, L); 506 __ stop("StubRoutines::forward exception: no pending exception (1)"); 507 __ bind(L); 508 } 509 #endif 510 511 // compute exception handler into rbx 512 __ movptr(c_rarg0, Address(rsp, 0)); 513 BLOCK_COMMENT("call exception_handler_for_return_address"); 514 __ call_VM_leaf(CAST_FROM_FN_PTR(address, 515 SharedRuntime::exception_handler_for_return_address), 516 r15_thread, c_rarg0); 517 __ mov(rbx, rax); 518 519 // setup rax & rdx, remove return address & clear pending exception 520 __ pop(rdx); 521 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset())); 522 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 523 524 #ifdef ASSERT 525 // make sure exception is set 526 { 527 Label L; 528 __ testptr(rax, rax); 529 __ jcc(Assembler::notEqual, L); 530 __ stop("StubRoutines::forward exception: no pending exception (2)"); 531 __ bind(L); 532 } 533 #endif 534 535 // continue at exception handler (return address removed) 536 // rax: exception 537 // rbx: exception handler 538 // rdx: throwing pc 539 __ verify_oop(rax); 540 __ jmp(rbx); 541 542 return start; 543 } 544 545 // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest) 546 // 547 // Arguments : 548 // c_rarg0: exchange_value 549 // c_rarg0: dest 550 // 551 // Result: 552 // *dest <- ex, return (orig *dest) 553 address generate_atomic_xchg() { 554 StubCodeMark mark(this, "StubRoutines", "atomic_xchg"); 555 address start = __ pc(); 556 557 __ movl(rax, c_rarg0); // Copy to eax we need a return value anyhow 558 __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK 559 __ ret(0); 560 561 return start; 562 } 563 564 // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) 565 // 566 // Arguments : 567 // c_rarg0: exchange_value 568 // c_rarg1: dest 569 // 570 // Result: 571 // *dest <- ex, return (orig *dest) 572 address generate_atomic_xchg_ptr() { 573 StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr"); 574 address start = __ pc(); 575 576 __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow 577 __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK 578 __ ret(0); 579 580 return start; 581 } 582 583 // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest, 584 // jint compare_value) 585 // 586 // Arguments : 587 // c_rarg0: exchange_value 588 // c_rarg1: dest 589 // c_rarg2: compare_value 590 // 591 // Result: 592 // if ( compare_value == *dest ) { 593 // *dest = exchange_value 594 // return compare_value; 595 // else 596 // return *dest; 597 address generate_atomic_cmpxchg() { 598 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg"); 599 address start = __ pc(); 600 601 __ movl(rax, c_rarg2); 602 if ( os::is_MP() ) __ lock(); 603 __ cmpxchgl(c_rarg0, Address(c_rarg1, 0)); 604 __ ret(0); 605 606 return start; 607 } 608 609 // Support for jbyte atomic::atomic_cmpxchg(jbyte exchange_value, volatile jbyte* dest, 610 // jbyte compare_value) 611 // 612 // Arguments : 613 // c_rarg0: exchange_value 614 // c_rarg1: dest 615 // c_rarg2: compare_value 616 // 617 // Result: 618 // if ( compare_value == *dest ) { 619 // *dest = exchange_value 620 // return compare_value; 621 // else 622 // return *dest; 623 address generate_atomic_cmpxchg_byte() { 624 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte"); 625 address start = __ pc(); 626 627 __ movsbq(rax, c_rarg2); 628 if ( os::is_MP() ) __ lock(); 629 __ cmpxchgb(c_rarg0, Address(c_rarg1, 0)); 630 __ ret(0); 631 632 return start; 633 } 634 635 // Support for jlong atomic::atomic_cmpxchg(jlong exchange_value, 636 // volatile jlong* dest, 637 // jlong compare_value) 638 // Arguments : 639 // c_rarg0: exchange_value 640 // c_rarg1: dest 641 // c_rarg2: compare_value 642 // 643 // Result: 644 // if ( compare_value == *dest ) { 645 // *dest = exchange_value 646 // return compare_value; 647 // else 648 // return *dest; 649 address generate_atomic_cmpxchg_long() { 650 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long"); 651 address start = __ pc(); 652 653 __ movq(rax, c_rarg2); 654 if ( os::is_MP() ) __ lock(); 655 __ cmpxchgq(c_rarg0, Address(c_rarg1, 0)); 656 __ ret(0); 657 658 return start; 659 } 660 661 // Support for jint atomic::add(jint add_value, volatile jint* dest) 662 // 663 // Arguments : 664 // c_rarg0: add_value 665 // c_rarg1: dest 666 // 667 // Result: 668 // *dest += add_value 669 // return *dest; 670 address generate_atomic_add() { 671 StubCodeMark mark(this, "StubRoutines", "atomic_add"); 672 address start = __ pc(); 673 674 __ movl(rax, c_rarg0); 675 if ( os::is_MP() ) __ lock(); 676 __ xaddl(Address(c_rarg1, 0), c_rarg0); 677 __ addl(rax, c_rarg0); 678 __ ret(0); 679 680 return start; 681 } 682 683 // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) 684 // 685 // Arguments : 686 // c_rarg0: add_value 687 // c_rarg1: dest 688 // 689 // Result: 690 // *dest += add_value 691 // return *dest; 692 address generate_atomic_add_ptr() { 693 StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr"); 694 address start = __ pc(); 695 696 __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow 697 if ( os::is_MP() ) __ lock(); 698 __ xaddptr(Address(c_rarg1, 0), c_rarg0); 699 __ addptr(rax, c_rarg0); 700 __ ret(0); 701 702 return start; 703 } 704 705 // Support for intptr_t OrderAccess::fence() 706 // 707 // Arguments : 708 // 709 // Result: 710 address generate_orderaccess_fence() { 711 StubCodeMark mark(this, "StubRoutines", "orderaccess_fence"); 712 address start = __ pc(); 713 __ membar(Assembler::StoreLoad); 714 __ ret(0); 715 716 return start; 717 } 718 719 // Support for intptr_t get_previous_fp() 720 // 721 // This routine is used to find the previous frame pointer for the 722 // caller (current_frame_guess). This is used as part of debugging 723 // ps() is seemingly lost trying to find frames. 724 // This code assumes that caller current_frame_guess) has a frame. 725 address generate_get_previous_fp() { 726 StubCodeMark mark(this, "StubRoutines", "get_previous_fp"); 727 const Address old_fp(rbp, 0); 728 const Address older_fp(rax, 0); 729 address start = __ pc(); 730 731 __ enter(); 732 __ movptr(rax, old_fp); // callers fp 733 __ movptr(rax, older_fp); // the frame for ps() 734 __ pop(rbp); 735 __ ret(0); 736 737 return start; 738 } 739 740 // Support for intptr_t get_previous_sp() 741 // 742 // This routine is used to find the previous stack pointer for the 743 // caller. 744 address generate_get_previous_sp() { 745 StubCodeMark mark(this, "StubRoutines", "get_previous_sp"); 746 address start = __ pc(); 747 748 __ movptr(rax, rsp); 749 __ addptr(rax, 8); // return address is at the top of the stack. 750 __ ret(0); 751 752 return start; 753 } 754 755 //---------------------------------------------------------------------------------------------------- 756 // Support for void verify_mxcsr() 757 // 758 // This routine is used with -Xcheck:jni to verify that native 759 // JNI code does not return to Java code without restoring the 760 // MXCSR register to our expected state. 761 762 address generate_verify_mxcsr() { 763 StubCodeMark mark(this, "StubRoutines", "verify_mxcsr"); 764 address start = __ pc(); 765 766 const Address mxcsr_save(rsp, 0); 767 768 if (CheckJNICalls) { 769 Label ok_ret; 770 ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std()); 771 __ push(rax); 772 __ subptr(rsp, wordSize); // allocate a temp location 773 __ stmxcsr(mxcsr_save); 774 __ movl(rax, mxcsr_save); 775 __ andl(rax, MXCSR_MASK); // Only check control and mask bits 776 __ cmp32(rax, mxcsr_std); 777 __ jcc(Assembler::equal, ok_ret); 778 779 __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall"); 780 781 __ ldmxcsr(mxcsr_std); 782 783 __ bind(ok_ret); 784 __ addptr(rsp, wordSize); 785 __ pop(rax); 786 } 787 788 __ ret(0); 789 790 return start; 791 } 792 793 address generate_f2i_fixup() { 794 StubCodeMark mark(this, "StubRoutines", "f2i_fixup"); 795 Address inout(rsp, 5 * wordSize); // return address + 4 saves 796 797 address start = __ pc(); 798 799 Label L; 800 801 __ push(rax); 802 __ push(c_rarg3); 803 __ push(c_rarg2); 804 __ push(c_rarg1); 805 806 __ movl(rax, 0x7f800000); 807 __ xorl(c_rarg3, c_rarg3); 808 __ movl(c_rarg2, inout); 809 __ movl(c_rarg1, c_rarg2); 810 __ andl(c_rarg1, 0x7fffffff); 811 __ cmpl(rax, c_rarg1); // NaN? -> 0 812 __ jcc(Assembler::negative, L); 813 __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint 814 __ movl(c_rarg3, 0x80000000); 815 __ movl(rax, 0x7fffffff); 816 __ cmovl(Assembler::positive, c_rarg3, rax); 817 818 __ bind(L); 819 __ movptr(inout, c_rarg3); 820 821 __ pop(c_rarg1); 822 __ pop(c_rarg2); 823 __ pop(c_rarg3); 824 __ pop(rax); 825 826 __ ret(0); 827 828 return start; 829 } 830 831 address generate_f2l_fixup() { 832 StubCodeMark mark(this, "StubRoutines", "f2l_fixup"); 833 Address inout(rsp, 5 * wordSize); // return address + 4 saves 834 address start = __ pc(); 835 836 Label L; 837 838 __ push(rax); 839 __ push(c_rarg3); 840 __ push(c_rarg2); 841 __ push(c_rarg1); 842 843 __ movl(rax, 0x7f800000); 844 __ xorl(c_rarg3, c_rarg3); 845 __ movl(c_rarg2, inout); 846 __ movl(c_rarg1, c_rarg2); 847 __ andl(c_rarg1, 0x7fffffff); 848 __ cmpl(rax, c_rarg1); // NaN? -> 0 849 __ jcc(Assembler::negative, L); 850 __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong 851 __ mov64(c_rarg3, 0x8000000000000000); 852 __ mov64(rax, 0x7fffffffffffffff); 853 __ cmov(Assembler::positive, c_rarg3, rax); 854 855 __ bind(L); 856 __ movptr(inout, c_rarg3); 857 858 __ pop(c_rarg1); 859 __ pop(c_rarg2); 860 __ pop(c_rarg3); 861 __ pop(rax); 862 863 __ ret(0); 864 865 return start; 866 } 867 868 address generate_d2i_fixup() { 869 StubCodeMark mark(this, "StubRoutines", "d2i_fixup"); 870 Address inout(rsp, 6 * wordSize); // return address + 5 saves 871 872 address start = __ pc(); 873 874 Label L; 875 876 __ push(rax); 877 __ push(c_rarg3); 878 __ push(c_rarg2); 879 __ push(c_rarg1); 880 __ push(c_rarg0); 881 882 __ movl(rax, 0x7ff00000); 883 __ movq(c_rarg2, inout); 884 __ movl(c_rarg3, c_rarg2); 885 __ mov(c_rarg1, c_rarg2); 886 __ mov(c_rarg0, c_rarg2); 887 __ negl(c_rarg3); 888 __ shrptr(c_rarg1, 0x20); 889 __ orl(c_rarg3, c_rarg2); 890 __ andl(c_rarg1, 0x7fffffff); 891 __ xorl(c_rarg2, c_rarg2); 892 __ shrl(c_rarg3, 0x1f); 893 __ orl(c_rarg1, c_rarg3); 894 __ cmpl(rax, c_rarg1); 895 __ jcc(Assembler::negative, L); // NaN -> 0 896 __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint 897 __ movl(c_rarg2, 0x80000000); 898 __ movl(rax, 0x7fffffff); 899 __ cmov(Assembler::positive, c_rarg2, rax); 900 901 __ bind(L); 902 __ movptr(inout, c_rarg2); 903 904 __ pop(c_rarg0); 905 __ pop(c_rarg1); 906 __ pop(c_rarg2); 907 __ pop(c_rarg3); 908 __ pop(rax); 909 910 __ ret(0); 911 912 return start; 913 } 914 915 address generate_d2l_fixup() { 916 StubCodeMark mark(this, "StubRoutines", "d2l_fixup"); 917 Address inout(rsp, 6 * wordSize); // return address + 5 saves 918 919 address start = __ pc(); 920 921 Label L; 922 923 __ push(rax); 924 __ push(c_rarg3); 925 __ push(c_rarg2); 926 __ push(c_rarg1); 927 __ push(c_rarg0); 928 929 __ movl(rax, 0x7ff00000); 930 __ movq(c_rarg2, inout); 931 __ movl(c_rarg3, c_rarg2); 932 __ mov(c_rarg1, c_rarg2); 933 __ mov(c_rarg0, c_rarg2); 934 __ negl(c_rarg3); 935 __ shrptr(c_rarg1, 0x20); 936 __ orl(c_rarg3, c_rarg2); 937 __ andl(c_rarg1, 0x7fffffff); 938 __ xorl(c_rarg2, c_rarg2); 939 __ shrl(c_rarg3, 0x1f); 940 __ orl(c_rarg1, c_rarg3); 941 __ cmpl(rax, c_rarg1); 942 __ jcc(Assembler::negative, L); // NaN -> 0 943 __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong 944 __ mov64(c_rarg2, 0x8000000000000000); 945 __ mov64(rax, 0x7fffffffffffffff); 946 __ cmovq(Assembler::positive, c_rarg2, rax); 947 948 __ bind(L); 949 __ movq(inout, c_rarg2); 950 951 __ pop(c_rarg0); 952 __ pop(c_rarg1); 953 __ pop(c_rarg2); 954 __ pop(c_rarg3); 955 __ pop(rax); 956 957 __ ret(0); 958 959 return start; 960 } 961 962 address generate_fp_mask(const char *stub_name, int64_t mask) { 963 __ align(CodeEntryAlignment); 964 StubCodeMark mark(this, "StubRoutines", stub_name); 965 address start = __ pc(); 966 967 __ emit_data64( mask, relocInfo::none ); 968 __ emit_data64( mask, relocInfo::none ); 969 970 return start; 971 } 972 973 // The following routine generates a subroutine to throw an 974 // asynchronous UnknownError when an unsafe access gets a fault that 975 // could not be reasonably prevented by the programmer. (Example: 976 // SIGBUS/OBJERR.) 977 address generate_handler_for_unsafe_access() { 978 StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access"); 979 address start = __ pc(); 980 981 __ push(0); // hole for return address-to-be 982 __ pusha(); // push registers 983 Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord); 984 985 // FIXME: this probably needs alignment logic 986 987 __ subptr(rsp, frame::arg_reg_save_area_bytes); 988 BLOCK_COMMENT("call handle_unsafe_access"); 989 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access))); 990 __ addptr(rsp, frame::arg_reg_save_area_bytes); 991 992 __ movptr(next_pc, rax); // stuff next address 993 __ popa(); 994 __ ret(0); // jump to next address 995 996 return start; 997 } 998 999 // Non-destructive plausibility checks for oops 1000 // 1001 // Arguments: 1002 // all args on stack! 1003 // 1004 // Stack after saving c_rarg3: 1005 // [tos + 0]: saved c_rarg3 1006 // [tos + 1]: saved c_rarg2 1007 // [tos + 2]: saved r12 (several TemplateTable methods use it) 1008 // [tos + 3]: saved flags 1009 // [tos + 4]: return address 1010 // * [tos + 5]: error message (char*) 1011 // * [tos + 6]: object to verify (oop) 1012 // * [tos + 7]: saved rax - saved by caller and bashed 1013 // * [tos + 8]: saved r10 (rscratch1) - saved by caller 1014 // * = popped on exit 1015 address generate_verify_oop() { 1016 StubCodeMark mark(this, "StubRoutines", "verify_oop"); 1017 address start = __ pc(); 1018 1019 Label exit, error; 1020 1021 __ pushf(); 1022 __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr())); 1023 1024 __ push(r12); 1025 1026 // save c_rarg2 and c_rarg3 1027 __ push(c_rarg2); 1028 __ push(c_rarg3); 1029 1030 enum { 1031 // After previous pushes. 1032 oop_to_verify = 6 * wordSize, 1033 saved_rax = 7 * wordSize, 1034 saved_r10 = 8 * wordSize, 1035 1036 // Before the call to MacroAssembler::debug(), see below. 1037 return_addr = 16 * wordSize, 1038 error_msg = 17 * wordSize 1039 }; 1040 1041 // get object 1042 __ movptr(rax, Address(rsp, oop_to_verify)); 1043 1044 // make sure object is 'reasonable' 1045 __ testptr(rax, rax); 1046 __ jcc(Assembler::zero, exit); // if obj is NULL it is OK 1047 // Check if the oop is in the right area of memory 1048 __ movptr(c_rarg2, rax); 1049 __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask()); 1050 __ andptr(c_rarg2, c_rarg3); 1051 __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits()); 1052 __ cmpptr(c_rarg2, c_rarg3); 1053 __ jcc(Assembler::notZero, error); 1054 1055 // set r12 to heapbase for load_klass() 1056 __ reinit_heapbase(); 1057 1058 // make sure klass is 'reasonable', which is not zero. 1059 __ load_klass(rax, rax); // get klass 1060 __ testptr(rax, rax); 1061 __ jcc(Assembler::zero, error); // if klass is NULL it is broken 1062 1063 // return if everything seems ok 1064 __ bind(exit); 1065 __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back 1066 __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back 1067 __ pop(c_rarg3); // restore c_rarg3 1068 __ pop(c_rarg2); // restore c_rarg2 1069 __ pop(r12); // restore r12 1070 __ popf(); // restore flags 1071 __ ret(4 * wordSize); // pop caller saved stuff 1072 1073 // handle errors 1074 __ bind(error); 1075 __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back 1076 __ movptr(rscratch1, Address(rsp, saved_r10)); // get saved r10 back 1077 __ pop(c_rarg3); // get saved c_rarg3 back 1078 __ pop(c_rarg2); // get saved c_rarg2 back 1079 __ pop(r12); // get saved r12 back 1080 __ popf(); // get saved flags off stack -- 1081 // will be ignored 1082 1083 __ pusha(); // push registers 1084 // (rip is already 1085 // already pushed) 1086 // debug(char* msg, int64_t pc, int64_t regs[]) 1087 // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and 1088 // pushed all the registers, so now the stack looks like: 1089 // [tos + 0] 16 saved registers 1090 // [tos + 16] return address 1091 // * [tos + 17] error message (char*) 1092 // * [tos + 18] object to verify (oop) 1093 // * [tos + 19] saved rax - saved by caller and bashed 1094 // * [tos + 20] saved r10 (rscratch1) - saved by caller 1095 // * = popped on exit 1096 1097 __ movptr(c_rarg0, Address(rsp, error_msg)); // pass address of error message 1098 __ movptr(c_rarg1, Address(rsp, return_addr)); // pass return address 1099 __ movq(c_rarg2, rsp); // pass address of regs on stack 1100 __ mov(r12, rsp); // remember rsp 1101 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 1102 __ andptr(rsp, -16); // align stack as required by ABI 1103 BLOCK_COMMENT("call MacroAssembler::debug"); 1104 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64))); 1105 __ mov(rsp, r12); // restore rsp 1106 __ popa(); // pop registers (includes r12) 1107 __ ret(4 * wordSize); // pop caller saved stuff 1108 1109 return start; 1110 } 1111 1112 // 1113 // Verify that a register contains clean 32-bits positive value 1114 // (high 32-bits are 0) so it could be used in 64-bits shifts. 1115 // 1116 // Input: 1117 // Rint - 32-bits value 1118 // Rtmp - scratch 1119 // 1120 void assert_clean_int(Register Rint, Register Rtmp) { 1121 #ifdef ASSERT 1122 Label L; 1123 assert_different_registers(Rtmp, Rint); 1124 __ movslq(Rtmp, Rint); 1125 __ cmpq(Rtmp, Rint); 1126 __ jcc(Assembler::equal, L); 1127 __ stop("high 32-bits of int value are not 0"); 1128 __ bind(L); 1129 #endif 1130 } 1131 1132 // Generate overlap test for array copy stubs 1133 // 1134 // Input: 1135 // c_rarg0 - from 1136 // c_rarg1 - to 1137 // c_rarg2 - element count 1138 // 1139 // Output: 1140 // rax - &from[element count - 1] 1141 // 1142 void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) { 1143 assert(no_overlap_target != NULL, "must be generated"); 1144 array_overlap_test(no_overlap_target, NULL, sf); 1145 } 1146 void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) { 1147 array_overlap_test(NULL, &L_no_overlap, sf); 1148 } 1149 void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) { 1150 const Register from = c_rarg0; 1151 const Register to = c_rarg1; 1152 const Register count = c_rarg2; 1153 const Register end_from = rax; 1154 1155 __ cmpptr(to, from); 1156 __ lea(end_from, Address(from, count, sf, 0)); 1157 if (NOLp == NULL) { 1158 ExternalAddress no_overlap(no_overlap_target); 1159 __ jump_cc(Assembler::belowEqual, no_overlap); 1160 __ cmpptr(to, end_from); 1161 __ jump_cc(Assembler::aboveEqual, no_overlap); 1162 } else { 1163 __ jcc(Assembler::belowEqual, (*NOLp)); 1164 __ cmpptr(to, end_from); 1165 __ jcc(Assembler::aboveEqual, (*NOLp)); 1166 } 1167 } 1168 1169 // Shuffle first three arg regs on Windows into Linux/Solaris locations. 1170 // 1171 // Outputs: 1172 // rdi - rcx 1173 // rsi - rdx 1174 // rdx - r8 1175 // rcx - r9 1176 // 1177 // Registers r9 and r10 are used to save rdi and rsi on Windows, which latter 1178 // are non-volatile. r9 and r10 should not be used by the caller. 1179 // 1180 void setup_arg_regs(int nargs = 3) { 1181 const Register saved_rdi = r9; 1182 const Register saved_rsi = r10; 1183 assert(nargs == 3 || nargs == 4, "else fix"); 1184 #ifdef _WIN64 1185 assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9, 1186 "unexpected argument registers"); 1187 if (nargs >= 4) 1188 __ mov(rax, r9); // r9 is also saved_rdi 1189 __ movptr(saved_rdi, rdi); 1190 __ movptr(saved_rsi, rsi); 1191 __ mov(rdi, rcx); // c_rarg0 1192 __ mov(rsi, rdx); // c_rarg1 1193 __ mov(rdx, r8); // c_rarg2 1194 if (nargs >= 4) 1195 __ mov(rcx, rax); // c_rarg3 (via rax) 1196 #else 1197 assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx, 1198 "unexpected argument registers"); 1199 #endif 1200 } 1201 1202 void restore_arg_regs() { 1203 const Register saved_rdi = r9; 1204 const Register saved_rsi = r10; 1205 #ifdef _WIN64 1206 __ movptr(rdi, saved_rdi); 1207 __ movptr(rsi, saved_rsi); 1208 #endif 1209 } 1210 1211 // Generate code for an array write pre barrier 1212 // 1213 // addr - starting address 1214 // count - element count 1215 // tmp - scratch register 1216 // 1217 // Destroy no registers! 1218 // 1219 void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) { 1220 BarrierSet* bs = Universe::heap()->barrier_set(); 1221 switch (bs->kind()) { 1222 case BarrierSet::G1SATBCTLogging: 1223 // With G1, don't generate the call if we statically know that the target in uninitialized 1224 if (!dest_uninitialized) { 1225 __ pusha(); // push registers 1226 if (count == c_rarg0) { 1227 if (addr == c_rarg1) { 1228 // exactly backwards!! 1229 __ xchgptr(c_rarg1, c_rarg0); 1230 } else { 1231 __ movptr(c_rarg1, count); 1232 __ movptr(c_rarg0, addr); 1233 } 1234 } else { 1235 __ movptr(c_rarg0, addr); 1236 __ movptr(c_rarg1, count); 1237 } 1238 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2); 1239 __ popa(); 1240 } 1241 break; 1242 case BarrierSet::CardTableModRef: 1243 case BarrierSet::CardTableExtension: 1244 case BarrierSet::ModRef: 1245 break; 1246 default: 1247 ShouldNotReachHere(); 1248 1249 } 1250 } 1251 1252 // 1253 // Generate code for an array write post barrier 1254 // 1255 // Input: 1256 // start - register containing starting address of destination array 1257 // count - elements count 1258 // scratch - scratch register 1259 // 1260 // The input registers are overwritten. 1261 // 1262 void gen_write_ref_array_post_barrier(Register start, Register count, Register scratch) { 1263 assert_different_registers(start, count, scratch); 1264 BarrierSet* bs = Universe::heap()->barrier_set(); 1265 switch (bs->kind()) { 1266 case BarrierSet::G1SATBCTLogging: 1267 { 1268 __ pusha(); // push registers (overkill) 1269 if (c_rarg0 == count) { // On win64 c_rarg0 == rcx 1270 assert_different_registers(c_rarg1, start); 1271 __ mov(c_rarg1, count); 1272 __ mov(c_rarg0, start); 1273 } else { 1274 assert_different_registers(c_rarg0, count); 1275 __ mov(c_rarg0, start); 1276 __ mov(c_rarg1, count); 1277 } 1278 __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2); 1279 __ popa(); 1280 } 1281 break; 1282 case BarrierSet::CardTableModRef: 1283 case BarrierSet::CardTableExtension: 1284 { 1285 CardTableModRefBS* ct = barrier_set_cast<CardTableModRefBS>(bs); 1286 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); 1287 1288 Label L_loop; 1289 const Register end = count; 1290 1291 __ leaq(end, Address(start, count, TIMES_OOP, 0)); // end == start+count*oop_size 1292 __ subptr(end, BytesPerHeapOop); // end - 1 to make inclusive 1293 __ shrptr(start, CardTableModRefBS::card_shift); 1294 __ shrptr(end, CardTableModRefBS::card_shift); 1295 __ subptr(end, start); // end --> cards count 1296 1297 int64_t disp = (int64_t) ct->byte_map_base; 1298 __ mov64(scratch, disp); 1299 __ addptr(start, scratch); 1300 __ BIND(L_loop); 1301 __ movb(Address(start, count, Address::times_1), 0); 1302 __ decrement(count); 1303 __ jcc(Assembler::greaterEqual, L_loop); 1304 } 1305 break; 1306 default: 1307 ShouldNotReachHere(); 1308 1309 } 1310 } 1311 1312 1313 // Copy big chunks forward 1314 // 1315 // Inputs: 1316 // end_from - source arrays end address 1317 // end_to - destination array end address 1318 // qword_count - 64-bits element count, negative 1319 // to - scratch 1320 // L_copy_bytes - entry label 1321 // L_copy_8_bytes - exit label 1322 // 1323 void copy_bytes_forward(Register end_from, Register end_to, 1324 Register qword_count, Register to, 1325 Label& L_copy_bytes, Label& L_copy_8_bytes) { 1326 DEBUG_ONLY(__ stop("enter at entry label, not here")); 1327 Label L_loop; 1328 __ align(OptoLoopAlignment); 1329 if (UseUnalignedLoadStores) { 1330 Label L_end; 1331 // Copy 64-bytes per iteration 1332 __ BIND(L_loop); 1333 if (UseAVX > 2) { 1334 __ evmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56), Assembler::AVX_512bit); 1335 __ evmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0, Assembler::AVX_512bit); 1336 } else if (UseAVX == 2) { 1337 __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56)); 1338 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0); 1339 __ vmovdqu(xmm1, Address(end_from, qword_count, Address::times_8, -24)); 1340 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm1); 1341 } else { 1342 __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -56)); 1343 __ movdqu(Address(end_to, qword_count, Address::times_8, -56), xmm0); 1344 __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, -40)); 1345 __ movdqu(Address(end_to, qword_count, Address::times_8, -40), xmm1); 1346 __ movdqu(xmm2, Address(end_from, qword_count, Address::times_8, -24)); 1347 __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm2); 1348 __ movdqu(xmm3, Address(end_from, qword_count, Address::times_8, - 8)); 1349 __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm3); 1350 } 1351 __ BIND(L_copy_bytes); 1352 __ addptr(qword_count, 8); 1353 __ jcc(Assembler::lessEqual, L_loop); 1354 __ subptr(qword_count, 4); // sub(8) and add(4) 1355 __ jccb(Assembler::greater, L_end); 1356 // Copy trailing 32 bytes 1357 if (UseAVX >= 2) { 1358 __ vmovdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24)); 1359 __ vmovdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0); 1360 } else { 1361 __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24)); 1362 __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0); 1363 __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8)); 1364 __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1); 1365 } 1366 __ addptr(qword_count, 4); 1367 __ BIND(L_end); 1368 if (UseAVX >= 2) { 1369 // clean upper bits of YMM registers 1370 __ vpxor(xmm0, xmm0); 1371 __ vpxor(xmm1, xmm1); 1372 } 1373 } else { 1374 // Copy 32-bytes per iteration 1375 __ BIND(L_loop); 1376 __ movq(to, Address(end_from, qword_count, Address::times_8, -24)); 1377 __ movq(Address(end_to, qword_count, Address::times_8, -24), to); 1378 __ movq(to, Address(end_from, qword_count, Address::times_8, -16)); 1379 __ movq(Address(end_to, qword_count, Address::times_8, -16), to); 1380 __ movq(to, Address(end_from, qword_count, Address::times_8, - 8)); 1381 __ movq(Address(end_to, qword_count, Address::times_8, - 8), to); 1382 __ movq(to, Address(end_from, qword_count, Address::times_8, - 0)); 1383 __ movq(Address(end_to, qword_count, Address::times_8, - 0), to); 1384 1385 __ BIND(L_copy_bytes); 1386 __ addptr(qword_count, 4); 1387 __ jcc(Assembler::lessEqual, L_loop); 1388 } 1389 __ subptr(qword_count, 4); 1390 __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords 1391 } 1392 1393 // Copy big chunks backward 1394 // 1395 // Inputs: 1396 // from - source arrays address 1397 // dest - destination array address 1398 // qword_count - 64-bits element count 1399 // to - scratch 1400 // L_copy_bytes - entry label 1401 // L_copy_8_bytes - exit label 1402 // 1403 void copy_bytes_backward(Register from, Register dest, 1404 Register qword_count, Register to, 1405 Label& L_copy_bytes, Label& L_copy_8_bytes) { 1406 DEBUG_ONLY(__ stop("enter at entry label, not here")); 1407 Label L_loop; 1408 __ align(OptoLoopAlignment); 1409 if (UseUnalignedLoadStores) { 1410 Label L_end; 1411 // Copy 64-bytes per iteration 1412 __ BIND(L_loop); 1413 if (UseAVX > 2) { 1414 __ evmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32), Assembler::AVX_512bit); 1415 __ evmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0, Assembler::AVX_512bit); 1416 } else if (UseAVX == 2) { 1417 __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 32)); 1418 __ vmovdqu(Address(dest, qword_count, Address::times_8, 32), xmm0); 1419 __ vmovdqu(xmm1, Address(from, qword_count, Address::times_8, 0)); 1420 __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm1); 1421 } else { 1422 __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 48)); 1423 __ movdqu(Address(dest, qword_count, Address::times_8, 48), xmm0); 1424 __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 32)); 1425 __ movdqu(Address(dest, qword_count, Address::times_8, 32), xmm1); 1426 __ movdqu(xmm2, Address(from, qword_count, Address::times_8, 16)); 1427 __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm2); 1428 __ movdqu(xmm3, Address(from, qword_count, Address::times_8, 0)); 1429 __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm3); 1430 } 1431 __ BIND(L_copy_bytes); 1432 __ subptr(qword_count, 8); 1433 __ jcc(Assembler::greaterEqual, L_loop); 1434 1435 __ addptr(qword_count, 4); // add(8) and sub(4) 1436 __ jccb(Assembler::less, L_end); 1437 // Copy trailing 32 bytes 1438 if (UseAVX >= 2) { 1439 __ vmovdqu(xmm0, Address(from, qword_count, Address::times_8, 0)); 1440 __ vmovdqu(Address(dest, qword_count, Address::times_8, 0), xmm0); 1441 } else { 1442 __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16)); 1443 __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0); 1444 __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 0)); 1445 __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm1); 1446 } 1447 __ subptr(qword_count, 4); 1448 __ BIND(L_end); 1449 if (UseAVX >= 2) { 1450 // clean upper bits of YMM registers 1451 __ vpxor(xmm0, xmm0); 1452 __ vpxor(xmm1, xmm1); 1453 } 1454 } else { 1455 // Copy 32-bytes per iteration 1456 __ BIND(L_loop); 1457 __ movq(to, Address(from, qword_count, Address::times_8, 24)); 1458 __ movq(Address(dest, qword_count, Address::times_8, 24), to); 1459 __ movq(to, Address(from, qword_count, Address::times_8, 16)); 1460 __ movq(Address(dest, qword_count, Address::times_8, 16), to); 1461 __ movq(to, Address(from, qword_count, Address::times_8, 8)); 1462 __ movq(Address(dest, qword_count, Address::times_8, 8), to); 1463 __ movq(to, Address(from, qword_count, Address::times_8, 0)); 1464 __ movq(Address(dest, qword_count, Address::times_8, 0), to); 1465 1466 __ BIND(L_copy_bytes); 1467 __ subptr(qword_count, 4); 1468 __ jcc(Assembler::greaterEqual, L_loop); 1469 } 1470 __ addptr(qword_count, 4); 1471 __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords 1472 } 1473 1474 1475 // Arguments: 1476 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1477 // ignored 1478 // name - stub name string 1479 // 1480 // Inputs: 1481 // c_rarg0 - source array address 1482 // c_rarg1 - destination array address 1483 // c_rarg2 - element count, treated as ssize_t, can be zero 1484 // 1485 // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, 1486 // we let the hardware handle it. The one to eight bytes within words, 1487 // dwords or qwords that span cache line boundaries will still be loaded 1488 // and stored atomically. 1489 // 1490 // Side Effects: 1491 // disjoint_byte_copy_entry is set to the no-overlap entry point 1492 // used by generate_conjoint_byte_copy(). 1493 // 1494 address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) { 1495 __ align(CodeEntryAlignment); 1496 StubCodeMark mark(this, "StubRoutines", name); 1497 address start = __ pc(); 1498 1499 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes; 1500 Label L_copy_byte, L_exit; 1501 const Register from = rdi; // source array address 1502 const Register to = rsi; // destination array address 1503 const Register count = rdx; // elements count 1504 const Register byte_count = rcx; 1505 const Register qword_count = count; 1506 const Register end_from = from; // source array end address 1507 const Register end_to = to; // destination array end address 1508 // End pointers are inclusive, and if count is not zero they point 1509 // to the last unit copied: end_to[0] := end_from[0] 1510 1511 __ enter(); // required for proper stackwalking of RuntimeStub frame 1512 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1513 1514 if (entry != NULL) { 1515 *entry = __ pc(); 1516 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1517 BLOCK_COMMENT("Entry:"); 1518 } 1519 1520 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1521 // r9 and r10 may be used to save non-volatile registers 1522 1523 // 'from', 'to' and 'count' are now valid 1524 __ movptr(byte_count, count); 1525 __ shrptr(count, 3); // count => qword_count 1526 1527 // Copy from low to high addresses. Use 'to' as scratch. 1528 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1529 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1530 __ negptr(qword_count); // make the count negative 1531 __ jmp(L_copy_bytes); 1532 1533 // Copy trailing qwords 1534 __ BIND(L_copy_8_bytes); 1535 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1536 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1537 __ increment(qword_count); 1538 __ jcc(Assembler::notZero, L_copy_8_bytes); 1539 1540 // Check for and copy trailing dword 1541 __ BIND(L_copy_4_bytes); 1542 __ testl(byte_count, 4); 1543 __ jccb(Assembler::zero, L_copy_2_bytes); 1544 __ movl(rax, Address(end_from, 8)); 1545 __ movl(Address(end_to, 8), rax); 1546 1547 __ addptr(end_from, 4); 1548 __ addptr(end_to, 4); 1549 1550 // Check for and copy trailing word 1551 __ BIND(L_copy_2_bytes); 1552 __ testl(byte_count, 2); 1553 __ jccb(Assembler::zero, L_copy_byte); 1554 __ movw(rax, Address(end_from, 8)); 1555 __ movw(Address(end_to, 8), rax); 1556 1557 __ addptr(end_from, 2); 1558 __ addptr(end_to, 2); 1559 1560 // Check for and copy trailing byte 1561 __ BIND(L_copy_byte); 1562 __ testl(byte_count, 1); 1563 __ jccb(Assembler::zero, L_exit); 1564 __ movb(rax, Address(end_from, 8)); 1565 __ movb(Address(end_to, 8), rax); 1566 1567 __ BIND(L_exit); 1568 restore_arg_regs(); 1569 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free 1570 __ xorptr(rax, rax); // return 0 1571 __ leave(); // required for proper stackwalking of RuntimeStub frame 1572 __ ret(0); 1573 1574 // Copy in multi-bytes chunks 1575 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1576 __ jmp(L_copy_4_bytes); 1577 1578 return start; 1579 } 1580 1581 // Arguments: 1582 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1583 // ignored 1584 // name - stub name string 1585 // 1586 // Inputs: 1587 // c_rarg0 - source array address 1588 // c_rarg1 - destination array address 1589 // c_rarg2 - element count, treated as ssize_t, can be zero 1590 // 1591 // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, 1592 // we let the hardware handle it. The one to eight bytes within words, 1593 // dwords or qwords that span cache line boundaries will still be loaded 1594 // and stored atomically. 1595 // 1596 address generate_conjoint_byte_copy(bool aligned, address nooverlap_target, 1597 address* entry, const char *name) { 1598 __ align(CodeEntryAlignment); 1599 StubCodeMark mark(this, "StubRoutines", name); 1600 address start = __ pc(); 1601 1602 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes; 1603 const Register from = rdi; // source array address 1604 const Register to = rsi; // destination array address 1605 const Register count = rdx; // elements count 1606 const Register byte_count = rcx; 1607 const Register qword_count = count; 1608 1609 __ enter(); // required for proper stackwalking of RuntimeStub frame 1610 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1611 1612 if (entry != NULL) { 1613 *entry = __ pc(); 1614 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1615 BLOCK_COMMENT("Entry:"); 1616 } 1617 1618 array_overlap_test(nooverlap_target, Address::times_1); 1619 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1620 // r9 and r10 may be used to save non-volatile registers 1621 1622 // 'from', 'to' and 'count' are now valid 1623 __ movptr(byte_count, count); 1624 __ shrptr(count, 3); // count => qword_count 1625 1626 // Copy from high to low addresses. 1627 1628 // Check for and copy trailing byte 1629 __ testl(byte_count, 1); 1630 __ jcc(Assembler::zero, L_copy_2_bytes); 1631 __ movb(rax, Address(from, byte_count, Address::times_1, -1)); 1632 __ movb(Address(to, byte_count, Address::times_1, -1), rax); 1633 __ decrement(byte_count); // Adjust for possible trailing word 1634 1635 // Check for and copy trailing word 1636 __ BIND(L_copy_2_bytes); 1637 __ testl(byte_count, 2); 1638 __ jcc(Assembler::zero, L_copy_4_bytes); 1639 __ movw(rax, Address(from, byte_count, Address::times_1, -2)); 1640 __ movw(Address(to, byte_count, Address::times_1, -2), rax); 1641 1642 // Check for and copy trailing dword 1643 __ BIND(L_copy_4_bytes); 1644 __ testl(byte_count, 4); 1645 __ jcc(Assembler::zero, L_copy_bytes); 1646 __ movl(rax, Address(from, qword_count, Address::times_8)); 1647 __ movl(Address(to, qword_count, Address::times_8), rax); 1648 __ jmp(L_copy_bytes); 1649 1650 // Copy trailing qwords 1651 __ BIND(L_copy_8_bytes); 1652 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1653 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1654 __ decrement(qword_count); 1655 __ jcc(Assembler::notZero, L_copy_8_bytes); 1656 1657 restore_arg_regs(); 1658 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free 1659 __ xorptr(rax, rax); // return 0 1660 __ leave(); // required for proper stackwalking of RuntimeStub frame 1661 __ ret(0); 1662 1663 // Copy in multi-bytes chunks 1664 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1665 1666 restore_arg_regs(); 1667 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); // Update counter after rscratch1 is free 1668 __ xorptr(rax, rax); // return 0 1669 __ leave(); // required for proper stackwalking of RuntimeStub frame 1670 __ ret(0); 1671 1672 return start; 1673 } 1674 1675 // Arguments: 1676 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1677 // ignored 1678 // name - stub name string 1679 // 1680 // Inputs: 1681 // c_rarg0 - source array address 1682 // c_rarg1 - destination array address 1683 // c_rarg2 - element count, treated as ssize_t, can be zero 1684 // 1685 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we 1686 // let the hardware handle it. The two or four words within dwords 1687 // or qwords that span cache line boundaries will still be loaded 1688 // and stored atomically. 1689 // 1690 // Side Effects: 1691 // disjoint_short_copy_entry is set to the no-overlap entry point 1692 // used by generate_conjoint_short_copy(). 1693 // 1694 address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) { 1695 __ align(CodeEntryAlignment); 1696 StubCodeMark mark(this, "StubRoutines", name); 1697 address start = __ pc(); 1698 1699 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes,L_copy_2_bytes,L_exit; 1700 const Register from = rdi; // source array address 1701 const Register to = rsi; // destination array address 1702 const Register count = rdx; // elements count 1703 const Register word_count = rcx; 1704 const Register qword_count = count; 1705 const Register end_from = from; // source array end address 1706 const Register end_to = to; // destination array end address 1707 // End pointers are inclusive, and if count is not zero they point 1708 // to the last unit copied: end_to[0] := end_from[0] 1709 1710 __ enter(); // required for proper stackwalking of RuntimeStub frame 1711 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1712 1713 if (entry != NULL) { 1714 *entry = __ pc(); 1715 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1716 BLOCK_COMMENT("Entry:"); 1717 } 1718 1719 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1720 // r9 and r10 may be used to save non-volatile registers 1721 1722 // 'from', 'to' and 'count' are now valid 1723 __ movptr(word_count, count); 1724 __ shrptr(count, 2); // count => qword_count 1725 1726 // Copy from low to high addresses. Use 'to' as scratch. 1727 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1728 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1729 __ negptr(qword_count); 1730 __ jmp(L_copy_bytes); 1731 1732 // Copy trailing qwords 1733 __ BIND(L_copy_8_bytes); 1734 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1735 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1736 __ increment(qword_count); 1737 __ jcc(Assembler::notZero, L_copy_8_bytes); 1738 1739 // Original 'dest' is trashed, so we can't use it as a 1740 // base register for a possible trailing word copy 1741 1742 // Check for and copy trailing dword 1743 __ BIND(L_copy_4_bytes); 1744 __ testl(word_count, 2); 1745 __ jccb(Assembler::zero, L_copy_2_bytes); 1746 __ movl(rax, Address(end_from, 8)); 1747 __ movl(Address(end_to, 8), rax); 1748 1749 __ addptr(end_from, 4); 1750 __ addptr(end_to, 4); 1751 1752 // Check for and copy trailing word 1753 __ BIND(L_copy_2_bytes); 1754 __ testl(word_count, 1); 1755 __ jccb(Assembler::zero, L_exit); 1756 __ movw(rax, Address(end_from, 8)); 1757 __ movw(Address(end_to, 8), rax); 1758 1759 __ BIND(L_exit); 1760 restore_arg_regs(); 1761 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1762 __ xorptr(rax, rax); // return 0 1763 __ leave(); // required for proper stackwalking of RuntimeStub frame 1764 __ ret(0); 1765 1766 // Copy in multi-bytes chunks 1767 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1768 __ jmp(L_copy_4_bytes); 1769 1770 return start; 1771 } 1772 1773 address generate_fill(BasicType t, bool aligned, const char *name) { 1774 __ align(CodeEntryAlignment); 1775 StubCodeMark mark(this, "StubRoutines", name); 1776 address start = __ pc(); 1777 1778 BLOCK_COMMENT("Entry:"); 1779 1780 const Register to = c_rarg0; // source array address 1781 const Register value = c_rarg1; // value 1782 const Register count = c_rarg2; // elements count 1783 1784 __ enter(); // required for proper stackwalking of RuntimeStub frame 1785 1786 __ generate_fill(t, aligned, to, value, count, rax, xmm0); 1787 1788 __ leave(); // required for proper stackwalking of RuntimeStub frame 1789 __ ret(0); 1790 return start; 1791 } 1792 1793 // Arguments: 1794 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1795 // ignored 1796 // name - stub name string 1797 // 1798 // Inputs: 1799 // c_rarg0 - source array address 1800 // c_rarg1 - destination array address 1801 // c_rarg2 - element count, treated as ssize_t, can be zero 1802 // 1803 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we 1804 // let the hardware handle it. The two or four words within dwords 1805 // or qwords that span cache line boundaries will still be loaded 1806 // and stored atomically. 1807 // 1808 address generate_conjoint_short_copy(bool aligned, address nooverlap_target, 1809 address *entry, const char *name) { 1810 __ align(CodeEntryAlignment); 1811 StubCodeMark mark(this, "StubRoutines", name); 1812 address start = __ pc(); 1813 1814 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes; 1815 const Register from = rdi; // source array address 1816 const Register to = rsi; // destination array address 1817 const Register count = rdx; // elements count 1818 const Register word_count = rcx; 1819 const Register qword_count = count; 1820 1821 __ enter(); // required for proper stackwalking of RuntimeStub frame 1822 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1823 1824 if (entry != NULL) { 1825 *entry = __ pc(); 1826 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1827 BLOCK_COMMENT("Entry:"); 1828 } 1829 1830 array_overlap_test(nooverlap_target, Address::times_2); 1831 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1832 // r9 and r10 may be used to save non-volatile registers 1833 1834 // 'from', 'to' and 'count' are now valid 1835 __ movptr(word_count, count); 1836 __ shrptr(count, 2); // count => qword_count 1837 1838 // Copy from high to low addresses. Use 'to' as scratch. 1839 1840 // Check for and copy trailing word 1841 __ testl(word_count, 1); 1842 __ jccb(Assembler::zero, L_copy_4_bytes); 1843 __ movw(rax, Address(from, word_count, Address::times_2, -2)); 1844 __ movw(Address(to, word_count, Address::times_2, -2), rax); 1845 1846 // Check for and copy trailing dword 1847 __ BIND(L_copy_4_bytes); 1848 __ testl(word_count, 2); 1849 __ jcc(Assembler::zero, L_copy_bytes); 1850 __ movl(rax, Address(from, qword_count, Address::times_8)); 1851 __ movl(Address(to, qword_count, Address::times_8), rax); 1852 __ jmp(L_copy_bytes); 1853 1854 // Copy trailing qwords 1855 __ BIND(L_copy_8_bytes); 1856 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1857 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1858 __ decrement(qword_count); 1859 __ jcc(Assembler::notZero, L_copy_8_bytes); 1860 1861 restore_arg_regs(); 1862 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1863 __ xorptr(rax, rax); // return 0 1864 __ leave(); // required for proper stackwalking of RuntimeStub frame 1865 __ ret(0); 1866 1867 // Copy in multi-bytes chunks 1868 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1869 1870 restore_arg_regs(); 1871 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); // Update counter after rscratch1 is free 1872 __ xorptr(rax, rax); // return 0 1873 __ leave(); // required for proper stackwalking of RuntimeStub frame 1874 __ ret(0); 1875 1876 return start; 1877 } 1878 1879 // Arguments: 1880 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1881 // ignored 1882 // is_oop - true => oop array, so generate store check code 1883 // name - stub name string 1884 // 1885 // Inputs: 1886 // c_rarg0 - source array address 1887 // c_rarg1 - destination array address 1888 // c_rarg2 - element count, treated as ssize_t, can be zero 1889 // 1890 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 1891 // the hardware handle it. The two dwords within qwords that span 1892 // cache line boundaries will still be loaded and stored atomicly. 1893 // 1894 // Side Effects: 1895 // disjoint_int_copy_entry is set to the no-overlap entry point 1896 // used by generate_conjoint_int_oop_copy(). 1897 // 1898 address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry, 1899 const char *name, bool dest_uninitialized = false) { 1900 __ align(CodeEntryAlignment); 1901 StubCodeMark mark(this, "StubRoutines", name); 1902 address start = __ pc(); 1903 1904 Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit; 1905 const Register from = rdi; // source array address 1906 const Register to = rsi; // destination array address 1907 const Register count = rdx; // elements count 1908 const Register dword_count = rcx; 1909 const Register qword_count = count; 1910 const Register end_from = from; // source array end address 1911 const Register end_to = to; // destination array end address 1912 const Register saved_to = r11; // saved destination array address 1913 // End pointers are inclusive, and if count is not zero they point 1914 // to the last unit copied: end_to[0] := end_from[0] 1915 1916 __ enter(); // required for proper stackwalking of RuntimeStub frame 1917 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1918 1919 if (entry != NULL) { 1920 *entry = __ pc(); 1921 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1922 BLOCK_COMMENT("Entry:"); 1923 } 1924 1925 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1926 // r9 and r10 may be used to save non-volatile registers 1927 if (is_oop) { 1928 __ movq(saved_to, to); 1929 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); 1930 } 1931 1932 // 'from', 'to' and 'count' are now valid 1933 __ movptr(dword_count, count); 1934 __ shrptr(count, 1); // count => qword_count 1935 1936 // Copy from low to high addresses. Use 'to' as scratch. 1937 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1938 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1939 __ negptr(qword_count); 1940 __ jmp(L_copy_bytes); 1941 1942 // Copy trailing qwords 1943 __ BIND(L_copy_8_bytes); 1944 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1945 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1946 __ increment(qword_count); 1947 __ jcc(Assembler::notZero, L_copy_8_bytes); 1948 1949 // Check for and copy trailing dword 1950 __ BIND(L_copy_4_bytes); 1951 __ testl(dword_count, 1); // Only byte test since the value is 0 or 1 1952 __ jccb(Assembler::zero, L_exit); 1953 __ movl(rax, Address(end_from, 8)); 1954 __ movl(Address(end_to, 8), rax); 1955 1956 __ BIND(L_exit); 1957 if (is_oop) { 1958 gen_write_ref_array_post_barrier(saved_to, dword_count, rax); 1959 } 1960 restore_arg_regs(); 1961 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 1962 __ xorptr(rax, rax); // return 0 1963 __ leave(); // required for proper stackwalking of RuntimeStub frame 1964 __ ret(0); 1965 1966 // Copy in multi-bytes chunks 1967 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 1968 __ jmp(L_copy_4_bytes); 1969 1970 return start; 1971 } 1972 1973 // Arguments: 1974 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1975 // ignored 1976 // is_oop - true => oop array, so generate store check code 1977 // name - stub name string 1978 // 1979 // Inputs: 1980 // c_rarg0 - source array address 1981 // c_rarg1 - destination array address 1982 // c_rarg2 - element count, treated as ssize_t, can be zero 1983 // 1984 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 1985 // the hardware handle it. The two dwords within qwords that span 1986 // cache line boundaries will still be loaded and stored atomicly. 1987 // 1988 address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target, 1989 address *entry, const char *name, 1990 bool dest_uninitialized = false) { 1991 __ align(CodeEntryAlignment); 1992 StubCodeMark mark(this, "StubRoutines", name); 1993 address start = __ pc(); 1994 1995 Label L_copy_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit; 1996 const Register from = rdi; // source array address 1997 const Register to = rsi; // destination array address 1998 const Register count = rdx; // elements count 1999 const Register dword_count = rcx; 2000 const Register qword_count = count; 2001 2002 __ enter(); // required for proper stackwalking of RuntimeStub frame 2003 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2004 2005 if (entry != NULL) { 2006 *entry = __ pc(); 2007 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2008 BLOCK_COMMENT("Entry:"); 2009 } 2010 2011 array_overlap_test(nooverlap_target, Address::times_4); 2012 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 2013 // r9 and r10 may be used to save non-volatile registers 2014 2015 if (is_oop) { 2016 // no registers are destroyed by this call 2017 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); 2018 } 2019 2020 assert_clean_int(count, rax); // Make sure 'count' is clean int. 2021 // 'from', 'to' and 'count' are now valid 2022 __ movptr(dword_count, count); 2023 __ shrptr(count, 1); // count => qword_count 2024 2025 // Copy from high to low addresses. Use 'to' as scratch. 2026 2027 // Check for and copy trailing dword 2028 __ testl(dword_count, 1); 2029 __ jcc(Assembler::zero, L_copy_bytes); 2030 __ movl(rax, Address(from, dword_count, Address::times_4, -4)); 2031 __ movl(Address(to, dword_count, Address::times_4, -4), rax); 2032 __ jmp(L_copy_bytes); 2033 2034 // Copy trailing qwords 2035 __ BIND(L_copy_8_bytes); 2036 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2037 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2038 __ decrement(qword_count); 2039 __ jcc(Assembler::notZero, L_copy_8_bytes); 2040 2041 if (is_oop) { 2042 __ jmp(L_exit); 2043 } 2044 restore_arg_regs(); 2045 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2046 __ xorptr(rax, rax); // return 0 2047 __ leave(); // required for proper stackwalking of RuntimeStub frame 2048 __ ret(0); 2049 2050 // Copy in multi-bytes chunks 2051 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2052 2053 __ BIND(L_exit); 2054 if (is_oop) { 2055 gen_write_ref_array_post_barrier(to, dword_count, rax); 2056 } 2057 restore_arg_regs(); 2058 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free 2059 __ xorptr(rax, rax); // return 0 2060 __ leave(); // required for proper stackwalking of RuntimeStub frame 2061 __ ret(0); 2062 2063 return start; 2064 } 2065 2066 // Arguments: 2067 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2068 // ignored 2069 // is_oop - true => oop array, so generate store check code 2070 // name - stub name string 2071 // 2072 // Inputs: 2073 // c_rarg0 - source array address 2074 // c_rarg1 - destination array address 2075 // c_rarg2 - element count, treated as ssize_t, can be zero 2076 // 2077 // Side Effects: 2078 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the 2079 // no-overlap entry point used by generate_conjoint_long_oop_copy(). 2080 // 2081 address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry, 2082 const char *name, bool dest_uninitialized = false) { 2083 __ align(CodeEntryAlignment); 2084 StubCodeMark mark(this, "StubRoutines", name); 2085 address start = __ pc(); 2086 2087 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2088 const Register from = rdi; // source array address 2089 const Register to = rsi; // destination array address 2090 const Register qword_count = rdx; // elements count 2091 const Register end_from = from; // source array end address 2092 const Register end_to = rcx; // destination array end address 2093 const Register saved_to = to; 2094 const Register saved_count = r11; 2095 // End pointers are inclusive, and if count is not zero they point 2096 // to the last unit copied: end_to[0] := end_from[0] 2097 2098 __ enter(); // required for proper stackwalking of RuntimeStub frame 2099 // Save no-overlap entry point for generate_conjoint_long_oop_copy() 2100 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2101 2102 if (entry != NULL) { 2103 *entry = __ pc(); 2104 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2105 BLOCK_COMMENT("Entry:"); 2106 } 2107 2108 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 2109 // r9 and r10 may be used to save non-volatile registers 2110 // 'from', 'to' and 'qword_count' are now valid 2111 if (is_oop) { 2112 // Save to and count for store barrier 2113 __ movptr(saved_count, qword_count); 2114 // no registers are destroyed by this call 2115 gen_write_ref_array_pre_barrier(to, qword_count, dest_uninitialized); 2116 } 2117 2118 // Copy from low to high addresses. Use 'to' as scratch. 2119 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 2120 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 2121 __ negptr(qword_count); 2122 __ jmp(L_copy_bytes); 2123 2124 // Copy trailing qwords 2125 __ BIND(L_copy_8_bytes); 2126 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 2127 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 2128 __ increment(qword_count); 2129 __ jcc(Assembler::notZero, L_copy_8_bytes); 2130 2131 if (is_oop) { 2132 __ jmp(L_exit); 2133 } else { 2134 restore_arg_regs(); 2135 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2136 __ xorptr(rax, rax); // return 0 2137 __ leave(); // required for proper stackwalking of RuntimeStub frame 2138 __ ret(0); 2139 } 2140 2141 // Copy in multi-bytes chunks 2142 copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2143 2144 if (is_oop) { 2145 __ BIND(L_exit); 2146 gen_write_ref_array_post_barrier(saved_to, saved_count, rax); 2147 } 2148 restore_arg_regs(); 2149 if (is_oop) { 2150 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2151 } else { 2152 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2153 } 2154 __ xorptr(rax, rax); // return 0 2155 __ leave(); // required for proper stackwalking of RuntimeStub frame 2156 __ ret(0); 2157 2158 return start; 2159 } 2160 2161 // Arguments: 2162 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 2163 // ignored 2164 // is_oop - true => oop array, so generate store check code 2165 // name - stub name string 2166 // 2167 // Inputs: 2168 // c_rarg0 - source array address 2169 // c_rarg1 - destination array address 2170 // c_rarg2 - element count, treated as ssize_t, can be zero 2171 // 2172 address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, 2173 address nooverlap_target, address *entry, 2174 const char *name, bool dest_uninitialized = false) { 2175 __ align(CodeEntryAlignment); 2176 StubCodeMark mark(this, "StubRoutines", name); 2177 address start = __ pc(); 2178 2179 Label L_copy_bytes, L_copy_8_bytes, L_exit; 2180 const Register from = rdi; // source array address 2181 const Register to = rsi; // destination array address 2182 const Register qword_count = rdx; // elements count 2183 const Register saved_count = rcx; 2184 2185 __ enter(); // required for proper stackwalking of RuntimeStub frame 2186 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2187 2188 if (entry != NULL) { 2189 *entry = __ pc(); 2190 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2191 BLOCK_COMMENT("Entry:"); 2192 } 2193 2194 array_overlap_test(nooverlap_target, Address::times_8); 2195 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 2196 // r9 and r10 may be used to save non-volatile registers 2197 // 'from', 'to' and 'qword_count' are now valid 2198 if (is_oop) { 2199 // Save to and count for store barrier 2200 __ movptr(saved_count, qword_count); 2201 // No registers are destroyed by this call 2202 gen_write_ref_array_pre_barrier(to, saved_count, dest_uninitialized); 2203 } 2204 2205 __ jmp(L_copy_bytes); 2206 2207 // Copy trailing qwords 2208 __ BIND(L_copy_8_bytes); 2209 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2210 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2211 __ decrement(qword_count); 2212 __ jcc(Assembler::notZero, L_copy_8_bytes); 2213 2214 if (is_oop) { 2215 __ jmp(L_exit); 2216 } else { 2217 restore_arg_regs(); 2218 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2219 __ xorptr(rax, rax); // return 0 2220 __ leave(); // required for proper stackwalking of RuntimeStub frame 2221 __ ret(0); 2222 } 2223 2224 // Copy in multi-bytes chunks 2225 copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); 2226 2227 if (is_oop) { 2228 __ BIND(L_exit); 2229 gen_write_ref_array_post_barrier(to, saved_count, rax); 2230 } 2231 restore_arg_regs(); 2232 if (is_oop) { 2233 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free 2234 } else { 2235 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); // Update counter after rscratch1 is free 2236 } 2237 __ xorptr(rax, rax); // return 0 2238 __ leave(); // required for proper stackwalking of RuntimeStub frame 2239 __ ret(0); 2240 2241 return start; 2242 } 2243 2244 2245 // Helper for generating a dynamic type check. 2246 // Smashes no registers. 2247 void generate_type_check(Register sub_klass, 2248 Register super_check_offset, 2249 Register super_klass, 2250 Label& L_success) { 2251 assert_different_registers(sub_klass, super_check_offset, super_klass); 2252 2253 BLOCK_COMMENT("type_check:"); 2254 2255 Label L_miss; 2256 2257 __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL, 2258 super_check_offset); 2259 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL); 2260 2261 // Fall through on failure! 2262 __ BIND(L_miss); 2263 } 2264 2265 // 2266 // Generate checkcasting array copy stub 2267 // 2268 // Input: 2269 // c_rarg0 - source array address 2270 // c_rarg1 - destination array address 2271 // c_rarg2 - element count, treated as ssize_t, can be zero 2272 // c_rarg3 - size_t ckoff (super_check_offset) 2273 // not Win64 2274 // c_rarg4 - oop ckval (super_klass) 2275 // Win64 2276 // rsp+40 - oop ckval (super_klass) 2277 // 2278 // Output: 2279 // rax == 0 - success 2280 // rax == -1^K - failure, where K is partial transfer count 2281 // 2282 address generate_checkcast_copy(const char *name, address *entry, 2283 bool dest_uninitialized = false) { 2284 2285 Label L_load_element, L_store_element, L_do_card_marks, L_done; 2286 2287 // Input registers (after setup_arg_regs) 2288 const Register from = rdi; // source array address 2289 const Register to = rsi; // destination array address 2290 const Register length = rdx; // elements count 2291 const Register ckoff = rcx; // super_check_offset 2292 const Register ckval = r8; // super_klass 2293 2294 // Registers used as temps (r13, r14 are save-on-entry) 2295 const Register end_from = from; // source array end address 2296 const Register end_to = r13; // destination array end address 2297 const Register count = rdx; // -(count_remaining) 2298 const Register r14_length = r14; // saved copy of length 2299 // End pointers are inclusive, and if length is not zero they point 2300 // to the last unit copied: end_to[0] := end_from[0] 2301 2302 const Register rax_oop = rax; // actual oop copied 2303 const Register r11_klass = r11; // oop._klass 2304 2305 //--------------------------------------------------------------- 2306 // Assembler stub will be used for this call to arraycopy 2307 // if the two arrays are subtypes of Object[] but the 2308 // destination array type is not equal to or a supertype 2309 // of the source type. Each element must be separately 2310 // checked. 2311 2312 __ align(CodeEntryAlignment); 2313 StubCodeMark mark(this, "StubRoutines", name); 2314 address start = __ pc(); 2315 2316 __ enter(); // required for proper stackwalking of RuntimeStub frame 2317 2318 #ifdef ASSERT 2319 // caller guarantees that the arrays really are different 2320 // otherwise, we would have to make conjoint checks 2321 { Label L; 2322 array_overlap_test(L, TIMES_OOP); 2323 __ stop("checkcast_copy within a single array"); 2324 __ bind(L); 2325 } 2326 #endif //ASSERT 2327 2328 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx 2329 // ckoff => rcx, ckval => r8 2330 // r9 and r10 may be used to save non-volatile registers 2331 #ifdef _WIN64 2332 // last argument (#4) is on stack on Win64 2333 __ movptr(ckval, Address(rsp, 6 * wordSize)); 2334 #endif 2335 2336 // Caller of this entry point must set up the argument registers. 2337 if (entry != NULL) { 2338 *entry = __ pc(); 2339 BLOCK_COMMENT("Entry:"); 2340 } 2341 2342 // allocate spill slots for r13, r14 2343 enum { 2344 saved_r13_offset, 2345 saved_r14_offset, 2346 saved_rbp_offset 2347 }; 2348 __ subptr(rsp, saved_rbp_offset * wordSize); 2349 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13); 2350 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14); 2351 2352 // check that int operands are properly extended to size_t 2353 assert_clean_int(length, rax); 2354 assert_clean_int(ckoff, rax); 2355 2356 #ifdef ASSERT 2357 BLOCK_COMMENT("assert consistent ckoff/ckval"); 2358 // The ckoff and ckval must be mutually consistent, 2359 // even though caller generates both. 2360 { Label L; 2361 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2362 __ cmpl(ckoff, Address(ckval, sco_offset)); 2363 __ jcc(Assembler::equal, L); 2364 __ stop("super_check_offset inconsistent"); 2365 __ bind(L); 2366 } 2367 #endif //ASSERT 2368 2369 // Loop-invariant addresses. They are exclusive end pointers. 2370 Address end_from_addr(from, length, TIMES_OOP, 0); 2371 Address end_to_addr(to, length, TIMES_OOP, 0); 2372 // Loop-variant addresses. They assume post-incremented count < 0. 2373 Address from_element_addr(end_from, count, TIMES_OOP, 0); 2374 Address to_element_addr(end_to, count, TIMES_OOP, 0); 2375 2376 gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); 2377 2378 // Copy from low to high addresses, indexed from the end of each array. 2379 __ lea(end_from, end_from_addr); 2380 __ lea(end_to, end_to_addr); 2381 __ movptr(r14_length, length); // save a copy of the length 2382 assert(length == count, ""); // else fix next line: 2383 __ negptr(count); // negate and test the length 2384 __ jcc(Assembler::notZero, L_load_element); 2385 2386 // Empty array: Nothing to do. 2387 __ xorptr(rax, rax); // return 0 on (trivial) success 2388 __ jmp(L_done); 2389 2390 // ======== begin loop ======== 2391 // (Loop is rotated; its entry is L_load_element.) 2392 // Loop control: 2393 // for (count = -count; count != 0; count++) 2394 // Base pointers src, dst are biased by 8*(count-1),to last element. 2395 __ align(OptoLoopAlignment); 2396 2397 __ BIND(L_store_element); 2398 __ store_heap_oop(to_element_addr, rax_oop); // store the oop 2399 __ increment(count); // increment the count toward zero 2400 __ jcc(Assembler::zero, L_do_card_marks); 2401 2402 // ======== loop entry is here ======== 2403 __ BIND(L_load_element); 2404 __ load_heap_oop(rax_oop, from_element_addr); // load the oop 2405 __ testptr(rax_oop, rax_oop); 2406 __ jcc(Assembler::zero, L_store_element); 2407 2408 __ load_klass(r11_klass, rax_oop);// query the object klass 2409 generate_type_check(r11_klass, ckoff, ckval, L_store_element); 2410 // ======== end loop ======== 2411 2412 // It was a real error; we must depend on the caller to finish the job. 2413 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops. 2414 // Emit GC store barriers for the oops we have copied (r14 + rdx), 2415 // and report their number to the caller. 2416 assert_different_registers(rax, r14_length, count, to, end_to, rcx, rscratch1); 2417 Label L_post_barrier; 2418 __ addptr(r14_length, count); // K = (original - remaining) oops 2419 __ movptr(rax, r14_length); // save the value 2420 __ notptr(rax); // report (-1^K) to caller (does not affect flags) 2421 __ jccb(Assembler::notZero, L_post_barrier); 2422 __ jmp(L_done); // K == 0, nothing was copied, skip post barrier 2423 2424 // Come here on success only. 2425 __ BIND(L_do_card_marks); 2426 __ xorptr(rax, rax); // return 0 on success 2427 2428 __ BIND(L_post_barrier); 2429 gen_write_ref_array_post_barrier(to, r14_length, rscratch1); 2430 2431 // Common exit point (success or failure). 2432 __ BIND(L_done); 2433 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize)); 2434 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize)); 2435 restore_arg_regs(); 2436 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); // Update counter after rscratch1 is free 2437 __ leave(); // required for proper stackwalking of RuntimeStub frame 2438 __ ret(0); 2439 2440 return start; 2441 } 2442 2443 // 2444 // Generate 'unsafe' array copy stub 2445 // Though just as safe as the other stubs, it takes an unscaled 2446 // size_t argument instead of an element count. 2447 // 2448 // Input: 2449 // c_rarg0 - source array address 2450 // c_rarg1 - destination array address 2451 // c_rarg2 - byte count, treated as ssize_t, can be zero 2452 // 2453 // Examines the alignment of the operands and dispatches 2454 // to a long, int, short, or byte copy loop. 2455 // 2456 address generate_unsafe_copy(const char *name, 2457 address byte_copy_entry, address short_copy_entry, 2458 address int_copy_entry, address long_copy_entry) { 2459 2460 Label L_long_aligned, L_int_aligned, L_short_aligned; 2461 2462 // Input registers (before setup_arg_regs) 2463 const Register from = c_rarg0; // source array address 2464 const Register to = c_rarg1; // destination array address 2465 const Register size = c_rarg2; // byte count (size_t) 2466 2467 // Register used as a temp 2468 const Register bits = rax; // test copy of low bits 2469 2470 __ align(CodeEntryAlignment); 2471 StubCodeMark mark(this, "StubRoutines", name); 2472 address start = __ pc(); 2473 2474 __ enter(); // required for proper stackwalking of RuntimeStub frame 2475 2476 // bump this on entry, not on exit: 2477 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr); 2478 2479 __ mov(bits, from); 2480 __ orptr(bits, to); 2481 __ orptr(bits, size); 2482 2483 __ testb(bits, BytesPerLong-1); 2484 __ jccb(Assembler::zero, L_long_aligned); 2485 2486 __ testb(bits, BytesPerInt-1); 2487 __ jccb(Assembler::zero, L_int_aligned); 2488 2489 __ testb(bits, BytesPerShort-1); 2490 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry)); 2491 2492 __ BIND(L_short_aligned); 2493 __ shrptr(size, LogBytesPerShort); // size => short_count 2494 __ jump(RuntimeAddress(short_copy_entry)); 2495 2496 __ BIND(L_int_aligned); 2497 __ shrptr(size, LogBytesPerInt); // size => int_count 2498 __ jump(RuntimeAddress(int_copy_entry)); 2499 2500 __ BIND(L_long_aligned); 2501 __ shrptr(size, LogBytesPerLong); // size => qword_count 2502 __ jump(RuntimeAddress(long_copy_entry)); 2503 2504 return start; 2505 } 2506 2507 // Perform range checks on the proposed arraycopy. 2508 // Kills temp, but nothing else. 2509 // Also, clean the sign bits of src_pos and dst_pos. 2510 void arraycopy_range_checks(Register src, // source array oop (c_rarg0) 2511 Register src_pos, // source position (c_rarg1) 2512 Register dst, // destination array oo (c_rarg2) 2513 Register dst_pos, // destination position (c_rarg3) 2514 Register length, 2515 Register temp, 2516 Label& L_failed) { 2517 BLOCK_COMMENT("arraycopy_range_checks:"); 2518 2519 // if (src_pos + length > arrayOop(src)->length()) FAIL; 2520 __ movl(temp, length); 2521 __ addl(temp, src_pos); // src_pos + length 2522 __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes())); 2523 __ jcc(Assembler::above, L_failed); 2524 2525 // if (dst_pos + length > arrayOop(dst)->length()) FAIL; 2526 __ movl(temp, length); 2527 __ addl(temp, dst_pos); // dst_pos + length 2528 __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes())); 2529 __ jcc(Assembler::above, L_failed); 2530 2531 // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'. 2532 // Move with sign extension can be used since they are positive. 2533 __ movslq(src_pos, src_pos); 2534 __ movslq(dst_pos, dst_pos); 2535 2536 BLOCK_COMMENT("arraycopy_range_checks done"); 2537 } 2538 2539 // 2540 // Generate generic array copy stubs 2541 // 2542 // Input: 2543 // c_rarg0 - src oop 2544 // c_rarg1 - src_pos (32-bits) 2545 // c_rarg2 - dst oop 2546 // c_rarg3 - dst_pos (32-bits) 2547 // not Win64 2548 // c_rarg4 - element count (32-bits) 2549 // Win64 2550 // rsp+40 - element count (32-bits) 2551 // 2552 // Output: 2553 // rax == 0 - success 2554 // rax == -1^K - failure, where K is partial transfer count 2555 // 2556 address generate_generic_copy(const char *name, 2557 address byte_copy_entry, address short_copy_entry, 2558 address int_copy_entry, address oop_copy_entry, 2559 address long_copy_entry, address checkcast_copy_entry) { 2560 2561 Label L_failed, L_failed_0, L_objArray; 2562 Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs; 2563 2564 // Input registers 2565 const Register src = c_rarg0; // source array oop 2566 const Register src_pos = c_rarg1; // source position 2567 const Register dst = c_rarg2; // destination array oop 2568 const Register dst_pos = c_rarg3; // destination position 2569 #ifndef _WIN64 2570 const Register length = c_rarg4; 2571 #else 2572 const Address length(rsp, 6 * wordSize); // elements count is on stack on Win64 2573 #endif 2574 2575 { int modulus = CodeEntryAlignment; 2576 int target = modulus - 5; // 5 = sizeof jmp(L_failed) 2577 int advance = target - (__ offset() % modulus); 2578 if (advance < 0) advance += modulus; 2579 if (advance > 0) __ nop(advance); 2580 } 2581 StubCodeMark mark(this, "StubRoutines", name); 2582 2583 // Short-hop target to L_failed. Makes for denser prologue code. 2584 __ BIND(L_failed_0); 2585 __ jmp(L_failed); 2586 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed"); 2587 2588 __ align(CodeEntryAlignment); 2589 address start = __ pc(); 2590 2591 __ enter(); // required for proper stackwalking of RuntimeStub frame 2592 2593 // bump this on entry, not on exit: 2594 inc_counter_np(SharedRuntime::_generic_array_copy_ctr); 2595 2596 //----------------------------------------------------------------------- 2597 // Assembler stub will be used for this call to arraycopy 2598 // if the following conditions are met: 2599 // 2600 // (1) src and dst must not be null. 2601 // (2) src_pos must not be negative. 2602 // (3) dst_pos must not be negative. 2603 // (4) length must not be negative. 2604 // (5) src klass and dst klass should be the same and not NULL. 2605 // (6) src and dst should be arrays. 2606 // (7) src_pos + length must not exceed length of src. 2607 // (8) dst_pos + length must not exceed length of dst. 2608 // 2609 2610 // if (src == NULL) return -1; 2611 __ testptr(src, src); // src oop 2612 size_t j1off = __ offset(); 2613 __ jccb(Assembler::zero, L_failed_0); 2614 2615 // if (src_pos < 0) return -1; 2616 __ testl(src_pos, src_pos); // src_pos (32-bits) 2617 __ jccb(Assembler::negative, L_failed_0); 2618 2619 // if (dst == NULL) return -1; 2620 __ testptr(dst, dst); // dst oop 2621 __ jccb(Assembler::zero, L_failed_0); 2622 2623 // if (dst_pos < 0) return -1; 2624 __ testl(dst_pos, dst_pos); // dst_pos (32-bits) 2625 size_t j4off = __ offset(); 2626 __ jccb(Assembler::negative, L_failed_0); 2627 2628 // The first four tests are very dense code, 2629 // but not quite dense enough to put four 2630 // jumps in a 16-byte instruction fetch buffer. 2631 // That's good, because some branch predicters 2632 // do not like jumps so close together. 2633 // Make sure of this. 2634 guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps"); 2635 2636 // registers used as temp 2637 const Register r11_length = r11; // elements count to copy 2638 const Register r10_src_klass = r10; // array klass 2639 2640 // if (length < 0) return -1; 2641 __ movl(r11_length, length); // length (elements count, 32-bits value) 2642 __ testl(r11_length, r11_length); 2643 __ jccb(Assembler::negative, L_failed_0); 2644 2645 __ load_klass(r10_src_klass, src); 2646 #ifdef ASSERT 2647 // assert(src->klass() != NULL); 2648 { 2649 BLOCK_COMMENT("assert klasses not null {"); 2650 Label L1, L2; 2651 __ testptr(r10_src_klass, r10_src_klass); 2652 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL 2653 __ bind(L1); 2654 __ stop("broken null klass"); 2655 __ bind(L2); 2656 __ load_klass(rax, dst); 2657 __ cmpq(rax, 0); 2658 __ jcc(Assembler::equal, L1); // this would be broken also 2659 BLOCK_COMMENT("} assert klasses not null done"); 2660 } 2661 #endif 2662 2663 // Load layout helper (32-bits) 2664 // 2665 // |array_tag| | header_size | element_type | |log2_element_size| 2666 // 32 30 24 16 8 2 0 2667 // 2668 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 2669 // 2670 2671 const int lh_offset = in_bytes(Klass::layout_helper_offset()); 2672 2673 // Handle objArrays completely differently... 2674 const jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2675 __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh); 2676 __ jcc(Assembler::equal, L_objArray); 2677 2678 // if (src->klass() != dst->klass()) return -1; 2679 __ load_klass(rax, dst); 2680 __ cmpq(r10_src_klass, rax); 2681 __ jcc(Assembler::notEqual, L_failed); 2682 2683 const Register rax_lh = rax; // layout helper 2684 __ movl(rax_lh, Address(r10_src_klass, lh_offset)); 2685 2686 // if (!src->is_Array()) return -1; 2687 __ cmpl(rax_lh, Klass::_lh_neutral_value); 2688 __ jcc(Assembler::greaterEqual, L_failed); 2689 2690 // At this point, it is known to be a typeArray (array_tag 0x3). 2691 #ifdef ASSERT 2692 { 2693 BLOCK_COMMENT("assert primitive array {"); 2694 Label L; 2695 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift)); 2696 __ jcc(Assembler::greaterEqual, L); 2697 __ stop("must be a primitive array"); 2698 __ bind(L); 2699 BLOCK_COMMENT("} assert primitive array done"); 2700 } 2701 #endif 2702 2703 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2704 r10, L_failed); 2705 2706 // TypeArrayKlass 2707 // 2708 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 2709 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 2710 // 2711 2712 const Register r10_offset = r10; // array offset 2713 const Register rax_elsize = rax_lh; // element size 2714 2715 __ movl(r10_offset, rax_lh); 2716 __ shrl(r10_offset, Klass::_lh_header_size_shift); 2717 __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset 2718 __ addptr(src, r10_offset); // src array offset 2719 __ addptr(dst, r10_offset); // dst array offset 2720 BLOCK_COMMENT("choose copy loop based on element size"); 2721 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize 2722 2723 // next registers should be set before the jump to corresponding stub 2724 const Register from = c_rarg0; // source array address 2725 const Register to = c_rarg1; // destination array address 2726 const Register count = c_rarg2; // elements count 2727 2728 // 'from', 'to', 'count' registers should be set in such order 2729 // since they are the same as 'src', 'src_pos', 'dst'. 2730 2731 __ BIND(L_copy_bytes); 2732 __ cmpl(rax_elsize, 0); 2733 __ jccb(Assembler::notEqual, L_copy_shorts); 2734 __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr 2735 __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr 2736 __ movl2ptr(count, r11_length); // length 2737 __ jump(RuntimeAddress(byte_copy_entry)); 2738 2739 __ BIND(L_copy_shorts); 2740 __ cmpl(rax_elsize, LogBytesPerShort); 2741 __ jccb(Assembler::notEqual, L_copy_ints); 2742 __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr 2743 __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr 2744 __ movl2ptr(count, r11_length); // length 2745 __ jump(RuntimeAddress(short_copy_entry)); 2746 2747 __ BIND(L_copy_ints); 2748 __ cmpl(rax_elsize, LogBytesPerInt); 2749 __ jccb(Assembler::notEqual, L_copy_longs); 2750 __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr 2751 __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr 2752 __ movl2ptr(count, r11_length); // length 2753 __ jump(RuntimeAddress(int_copy_entry)); 2754 2755 __ BIND(L_copy_longs); 2756 #ifdef ASSERT 2757 { 2758 BLOCK_COMMENT("assert long copy {"); 2759 Label L; 2760 __ cmpl(rax_elsize, LogBytesPerLong); 2761 __ jcc(Assembler::equal, L); 2762 __ stop("must be long copy, but elsize is wrong"); 2763 __ bind(L); 2764 BLOCK_COMMENT("} assert long copy done"); 2765 } 2766 #endif 2767 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr 2768 __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr 2769 __ movl2ptr(count, r11_length); // length 2770 __ jump(RuntimeAddress(long_copy_entry)); 2771 2772 // ObjArrayKlass 2773 __ BIND(L_objArray); 2774 // live at this point: r10_src_klass, r11_length, src[_pos], dst[_pos] 2775 2776 Label L_plain_copy, L_checkcast_copy; 2777 // test array classes for subtyping 2778 __ load_klass(rax, dst); 2779 __ cmpq(r10_src_klass, rax); // usual case is exact equality 2780 __ jcc(Assembler::notEqual, L_checkcast_copy); 2781 2782 // Identically typed arrays can be copied without element-wise checks. 2783 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2784 r10, L_failed); 2785 2786 __ lea(from, Address(src, src_pos, TIMES_OOP, 2787 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr 2788 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2789 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr 2790 __ movl2ptr(count, r11_length); // length 2791 __ BIND(L_plain_copy); 2792 __ jump(RuntimeAddress(oop_copy_entry)); 2793 2794 __ BIND(L_checkcast_copy); 2795 // live at this point: r10_src_klass, r11_length, rax (dst_klass) 2796 { 2797 // Before looking at dst.length, make sure dst is also an objArray. 2798 __ cmpl(Address(rax, lh_offset), objArray_lh); 2799 __ jcc(Assembler::notEqual, L_failed); 2800 2801 // It is safe to examine both src.length and dst.length. 2802 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2803 rax, L_failed); 2804 2805 const Register r11_dst_klass = r11; 2806 __ load_klass(r11_dst_klass, dst); // reload 2807 2808 // Marshal the base address arguments now, freeing registers. 2809 __ lea(from, Address(src, src_pos, TIMES_OOP, 2810 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2811 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2812 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2813 __ movl(count, length); // length (reloaded) 2814 Register sco_temp = c_rarg3; // this register is free now 2815 assert_different_registers(from, to, count, sco_temp, 2816 r11_dst_klass, r10_src_klass); 2817 assert_clean_int(count, sco_temp); 2818 2819 // Generate the type check. 2820 const int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2821 __ movl(sco_temp, Address(r11_dst_klass, sco_offset)); 2822 assert_clean_int(sco_temp, rax); 2823 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy); 2824 2825 // Fetch destination element klass from the ObjArrayKlass header. 2826 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 2827 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset)); 2828 __ movl( sco_temp, Address(r11_dst_klass, sco_offset)); 2829 assert_clean_int(sco_temp, rax); 2830 2831 // the checkcast_copy loop needs two extra arguments: 2832 assert(c_rarg3 == sco_temp, "#3 already in place"); 2833 // Set up arguments for checkcast_copy_entry. 2834 setup_arg_regs(4); 2835 __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris 2836 __ jump(RuntimeAddress(checkcast_copy_entry)); 2837 } 2838 2839 __ BIND(L_failed); 2840 __ xorptr(rax, rax); 2841 __ notptr(rax); // return -1 2842 __ leave(); // required for proper stackwalking of RuntimeStub frame 2843 __ ret(0); 2844 2845 return start; 2846 } 2847 2848 void generate_arraycopy_stubs() { 2849 address entry; 2850 address entry_jbyte_arraycopy; 2851 address entry_jshort_arraycopy; 2852 address entry_jint_arraycopy; 2853 address entry_oop_arraycopy; 2854 address entry_jlong_arraycopy; 2855 address entry_checkcast_arraycopy; 2856 2857 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, 2858 "jbyte_disjoint_arraycopy"); 2859 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy, 2860 "jbyte_arraycopy"); 2861 2862 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, 2863 "jshort_disjoint_arraycopy"); 2864 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy, 2865 "jshort_arraycopy"); 2866 2867 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry, 2868 "jint_disjoint_arraycopy"); 2869 StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry, 2870 &entry_jint_arraycopy, "jint_arraycopy"); 2871 2872 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry, 2873 "jlong_disjoint_arraycopy"); 2874 StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry, 2875 &entry_jlong_arraycopy, "jlong_arraycopy"); 2876 2877 2878 if (UseCompressedOops) { 2879 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry, 2880 "oop_disjoint_arraycopy"); 2881 StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry, 2882 &entry_oop_arraycopy, "oop_arraycopy"); 2883 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry, 2884 "oop_disjoint_arraycopy_uninit", 2885 /*dest_uninitialized*/true); 2886 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry, 2887 NULL, "oop_arraycopy_uninit", 2888 /*dest_uninitialized*/true); 2889 } else { 2890 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry, 2891 "oop_disjoint_arraycopy"); 2892 StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry, 2893 &entry_oop_arraycopy, "oop_arraycopy"); 2894 StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry, 2895 "oop_disjoint_arraycopy_uninit", 2896 /*dest_uninitialized*/true); 2897 StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry, 2898 NULL, "oop_arraycopy_uninit", 2899 /*dest_uninitialized*/true); 2900 } 2901 2902 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 2903 StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, 2904 /*dest_uninitialized*/true); 2905 2906 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", 2907 entry_jbyte_arraycopy, 2908 entry_jshort_arraycopy, 2909 entry_jint_arraycopy, 2910 entry_jlong_arraycopy); 2911 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy", 2912 entry_jbyte_arraycopy, 2913 entry_jshort_arraycopy, 2914 entry_jint_arraycopy, 2915 entry_oop_arraycopy, 2916 entry_jlong_arraycopy, 2917 entry_checkcast_arraycopy); 2918 2919 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 2920 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 2921 StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill"); 2922 StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill"); 2923 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 2924 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 2925 2926 // We don't generate specialized code for HeapWord-aligned source 2927 // arrays, so just use the code we've already generated 2928 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy; 2929 StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy; 2930 2931 StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy; 2932 StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy; 2933 2934 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy; 2935 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; 2936 2937 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy; 2938 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; 2939 2940 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; 2941 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; 2942 2943 StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit; 2944 StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit; 2945 } 2946 2947 void generate_math_stubs() { 2948 { 2949 StubCodeMark mark(this, "StubRoutines", "log"); 2950 StubRoutines::_intrinsic_log = (double (*)(double)) __ pc(); 2951 2952 __ subq(rsp, 8); 2953 __ movdbl(Address(rsp, 0), xmm0); 2954 __ fld_d(Address(rsp, 0)); 2955 __ flog(); 2956 __ fstp_d(Address(rsp, 0)); 2957 __ movdbl(xmm0, Address(rsp, 0)); 2958 __ addq(rsp, 8); 2959 __ ret(0); 2960 } 2961 { 2962 StubCodeMark mark(this, "StubRoutines", "log10"); 2963 StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc(); 2964 2965 __ subq(rsp, 8); 2966 __ movdbl(Address(rsp, 0), xmm0); 2967 __ fld_d(Address(rsp, 0)); 2968 __ flog10(); 2969 __ fstp_d(Address(rsp, 0)); 2970 __ movdbl(xmm0, Address(rsp, 0)); 2971 __ addq(rsp, 8); 2972 __ ret(0); 2973 } 2974 { 2975 StubCodeMark mark(this, "StubRoutines", "sin"); 2976 StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc(); 2977 2978 __ subq(rsp, 8); 2979 __ movdbl(Address(rsp, 0), xmm0); 2980 __ fld_d(Address(rsp, 0)); 2981 __ trigfunc('s'); 2982 __ fstp_d(Address(rsp, 0)); 2983 __ movdbl(xmm0, Address(rsp, 0)); 2984 __ addq(rsp, 8); 2985 __ ret(0); 2986 } 2987 { 2988 StubCodeMark mark(this, "StubRoutines", "cos"); 2989 StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc(); 2990 2991 __ subq(rsp, 8); 2992 __ movdbl(Address(rsp, 0), xmm0); 2993 __ fld_d(Address(rsp, 0)); 2994 __ trigfunc('c'); 2995 __ fstp_d(Address(rsp, 0)); 2996 __ movdbl(xmm0, Address(rsp, 0)); 2997 __ addq(rsp, 8); 2998 __ ret(0); 2999 } 3000 { 3001 StubCodeMark mark(this, "StubRoutines", "tan"); 3002 StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc(); 3003 3004 __ subq(rsp, 8); 3005 __ movdbl(Address(rsp, 0), xmm0); 3006 __ fld_d(Address(rsp, 0)); 3007 __ trigfunc('t'); 3008 __ fstp_d(Address(rsp, 0)); 3009 __ movdbl(xmm0, Address(rsp, 0)); 3010 __ addq(rsp, 8); 3011 __ ret(0); 3012 } 3013 { 3014 StubCodeMark mark(this, "StubRoutines", "exp"); 3015 StubRoutines::_intrinsic_exp = (double (*)(double)) __ pc(); 3016 3017 __ subq(rsp, 8); 3018 __ movdbl(Address(rsp, 0), xmm0); 3019 __ fld_d(Address(rsp, 0)); 3020 __ exp_with_fallback(0); 3021 __ fstp_d(Address(rsp, 0)); 3022 __ movdbl(xmm0, Address(rsp, 0)); 3023 __ addq(rsp, 8); 3024 __ ret(0); 3025 } 3026 { 3027 StubCodeMark mark(this, "StubRoutines", "pow"); 3028 StubRoutines::_intrinsic_pow = (double (*)(double,double)) __ pc(); 3029 3030 __ subq(rsp, 8); 3031 __ movdbl(Address(rsp, 0), xmm1); 3032 __ fld_d(Address(rsp, 0)); 3033 __ movdbl(Address(rsp, 0), xmm0); 3034 __ fld_d(Address(rsp, 0)); 3035 __ pow_with_fallback(0); 3036 __ fstp_d(Address(rsp, 0)); 3037 __ movdbl(xmm0, Address(rsp, 0)); 3038 __ addq(rsp, 8); 3039 __ ret(0); 3040 } 3041 } 3042 3043 // AES intrinsic stubs 3044 enum {AESBlockSize = 16}; 3045 3046 address generate_key_shuffle_mask() { 3047 __ align(16); 3048 StubCodeMark mark(this, "StubRoutines", "key_shuffle_mask"); 3049 address start = __ pc(); 3050 __ emit_data64( 0x0405060700010203, relocInfo::none ); 3051 __ emit_data64( 0x0c0d0e0f08090a0b, relocInfo::none ); 3052 return start; 3053 } 3054 3055 // Utility routine for loading a 128-bit key word in little endian format 3056 // can optionally specify that the shuffle mask is already in an xmmregister 3057 void load_key(XMMRegister xmmdst, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) { 3058 __ movdqu(xmmdst, Address(key, offset)); 3059 if (xmm_shuf_mask != NULL) { 3060 __ pshufb(xmmdst, xmm_shuf_mask); 3061 } else { 3062 __ pshufb(xmmdst, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3063 } 3064 } 3065 3066 // Arguments: 3067 // 3068 // Inputs: 3069 // c_rarg0 - source byte array address 3070 // c_rarg1 - destination byte array address 3071 // c_rarg2 - K (key) in little endian int array 3072 // 3073 address generate_aescrypt_encryptBlock() { 3074 assert(UseAES, "need AES instructions and misaligned SSE support"); 3075 __ align(CodeEntryAlignment); 3076 StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock"); 3077 Label L_doLast; 3078 address start = __ pc(); 3079 3080 const Register from = c_rarg0; // source array address 3081 const Register to = c_rarg1; // destination array address 3082 const Register key = c_rarg2; // key array address 3083 const Register keylen = rax; 3084 3085 const XMMRegister xmm_result = xmm0; 3086 const XMMRegister xmm_key_shuf_mask = xmm1; 3087 // On win64 xmm6-xmm15 must be preserved so don't use them. 3088 const XMMRegister xmm_temp1 = xmm2; 3089 const XMMRegister xmm_temp2 = xmm3; 3090 const XMMRegister xmm_temp3 = xmm4; 3091 const XMMRegister xmm_temp4 = xmm5; 3092 3093 __ enter(); // required for proper stackwalking of RuntimeStub frame 3094 3095 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3096 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3097 3098 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3099 __ movdqu(xmm_result, Address(from, 0)); // get 16 bytes of input 3100 3101 // For encryption, the java expanded key ordering is just what we need 3102 // we don't know if the key is aligned, hence not using load-execute form 3103 3104 load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask); 3105 __ pxor(xmm_result, xmm_temp1); 3106 3107 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3108 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3109 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3110 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3111 3112 __ aesenc(xmm_result, xmm_temp1); 3113 __ aesenc(xmm_result, xmm_temp2); 3114 __ aesenc(xmm_result, xmm_temp3); 3115 __ aesenc(xmm_result, xmm_temp4); 3116 3117 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3118 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3119 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3120 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3121 3122 __ aesenc(xmm_result, xmm_temp1); 3123 __ aesenc(xmm_result, xmm_temp2); 3124 __ aesenc(xmm_result, xmm_temp3); 3125 __ aesenc(xmm_result, xmm_temp4); 3126 3127 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3128 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3129 3130 __ cmpl(keylen, 44); 3131 __ jccb(Assembler::equal, L_doLast); 3132 3133 __ aesenc(xmm_result, xmm_temp1); 3134 __ aesenc(xmm_result, xmm_temp2); 3135 3136 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3137 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3138 3139 __ cmpl(keylen, 52); 3140 __ jccb(Assembler::equal, L_doLast); 3141 3142 __ aesenc(xmm_result, xmm_temp1); 3143 __ aesenc(xmm_result, xmm_temp2); 3144 3145 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3146 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3147 3148 __ BIND(L_doLast); 3149 __ aesenc(xmm_result, xmm_temp1); 3150 __ aesenclast(xmm_result, xmm_temp2); 3151 __ movdqu(Address(to, 0), xmm_result); // store the result 3152 __ xorptr(rax, rax); // return 0 3153 __ leave(); // required for proper stackwalking of RuntimeStub frame 3154 __ ret(0); 3155 3156 return start; 3157 } 3158 3159 3160 // Arguments: 3161 // 3162 // Inputs: 3163 // c_rarg0 - source byte array address 3164 // c_rarg1 - destination byte array address 3165 // c_rarg2 - K (key) in little endian int array 3166 // 3167 address generate_aescrypt_decryptBlock() { 3168 assert(UseAES, "need AES instructions and misaligned SSE support"); 3169 __ align(CodeEntryAlignment); 3170 StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock"); 3171 Label L_doLast; 3172 address start = __ pc(); 3173 3174 const Register from = c_rarg0; // source array address 3175 const Register to = c_rarg1; // destination array address 3176 const Register key = c_rarg2; // key array address 3177 const Register keylen = rax; 3178 3179 const XMMRegister xmm_result = xmm0; 3180 const XMMRegister xmm_key_shuf_mask = xmm1; 3181 // On win64 xmm6-xmm15 must be preserved so don't use them. 3182 const XMMRegister xmm_temp1 = xmm2; 3183 const XMMRegister xmm_temp2 = xmm3; 3184 const XMMRegister xmm_temp3 = xmm4; 3185 const XMMRegister xmm_temp4 = xmm5; 3186 3187 __ enter(); // required for proper stackwalking of RuntimeStub frame 3188 3189 // keylen could be only {11, 13, 15} * 4 = {44, 52, 60} 3190 __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3191 3192 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3193 __ movdqu(xmm_result, Address(from, 0)); 3194 3195 // for decryption java expanded key ordering is rotated one position from what we want 3196 // so we start from 0x10 here and hit 0x00 last 3197 // we don't know if the key is aligned, hence not using load-execute form 3198 load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask); 3199 load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask); 3200 load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask); 3201 load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask); 3202 3203 __ pxor (xmm_result, xmm_temp1); 3204 __ aesdec(xmm_result, xmm_temp2); 3205 __ aesdec(xmm_result, xmm_temp3); 3206 __ aesdec(xmm_result, xmm_temp4); 3207 3208 load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask); 3209 load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask); 3210 load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask); 3211 load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask); 3212 3213 __ aesdec(xmm_result, xmm_temp1); 3214 __ aesdec(xmm_result, xmm_temp2); 3215 __ aesdec(xmm_result, xmm_temp3); 3216 __ aesdec(xmm_result, xmm_temp4); 3217 3218 load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask); 3219 load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask); 3220 load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask); 3221 3222 __ cmpl(keylen, 44); 3223 __ jccb(Assembler::equal, L_doLast); 3224 3225 __ aesdec(xmm_result, xmm_temp1); 3226 __ aesdec(xmm_result, xmm_temp2); 3227 3228 load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask); 3229 load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask); 3230 3231 __ cmpl(keylen, 52); 3232 __ jccb(Assembler::equal, L_doLast); 3233 3234 __ aesdec(xmm_result, xmm_temp1); 3235 __ aesdec(xmm_result, xmm_temp2); 3236 3237 load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask); 3238 load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask); 3239 3240 __ BIND(L_doLast); 3241 __ aesdec(xmm_result, xmm_temp1); 3242 __ aesdec(xmm_result, xmm_temp2); 3243 3244 // for decryption the aesdeclast operation is always on key+0x00 3245 __ aesdeclast(xmm_result, xmm_temp3); 3246 __ movdqu(Address(to, 0), xmm_result); // store the result 3247 __ xorptr(rax, rax); // return 0 3248 __ leave(); // required for proper stackwalking of RuntimeStub frame 3249 __ ret(0); 3250 3251 return start; 3252 } 3253 3254 3255 // Arguments: 3256 // 3257 // Inputs: 3258 // c_rarg0 - source byte array address 3259 // c_rarg1 - destination byte array address 3260 // c_rarg2 - K (key) in little endian int array 3261 // c_rarg3 - r vector byte array address 3262 // c_rarg4 - input length 3263 // 3264 // Output: 3265 // rax - input length 3266 // 3267 address generate_cipherBlockChaining_encryptAESCrypt() { 3268 assert(UseAES, "need AES instructions and misaligned SSE support"); 3269 __ align(CodeEntryAlignment); 3270 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt"); 3271 address start = __ pc(); 3272 3273 Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256; 3274 const Register from = c_rarg0; // source array address 3275 const Register to = c_rarg1; // destination array address 3276 const Register key = c_rarg2; // key array address 3277 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3278 // and left with the results of the last encryption block 3279 #ifndef _WIN64 3280 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3281 #else 3282 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3283 const Register len_reg = r10; // pick the first volatile windows register 3284 #endif 3285 const Register pos = rax; 3286 3287 // xmm register assignments for the loops below 3288 const XMMRegister xmm_result = xmm0; 3289 const XMMRegister xmm_temp = xmm1; 3290 // keys 0-10 preloaded into xmm2-xmm12 3291 const int XMM_REG_NUM_KEY_FIRST = 2; 3292 const int XMM_REG_NUM_KEY_LAST = 15; 3293 const XMMRegister xmm_key0 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3294 const XMMRegister xmm_key10 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10); 3295 const XMMRegister xmm_key11 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11); 3296 const XMMRegister xmm_key12 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12); 3297 const XMMRegister xmm_key13 = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13); 3298 3299 __ enter(); // required for proper stackwalking of RuntimeStub frame 3300 3301 #ifdef _WIN64 3302 // on win64, fill len_reg from stack position 3303 __ movl(len_reg, len_mem); 3304 // save the xmm registers which must be preserved 6-15 3305 __ subptr(rsp, -rsp_after_call_off * wordSize); 3306 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3307 __ movdqu(xmm_save(i), as_XMMRegister(i)); 3308 } 3309 #else 3310 __ push(len_reg); // Save 3311 #endif 3312 3313 const XMMRegister xmm_key_shuf_mask = xmm_temp; // used temporarily to swap key bytes up front 3314 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3315 // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0 3316 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) { 3317 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3318 offset += 0x10; 3319 } 3320 __ movdqu(xmm_result, Address(rvec, 0x00)); // initialize xmm_result with r vec 3321 3322 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3323 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3324 __ cmpl(rax, 44); 3325 __ jcc(Assembler::notEqual, L_key_192_256); 3326 3327 // 128 bit code follows here 3328 __ movptr(pos, 0); 3329 __ align(OptoLoopAlignment); 3330 3331 __ BIND(L_loopTop_128); 3332 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3333 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3334 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3335 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) { 3336 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3337 } 3338 __ aesenclast(xmm_result, xmm_key10); 3339 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3340 // no need to store r to memory until we exit 3341 __ addptr(pos, AESBlockSize); 3342 __ subptr(len_reg, AESBlockSize); 3343 __ jcc(Assembler::notEqual, L_loopTop_128); 3344 3345 __ BIND(L_exit); 3346 __ movdqu(Address(rvec, 0), xmm_result); // final value of r stored in rvec of CipherBlockChaining object 3347 3348 #ifdef _WIN64 3349 // restore xmm regs belonging to calling function 3350 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3351 __ movdqu(as_XMMRegister(i), xmm_save(i)); 3352 } 3353 __ movl(rax, len_mem); 3354 #else 3355 __ pop(rax); // return length 3356 #endif 3357 __ leave(); // required for proper stackwalking of RuntimeStub frame 3358 __ ret(0); 3359 3360 __ BIND(L_key_192_256); 3361 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 3362 load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask); 3363 load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask); 3364 __ cmpl(rax, 52); 3365 __ jcc(Assembler::notEqual, L_key_256); 3366 3367 // 192-bit code follows here (could be changed to use more xmm registers) 3368 __ movptr(pos, 0); 3369 __ align(OptoLoopAlignment); 3370 3371 __ BIND(L_loopTop_192); 3372 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3373 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3374 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3375 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) { 3376 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3377 } 3378 __ aesenclast(xmm_result, xmm_key12); 3379 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3380 // no need to store r to memory until we exit 3381 __ addptr(pos, AESBlockSize); 3382 __ subptr(len_reg, AESBlockSize); 3383 __ jcc(Assembler::notEqual, L_loopTop_192); 3384 __ jmp(L_exit); 3385 3386 __ BIND(L_key_256); 3387 // 256-bit code follows here (could be changed to use more xmm registers) 3388 load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask); 3389 __ movptr(pos, 0); 3390 __ align(OptoLoopAlignment); 3391 3392 __ BIND(L_loopTop_256); 3393 __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of input 3394 __ pxor (xmm_result, xmm_temp); // xor with the current r vector 3395 __ pxor (xmm_result, xmm_key0); // do the aes rounds 3396 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) { 3397 __ aesenc(xmm_result, as_XMMRegister(rnum)); 3398 } 3399 load_key(xmm_temp, key, 0xe0); 3400 __ aesenclast(xmm_result, xmm_temp); 3401 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3402 // no need to store r to memory until we exit 3403 __ addptr(pos, AESBlockSize); 3404 __ subptr(len_reg, AESBlockSize); 3405 __ jcc(Assembler::notEqual, L_loopTop_256); 3406 __ jmp(L_exit); 3407 3408 return start; 3409 } 3410 3411 // Safefetch stubs. 3412 void generate_safefetch(const char* name, int size, address* entry, 3413 address* fault_pc, address* continuation_pc) { 3414 // safefetch signatures: 3415 // int SafeFetch32(int* adr, int errValue); 3416 // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue); 3417 // 3418 // arguments: 3419 // c_rarg0 = adr 3420 // c_rarg1 = errValue 3421 // 3422 // result: 3423 // PPC_RET = *adr or errValue 3424 3425 StubCodeMark mark(this, "StubRoutines", name); 3426 3427 // Entry point, pc or function descriptor. 3428 *entry = __ pc(); 3429 3430 // Load *adr into c_rarg1, may fault. 3431 *fault_pc = __ pc(); 3432 switch (size) { 3433 case 4: 3434 // int32_t 3435 __ movl(c_rarg1, Address(c_rarg0, 0)); 3436 break; 3437 case 8: 3438 // int64_t 3439 __ movq(c_rarg1, Address(c_rarg0, 0)); 3440 break; 3441 default: 3442 ShouldNotReachHere(); 3443 } 3444 3445 // return errValue or *adr 3446 *continuation_pc = __ pc(); 3447 __ movq(rax, c_rarg1); 3448 __ ret(0); 3449 } 3450 3451 // This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time 3452 // to hide instruction latency 3453 // 3454 // Arguments: 3455 // 3456 // Inputs: 3457 // c_rarg0 - source byte array address 3458 // c_rarg1 - destination byte array address 3459 // c_rarg2 - K (key) in little endian int array 3460 // c_rarg3 - r vector byte array address 3461 // c_rarg4 - input length 3462 // 3463 // Output: 3464 // rax - input length 3465 // 3466 3467 address generate_cipherBlockChaining_decryptAESCrypt_Parallel() { 3468 assert(UseAES, "need AES instructions and misaligned SSE support"); 3469 __ align(CodeEntryAlignment); 3470 StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt"); 3471 address start = __ pc(); 3472 3473 Label L_exit, L_key_192_256, L_key_256; 3474 Label L_singleBlock_loopTop_128, L_multiBlock_loopTop_128; 3475 Label L_singleBlock_loopTop_192, L_singleBlock_loopTop_256; 3476 const Register from = c_rarg0; // source array address 3477 const Register to = c_rarg1; // destination array address 3478 const Register key = c_rarg2; // key array address 3479 const Register rvec = c_rarg3; // r byte array initialized from initvector array address 3480 // and left with the results of the last encryption block 3481 #ifndef _WIN64 3482 const Register len_reg = c_rarg4; // src len (must be multiple of blocksize 16) 3483 #else 3484 const Address len_mem(rbp, 6 * wordSize); // length is on stack on Win64 3485 const Register len_reg = r10; // pick the first volatile windows register 3486 #endif 3487 const Register pos = rax; 3488 3489 // keys 0-10 preloaded into xmm2-xmm12 3490 const int XMM_REG_NUM_KEY_FIRST = 5; 3491 const int XMM_REG_NUM_KEY_LAST = 15; 3492 const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST); 3493 const XMMRegister xmm_key_last = as_XMMRegister(XMM_REG_NUM_KEY_LAST); 3494 3495 __ enter(); // required for proper stackwalking of RuntimeStub frame 3496 3497 #ifdef _WIN64 3498 // on win64, fill len_reg from stack position 3499 __ movl(len_reg, len_mem); 3500 // save the xmm registers which must be preserved 6-15 3501 __ subptr(rsp, -rsp_after_call_off * wordSize); 3502 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3503 __ movdqu(xmm_save(i), as_XMMRegister(i)); 3504 } 3505 #else 3506 __ push(len_reg); // Save 3507 #endif 3508 3509 // the java expanded key ordering is rotated one position from what we want 3510 // so we start from 0x10 here and hit 0x00 last 3511 const XMMRegister xmm_key_shuf_mask = xmm1; // used temporarily to swap key bytes up front 3512 __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr())); 3513 // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00 3514 for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) { 3515 load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask); 3516 offset += 0x10; 3517 } 3518 load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask); 3519 3520 const XMMRegister xmm_prev_block_cipher = xmm1; // holds cipher of previous block 3521 3522 // registers holding the four results in the parallelized loop 3523 const XMMRegister xmm_result0 = xmm0; 3524 const XMMRegister xmm_result1 = xmm2; 3525 const XMMRegister xmm_result2 = xmm3; 3526 const XMMRegister xmm_result3 = xmm4; 3527 3528 __ movdqu(xmm_prev_block_cipher, Address(rvec, 0x00)); // initialize with initial rvec 3529 3530 // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256)) 3531 __ movl(rax, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT))); 3532 __ cmpl(rax, 44); 3533 __ jcc(Assembler::notEqual, L_key_192_256); 3534 3535 3536 // 128-bit code follows here, parallelized 3537 __ movptr(pos, 0); 3538 __ align(OptoLoopAlignment); 3539 __ BIND(L_multiBlock_loopTop_128); 3540 __ cmpptr(len_reg, 4*AESBlockSize); // see if at least 4 blocks left 3541 __ jcc(Assembler::less, L_singleBlock_loopTop_128); 3542 3543 __ movdqu(xmm_result0, Address(from, pos, Address::times_1, 0*AESBlockSize)); // get next 4 blocks into xmmresult registers 3544 __ movdqu(xmm_result1, Address(from, pos, Address::times_1, 1*AESBlockSize)); 3545 __ movdqu(xmm_result2, Address(from, pos, Address::times_1, 2*AESBlockSize)); 3546 __ movdqu(xmm_result3, Address(from, pos, Address::times_1, 3*AESBlockSize)); 3547 3548 #define DoFour(opc, src_reg) \ 3549 __ opc(xmm_result0, src_reg); \ 3550 __ opc(xmm_result1, src_reg); \ 3551 __ opc(xmm_result2, src_reg); \ 3552 __ opc(xmm_result3, src_reg); 3553 3554 DoFour(pxor, xmm_key_first); 3555 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) { 3556 DoFour(aesdec, as_XMMRegister(rnum)); 3557 } 3558 DoFour(aesdeclast, xmm_key_last); 3559 // for each result, xor with the r vector of previous cipher block 3560 __ pxor(xmm_result0, xmm_prev_block_cipher); 3561 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 0*AESBlockSize)); 3562 __ pxor(xmm_result1, xmm_prev_block_cipher); 3563 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 1*AESBlockSize)); 3564 __ pxor(xmm_result2, xmm_prev_block_cipher); 3565 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 2*AESBlockSize)); 3566 __ pxor(xmm_result3, xmm_prev_block_cipher); 3567 __ movdqu(xmm_prev_block_cipher, Address(from, pos, Address::times_1, 3*AESBlockSize)); // this will carry over to next set of blocks 3568 3569 __ movdqu(Address(to, pos, Address::times_1, 0*AESBlockSize), xmm_result0); // store 4 results into the next 64 bytes of output 3570 __ movdqu(Address(to, pos, Address::times_1, 1*AESBlockSize), xmm_result1); 3571 __ movdqu(Address(to, pos, Address::times_1, 2*AESBlockSize), xmm_result2); 3572 __ movdqu(Address(to, pos, Address::times_1, 3*AESBlockSize), xmm_result3); 3573 3574 __ addptr(pos, 4*AESBlockSize); 3575 __ subptr(len_reg, 4*AESBlockSize); 3576 __ jmp(L_multiBlock_loopTop_128); 3577 3578 // registers used in the non-parallelized loops 3579 // xmm register assignments for the loops below 3580 const XMMRegister xmm_result = xmm0; 3581 const XMMRegister xmm_prev_block_cipher_save = xmm2; 3582 const XMMRegister xmm_key11 = xmm3; 3583 const XMMRegister xmm_key12 = xmm4; 3584 const XMMRegister xmm_temp = xmm4; 3585 3586 __ align(OptoLoopAlignment); 3587 __ BIND(L_singleBlock_loopTop_128); 3588 __ cmpptr(len_reg, 0); // any blocks left?? 3589 __ jcc(Assembler::equal, L_exit); 3590 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 3591 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector 3592 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds 3593 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) { 3594 __ aesdec(xmm_result, as_XMMRegister(rnum)); 3595 } 3596 __ aesdeclast(xmm_result, xmm_key_last); 3597 __ pxor (xmm_result, xmm_prev_block_cipher); // xor with the current r vector 3598 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3599 // no need to store r to memory until we exit 3600 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block 3601 3602 __ addptr(pos, AESBlockSize); 3603 __ subptr(len_reg, AESBlockSize); 3604 __ jmp(L_singleBlock_loopTop_128); 3605 3606 3607 __ BIND(L_exit); 3608 __ movdqu(Address(rvec, 0), xmm_prev_block_cipher); // final value of r stored in rvec of CipherBlockChaining object 3609 #ifdef _WIN64 3610 // restore regs belonging to calling function 3611 for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) { 3612 __ movdqu(as_XMMRegister(i), xmm_save(i)); 3613 } 3614 __ movl(rax, len_mem); 3615 #else 3616 __ pop(rax); // return length 3617 #endif 3618 __ leave(); // required for proper stackwalking of RuntimeStub frame 3619 __ ret(0); 3620 3621 3622 __ BIND(L_key_192_256); 3623 // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256) 3624 load_key(xmm_key11, key, 0xb0); 3625 __ cmpl(rax, 52); 3626 __ jcc(Assembler::notEqual, L_key_256); 3627 3628 // 192-bit code follows here (could be optimized to use parallelism) 3629 load_key(xmm_key12, key, 0xc0); // 192-bit key goes up to c0 3630 __ movptr(pos, 0); 3631 __ align(OptoLoopAlignment); 3632 3633 __ BIND(L_singleBlock_loopTop_192); 3634 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 3635 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector 3636 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds 3637 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) { 3638 __ aesdec(xmm_result, as_XMMRegister(rnum)); 3639 } 3640 __ aesdec(xmm_result, xmm_key11); 3641 __ aesdec(xmm_result, xmm_key12); 3642 __ aesdeclast(xmm_result, xmm_key_last); // xmm15 always came from key+0 3643 __ pxor (xmm_result, xmm_prev_block_cipher); // xor with the current r vector 3644 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3645 // no need to store r to memory until we exit 3646 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block 3647 __ addptr(pos, AESBlockSize); 3648 __ subptr(len_reg, AESBlockSize); 3649 __ jcc(Assembler::notEqual,L_singleBlock_loopTop_192); 3650 __ jmp(L_exit); 3651 3652 __ BIND(L_key_256); 3653 // 256-bit code follows here (could be optimized to use parallelism) 3654 __ movptr(pos, 0); 3655 __ align(OptoLoopAlignment); 3656 3657 __ BIND(L_singleBlock_loopTop_256); 3658 __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input 3659 __ movdqa(xmm_prev_block_cipher_save, xmm_result); // save for next r vector 3660 __ pxor (xmm_result, xmm_key_first); // do the aes dec rounds 3661 for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) { 3662 __ aesdec(xmm_result, as_XMMRegister(rnum)); 3663 } 3664 __ aesdec(xmm_result, xmm_key11); 3665 load_key(xmm_temp, key, 0xc0); 3666 __ aesdec(xmm_result, xmm_temp); 3667 load_key(xmm_temp, key, 0xd0); 3668 __ aesdec(xmm_result, xmm_temp); 3669 load_key(xmm_temp, key, 0xe0); // 256-bit key goes up to e0 3670 __ aesdec(xmm_result, xmm_temp); 3671 __ aesdeclast(xmm_result, xmm_key_last); // xmm15 came from key+0 3672 __ pxor (xmm_result, xmm_prev_block_cipher); // xor with the current r vector 3673 __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result); // store into the next 16 bytes of output 3674 // no need to store r to memory until we exit 3675 __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save); // set up next r vector with cipher input from this block 3676 __ addptr(pos, AESBlockSize); 3677 __ subptr(len_reg, AESBlockSize); 3678 __ jcc(Assembler::notEqual,L_singleBlock_loopTop_256); 3679 __ jmp(L_exit); 3680 3681 return start; 3682 } 3683 3684 /** 3685 * Arguments: 3686 * 3687 * Inputs: 3688 * c_rarg0 - int crc 3689 * c_rarg1 - byte* buf 3690 * c_rarg2 - int length 3691 * 3692 * Ouput: 3693 * rax - int crc result 3694 */ 3695 address generate_updateBytesCRC32() { 3696 assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions"); 3697 3698 __ align(CodeEntryAlignment); 3699 StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32"); 3700 3701 address start = __ pc(); 3702 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 3703 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 3704 // rscratch1: r10 3705 const Register crc = c_rarg0; // crc 3706 const Register buf = c_rarg1; // source java byte array address 3707 const Register len = c_rarg2; // length 3708 const Register table = c_rarg3; // crc_table address (reuse register) 3709 const Register tmp = r11; 3710 assert_different_registers(crc, buf, len, table, tmp, rax); 3711 3712 BLOCK_COMMENT("Entry:"); 3713 __ enter(); // required for proper stackwalking of RuntimeStub frame 3714 3715 __ kernel_crc32(crc, buf, len, table, tmp); 3716 3717 __ movl(rax, crc); 3718 __ leave(); // required for proper stackwalking of RuntimeStub frame 3719 __ ret(0); 3720 3721 return start; 3722 } 3723 3724 3725 /** 3726 * Arguments: 3727 * 3728 * Input: 3729 * c_rarg0 - x address 3730 * c_rarg1 - x length 3731 * c_rarg2 - y address 3732 * c_rarg3 - y lenth 3733 * not Win64 3734 * c_rarg4 - z address 3735 * c_rarg5 - z length 3736 * Win64 3737 * rsp+40 - z address 3738 * rsp+48 - z length 3739 */ 3740 address generate_multiplyToLen() { 3741 __ align(CodeEntryAlignment); 3742 StubCodeMark mark(this, "StubRoutines", "multiplyToLen"); 3743 3744 address start = __ pc(); 3745 // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 3746 // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 3747 const Register x = rdi; 3748 const Register xlen = rax; 3749 const Register y = rsi; 3750 const Register ylen = rcx; 3751 const Register z = r8; 3752 const Register zlen = r11; 3753 3754 // Next registers will be saved on stack in multiply_to_len(). 3755 const Register tmp1 = r12; 3756 const Register tmp2 = r13; 3757 const Register tmp3 = r14; 3758 const Register tmp4 = r15; 3759 const Register tmp5 = rbx; 3760 3761 BLOCK_COMMENT("Entry:"); 3762 __ enter(); // required for proper stackwalking of RuntimeStub frame 3763 3764 #ifndef _WIN64 3765 __ movptr(zlen, r9); // Save r9 in r11 - zlen 3766 #endif 3767 setup_arg_regs(4); // x => rdi, xlen => rsi, y => rdx 3768 // ylen => rcx, z => r8, zlen => r11 3769 // r9 and r10 may be used to save non-volatile registers 3770 #ifdef _WIN64 3771 // last 2 arguments (#4, #5) are on stack on Win64 3772 __ movptr(z, Address(rsp, 6 * wordSize)); 3773 __ movptr(zlen, Address(rsp, 7 * wordSize)); 3774 #endif 3775 3776 __ movptr(xlen, rsi); 3777 __ movptr(y, rdx); 3778 __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5); 3779 3780 restore_arg_regs(); 3781 3782 __ leave(); // required for proper stackwalking of RuntimeStub frame 3783 __ ret(0); 3784 3785 return start; 3786 } 3787 3788 #undef __ 3789 #define __ masm-> 3790 3791 // Continuation point for throwing of implicit exceptions that are 3792 // not handled in the current activation. Fabricates an exception 3793 // oop and initiates normal exception dispatching in this 3794 // frame. Since we need to preserve callee-saved values (currently 3795 // only for C2, but done for C1 as well) we need a callee-saved oop 3796 // map and therefore have to make these stubs into RuntimeStubs 3797 // rather than BufferBlobs. If the compiler needs all registers to 3798 // be preserved between the fault point and the exception handler 3799 // then it must assume responsibility for that in 3800 // AbstractCompiler::continuation_for_implicit_null_exception or 3801 // continuation_for_implicit_division_by_zero_exception. All other 3802 // implicit exceptions (e.g., NullPointerException or 3803 // AbstractMethodError on entry) are either at call sites or 3804 // otherwise assume that stack unwinding will be initiated, so 3805 // caller saved registers were assumed volatile in the compiler. 3806 address generate_throw_exception(const char* name, 3807 address runtime_entry, 3808 Register arg1 = noreg, 3809 Register arg2 = noreg) { 3810 // Information about frame layout at time of blocking runtime call. 3811 // Note that we only have to preserve callee-saved registers since 3812 // the compilers are responsible for supplying a continuation point 3813 // if they expect all registers to be preserved. 3814 enum layout { 3815 rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt, 3816 rbp_off2, 3817 return_off, 3818 return_off2, 3819 framesize // inclusive of return address 3820 }; 3821 3822 int insts_size = 512; 3823 int locs_size = 64; 3824 3825 CodeBuffer code(name, insts_size, locs_size); 3826 OopMapSet* oop_maps = new OopMapSet(); 3827 MacroAssembler* masm = new MacroAssembler(&code); 3828 3829 address start = __ pc(); 3830 3831 // This is an inlined and slightly modified version of call_VM 3832 // which has the ability to fetch the return PC out of 3833 // thread-local storage and also sets up last_Java_sp slightly 3834 // differently than the real call_VM 3835 3836 __ enter(); // required for proper stackwalking of RuntimeStub frame 3837 3838 assert(is_even(framesize/2), "sp not 16-byte aligned"); 3839 3840 // return address and rbp are already in place 3841 __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog 3842 3843 int frame_complete = __ pc() - start; 3844 3845 // Set up last_Java_sp and last_Java_fp 3846 address the_pc = __ pc(); 3847 __ set_last_Java_frame(rsp, rbp, the_pc); 3848 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack 3849 3850 // Call runtime 3851 if (arg1 != noreg) { 3852 assert(arg2 != c_rarg1, "clobbered"); 3853 __ movptr(c_rarg1, arg1); 3854 } 3855 if (arg2 != noreg) { 3856 __ movptr(c_rarg2, arg2); 3857 } 3858 __ movptr(c_rarg0, r15_thread); 3859 BLOCK_COMMENT("call runtime_entry"); 3860 __ call(RuntimeAddress(runtime_entry)); 3861 3862 // Generate oop map 3863 OopMap* map = new OopMap(framesize, 0); 3864 3865 oop_maps->add_gc_map(the_pc - start, map); 3866 3867 __ reset_last_Java_frame(true, true); 3868 3869 __ leave(); // required for proper stackwalking of RuntimeStub frame 3870 3871 // check for pending exceptions 3872 #ifdef ASSERT 3873 Label L; 3874 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), 3875 (int32_t) NULL_WORD); 3876 __ jcc(Assembler::notEqual, L); 3877 __ should_not_reach_here(); 3878 __ bind(L); 3879 #endif // ASSERT 3880 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 3881 3882 3883 // codeBlob framesize is in words (not VMRegImpl::slot_size) 3884 RuntimeStub* stub = 3885 RuntimeStub::new_runtime_stub(name, 3886 &code, 3887 frame_complete, 3888 (framesize >> (LogBytesPerWord - LogBytesPerInt)), 3889 oop_maps, false); 3890 return stub->entry_point(); 3891 } 3892 3893 void create_control_words() { 3894 // Round to nearest, 53-bit mode, exceptions masked 3895 StubRoutines::_fpu_cntrl_wrd_std = 0x027F; 3896 // Round to zero, 53-bit mode, exception mased 3897 StubRoutines::_fpu_cntrl_wrd_trunc = 0x0D7F; 3898 // Round to nearest, 24-bit mode, exceptions masked 3899 StubRoutines::_fpu_cntrl_wrd_24 = 0x007F; 3900 // Round to nearest, 64-bit mode, exceptions masked 3901 StubRoutines::_fpu_cntrl_wrd_64 = 0x037F; 3902 // Round to nearest, 64-bit mode, exceptions masked 3903 StubRoutines::_mxcsr_std = 0x1F80; 3904 // Note: the following two constants are 80-bit values 3905 // layout is critical for correct loading by FPU. 3906 // Bias for strict fp multiply/divide 3907 StubRoutines::_fpu_subnormal_bias1[0]= 0x00000000; // 2^(-15360) == 0x03ff 8000 0000 0000 0000 3908 StubRoutines::_fpu_subnormal_bias1[1]= 0x80000000; 3909 StubRoutines::_fpu_subnormal_bias1[2]= 0x03ff; 3910 // Un-Bias for strict fp multiply/divide 3911 StubRoutines::_fpu_subnormal_bias2[0]= 0x00000000; // 2^(+15360) == 0x7bff 8000 0000 0000 0000 3912 StubRoutines::_fpu_subnormal_bias2[1]= 0x80000000; 3913 StubRoutines::_fpu_subnormal_bias2[2]= 0x7bff; 3914 } 3915 3916 // Initialization 3917 void generate_initial() { 3918 // Generates all stubs and initializes the entry points 3919 3920 // This platform-specific settings are needed by generate_call_stub() 3921 create_control_words(); 3922 3923 // entry points that exist in all platforms Note: This is code 3924 // that could be shared among different platforms - however the 3925 // benefit seems to be smaller than the disadvantage of having a 3926 // much more complicated generator structure. See also comment in 3927 // stubRoutines.hpp. 3928 3929 StubRoutines::_forward_exception_entry = generate_forward_exception(); 3930 3931 StubRoutines::_call_stub_entry = 3932 generate_call_stub(StubRoutines::_call_stub_return_address); 3933 3934 // is referenced by megamorphic call 3935 StubRoutines::_catch_exception_entry = generate_catch_exception(); 3936 3937 // atomic calls 3938 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg(); 3939 StubRoutines::_atomic_xchg_ptr_entry = generate_atomic_xchg_ptr(); 3940 StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg(); 3941 StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte(); 3942 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long(); 3943 StubRoutines::_atomic_add_entry = generate_atomic_add(); 3944 StubRoutines::_atomic_add_ptr_entry = generate_atomic_add_ptr(); 3945 StubRoutines::_fence_entry = generate_orderaccess_fence(); 3946 3947 StubRoutines::_handler_for_unsafe_access_entry = 3948 generate_handler_for_unsafe_access(); 3949 3950 // platform dependent 3951 StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp(); 3952 StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp(); 3953 3954 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr(); 3955 3956 // Build this early so it's available for the interpreter. 3957 StubRoutines::_throw_StackOverflowError_entry = 3958 generate_throw_exception("StackOverflowError throw_exception", 3959 CAST_FROM_FN_PTR(address, 3960 SharedRuntime:: 3961 throw_StackOverflowError)); 3962 if (UseCRC32Intrinsics) { 3963 // set table address before stub generation which use it 3964 StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table; 3965 StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32(); 3966 } 3967 } 3968 3969 void generate_all() { 3970 // Generates all stubs and initializes the entry points 3971 3972 // These entry points require SharedInfo::stack0 to be set up in 3973 // non-core builds and need to be relocatable, so they each 3974 // fabricate a RuntimeStub internally. 3975 StubRoutines::_throw_AbstractMethodError_entry = 3976 generate_throw_exception("AbstractMethodError throw_exception", 3977 CAST_FROM_FN_PTR(address, 3978 SharedRuntime:: 3979 throw_AbstractMethodError)); 3980 3981 StubRoutines::_throw_IncompatibleClassChangeError_entry = 3982 generate_throw_exception("IncompatibleClassChangeError throw_exception", 3983 CAST_FROM_FN_PTR(address, 3984 SharedRuntime:: 3985 throw_IncompatibleClassChangeError)); 3986 3987 StubRoutines::_throw_NullPointerException_at_call_entry = 3988 generate_throw_exception("NullPointerException at call throw_exception", 3989 CAST_FROM_FN_PTR(address, 3990 SharedRuntime:: 3991 throw_NullPointerException_at_call)); 3992 3993 // entry points that are platform specific 3994 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup(); 3995 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup(); 3996 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup(); 3997 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup(); 3998 3999 StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF); 4000 StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000); 4001 StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF); 4002 StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000); 4003 4004 // support for verify_oop (must happen after universe_init) 4005 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); 4006 4007 // arraycopy stubs used by compilers 4008 generate_arraycopy_stubs(); 4009 4010 generate_math_stubs(); 4011 4012 // don't bother generating these AES intrinsic stubs unless global flag is set 4013 if (UseAESIntrinsics) { 4014 StubRoutines::x86::_key_shuffle_mask_addr = generate_key_shuffle_mask(); // needed by the others 4015 4016 StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock(); 4017 StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock(); 4018 StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt(); 4019 StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel(); 4020 } 4021 4022 // Safefetch stubs. 4023 generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry, 4024 &StubRoutines::_safefetch32_fault_pc, 4025 &StubRoutines::_safefetch32_continuation_pc); 4026 generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry, 4027 &StubRoutines::_safefetchN_fault_pc, 4028 &StubRoutines::_safefetchN_continuation_pc); 4029 #ifdef COMPILER2 4030 if (UseMultiplyToLenIntrinsic) { 4031 StubRoutines::_multiplyToLen = generate_multiplyToLen(); 4032 } 4033 #endif 4034 } 4035 4036 public: 4037 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { 4038 if (all) { 4039 generate_all(); 4040 } else { 4041 generate_initial(); 4042 } 4043 } 4044 }; // end class declaration 4045 4046 void StubGenerator_generate(CodeBuffer* code, bool all) { 4047 StubGenerator g(code, all); 4048 }