1 /* 2 * Copyright 2003-2010 Sun Microsystems, Inc. All Rights Reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, 20 * CA 95054 USA or visit www.sun.com if you need additional information or 21 * have any questions. 22 * 23 */ 24 25 #include "incls/_precompiled.incl" 26 #include "incls/_stubGenerator_x86_64.cpp.incl" 27 28 // Declaration and definition of StubGenerator (no .hpp file). 29 // For a more detailed description of the stub routine structure 30 // see the comment in stubRoutines.hpp 31 32 #define __ _masm-> 33 #define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8) 34 #define a__ ((Assembler*)_masm)-> 35 36 #ifdef PRODUCT 37 #define BLOCK_COMMENT(str) /* nothing */ 38 #else 39 #define BLOCK_COMMENT(str) __ block_comment(str) 40 #endif 41 42 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 43 const int MXCSR_MASK = 0xFFC0; // Mask out any pending exceptions 44 45 // Stub Code definitions 46 47 static address handle_unsafe_access() { 48 JavaThread* thread = JavaThread::current(); 49 address pc = thread->saved_exception_pc(); 50 // pc is the instruction which we must emulate 51 // doing a no-op is fine: return garbage from the load 52 // therefore, compute npc 53 address npc = Assembler::locate_next_instruction(pc); 54 55 // request an async exception 56 thread->set_pending_unsafe_access_error(); 57 58 // return address of next instruction to execute 59 return npc; 60 } 61 62 class StubGenerator: public StubCodeGenerator { 63 private: 64 65 #ifdef PRODUCT 66 #define inc_counter_np(counter) (0) 67 #else 68 void inc_counter_np_(int& counter) { 69 __ incrementl(ExternalAddress((address)&counter)); 70 } 71 #define inc_counter_np(counter) \ 72 BLOCK_COMMENT("inc_counter " #counter); \ 73 inc_counter_np_(counter); 74 #endif 75 76 // Call stubs are used to call Java from C 77 // 78 // Linux Arguments: 79 // c_rarg0: call wrapper address address 80 // c_rarg1: result address 81 // c_rarg2: result type BasicType 82 // c_rarg3: method methodOop 83 // c_rarg4: (interpreter) entry point address 84 // c_rarg5: parameters intptr_t* 85 // 16(rbp): parameter size (in words) int 86 // 24(rbp): thread Thread* 87 // 88 // [ return_from_Java ] <--- rsp 89 // [ argument word n ] 90 // ... 
91 // -12 [ argument word 1 ] 92 // -11 [ saved r15 ] <--- rsp_after_call 93 // -10 [ saved r14 ] 94 // -9 [ saved r13 ] 95 // -8 [ saved r12 ] 96 // -7 [ saved rbx ] 97 // -6 [ call wrapper ] 98 // -5 [ result ] 99 // -4 [ result type ] 100 // -3 [ method ] 101 // -2 [ entry point ] 102 // -1 [ parameters ] 103 // 0 [ saved rbp ] <--- rbp 104 // 1 [ return address ] 105 // 2 [ parameter size ] 106 // 3 [ thread ] 107 // 108 // Windows Arguments: 109 // c_rarg0: call wrapper address address 110 // c_rarg1: result address 111 // c_rarg2: result type BasicType 112 // c_rarg3: method methodOop 113 // 48(rbp): (interpreter) entry point address 114 // 56(rbp): parameters intptr_t* 115 // 64(rbp): parameter size (in words) int 116 // 72(rbp): thread Thread* 117 // 118 // [ return_from_Java ] <--- rsp 119 // [ argument word n ] 120 // ... 121 // -8 [ argument word 1 ] 122 // -7 [ saved r15 ] <--- rsp_after_call 123 // -6 [ saved r14 ] 124 // -5 [ saved r13 ] 125 // -4 [ saved r12 ] 126 // -3 [ saved rdi ] 127 // -2 [ saved rsi ] 128 // -1 [ saved rbx ] 129 // 0 [ saved rbp ] <--- rbp 130 // 1 [ return address ] 131 // 2 [ call wrapper ] 132 // 3 [ result ] 133 // 4 [ result type ] 134 // 5 [ method ] 135 // 6 [ entry point ] 136 // 7 [ parameters ] 137 // 8 [ parameter size ] 138 // 9 [ thread ] 139 // 140 // Windows reserves the callers stack space for arguments 1-4. 141 // We spill c_rarg0-c_rarg3 to this space. 142 143 // Call stub stack layout word offsets from rbp 144 enum call_stub_layout { 145 #ifdef _WIN64 146 rsp_after_call_off = -7, 147 r15_off = rsp_after_call_off, 148 r14_off = -6, 149 r13_off = -5, 150 r12_off = -4, 151 rdi_off = -3, 152 rsi_off = -2, 153 rbx_off = -1, 154 rbp_off = 0, 155 retaddr_off = 1, 156 call_wrapper_off = 2, 157 result_off = 3, 158 result_type_off = 4, 159 method_off = 5, 160 entry_point_off = 6, 161 parameters_off = 7, 162 parameter_size_off = 8, 163 thread_off = 9 164 #else 165 rsp_after_call_off = -12, 166 mxcsr_off = rsp_after_call_off, 167 r15_off = -11, 168 r14_off = -10, 169 r13_off = -9, 170 r12_off = -8, 171 rbx_off = -7, 172 call_wrapper_off = -6, 173 result_off = -5, 174 result_type_off = -4, 175 method_off = -3, 176 entry_point_off = -2, 177 parameters_off = -1, 178 rbp_off = 0, 179 retaddr_off = 1, 180 parameter_size_off = 2, 181 thread_off = 3 182 #endif 183 }; 184 185 address generate_call_stub(address& return_address) { 186 assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 && 187 (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off, 188 "adjust this code"); 189 StubCodeMark mark(this, "StubRoutines", "call_stub"); 190 address start = __ pc(); 191 192 // same as in generate_catch_exception()! 193 const Address rsp_after_call(rbp, rsp_after_call_off * wordSize); 194 195 const Address call_wrapper (rbp, call_wrapper_off * wordSize); 196 const Address result (rbp, result_off * wordSize); 197 const Address result_type (rbp, result_type_off * wordSize); 198 const Address method (rbp, method_off * wordSize); 199 const Address entry_point (rbp, entry_point_off * wordSize); 200 const Address parameters (rbp, parameters_off * wordSize); 201 const Address parameter_size(rbp, parameter_size_off * wordSize); 202 203 // same as in generate_catch_exception()! 
204 const Address thread (rbp, thread_off * wordSize); 205 206 const Address r15_save(rbp, r15_off * wordSize); 207 const Address r14_save(rbp, r14_off * wordSize); 208 const Address r13_save(rbp, r13_off * wordSize); 209 const Address r12_save(rbp, r12_off * wordSize); 210 const Address rbx_save(rbp, rbx_off * wordSize); 211 212 // stub code 213 __ enter(); 214 __ subptr(rsp, -rsp_after_call_off * wordSize); 215 216 // save register parameters 217 #ifndef _WIN64 218 __ movptr(parameters, c_rarg5); // parameters 219 __ movptr(entry_point, c_rarg4); // entry_point 220 #endif 221 222 __ movptr(method, c_rarg3); // method 223 __ movl(result_type, c_rarg2); // result type 224 __ movptr(result, c_rarg1); // result 225 __ movptr(call_wrapper, c_rarg0); // call wrapper 226 227 // save regs belonging to calling function 228 __ movptr(rbx_save, rbx); 229 __ movptr(r12_save, r12); 230 __ movptr(r13_save, r13); 231 __ movptr(r14_save, r14); 232 __ movptr(r15_save, r15); 233 234 #ifdef _WIN64 235 const Address rdi_save(rbp, rdi_off * wordSize); 236 const Address rsi_save(rbp, rsi_off * wordSize); 237 238 __ movptr(rsi_save, rsi); 239 __ movptr(rdi_save, rdi); 240 #else 241 const Address mxcsr_save(rbp, mxcsr_off * wordSize); 242 { 243 Label skip_ldmx; 244 __ stmxcsr(mxcsr_save); 245 __ movl(rax, mxcsr_save); 246 __ andl(rax, MXCSR_MASK); // Only check control and mask bits 247 ExternalAddress mxcsr_std(StubRoutines::x86::mxcsr_std()); 248 __ cmp32(rax, mxcsr_std); 249 __ jcc(Assembler::equal, skip_ldmx); 250 __ ldmxcsr(mxcsr_std); 251 __ bind(skip_ldmx); 252 } 253 #endif 254 255 // Load up thread register 256 __ movptr(r15_thread, thread); 257 __ reinit_heapbase(); 258 259 #ifdef ASSERT 260 // make sure we have no pending exceptions 261 { 262 Label L; 263 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); 264 __ jcc(Assembler::equal, L); 265 __ stop("StubRoutines::call_stub: entered with pending exception"); 266 __ bind(L); 267 } 268 #endif 269 270 // pass parameters if any 271 BLOCK_COMMENT("pass parameters if any"); 272 Label parameters_done; 273 __ movl(c_rarg3, parameter_size); 274 __ testl(c_rarg3, c_rarg3); 275 __ jcc(Assembler::zero, parameters_done); 276 277 Label loop; 278 __ movptr(c_rarg2, parameters); // parameter pointer 279 __ movl(c_rarg1, c_rarg3); // parameter counter is in c_rarg1 280 __ BIND(loop); 281 if (TaggedStackInterpreter) { 282 __ movl(rax, Address(c_rarg2, 0)); // get tag 283 __ addptr(c_rarg2, wordSize); // advance to next tag 284 __ push(rax); // pass tag 285 } 286 __ movptr(rax, Address(c_rarg2, 0));// get parameter 287 __ addptr(c_rarg2, wordSize); // advance to next parameter 288 __ decrementl(c_rarg1); // decrement counter 289 __ push(rax); // pass parameter 290 __ jcc(Assembler::notZero, loop); 291 292 // call Java function 293 __ BIND(parameters_done); 294 __ movptr(rbx, method); // get methodOop 295 __ movptr(c_rarg1, entry_point); // get entry_point 296 __ mov(r13, rsp); // set sender sp 297 BLOCK_COMMENT("call Java function"); 298 __ call(c_rarg1); 299 300 BLOCK_COMMENT("call_stub_return_address:"); 301 return_address = __ pc(); 302 303 // store result depending on type (everything that is not 304 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT) 305 __ movptr(c_rarg0, result); 306 Label is_long, is_float, is_double, exit; 307 __ movl(c_rarg1, result_type); 308 __ cmpl(c_rarg1, T_OBJECT); 309 __ jcc(Assembler::equal, is_long); 310 __ cmpl(c_rarg1, T_LONG); 311 __ jcc(Assembler::equal, is_long); 312 __ cmpl(c_rarg1, 
T_FLOAT); 313 __ jcc(Assembler::equal, is_float); 314 __ cmpl(c_rarg1, T_DOUBLE); 315 __ jcc(Assembler::equal, is_double); 316 317 // handle T_INT case 318 __ movl(Address(c_rarg0, 0), rax); 319 320 __ BIND(exit); 321 322 // pop parameters 323 __ lea(rsp, rsp_after_call); 324 325 #ifdef ASSERT 326 // verify that threads correspond 327 { 328 Label L, S; 329 __ cmpptr(r15_thread, thread); 330 __ jcc(Assembler::notEqual, S); 331 __ get_thread(rbx); 332 __ cmpptr(r15_thread, rbx); 333 __ jcc(Assembler::equal, L); 334 __ bind(S); 335 __ jcc(Assembler::equal, L); 336 __ stop("StubRoutines::call_stub: threads must correspond"); 337 __ bind(L); 338 } 339 #endif 340 341 // restore regs belonging to calling function 342 __ movptr(r15, r15_save); 343 __ movptr(r14, r14_save); 344 __ movptr(r13, r13_save); 345 __ movptr(r12, r12_save); 346 __ movptr(rbx, rbx_save); 347 348 #ifdef _WIN64 349 __ movptr(rdi, rdi_save); 350 __ movptr(rsi, rsi_save); 351 #else 352 __ ldmxcsr(mxcsr_save); 353 #endif 354 355 // restore rsp 356 __ addptr(rsp, -rsp_after_call_off * wordSize); 357 358 // return 359 __ pop(rbp); 360 __ ret(0); 361 362 // handle return types different from T_INT 363 __ BIND(is_long); 364 __ movq(Address(c_rarg0, 0), rax); 365 __ jmp(exit); 366 367 __ BIND(is_float); 368 __ movflt(Address(c_rarg0, 0), xmm0); 369 __ jmp(exit); 370 371 __ BIND(is_double); 372 __ movdbl(Address(c_rarg0, 0), xmm0); 373 __ jmp(exit); 374 375 return start; 376 } 377 378 // Return point for a Java call if there's an exception thrown in 379 // Java code. The exception is caught and transformed into a 380 // pending exception stored in JavaThread that can be tested from 381 // within the VM. 382 // 383 // Note: Usually the parameters are removed by the callee. In case 384 // of an exception crossing an activation frame boundary, that is 385 // not the case if the callee is compiled code => need to setup the 386 // rsp. 387 // 388 // rax: exception oop 389 390 address generate_catch_exception() { 391 StubCodeMark mark(this, "StubRoutines", "catch_exception"); 392 address start = __ pc(); 393 394 // same as in generate_call_stub(): 395 const Address rsp_after_call(rbp, rsp_after_call_off * wordSize); 396 const Address thread (rbp, thread_off * wordSize); 397 398 #ifdef ASSERT 399 // verify that threads correspond 400 { 401 Label L, S; 402 __ cmpptr(r15_thread, thread); 403 __ jcc(Assembler::notEqual, S); 404 __ get_thread(rbx); 405 __ cmpptr(r15_thread, rbx); 406 __ jcc(Assembler::equal, L); 407 __ bind(S); 408 __ stop("StubRoutines::catch_exception: threads must correspond"); 409 __ bind(L); 410 } 411 #endif 412 413 // set pending exception 414 __ verify_oop(rax); 415 416 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax); 417 __ lea(rscratch1, ExternalAddress((address)__FILE__)); 418 __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1); 419 __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__); 420 421 // complete return to VM 422 assert(StubRoutines::_call_stub_return_address != NULL, 423 "_call_stub_return_address must have been generated before"); 424 __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address)); 425 426 return start; 427 } 428 429 // Continuation point for runtime calls returning with a pending 430 // exception. The pending exception check happened in the runtime 431 // or native call stub. The pending exception in Thread is 432 // converted into a Java-level exception. 
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }

  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0);              // Copy to eax, we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
    address start = __ pc();

    __ movptr(rax, c_rarg0);              // Copy to eax, we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
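  //
  // In C terms the stub implements the usual return-the-old-value
  // compare-and-swap contract; a rough sketch only (illustrative, the real
  // work is done by the LOCK CMPXCHG emitted below):
  //
  //   jint cas(jint exchange_value, volatile jint* dest, jint compare_value) {
  //     jint old = *dest;                        // read/compare done atomically
  //     if (old == compare_value) *dest = exchange_value;
  //     return old;                              // == compare_value on success
  //   }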
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jlong atomic::atomic_cmpxchg_long(jlong exchange_value,
  //                                               volatile jlong* dest,
  //                                               jlong compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    } else {
  //       return *dest;
  //    }
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);
    if ( os::is_MP() ) __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jint atomic::add(jint add_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    if ( os::is_MP() ) __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);
    __ addl(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax, we need a return value anyhow
    if ( os::is_MP() ) __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
    __ addptr(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for void OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess).  It is used as part of debugging when
  // ps() seems to be lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp(rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // caller's fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.
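  //
  // Roughly, the generated check is equivalent to this C-level sketch
  // (illustrative only; MXCSR_MASK clears the sticky exception-flag bits so
  // only the control/rounding/mask bits are compared, and expected_standard_mxcsr
  // stands for the value saved at StubRoutines::x86::mxcsr_std()):
  //
  //   uint32_t mxcsr;
  //   __asm__ volatile("stmxcsr %0" : "=m"(mxcsr));
  //   if ((mxcsr & MXCSR_MASK) != expected_standard_mxcsr)
  //     warning("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");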
673 674 address generate_verify_mxcsr() { 675 StubCodeMark mark(this, "StubRoutines", "verify_mxcsr"); 676 address start = __ pc(); 677 678 const Address mxcsr_save(rsp, 0); 679 680 if (CheckJNICalls) { 681 Label ok_ret; 682 __ push(rax); 683 __ subptr(rsp, wordSize); // allocate a temp location 684 __ stmxcsr(mxcsr_save); 685 __ movl(rax, mxcsr_save); 686 __ andl(rax, MXCSR_MASK); // Only check control and mask bits 687 __ cmpl(rax, *(int *)(StubRoutines::x86::mxcsr_std())); 688 __ jcc(Assembler::equal, ok_ret); 689 690 __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall"); 691 692 __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std())); 693 694 __ bind(ok_ret); 695 __ addptr(rsp, wordSize); 696 __ pop(rax); 697 } 698 699 __ ret(0); 700 701 return start; 702 } 703 704 address generate_f2i_fixup() { 705 StubCodeMark mark(this, "StubRoutines", "f2i_fixup"); 706 Address inout(rsp, 5 * wordSize); // return address + 4 saves 707 708 address start = __ pc(); 709 710 Label L; 711 712 __ push(rax); 713 __ push(c_rarg3); 714 __ push(c_rarg2); 715 __ push(c_rarg1); 716 717 __ movl(rax, 0x7f800000); 718 __ xorl(c_rarg3, c_rarg3); 719 __ movl(c_rarg2, inout); 720 __ movl(c_rarg1, c_rarg2); 721 __ andl(c_rarg1, 0x7fffffff); 722 __ cmpl(rax, c_rarg1); // NaN? -> 0 723 __ jcc(Assembler::negative, L); 724 __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint 725 __ movl(c_rarg3, 0x80000000); 726 __ movl(rax, 0x7fffffff); 727 __ cmovl(Assembler::positive, c_rarg3, rax); 728 729 __ bind(L); 730 __ movptr(inout, c_rarg3); 731 732 __ pop(c_rarg1); 733 __ pop(c_rarg2); 734 __ pop(c_rarg3); 735 __ pop(rax); 736 737 __ ret(0); 738 739 return start; 740 } 741 742 address generate_f2l_fixup() { 743 StubCodeMark mark(this, "StubRoutines", "f2l_fixup"); 744 Address inout(rsp, 5 * wordSize); // return address + 4 saves 745 address start = __ pc(); 746 747 Label L; 748 749 __ push(rax); 750 __ push(c_rarg3); 751 __ push(c_rarg2); 752 __ push(c_rarg1); 753 754 __ movl(rax, 0x7f800000); 755 __ xorl(c_rarg3, c_rarg3); 756 __ movl(c_rarg2, inout); 757 __ movl(c_rarg1, c_rarg2); 758 __ andl(c_rarg1, 0x7fffffff); 759 __ cmpl(rax, c_rarg1); // NaN? -> 0 760 __ jcc(Assembler::negative, L); 761 __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong 762 __ mov64(c_rarg3, 0x8000000000000000); 763 __ mov64(rax, 0x7fffffffffffffff); 764 __ cmov(Assembler::positive, c_rarg3, rax); 765 766 __ bind(L); 767 __ movptr(inout, c_rarg3); 768 769 __ pop(c_rarg1); 770 __ pop(c_rarg2); 771 __ pop(c_rarg3); 772 __ pop(rax); 773 774 __ ret(0); 775 776 return start; 777 } 778 779 address generate_d2i_fixup() { 780 StubCodeMark mark(this, "StubRoutines", "d2i_fixup"); 781 Address inout(rsp, 6 * wordSize); // return address + 5 saves 782 783 address start = __ pc(); 784 785 Label L; 786 787 __ push(rax); 788 __ push(c_rarg3); 789 __ push(c_rarg2); 790 __ push(c_rarg1); 791 __ push(c_rarg0); 792 793 __ movl(rax, 0x7ff00000); 794 __ movq(c_rarg2, inout); 795 __ movl(c_rarg3, c_rarg2); 796 __ mov(c_rarg1, c_rarg2); 797 __ mov(c_rarg0, c_rarg2); 798 __ negl(c_rarg3); 799 __ shrptr(c_rarg1, 0x20); 800 __ orl(c_rarg3, c_rarg2); 801 __ andl(c_rarg1, 0x7fffffff); 802 __ xorl(c_rarg2, c_rarg2); 803 __ shrl(c_rarg3, 0x1f); 804 __ orl(c_rarg1, c_rarg3); 805 __ cmpl(rax, c_rarg1); 806 __ jcc(Assembler::negative, L); // NaN -> 0 807 __ testptr(c_rarg0, c_rarg0); // signed ? 
min_jint : max_jint 808 __ movl(c_rarg2, 0x80000000); 809 __ movl(rax, 0x7fffffff); 810 __ cmov(Assembler::positive, c_rarg2, rax); 811 812 __ bind(L); 813 __ movptr(inout, c_rarg2); 814 815 __ pop(c_rarg0); 816 __ pop(c_rarg1); 817 __ pop(c_rarg2); 818 __ pop(c_rarg3); 819 __ pop(rax); 820 821 __ ret(0); 822 823 return start; 824 } 825 826 address generate_d2l_fixup() { 827 StubCodeMark mark(this, "StubRoutines", "d2l_fixup"); 828 Address inout(rsp, 6 * wordSize); // return address + 5 saves 829 830 address start = __ pc(); 831 832 Label L; 833 834 __ push(rax); 835 __ push(c_rarg3); 836 __ push(c_rarg2); 837 __ push(c_rarg1); 838 __ push(c_rarg0); 839 840 __ movl(rax, 0x7ff00000); 841 __ movq(c_rarg2, inout); 842 __ movl(c_rarg3, c_rarg2); 843 __ mov(c_rarg1, c_rarg2); 844 __ mov(c_rarg0, c_rarg2); 845 __ negl(c_rarg3); 846 __ shrptr(c_rarg1, 0x20); 847 __ orl(c_rarg3, c_rarg2); 848 __ andl(c_rarg1, 0x7fffffff); 849 __ xorl(c_rarg2, c_rarg2); 850 __ shrl(c_rarg3, 0x1f); 851 __ orl(c_rarg1, c_rarg3); 852 __ cmpl(rax, c_rarg1); 853 __ jcc(Assembler::negative, L); // NaN -> 0 854 __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong 855 __ mov64(c_rarg2, 0x8000000000000000); 856 __ mov64(rax, 0x7fffffffffffffff); 857 __ cmovq(Assembler::positive, c_rarg2, rax); 858 859 __ bind(L); 860 __ movq(inout, c_rarg2); 861 862 __ pop(c_rarg0); 863 __ pop(c_rarg1); 864 __ pop(c_rarg2); 865 __ pop(c_rarg3); 866 __ pop(rax); 867 868 __ ret(0); 869 870 return start; 871 } 872 873 address generate_fp_mask(const char *stub_name, int64_t mask) { 874 StubCodeMark mark(this, "StubRoutines", stub_name); 875 876 __ align(16); 877 address start = __ pc(); 878 879 __ emit_data64( mask, relocInfo::none ); 880 __ emit_data64( mask, relocInfo::none ); 881 882 return start; 883 } 884 885 // The following routine generates a subroutine to throw an 886 // asynchronous UnknownError when an unsafe access gets a fault that 887 // could not be reasonably prevented by the programmer. (Example: 888 // SIGBUS/OBJERR.) 889 address generate_handler_for_unsafe_access() { 890 StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access"); 891 address start = __ pc(); 892 893 __ push(0); // hole for return address-to-be 894 __ pusha(); // push registers 895 Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord); 896 897 __ subptr(rsp, frame::arg_reg_save_area_bytes); 898 BLOCK_COMMENT("call handle_unsafe_access"); 899 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access))); 900 __ addptr(rsp, frame::arg_reg_save_area_bytes); 901 902 __ movptr(next_pc, rax); // stuff next address 903 __ popa(); 904 __ ret(0); // jump to next address 905 906 return start; 907 } 908 909 // Non-destructive plausibility checks for oops 910 // 911 // Arguments: 912 // all args on stack! 
913 // 914 // Stack after saving c_rarg3: 915 // [tos + 0]: saved c_rarg3 916 // [tos + 1]: saved c_rarg2 917 // [tos + 2]: saved r12 (several TemplateTable methods use it) 918 // [tos + 3]: saved flags 919 // [tos + 4]: return address 920 // * [tos + 5]: error message (char*) 921 // * [tos + 6]: object to verify (oop) 922 // * [tos + 7]: saved rax - saved by caller and bashed 923 // * = popped on exit 924 address generate_verify_oop() { 925 StubCodeMark mark(this, "StubRoutines", "verify_oop"); 926 address start = __ pc(); 927 928 Label exit, error; 929 930 __ pushf(); 931 __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr())); 932 933 __ push(r12); 934 935 // save c_rarg2 and c_rarg3 936 __ push(c_rarg2); 937 __ push(c_rarg3); 938 939 enum { 940 // After previous pushes. 941 oop_to_verify = 6 * wordSize, 942 saved_rax = 7 * wordSize, 943 944 // Before the call to MacroAssembler::debug(), see below. 945 return_addr = 16 * wordSize, 946 error_msg = 17 * wordSize 947 }; 948 949 // get object 950 __ movptr(rax, Address(rsp, oop_to_verify)); 951 952 // make sure object is 'reasonable' 953 __ testptr(rax, rax); 954 __ jcc(Assembler::zero, exit); // if obj is NULL it is OK 955 // Check if the oop is in the right area of memory 956 __ movptr(c_rarg2, rax); 957 __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask()); 958 __ andptr(c_rarg2, c_rarg3); 959 __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits()); 960 __ cmpptr(c_rarg2, c_rarg3); 961 __ jcc(Assembler::notZero, error); 962 963 // set r12 to heapbase for load_klass() 964 __ reinit_heapbase(); 965 966 // make sure klass is 'reasonable' 967 __ load_klass(rax, rax); // get klass 968 __ testptr(rax, rax); 969 __ jcc(Assembler::zero, error); // if klass is NULL it is broken 970 // Check if the klass is in the right area of memory 971 __ mov(c_rarg2, rax); 972 __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_mask()); 973 __ andptr(c_rarg2, c_rarg3); 974 __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_bits()); 975 __ cmpptr(c_rarg2, c_rarg3); 976 __ jcc(Assembler::notZero, error); 977 978 // make sure klass' klass is 'reasonable' 979 __ load_klass(rax, rax); 980 __ testptr(rax, rax); 981 __ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken 982 // Check if the klass' klass is in the right area of memory 983 __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_mask()); 984 __ andptr(rax, c_rarg3); 985 __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_bits()); 986 __ cmpptr(rax, c_rarg3); 987 __ jcc(Assembler::notZero, error); 988 989 // return if everything seems ok 990 __ bind(exit); 991 __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back 992 __ pop(c_rarg3); // restore c_rarg3 993 __ pop(c_rarg2); // restore c_rarg2 994 __ pop(r12); // restore r12 995 __ popf(); // restore flags 996 __ ret(3 * wordSize); // pop caller saved stuff 997 998 // handle errors 999 __ bind(error); 1000 __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back 1001 __ pop(c_rarg3); // get saved c_rarg3 back 1002 __ pop(c_rarg2); // get saved c_rarg2 back 1003 __ pop(r12); // get saved r12 back 1004 __ popf(); // get saved flags off stack -- 1005 // will be ignored 1006 1007 __ pusha(); // push registers 1008 // (rip is already 1009 // already pushed) 1010 // debug(char* msg, int64_t pc, int64_t regs[]) 1011 // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and 1012 // pushed all the registers, so now the stack looks like: 1013 // [tos + 0] 16 saved registers 1014 // [tos 
+ 16] return address 1015 // * [tos + 17] error message (char*) 1016 // * [tos + 18] object to verify (oop) 1017 // * [tos + 19] saved rax - saved by caller and bashed 1018 // * = popped on exit 1019 1020 __ movptr(c_rarg0, Address(rsp, error_msg)); // pass address of error message 1021 __ movptr(c_rarg1, Address(rsp, return_addr)); // pass return address 1022 __ movq(c_rarg2, rsp); // pass address of regs on stack 1023 __ mov(r12, rsp); // remember rsp 1024 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 1025 __ andptr(rsp, -16); // align stack as required by ABI 1026 BLOCK_COMMENT("call MacroAssembler::debug"); 1027 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64))); 1028 __ mov(rsp, r12); // restore rsp 1029 __ popa(); // pop registers (includes r12) 1030 __ ret(3 * wordSize); // pop caller saved stuff 1031 1032 return start; 1033 } 1034 1035 static address disjoint_byte_copy_entry; 1036 static address disjoint_short_copy_entry; 1037 static address disjoint_int_copy_entry; 1038 static address disjoint_long_copy_entry; 1039 static address disjoint_oop_copy_entry; 1040 1041 static address byte_copy_entry; 1042 static address short_copy_entry; 1043 static address int_copy_entry; 1044 static address long_copy_entry; 1045 static address oop_copy_entry; 1046 1047 static address checkcast_copy_entry; 1048 1049 // 1050 // Verify that a register contains clean 32-bits positive value 1051 // (high 32-bits are 0) so it could be used in 64-bits shifts. 1052 // 1053 // Input: 1054 // Rint - 32-bits value 1055 // Rtmp - scratch 1056 // 1057 void assert_clean_int(Register Rint, Register Rtmp) { 1058 #ifdef ASSERT 1059 Label L; 1060 assert_different_registers(Rtmp, Rint); 1061 __ movslq(Rtmp, Rint); 1062 __ cmpq(Rtmp, Rint); 1063 __ jcc(Assembler::equal, L); 1064 __ stop("high 32-bits of int value are not 0"); 1065 __ bind(L); 1066 #endif 1067 } 1068 1069 // Generate overlap test for array copy stubs 1070 // 1071 // Input: 1072 // c_rarg0 - from 1073 // c_rarg1 - to 1074 // c_rarg2 - element count 1075 // 1076 // Output: 1077 // rax - &from[element count - 1] 1078 // 1079 void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) { 1080 assert(no_overlap_target != NULL, "must be generated"); 1081 array_overlap_test(no_overlap_target, NULL, sf); 1082 } 1083 void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) { 1084 array_overlap_test(NULL, &L_no_overlap, sf); 1085 } 1086 void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) { 1087 const Register from = c_rarg0; 1088 const Register to = c_rarg1; 1089 const Register count = c_rarg2; 1090 const Register end_from = rax; 1091 1092 __ cmpptr(to, from); 1093 __ lea(end_from, Address(from, count, sf, 0)); 1094 if (NOLp == NULL) { 1095 ExternalAddress no_overlap(no_overlap_target); 1096 __ jump_cc(Assembler::belowEqual, no_overlap); 1097 __ cmpptr(to, end_from); 1098 __ jump_cc(Assembler::aboveEqual, no_overlap); 1099 } else { 1100 __ jcc(Assembler::belowEqual, (*NOLp)); 1101 __ cmpptr(to, end_from); 1102 __ jcc(Assembler::aboveEqual, (*NOLp)); 1103 } 1104 } 1105 1106 // Shuffle first three arg regs on Windows into Linux/Solaris locations. 1107 // 1108 // Outputs: 1109 // rdi - rcx 1110 // rsi - rdx 1111 // rdx - r8 1112 // rcx - r9 1113 // 1114 // Registers r9 and r10 are used to save rdi and rsi on Windows, which latter 1115 // are non-volatile. r9 and r10 should not be used by the caller. 
1116 // 1117 void setup_arg_regs(int nargs = 3) { 1118 const Register saved_rdi = r9; 1119 const Register saved_rsi = r10; 1120 assert(nargs == 3 || nargs == 4, "else fix"); 1121 #ifdef _WIN64 1122 assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9, 1123 "unexpected argument registers"); 1124 if (nargs >= 4) 1125 __ mov(rax, r9); // r9 is also saved_rdi 1126 __ movptr(saved_rdi, rdi); 1127 __ movptr(saved_rsi, rsi); 1128 __ mov(rdi, rcx); // c_rarg0 1129 __ mov(rsi, rdx); // c_rarg1 1130 __ mov(rdx, r8); // c_rarg2 1131 if (nargs >= 4) 1132 __ mov(rcx, rax); // c_rarg3 (via rax) 1133 #else 1134 assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx, 1135 "unexpected argument registers"); 1136 #endif 1137 } 1138 1139 void restore_arg_regs() { 1140 const Register saved_rdi = r9; 1141 const Register saved_rsi = r10; 1142 #ifdef _WIN64 1143 __ movptr(rdi, saved_rdi); 1144 __ movptr(rsi, saved_rsi); 1145 #endif 1146 } 1147 1148 // Generate code for an array write pre barrier 1149 // 1150 // addr - starting address 1151 // count - element count 1152 // 1153 // Destroy no registers! 1154 // 1155 void gen_write_ref_array_pre_barrier(Register addr, Register count) { 1156 BarrierSet* bs = Universe::heap()->barrier_set(); 1157 switch (bs->kind()) { 1158 case BarrierSet::G1SATBCT: 1159 case BarrierSet::G1SATBCTLogging: 1160 { 1161 __ pusha(); // push registers 1162 if (count == c_rarg0) { 1163 if (addr == c_rarg1) { 1164 // exactly backwards!! 1165 __ xchgptr(c_rarg1, c_rarg0); 1166 } else { 1167 __ movptr(c_rarg1, count); 1168 __ movptr(c_rarg0, addr); 1169 } 1170 1171 } else { 1172 __ movptr(c_rarg0, addr); 1173 __ movptr(c_rarg1, count); 1174 } 1175 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre))); 1176 __ popa(); 1177 } 1178 break; 1179 case BarrierSet::CardTableModRef: 1180 case BarrierSet::CardTableExtension: 1181 case BarrierSet::ModRef: 1182 break; 1183 default: 1184 ShouldNotReachHere(); 1185 1186 } 1187 } 1188 1189 // 1190 // Generate code for an array write post barrier 1191 // 1192 // Input: 1193 // start - register containing starting address of destination array 1194 // end - register containing ending address of destination array 1195 // scratch - scratch register 1196 // 1197 // The input registers are overwritten. 1198 // The ending address is inclusive. 
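  //
  // For the card-table barrier kinds this amounts to dirtying every card
  // spanned by [start, end]; a C-level sketch (illustrative only, 0 being the
  // dirty value and byte_map_base/card_shift taken from the card table):
  //
  //   for (uintptr_t card = (uintptr_t)start >> card_shift;
  //        card <= ((uintptr_t)end >> card_shift); card++) {
  //     byte_map_base[card] = 0;
  //   }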
1199 void gen_write_ref_array_post_barrier(Register start, Register end, Register scratch) { 1200 assert_different_registers(start, end, scratch); 1201 BarrierSet* bs = Universe::heap()->barrier_set(); 1202 switch (bs->kind()) { 1203 case BarrierSet::G1SATBCT: 1204 case BarrierSet::G1SATBCTLogging: 1205 1206 { 1207 __ pusha(); // push registers (overkill) 1208 // must compute element count unless barrier set interface is changed (other platforms supply count) 1209 assert_different_registers(start, end, scratch); 1210 __ lea(scratch, Address(end, BytesPerHeapOop)); 1211 __ subptr(scratch, start); // subtract start to get #bytes 1212 __ shrptr(scratch, LogBytesPerHeapOop); // convert to element count 1213 __ mov(c_rarg0, start); 1214 __ mov(c_rarg1, scratch); 1215 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post))); 1216 __ popa(); 1217 } 1218 break; 1219 case BarrierSet::CardTableModRef: 1220 case BarrierSet::CardTableExtension: 1221 { 1222 CardTableModRefBS* ct = (CardTableModRefBS*)bs; 1223 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); 1224 1225 Label L_loop; 1226 1227 __ shrptr(start, CardTableModRefBS::card_shift); 1228 __ addptr(end, BytesPerHeapOop); 1229 __ shrptr(end, CardTableModRefBS::card_shift); 1230 __ subptr(end, start); // number of bytes to copy 1231 1232 intptr_t disp = (intptr_t) ct->byte_map_base; 1233 if (__ is_simm32(disp)) { 1234 Address cardtable(noreg, noreg, Address::no_scale, disp); 1235 __ lea(scratch, cardtable); 1236 } else { 1237 ExternalAddress cardtable((address)disp); 1238 __ lea(scratch, cardtable); 1239 } 1240 1241 const Register count = end; // 'end' register contains bytes count now 1242 __ addptr(start, scratch); 1243 __ BIND(L_loop); 1244 __ movb(Address(start, count, Address::times_1), 0); 1245 __ decrement(count); 1246 __ jcc(Assembler::greaterEqual, L_loop); 1247 } 1248 break; 1249 default: 1250 ShouldNotReachHere(); 1251 1252 } 1253 } 1254 1255 1256 // Copy big chunks forward 1257 // 1258 // Inputs: 1259 // end_from - source arrays end address 1260 // end_to - destination array end address 1261 // qword_count - 64-bits element count, negative 1262 // to - scratch 1263 // L_copy_32_bytes - entry label 1264 // L_copy_8_bytes - exit label 1265 // 1266 void copy_32_bytes_forward(Register end_from, Register end_to, 1267 Register qword_count, Register to, 1268 Label& L_copy_32_bytes, Label& L_copy_8_bytes) { 1269 DEBUG_ONLY(__ stop("enter at entry label, not here")); 1270 Label L_loop; 1271 __ align(16); 1272 __ BIND(L_loop); 1273 if(UseUnalignedLoadStores) { 1274 __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24)); 1275 __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0); 1276 __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8)); 1277 __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1); 1278 1279 } else { 1280 __ movq(to, Address(end_from, qword_count, Address::times_8, -24)); 1281 __ movq(Address(end_to, qword_count, Address::times_8, -24), to); 1282 __ movq(to, Address(end_from, qword_count, Address::times_8, -16)); 1283 __ movq(Address(end_to, qword_count, Address::times_8, -16), to); 1284 __ movq(to, Address(end_from, qword_count, Address::times_8, - 8)); 1285 __ movq(Address(end_to, qword_count, Address::times_8, - 8), to); 1286 __ movq(to, Address(end_from, qword_count, Address::times_8, - 0)); 1287 __ movq(Address(end_to, qword_count, Address::times_8, - 0), to); 1288 } 1289 __ BIND(L_copy_32_bytes); 1290 __ 
addptr(qword_count, 4); 1291 __ jcc(Assembler::lessEqual, L_loop); 1292 __ subptr(qword_count, 4); 1293 __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords 1294 } 1295 1296 1297 // Copy big chunks backward 1298 // 1299 // Inputs: 1300 // from - source arrays address 1301 // dest - destination array address 1302 // qword_count - 64-bits element count 1303 // to - scratch 1304 // L_copy_32_bytes - entry label 1305 // L_copy_8_bytes - exit label 1306 // 1307 void copy_32_bytes_backward(Register from, Register dest, 1308 Register qword_count, Register to, 1309 Label& L_copy_32_bytes, Label& L_copy_8_bytes) { 1310 DEBUG_ONLY(__ stop("enter at entry label, not here")); 1311 Label L_loop; 1312 __ align(16); 1313 __ BIND(L_loop); 1314 if(UseUnalignedLoadStores) { 1315 __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16)); 1316 __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0); 1317 __ movdqu(xmm1, Address(from, qword_count, Address::times_8, 0)); 1318 __ movdqu(Address(dest, qword_count, Address::times_8, 0), xmm1); 1319 1320 } else { 1321 __ movq(to, Address(from, qword_count, Address::times_8, 24)); 1322 __ movq(Address(dest, qword_count, Address::times_8, 24), to); 1323 __ movq(to, Address(from, qword_count, Address::times_8, 16)); 1324 __ movq(Address(dest, qword_count, Address::times_8, 16), to); 1325 __ movq(to, Address(from, qword_count, Address::times_8, 8)); 1326 __ movq(Address(dest, qword_count, Address::times_8, 8), to); 1327 __ movq(to, Address(from, qword_count, Address::times_8, 0)); 1328 __ movq(Address(dest, qword_count, Address::times_8, 0), to); 1329 } 1330 __ BIND(L_copy_32_bytes); 1331 __ subptr(qword_count, 4); 1332 __ jcc(Assembler::greaterEqual, L_loop); 1333 __ addptr(qword_count, 4); 1334 __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords 1335 } 1336 1337 1338 // Arguments: 1339 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1340 // ignored 1341 // name - stub name string 1342 // 1343 // Inputs: 1344 // c_rarg0 - source array address 1345 // c_rarg1 - destination array address 1346 // c_rarg2 - element count, treated as ssize_t, can be zero 1347 // 1348 // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, 1349 // we let the hardware handle it. The one to eight bytes within words, 1350 // dwords or qwords that span cache line boundaries will still be loaded 1351 // and stored atomically. 1352 // 1353 // Side Effects: 1354 // disjoint_byte_copy_entry is set to the no-overlap entry point 1355 // used by generate_conjoint_byte_copy(). 
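  //
  // The 8-byte main loop emitted below walks forward off inclusive end
  // pointers with a negative qword index, so the loop test falls out of the
  // index update; a C-level sketch (illustrative only):
  //
  //   jlong* end_from = (jlong*)from + qwords - 1;   // inclusive end pointers
  //   jlong* end_to   = (jlong*)to   + qwords - 1;
  //   for (intptr_t i = 1 - (intptr_t)qwords; i <= 0; i++)
  //     end_to[i] = end_from[i];                     // i == 0 copies the last qword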
1356 // 1357 address generate_disjoint_byte_copy(bool aligned, const char *name) { 1358 __ align(CodeEntryAlignment); 1359 StubCodeMark mark(this, "StubRoutines", name); 1360 address start = __ pc(); 1361 1362 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes; 1363 Label L_copy_byte, L_exit; 1364 const Register from = rdi; // source array address 1365 const Register to = rsi; // destination array address 1366 const Register count = rdx; // elements count 1367 const Register byte_count = rcx; 1368 const Register qword_count = count; 1369 const Register end_from = from; // source array end address 1370 const Register end_to = to; // destination array end address 1371 // End pointers are inclusive, and if count is not zero they point 1372 // to the last unit copied: end_to[0] := end_from[0] 1373 1374 __ enter(); // required for proper stackwalking of RuntimeStub frame 1375 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1376 1377 disjoint_byte_copy_entry = __ pc(); 1378 BLOCK_COMMENT("Entry:"); 1379 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1380 1381 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1382 // r9 and r10 may be used to save non-volatile registers 1383 1384 // 'from', 'to' and 'count' are now valid 1385 __ movptr(byte_count, count); 1386 __ shrptr(count, 3); // count => qword_count 1387 1388 // Copy from low to high addresses. Use 'to' as scratch. 1389 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1390 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1391 __ negptr(qword_count); // make the count negative 1392 __ jmp(L_copy_32_bytes); 1393 1394 // Copy trailing qwords 1395 __ BIND(L_copy_8_bytes); 1396 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1397 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1398 __ increment(qword_count); 1399 __ jcc(Assembler::notZero, L_copy_8_bytes); 1400 1401 // Check for and copy trailing dword 1402 __ BIND(L_copy_4_bytes); 1403 __ testl(byte_count, 4); 1404 __ jccb(Assembler::zero, L_copy_2_bytes); 1405 __ movl(rax, Address(end_from, 8)); 1406 __ movl(Address(end_to, 8), rax); 1407 1408 __ addptr(end_from, 4); 1409 __ addptr(end_to, 4); 1410 1411 // Check for and copy trailing word 1412 __ BIND(L_copy_2_bytes); 1413 __ testl(byte_count, 2); 1414 __ jccb(Assembler::zero, L_copy_byte); 1415 __ movw(rax, Address(end_from, 8)); 1416 __ movw(Address(end_to, 8), rax); 1417 1418 __ addptr(end_from, 2); 1419 __ addptr(end_to, 2); 1420 1421 // Check for and copy trailing byte 1422 __ BIND(L_copy_byte); 1423 __ testl(byte_count, 1); 1424 __ jccb(Assembler::zero, L_exit); 1425 __ movb(rax, Address(end_from, 8)); 1426 __ movb(Address(end_to, 8), rax); 1427 1428 __ BIND(L_exit); 1429 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); 1430 restore_arg_regs(); 1431 __ xorptr(rax, rax); // return 0 1432 __ leave(); // required for proper stackwalking of RuntimeStub frame 1433 __ ret(0); 1434 1435 // Copy in 32-bytes chunks 1436 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes); 1437 __ jmp(L_copy_4_bytes); 1438 1439 return start; 1440 } 1441 1442 // Arguments: 1443 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1444 // ignored 1445 // name - stub name string 1446 // 1447 // Inputs: 1448 // c_rarg0 - source array address 1449 // c_rarg1 - destination array address 1450 // c_rarg2 - element count, treated as ssize_t, can be zero 1451 // 1452 // 
If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, 1453 // we let the hardware handle it. The one to eight bytes within words, 1454 // dwords or qwords that span cache line boundaries will still be loaded 1455 // and stored atomically. 1456 // 1457 address generate_conjoint_byte_copy(bool aligned, const char *name) { 1458 __ align(CodeEntryAlignment); 1459 StubCodeMark mark(this, "StubRoutines", name); 1460 address start = __ pc(); 1461 1462 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes; 1463 const Register from = rdi; // source array address 1464 const Register to = rsi; // destination array address 1465 const Register count = rdx; // elements count 1466 const Register byte_count = rcx; 1467 const Register qword_count = count; 1468 1469 __ enter(); // required for proper stackwalking of RuntimeStub frame 1470 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1471 1472 byte_copy_entry = __ pc(); 1473 BLOCK_COMMENT("Entry:"); 1474 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1475 1476 array_overlap_test(disjoint_byte_copy_entry, Address::times_1); 1477 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1478 // r9 and r10 may be used to save non-volatile registers 1479 1480 // 'from', 'to' and 'count' are now valid 1481 __ movptr(byte_count, count); 1482 __ shrptr(count, 3); // count => qword_count 1483 1484 // Copy from high to low addresses. 1485 1486 // Check for and copy trailing byte 1487 __ testl(byte_count, 1); 1488 __ jcc(Assembler::zero, L_copy_2_bytes); 1489 __ movb(rax, Address(from, byte_count, Address::times_1, -1)); 1490 __ movb(Address(to, byte_count, Address::times_1, -1), rax); 1491 __ decrement(byte_count); // Adjust for possible trailing word 1492 1493 // Check for and copy trailing word 1494 __ BIND(L_copy_2_bytes); 1495 __ testl(byte_count, 2); 1496 __ jcc(Assembler::zero, L_copy_4_bytes); 1497 __ movw(rax, Address(from, byte_count, Address::times_1, -2)); 1498 __ movw(Address(to, byte_count, Address::times_1, -2), rax); 1499 1500 // Check for and copy trailing dword 1501 __ BIND(L_copy_4_bytes); 1502 __ testl(byte_count, 4); 1503 __ jcc(Assembler::zero, L_copy_32_bytes); 1504 __ movl(rax, Address(from, qword_count, Address::times_8)); 1505 __ movl(Address(to, qword_count, Address::times_8), rax); 1506 __ jmp(L_copy_32_bytes); 1507 1508 // Copy trailing qwords 1509 __ BIND(L_copy_8_bytes); 1510 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1511 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1512 __ decrement(qword_count); 1513 __ jcc(Assembler::notZero, L_copy_8_bytes); 1514 1515 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); 1516 restore_arg_regs(); 1517 __ xorptr(rax, rax); // return 0 1518 __ leave(); // required for proper stackwalking of RuntimeStub frame 1519 __ ret(0); 1520 1521 // Copy in 32-bytes chunks 1522 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes); 1523 1524 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); 1525 restore_arg_regs(); 1526 __ xorptr(rax, rax); // return 0 1527 __ leave(); // required for proper stackwalking of RuntimeStub frame 1528 __ ret(0); 1529 1530 return start; 1531 } 1532 1533 // Arguments: 1534 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1535 // ignored 1536 // name - stub name string 1537 // 1538 // Inputs: 1539 // c_rarg0 - source array address 1540 // c_rarg1 - destination array address 1541 // c_rarg2 - element count, treated as 
ssize_t, can be zero 1542 // 1543 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we 1544 // let the hardware handle it. The two or four words within dwords 1545 // or qwords that span cache line boundaries will still be loaded 1546 // and stored atomically. 1547 // 1548 // Side Effects: 1549 // disjoint_short_copy_entry is set to the no-overlap entry point 1550 // used by generate_conjoint_short_copy(). 1551 // 1552 address generate_disjoint_short_copy(bool aligned, const char *name) { 1553 __ align(CodeEntryAlignment); 1554 StubCodeMark mark(this, "StubRoutines", name); 1555 address start = __ pc(); 1556 1557 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes,L_copy_2_bytes,L_exit; 1558 const Register from = rdi; // source array address 1559 const Register to = rsi; // destination array address 1560 const Register count = rdx; // elements count 1561 const Register word_count = rcx; 1562 const Register qword_count = count; 1563 const Register end_from = from; // source array end address 1564 const Register end_to = to; // destination array end address 1565 // End pointers are inclusive, and if count is not zero they point 1566 // to the last unit copied: end_to[0] := end_from[0] 1567 1568 __ enter(); // required for proper stackwalking of RuntimeStub frame 1569 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1570 1571 disjoint_short_copy_entry = __ pc(); 1572 BLOCK_COMMENT("Entry:"); 1573 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1574 1575 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1576 // r9 and r10 may be used to save non-volatile registers 1577 1578 // 'from', 'to' and 'count' are now valid 1579 __ movptr(word_count, count); 1580 __ shrptr(count, 2); // count => qword_count 1581 1582 // Copy from low to high addresses. Use 'to' as scratch. 
1583 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 1584 __ lea(end_to, Address(to, qword_count, Address::times_8, -8)); 1585 __ negptr(qword_count); 1586 __ jmp(L_copy_32_bytes); 1587 1588 // Copy trailing qwords 1589 __ BIND(L_copy_8_bytes); 1590 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); 1591 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); 1592 __ increment(qword_count); 1593 __ jcc(Assembler::notZero, L_copy_8_bytes); 1594 1595 // Original 'dest' is trashed, so we can't use it as a 1596 // base register for a possible trailing word copy 1597 1598 // Check for and copy trailing dword 1599 __ BIND(L_copy_4_bytes); 1600 __ testl(word_count, 2); 1601 __ jccb(Assembler::zero, L_copy_2_bytes); 1602 __ movl(rax, Address(end_from, 8)); 1603 __ movl(Address(end_to, 8), rax); 1604 1605 __ addptr(end_from, 4); 1606 __ addptr(end_to, 4); 1607 1608 // Check for and copy trailing word 1609 __ BIND(L_copy_2_bytes); 1610 __ testl(word_count, 1); 1611 __ jccb(Assembler::zero, L_exit); 1612 __ movw(rax, Address(end_from, 8)); 1613 __ movw(Address(end_to, 8), rax); 1614 1615 __ BIND(L_exit); 1616 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); 1617 restore_arg_regs(); 1618 __ xorptr(rax, rax); // return 0 1619 __ leave(); // required for proper stackwalking of RuntimeStub frame 1620 __ ret(0); 1621 1622 // Copy in 32-bytes chunks 1623 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes); 1624 __ jmp(L_copy_4_bytes); 1625 1626 return start; 1627 } 1628 1629 // Arguments: 1630 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1631 // ignored 1632 // name - stub name string 1633 // 1634 // Inputs: 1635 // c_rarg0 - source array address 1636 // c_rarg1 - destination array address 1637 // c_rarg2 - element count, treated as ssize_t, can be zero 1638 // 1639 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we 1640 // let the hardware handle it. The two or four words within dwords 1641 // or qwords that span cache line boundaries will still be loaded 1642 // and stored atomically. 1643 // 1644 address generate_conjoint_short_copy(bool aligned, const char *name) { 1645 __ align(CodeEntryAlignment); 1646 StubCodeMark mark(this, "StubRoutines", name); 1647 address start = __ pc(); 1648 1649 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes; 1650 const Register from = rdi; // source array address 1651 const Register to = rsi; // destination array address 1652 const Register count = rdx; // elements count 1653 const Register word_count = rcx; 1654 const Register qword_count = count; 1655 1656 __ enter(); // required for proper stackwalking of RuntimeStub frame 1657 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1658 1659 short_copy_entry = __ pc(); 1660 BLOCK_COMMENT("Entry:"); 1661 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1662 1663 array_overlap_test(disjoint_short_copy_entry, Address::times_2); 1664 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1665 // r9 and r10 may be used to save non-volatile registers 1666 1667 // 'from', 'to' and 'count' are now valid 1668 __ movptr(word_count, count); 1669 __ shrptr(count, 2); // count => qword_count 1670 1671 // Copy from high to low addresses. Use 'to' as scratch. 
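    // When the source and destination overlap with 'to' above 'from', copying
    // from the highest element downwards never clobbers source words that are
    // still to be read; a C-level sketch (illustrative only):
    //
    //   for (size_t i = word_count; i-- > 0; )
    //     to[i] = from[i];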
1672 1673 // Check for and copy trailing word 1674 __ testl(word_count, 1); 1675 __ jccb(Assembler::zero, L_copy_4_bytes); 1676 __ movw(rax, Address(from, word_count, Address::times_2, -2)); 1677 __ movw(Address(to, word_count, Address::times_2, -2), rax); 1678 1679 // Check for and copy trailing dword 1680 __ BIND(L_copy_4_bytes); 1681 __ testl(word_count, 2); 1682 __ jcc(Assembler::zero, L_copy_32_bytes); 1683 __ movl(rax, Address(from, qword_count, Address::times_8)); 1684 __ movl(Address(to, qword_count, Address::times_8), rax); 1685 __ jmp(L_copy_32_bytes); 1686 1687 // Copy trailing qwords 1688 __ BIND(L_copy_8_bytes); 1689 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1690 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1691 __ decrement(qword_count); 1692 __ jcc(Assembler::notZero, L_copy_8_bytes); 1693 1694 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); 1695 restore_arg_regs(); 1696 __ xorptr(rax, rax); // return 0 1697 __ leave(); // required for proper stackwalking of RuntimeStub frame 1698 __ ret(0); 1699 1700 // Copy in 32-bytes chunks 1701 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes); 1702 1703 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); 1704 restore_arg_regs(); 1705 __ xorptr(rax, rax); // return 0 1706 __ leave(); // required for proper stackwalking of RuntimeStub frame 1707 __ ret(0); 1708 1709 return start; 1710 } 1711 1712 // Arguments: 1713 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary 1714 // ignored 1715 // is_oop - true => oop array, so generate store check code 1716 // name - stub name string 1717 // 1718 // Inputs: 1719 // c_rarg0 - source array address 1720 // c_rarg1 - destination array address 1721 // c_rarg2 - element count, treated as ssize_t, can be zero 1722 // 1723 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let 1724 // the hardware handle it. The two dwords within qwords that span 1725 // cache line boundaries will still be loaded and stored atomicly. 1726 // 1727 // Side Effects: 1728 // disjoint_int_copy_entry is set to the no-overlap entry point 1729 // used by generate_conjoint_int_oop_copy(). 1730 // 1731 address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, const char *name) { 1732 __ align(CodeEntryAlignment); 1733 StubCodeMark mark(this, "StubRoutines", name); 1734 address start = __ pc(); 1735 1736 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit; 1737 const Register from = rdi; // source array address 1738 const Register to = rsi; // destination array address 1739 const Register count = rdx; // elements count 1740 const Register dword_count = rcx; 1741 const Register qword_count = count; 1742 const Register end_from = from; // source array end address 1743 const Register end_to = to; // destination array end address 1744 const Register saved_to = r11; // saved destination array address 1745 // End pointers are inclusive, and if count is not zero they point 1746 // to the last unit copied: end_to[0] := end_from[0] 1747 1748 __ enter(); // required for proper stackwalking of RuntimeStub frame 1749 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 1750 1751 (is_oop ? 
disjoint_oop_copy_entry : disjoint_int_copy_entry) = __ pc();
1752
1753 if (is_oop) {
1754 // no registers are destroyed by this call
1755 gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
1756 }
1757
1758 BLOCK_COMMENT("Entry:");
1759 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1760
1761 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1762 // r9 and r10 may be used to save non-volatile registers
1763
1764 if (is_oop) {
1765 __ movq(saved_to, to);
1766 }
1767
1768 // 'from', 'to' and 'count' are now valid
1769 __ movptr(dword_count, count);
1770 __ shrptr(count, 1); // count => qword_count
1771
1772 // Copy from low to high addresses. Use 'to' as scratch.
1773 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1774 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
1775 __ negptr(qword_count);
1776 __ jmp(L_copy_32_bytes);
1777
1778 // Copy trailing qwords
1779 __ BIND(L_copy_8_bytes);
1780 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1781 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1782 __ increment(qword_count);
1783 __ jcc(Assembler::notZero, L_copy_8_bytes);
1784
1785 // Check for and copy trailing dword
1786 __ BIND(L_copy_4_bytes);
1787 __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
1788 __ jccb(Assembler::zero, L_exit);
1789 __ movl(rax, Address(end_from, 8));
1790 __ movl(Address(end_to, 8), rax);
1791
1792 __ BIND(L_exit);
1793 if (is_oop) {
1794 __ leaq(end_to, Address(saved_to, dword_count, Address::times_4, -4));
1795 gen_write_ref_array_post_barrier(saved_to, end_to, rax);
1796 }
1797 inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
1798 restore_arg_regs();
1799 __ xorptr(rax, rax); // return 0
1800 __ leave(); // required for proper stackwalking of RuntimeStub frame
1801 __ ret(0);
1802
1803 // Copy 32-byte chunks
1804 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1805 __ jmp(L_copy_4_bytes);
1806
1807 return start;
1808 }
1809
1810 // Arguments:
1811 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1812 // ignored
1813 // is_oop - true => oop array, so generate store check code
1814 // name - stub name string
1815 //
1816 // Inputs:
1817 // c_rarg0 - source array address
1818 // c_rarg1 - destination array address
1819 // c_rarg2 - element count, treated as ssize_t, can be zero
1820 //
1821 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
1822 // the hardware handle it. The two dwords within qwords that span
1823 // cache line boundaries will still be loaded and stored atomically.
1824 //
1825 address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, const char *name) {
1826 __ align(CodeEntryAlignment);
1827 StubCodeMark mark(this, "StubRoutines", name);
1828 address start = __ pc();
1829
1830 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit;
1831 const Register from = rdi; // source array address
1832 const Register to = rsi; // destination array address
1833 const Register count = rdx; // elements count
1834 const Register dword_count = rcx;
1835 const Register qword_count = count;
1836
1837 __ enter(); // required for proper stackwalking of RuntimeStub frame
1838 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1839
1840 if (is_oop) {
1841 // no registers are destroyed by this call
1842 gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
1843 }
1844
1845 (is_oop ?
oop_copy_entry : int_copy_entry) = __ pc(); 1846 BLOCK_COMMENT("Entry:"); 1847 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 1848 1849 array_overlap_test(is_oop ? disjoint_oop_copy_entry : disjoint_int_copy_entry, 1850 Address::times_4); 1851 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 1852 // r9 and r10 may be used to save non-volatile registers 1853 1854 assert_clean_int(count, rax); // Make sure 'count' is clean int. 1855 // 'from', 'to' and 'count' are now valid 1856 __ movptr(dword_count, count); 1857 __ shrptr(count, 1); // count => qword_count 1858 1859 // Copy from high to low addresses. Use 'to' as scratch. 1860 1861 // Check for and copy trailing dword 1862 __ testl(dword_count, 1); 1863 __ jcc(Assembler::zero, L_copy_32_bytes); 1864 __ movl(rax, Address(from, dword_count, Address::times_4, -4)); 1865 __ movl(Address(to, dword_count, Address::times_4, -4), rax); 1866 __ jmp(L_copy_32_bytes); 1867 1868 // Copy trailing qwords 1869 __ BIND(L_copy_8_bytes); 1870 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 1871 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 1872 __ decrement(qword_count); 1873 __ jcc(Assembler::notZero, L_copy_8_bytes); 1874 1875 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); 1876 if (is_oop) { 1877 __ jmp(L_exit); 1878 } 1879 restore_arg_regs(); 1880 __ xorptr(rax, rax); // return 0 1881 __ leave(); // required for proper stackwalking of RuntimeStub frame 1882 __ ret(0); 1883 1884 // Copy in 32-bytes chunks 1885 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes); 1886 1887 inc_counter_np(SharedRuntime::_jint_array_copy_ctr); 1888 __ bind(L_exit); 1889 if (is_oop) { 1890 Register end_to = rdx; 1891 __ leaq(end_to, Address(to, dword_count, Address::times_4, -4)); 1892 gen_write_ref_array_post_barrier(to, end_to, rax); 1893 } 1894 restore_arg_regs(); 1895 __ xorptr(rax, rax); // return 0 1896 __ leave(); // required for proper stackwalking of RuntimeStub frame 1897 __ ret(0); 1898 1899 return start; 1900 } 1901 1902 // Arguments: 1903 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes 1904 // ignored 1905 // is_oop - true => oop array, so generate store check code 1906 // name - stub name string 1907 // 1908 // Inputs: 1909 // c_rarg0 - source array address 1910 // c_rarg1 - destination array address 1911 // c_rarg2 - element count, treated as ssize_t, can be zero 1912 // 1913 // Side Effects: 1914 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the 1915 // no-overlap entry point used by generate_conjoint_long_oop_copy(). 
1916 //
1917 address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, const char *name) {
1918 __ align(CodeEntryAlignment);
1919 StubCodeMark mark(this, "StubRoutines", name);
1920 address start = __ pc();
1921
1922 Label L_copy_32_bytes, L_copy_8_bytes, L_exit;
1923 const Register from = rdi; // source array address
1924 const Register to = rsi; // destination array address
1925 const Register qword_count = rdx; // elements count
1926 const Register end_from = from; // source array end address
1927 const Register end_to = rcx; // destination array end address
1928 const Register saved_to = to;
1929 // End pointers are inclusive, and if count is not zero they point
1930 // to the last unit copied: end_to[0] := end_from[0]
1931
1932 __ enter(); // required for proper stackwalking of RuntimeStub frame
1933 // Save no-overlap entry point for generate_conjoint_long_oop_copy()
1934 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1935
1936 if (is_oop) {
1937 disjoint_oop_copy_entry = __ pc();
1938 // no registers are destroyed by this call
1939 gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
1940 } else {
1941 disjoint_long_copy_entry = __ pc();
1942 }
1943 BLOCK_COMMENT("Entry:");
1944 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1945
1946 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1947 // r9 and r10 may be used to save non-volatile registers
1948
1949 // 'from', 'to' and 'qword_count' are now valid
1950
1951 // Copy from low to high addresses. Use 'to' as scratch.
1952 __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1953 __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
1954 __ negptr(qword_count);
1955 __ jmp(L_copy_32_bytes);
1956
1957 // Copy trailing qwords
1958 __ BIND(L_copy_8_bytes);
1959 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1960 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1961 __ increment(qword_count);
1962 __ jcc(Assembler::notZero, L_copy_8_bytes);
1963
1964 if (is_oop) {
1965 __ jmp(L_exit);
1966 } else {
1967 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
1968 restore_arg_regs();
1969 __ xorptr(rax, rax); // return 0
1970 __ leave(); // required for proper stackwalking of RuntimeStub frame
1971 __ ret(0);
1972 }
1973
1974 // Copy in 32-byte chunks
1975 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1976
1977 if (is_oop) {
1978 __ BIND(L_exit);
1979 gen_write_ref_array_post_barrier(saved_to, end_to, rax);
1980 inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
1981 } else {
1982 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
1983 }
1984 restore_arg_regs();
1985 __ xorptr(rax, rax); // return 0
1986 __ leave(); // required for proper stackwalking of RuntimeStub frame
1987 __ ret(0);
1988
1989 return start;
1990 }
1991
1992 // Arguments:
1993 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
1994 // ignored
1995 // is_oop - true => oop array, so generate store check code
1996 // name - stub name string
1997 //
1998 // Inputs:
1999 // c_rarg0 - source array address
2000 // c_rarg1 - destination array address
2001 // c_rarg2 - element count, treated as ssize_t, can be zero
2002 //
2003 address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, const char *name) {
2004 __ align(CodeEntryAlignment);
2005 StubCodeMark mark(this, "StubRoutines", name);
2006 address start = __ pc();
2007
2008 Label L_copy_32_bytes,
L_copy_8_bytes, L_exit; 2009 const Register from = rdi; // source array address 2010 const Register to = rsi; // destination array address 2011 const Register qword_count = rdx; // elements count 2012 const Register saved_count = rcx; 2013 2014 __ enter(); // required for proper stackwalking of RuntimeStub frame 2015 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 2016 2017 address disjoint_copy_entry = NULL; 2018 if (is_oop) { 2019 assert(!UseCompressedOops, "shouldn't be called for compressed oops"); 2020 disjoint_copy_entry = disjoint_oop_copy_entry; 2021 oop_copy_entry = __ pc(); 2022 array_overlap_test(disjoint_oop_copy_entry, Address::times_8); 2023 } else { 2024 disjoint_copy_entry = disjoint_long_copy_entry; 2025 long_copy_entry = __ pc(); 2026 array_overlap_test(disjoint_long_copy_entry, Address::times_8); 2027 } 2028 BLOCK_COMMENT("Entry:"); 2029 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 2030 2031 array_overlap_test(disjoint_copy_entry, Address::times_8); 2032 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 2033 // r9 and r10 may be used to save non-volatile registers 2034 2035 // 'from', 'to' and 'qword_count' are now valid 2036 2037 if (is_oop) { 2038 // Save to and count for store barrier 2039 __ movptr(saved_count, qword_count); 2040 // No registers are destroyed by this call 2041 gen_write_ref_array_pre_barrier(to, saved_count); 2042 } 2043 2044 __ jmp(L_copy_32_bytes); 2045 2046 // Copy trailing qwords 2047 __ BIND(L_copy_8_bytes); 2048 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); 2049 __ movq(Address(to, qword_count, Address::times_8, -8), rax); 2050 __ decrement(qword_count); 2051 __ jcc(Assembler::notZero, L_copy_8_bytes); 2052 2053 if (is_oop) { 2054 __ jmp(L_exit); 2055 } else { 2056 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); 2057 restore_arg_regs(); 2058 __ xorptr(rax, rax); // return 0 2059 __ leave(); // required for proper stackwalking of RuntimeStub frame 2060 __ ret(0); 2061 } 2062 2063 // Copy in 32-bytes chunks 2064 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes); 2065 2066 if (is_oop) { 2067 __ BIND(L_exit); 2068 __ lea(rcx, Address(to, saved_count, Address::times_8, -8)); 2069 gen_write_ref_array_post_barrier(to, rcx, rax); 2070 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); 2071 } else { 2072 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); 2073 } 2074 restore_arg_regs(); 2075 __ xorptr(rax, rax); // return 0 2076 __ leave(); // required for proper stackwalking of RuntimeStub frame 2077 __ ret(0); 2078 2079 return start; 2080 } 2081 2082 2083 // Helper for generating a dynamic type check. 2084 // Smashes no registers. 2085 void generate_type_check(Register sub_klass, 2086 Register super_check_offset, 2087 Register super_klass, 2088 Label& L_success) { 2089 assert_different_registers(sub_klass, super_check_offset, super_klass); 2090 2091 BLOCK_COMMENT("type_check:"); 2092 2093 Label L_miss; 2094 2095 __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg, &L_success, &L_miss, NULL, 2096 super_check_offset); 2097 __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL); 2098 2099 // Fall through on failure! 
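// (the caller decides what a fall-through means; generate_checkcast_copy(),
//  for instance, treats it as a failed element check and takes its error path)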
2100 __ BIND(L_miss); 2101 } 2102 2103 // 2104 // Generate checkcasting array copy stub 2105 // 2106 // Input: 2107 // c_rarg0 - source array address 2108 // c_rarg1 - destination array address 2109 // c_rarg2 - element count, treated as ssize_t, can be zero 2110 // c_rarg3 - size_t ckoff (super_check_offset) 2111 // not Win64 2112 // c_rarg4 - oop ckval (super_klass) 2113 // Win64 2114 // rsp+40 - oop ckval (super_klass) 2115 // 2116 // Output: 2117 // rax == 0 - success 2118 // rax == -1^K - failure, where K is partial transfer count 2119 // 2120 address generate_checkcast_copy(const char *name) { 2121 2122 Label L_load_element, L_store_element, L_do_card_marks, L_done; 2123 2124 // Input registers (after setup_arg_regs) 2125 const Register from = rdi; // source array address 2126 const Register to = rsi; // destination array address 2127 const Register length = rdx; // elements count 2128 const Register ckoff = rcx; // super_check_offset 2129 const Register ckval = r8; // super_klass 2130 2131 // Registers used as temps (r13, r14 are save-on-entry) 2132 const Register end_from = from; // source array end address 2133 const Register end_to = r13; // destination array end address 2134 const Register count = rdx; // -(count_remaining) 2135 const Register r14_length = r14; // saved copy of length 2136 // End pointers are inclusive, and if length is not zero they point 2137 // to the last unit copied: end_to[0] := end_from[0] 2138 2139 const Register rax_oop = rax; // actual oop copied 2140 const Register r11_klass = r11; // oop._klass 2141 2142 //--------------------------------------------------------------- 2143 // Assembler stub will be used for this call to arraycopy 2144 // if the two arrays are subtypes of Object[] but the 2145 // destination array type is not equal to or a supertype 2146 // of the source type. Each element must be separately 2147 // checked. 2148 2149 __ align(CodeEntryAlignment); 2150 StubCodeMark mark(this, "StubRoutines", name); 2151 address start = __ pc(); 2152 2153 __ enter(); // required for proper stackwalking of RuntimeStub frame 2154 2155 checkcast_copy_entry = __ pc(); 2156 BLOCK_COMMENT("Entry:"); 2157 2158 #ifdef ASSERT 2159 // caller guarantees that the arrays really are different 2160 // otherwise, we would have to make conjoint checks 2161 { Label L; 2162 array_overlap_test(L, TIMES_OOP); 2163 __ stop("checkcast_copy within a single array"); 2164 __ bind(L); 2165 } 2166 #endif //ASSERT 2167 2168 // allocate spill slots for r13, r14 2169 enum { 2170 saved_r13_offset, 2171 saved_r14_offset, 2172 saved_rbp_offset, 2173 saved_rip_offset, 2174 saved_rarg0_offset 2175 }; 2176 __ subptr(rsp, saved_rbp_offset * wordSize); 2177 __ movptr(Address(rsp, saved_r13_offset * wordSize), r13); 2178 __ movptr(Address(rsp, saved_r14_offset * wordSize), r14); 2179 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx 2180 // ckoff => rcx, ckval => r8 2181 // r9 and r10 may be used to save non-volatile registers 2182 #ifdef _WIN64 2183 // last argument (#4) is on stack on Win64 2184 const int ckval_offset = saved_rarg0_offset + 4; 2185 __ movptr(ckval, Address(rsp, ckval_offset * wordSize)); 2186 #endif 2187 2188 // check that int operands are properly extended to size_t 2189 assert_clean_int(length, rax); 2190 assert_clean_int(ckoff, rax); 2191 2192 #ifdef ASSERT 2193 BLOCK_COMMENT("assert consistent ckoff/ckval"); 2194 // The ckoff and ckval must be mutually consistent, 2195 // even though caller generates both. 
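// (that is, ckoff should equal ckval's super_check_offset field;
//  the debug-only check below compares exactly those two values)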
2196 { Label L; 2197 int sco_offset = (klassOopDesc::header_size() * HeapWordSize + 2198 Klass::super_check_offset_offset_in_bytes()); 2199 __ cmpl(ckoff, Address(ckval, sco_offset)); 2200 __ jcc(Assembler::equal, L); 2201 __ stop("super_check_offset inconsistent"); 2202 __ bind(L); 2203 } 2204 #endif //ASSERT 2205 2206 // Loop-invariant addresses. They are exclusive end pointers. 2207 Address end_from_addr(from, length, TIMES_OOP, 0); 2208 Address end_to_addr(to, length, TIMES_OOP, 0); 2209 // Loop-variant addresses. They assume post-incremented count < 0. 2210 Address from_element_addr(end_from, count, TIMES_OOP, 0); 2211 Address to_element_addr(end_to, count, TIMES_OOP, 0); 2212 2213 gen_write_ref_array_pre_barrier(to, count); 2214 2215 // Copy from low to high addresses, indexed from the end of each array. 2216 __ lea(end_from, end_from_addr); 2217 __ lea(end_to, end_to_addr); 2218 __ movptr(r14_length, length); // save a copy of the length 2219 assert(length == count, ""); // else fix next line: 2220 __ negptr(count); // negate and test the length 2221 __ jcc(Assembler::notZero, L_load_element); 2222 2223 // Empty array: Nothing to do. 2224 __ xorptr(rax, rax); // return 0 on (trivial) success 2225 __ jmp(L_done); 2226 2227 // ======== begin loop ======== 2228 // (Loop is rotated; its entry is L_load_element.) 2229 // Loop control: 2230 // for (count = -count; count != 0; count++) 2231 // Base pointers src, dst are biased by 8*(count-1),to last element. 2232 __ align(16); 2233 2234 __ BIND(L_store_element); 2235 __ store_heap_oop(to_element_addr, rax_oop); // store the oop 2236 __ increment(count); // increment the count toward zero 2237 __ jcc(Assembler::zero, L_do_card_marks); 2238 2239 // ======== loop entry is here ======== 2240 __ BIND(L_load_element); 2241 __ load_heap_oop(rax_oop, from_element_addr); // load the oop 2242 __ testptr(rax_oop, rax_oop); 2243 __ jcc(Assembler::zero, L_store_element); 2244 2245 __ load_klass(r11_klass, rax_oop);// query the object klass 2246 generate_type_check(r11_klass, ckoff, ckval, L_store_element); 2247 // ======== end loop ======== 2248 2249 // It was a real error; we must depend on the caller to finish the job. 2250 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops. 2251 // Emit GC store barriers for the oops we have copied (r14 + rdx), 2252 // and report their number to the caller. 2253 assert_different_registers(rax, r14_length, count, to, end_to, rcx); 2254 __ lea(end_to, to_element_addr); 2255 __ addptr(end_to, -heapOopSize); // make an inclusive end pointer 2256 gen_write_ref_array_post_barrier(to, end_to, rscratch1); 2257 __ movptr(rax, r14_length); // original oops 2258 __ addptr(rax, count); // K = (original - remaining) oops 2259 __ notptr(rax); // report (-1^K) to caller 2260 __ jmp(L_done); 2261 2262 // Come here on success only. 2263 __ BIND(L_do_card_marks); 2264 __ addptr(end_to, -heapOopSize); // make an inclusive end pointer 2265 gen_write_ref_array_post_barrier(to, end_to, rscratch1); 2266 __ xorptr(rax, rax); // return 0 on success 2267 2268 // Common exit point (success or failure). 
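// (rax already holds the stub's return value here: 0 on success,
//  or -1^K on failure, set on the paths that reach L_done)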
2269 __ BIND(L_done);
2270 __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
2271 __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
2272 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
2273 restore_arg_regs();
2274 __ leave(); // required for proper stackwalking of RuntimeStub frame
2275 __ ret(0);
2276
2277 return start;
2278 }
2279
2280 //
2281 // Generate 'unsafe' array copy stub
2282 // Though just as safe as the other stubs, it takes an unscaled
2283 // size_t argument instead of an element count.
2284 //
2285 // Input:
2286 // c_rarg0 - source array address
2287 // c_rarg1 - destination array address
2288 // c_rarg2 - byte count, treated as ssize_t, can be zero
2289 //
2290 // Examines the alignment of the operands and dispatches
2291 // to a long, int, short, or byte copy loop.
2292 //
2293 address generate_unsafe_copy(const char *name) {
2294
2295 Label L_long_aligned, L_int_aligned, L_short_aligned;
2296
2297 // Input registers (before setup_arg_regs)
2298 const Register from = c_rarg0; // source array address
2299 const Register to = c_rarg1; // destination array address
2300 const Register size = c_rarg2; // byte count (size_t)
2301
2302 // Register used as a temp
2303 const Register bits = rax; // test copy of low bits
2304
2305 __ align(CodeEntryAlignment);
2306 StubCodeMark mark(this, "StubRoutines", name);
2307 address start = __ pc();
2308
2309 __ enter(); // required for proper stackwalking of RuntimeStub frame
2310
2311 // bump this on entry, not on exit:
2312 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);
2313
2314 __ mov(bits, from);
2315 __ orptr(bits, to);
2316 __ orptr(bits, size);
2317
2318 __ testb(bits, BytesPerLong-1);
2319 __ jccb(Assembler::zero, L_long_aligned);
2320
2321 __ testb(bits, BytesPerInt-1);
2322 __ jccb(Assembler::zero, L_int_aligned);
2323
2324 __ testb(bits, BytesPerShort-1);
2325 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));
2326
2327 __ BIND(L_short_aligned);
2328 __ shrptr(size, LogBytesPerShort); // size => short_count
2329 __ jump(RuntimeAddress(short_copy_entry));
2330
2331 __ BIND(L_int_aligned);
2332 __ shrptr(size, LogBytesPerInt); // size => int_count
2333 __ jump(RuntimeAddress(int_copy_entry));
2334
2335 __ BIND(L_long_aligned);
2336 __ shrptr(size, LogBytesPerLong); // size => qword_count
2337 __ jump(RuntimeAddress(long_copy_entry));
2338
2339 return start;
2340 }
2341
2342 // Perform range checks on the proposed arraycopy.
2343 // Kills temp, but nothing else.
2344 // Also, clean the sign bits of src_pos and dst_pos.
2345 void arraycopy_range_checks(Register src, // source array oop (c_rarg0)
2346 Register src_pos, // source position (c_rarg1)
2347 Register dst, // destination array oop (c_rarg2)
2348 Register dst_pos, // destination position (c_rarg3)
2349 Register length,
2350 Register temp,
2351 Label& L_failed) {
2352 BLOCK_COMMENT("arraycopy_range_checks:");
2353
2354 // if (src_pos + length > arrayOop(src)->length()) FAIL;
2355 __ movl(temp, length);
2356 __ addl(temp, src_pos); // src_pos + length
2357 __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
2358 __ jcc(Assembler::above, L_failed);
2359
2360 // if (dst_pos + length > arrayOop(dst)->length()) FAIL;
2361 __ movl(temp, length);
2362 __ addl(temp, dst_pos); // dst_pos + length
2363 __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
2364 __ jcc(Assembler::above, L_failed);
2365
2366 // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
2367 // Move with sign extension can be used since they are positive.
2368 __ movslq(src_pos, src_pos);
2369 __ movslq(dst_pos, dst_pos);
2370
2371 BLOCK_COMMENT("arraycopy_range_checks done");
2372 }
2373
2374 //
2375 // Generate generic array copy stubs
2376 //
2377 // Input:
2378 // c_rarg0 - src oop
2379 // c_rarg1 - src_pos (32-bits)
2380 // c_rarg2 - dst oop
2381 // c_rarg3 - dst_pos (32-bits)
2382 // not Win64
2383 // c_rarg4 - element count (32-bits)
2384 // Win64
2385 // rsp+40 - element count (32-bits)
2386 //
2387 // Output:
2388 // rax == 0 - success
2389 // rax == -1^K - failure, where K is partial transfer count
2390 //
2391 address generate_generic_copy(const char *name) {
2392
2393 Label L_failed, L_failed_0, L_objArray;
2394 Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;
2395
2396 // Input registers
2397 const Register src = c_rarg0; // source array oop
2398 const Register src_pos = c_rarg1; // source position
2399 const Register dst = c_rarg2; // destination array oop
2400 const Register dst_pos = c_rarg3; // destination position
2401 // elements count is on stack on Win64
2402 #ifdef _WIN64
2403 #define C_RARG4 Address(rsp, 6 * wordSize)
2404 #else
2405 #define C_RARG4 c_rarg4
2406 #endif
2407
2408 { int modulus = CodeEntryAlignment;
2409 int target = modulus - 5; // 5 = sizeof jmp(L_failed)
2410 int advance = target - (__ offset() % modulus);
2411 if (advance < 0) advance += modulus;
2412 if (advance > 0) __ nop(advance);
2413 }
2414 StubCodeMark mark(this, "StubRoutines", name);
2415
2416 // Short-hop target to L_failed. Makes for denser prologue code.
2417 __ BIND(L_failed_0);
2418 __ jmp(L_failed);
2419 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");
2420
2421 __ align(CodeEntryAlignment);
2422 address start = __ pc();
2423
2424 __ enter(); // required for proper stackwalking of RuntimeStub frame
2425
2426 // bump this on entry, not on exit:
2427 inc_counter_np(SharedRuntime::_generic_array_copy_ctr);
2428
2429 //-----------------------------------------------------------------------
2430 // Assembler stub will be used for this call to arraycopy
2431 // if the following conditions are met:
2432 //
2433 // (1) src and dst must not be null.
2434 // (2) src_pos must not be negative.
2435 // (3) dst_pos must not be negative.
2436 // (4) length must not be negative.
2437 // (5) src klass and dst klass should be the same and not NULL.
2438 // (6) src and dst should be arrays.
2439 // (7) src_pos + length must not exceed length of src.
2440 // (8) dst_pos + length must not exceed length of dst.
2441 //
2442
2443 // if (src == NULL) return -1;
2444 __ testptr(src, src); // src oop
2445 size_t j1off = __ offset();
2446 __ jccb(Assembler::zero, L_failed_0);
2447
2448 // if (src_pos < 0) return -1;
2449 __ testl(src_pos, src_pos); // src_pos (32-bits)
2450 __ jccb(Assembler::negative, L_failed_0);
2451
2452 // if (dst == NULL) return -1;
2453 __ testptr(dst, dst); // dst oop
2454 __ jccb(Assembler::zero, L_failed_0);
2455
2456 // if (dst_pos < 0) return -1;
2457 __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
2458 size_t j4off = __ offset();
2459 __ jccb(Assembler::negative, L_failed_0);
2460
2461 // The first four tests are very dense code,
2462 // but not quite dense enough to put four
2463 // jumps in a 16-byte instruction fetch buffer.
2464 // That's good, because some branch predictors
2465 // do not like jumps so close together.
2466 // Make sure of this.
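// (the guarantee below fails at stub-generation time if the first and
//  fourth conditional jumps ended up in the same 16-byte-aligned block)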
2467 guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps"); 2468 2469 // registers used as temp 2470 const Register r11_length = r11; // elements count to copy 2471 const Register r10_src_klass = r10; // array klass 2472 const Register r9_dst_klass = r9; // dest array klass 2473 2474 // if (length < 0) return -1; 2475 __ movl(r11_length, C_RARG4); // length (elements count, 32-bits value) 2476 __ testl(r11_length, r11_length); 2477 __ jccb(Assembler::negative, L_failed_0); 2478 2479 __ load_klass(r10_src_klass, src); 2480 #ifdef ASSERT 2481 // assert(src->klass() != NULL); 2482 BLOCK_COMMENT("assert klasses not null"); 2483 { Label L1, L2; 2484 __ testptr(r10_src_klass, r10_src_klass); 2485 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL 2486 __ bind(L1); 2487 __ stop("broken null klass"); 2488 __ bind(L2); 2489 __ load_klass(r9_dst_klass, dst); 2490 __ cmpq(r9_dst_klass, 0); 2491 __ jcc(Assembler::equal, L1); // this would be broken also 2492 BLOCK_COMMENT("assert done"); 2493 } 2494 #endif 2495 2496 // Load layout helper (32-bits) 2497 // 2498 // |array_tag| | header_size | element_type | |log2_element_size| 2499 // 32 30 24 16 8 2 0 2500 // 2501 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 2502 // 2503 2504 int lh_offset = klassOopDesc::header_size() * HeapWordSize + 2505 Klass::layout_helper_offset_in_bytes(); 2506 2507 const Register rax_lh = rax; // layout helper 2508 2509 __ movl(rax_lh, Address(r10_src_klass, lh_offset)); 2510 2511 // Handle objArrays completely differently... 2512 jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2513 __ cmpl(rax_lh, objArray_lh); 2514 __ jcc(Assembler::equal, L_objArray); 2515 2516 // if (src->klass() != dst->klass()) return -1; 2517 __ load_klass(r9_dst_klass, dst); 2518 __ cmpq(r10_src_klass, r9_dst_klass); 2519 __ jcc(Assembler::notEqual, L_failed); 2520 2521 // if (!src->is_Array()) return -1; 2522 __ cmpl(rax_lh, Klass::_lh_neutral_value); 2523 __ jcc(Assembler::greaterEqual, L_failed); 2524 2525 // At this point, it is known to be a typeArray (array_tag 0x3). 
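// (we already know the klass is some kind of array other than an objArray;
//  the debug-only check below verifies that its array_tag bits say typeArray)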
2526 #ifdef ASSERT 2527 { Label L; 2528 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift)); 2529 __ jcc(Assembler::greaterEqual, L); 2530 __ stop("must be a primitive array"); 2531 __ bind(L); 2532 } 2533 #endif 2534 2535 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2536 r10, L_failed); 2537 2538 // typeArrayKlass 2539 // 2540 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize); 2541 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize); 2542 // 2543 2544 const Register r10_offset = r10; // array offset 2545 const Register rax_elsize = rax_lh; // element size 2546 2547 __ movl(r10_offset, rax_lh); 2548 __ shrl(r10_offset, Klass::_lh_header_size_shift); 2549 __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset 2550 __ addptr(src, r10_offset); // src array offset 2551 __ addptr(dst, r10_offset); // dst array offset 2552 BLOCK_COMMENT("choose copy loop based on element size"); 2553 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize 2554 2555 // next registers should be set before the jump to corresponding stub 2556 const Register from = c_rarg0; // source array address 2557 const Register to = c_rarg1; // destination array address 2558 const Register count = c_rarg2; // elements count 2559 2560 // 'from', 'to', 'count' registers should be set in such order 2561 // since they are the same as 'src', 'src_pos', 'dst'. 2562 2563 __ BIND(L_copy_bytes); 2564 __ cmpl(rax_elsize, 0); 2565 __ jccb(Assembler::notEqual, L_copy_shorts); 2566 __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr 2567 __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr 2568 __ movl2ptr(count, r11_length); // length 2569 __ jump(RuntimeAddress(byte_copy_entry)); 2570 2571 __ BIND(L_copy_shorts); 2572 __ cmpl(rax_elsize, LogBytesPerShort); 2573 __ jccb(Assembler::notEqual, L_copy_ints); 2574 __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr 2575 __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr 2576 __ movl2ptr(count, r11_length); // length 2577 __ jump(RuntimeAddress(short_copy_entry)); 2578 2579 __ BIND(L_copy_ints); 2580 __ cmpl(rax_elsize, LogBytesPerInt); 2581 __ jccb(Assembler::notEqual, L_copy_longs); 2582 __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr 2583 __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr 2584 __ movl2ptr(count, r11_length); // length 2585 __ jump(RuntimeAddress(int_copy_entry)); 2586 2587 __ BIND(L_copy_longs); 2588 #ifdef ASSERT 2589 { Label L; 2590 __ cmpl(rax_elsize, LogBytesPerLong); 2591 __ jcc(Assembler::equal, L); 2592 __ stop("must be long copy, but elsize is wrong"); 2593 __ bind(L); 2594 } 2595 #endif 2596 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr 2597 __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr 2598 __ movl2ptr(count, r11_length); // length 2599 __ jump(RuntimeAddress(long_copy_entry)); 2600 2601 // objArrayKlass 2602 __ BIND(L_objArray); 2603 // live at this point: r10_src_klass, src[_pos], dst[_pos] 2604 2605 Label L_plain_copy, L_checkcast_copy; 2606 // test array classes for subtyping 2607 __ load_klass(r9_dst_klass, dst); 2608 __ cmpq(r10_src_klass, r9_dst_klass); // usual case is exact equality 2609 __ jcc(Assembler::notEqual, L_checkcast_copy); 2610 2611 // Identically typed arrays can be copied without element-wise checks. 
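// Roughly (illustrative comment only):
//   from  = src + arrayOopDesc::base_offset_in_bytes(T_OBJECT) + src_pos * oop_size;
//   to    = dst + arrayOopDesc::base_offset_in_bytes(T_OBJECT) + dst_pos * oop_size;
//   count = length;  then tail-call the plain oop copy stub (oop_copy_entry).
// (oop_size is 4 with compressed oops and 8 otherwise; see TIMES_OOP)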
2612 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2613 r10, L_failed); 2614 2615 __ lea(from, Address(src, src_pos, TIMES_OOP, 2616 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr 2617 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2618 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr 2619 __ movl2ptr(count, r11_length); // length 2620 __ BIND(L_plain_copy); 2621 __ jump(RuntimeAddress(oop_copy_entry)); 2622 2623 __ BIND(L_checkcast_copy); 2624 // live at this point: r10_src_klass, !r11_length 2625 { 2626 // assert(r11_length == C_RARG4); // will reload from here 2627 Register r11_dst_klass = r11; 2628 __ load_klass(r11_dst_klass, dst); 2629 2630 // Before looking at dst.length, make sure dst is also an objArray. 2631 __ cmpl(Address(r11_dst_klass, lh_offset), objArray_lh); 2632 __ jcc(Assembler::notEqual, L_failed); 2633 2634 // It is safe to examine both src.length and dst.length. 2635 #ifndef _WIN64 2636 arraycopy_range_checks(src, src_pos, dst, dst_pos, C_RARG4, 2637 rax, L_failed); 2638 #else 2639 __ movl(r11_length, C_RARG4); // reload 2640 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 2641 rax, L_failed); 2642 __ load_klass(r11_dst_klass, dst); // reload 2643 #endif 2644 2645 // Marshal the base address arguments now, freeing registers. 2646 __ lea(from, Address(src, src_pos, TIMES_OOP, 2647 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2648 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 2649 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 2650 __ movl(count, C_RARG4); // length (reloaded) 2651 Register sco_temp = c_rarg3; // this register is free now 2652 assert_different_registers(from, to, count, sco_temp, 2653 r11_dst_klass, r10_src_klass); 2654 assert_clean_int(count, sco_temp); 2655 2656 // Generate the type check. 2657 int sco_offset = (klassOopDesc::header_size() * HeapWordSize + 2658 Klass::super_check_offset_offset_in_bytes()); 2659 __ movl(sco_temp, Address(r11_dst_klass, sco_offset)); 2660 assert_clean_int(sco_temp, rax); 2661 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy); 2662 2663 // Fetch destination element klass from the objArrayKlass header. 2664 int ek_offset = (klassOopDesc::header_size() * HeapWordSize + 2665 objArrayKlass::element_klass_offset_in_bytes()); 2666 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset)); 2667 __ movl(sco_temp, Address(r11_dst_klass, sco_offset)); 2668 assert_clean_int(sco_temp, rax); 2669 2670 // the checkcast_copy loop needs two extra arguments: 2671 assert(c_rarg3 == sco_temp, "#3 already in place"); 2672 __ movptr(C_RARG4, r11_dst_klass); // dst.klass.element_klass 2673 __ jump(RuntimeAddress(checkcast_copy_entry)); 2674 } 2675 2676 __ BIND(L_failed); 2677 __ xorptr(rax, rax); 2678 __ notptr(rax); // return -1 2679 __ leave(); // required for proper stackwalking of RuntimeStub frame 2680 __ ret(0); 2681 2682 return start; 2683 } 2684 2685 #undef length_arg 2686 2687 void generate_arraycopy_stubs() { 2688 // Call the conjoint generation methods immediately after 2689 // the disjoint ones so that short branches from the former 2690 // to the latter can be generated. 
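// (each conjoint stub begins with an array_overlap_test() that branches
//  back to the matching disjoint stub when the ranges do not overlap,
//  so keeping the two close lets that branch be a short jump)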
2691 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy"); 2692 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, "jbyte_arraycopy"); 2693 2694 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy"); 2695 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, "jshort_arraycopy"); 2696 2697 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, "jint_disjoint_arraycopy"); 2698 StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, "jint_arraycopy"); 2699 2700 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, "jlong_disjoint_arraycopy"); 2701 StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, "jlong_arraycopy"); 2702 2703 2704 if (UseCompressedOops) { 2705 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, "oop_disjoint_arraycopy"); 2706 StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, "oop_arraycopy"); 2707 } else { 2708 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, "oop_disjoint_arraycopy"); 2709 StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, "oop_arraycopy"); 2710 } 2711 2712 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy"); 2713 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy"); 2714 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy"); 2715 2716 // We don't generate specialized code for HeapWord-aligned source 2717 // arrays, so just use the code we've already generated 2718 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy; 2719 StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy; 2720 2721 StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy; 2722 StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy; 2723 2724 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy; 2725 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; 2726 2727 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy; 2728 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; 2729 2730 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; 2731 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; 2732 } 2733 2734 void generate_math_stubs() { 2735 { 2736 StubCodeMark mark(this, "StubRoutines", "log"); 2737 StubRoutines::_intrinsic_log = (double (*)(double)) __ pc(); 2738 2739 __ subq(rsp, 8); 2740 __ movdbl(Address(rsp, 0), xmm0); 2741 __ fld_d(Address(rsp, 0)); 2742 __ flog(); 2743 __ fstp_d(Address(rsp, 0)); 2744 __ movdbl(xmm0, Address(rsp, 0)); 2745 __ addq(rsp, 8); 2746 __ ret(0); 2747 } 2748 { 2749 StubCodeMark mark(this, "StubRoutines", "log10"); 2750 StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc(); 2751 2752 __ subq(rsp, 8); 2753 __ movdbl(Address(rsp, 0), xmm0); 2754 __ fld_d(Address(rsp, 0)); 2755 __ flog10(); 2756 __ fstp_d(Address(rsp, 0)); 2757 __ movdbl(xmm0, Address(rsp, 0)); 2758 __ addq(rsp, 8); 2759 __ ret(0); 2760 } 2761 { 2762 StubCodeMark mark(this, "StubRoutines", "sin"); 2763 StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc(); 2764 2765 __ 
subq(rsp, 8); 2766 __ movdbl(Address(rsp, 0), xmm0); 2767 __ fld_d(Address(rsp, 0)); 2768 __ trigfunc('s'); 2769 __ fstp_d(Address(rsp, 0)); 2770 __ movdbl(xmm0, Address(rsp, 0)); 2771 __ addq(rsp, 8); 2772 __ ret(0); 2773 } 2774 { 2775 StubCodeMark mark(this, "StubRoutines", "cos"); 2776 StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc(); 2777 2778 __ subq(rsp, 8); 2779 __ movdbl(Address(rsp, 0), xmm0); 2780 __ fld_d(Address(rsp, 0)); 2781 __ trigfunc('c'); 2782 __ fstp_d(Address(rsp, 0)); 2783 __ movdbl(xmm0, Address(rsp, 0)); 2784 __ addq(rsp, 8); 2785 __ ret(0); 2786 } 2787 { 2788 StubCodeMark mark(this, "StubRoutines", "tan"); 2789 StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc(); 2790 2791 __ subq(rsp, 8); 2792 __ movdbl(Address(rsp, 0), xmm0); 2793 __ fld_d(Address(rsp, 0)); 2794 __ trigfunc('t'); 2795 __ fstp_d(Address(rsp, 0)); 2796 __ movdbl(xmm0, Address(rsp, 0)); 2797 __ addq(rsp, 8); 2798 __ ret(0); 2799 } 2800 2801 // The intrinsic version of these seem to return the same value as 2802 // the strict version. 2803 StubRoutines::_intrinsic_exp = SharedRuntime::dexp; 2804 StubRoutines::_intrinsic_pow = SharedRuntime::dpow; 2805 } 2806 2807 #undef __ 2808 #define __ masm-> 2809 2810 // Continuation point for throwing of implicit exceptions that are 2811 // not handled in the current activation. Fabricates an exception 2812 // oop and initiates normal exception dispatching in this 2813 // frame. Since we need to preserve callee-saved values (currently 2814 // only for C2, but done for C1 as well) we need a callee-saved oop 2815 // map and therefore have to make these stubs into RuntimeStubs 2816 // rather than BufferBlobs. If the compiler needs all registers to 2817 // be preserved between the fault point and the exception handler 2818 // then it must assume responsibility for that in 2819 // AbstractCompiler::continuation_for_implicit_null_exception or 2820 // continuation_for_implicit_division_by_zero_exception. All other 2821 // implicit exceptions (e.g., NullPointerException or 2822 // AbstractMethodError on entry) are either at call sites or 2823 // otherwise assume that stack unwinding will be initiated, so 2824 // caller saved registers were assumed volatile in the compiler. 2825 address generate_throw_exception(const char* name, 2826 address runtime_entry, 2827 bool restore_saved_exception_pc) { 2828 // Information about frame layout at time of blocking runtime call. 2829 // Note that we only have to preserve callee-saved registers since 2830 // the compilers are responsible for supplying a continuation point 2831 // if they expect all registers to be preserved. 
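// The 'layout' enum below describes that frame in 32-bit slots: the
// argument register save area required by the Win64 ABI (empty elsewhere),
// then saved rbp and the return address, each taking two slots on x86_64.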
2832 enum layout { 2833 rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt, 2834 rbp_off2, 2835 return_off, 2836 return_off2, 2837 framesize // inclusive of return address 2838 }; 2839 2840 int insts_size = 512; 2841 int locs_size = 64; 2842 2843 CodeBuffer code(name, insts_size, locs_size); 2844 OopMapSet* oop_maps = new OopMapSet(); 2845 MacroAssembler* masm = new MacroAssembler(&code); 2846 2847 address start = __ pc(); 2848 2849 // This is an inlined and slightly modified version of call_VM 2850 // which has the ability to fetch the return PC out of 2851 // thread-local storage and also sets up last_Java_sp slightly 2852 // differently than the real call_VM 2853 if (restore_saved_exception_pc) { 2854 __ movptr(rax, 2855 Address(r15_thread, 2856 in_bytes(JavaThread::saved_exception_pc_offset()))); 2857 __ push(rax); 2858 } 2859 2860 __ enter(); // required for proper stackwalking of RuntimeStub frame 2861 2862 assert(is_even(framesize/2), "sp not 16-byte aligned"); 2863 2864 // return address and rbp are already in place 2865 __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog 2866 2867 int frame_complete = __ pc() - start; 2868 2869 // Set up last_Java_sp and last_Java_fp 2870 __ set_last_Java_frame(rsp, rbp, NULL); 2871 2872 // Call runtime 2873 __ movptr(c_rarg0, r15_thread); 2874 BLOCK_COMMENT("call runtime_entry"); 2875 __ call(RuntimeAddress(runtime_entry)); 2876 2877 // Generate oop map 2878 OopMap* map = new OopMap(framesize, 0); 2879 2880 oop_maps->add_gc_map(__ pc() - start, map); 2881 2882 __ reset_last_Java_frame(true, false); 2883 2884 __ leave(); // required for proper stackwalking of RuntimeStub frame 2885 2886 // check for pending exceptions 2887 #ifdef ASSERT 2888 Label L; 2889 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), 2890 (int32_t) NULL_WORD); 2891 __ jcc(Assembler::notEqual, L); 2892 __ should_not_reach_here(); 2893 __ bind(L); 2894 #endif // ASSERT 2895 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 2896 2897 2898 // codeBlob framesize is in words (not VMRegImpl::slot_size) 2899 RuntimeStub* stub = 2900 RuntimeStub::new_runtime_stub(name, 2901 &code, 2902 frame_complete, 2903 (framesize >> (LogBytesPerWord - LogBytesPerInt)), 2904 oop_maps, false); 2905 return stub->entry_point(); 2906 } 2907 2908 // Initialization 2909 void generate_initial() { 2910 // Generates all stubs and initializes the entry points 2911 2912 // This platform-specific stub is needed by generate_call_stub() 2913 StubRoutines::x86::_mxcsr_std = generate_fp_mask("mxcsr_std", 0x0000000000001F80); 2914 2915 // entry points that exist in all platforms Note: This is code 2916 // that could be shared among different platforms - however the 2917 // benefit seems to be smaller than the disadvantage of having a 2918 // much more complicated generator structure. See also comment in 2919 // stubRoutines.hpp. 
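// (generate_initial() emits only the small set of stubs needed early in
//  startup; the rest, including the arraycopy and math stubs above, are
//  emitted later by generate_all() -- see the constructor at the end of
//  this class)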
2920 2921 StubRoutines::_forward_exception_entry = generate_forward_exception(); 2922 2923 StubRoutines::_call_stub_entry = 2924 generate_call_stub(StubRoutines::_call_stub_return_address); 2925 2926 // is referenced by megamorphic call 2927 StubRoutines::_catch_exception_entry = generate_catch_exception(); 2928 2929 // atomic calls 2930 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg(); 2931 StubRoutines::_atomic_xchg_ptr_entry = generate_atomic_xchg_ptr(); 2932 StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg(); 2933 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long(); 2934 StubRoutines::_atomic_add_entry = generate_atomic_add(); 2935 StubRoutines::_atomic_add_ptr_entry = generate_atomic_add_ptr(); 2936 StubRoutines::_fence_entry = generate_orderaccess_fence(); 2937 2938 StubRoutines::_handler_for_unsafe_access_entry = 2939 generate_handler_for_unsafe_access(); 2940 2941 // platform dependent 2942 StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp(); 2943 2944 StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr(); 2945 } 2946 2947 void generate_all() { 2948 // Generates all stubs and initializes the entry points 2949 2950 // These entry points require SharedInfo::stack0 to be set up in 2951 // non-core builds and need to be relocatable, so they each 2952 // fabricate a RuntimeStub internally. 2953 StubRoutines::_throw_AbstractMethodError_entry = 2954 generate_throw_exception("AbstractMethodError throw_exception", 2955 CAST_FROM_FN_PTR(address, 2956 SharedRuntime:: 2957 throw_AbstractMethodError), 2958 false); 2959 2960 StubRoutines::_throw_IncompatibleClassChangeError_entry = 2961 generate_throw_exception("IncompatibleClassChangeError throw_exception", 2962 CAST_FROM_FN_PTR(address, 2963 SharedRuntime:: 2964 throw_IncompatibleClassChangeError), 2965 false); 2966 2967 StubRoutines::_throw_ArithmeticException_entry = 2968 generate_throw_exception("ArithmeticException throw_exception", 2969 CAST_FROM_FN_PTR(address, 2970 SharedRuntime:: 2971 throw_ArithmeticException), 2972 true); 2973 2974 StubRoutines::_throw_NullPointerException_entry = 2975 generate_throw_exception("NullPointerException throw_exception", 2976 CAST_FROM_FN_PTR(address, 2977 SharedRuntime:: 2978 throw_NullPointerException), 2979 true); 2980 2981 StubRoutines::_throw_NullPointerException_at_call_entry = 2982 generate_throw_exception("NullPointerException at call throw_exception", 2983 CAST_FROM_FN_PTR(address, 2984 SharedRuntime:: 2985 throw_NullPointerException_at_call), 2986 false); 2987 2988 StubRoutines::_throw_StackOverflowError_entry = 2989 generate_throw_exception("StackOverflowError throw_exception", 2990 CAST_FROM_FN_PTR(address, 2991 SharedRuntime:: 2992 throw_StackOverflowError), 2993 false); 2994 2995 // entry points that are platform specific 2996 StubRoutines::x86::_f2i_fixup = generate_f2i_fixup(); 2997 StubRoutines::x86::_f2l_fixup = generate_f2l_fixup(); 2998 StubRoutines::x86::_d2i_fixup = generate_d2i_fixup(); 2999 StubRoutines::x86::_d2l_fixup = generate_d2l_fixup(); 3000 3001 StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF); 3002 StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000); 3003 StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF); 3004 StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000); 3005 3006 // support for verify_oop (must happen after 
universe_init) 3007 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop(); 3008 3009 // arraycopy stubs used by compilers 3010 generate_arraycopy_stubs(); 3011 3012 // generic method handle stubs 3013 if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) { 3014 for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST; 3015 ek < MethodHandles::_EK_LIMIT; 3016 ek = MethodHandles::EntryKind(1 + (int)ek)) { 3017 StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek)); 3018 MethodHandles::generate_method_handle_stub(_masm, ek); 3019 } 3020 } 3021 3022 generate_math_stubs(); 3023 } 3024 3025 public: 3026 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { 3027 if (all) { 3028 generate_all(); 3029 } else { 3030 generate_initial(); 3031 } 3032 } 3033 }; // end class declaration 3034 3035 address StubGenerator::disjoint_byte_copy_entry = NULL; 3036 address StubGenerator::disjoint_short_copy_entry = NULL; 3037 address StubGenerator::disjoint_int_copy_entry = NULL; 3038 address StubGenerator::disjoint_long_copy_entry = NULL; 3039 address StubGenerator::disjoint_oop_copy_entry = NULL; 3040 3041 address StubGenerator::byte_copy_entry = NULL; 3042 address StubGenerator::short_copy_entry = NULL; 3043 address StubGenerator::int_copy_entry = NULL; 3044 address StubGenerator::long_copy_entry = NULL; 3045 address StubGenerator::oop_copy_entry = NULL; 3046 3047 address StubGenerator::checkcast_copy_entry = NULL; 3048 3049 void StubGenerator_generate(CodeBuffer* code, bool all) { 3050 StubGenerator g(code, all); 3051 }