/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_stubGenerator_x86_64.cpp.incl"

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp.

#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
#define a__ ((Assembler*)_masm)->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions

// Stub Code definitions

static address handle_unsafe_access() {
  JavaThread* thread = JavaThread::current();
  address pc = thread->saved_exception_pc();
  // pc is the instruction which we must emulate;
  // doing a no-op is fine: return garbage from the load.
  // Therefore, compute npc, the address of the next instruction.
  address npc = Assembler::locate_next_instruction(pc);

  // request an async exception
  thread->set_pending_unsafe_access_error();

  // return address of next instruction to execute
  return npc;
}

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) (0)
#else
  void inc_counter_np_(int& counter) {
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter) \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif

  // Call stubs are used to call Java from C.
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address              address
  //    c_rarg1:   result                            address
  //    c_rarg2:   result type                       BasicType
  //    c_rarg3:   method                            methodOop
  //    c_rarg4:   (interpreter) entry point         address
  //    c_rarg5:   parameters                        intptr_t*
  //    16(rbp):   parameter size (in words)         int
  //    24(rbp):   thread                            Thread*
  //
  //      [ return_from_Java     ] <--- rsp
  //      [ argument word n      ]
  //       ...
  //  -12 [ argument word 1      ]
  //  -11 [ saved r15            ] <--- rsp_after_call
  //  -10 [ saved r14            ]
  //   -9 [ saved r13            ]
  //   -8 [ saved r12            ]
  //   -7 [ saved rbx            ]
  //   -6 [ call wrapper         ]
  //   -5 [ result               ]
  //   -4 [ result type          ]
  //   -3 [ method               ]
  //   -2 [ entry point          ]
  //   -1 [ parameters           ]
  //    0 [ saved rbp            ] <--- rbp
  //    1 [ return address       ]
  //    2 [ parameter size       ]
  //    3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address              address
  //    c_rarg1:   result                            address
  //    c_rarg2:   result type                       BasicType
  //    c_rarg3:   method                            methodOop
  //    48(rbp):   (interpreter) entry point         address
  //    56(rbp):   parameters                        intptr_t*
  //    64(rbp):   parameter size (in words)         int
  //    72(rbp):   thread                            Thread*
  //
  //      [ return_from_Java     ] <--- rsp
  //      [ argument word n      ]
  //       ...
  //   -8 [ argument word 1      ]
  //   -7 [ saved r15            ] <--- rsp_after_call
  //   -6 [ saved r14            ]
  //   -5 [ saved r13            ]
  //   -4 [ saved r12            ]
  //   -3 [ saved rdi            ]
  //   -2 [ saved rsi            ]
  //   -1 [ saved rbx            ]
  //    0 [ saved rbp            ] <--- rbp
  //    1 [ return address       ]
  //    2 [ call wrapper         ]
  //    3 [ result               ]
  //    4 [ result type          ]
  //    5 [ method               ]
  //    6 [ entry point          ]
  //    7 [ parameters           ]
  //    8 [ parameter size       ]
  //    9 [ thread               ]
  //
  // Windows reserves the caller's stack space for arguments 1-4.
  // We spill c_rarg0-c_rarg3 to this space.

  // Call stub stack layout word offsets from rbp
  enum call_stub_layout {
#ifdef _WIN64
    rsp_after_call_off = -7,
    r15_off            = rsp_after_call_off,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    call_wrapper_off   =  2,
    result_off         =  3,
    result_type_off    =  4,
    method_off         =  5,
    entry_point_off    =  6,
    parameters_off     =  7,
    parameter_size_off =  8,
    thread_off         =  9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            =  0,
    retaddr_off        =  1,
    parameter_size_off =  2,
    thread_off         =  3
#endif
  };

  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread(rbp, thread_off * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
#ifndef _WIN64
    __ movptr(parameters,   c_rarg5); // parameters
    __ movptr(entry_point,  c_rarg4); // entry_point
#endif

    __ movptr(method,       c_rarg3); // method
    __ movl(result_type,    c_rarg2); // result type
    __ movptr(result,       c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);

#ifdef _WIN64
    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::x86::mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters);      // parameter pointer
    __ movl(c_rarg1, c_rarg3);           // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0)); // get parameter
    __ addptr(c_rarg2, wordSize);        // advance to next parameter
    __ decrementl(c_rarg1);              // decrement counter
    __ push(rax);                        // pass parameter
    __ jcc(Assembler::notZero, loop);

    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method);              // get methodOop
    __ movptr(c_rarg1, entry_point);     // get entry_point
    __ mov(r13, rsp);                    // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L, S;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::notEqual, S);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L);
      __ bind(S);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L);
    }
#endif

    // restore regs belonging to calling function
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

#ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }

  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee.  In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to set up the
  // rsp.
  //
  // rax: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    address start = __ pc();

    // same as in generate_call_stub():
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
    const Address thread        (rbp, thread_off         * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L, S;
      __ cmpptr(r15_thread, thread);
      __ jcc(Assembler::notEqual, S);
      __ get_thread(rbx);
      __ cmpptr(r15_thread, rbx);
      __ jcc(Assembler::equal, L);
      __ bind(S);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L);
    }
#endif

    // set pending exception
    __ verify_oop(rax);

    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
    __ lea(rscratch1, ExternalAddress((address)__FILE__));
    __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
    __ movl(Address(r15_thread, Thread::exception_line_offset()), (int)__LINE__);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL,
           "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }
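
  // Usage sketch (hedged: the exact call site lives in shared runtime
  // code, e.g. JavaCalls::call_helper, not in this file):
  //
  //   StubRoutines::call_stub()(wrapper, &result, result_type, method,
  //                             entry_point, parameters, size_in_words,
  //                             thread);
  //
  // If the Java code throws, the return path runs through
  // generate_catch_exception() above, which records the oop as the
  // thread's pending exception and resumes at _call_stub_return_address.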

  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movptr(c_rarg0, Address(rsp, 0));
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                         SharedRuntime::exception_handler_for_return_address),
                    r15_thread, c_rarg0);
    __ mov(rbx, rax);

    // setup rax & rdx, remove return address & clear pending exception
    __ pop(rdx);
    __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }

  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
    address start = __ pc();

    __ movptr(rax, c_rarg0);              // Copy to eax; we need a return value anyhow
    __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }
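
  // Note (an assumption about the surrounding runtime, not shown in
  // this file): ports without compiler intrinsics for these operations
  // reach the stubs through the published entry points, roughly
  //
  //   jint old = (*StubRoutines::atomic_xchg_entry())(new_value, dest);
  //
  // XCHG with a memory operand asserts LOCK# implicitly, which is why
  // the two stubs above need no explicit lock prefix.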

  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if (compare_value == *dest) {
  //      *dest = exchange_value;
  //      return compare_value;
  //    } else {
  //      return *dest;
  //    }
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);
    if (os::is_MP()) __ lock();
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jint atomic::atomic_cmpxchg_long(jlong exchange_value,
  //                                              volatile jlong* dest,
  //                                              jlong compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if (compare_value == *dest) {
  //      *dest = exchange_value;
  //      return compare_value;
  //    } else {
  //      return *dest;
  //    }
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);
    if (os::is_MP()) __ lock();
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);

    return start;
  }

  // Support for jint atomic::add(jint add_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    if (os::is_MP()) __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);
    __ addl(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
    address start = __ pc();

    __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
    if (os::is_MP()) __ lock();
    __ xaddptr(Address(c_rarg1, 0), c_rarg0);
    __ addptr(rax, c_rarg0);
    __ ret(0);

    return start;
  }

  // Support for intptr_t OrderAccess::fence()
  //
  // Arguments :
  //
  // Result:
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ membar(Assembler::StoreLoad);
    __ ret(0);

    return start;
  }

  // Support for intptr_t get_previous_fp()
  //
  // This routine is used to find the previous frame pointer for the
  // caller (current_frame_guess).  It is used as part of debugging
  // when ps() is seemingly lost trying to find frames.
  // This code assumes that the caller (current_frame_guess) has a frame.
  address generate_get_previous_fp() {
    StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
    const Address old_fp  (rbp, 0);
    const Address older_fp(rax, 0);
    address start = __ pc();

    __ enter();
    __ movptr(rax, old_fp);   // caller's fp
    __ movptr(rax, older_fp); // the frame for ps()
    __ pop(rbp);
    __ ret(0);

    return start;
  }
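
  // Why generate_orderaccess_fence() emits only StoreLoad: under the
  // x86 TSO memory model the other three orderings (LoadLoad,
  // LoadStore, StoreStore) are already guaranteed, so store->load is
  // the one reordering a full fence must kill.  Sketch of the
  // publication pattern it supports (hypothetical fields):
  //
  //   *flag_a = 1;            // store
  //   OrderAccess::fence();   // reaches the membar in the stub
  //   observed = *flag_b;     // load cannot float above the store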

  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);

    if (CheckJNICalls) {
      Label ok_ret;
      __ push(rax);
      __ subptr(rsp, wordSize);      // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);      // Only check control and mask bits
      __ cmpl(rax, *(int *)(StubRoutines::x86::mxcsr_std()));
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");

      __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));

      __ bind(ok_ret);
      __ addptr(rsp, wordSize);
      __ pop(rax);
    }

    __ ret(0);

    return start;
  }

  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1);          // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2);     // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);

    __ movl(rax, 0x7f800000);
    __ xorl(c_rarg3, c_rarg3);
    __ movl(c_rarg2, inout);
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ cmpl(rax, c_rarg1);          // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2);     // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmov(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movptr(inout, c_rarg3);

    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testptr(c_rarg0, c_rarg0);   // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmov(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movptr(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    __ push(rax);
    __ push(c_rarg3);
    __ push(c_rarg2);
    __ push(c_rarg1);
    __ push(c_rarg0);

    __ movl(rax, 0x7ff00000);
    __ movq(c_rarg2, inout);
    __ movl(c_rarg3, c_rarg2);
    __ mov(c_rarg1, c_rarg2);
    __ mov(c_rarg0, c_rarg2);
    __ negl(c_rarg3);
    __ shrptr(c_rarg1, 0x20);
    __ orl(c_rarg3, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);
    __ xorl(c_rarg2, c_rarg2);
    __ shrl(c_rarg3, 0x1f);
    __ orl(c_rarg1, c_rarg3);
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0);     // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ pop(c_rarg0);
    __ pop(c_rarg1);
    __ pop(c_rarg2);
    __ pop(c_rarg3);
    __ pop(rax);

    __ ret(0);

    return start;
  }

  address generate_fp_mask(const char *stub_name, int64_t mask) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", stub_name);
    address start = __ pc();

    __ emit_data64(mask, relocInfo::none);
    __ emit_data64(mask, relocInfo::none);

    return start;
  }

  // The following routine generates a subroutine to throw an
  // asynchronous UnknownError when an unsafe access gets a fault that
  // could not be reasonably prevented by the programmer.  (Example:
  // SIGBUS/OBJERR.)
  address generate_handler_for_unsafe_access() {
    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
    address start = __ pc();

    __ push(0);                     // hole for return address-to-be
    __ pusha();                     // push registers
    Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);

    __ subptr(rsp, frame::arg_reg_save_area_bytes);
    BLOCK_COMMENT("call handle_unsafe_access");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
    __ addptr(rsp, frame::arg_reg_save_area_bytes);

    __ movptr(next_pc, rax);        // stuff next address
    __ popa();
    __ ret(0);                      // jump to next address

    return start;
  }
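
  // Context for generate_fp_mask() above (how the masks are consumed
  // elsewhere; an illustration, not emitted here): the doubled 64-bit
  // value forms a 16-byte constant usable as a packed-logical operand,
  // e.g. a double-abs mask would be laid down as
  //
  //   generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
  //
  // and then consumed by an andpd to clear the sign bit.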

  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //    [tos + 7]: saved rax - saved by caller
  //    [tos + 8]: saved r10 (rscratch1) - saved by caller
  //
  //  * = popped on exit
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();

    Label exit, error;

    __ pushf();
    __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));

    __ push(r12);

    // save c_rarg2 and c_rarg3
    __ push(c_rarg2);
    __ push(c_rarg3);

    enum {
      // After previous pushes.
      oop_to_verify = 6 * wordSize,
      saved_rax     = 7 * wordSize,

      // Before the call to MacroAssembler::debug(), see below.
      return_addr   = 16 * wordSize,
      error_msg     = 17 * wordSize
    };

    // get object
    __ movptr(rax, Address(rsp, oop_to_verify));

    // make sure object is 'reasonable'
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, exit);  // if obj is NULL it is OK
    // Check if the oop is in the right area of memory
    __ movptr(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // set r12 to heapbase for load_klass()
    __ reinit_heapbase();

    // make sure klass is 'reasonable'
    __ load_klass(rax, rax);        // get klass
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass is NULL it is broken
    // Check if the klass is in the right area of memory
    __ mov(c_rarg2, rax);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_mask());
    __ andptr(c_rarg2, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_bits());
    __ cmpptr(c_rarg2, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // make sure klass' klass is 'reasonable'
    __ load_klass(rax, rax);
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken
    // Check if the klass' klass is in the right area of memory
    __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_mask());
    __ andptr(rax, c_rarg3);
    __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_bits());
    __ cmpptr(rax, c_rarg3);
    __ jcc(Assembler::notZero, error);

    // return if everything seems ok
    __ bind(exit);
    __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back
    __ pop(c_rarg3);                         // restore c_rarg3
    __ pop(c_rarg2);                         // restore c_rarg2
    __ pop(r12);                             // restore r12
    __ popf();                               // restore flags
    __ ret(2 * wordSize);                    // pop caller saved stuff

    // handle errors
    __ bind(error);
    __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back
    __ pop(c_rarg3);                         // get saved c_rarg3 back
    __ pop(c_rarg2);                         // get saved c_rarg2 back
    __ pop(r12);                             // get saved r12 back
    __ popf();                               // get saved flags off stack --
                                             // will be ignored

    __ pusha();                              // push registers
                                             // (rip is already pushed)
    // debug(char* msg, int64_t pc, int64_t regs[])
    // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
    // pushed all the registers, so now the stack looks like:
    //   [tos +  0] 16 saved registers
    //   [tos + 16] return address
    // * [tos + 17] error message (char*)
    // * [tos + 18] object to verify (oop)
    //   [tos + 19] saved rax - saved by caller
    //   [tos + 20] saved r10 (rscratch1) - saved by caller
    // * = popped on exit

    __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
    __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
    __ movq(c_rarg2, rsp);                          // pass address of regs on stack
    __ mov(r12, rsp);                               // remember rsp
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16);                            // align stack as required by ABI
    BLOCK_COMMENT("call MacroAssembler::debug");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
    __ mov(rsp, r12);                               // restore rsp
    __ popa();                                      // pop registers (includes r12)
    __ ret(2 * wordSize);                           // pop caller saved stuff

    return start;
  }

  static address disjoint_byte_copy_entry;
  static address disjoint_short_copy_entry;
  static address disjoint_int_copy_entry;
  static address disjoint_long_copy_entry;
  static address disjoint_oop_copy_entry;

  static address byte_copy_entry;
  static address short_copy_entry;
  static address int_copy_entry;
  static address long_copy_entry;
  static address oop_copy_entry;

  static address checkcast_copy_entry;

  //
  // Verify that a register contains a clean 32-bit positive value
  // (high 32 bits are 0) so it can be used in 64-bit shifts.
  //
  // Input:
  //    Rint  - 32-bit value
  //    Rtmp  - scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#ifdef ASSERT
    Label L;
    assert_different_registers(Rtmp, Rint);
    __ movslq(Rtmp, Rint);
    __ cmpq(Rtmp, Rint);
    __ jcc(Assembler::equal, L);
    __ stop("high 32-bits of int value are not 0");
    __ bind(L);
#endif
  }

  // Generate overlap test for array copy stubs
  //
  // Input:
  //    c_rarg0 - from
  //    c_rarg1 - to
  //    c_rarg2 - element count
  //
  // Output:
  //    rax     - &from[element count - 1]
  //
  void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, sf);
  }
  void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
    array_overlap_test(NULL, &L_no_overlap, sf);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
    const Register from     = c_rarg0;
    const Register to       = c_rarg1;
    const Register count    = c_rarg2;
    const Register end_from = rax;

    __ cmpptr(to, from);
    __ lea(end_from, Address(from, count, sf, 0));
    if (NOLp == NULL) {
      ExternalAddress no_overlap(no_overlap_target);
      __ jump_cc(Assembler::belowEqual, no_overlap);
      __ cmpptr(to, end_from);
      __ jump_cc(Assembler::aboveEqual, no_overlap);
    } else {
      __ jcc(Assembler::belowEqual, (*NOLp));
      __ cmpptr(to, end_from);
      __ jcc(Assembler::aboveEqual, (*NOLp));
    }
  }
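
  // What array_overlap_test() above encodes, restated: a conjoint copy
  // must run backward only when the destination overlaps the source
  // from above, i.e. when
  //
  //   from < to && to < from + count * elem_size
  //
  // The two unsigned compares dispatch to the forward (disjoint) entry
  // whenever to <= from or to >= &from[count].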

  // Shuffle first three arg regs on Windows into Linux/Solaris locations.
  //
  // Outputs:
  //    rdi - rcx
  //    rsi - rdx
  //    rdx - r8
  //    rcx - r9
  //
  // Registers r9 and r10 are used on Windows to save rdi and rsi, which
  // are non-volatile there.  r9 and r10 should not be used by the caller.
  //
  void setup_arg_regs(int nargs = 3) {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
    assert(nargs == 3 || nargs == 4, "else fix");
#ifdef _WIN64
    assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
           "unexpected argument registers");
    if (nargs >= 4)
      __ mov(rax, r9);  // r9 is also saved_rdi
    __ movptr(saved_rdi, rdi);
    __ movptr(saved_rsi, rsi);
    __ mov(rdi, rcx);   // c_rarg0
    __ mov(rsi, rdx);   // c_rarg1
    __ mov(rdx, r8);    // c_rarg2
    if (nargs >= 4)
      __ mov(rcx, rax); // c_rarg3 (via rax)
#else
    assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
           "unexpected argument registers");
#endif
  }

  void restore_arg_regs() {
    const Register saved_rdi = r9;
    const Register saved_rsi = r10;
#ifdef _WIN64
    __ movptr(rdi, saved_rdi);
    __ movptr(rsi, saved_rsi);
#endif
  }

  // Generate code for an array write pre barrier
  //
  //    addr  - starting address
  //    count - element count
  //
  // Destroys no registers!
  //
  void gen_write_ref_array_pre_barrier(Register addr, Register count) {
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCT:
      case BarrierSet::G1SATBCTLogging:
        {
          __ pusha();             // push registers
          if (count == c_rarg0) {
            if (addr == c_rarg1) {
              // exactly backwards!!
              __ xchgptr(c_rarg1, c_rarg0);
            } else {
              __ movptr(c_rarg1, count);
              __ movptr(c_rarg0, addr);
            }
          } else {
            __ movptr(c_rarg0, addr);
            __ movptr(c_rarg1, count);
          }
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
          __ popa();
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
      case BarrierSet::ModRef:
        break;
      default:
        ShouldNotReachHere();
    }
  }

  //
  // Generate code for an array write post barrier
  //
  // Input:
  //    start    - register containing starting address of destination array
  //    end      - register containing ending address of destination array
  //    scratch  - scratch register
  //
  // The input registers are overwritten.
  // The ending address is inclusive.
  void gen_write_ref_array_post_barrier(Register start, Register end, Register scratch) {
    assert_different_registers(start, end, scratch);
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCT:
      case BarrierSet::G1SATBCTLogging:
        {
          __ pusha();             // push registers (overkill)
          // must compute element count unless barrier set interface is changed (other platforms supply count)
          assert_different_registers(start, end, scratch);
          __ lea(scratch, Address(end, BytesPerHeapOop));
          __ subptr(scratch, start);              // subtract start to get #bytes
          __ shrptr(scratch, LogBytesPerHeapOop); // convert to element count
          __ mov(c_rarg0, start);
          __ mov(c_rarg1, scratch);
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
          __ popa();
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
        {
          CardTableModRefBS* ct = (CardTableModRefBS*)bs;
          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

          Label L_loop;

          __ shrptr(start, CardTableModRefBS::card_shift);
          __ addptr(end, BytesPerHeapOop);
          __ shrptr(end, CardTableModRefBS::card_shift);
          __ subptr(end, start);                  // number of bytes to copy

          intptr_t disp = (intptr_t) ct->byte_map_base;
          if (__ is_simm32(disp)) {
            Address cardtable(noreg, noreg, Address::no_scale, disp);
            __ lea(scratch, cardtable);
          } else {
            ExternalAddress cardtable((address)disp);
            __ lea(scratch, cardtable);
          }

          const Register count = end; // 'end' register contains bytes count now
          __ addptr(start, scratch);
          __ BIND(L_loop);
          __ movb(Address(start, count, Address::times_1), 0);
          __ decrement(count);
          __ jcc(Assembler::greaterEqual, L_loop);
        }
        break;
      default:
        ShouldNotReachHere();
    }
  }
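
  // Card-marking arithmetic used above, spelled out (card_shift == 9
  // for the usual 512-byte cards; the dirty card value is 0):
  //
  //   first = start >> card_shift;
  //   last  = (end + BytesPerHeapOop) >> card_shift;
  //   for (i = last - first; i >= 0; i--)   // the decrement loop
  //     byte_map_base[first + i] = 0;       // dirty the card
  //
  // byte_map_base is pre-biased so that a shifted heap address indexes
  // the right card even though the heap does not start at address 0.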

  // Copy big chunks forward
  //
  // Inputs:
  //    end_from     - source array's end address
  //    end_to       - destination array's end address
  //    qword_count  - 64-bit element count, negative
  //    to           - scratch
  //    L_copy_32_bytes - entry label
  //    L_copy_8_bytes  - exit label
  //
  void copy_32_bytes_forward(Register end_from, Register end_to,
                             Register qword_count, Register to,
                             Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    __ BIND(L_loop);
    if (UseUnalignedLoadStores) {
      __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
      __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
      __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
      __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
    } else {
      __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
      __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
      __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
      __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
      __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
      __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);
    }
    __ BIND(L_copy_32_bytes);
    __ addptr(qword_count, 4);
    __ jcc(Assembler::lessEqual, L_loop);
    __ subptr(qword_count, 4);
    __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
  }

  // Copy big chunks backward
  //
  // Inputs:
  //    from         - source array's address
  //    dest         - destination array's address
  //    qword_count  - 64-bit element count
  //    to           - scratch
  //    L_copy_32_bytes - entry label
  //    L_copy_8_bytes  - exit label
  //
  void copy_32_bytes_backward(Register from, Register dest,
                              Register qword_count, Register to,
                              Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
    DEBUG_ONLY(__ stop("enter at entry label, not here"));
    Label L_loop;
    __ align(OptoLoopAlignment);
    __ BIND(L_loop);
    if (UseUnalignedLoadStores) {
      __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
      __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
      __ movdqu(xmm1, Address(from, qword_count, Address::times_8,  0));
      __ movdqu(Address(dest, qword_count, Address::times_8,  0), xmm1);
    } else {
      __ movq(to, Address(from, qword_count, Address::times_8, 24));
      __ movq(Address(dest, qword_count, Address::times_8, 24), to);
      __ movq(to, Address(from, qword_count, Address::times_8, 16));
      __ movq(Address(dest, qword_count, Address::times_8, 16), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  8));
      __ movq(Address(dest, qword_count, Address::times_8,  8), to);
      __ movq(to, Address(from, qword_count, Address::times_8,  0));
      __ movq(Address(dest, qword_count, Address::times_8,  0), to);
    }
    __ BIND(L_copy_32_bytes);
    __ subptr(qword_count, 4);
    __ jcc(Assembler::greaterEqual, L_loop);
    __ addptr(qword_count, 4);
    __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
  }
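
  // Loop-indexing note for the two helpers above: the forward copier
  // biases end_from/end_to to the last qword and drives a *negative*
  // qword_count toward zero, so a single addptr both advances the index
  // and sets flags for the branch.  In outline (n = 7 qwords, say):
  //
  //   count = -7;
  //   while ((count += 4) <= 0)         // addptr + jcc(lessEqual)
  //     copy qwords [count-3 .. count]; // 32 bytes per iteration
  //   count -= 4;                       // 3 trailing qwords remain
  //
  // The backward copier mirrors this with a positive count that is
  // decremented by 4 until it goes negative.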

  // Arguments:
  //    aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //              ignored
  //    name    - stub name string
  //
  // Inputs:
  //    c_rarg0 - source array address
  //    c_rarg1 - destination array address
  //    c_rarg2 - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //    disjoint_byte_copy_entry is set to the no-overlap entry point
  //    used by generate_conjoint_byte_copy().
  //
  address generate_disjoint_byte_copy(bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    Label L_copy_byte, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied: end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    disjoint_byte_copy_entry = __ pc();
    BLOCK_COMMENT("Entry:");
    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3);    // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count); // make the count negative
    __ jmp(L_copy_32_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jccb(Assembler::zero, L_copy_byte);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ addptr(end_from, 2);
    __ addptr(end_to, 2);

    // Check for and copy trailing byte
    __ BIND(L_copy_byte);
    __ testl(byte_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movb(rax, Address(end_from, 8));
    __ movb(Address(end_to, 8), rax);

    __ BIND(L_exit);
    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
    restore_arg_regs();
    __ xorptr(rax, rax); // return 0
    __ leave();          // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in 32-byte chunks
    copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }
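
  // Tail arithmetic in the byte copier above: byte_count keeps the raw
  // byte total while count is shifted down to whole qwords, so the low
  // three bits of byte_count select the leftovers.  For example
  // byte_count == 15 copies one qword (8 bytes), then takes the 4-, 2-
  // and 1-byte branches exactly once each (15 = 8 + 4 + 2 + 1).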

  // Arguments:
  //    aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //              ignored
  //    name    - stub name string
  //
  // Inputs:
  //    c_rarg0 - source array address
  //    c_rarg1 - destination array address
  //    c_rarg2 - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
  // we let the hardware handle it.  The one to eight bytes within words,
  // dwords or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_byte_copy(bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register byte_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    byte_copy_entry = __ pc();
    BLOCK_COMMENT("Entry:");
    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)

    array_overlap_test(disjoint_byte_copy_entry, Address::times_1);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(byte_count, count);
    __ shrptr(count, 3);    // count => qword_count

    // Copy from high to low addresses.

    // Check for and copy trailing byte
    __ testl(byte_count, 1);
    __ jcc(Assembler::zero, L_copy_2_bytes);
    __ movb(rax, Address(from, byte_count, Address::times_1, -1));
    __ movb(Address(to, byte_count, Address::times_1, -1), rax);
    __ decrement(byte_count); // Adjust for possible trailing word

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(byte_count, 2);
    __ jcc(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, byte_count, Address::times_1, -2));
    __ movw(Address(to, byte_count, Address::times_1, -2), rax);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(byte_count, 4);
    __ jcc(Assembler::zero, L_copy_32_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_32_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
    restore_arg_regs();
    __ xorptr(rax, rax); // return 0
    __ leave();          // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in 32-byte chunks
    copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);

    inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
    restore_arg_regs();
    __ xorptr(rax, rax); // return 0
    __ leave();          // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
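
  // Ordering note for the conjoint (backward) copier above: because it
  // copies from high addresses down, the sub-qword tail sits at the
  // *highest* addresses and must be moved first, before the qword loop
  // walks the overlapping middle.  Hence the 1-, 2- and 4-byte checks
  // precede copy_32_bytes_backward(), the mirror image of the forward
  // stub's tail-last layout.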

  // Arguments:
  //    aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //              ignored
  //    name    - stub name string
  //
  // Inputs:
  //    c_rarg0 - source array address
  //    c_rarg1 - destination array address
  //    c_rarg2 - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  // Side Effects:
  //    disjoint_short_copy_entry is set to the no-overlap entry point
  //    used by generate_conjoint_short_copy().
  //
  address generate_disjoint_short_copy(bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied: end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    disjoint_short_copy_entry = __ pc();
    BLOCK_COMMENT("Entry:");
    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2);    // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_32_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Original 'dest' is trashed, so we can't use it as a
    // base register for a possible trailing word copy

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jccb(Assembler::zero, L_copy_2_bytes);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

    __ addptr(end_from, 4);
    __ addptr(end_to, 4);

    // Check for and copy trailing word
    __ BIND(L_copy_2_bytes);
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_exit);
    __ movw(rax, Address(end_from, 8));
    __ movw(Address(end_to, 8), rax);

    __ BIND(L_exit);
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
    restore_arg_regs();
    __ xorptr(rax, rax); // return 0
    __ leave();          // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in 32-byte chunks
    copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }

  // Arguments:
  //    aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //              ignored
  //    name    - stub name string
  //
  // Inputs:
  //    c_rarg0 - source array address
  //    c_rarg1 - destination array address
  //    c_rarg2 - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
  // let the hardware handle it.  The two or four words within dwords
  // or qwords that span cache line boundaries will still be loaded
  // and stored atomically.
  //
  address generate_conjoint_short_copy(bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    short_copy_entry = __ pc();
    BLOCK_COMMENT("Entry:");
    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)

    array_overlap_test(disjoint_short_copy_entry, Address::times_2);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movptr(word_count, count);
    __ shrptr(count, 2);    // count => qword_count

    // Copy from high to low addresses.  Use 'to' as scratch.

    // Check for and copy trailing word
    __ testl(word_count, 1);
    __ jccb(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, word_count, Address::times_2, -2));
    __ movw(Address(to, word_count, Address::times_2, -2), rax);

    // Check for and copy trailing dword
    __ BIND(L_copy_4_bytes);
    __ testl(word_count, 2);
    __ jcc(Assembler::zero, L_copy_32_bytes);
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_32_bytes);

    // Copy trailing qwords
    __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
    restore_arg_regs();
    __ xorptr(rax, rax); // return 0
    __ leave();          // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in 32-byte chunks
    copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);

    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
    restore_arg_regs();
    __ xorptr(rax, rax); // return 0
    __ leave();          // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
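
  // Barrier bracketing for the oop copiers that follow (summarizing the
  // shared-code contract): when is_oop, the stub calls
  // gen_write_ref_array_pre_barrier() before any oop is stored (G1's
  // SATB barrier wants dest/count while they are still in
  // c_rarg1/c_rarg2) and gen_write_ref_array_post_barrier() on exit to
  // dirty the covered cards, so concurrent collectors never miss the
  // updated range.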

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  // Side Effects:
  //   disjoint_int_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_int_oop_copy().
  //
  address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;
    const Register end_from    = from; // source array end address
    const Register end_to      = to;   // destination array end address
    const Register saved_to    = r11;  // saved destination array address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied: end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    (is_oop ? disjoint_oop_copy_entry : disjoint_int_copy_entry) = __ pc();

    if (is_oop) {
      // no registers are destroyed by this call
      gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
    }

    BLOCK_COMMENT("Entry:");
    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    if (is_oop) {
      __ movq(saved_to, to);
    }

    // 'from', 'to' and 'count' are now valid
    __ movptr(dword_count, count);
    __ shrptr(count, 1); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_32_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
  __ BIND(L_copy_4_bytes);
    __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
    __ jccb(Assembler::zero, L_exit);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

  __ BIND(L_exit);
    if (is_oop) {
      __ leaq(end_to, Address(saved_to, dword_count, Address::times_4, -4));
      gen_write_ref_array_post_barrier(saved_to, end_to, rax);
    }
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
    restore_arg_regs();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in 32-byte chunks
    copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }
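
  // Note: illustrative sketch only, not VM code.  The forward-copy loops
  // in these stubs use the common negative-index idiom: the end pointers
  // address the last qword and the index runs from -qword_count up toward
  // zero, so the loop needs no separate compare.  Roughly (qc = qwords):
  //
  //   jlong* end_from = (jlong*)from + qc - 1;   // inclusive end pointers
  //   jlong* end_to   = (jlong*)to   + qc - 1;
  //   for (ptrdiff_t i = -(ptrdiff_t)qc; i != 0; i++)
  //     end_to[i + 1] = end_from[i + 1];         // copies elements 0..qc-1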

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomically.
  //
  address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_32_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (is_oop) {
      // no registers are destroyed by this call
      gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
    }

    (is_oop ? oop_copy_entry : int_copy_entry) = __ pc();
    BLOCK_COMMENT("Entry:");
    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)

    array_overlap_test(is_oop ? disjoint_oop_copy_entry : disjoint_int_copy_entry,
                       Address::times_4);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    assert_clean_int(count, rax); // Make sure 'count' is clean int.
    // 'from', 'to' and 'count' are now valid
    __ movptr(dword_count, count);
    __ shrptr(count, 1); // count => qword_count

    // Copy from high to low addresses.  Use 'to' as scratch.

    // Check for and copy trailing dword
    __ testl(dword_count, 1);
    __ jcc(Assembler::zero, L_copy_32_bytes);
    __ movl(rax, Address(from, dword_count, Address::times_4, -4));
    __ movl(Address(to, dword_count, Address::times_4, -4), rax);
    __ jmp(L_copy_32_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
    if (is_oop) {
      __ jmp(L_exit);
    }
    restore_arg_regs();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in 32-byte chunks
    copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);

    inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
  __ bind(L_exit);
    if (is_oop) {
      Register end_to = rdx;
      __ leaq(end_to, Address(to, dword_count, Address::times_4, -4));
      gen_write_ref_array_post_barrier(to, end_to, rax);
    }
    restore_arg_regs();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
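
  // Note: illustrative sketch only, not VM code.  For oop arrays the stubs
  // bracket the copy with GC barriers.  With a byte-map card table,
  // gen_write_ref_array_post_barrier() dirties the cards covering the
  // inclusive range [to, end_to], roughly:
  //
  //   for (uintptr_t c = (uintptr_t)to >> card_shift;
  //        c <= (uintptr_t)end_to >> card_shift; c++)
  //     card_table_base[c] = dirty_card;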

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // Side Effects:
  //   disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
  //   no-overlap entry point used by generate_conjoint_long_oop_copy().
  //
  address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_32_bytes, L_copy_8_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register qword_count = rdx;  // elements count
    const Register end_from    = from; // source array end address
    const Register end_to      = rcx;  // destination array end address
    const Register saved_to    = to;
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied: end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    // Save no-overlap entry point for generate_conjoint_long_oop_copy()
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    if (is_oop) {
      disjoint_oop_copy_entry = __ pc();
      // no registers are destroyed by this call
      gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
    } else {
      disjoint_long_copy_entry = __ pc();
    }
    BLOCK_COMMENT("Entry:");
    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'qword_count' are now valid

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
    __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
    __ negptr(qword_count);
    __ jmp(L_copy_32_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ increment(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
      restore_arg_regs();
      __ xorptr(rax, rax); // return 0
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }

    // Copy in 32-byte chunks
    copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);

    if (is_oop) {
    __ BIND(L_exit);
      gen_write_ref_array_post_barrier(saved_to, end_to, rax);
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
    }
    restore_arg_regs();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_32_bytes, L_copy_8_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register qword_count = rdx;  // elements count
    const Register saved_count = rcx;

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.

    address disjoint_copy_entry = NULL;
    if (is_oop) {
      assert(!UseCompressedOops, "shouldn't be called for compressed oops");
      disjoint_copy_entry = disjoint_oop_copy_entry;
      oop_copy_entry  = __ pc();
      array_overlap_test(disjoint_oop_copy_entry, Address::times_8);
    } else {
      disjoint_copy_entry = disjoint_long_copy_entry;
      long_copy_entry = __ pc();
      array_overlap_test(disjoint_long_copy_entry, Address::times_8);
    }
    BLOCK_COMMENT("Entry:");
    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)

    array_overlap_test(disjoint_copy_entry, Address::times_8);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'qword_count' are now valid

    if (is_oop) {
      // Save to and count for store barrier
      __ movptr(saved_count, qword_count);
      // No registers are destroyed by this call
      gen_write_ref_array_pre_barrier(to, saved_count);
    }

    __ jmp(L_copy_32_bytes);

    // Copy trailing qwords
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrement(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      __ jmp(L_exit);
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
      restore_arg_regs();
      __ xorptr(rax, rax); // return 0
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }

    // Copy in 32-byte chunks
    copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);

    if (is_oop) {
    __ BIND(L_exit);
      __ lea(rcx, Address(to, saved_count, Address::times_8, -8));
      gen_write_ref_array_post_barrier(to, rcx, rax);
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
    }
    restore_arg_regs();
    __ xorptr(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }


  // Helper for generating a dynamic type check.
  // Smashes no registers.
  void generate_type_check(Register sub_klass,
                           Register super_check_offset,
                           Register super_klass,
                           Label& L_success) {
    assert_different_registers(sub_klass, super_check_offset, super_klass);

    BLOCK_COMMENT("type_check:");

    Label L_miss;

    __ check_klass_subtype_fast_path(sub_klass, super_klass, noreg,        &L_success, &L_miss, NULL,
                                     super_check_offset);
    __ check_klass_subtype_slow_path(sub_klass, super_klass, noreg, noreg, &L_success, NULL);

    // Fall through on failure!
  __ BIND(L_miss);
  }
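
  // Note: illustrative sketch only, not VM code.  The fast/slow paths
  // emitted above implement the usual HotSpot subtype check; in rough
  // pseudo-C:
  //
  //   if (sub_klass == super_klass) return success;
  //   if (*(Klass**)((address)sub_klass + super_check_offset) == super_klass)
  //     return success;                       // cache or display hit
  //   if (super_check_offset != secondary_super_cache_offset) return miss;
  //   // slow path: linear scan of sub_klass->secondary_supers, cache a hit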

  //
  //  Generate checkcasting array copy stub
  //
  //  Input:
  //    c_rarg0   - source array address
  //    c_rarg1   - destination array address
  //    c_rarg2   - element count, treated as ssize_t, can be zero
  //    c_rarg3   - size_t ckoff (super_check_offset)
  // not Win64
  //    c_rarg4   - oop ckval (super_klass)
  // Win64
  //    rsp+40    - oop ckval (super_klass)
  //
  //  Output:
  //    rax ==  0  -  success
  //    rax == -1^K - failure, where K is partial transfer count
  //
  address generate_checkcast_copy(const char *name) {

    Label L_load_element, L_store_element, L_do_card_marks, L_done;

    // Input registers (after setup_arg_regs)
    const Register from        = rdi;   // source array address
    const Register to          = rsi;   // destination array address
    const Register length      = rdx;   // elements count
    const Register ckoff       = rcx;   // super_check_offset
    const Register ckval       = r8;    // super_klass

    // Registers used as temps (r13, r14 are save-on-entry)
    const Register end_from    = from;  // source array end address
    const Register end_to      = r13;   // destination array end address
    const Register count       = rdx;   // -(count_remaining)
    const Register r14_length  = r14;   // saved copy of length
    // End pointers are inclusive, and if length is not zero they point
    // to the last unit copied: end_to[0] := end_from[0]

    const Register rax_oop    = rax;    // actual oop copied
    const Register r11_klass  = r11;    // oop._klass

    //---------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the two arrays are subtypes of Object[] but the
    // destination array type is not equal to or a supertype
    // of the source type.  Each element must be separately
    // checked.

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    checkcast_copy_entry = __ pc();
    BLOCK_COMMENT("Entry:");

#ifdef ASSERT
    // caller guarantees that the arrays really are different
    // otherwise, we would have to make conjoint checks
    { Label L;
      array_overlap_test(L, TIMES_OOP);
      __ stop("checkcast_copy within a single array");
      __ bind(L);
    }
#endif //ASSERT

    // allocate spill slots for r13, r14
    enum {
      saved_r13_offset,
      saved_r14_offset,
      saved_rbp_offset,
      saved_rip_offset,
      saved_rarg0_offset
    };
    __ subptr(rsp, saved_rbp_offset * wordSize);
    __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
    __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);
    setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
                       // ckoff => rcx, ckval => r8
                       // r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
    // last argument (#4) is on stack on Win64
    const int ckval_offset = saved_rarg0_offset + 4;
    __ movptr(ckval, Address(rsp, ckval_offset * wordSize));
#endif

    // check that int operands are properly extended to size_t
    assert_clean_int(length, rax);
    assert_clean_int(ckoff, rax);

#ifdef ASSERT
    BLOCK_COMMENT("assert consistent ckoff/ckval");
    // The ckoff and ckval must be mutually consistent,
    // even though caller generates both.
    { Label L;
      int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
                        Klass::super_check_offset_offset_in_bytes());
      __ cmpl(ckoff, Address(ckval, sco_offset));
      __ jcc(Assembler::equal, L);
      __ stop("super_check_offset inconsistent");
      __ bind(L);
    }
#endif //ASSERT

    // Loop-invariant addresses.  They are exclusive end pointers.
    Address end_from_addr(from, length, TIMES_OOP, 0);
    Address   end_to_addr(to,   length, TIMES_OOP, 0);
    // Loop-variant addresses.  They assume post-incremented count < 0.
    Address from_element_addr(end_from, count, TIMES_OOP, 0);
    Address   to_element_addr(end_to,   count, TIMES_OOP, 0);

    gen_write_ref_array_pre_barrier(to, count);

    // Copy from low to high addresses, indexed from the end of each array.
    __ lea(end_from, end_from_addr);
    __ lea(end_to, end_to_addr);
    __ movptr(r14_length, length);        // save a copy of the length
    assert(length == count, "");          // else fix next line:
    __ negptr(count);                     // negate and test the length
    __ jcc(Assembler::notZero, L_load_element);

    // Empty array:  Nothing to do.
    __ xorptr(rax, rax);                  // return 0 on (trivial) success
    __ jmp(L_done);

    // ======== begin loop ========
    // (Loop is rotated; its entry is L_load_element.)
    // Loop control:
    //   for (count = -count; count != 0; count++)
    // Base pointers src, dst are biased by 8*(count-1), to the last element.
    __ align(OptoLoopAlignment);

  __ BIND(L_store_element);
    __ store_heap_oop(to_element_addr, rax_oop);  // store the oop
    __ increment(count);               // increment the count toward zero
    __ jcc(Assembler::zero, L_do_card_marks);

    // ======== loop entry is here ========
  __ BIND(L_load_element);
    __ load_heap_oop(rax_oop, from_element_addr); // load the oop
    __ testptr(rax_oop, rax_oop);
    __ jcc(Assembler::zero, L_store_element);

    __ load_klass(r11_klass, rax_oop);// query the object klass
    generate_type_check(r11_klass, ckoff, ckval, L_store_element);
    // ======== end loop ========

    // It was a real error; we must depend on the caller to finish the job.
    // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
    // Emit GC store barriers for the oops we have copied (r14 + rdx),
    // and report their number to the caller.
    assert_different_registers(rax, r14_length, count, to, end_to, rcx);
    __ lea(end_to, to_element_addr);
    __ addptr(end_to, -heapOopSize);      // make an inclusive end pointer
    gen_write_ref_array_post_barrier(to, end_to, rscratch1);
    __ movptr(rax, r14_length);           // original oops
    __ addptr(rax, count);                // K = (original - remaining) oops
    __ notptr(rax);                       // report (-1^K) to caller
    __ jmp(L_done);

    // Come here on success only.
  __ BIND(L_do_card_marks);
    __ addptr(end_to, -heapOopSize);      // make an inclusive end pointer
    gen_write_ref_array_post_barrier(to, end_to, rscratch1);
    __ xorptr(rax, rax);                  // return 0 on success

    // Common exit point (success or failure).
  __ BIND(L_done);
    __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
    __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
    inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
    restore_arg_regs();
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
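
  // Note: illustrative sketch only, not VM code.  Ignoring barriers and
  // compressed oops, the element-wise loop above behaves roughly like:
  //
  //   for (ssize_t i = 0; i < length; i++) {
  //     oop o = from[i];
  //     if (o != NULL && !o->klass()->is_subtype_of(ckval))
  //       return ~i;              // -1^K, with K elements already copied
  //     to[i] = o;
  //   }
  //   return 0;                   // success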

  //
  //  Generate 'unsafe' array copy stub
  //  Though just as safe as the other stubs, it takes an unscaled
  //  size_t argument instead of an element count.
  //
  //  Input:
  //    c_rarg0   - source array address
  //    c_rarg1   - destination array address
  //    c_rarg2   - byte count, treated as ssize_t, can be zero
  //
  // Examines the alignment of the operands and dispatches
  // to a long, int, short, or byte copy loop.
  //
  address generate_unsafe_copy(const char *name) {

    Label L_long_aligned, L_int_aligned, L_short_aligned;

    // Input registers (before setup_arg_regs)
    const Register from        = c_rarg0;  // source array address
    const Register to          = c_rarg1;  // destination array address
    const Register size        = c_rarg2;  // byte count (size_t)

    // Register used as a temp
    const Register bits        = rax;      // test copy of low bits

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);

    __ mov(bits, from);
    __ orptr(bits, to);
    __ orptr(bits, size);

    __ testb(bits, BytesPerLong-1);
    __ jccb(Assembler::zero, L_long_aligned);

    __ testb(bits, BytesPerInt-1);
    __ jccb(Assembler::zero, L_int_aligned);

    __ testb(bits, BytesPerShort-1);
    __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));

  __ BIND(L_short_aligned);
    __ shrptr(size, LogBytesPerShort); // size => short_count
    __ jump(RuntimeAddress(short_copy_entry));

  __ BIND(L_int_aligned);
    __ shrptr(size, LogBytesPerInt); // size => int_count
    __ jump(RuntimeAddress(int_copy_entry));

  __ BIND(L_long_aligned);
    __ shrptr(size, LogBytesPerLong); // size => qword_count
    __ jump(RuntimeAddress(long_copy_entry));

    return start;
  }
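
  // Note: illustrative sketch only, not VM code.  OR-ing the two addresses
  // and the byte count lets a single test check the alignment of all three
  // at once; the dispatch above is roughly:
  //
  //   uintptr_t bits = (uintptr_t)from | (uintptr_t)to | size;
  //   if      ((bits & 7) == 0) long_copy (from, to, size >> 3);
  //   else if ((bits & 3) == 0) int_copy  (from, to, size >> 2);
  //   else if ((bits & 1) == 0) short_copy(from, to, size >> 1);
  //   else                      byte_copy (from, to, size);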

  // Perform range checks on the proposed arraycopy.
  // Kills temp, but nothing else.
  // Also, clean the sign bits of src_pos and dst_pos.
  void arraycopy_range_checks(Register src,     // source array oop (c_rarg0)
                              Register src_pos, // source position (c_rarg1)
                              Register dst,     // destination array oop (c_rarg2)
                              Register dst_pos, // destination position (c_rarg3)
                              Register length,
                              Register temp,
                              Label& L_failed) {
    BLOCK_COMMENT("arraycopy_range_checks:");

    //  if (src_pos + length > arrayOop(src)->length())  FAIL;
    __ movl(temp, length);
    __ addl(temp, src_pos);               // src_pos + length
    __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    //  if (dst_pos + length > arrayOop(dst)->length())  FAIL;
    __ movl(temp, length);
    __ addl(temp, dst_pos);               // dst_pos + length
    __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
    // Move with sign extension can be used since they are positive.
    __ movslq(src_pos, src_pos);
    __ movslq(dst_pos, dst_pos);

    BLOCK_COMMENT("arraycopy_range_checks done");
  }
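
  // Note: illustrative sketch only, not VM code.  The compares above are
  // unsigned ('above'), so a pos + length sum that exceeds INT_MAX still
  // fails the check instead of comparing as a negative number; roughly:
  //
  //   if ((uint32_t)(src_pos + length) > (uint32_t)src->length()) goto fail;
  //   if ((uint32_t)(dst_pos + length) > (uint32_t)dst->length()) goto fail;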

  //
  //  Generate generic array copy stubs
  //
  //  Input:
  //    c_rarg0    -  src oop
  //    c_rarg1    -  src_pos (32-bits)
  //    c_rarg2    -  dst oop
  //    c_rarg3    -  dst_pos (32-bits)
  // not Win64
  //    c_rarg4    -  element count (32-bits)
  // Win64
  //    rsp+40     -  element count (32-bits)
  //
  //  Output:
  //    rax ==  0  -  success
  //    rax == -1^K - failure, where K is partial transfer count
  //
  address generate_generic_copy(const char *name) {

    Label L_failed, L_failed_0, L_objArray;
    Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;

    // Input registers
    const Register src     = c_rarg0;  // source array oop
    const Register src_pos = c_rarg1;  // source position
    const Register dst     = c_rarg2;  // destination array oop
    const Register dst_pos = c_rarg3;  // destination position
    // elements count is on stack on Win64
#ifdef _WIN64
#define C_RARG4 Address(rsp, 6 * wordSize)
#else
#define C_RARG4 c_rarg4
#endif

    { int modulus = CodeEntryAlignment;
      int target  = modulus - 5; // 5 = sizeof jmp(L_failed)
      int advance = target - (__ offset() % modulus);
      if (advance < 0)  advance += modulus;
      if (advance > 0)  __ nop(advance);
    }
    StubCodeMark mark(this, "StubRoutines", name);

    // Short-hop target to L_failed.  Makes for denser prologue code.
  __ BIND(L_failed_0);
    __ jmp(L_failed);
    assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");

    __ align(CodeEntryAlignment);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_generic_array_copy_ctr);

    //-----------------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the following conditions are met:
    //
    // (1) src and dst must not be null.
    // (2) src_pos must not be negative.
    // (3) dst_pos must not be negative.
    // (4) length  must not be negative.
    // (5) src klass and dst klass should be the same and not NULL.
    // (6) src and dst should be arrays.
    // (7) src_pos + length must not exceed length of src.
    // (8) dst_pos + length must not exceed length of dst.
    //

    //  if (src == NULL) return -1;
    __ testptr(src, src);         // src oop
    size_t j1off = __ offset();
    __ jccb(Assembler::zero, L_failed_0);

    //  if (src_pos < 0) return -1;
    __ testl(src_pos, src_pos); // src_pos (32-bits)
    __ jccb(Assembler::negative, L_failed_0);

    //  if (dst == NULL) return -1;
    __ testptr(dst, dst);         // dst oop
    __ jccb(Assembler::zero, L_failed_0);

    //  if (dst_pos < 0) return -1;
    __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
    size_t j4off = __ offset();
    __ jccb(Assembler::negative, L_failed_0);

    // The first four tests are very dense code,
    // but not quite dense enough to put four
    // jumps in a 16-byte instruction fetch buffer.
    // That's good, because some branch predictors
    // do not like jumps so close together.
    // Make sure of this.
    guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");

    // registers used as temp
    const Register r11_length    = r11; // elements count to copy
    const Register r10_src_klass = r10; // array klass
    const Register r9_dst_klass  = r9;  // dest array klass

    //  if (length < 0) return -1;
    __ movl(r11_length, C_RARG4);   // length (elements count, 32-bits value)
    __ testl(r11_length, r11_length);
    __ jccb(Assembler::negative, L_failed_0);

    __ load_klass(r10_src_klass, src);
#ifdef ASSERT
    //  assert(src->klass() != NULL);
    BLOCK_COMMENT("assert klasses not null");
    { Label L1, L2;
      __ testptr(r10_src_klass, r10_src_klass);
      __ jcc(Assembler::notZero, L2);   // it is broken if klass is NULL
      __ bind(L1);
      __ stop("broken null klass");
      __ bind(L2);
      __ load_klass(r9_dst_klass, dst);
      __ cmpq(r9_dst_klass, 0);
      __ jcc(Assembler::equal, L1);     // this would be broken also
      BLOCK_COMMENT("assert done");
    }
#endif

    // Load layout helper (32-bits)
    //
    //  |array_tag|     | header_size | element_type |     |log2_element_size|
    // 32        30    24            16              8     2                 0
    //
    //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
    //

    int lh_offset = klassOopDesc::header_size() * HeapWordSize +
                    Klass::layout_helper_offset_in_bytes();

    const Register rax_lh = rax;  // layout helper

    __ movl(rax_lh, Address(r10_src_klass, lh_offset));
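
    // Note: illustrative sketch only, not VM code.  The fields packed into
    // the layout helper are recovered with shifts and masks, roughly:
    //
    //   int tag         = lh >> Klass::_lh_array_tag_shift;        // 0x3/0x2
    //   int header_size = (lh >> Klass::_lh_header_size_shift)
    //                     & Klass::_lh_header_size_mask;           // bytes
    //   int log2_esize  = lh & Klass::_lh_log2_element_size_mask;  // 0..3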

    // Handle objArrays completely differently...
    jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
    __ cmpl(rax_lh, objArray_lh);
    __ jcc(Assembler::equal, L_objArray);

    //  if (src->klass() != dst->klass()) return -1;
    __ load_klass(r9_dst_klass, dst);
    __ cmpq(r10_src_klass, r9_dst_klass);
    __ jcc(Assembler::notEqual, L_failed);

    //  if (!src->is_Array()) return -1;
    __ cmpl(rax_lh, Klass::_lh_neutral_value);
    __ jcc(Assembler::greaterEqual, L_failed);

    // At this point, it is known to be a typeArray (array_tag 0x3).
#ifdef ASSERT
    { Label L;
      __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
      __ jcc(Assembler::greaterEqual, L);
      __ stop("must be a primitive array");
      __ bind(L);
    }
#endif

    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                           r10, L_failed);

    // typeArrayKlass
    //
    // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
    // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
    //

    const Register r10_offset = r10;    // array offset
    const Register rax_elsize = rax_lh; // element size

    __ movl(r10_offset, rax_lh);
    __ shrl(r10_offset, Klass::_lh_header_size_shift);
    __ andptr(r10_offset, Klass::_lh_header_size_mask);   // array_offset
    __ addptr(src, r10_offset);           // src array offset
    __ addptr(dst, r10_offset);           // dst array offset
    BLOCK_COMMENT("choose copy loop based on element size");
    __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize

    // next registers should be set before the jump to the corresponding stub
    const Register from     = c_rarg0;  // source array address
    const Register to       = c_rarg1;  // destination array address
    const Register count    = c_rarg2;  // elements count

    // 'from', 'to', 'count' registers should be set in this order
    // since they are the same as 'src', 'src_pos', 'dst'.

  __ BIND(L_copy_bytes);
    __ cmpl(rax_elsize, 0);
    __ jccb(Assembler::notEqual, L_copy_shorts);
    __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_1, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(byte_copy_entry));

  __ BIND(L_copy_shorts);
    __ cmpl(rax_elsize, LogBytesPerShort);
    __ jccb(Assembler::notEqual, L_copy_ints);
    __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_2, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(short_copy_entry));

  __ BIND(L_copy_ints);
    __ cmpl(rax_elsize, LogBytesPerInt);
    __ jccb(Assembler::notEqual, L_copy_longs);
    __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_4, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(int_copy_entry));

  __ BIND(L_copy_longs);
#ifdef ASSERT
    { Label L;
      __ cmpl(rax_elsize, LogBytesPerLong);
      __ jcc(Assembler::equal, L);
      __ stop("must be long copy, but elsize is wrong");
      __ bind(L);
    }
#endif
    __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr
    __ lea(to,   Address(dst, dst_pos, Address::times_8, 0));// dst_addr
    __ movl2ptr(count, r11_length); // length
    __ jump(RuntimeAddress(long_copy_entry));

    // objArrayKlass
  __ BIND(L_objArray);
    // live at this point:  r10_src_klass, src[_pos], dst[_pos]

    Label L_plain_copy, L_checkcast_copy;
    //  test array classes for subtyping
    __ load_klass(r9_dst_klass, dst);
    __ cmpq(r10_src_klass, r9_dst_klass); // usual case is exact equality
    __ jcc(Assembler::notEqual, L_checkcast_copy);

    // Identically typed arrays can be copied without element-wise checks.
    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                           r10, L_failed);

    __ lea(from, Address(src, src_pos, TIMES_OOP,
                 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
    __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
                 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
    __ movl2ptr(count, r11_length); // length
  __ BIND(L_plain_copy);
    __ jump(RuntimeAddress(oop_copy_entry));

  __ BIND(L_checkcast_copy);
    // live at this point:  r10_src_klass, !r11_length
    {
      // assert(r11_length == C_RARG4); // will reload from here
      Register r11_dst_klass = r11;
      __ load_klass(r11_dst_klass, dst);

      // Before looking at dst.length, make sure dst is also an objArray.
      __ cmpl(Address(r11_dst_klass, lh_offset), objArray_lh);
      __ jcc(Assembler::notEqual, L_failed);

      // It is safe to examine both src.length and dst.length.
#ifndef _WIN64
      arraycopy_range_checks(src, src_pos, dst, dst_pos, C_RARG4,
                             rax, L_failed);
#else
      __ movl(r11_length, C_RARG4);     // reload
      arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                             rax, L_failed);
      __ load_klass(r11_dst_klass, dst); // reload
#endif

      // Marshal the base address arguments now, freeing registers.
      __ lea(from, Address(src, src_pos, TIMES_OOP,
                   arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
                   arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ movl(count, C_RARG4);          // length (reloaded)
      Register sco_temp = c_rarg3;      // this register is free now
      assert_different_registers(from, to, count, sco_temp,
                                 r11_dst_klass, r10_src_klass);
      assert_clean_int(count, sco_temp);

      // Generate the type check.
      int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
                        Klass::super_check_offset_offset_in_bytes());
      __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
      assert_clean_int(sco_temp, rax);
      generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);

      // Fetch destination element klass from the objArrayKlass header.
      int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
                       objArrayKlass::element_klass_offset_in_bytes());
      __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
      __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
      assert_clean_int(sco_temp, rax);

      // the checkcast_copy loop needs two extra arguments:
      assert(c_rarg3 == sco_temp, "#3 already in place");
      __ movptr(C_RARG4, r11_dst_klass);  // dst.klass.element_klass
      __ jump(RuntimeAddress(checkcast_copy_entry));
    }

  __ BIND(L_failed);
    __ xorptr(rax, rax);
    __ notptr(rax); // return -1
    __ leave();   // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }

#undef length_arg

  void generate_arraycopy_stubs() {
    // Call the conjoint generation methods immediately after
    // the disjoint ones so that short branches from the former
    // to the latter can be generated.
    StubRoutines::_jbyte_disjoint_arraycopy  = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
    StubRoutines::_jbyte_arraycopy           = generate_conjoint_byte_copy(false, "jbyte_arraycopy");

    StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
    StubRoutines::_jshort_arraycopy          = generate_conjoint_short_copy(false, "jshort_arraycopy");

    StubRoutines::_jint_disjoint_arraycopy   = generate_disjoint_int_oop_copy(false, false, "jint_disjoint_arraycopy");
    StubRoutines::_jint_arraycopy            = generate_conjoint_int_oop_copy(false, false, "jint_arraycopy");

    StubRoutines::_jlong_disjoint_arraycopy  = generate_disjoint_long_oop_copy(false, false, "jlong_disjoint_arraycopy");
    StubRoutines::_jlong_arraycopy           = generate_conjoint_long_oop_copy(false, false, "jlong_arraycopy");


    if (UseCompressedOops) {
      StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, "oop_disjoint_arraycopy");
      StubRoutines::_oop_arraycopy          = generate_conjoint_int_oop_copy(false, true, "oop_arraycopy");
    } else {
      StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, "oop_disjoint_arraycopy");
      StubRoutines::_oop_arraycopy          = generate_conjoint_long_oop_copy(false, true, "oop_arraycopy");
    }

    StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy");
    StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy("unsafe_arraycopy");
    StubRoutines::_generic_arraycopy   = generate_generic_copy("generic_arraycopy");

    // We don't generate specialized code for HeapWord-aligned source
    // arrays, so just use the code we've already generated
    StubRoutines::_arrayof_jbyte_disjoint_arraycopy  = StubRoutines::_jbyte_disjoint_arraycopy;
    StubRoutines::_arrayof_jbyte_arraycopy           = StubRoutines::_jbyte_arraycopy;

    StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy;
    StubRoutines::_arrayof_jshort_arraycopy          = StubRoutines::_jshort_arraycopy;

    StubRoutines::_arrayof_jint_disjoint_arraycopy   = StubRoutines::_jint_disjoint_arraycopy;
    StubRoutines::_arrayof_jint_arraycopy            = StubRoutines::_jint_arraycopy;

    StubRoutines::_arrayof_jlong_disjoint_arraycopy  = StubRoutines::_jlong_disjoint_arraycopy;
    StubRoutines::_arrayof_jlong_arraycopy           = StubRoutines::_jlong_arraycopy;

    StubRoutines::_arrayof_oop_disjoint_arraycopy    = StubRoutines::_oop_disjoint_arraycopy;
    StubRoutines::_arrayof_oop_arraycopy             = StubRoutines::_oop_arraycopy;
  }
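
  // Note: illustrative sketch only, not VM code.  Each math stub below uses
  // the same pattern to reach the x87 transcendental instructions from an
  // SSE calling convention: spill xmm0 to the stack, reload it onto the
  // x87 stack, compute, and move the result back.  Roughly:
  //
  //   double intrinsic_log(double x) {   // argument arrives in xmm0
  //     spill:  movdbl [rsp], xmm0       // SSE register -> memory
  //     load:   fld_d  [rsp]             // memory -> x87 stack
  //     op:     flog (or flog10, trigfunc)
  //     store:  fstp_d [rsp]             // x87 stack -> memory
  //     return: movdbl xmm0, [rsp]       // memory -> SSE register
  //   }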

  void generate_math_stubs() {
    {
      StubCodeMark mark(this, "StubRoutines", "log");
      StubRoutines::_intrinsic_log = (double (*)(double)) __ pc();

      __ subq(rsp, 8);
      __ movdbl(Address(rsp, 0), xmm0);
      __ fld_d(Address(rsp, 0));
      __ flog();
      __ fstp_d(Address(rsp, 0));
      __ movdbl(xmm0, Address(rsp, 0));
      __ addq(rsp, 8);
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "log10");
      StubRoutines::_intrinsic_log10 = (double (*)(double)) __ pc();

      __ subq(rsp, 8);
      __ movdbl(Address(rsp, 0), xmm0);
      __ fld_d(Address(rsp, 0));
      __ flog10();
      __ fstp_d(Address(rsp, 0));
      __ movdbl(xmm0, Address(rsp, 0));
      __ addq(rsp, 8);
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "sin");
      StubRoutines::_intrinsic_sin = (double (*)(double)) __ pc();

      __ subq(rsp, 8);
      __ movdbl(Address(rsp, 0), xmm0);
      __ fld_d(Address(rsp, 0));
      __ trigfunc('s');
      __ fstp_d(Address(rsp, 0));
      __ movdbl(xmm0, Address(rsp, 0));
      __ addq(rsp, 8);
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "cos");
      StubRoutines::_intrinsic_cos = (double (*)(double)) __ pc();

      __ subq(rsp, 8);
      __ movdbl(Address(rsp, 0), xmm0);
      __ fld_d(Address(rsp, 0));
      __ trigfunc('c');
      __ fstp_d(Address(rsp, 0));
      __ movdbl(xmm0, Address(rsp, 0));
      __ addq(rsp, 8);
      __ ret(0);
    }
    {
      StubCodeMark mark(this, "StubRoutines", "tan");
      StubRoutines::_intrinsic_tan = (double (*)(double)) __ pc();

      __ subq(rsp, 8);
      __ movdbl(Address(rsp, 0), xmm0);
      __ fld_d(Address(rsp, 0));
      __ trigfunc('t');
      __ fstp_d(Address(rsp, 0));
      __ movdbl(xmm0, Address(rsp, 0));
      __ addq(rsp, 8);
      __ ret(0);
    }

    // The intrinsic versions of these seem to return the same values as
    // the strict versions.
    StubRoutines::_intrinsic_exp = SharedRuntime::dexp;
    StubRoutines::_intrinsic_pow = SharedRuntime::dpow;
  }

#undef __
#define __ masm->

  // Continuation point for throwing of implicit exceptions that are
  // not handled in the current activation. Fabricates an exception
  // oop and initiates normal exception dispatching in this
  // frame. Since we need to preserve callee-saved values (currently
  // only for C2, but done for C1 as well) we need a callee-saved oop
  // map and therefore have to make these stubs into RuntimeStubs
  // rather than BufferBlobs.  If the compiler needs all registers to
  // be preserved between the fault point and the exception handler
  // then it must assume responsibility for that in
  // AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception.  All other
  // implicit exceptions (e.g., NullPointerException or
  // AbstractMethodError on entry) are either at call sites or
  // otherwise assume that stack unwinding will be initiated, so
  // caller saved registers were assumed volatile in the compiler.
  address generate_throw_exception(const char* name,
                                   address runtime_entry,
                                   bool restore_saved_exception_pc) {
    // Information about frame layout at time of blocking runtime call.
    // Note that we only have to preserve callee-saved registers since
    // the compilers are responsible for supplying a continuation point
    // if they expect all registers to be preserved.
    enum layout {
      rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
      rbp_off2,
      return_off,
      return_off2,
      framesize // inclusive of return address
    };

    int insts_size = 512;
    int locs_size  = 64;

    CodeBuffer code(name, insts_size, locs_size);
    OopMapSet* oop_maps  = new OopMapSet();
    MacroAssembler* masm = new MacroAssembler(&code);

    address start = __ pc();

    // This is an inlined and slightly modified version of call_VM
    // which has the ability to fetch the return PC out of
    // thread-local storage and also sets up last_Java_sp slightly
    // differently than the real call_VM
    if (restore_saved_exception_pc) {
      __ movptr(rax,
                Address(r15_thread,
                        in_bytes(JavaThread::saved_exception_pc_offset())));
      __ push(rax);
    }

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    assert(is_even(framesize/2), "sp not 16-byte aligned");

    // return address and rbp are already in place
    __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog

    int frame_complete = __ pc() - start;

    // Set up last_Java_sp and last_Java_fp
    __ set_last_Java_frame(rsp, rbp, NULL);

    // Call runtime
    __ movptr(c_rarg0, r15_thread);
    BLOCK_COMMENT("call runtime_entry");
    __ call(RuntimeAddress(runtime_entry));

    // Generate oop map
    OopMap* map = new OopMap(framesize, 0);

    oop_maps->add_gc_map(__ pc() - start, map);

    __ reset_last_Java_frame(true, false);

    __ leave(); // required for proper stackwalking of RuntimeStub frame

    // check for pending exceptions
#ifdef ASSERT
    Label L;
    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()),
              (int32_t) NULL_WORD);
    __ jcc(Assembler::notEqual, L);
    __ should_not_reach_here();
    __ bind(L);
#endif // ASSERT
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));


    // codeBlob framesize is in words (not VMRegImpl::slot_size)
    RuntimeStub* stub =
      RuntimeStub::new_runtime_stub(name,
                                    &code,
                                    frame_complete,
                                    (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                    oop_maps, false);
    return stub->entry_point();
  }

  // Initialization
  void generate_initial() {
    // Generates all stubs and initializes the entry points

    // This platform-specific stub is needed by generate_call_stub()
    StubRoutines::x86::_mxcsr_std = generate_fp_mask("mxcsr_std", 0x0000000000001F80);

    // entry points that exist in all platforms.  Note: This is code
    // that could be shared among different platforms - however the
    // benefit seems to be smaller than the disadvantage of having a
    // much more complicated generator structure. See also comment in
    // stubRoutines.hpp.

    StubRoutines::_forward_exception_entry = generate_forward_exception();

    StubRoutines::_call_stub_entry =
      generate_call_stub(StubRoutines::_call_stub_return_address);

    // is referenced by megamorphic call
    StubRoutines::_catch_exception_entry = generate_catch_exception();

    // atomic calls
    StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
    StubRoutines::_atomic_xchg_ptr_entry     = generate_atomic_xchg_ptr();
    StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
    StubRoutines::_atomic_add_entry          = generate_atomic_add();
    StubRoutines::_atomic_add_ptr_entry      = generate_atomic_add_ptr();
    StubRoutines::_fence_entry               = generate_orderaccess_fence();

    StubRoutines::_handler_for_unsafe_access_entry =
      generate_handler_for_unsafe_access();

    // platform dependent
    StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();

    StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
  }

  void generate_all() {
    // Generates all stubs and initializes the entry points

    // These entry points require SharedInfo::stack0 to be set up in
    // non-core builds and need to be relocatable, so they each
    // fabricate a RuntimeStub internally.
    StubRoutines::_throw_AbstractMethodError_entry =
      generate_throw_exception("AbstractMethodError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_AbstractMethodError),
                               false);

    StubRoutines::_throw_IncompatibleClassChangeError_entry =
      generate_throw_exception("IncompatibleClassChangeError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_IncompatibleClassChangeError),
                               false);

    StubRoutines::_throw_ArithmeticException_entry =
      generate_throw_exception("ArithmeticException throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_ArithmeticException),
                               true);

    StubRoutines::_throw_NullPointerException_entry =
      generate_throw_exception("NullPointerException throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_NullPointerException),
                               true);

    StubRoutines::_throw_NullPointerException_at_call_entry =
      generate_throw_exception("NullPointerException at call throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_NullPointerException_at_call),
                               false);

    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_StackOverflowError),
                               false);

    // entry points that are platform specific
    StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
    StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
    StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
    StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();

    StubRoutines::x86::_float_sign_mask  = generate_fp_mask("float_sign_mask",  0x7FFFFFFF7FFFFFFF);
    StubRoutines::x86::_float_sign_flip  = generate_fp_mask("float_sign_flip",  0x8000000080000000);
    StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
    StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);
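
    // Note: illustrative sketch only, not VM code.  Compiled code uses
    // these 16-byte masks with packed logic ops to implement abs() and
    // negation on floats and doubles, roughly:
    //
    //   fabs(x): andpd xmm, [double_sign_mask]   // clear the sign bit
    //   -x:      xorpd xmm, [double_sign_flip]   // flip the sign bit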

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    generate_math_stubs();
  }

 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
}; // end class declaration

address StubGenerator::disjoint_byte_copy_entry  = NULL;
address StubGenerator::disjoint_short_copy_entry = NULL;
address StubGenerator::disjoint_int_copy_entry   = NULL;
address StubGenerator::disjoint_long_copy_entry  = NULL;
address StubGenerator::disjoint_oop_copy_entry   = NULL;

address StubGenerator::byte_copy_entry  = NULL;
address StubGenerator::short_copy_entry = NULL;
address StubGenerator::int_copy_entry   = NULL;
address StubGenerator::long_copy_entry  = NULL;
address StubGenerator::oop_copy_entry   = NULL;

address StubGenerator::checkcast_copy_entry = NULL;

void StubGenerator_generate(CodeBuffer* code, bool all) {
  StubGenerator g(code, all);
}