hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp

rev 611 : Merge
   1 #ifdef USE_PRAGMA_IDENT_SRC
   2 #pragma ident "@(#)stubGenerator_x86_64.cpp     1.49 07/10/05 19:12:48 JVM"
   3 #endif
   4 /*
   5  * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
   6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   7  *
   8  * This code is free software; you can redistribute it and/or modify it
   9  * under the terms of the GNU General Public License version 2 only, as
  10  * published by the Free Software Foundation.
  11  *
  12  * This code is distributed in the hope that it will be useful, but WITHOUT
  13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  15  * version 2 for more details (a copy is included in the LICENSE file that
  16  * accompanied this code).
  17  *
  18  * You should have received a copy of the GNU General Public License version
  19  * 2 along with this work; if not, write to the Free Software Foundation,
  20  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  21  *
  22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  23  * CA 95054 USA or visit www.sun.com if you need additional information or
  24  * have any questions.
  25  *  
  26  */
  27 
  28 #include "incls/_precompiled.incl"
  29 #include "incls/_stubGenerator_x86_64.cpp.incl"
  30 
  31 // Declaration and definition of StubGenerator (no .hpp file).
  32 // For a more detailed description of the stub routine structure
  33 // see the comment in stubRoutines.hpp
  34 
  35 #define __ _masm->
  36 
  37 #ifdef PRODUCT
  38 #define BLOCK_COMMENT(str) /* nothing */
  39 #else
  40 #define BLOCK_COMMENT(str) __ block_comment(str)
  41 #endif
  42 
  43 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
  44 const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions
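     // Bits 0-5 of MXCSR are the sticky exception-status flags; masking with
     // 0xFFC0 keeps only the control and mask bits, so a stale status flag
     // cannot cause a spurious mismatch when a stub compares MXCSR against
     // the expected value.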
  45 
  46 // Stub Code definitions
  47 
  48 static address handle_unsafe_access() {
  49   JavaThread* thread = JavaThread::current();
  50   address pc = thread->saved_exception_pc();
  51   // pc is the instruction which we must emulate
  52   // doing a no-op is fine:  return garbage from the load
  53   // therefore, compute npc
  54   address npc = Assembler::locate_next_instruction(pc);
  55 


 195 
 196     const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
 197     const Address result        (rbp, result_off         * wordSize);
 198     const Address result_type   (rbp, result_type_off    * wordSize);
 199     const Address method        (rbp, method_off         * wordSize);
 200     const Address entry_point   (rbp, entry_point_off    * wordSize);
 201     const Address parameters    (rbp, parameters_off     * wordSize);
 202     const Address parameter_size(rbp, parameter_size_off * wordSize);
 203 
 204     // same as in generate_catch_exception()!
 205     const Address thread        (rbp, thread_off         * wordSize);
 206 
 207     const Address r15_save(rbp, r15_off * wordSize);
 208     const Address r14_save(rbp, r14_off * wordSize);
 209     const Address r13_save(rbp, r13_off * wordSize);
 210     const Address r12_save(rbp, r12_off * wordSize);
 211     const Address rbx_save(rbp, rbx_off * wordSize);
 212 
 213     // stub code
 214     __ enter();
 215     __ subq(rsp, -rsp_after_call_off * wordSize);
 216 
 217     // save register parameters
 218 #ifndef _WIN64
 219     __ movq(parameters,   c_rarg5); // parameters
 220     __ movq(entry_point,  c_rarg4); // entry_point
 221 #endif
 222 
 223     __ movq(method,       c_rarg3); // method
 224     __ movl(result_type,  c_rarg2); // result type
 225     __ movq(result,       c_rarg1); // result
 226     __ movq(call_wrapper, c_rarg0); // call wrapper
 227 
 228     // save regs belonging to calling function
 229     __ movq(rbx_save, rbx);
 230     __ movq(r12_save, r12);
 231     __ movq(r13_save, r13);
 232     __ movq(r14_save, r14);
 233     __ movq(r15_save, r15);
 234 
 235 #ifdef _WIN64
 236     const Address rdi_save(rbp, rdi_off * wordSize);
 237     const Address rsi_save(rbp, rsi_off * wordSize);
 238 
 239     __ movq(rsi_save, rsi);
 240     __ movq(rdi_save, rdi);
 241 #else
 242     const Address mxcsr_save(rbp, mxcsr_off * wordSize);
 243     {
 244       Label skip_ldmx;
 245       __ stmxcsr(mxcsr_save);
 246       __ movl(rax, mxcsr_save);
 247       __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
 248       ExternalAddress mxcsr_std(StubRoutines::amd64::mxcsr_std());
 249       __ cmp32(rax, mxcsr_std);
 250       __ jcc(Assembler::equal, skip_ldmx);
 251       __ ldmxcsr(mxcsr_std);
 252       __ bind(skip_ldmx);
 253     }
 254 #endif
 255 
 256     // Load up thread register
 257     __ movq(r15_thread, thread);
 258 
 259 #ifdef ASSERT
 260     // make sure we have no pending exceptions
 261     { 
 262       Label L;
 263       __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int)NULL_WORD);
 264       __ jcc(Assembler::equal, L);
 265       __ stop("StubRoutines::call_stub: entered with pending exception");
 266       __ bind(L);
 267     }
 268 #endif
 269 
 270     // pass parameters if any
 271     BLOCK_COMMENT("pass parameters if any");
 272     Label parameters_done;
 273     __ movl(c_rarg3, parameter_size);
 274     __ testl(c_rarg3, c_rarg3);
 275     __ jcc(Assembler::zero, parameters_done);
 276 
 277     Label loop;
 278     __ movq(c_rarg2, parameters);     // parameter pointer
 279     __ movl(c_rarg1, c_rarg3);        // parameter counter is in c_rarg1
 280     __ BIND(loop);
 281     if (TaggedStackInterpreter) {
 282       __ movq(rax, Address(c_rarg2, 0)); // get tag
 283       __ addq(c_rarg2, wordSize);     // advance to next tag
 284       __ pushq(rax);                  // pass tag
 285     }
 286     __ movq(rax, Address(c_rarg2, 0));  // get parameter
 287     __ addq(c_rarg2, wordSize);       // advance to next parameter
 288     __ decrementl(c_rarg1);           // decrement counter
 289     __ pushq(rax);                    // pass parameter
 290     __ jcc(Assembler::notZero, loop);
 291 
 292     // call Java function
 293     __ BIND(parameters_done);
 294     __ movq(rbx, method);             // get methodOop
 295     __ movq(c_rarg1, entry_point);    // get entry_point
 296     __ movq(r13, rsp);                // set sender sp
 297     BLOCK_COMMENT("call Java function");
 298     __ call(c_rarg1);
 299 
 300     BLOCK_COMMENT("call_stub_return_address:");
 301     return_address = __ pc();
 302 
 303     // store result depending on type (everything that is not
 304     // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
 305     __ movq(c_rarg0, result);
 306     Label is_long, is_float, is_double, exit;
 307     __ movl(c_rarg1, result_type);
 308     __ cmpl(c_rarg1, T_OBJECT);
 309     __ jcc(Assembler::equal, is_long);
 310     __ cmpl(c_rarg1, T_LONG);
 311     __ jcc(Assembler::equal, is_long);
 312     __ cmpl(c_rarg1, T_FLOAT);
 313     __ jcc(Assembler::equal, is_float);
 314     __ cmpl(c_rarg1, T_DOUBLE);
 315     __ jcc(Assembler::equal, is_double);
 316 
 317     // handle T_INT case
 318     __ movl(Address(c_rarg0, 0), rax);
 319 
 320     __ BIND(exit);
 321 
 322     // pop parameters
 323     __ leaq(rsp, rsp_after_call);
 324 
 325 #ifdef ASSERT
 326     // verify that threads correspond
 327     { 
 328       Label L, S;
 329       __ cmpq(r15_thread, thread);
 330       __ jcc(Assembler::notEqual, S);
 331       __ get_thread(rbx);
 332       __ cmpq(r15_thread, rbx);
 333       __ jcc(Assembler::equal, L);
 334       __ bind(S);
 335       __ jcc(Assembler::equal, L);
 336       __ stop("StubRoutines::call_stub: threads must correspond");
 337       __ bind(L);
 338     }
 339 #endif
 340 
 341     // restore regs belonging to calling function
 342     __ movq(r15, r15_save);
 343     __ movq(r14, r14_save);
 344     __ movq(r13, r13_save);
 345     __ movq(r12, r12_save);
 346     __ movq(rbx, rbx_save);
 347 
 348 #ifdef _WIN64
 349     __ movq(rdi, rdi_save);
 350     __ movq(rsi, rsi_save);
 351 #else
 352     __ ldmxcsr(mxcsr_save);
 353 #endif
 354 
 355     // restore rsp
 356     __ addq(rsp, -rsp_after_call_off * wordSize);
 357 
 358     // return
 359     __ popq(rbp);
 360     __ ret(0);
 361 
 362     // handle return types different from T_INT
 363     __ BIND(is_long);
 364     __ movq(Address(c_rarg0, 0), rax);
 365     __ jmp(exit);
 366 
 367     __ BIND(is_float);
 368     __ movflt(Address(c_rarg0, 0), xmm0);
 369     __ jmp(exit);
 370 
 371     __ BIND(is_double);
 372     __ movdbl(Address(c_rarg0, 0), xmm0);
 373     __ jmp(exit);
 374 
 375     return start;
 376   }
 377 
 378   // Return point for a Java call if there's an exception thrown in
 379   // Java code.  The exception is caught and transformed into a


 382   //
 383   // Note: Usually the parameters are removed by the callee. In case
 384   // of an exception crossing an activation frame boundary, that is
 385   // not the case if the callee is compiled code => need to setup the
 386   // rsp.
 387   //
 388   // rax: exception oop
 389 
 390   address generate_catch_exception() {
 391     StubCodeMark mark(this, "StubRoutines", "catch_exception");
 392     address start = __ pc();
 393 
 394     // same as in generate_call_stub():
 395     const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
 396     const Address thread        (rbp, thread_off         * wordSize);
 397 
 398 #ifdef ASSERT
 399     // verify that threads correspond
 400     { 
 401       Label L, S;
 402       __ cmpq(r15_thread, thread);
 403       __ jcc(Assembler::notEqual, S);
 404       __ get_thread(rbx);
 405       __ cmpq(r15_thread, rbx);
 406       __ jcc(Assembler::equal, L);
 407       __ bind(S);
 408       __ stop("StubRoutines::catch_exception: threads must correspond");
 409       __ bind(L);
 410     }
 411 #endif
 412 
 413     // set pending exception
 414     __ verify_oop(rax);
 415 
 416     __ movq(Address(r15_thread, Thread::pending_exception_offset()), rax);
 417     __ lea(rscratch1, ExternalAddress((address)__FILE__));
 418     __ movq(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
 419     __ movl(Address(r15_thread, Thread::exception_line_offset()), (int)  __LINE__);
 420 
 421     // complete return to VM
 422     assert(StubRoutines::_call_stub_return_address != NULL,
 423            "_call_stub_return_address must have been generated before");
 424     __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));
 425 
 426     return start;
 427   }
 428   
 429   // Continuation point for runtime calls returning with a pending
 430   // exception.  The pending exception check happened in the runtime
 431   // or native call stub.  The pending exception in Thread is
 432   // converted into a Java-level exception.
 433   //
 434   // Contract with Java-level exception handlers:
 435   // rax: exception
 436   // rdx: throwing pc
 437   //
 438   // NOTE: At entry of this stub, exception-pc must be on stack !!
 439 
 440   address generate_forward_exception() {
 441     StubCodeMark mark(this, "StubRoutines", "forward exception");
 442     address start = __ pc();
 443 
 444     // Upon entry, the sp points to the return address returning into
 445     // Java (interpreted or compiled) code; i.e., the return address
 446     // becomes the throwing pc.
 447     //
 448     // Arguments pushed before the runtime call are still on the stack
 449     // but the exception handler will reset the stack pointer ->
 450     // ignore them.  A potential result in registers can be ignored as
 451     // well.
 452 
 453 #ifdef ASSERT
 454     // make sure this code is only executed if there is a pending exception
 455     { 
 456       Label L;
 457       __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int) NULL);
 458       __ jcc(Assembler::notEqual, L);
 459       __ stop("StubRoutines::forward exception: no pending exception (1)");
 460       __ bind(L);
 461     }
 462 #endif
 463 
 464     // compute exception handler into rbx
 465     __ movq(c_rarg0, Address(rsp, 0)); 
 466     BLOCK_COMMENT("call exception_handler_for_return_address");
 467     __ call_VM_leaf(CAST_FROM_FN_PTR(address, 
 468                          SharedRuntime::exception_handler_for_return_address),
 469                     c_rarg0);
 470     __ movq(rbx, rax);
 471 
 472     // setup rax & rdx, remove return address & clear pending exception
 473     __ popq(rdx);
 474     __ movq(rax, Address(r15_thread, Thread::pending_exception_offset()));
 475     __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int)NULL_WORD);
 476 
 477 #ifdef ASSERT
 478     // make sure exception is set
 479     { 
 480       Label L;
 481       __ testq(rax, rax);
 482       __ jcc(Assembler::notEqual, L);
 483       __ stop("StubRoutines::forward exception: no pending exception (2)");
 484       __ bind(L);
 485     }
 486 #endif
 487 
 488     // continue at exception handler (return address removed)
 489     // rax: exception
 490     // rbx: exception handler
 491     // rdx: throwing pc
 492     __ verify_oop(rax);
 493     __ jmp(rbx);
 494 
 495     return start;
 496   }
 497 
 498   // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
 499   // 
 500   // Arguments :
 501   //    c_rarg0: exchange_value


 509 
 510     __ movl(rax, c_rarg0); // Copy to eax; we need a return value anyhow
 511     __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
 512     __ ret(0);
 513 
 514     return start;
 515   }
 516 
 517   // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
 518   // 
 519   // Arguments :
 520   //    c_rarg0: exchange_value
 521   //    c_rarg1: dest
 522   // 
 523   // Result:
 524   //    *dest <- ex, return (orig *dest)
 525   address generate_atomic_xchg_ptr() {
 526     StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
 527     address start = __ pc();
 528 
 529     __ movq(rax, c_rarg0); // Copy to rax; we need a return value anyhow
 530     __ xchgq(rax, Address(c_rarg1, 0)); // automatic LOCK
 531     __ ret(0);
 532 
 533     return start;
 534   }
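       // Note: XCHG with a memory operand has implicit LOCK semantics, so
       // neither xchg stub needs an explicit lock prefix.  Illustrative
       // sketch of the contract (GCC builtin, not part of this file):
       //
       //   intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
       //     return __sync_lock_test_and_set((intptr_t*)dest, exchange_value);
       //   }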
 535 
 536   // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
 537   //                                         jint compare_value)
 538   // 
 539   // Arguments :
 540   //    c_rarg0: exchange_value
 541   //    c_rarg1: dest
 542   //    c_rarg2: compare_value
 543   // 
 544   // Result:
 545   //    if ( compare_value == *dest ) {
 546   //       *dest = exchange_value;
 547   //       return compare_value;
 548   //    } else
 549   //       return *dest;
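       //
       // i.e. the old *dest value is returned on both paths, which is what
       // LOCK CMPXCHG leaves in rax.  Illustrative sketch of the same
       // contract (GCC builtin, not part of this file):
       //
       //   jint cmpxchg(jint x, volatile jint* dest, jint compare_value) {
       //     return __sync_val_compare_and_swap((jint*)dest, compare_value, x);
       //   }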
 550   address generate_atomic_cmpxchg() {


 603     __ xaddl(Address(c_rarg1, 0), c_rarg0); 
 604     __ addl(rax, c_rarg0);
 605     __ ret(0);
 606 
 607     return start;
 608   }
 609 
 610   // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
 611   // 
 612   // Arguments :
 613   //    c_rarg0: add_value
 614   //    c_rarg1: dest
 615   // 
 616   // Result:
 617   //    *dest += add_value
 618   //    return *dest;
 619   address generate_atomic_add_ptr() {
 620     StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
 621     address start = __ pc();
 622 
 623     __ movq(rax, c_rarg0); // Copy to rax; we need a return value anyhow
 624     if (os::is_MP()) __ lock();
 625     __ xaddq(Address(c_rarg1, 0), c_rarg0);
 626     __ addq(rax, c_rarg0);
 627     __ ret(0);
 628 
 629     return start;
 630   }
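       // XADD stores the sum to memory and leaves the previous *dest in its
       // register operand, so adding that into rax (a copy of add_value)
       // produces the new value required by the contract.  Illustrative
       // sketch (GCC builtin, not part of this file):
       //
       //   intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest) {
       //     return __sync_add_and_fetch((intptr_t*)dest, add_value);
       //   }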
 631 
 632   // Support for intptr_t OrderAccess::fence()
 633   // 
 634   // Arguments :
 635   // 
 636   // Result:
 637   address generate_orderaccess_fence() {
 638     StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
 639     address start = __ pc();
 640     __ mfence();
 641     __ ret(0);
 642 
 643     return start;
 644   }
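       // MFENCE orders all earlier loads and stores before all later ones.
       // Illustrative sketch (GCC builtin, not part of this file):
       //
       //   void fence() { __sync_synchronize(); }  // full two-way barrier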
 645 
 646   // Support for intptr_t get_previous_fp()
 647   //
 648   // This routine is used to find the previous frame pointer for the
 649   // caller (current_frame_guess). This is used as part of debugging
 650   // when ps() is seemingly lost trying to find frames.
 651   // This code assumes that the caller (current_frame_guess) has a frame.
 652   address generate_get_previous_fp() {
 653     StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
 654     const Address old_fp(rbp, 0);
 655     const Address older_fp(rax, 0);
 656     address start = __ pc();
 657 
 658     __ enter();    
 659     __ movq(rax, old_fp); // caller's fp
 660     __ movq(rax, older_fp); // the frame for ps()
 661     __ popq(rbp);
 662     __ ret(0);
 663 
 664     return start;
 665   }
 666   
 667   //----------------------------------------------------------------------------------------------------
 668   // Support for void verify_mxcsr()
 669   // 
 670   // This routine is used with -Xcheck:jni to verify that native 
 671   // JNI code does not return to Java code without restoring the
 672   // MXCSR register to our expected state.
 673 
 674   address generate_verify_mxcsr() {
 675     StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
 676     address start = __ pc();
 677 
 678     const Address mxcsr_save(rsp, 0);
 679 
 680     if (CheckJNICalls) {
 681       Label ok_ret;
 682       __ pushq(rax);
 683       __ subq(rsp, wordSize);      // allocate a temp location
 684       __ stmxcsr(mxcsr_save);
 685       __ movl(rax, mxcsr_save);
 686       __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
 687       __ cmpl(rax, *(int *)(StubRoutines::amd64::mxcsr_std()));
 688       __ jcc(Assembler::equal, ok_ret);
 689    
 690       __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");
 691 
 692       __ ldmxcsr(ExternalAddress(StubRoutines::amd64::mxcsr_std()));
 693 
 694       __ bind(ok_ret);
 695       __ addq(rsp, wordSize);
 696       __ popq(rax);
 697     }
 698 
 699     __ ret(0);
 700 
 701     return start;
 702   }
 703 
 704   address generate_f2i_fixup() {
 705     StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
 706     Address inout(rsp, 5 * wordSize); // return address + 4 saves
 707 
 708     address start = __ pc();
 709 
 710     Label L;
 711 
 712     __ pushq(rax);
 713     __ pushq(c_rarg3);
 714     __ pushq(c_rarg2);
 715     __ pushq(c_rarg1);
 716 
 717     __ movl(rax, 0x7f800000);
 718     __ xorl(c_rarg3, c_rarg3);
 719     __ movl(c_rarg2, inout);
 720     __ movl(c_rarg1, c_rarg2);
 721     __ andl(c_rarg1, 0x7fffffff);
 722     __ cmpl(rax, c_rarg1); // NaN? -> 0
 723     __ jcc(Assembler::negative, L);
 724     __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
 725     __ movl(c_rarg3, 0x80000000);
 726     __ movl(rax, 0x7fffffff);
 727     __ cmovl(Assembler::positive, c_rarg3, rax);
 728 
 729     __ bind(L);
 730     __ movq(inout, c_rarg3);
 731 
 732     __ popq(c_rarg1);
 733     __ popq(c_rarg2);
 734     __ popq(c_rarg3);
 735     __ popq(rax);
 736 
 737     __ ret(0);
 738 
 739     return start;
 740   }
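       // The fixup stubs are reached only when CVTTSS2SI/CVTTSD2SI produced
       // the "integer indefinite" value (min_jint/min_jlong), i.e. for NaN
       // or out-of-range inputs.  Sketch of the logic above, given the raw
       // float bits (f2l/d2i/d2l below are analogous at their widths):
       //
       //   jint f2i_fixup(jint bits) {
       //     if ((bits & 0x7fffffff) > 0x7f800000) return 0; // NaN -> 0
       //     return (bits < 0) ? min_jint : max_jint;        // clamp by sign
       //   }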
 741 
 742   address generate_f2l_fixup() {
 743     StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
 744     Address inout(rsp, 5 * wordSize); // return address + 4 saves
 745     address start = __ pc();
 746 
 747     Label L;
 748 
 749     __ pushq(rax);
 750     __ pushq(c_rarg3);
 751     __ pushq(c_rarg2);
 752     __ pushq(c_rarg1);
 753 
 754     __ movl(rax, 0x7f800000);
 755     __ xorl(c_rarg3, c_rarg3);
 756     __ movl(c_rarg2, inout);
 757     __ movl(c_rarg1, c_rarg2);
 758     __ andl(c_rarg1, 0x7fffffff);
 759     __ cmpl(rax, c_rarg1); // NaN? -> 0
 760     __ jcc(Assembler::negative, L);
 761     __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
 762     __ mov64(c_rarg3, 0x8000000000000000);
 763     __ mov64(rax, 0x7fffffffffffffff);
 764     __ cmovq(Assembler::positive, c_rarg3, rax);
 765 
 766     __ bind(L);
 767     __ movq(inout, c_rarg3);
 768 
 769     __ popq(c_rarg1);
 770     __ popq(c_rarg2);
 771     __ popq(c_rarg3);
 772     __ popq(rax);
 773 
 774     __ ret(0);
 775 
 776     return start;
 777   }
 778 
 779   address generate_d2i_fixup() {
 780     StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
 781     Address inout(rsp, 6 * wordSize); // return address + 5 saves
 782 
 783     address start = __ pc();
 784 
 785     Label L;
 786 
 787     __ pushq(rax);
 788     __ pushq(c_rarg3);
 789     __ pushq(c_rarg2);
 790     __ pushq(c_rarg1);
 791     __ pushq(c_rarg0);
 792 
 793     __ movl(rax, 0x7ff00000);
 794     __ movq(c_rarg2, inout);
 795     __ movl(c_rarg3, c_rarg2);
 796     __ movq(c_rarg1, c_rarg2);
 797     __ movq(c_rarg0, c_rarg2);
 798     __ negl(c_rarg3);
 799     __ shrq(c_rarg1, 0x20);
 800     __ orl(c_rarg3, c_rarg2);
 801     __ andl(c_rarg1, 0x7fffffff);
 802     __ xorl(c_rarg2, c_rarg2);
 803     __ shrl(c_rarg3, 0x1f);
 804     __ orl(c_rarg1, c_rarg3);
 805     __ cmpl(rax, c_rarg1);
 806     __ jcc(Assembler::negative, L); // NaN -> 0
 807     __ testq(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
 808     __ movl(c_rarg2, 0x80000000);
 809     __ movl(rax, 0x7fffffff);
 810     __ cmovl(Assembler::positive, c_rarg2, rax);
 811     
 812     __ bind(L);
 813     __ movq(inout, c_rarg2);
 814 
 815     __ popq(c_rarg0);
 816     __ popq(c_rarg1);
 817     __ popq(c_rarg2);
 818     __ popq(c_rarg3);
 819     __ popq(rax);
 820 
 821     __ ret(0);
 822 
 823     return start;
 824   }
 825 
 826   address generate_d2l_fixup() {
 827     StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
 828     Address inout(rsp, 6 * wordSize); // return address + 5 saves
 829 
 830     address start = __ pc();
 831 
 832     Label L;
 833 
 834     __ pushq(rax);
 835     __ pushq(c_rarg3);
 836     __ pushq(c_rarg2);
 837     __ pushq(c_rarg1);
 838     __ pushq(c_rarg0);
 839 
 840     __ movl(rax, 0x7ff00000);
 841     __ movq(c_rarg2, inout);
 842     __ movl(c_rarg3, c_rarg2);
 843     __ movq(c_rarg1, c_rarg2);
 844     __ movq(c_rarg0, c_rarg2);
 845     __ negl(c_rarg3);
 846     __ shrq(c_rarg1, 0x20);
 847     __ orl(c_rarg3, c_rarg2);
 848     __ andl(c_rarg1, 0x7fffffff);
 849     __ xorl(c_rarg2, c_rarg2);
 850     __ shrl(c_rarg3, 0x1f);
 851     __ orl(c_rarg1, c_rarg3);
 852     __ cmpl(rax, c_rarg1);
 853     __ jcc(Assembler::negative, L); // NaN -> 0
 854     __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong
 855     __ mov64(c_rarg2, 0x8000000000000000);
 856     __ mov64(rax, 0x7fffffffffffffff);
 857     __ cmovq(Assembler::positive, c_rarg2, rax);
 858     
 859     __ bind(L);
 860     __ movq(inout, c_rarg2);
 861 
 862     __ popq(c_rarg0);
 863     __ popq(c_rarg1);
 864     __ popq(c_rarg2);
 865     __ popq(c_rarg3);
 866     __ popq(rax);
 867 
 868     __ ret(0);
 869 
 870     return start;
 871   }
 872 
 873   address generate_fp_mask(const char *stub_name, int64_t mask) {
 874     StubCodeMark mark(this, "StubRoutines", stub_name);
 875 
 876     __ align(16);
 877     address start = __ pc();
 878 
 879     __ emit_data64( mask, relocInfo::none );
 880     __ emit_data64( mask, relocInfo::none );
 881 
 882     return start;
 883   }
 884 
 885   // The following routine generates a subroutine to throw an
 886   // asynchronous UnknownError when an unsafe access gets a fault that
 887   // could not be reasonably prevented by the programmer.  (Example:
 888   // SIGBUS/OBJERR.)
 889   address generate_handler_for_unsafe_access() {
 890     StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
 891     address start = __ pc();
 892 
 893     __ pushq(0);                      // hole for return address-to-be
 894     __ pushaq();                      // push registers
 895     Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);
 896 
 897     __ subq(rsp, frame::arg_reg_save_area_bytes);
 898     BLOCK_COMMENT("call handle_unsafe_access");
 899     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
 900     __ addq(rsp, frame::arg_reg_save_area_bytes);
 901 
 902     __ movq(next_pc, rax);            // stuff next address 
 903     __ popaq();
 904     __ ret(0);                        // jump to next address
 905 
 906     return start;
 907   }
 908 
 909   // Non-destructive plausibility checks for oops
 910   //
 911   // Arguments:
 912   //    all args on stack!
 913   // 
 914   // Stack after saving c_rarg3:
 915   //    [tos + 0]: saved c_rarg3
 916   //    [tos + 1]: saved c_rarg2
 917   //    [tos + 2]: saved flags
 918   //    [tos + 3]: return address
 919   //  * [tos + 4]: error message (char*)
 920   //  * [tos + 5]: object to verify (oop)
 921   //  * [tos + 6]: saved rax - saved by caller and bashed
 922   //  * = popped on exit
 923   address generate_verify_oop() {
 924     StubCodeMark mark(this, "StubRoutines", "verify_oop");
 925     address start = __ pc();
 926     
 927     Label exit, error;
 928 
 929     __ pushfq();
 930     __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));
 931 
 932     // save c_rarg2 and c_rarg3
 933     __ pushq(c_rarg2);
 934     __ pushq(c_rarg3);
 935 
 936     // get object
 937     __ movq(rax, Address(rsp, 5 * wordSize));
 938 
 939     // make sure object is 'reasonable'
 940     __ testq(rax, rax);
 941     __ jcc(Assembler::zero, exit); // if obj is NULL it is OK
 942     // Check if the oop is in the right area of memory
 943     __ movq(c_rarg2, rax);
 944     __ movptr(c_rarg3, (int64_t) Universe::verify_oop_mask());
 945     __ andq(c_rarg2, c_rarg3);
 946     __ movptr(c_rarg3, (int64_t) Universe::verify_oop_bits());
 947     __ cmpq(c_rarg2, c_rarg3);
 948     __ jcc(Assembler::notZero, error);
 949 
 950     // make sure klass is 'reasonable'
 951     __ movq(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass
 952     __ testq(rax, rax);
 953     __ jcc(Assembler::zero, error); // if klass is NULL it is broken
 954     // Check if the klass is in the right area of memory
 955     __ movq(c_rarg2, rax);
 956     __ movptr(c_rarg3, (int64_t) Universe::verify_klass_mask());
 957     __ andq(c_rarg2, c_rarg3);
 958     __ movptr(c_rarg3, (int64_t) Universe::verify_klass_bits());
 959     __ cmpq(c_rarg2, c_rarg3);
 960     __ jcc(Assembler::notZero, error);
 961 
 962     // make sure klass' klass is 'reasonable'
 963     __ movq(rax, Address(rax, oopDesc::klass_offset_in_bytes()));
 964     __ testq(rax, rax);
 965     __ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken
 966     // Check if the klass' klass is in the right area of memory
 967     __ movptr(c_rarg3, (int64_t) Universe::verify_klass_mask());
 968     __ andq(rax, c_rarg3);
 969     __ movptr(c_rarg3, (int64_t) Universe::verify_klass_bits());
 970     __ cmpq(rax, c_rarg3);
 971     __ jcc(Assembler::notZero, error);
 972 
 973     // return if everything seems ok
 974     __ bind(exit);
 975     __ movq(rax, Address(rsp, 6 * wordSize));    // get saved rax back
 976     __ popq(c_rarg3);                              // restore c_rarg3
 977     __ popq(c_rarg2);                              // restore c_rarg2
 978     __ popfq();                                  // restore flags
 979     __ ret(3 * wordSize);                        // pop caller saved stuff
 980 
 981     // handle errors
 982     __ bind(error);
 983     __ movq(rax, Address(rsp, 6 * wordSize));    // get saved rax back
 984     __ popq(c_rarg3);                              // get saved c_rarg3 back
 985     __ popq(c_rarg2);                              // get saved c_rarg2 back
 986     __ popfq();                                  // get saved flags off stack --
 987                                                  // will be ignored
 988 
 989     __ pushaq();                                 // push registers
 990                                                  // (rip is already
 991                                                  // pushed)
 992     // debug(char* msg, int64_t regs[])
 993     // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
 994     // pushed all the registers, so now the stack looks like:
 995     //     [tos +  0] 16 saved registers
 996     //     [tos + 16] return address
 997     //     [tos + 17] error message (char*)
 998 
 999     __ movq(c_rarg0, Address(rsp, 17 * wordSize)); // pass address of error message
1000     __ movq(c_rarg1, rsp);                         // pass address of regs on stack
1001     __ movq(r12, rsp);                           // remember rsp
1002     __ subq(rsp, frame::arg_reg_save_area_bytes);// windows
1003     __ andq(rsp, -16);                           // align stack as required by ABI
1004     BLOCK_COMMENT("call MacroAssembler::debug");
1005     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug)));
1006     __ movq(rsp, r12);                           // restore rsp
1007     __ popaq();                                  // pop registers
1008     __ ret(3 * wordSize);                        // pop caller saved stuff
1009 
1010     return start;
1011   }
1012 
1013   static address disjoint_byte_copy_entry;
1014   static address disjoint_short_copy_entry;
1015   static address disjoint_int_copy_entry;
1016   static address disjoint_long_copy_entry;
1017   static address disjoint_oop_copy_entry;
1018 
1019   static address byte_copy_entry;
1020   static address short_copy_entry;
1021   static address int_copy_entry;
1022   static address long_copy_entry;
1023   static address oop_copy_entry;
1024 
1025   static address checkcast_copy_entry;
1026 
1027   //
1028   // Verify that a register contains a clean 32-bit positive value
1029   // (high 32 bits are 0) so it can be used in 64-bit shifts.
1030   //
1031   //  Input:
1032   //    Rint  -  32-bit value
1033   //    Rtmp  -  scratch
1034   //
1035   void assert_clean_int(Register Rint, Register Rtmp) {
1036 #ifdef ASSERT
1037     Label L;
1038     assert_different_registers(Rtmp, Rint);
1039     __ movslq(Rtmp, Rint);
1040     __ cmpq(Rtmp, Rint);
1041     __ jccb(Assembler::equal, L);
1042     __ stop("high 32-bits of int value are not 0");
1043     __ bind(L);
1044 #endif
1045   }
1046 
1047   //  Generate overlap test for array copy stubs
1048   //
1049   //  Input:
1050   //     c_rarg0 - from
1051   //     c_rarg1 - to
1052   //     c_rarg2 - element count
1053   //
1054   //  Output:
1055   //     rax   - &from[element count]
1056   //
1057   void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
1058     assert(no_overlap_target != NULL, "must be generated");
1059     array_overlap_test(no_overlap_target, NULL, sf);
1060   }
1061   void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
1062     array_overlap_test(NULL, &L_no_overlap, sf);
1063   }
1064   void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
1065     const Register from     = c_rarg0;
1066     const Register to       = c_rarg1;
1067     const Register count    = c_rarg2;
1068     const Register end_from = rax;
1069 
1070     __ cmpq(to, from);
1071     __ leaq(end_from, Address(from, count, sf, 0));
1072     if (NOLp == NULL) {
1073       ExternalAddress no_overlap(no_overlap_target);
1074       __ jump_cc(Assembler::belowEqual, no_overlap);
1075       __ cmpq(to, end_from);
1076       __ jump_cc(Assembler::aboveEqual, no_overlap);
1077     } else {
1078       __ jcc(Assembler::belowEqual, (*NOLp));
1079       __ cmpq(to, end_from);
1080       __ jcc(Assembler::aboveEqual, (*NOLp));
1081     }
1082   }
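       // A simple forward copy is safe iff the destination does not start
       // strictly inside the source range; the test above is equivalent to
       // (sketch, with elem_size == 1 << sf):
       //
       //   bool no_overlap = (to <= from) || (to >= from + count * elem_size);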
1083 
1084   // Shuffle first three arg regs on Windows into Linux/Solaris locations.
1085   //
1086   // Outputs:
1087   //    rdi - rcx
1088   //    rsi - rdx
1089   //    rdx - r8
1090   //    rcx - r9
1091   //
1092   // On Windows, registers r9 and r10 are used to save rdi and rsi, which
1093   // are non-volatile there.  r9 and r10 should not be used by the caller.
1094   //
1095   void setup_arg_regs(int nargs = 3) {
1096     const Register saved_rdi = r9;
1097     const Register saved_rsi = r10;
1098     assert(nargs == 3 || nargs == 4, "else fix");
1099 #ifdef _WIN64
1100     assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
1101            "unexpected argument registers"); 
1102     if (nargs >= 4)
1103       __ movq(rax, r9);  // r9 is also saved_rdi
1104     __ movq(saved_rdi, rdi);
1105     __ movq(saved_rsi, rsi);
1106     __ movq(rdi, rcx); // c_rarg0
1107     __ movq(rsi, rdx); // c_rarg1
1108     __ movq(rdx, r8);  // c_rarg2
1109     if (nargs >= 4)
1110       __ movq(rcx, rax); // c_rarg3 (via rax)
1111 #else
1112     assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
1113            "unexpected argument registers"); 
1114 #endif
1115   }
1116 
1117   void restore_arg_regs() {
1118     const Register saved_rdi = r9;
1119     const Register saved_rsi = r10;
1120 #ifdef _WIN64
1121     __ movq(rdi, saved_rdi);
1122     __ movq(rsi, saved_rsi);
1123 #endif
1124   }
1125 
1126   // Generate code for an array write pre barrier
1127   //
1128   //     addr    -  starting address
1129   //     count    -  element count
1130   //
1131   //     Destroy no registers!
1132   //
1133   void  gen_write_ref_array_pre_barrier(Register addr, Register count) {
1134 #if 0 // G1 - only
1135     assert_different_registers(addr, c_rarg1);
1136     assert_different_registers(count, c_rarg0);
1137     BarrierSet* bs = Universe::heap()->barrier_set();
1138     switch (bs->kind()) {
1139       case BarrierSet::G1SATBCT:
1140       case BarrierSet::G1SATBCTLogging:
1141         {
1142           __ pushaq();                      // push registers
1143           __ movq(c_rarg0, addr);
1144           __ movq(c_rarg1, count);
1145           __ call(RuntimeAddress(BarrierSet::static_write_ref_array_pre));
1146           __ popaq();
1147         }
1148         break;
1149       case BarrierSet::CardTableModRef:
1150       case BarrierSet::CardTableExtension:
1151       case BarrierSet::ModRef: 
1152         break;
1153       default      : 
1154         ShouldNotReachHere();
1155         
1156     }
1157 #endif // 0 G1 - only
1158   }
1159 
1160   //
1161   // Generate code for an array write post barrier
1162   //
1163   //  Input:
1164   //     start    - register containing starting address of destination array
1165   //     end      - register containing ending address of destination array
1166   //     scratch  - scratch register
1167   //
1168   //  The input registers are overwritten.
1169   //  The ending address is inclusive.
1170   void  gen_write_ref_array_post_barrier(Register start, Register end, Register scratch) {
1171     assert_different_registers(start, end, scratch);
1172     BarrierSet* bs = Universe::heap()->barrier_set();
1173     switch (bs->kind()) {
1174 #if 0 // G1 - only
1175       case BarrierSet::G1SATBCT:
1176       case BarrierSet::G1SATBCTLogging:
1177 
1178         {
1179           __ pushaq();                      // push registers (overkill)
1180           // must compute element count unless barrier set interface is changed (other platforms supply count)
1181           assert_different_registers(start, end, scratch);
1182           __ leaq(scratch, Address(end, wordSize));
1183           __ subq(scratch, start);
1184           __ shrq(scratch, LogBytesPerWord);
1185           __ movq(c_rarg0, start);
1186           __ movq(c_rarg1, scratch);
1187           __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)));
1188           __ popaq();
1189         }
1190         break;
1191 #endif // 0 G1 - only
1192       case BarrierSet::CardTableModRef:
1193       case BarrierSet::CardTableExtension:
1194         {
1195           CardTableModRefBS* ct = (CardTableModRefBS*)bs;
1196           assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
1197 
1198           Label L_loop;
1199 
1200            __ shrq(start, CardTableModRefBS::card_shift);
1201            __ shrq(end, CardTableModRefBS::card_shift);
1202            __ subq(end, start); // number of bytes to copy
1203 
1204           const Register count = end; // 'end' register now contains the byte count
1205           __ lea(scratch, ExternalAddress((address)ct->byte_map_base));
1206           __ addq(start, scratch);
1207         __ BIND(L_loop);
1208           __ movb(Address(start, count, Address::times_1), 0);
1209           __ decrementq(count);
1210           __ jcc(Assembler::greaterEqual, L_loop);
1211         }
1212     }
1213   }
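       // Each 2^card_shift-byte heap region maps to one byte of the card
       // table; the loop above dirties every card in the inclusive range by
       // counting the byte offset down to zero.  Equivalent sketch:
       //
       //   jbyte* base = ct->byte_map_base;
       //   for (intptr_t c = (end >> card_shift) - (start >> card_shift); c >= 0; c--)
       //     base[(start >> card_shift) + c] = 0;   // 0 == dirty card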
1214 
1215   // Copy big chunks forward
1216   //
1217   // Inputs:
1218   //   end_from     - source array's end address
1219   //   end_to       - destination array's end address
1220   //   qword_count  - 64-bit element count, negative
1221   //   to           - scratch
1222   //   L_copy_32_bytes - entry label
1223   //   L_copy_8_bytes  - exit  label
1224   //
1225   void copy_32_bytes_forward(Register end_from, Register end_to, 
1226                              Register qword_count, Register to, 
1227                              Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
1228     DEBUG_ONLY(__ stop("enter at entry label, not here"));
1229     Label L_loop;
1230     __ align(16);
1231   __ BIND(L_loop);
1232     __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
1233     __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
1234     __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
1235     __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
1236     __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
1237     __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
1238     __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
1239     __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);
1240   __ BIND(L_copy_32_bytes);
1241     __ addq(qword_count, 4);
1242     __ jcc(Assembler::lessEqual, L_loop);
1243     __ subq(qword_count, 4);
1244     __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
1245   }
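       // The loop is entered at L_copy_32_bytes with a negative qword_count
       // and copies four qwords per iteration, indexing upward toward zero.
       // Control-flow sketch (copy_32_bytes_backward below mirrors it,
       // counting a positive qword_count down):
       //
       //   while ((qword_count += 4) <= 0)
       //     copy the four qwords ending at index qword_count;
       //   qword_count -= 4;                          // undo the overshoot
       //   if (qword_count < 0) goto L_copy_8_bytes;  // 1-3 trailing qwords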
1246 
1247 
1248   // Copy big chunks backward
1249   //
1250   // Inputs:
1251   //   from         - source array's address
1252   //   dest         - destination array's address
1253   //   qword_count  - 64-bit element count
1254   //   to           - scratch
1255   //   L_copy_32_bytes - entry label
1256   //   L_copy_8_bytes  - exit  label
1257   //
1258   void copy_32_bytes_backward(Register from, Register dest, 
1259                               Register qword_count, Register to, 
1260                               Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
1261     DEBUG_ONLY(__ stop("enter at entry label, not here"));
1262     Label L_loop;
1263     __ align(16);
1264   __ BIND(L_loop);
1265     __ movq(to, Address(from, qword_count, Address::times_8, 24));
1266     __ movq(Address(dest, qword_count, Address::times_8, 24), to);
1267     __ movq(to, Address(from, qword_count, Address::times_8, 16));
1268     __ movq(Address(dest, qword_count, Address::times_8, 16), to);
1269     __ movq(to, Address(from, qword_count, Address::times_8,  8));
1270     __ movq(Address(dest, qword_count, Address::times_8,  8), to);
1271     __ movq(to, Address(from, qword_count, Address::times_8,  0));
1272     __ movq(Address(dest, qword_count, Address::times_8,  0), to);
1273   __ BIND(L_copy_32_bytes);
1274     __ subq(qword_count, 4);
1275     __ jcc(Assembler::greaterEqual, L_loop);
1276     __ addq(qword_count, 4);
1277     __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
1278   }
1279 
1280 
1281   // Arguments:
1282   //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1283   //             ignored
1284   //   name    - stub name string
1285   //
1286   // Inputs:
1287   //   c_rarg0   - source array address
1288   //   c_rarg1   - destination array address
1289   //   c_rarg2   - element count, treated as ssize_t, can be zero
1290   //
1291   // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
1292   // we let the hardware handle it.  The one to eight bytes within words,
1293   // dwords or qwords that span cache line boundaries will still be loaded
1294   // and stored atomically.
1295   //
1296   // Side Effects:


1308     const Register to          = rsi;  // destination array address
1309     const Register count       = rdx;  // elements count
1310     const Register byte_count  = rcx;
1311     const Register qword_count = count;
1312     const Register end_from    = from; // source array end address
1313     const Register end_to      = to;   // destination array end address
1314     // End pointers are inclusive, and if count is not zero they point
1315     // to the last unit copied:  end_to[0] := end_from[0]
1316 
1317     __ enter(); // required for proper stackwalking of RuntimeStub frame
1318     assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
1319 
1320     disjoint_byte_copy_entry = __ pc();
1321     BLOCK_COMMENT("Entry:");
1322     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1323 
1324     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1325                       // r9 and r10 may be used to save non-volatile registers
1326 
1327     // 'from', 'to' and 'count' are now valid
1328     __ movq(byte_count, count);
1329     __ shrq(count, 3); // count => qword_count
1330 
1331     // Copy from low to high addresses.  Use 'to' as scratch.
1332     __ leaq(end_from, Address(from, qword_count, Address::times_8, -8));
1333     __ leaq(end_to,   Address(to,   qword_count, Address::times_8, -8));
1334     __ negq(qword_count); // make the count negative
1335     __ jmp(L_copy_32_bytes);
1336 
1337     // Copy trailing qwords
1338   __ BIND(L_copy_8_bytes);
1339     __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1340     __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1341     __ incrementq(qword_count);
1342     __ jcc(Assembler::notZero, L_copy_8_bytes);
1343 
1344     // Check for and copy trailing dword
1345   __ BIND(L_copy_4_bytes);
1346     __ testq(byte_count, 4);
1347     __ jccb(Assembler::zero, L_copy_2_bytes);
1348     __ movl(rax, Address(end_from, 8));
1349     __ movl(Address(end_to, 8), rax);
1350 
1351     __ addq(end_from, 4);
1352     __ addq(end_to, 4);
1353 
1354     // Check for and copy trailing word
1355   __ BIND(L_copy_2_bytes);
1356     __ testq(byte_count, 2);
1357     __ jccb(Assembler::zero, L_copy_byte);
1358     __ movw(rax, Address(end_from, 8));
1359     __ movw(Address(end_to, 8), rax);
1360 
1361     __ addq(end_from, 2);
1362     __ addq(end_to, 2);
1363 
1364     // Check for and copy trailing byte
1365   __ BIND(L_copy_byte);
1366     __ testq(byte_count, 1);
1367     __ jccb(Assembler::zero, L_exit);
1368     __ movb(rax, Address(end_from, 8));
1369     __ movb(Address(end_to, 8), rax);
1370 
1371   __ BIND(L_exit);
1372     inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
1373     restore_arg_regs();
1374     __ xorq(rax, rax); // return 0
1375     __ leave(); // required for proper stackwalking of RuntimeStub frame
1376     __ ret(0);
1377 
1378     // Copy in 32-byte chunks
1379     copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1380     __ jmp(L_copy_4_bytes);
1381 
1382     return start;
1383   }
1384 
1385   // Arguments:
1386   //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1387   //             ignored
1388   //   name    - stub name string
1389   //
1390   // Inputs:
1391   //   c_rarg0   - source array address
1392   //   c_rarg1   - destination array address
1393   //   c_rarg2   - element count, treated as ssize_t, can be zero
1394   //


1404 
1405     Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
1406     const Register from        = rdi;  // source array address
1407     const Register to          = rsi;  // destination array address
1408     const Register count       = rdx;  // elements count
1409     const Register byte_count  = rcx;
1410     const Register qword_count = count;
1411 
1412     __ enter(); // required for proper stackwalking of RuntimeStub frame
1413     assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
1414 
1415     byte_copy_entry = __ pc();
1416     BLOCK_COMMENT("Entry:");
1417     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1418 
1419     array_overlap_test(disjoint_byte_copy_entry, Address::times_1);
1420     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1421                       // r9 and r10 may be used to save non-volatile registers
1422 
1423     // 'from', 'to' and 'count' are now valid
1424     __ movq(byte_count, count);
1425     __ shrq(count, 3);   // count => qword_count
1426 
1427     // Copy from high to low addresses.
1428 
1429     // Check for and copy trailing byte
1430     __ testq(byte_count, 1);
1431     __ jcc(Assembler::zero, L_copy_2_bytes);
1432     __ movb(rax, Address(from, byte_count, Address::times_1, -1));
1433     __ movb(Address(to, byte_count, Address::times_1, -1), rax);
1434     __ decrementq(byte_count); // Adjust for possible trailing word 
1435 
1436     // Check for and copy trailing word
1437   __ BIND(L_copy_2_bytes);
1438     __ testq(byte_count, 2);
1439     __ jcc(Assembler::zero, L_copy_4_bytes);
1440     __ movw(rax, Address(from, byte_count, Address::times_1, -2));
1441     __ movw(Address(to, byte_count, Address::times_1, -2), rax);
1442 
1443     // Check for and copy trailing dword
1444   __ BIND(L_copy_4_bytes);
1445     __ testq(byte_count, 4);
1446     __ jcc(Assembler::zero, L_copy_32_bytes);
1447     __ movl(rax, Address(from, qword_count, Address::times_8));
1448     __ movl(Address(to, qword_count, Address::times_8), rax);
1449     __ jmp(L_copy_32_bytes);
1450 
1451     // Copy trailing qwords
1452   __ BIND(L_copy_8_bytes);
1453     __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1454     __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1455     __ decrementq(qword_count);
1456     __ jcc(Assembler::notZero, L_copy_8_bytes);
1457 
1458     inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
1459     restore_arg_regs();
1460     __ xorq(rax, rax); // return 0
1461     __ leave(); // required for proper stackwalking of RuntimeStub frame
1462     __ ret(0);
1463 
1464     // Copy in 32-byte chunks
1465     copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1466 
1467     inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
1468     restore_arg_regs();
1469     __ xorq(rax, rax); // return 0
1470     __ leave(); // required for proper stackwalking of RuntimeStub frame
1471     __ ret(0);
1472 
1473     return start;
1474   }
1475 
1476   // Arguments:
1477   //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1478   //             ignored
1479   //   name    - stub name string
1480   //
1481   // Inputs:
1482   //   c_rarg0   - source array address
1483   //   c_rarg1   - destination array address
1484   //   c_rarg2   - element count, treated as ssize_t, can be zero
1485   //
1486   // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
1487   // let the hardware handle it.  The two or four words within dwords
1488   // or qwords that span cache line boundaries will still be loaded
1489   // and stored atomically.


1502     const Register to          = rsi;  // destination array address
1503     const Register count       = rdx;  // elements count
1504     const Register word_count  = rcx;
1505     const Register qword_count = count;
1506     const Register end_from    = from; // source array end address
1507     const Register end_to      = to;   // destination array end address
1508     // End pointers are inclusive, and if count is not zero they point
1509     // to the last unit copied:  end_to[0] := end_from[0]
1510 
1511     __ enter(); // required for proper stackwalking of RuntimeStub frame
1512     assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
1513 
1514     disjoint_short_copy_entry = __ pc();
1515     BLOCK_COMMENT("Entry:");
1516     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1517 
1518     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1519                       // r9 and r10 may be used to save non-volatile registers
1520 
1521     // 'from', 'to' and 'count' are now valid
1522     __ movq(word_count, count);
1523     __ shrq(count, 2); // count => qword_count
1524 
1525     // Copy from low to high addresses.  Use 'to' as scratch.
1526     __ leaq(end_from, Address(from, qword_count, Address::times_8, -8));
1527     __ leaq(end_to,   Address(to,   qword_count, Address::times_8, -8));
1528     __ negq(qword_count);
1529     __ jmp(L_copy_32_bytes);
1530 
1531     // Copy trailing qwords
1532   __ BIND(L_copy_8_bytes);
1533     __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1534     __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1535     __ incrementq(qword_count);
1536     __ jcc(Assembler::notZero, L_copy_8_bytes);
1537 
1538     // Original 'dest' is trashed, so we can't use it as a
1539     // base register for a possible trailing word copy
1540 
1541     // Check for and copy trailing dword
1542   __ BIND(L_copy_4_bytes);
1543     __ testq(word_count, 2);
1544     __ jccb(Assembler::zero, L_copy_2_bytes);
1545     __ movl(rax, Address(end_from, 8));
1546     __ movl(Address(end_to, 8), rax);
1547 
1548     __ addq(end_from, 4);
1549     __ addq(end_to, 4);
1550 
1551     // Check for and copy trailing word
1552   __ BIND(L_copy_2_bytes);
1553     __ testq(word_count, 1);
1554     __ jccb(Assembler::zero, L_exit);
1555     __ movw(rax, Address(end_from, 8));
1556     __ movw(Address(end_to, 8), rax);
1557 
1558   __ BIND(L_exit);
1559     inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
1560     restore_arg_regs();
1561     __ xorq(rax, rax); // return 0
1562     __ leave(); // required for proper stackwalking of RuntimeStub frame
1563     __ ret(0);
1564 
1565     // Copy in 32-byte chunks
1566     copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1567     __ jmp(L_copy_4_bytes);
1568 
1569     return start;
1570   }
1571 
1572   // Arguments:
1573   //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1574   //             ignored
1575   //   name    - stub name string
1576   //
1577   // Inputs:
1578   //   c_rarg0   - source array address
1579   //   c_rarg1   - destination array address
1580   //   c_rarg2   - element count, treated as ssize_t, can be zero
1581   //


1591 
1592     Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes;
1593     const Register from        = rdi;  // source array address
1594     const Register to          = rsi;  // destination array address
1595     const Register count       = rdx;  // elements count
1596     const Register word_count  = rcx;
1597     const Register qword_count = count;
1598 
1599     __ enter(); // required for proper stackwalking of RuntimeStub frame
1600     assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
1601 
1602     short_copy_entry = __ pc();
1603     BLOCK_COMMENT("Entry:");
1604     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1605 
1606     array_overlap_test(disjoint_short_copy_entry, Address::times_2);
1607     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1608                       // r9 and r10 may be used to save non-volatile registers
1609 
1610     // 'from', 'to' and 'count' are now valid
1611     __ movq(word_count, count);
1612     __ shrq(count, 2); // count => qword_count
1613 
1614     // Copy from high to low addresses.  Use 'to' as scratch.
1615 
1616     // Check for and copy trailing word
1617     __ testq(word_count, 1);
1618     __ jccb(Assembler::zero, L_copy_4_bytes);
1619     __ movw(rax, Address(from, word_count, Address::times_2, -2));
1620     __ movw(Address(to, word_count, Address::times_2, -2), rax);
1621 
1622     // Check for and copy trailing dword
1623   __ BIND(L_copy_4_bytes);
1624     __ testq(word_count, 2);
1625     __ jcc(Assembler::zero, L_copy_32_bytes);
1626     __ movl(rax, Address(from, qword_count, Address::times_8));
1627     __ movl(Address(to, qword_count, Address::times_8), rax);
1628     __ jmp(L_copy_32_bytes);
1629 
1630     // Copy trailing qwords
1631   __ BIND(L_copy_8_bytes);
1632     __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1633     __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1634     __ decrementq(qword_count);
1635     __ jcc(Assembler::notZero, L_copy_8_bytes);
1636 
1637     inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
1638     restore_arg_regs();
1639     __ xorq(rax, rax); // return 0
1640     __ leave(); // required for proper stackwalking of RuntimeStub frame
1641     __ ret(0);
1642 
1643     // Copy in 32-byte chunks
1644     copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1645 
1646     inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
1647     restore_arg_regs();
1648     __ xorq(rax, rax); // return 0
1649     __ leave(); // required for proper stackwalking of RuntimeStub frame
1650     __ ret(0);
1651 
1652     return start;
1653   }
1654 
1655   // Arguments:
1656   //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1657   //             ignored
1658   //   name    - stub name string
1659   //
1660   // Inputs:
1661   //   c_rarg0   - source array address
1662   //   c_rarg1   - destination array address
1663   //   c_rarg2   - element count, treated as ssize_t, can be zero
1664   //
1665   // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
1666   // the hardware handle it.  The two dwords within qwords that span
1667   // cache line boundaries will still be loaded and stored atomically.
1668   //
1669   // Side Effects:
1670   //   disjoint_int_copy_entry is set to the no-overlap entry point
1671   //   used by generate_conjoint_int_copy().
1672   //
1673   address generate_disjoint_int_copy(bool aligned, const char *name) {
1674     __ align(CodeEntryAlignment);
1675     StubCodeMark mark(this, "StubRoutines", name);
1676     address start = __ pc();
1677 
1678     Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
1679     const Register from        = rdi;  // source array address
1680     const Register to          = rsi;  // destination array address
1681     const Register count       = rdx;  // elements count
1682     const Register dword_count = rcx;
1683     const Register qword_count = count;
1684     const Register end_from    = from; // source array end address
1685     const Register end_to      = to;   // destination array end address
1686     // End pointers are inclusive, and if count is not zero they point
1687     // to the last unit copied:  end_to[0] := end_from[0]
1688 
1689     __ enter(); // required for proper stackwalking of RuntimeStub frame
1690     assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
1691 
1692     disjoint_int_copy_entry = __ pc();
1693     BLOCK_COMMENT("Entry:");
1694     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1695 
1696     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1697                       // r9 and r10 may be used to save non-volatile registers
1698 
1699     // 'from', 'to' and 'count' are now valid
1700     __ movq(dword_count, count);
1701     __ shrq(count, 1); // count => qword_count
1702 
1703     // Copy from low to high addresses.  Use 'to' as scratch.
1704     __ leaq(end_from, Address(from, qword_count, Address::times_8, -8));
1705     __ leaq(end_to,   Address(to,   qword_count, Address::times_8, -8));
1706     __ negq(qword_count);
1707     __ jmp(L_copy_32_bytes);
1708 
1709     // Copy trailing qwords
1710   __ BIND(L_copy_8_bytes);
1711     __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1712     __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1713     __ incrementq(qword_count);
1714     __ jcc(Assembler::notZero, L_copy_8_bytes);
1715 
1716     // Check for and copy trailing dword
1717   __ BIND(L_copy_4_bytes);
1718     __ testq(dword_count, 1); // Only the low bit matters: the value is 0 or 1
1719     __ jccb(Assembler::zero, L_exit);
1720     __ movl(rax, Address(end_from, 8));
1721     __ movl(Address(end_to, 8), rax);
1722 
1723   __ BIND(L_exit);




1724     inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
1725     restore_arg_regs();
1726     __ xorq(rax, rax); // return 0
1727     __ leave(); // required for proper stackwalking of RuntimeStub frame
1728     __ ret(0);
1729 
1730     // Copy in 32-byte chunks
1731     copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1732     __ jmp(L_copy_4_bytes);
1733 
1734     return start;
1735   }
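
  // A minimal C++ sketch (illustrative only, not emitted or used by the
  // generator) of the copy strategy above: bulk-copy qwords from low to high
  // addresses, then store one trailing dword when the element count is odd.
  // The 32-byte unrolling and the negative-index addressing are omitted.
  static void disjoint_int_copy_sketch(const jint* from, jint* to, size_t count) {
    size_t qword_count = count >> 1;             // count => qword_count
    const jlong* qfrom = (const jlong*) from;
    jlong*       qto   = (jlong*) to;
    for (size_t i = 0; i < qword_count; i++) {   // cf. L_copy_8_bytes
      qto[i] = qfrom[i];
    }
    if (count & 1) {                             // cf. L_copy_4_bytes
      to[count - 1] = from[count - 1];
    }
  }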
1736 
1737   // Arguments:
1738   //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1739   //             ignored

1740   //   name    - stub name string
1741   //
1742   // Inputs:
1743   //   c_rarg0   - source array address
1744   //   c_rarg1   - destination array address
1745   //   c_rarg2   - element count, treated as ssize_t, can be zero
1746   //
1747   // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
1748   // the hardware handle it.  The two dwords within qwords that span
1749   // cache line boundaries will still be loaded and stored atomically.
1750   //
1751   address generate_conjoint_int_copy(bool aligned, const char *name) {
1752     __ align(CodeEntryAlignment);
1753     StubCodeMark mark(this, "StubRoutines", name);
1754     address start = __ pc();
1755 
1756     Label L_copy_32_bytes, L_copy_8_bytes, L_copy_2_bytes;
1757     const Register from        = rdi;  // source array address
1758     const Register to          = rsi;  // destination array address
1759     const Register count       = rdx;  // elements count
1760     const Register dword_count = rcx;
1761     const Register qword_count = count;
1762 
1763     __ enter(); // required for proper stackwalking of RuntimeStub frame
1764     assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
1765 
1766     int_copy_entry = __ pc();





1767     BLOCK_COMMENT("Entry:");
1768     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1769 
1770     array_overlap_test(disjoint_int_copy_entry, Address::times_4);

1771     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1772                       // r9 and r10 may be used to save non-volatile registers
1773 

1774     // 'from', 'to' and 'count' are now valid
1775     __ movq(dword_count, count);
1776     __ shrq(count, 1); // count => qword_count
1777 
1778     // Copy from high to low addresses.  Use 'to' as scratch.
1779 
1780     // Check for and copy trailing dword
1781     __ testq(dword_count, 1);
1782     __ jcc(Assembler::zero, L_copy_32_bytes);
1783     __ movl(rax, Address(from, dword_count, Address::times_4, -4));
1784     __ movl(Address(to, dword_count, Address::times_4, -4), rax);
1785     __ jmp(L_copy_32_bytes);
1786 
1787     // Copy trailing qwords
1788   __ BIND(L_copy_8_bytes);
1789     __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1790     __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1791     __ decrementq(qword_count);
1792     __ jcc(Assembler::notZero, L_copy_8_bytes);
1793 
1794     inc_counter_np(SharedRuntime::_jint_array_copy_ctr);



1795     restore_arg_regs();
1796     __ xorq(rax, rax); // return 0
1797     __ leave(); // required for proper stackwalking of RuntimeStub frame
1798     __ ret(0);
1799 
1800     // Copy in 32-byte chunks
1801     copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1802 
1803     inc_counter_np(SharedRuntime::_jint_array_copy_ctr);






1804     restore_arg_regs();
1805     __ xorq(rax, rax); // return 0
1806     __ leave(); // required for proper stackwalking of RuntimeStub frame
1807     __ ret(0);
1808 
1809     return start;
1810   }
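
  // A minimal sketch (illustrative only) of the conjoint variant above: when
  // the regions cannot overlap destructively, array_overlap_test dispatches
  // to the disjoint (forward) entry; otherwise copy from high to low
  // addresses, peeling off the odd trailing dword first, as the code does.
  static void conjoint_int_copy_sketch(const jint* from, jint* to, size_t count) {
    if (to <= from || to >= from + count) {      // cf. array_overlap_test
      disjoint_int_copy_sketch(from, to, count); // forward copy is safe
      return;
    }
    if (count & 1) {                             // trailing dword first
      to[count - 1] = from[count - 1];
    }
    const jlong* qfrom = (const jlong*) from;
    jlong*       qto   = (jlong*) to;
    for (size_t i = count >> 1; i > 0; i--) {    // cf. L_copy_8_bytes, backward
      qto[i - 1] = qfrom[i - 1];
    }
  }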
1811 
1812   // Arguments:
1813   //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
1814   //             ignored
1815   //   is_oop  - true => oop array, so generate store check code
1816   //   name    - stub name string
1817   //
1818   // Inputs:
1819   //   c_rarg0   - source array address
1820   //   c_rarg1   - destination array address
1821   //   c_rarg2   - element count, treated as ssize_t, can be zero
1822   //
1823   // Side Effects:
1824   //   disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
1825   //   no-overlap entry point used by generate_conjoint_long_oop_copy().


1842     __ enter(); // required for proper stackwalking of RuntimeStub frame
1843     // Save no-overlap entry point for generate_conjoint_long_oop_copy()
1844     assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
1845 
1846     if (is_oop) {
1847       disjoint_oop_copy_entry  = __ pc();
1848       // no registers are destroyed by this call
1849       gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
1850     } else {
1851       disjoint_long_copy_entry = __ pc();
1852     }
1853     BLOCK_COMMENT("Entry:");
1854     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1855 
1856     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1857                       // r9 and r10 may be used to save non-volatile registers
1858 
1859     // 'from', 'to' and 'qword_count' are now valid
1860 
1861     // Copy from low to high addresses.  Use 'to' as scratch.
1862     __ leaq(end_from, Address(from, qword_count, Address::times_8, -8));
1863     __ leaq(end_to,   Address(to, qword_count, Address::times_8, -8));
1864     __ negq(qword_count);
1865     __ jmp(L_copy_32_bytes);
1866 
1867     // Copy trailing qwords
1868   __ BIND(L_copy_8_bytes);
1869     __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1870     __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1871     __ incrementq(qword_count);
1872     __ jcc(Assembler::notZero, L_copy_8_bytes);
1873 
1874     if (is_oop) {
1875       __ jmp(L_exit);
1876     } else {
1877       inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
1878       restore_arg_regs();
1879       __ xorq(rax, rax); // return 0
1880       __ leave(); // required for proper stackwalking of RuntimeStub frame
1881       __ ret(0);
1882     }
1883 
1884     // Copy in 32-byte chunks
1885     copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1886 
1887     if (is_oop) {
1888     __ BIND(L_exit);
1889       gen_write_ref_array_post_barrier(saved_to, end_to, rax);
1890       inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
1891     } else {
1892       inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
1893     }
1894     restore_arg_regs();
1895     __ xorq(rax, rax); // return 0
1896     __ leave(); // required for proper stackwalking of RuntimeStub frame
1897     __ ret(0);
1898 
1899     return start;
1900   }
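
  // A minimal sketch (illustrative only; the 512-byte card size and the zero
  // dirty value are assumptions matching the default card-table barrier) of
  // what gen_write_ref_array_post_barrier accomplishes after an oop copy:
  // dirty every card spanned by the destination range so GC rescans it.
  static void post_barrier_sketch(jbyte* card_table_base,
                                  intptr_t dest, intptr_t dest_end_inclusive) {
    const int card_shift = 9;                    // log2(512-byte cards), assumed
    for (intptr_t c = dest >> card_shift;
         c <= (dest_end_inclusive >> card_shift); c++) {
      card_table_base[c] = 0;                    // 0 == dirty card, assumed
    }
  }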
1901 
1902   // Arguments:
1903   //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
1904   //             ignored
1905   //   is_oop  - true => oop array, so generate store check code
1906   //   name    - stub name string
1907   //
1908   // Inputs:
1909   //   c_rarg0   - source array address
1910   //   c_rarg1   - destination array address
1911   //   c_rarg2   - element count, treated as ssize_t, can be zero
1912   //
1913   address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, const char *name) {
1914     __ align(CodeEntryAlignment);
1915     StubCodeMark mark(this, "StubRoutines", name);
1916     address start = __ pc();
1917 
1918     Label L_copy_32_bytes, L_copy_8_bytes, L_exit;
1919     const Register from        = rdi;  // source array address
1920     const Register to          = rsi;  // destination array address
1921     const Register qword_count = rdx;  // elements count
1922     const Register saved_count = rcx;
1923 
1924     __ enter(); // required for proper stackwalking of RuntimeStub frame
1925     assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
1926 
1927     address disjoint_copy_entry = NULL;
1928     if (is_oop) {

1929       disjoint_copy_entry = disjoint_oop_copy_entry;
1930       oop_copy_entry  = __ pc();

1931     } else {
1932       disjoint_copy_entry = disjoint_long_copy_entry;
1933       long_copy_entry = __ pc();

1934     }
1935     BLOCK_COMMENT("Entry:");
1936     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1937 
1938     array_overlap_test(disjoint_copy_entry, Address::times_8);
1939     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1940                       // r9 and r10 may be used to save non-volatile registers
1941 
1942     // 'from', 'to' and 'qword_count' are now valid
1943 
1944     if (is_oop) {
1945       // Save to and count for store barrier
1946       __ movq(saved_count, qword_count);
1947       // No registers are destroyed by this call
1948       gen_write_ref_array_pre_barrier(to, saved_count);
1949     }
1950 
1951     // Copy from high to low addresses.  Use rcx as scratch.
1952 
1953     __ jmp(L_copy_32_bytes);
1954 
1955     // Copy trailing qwords
1956   __ BIND(L_copy_8_bytes);
1957     __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1958     __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1959     __ decrementq(qword_count);
1960     __ jcc(Assembler::notZero, L_copy_8_bytes);
1961 
1962     if (is_oop) {
1963       __ jmp(L_exit);
1964     } else {
1965       inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
1966       restore_arg_regs();
1967       __ xorq(rax, rax); // return 0
1968       __ leave(); // required for proper stackwalking of RuntimeStub frame
1969       __ ret(0);
1970     }
1971 
1972     // Copy in 32-byte chunks
1973     copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1974 
1975     if (is_oop) {
1976     __ BIND(L_exit);
1977       __ leaq(rcx, Address(to, saved_count, Address::times_8, -8));
1978       gen_write_ref_array_post_barrier(to, rcx, rax);
1979       inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
1980     } else {
1981       inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
1982     }
1983     restore_arg_regs();
1984     __ xorq(rax, rax); // return 0
1985     __ leave(); // required for proper stackwalking of RuntimeStub frame
1986     __ ret(0);
1987 
1988     return start;
1989   }
1990 
1991 
1992   // Helper for generating a dynamic type check.
1993   // Smashes no registers.
1994   void generate_type_check(Register sub_klass,
1995                            Register super_check_offset,
1996                            Register super_klass,
1997                            Label& L_success) {
1998     assert_different_registers(sub_klass, super_check_offset, super_klass);
1999 
2000     BLOCK_COMMENT("type_check:");
2001 
2002     Label L_miss;
2003 
2004     // a couple of useful fields in sub_klass:
2005     int ss_offset = (klassOopDesc::header_size() * HeapWordSize +
2006                      Klass::secondary_supers_offset_in_bytes());
2007     int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
2008                      Klass::secondary_super_cache_offset_in_bytes());
2009     Address secondary_supers_addr(sub_klass, ss_offset);
2010     Address super_cache_addr(     sub_klass, sc_offset);
2011 
2012     // if the pointers are equal, we are done (e.g., String[] elements)
2013     __ cmpq(super_klass, sub_klass);
2014     __ jcc(Assembler::equal, L_success);
2015 
2016     // check the supertype display:
2017     Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
2018     __ cmpq(super_klass, super_check_addr); // test the super type
2019     __ jcc(Assembler::equal, L_success);
2020 
2021     // if it was a primary super, we can just fail immediately
2022     __ cmpl(super_check_offset, sc_offset);
2023     __ jcc(Assembler::notEqual, L_miss);
2024 
2025     // Now do a linear scan of the secondary super-klass chain.
2026     // The repne_scan instruction uses fixed registers, which we must spill.
2027     // (We need a couple more temps in any case.)
2028     // This code is rarely used, so simplicity is a virtue here.
2029     inc_counter_np(SharedRuntime::_partial_subtype_ctr);
2030     {
2031       __ pushq(rax);
2032       __ pushq(rcx);
2033       __ pushq(rdi);
2034       assert_different_registers(sub_klass, super_klass, rax, rcx, rdi);
2035 
2036       __ movq(rdi, secondary_supers_addr);
2037       // Load the array length.
2038       __ movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes())); 
2039       // Skip to start of data.
2040       __ addq(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
2041       // Scan rcx words at [rdi] for occurrence of rax
2042       // Set NZ/Z based on last compare
2043       __ movq(rax, super_klass);






2044       __ repne_scan();

2045 
2046       // Unspill the temp. registers:
2047       __ popq(rdi);
2048       __ popq(rcx);
2049       __ popq(rax);
2050 
2051       __ jcc(Assembler::notEqual, L_miss);
2052     }
2053 
2054     // Success.  Cache the super we found and proceed in triumph.
2055     __ movq(super_cache_addr, super_klass); // note: rax is dead
2056     __ jmp(L_success);
2057 
2058     // Fall through on failure!
2059     __ BIND(L_miss);
2060   }
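
  // A minimal sketch (illustrative only; ToyKlass is a stand-in, not
  // klassOopDesc) of the two-level subtype check generated above: probe the
  // supertype display at super_check_offset, and only when that offset names
  // the secondary-super-cache slot fall back to a linear scan of the
  // secondary supers, caching any hit -- the job of the repne_scan loop.
  struct ToyKlass {
    ToyKlass*  primary_supers[8];    // fixed-depth display
    ToyKlass** secondary_supers;     // NULL-terminated list (simplified)
    ToyKlass*  secondary_super_cache;
  };
  static bool is_subtype_sketch(ToyKlass* sub, ToyKlass* super,
                                int super_depth, bool super_is_primary) {
    if (sub == super)  return true;              // e.g., String[] elements
    if (super_is_primary) {                      // display probe is decisive
      return sub->primary_supers[super_depth] == super;
    }
    if (sub->secondary_super_cache == super)  return true;
    for (ToyKlass** p = sub->secondary_supers; *p != NULL; p++) {
      if (*p == super) {                         // cf. repne_scan
        sub->secondary_super_cache = super;      // cache the super we found
        return true;
      }
    }
    return false;                                // cf. L_miss
  }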
2061 
2062   //
2063   //  Generate checkcasting array copy stub
2064   //
2065   //  Input:
2066   //    c_rarg0   - source array address
2067   //    c_rarg1   - destination array address
2068   //    c_rarg2   - element count, treated as ssize_t, can be zero
2069   //    c_rarg3   - size_t ckoff (super_check_offset)
2070   // not Win64
2071   //    c_rarg4   - oop ckval (super_klass)
2072   // Win64
2073   //    rsp+40    - oop ckval (super_klass)
2074   //
2075   //  Output:


2101     //---------------------------------------------------------------
2102     // Assembler stub will be used for this call to arraycopy 
2103     // if the two arrays are subtypes of Object[] but the
2104     // destination array type is not equal to or a supertype
2105     // of the source type.  Each element must be separately
2106     // checked.
2107 
2108     __ align(CodeEntryAlignment);
2109     StubCodeMark mark(this, "StubRoutines", name);
2110     address start = __ pc();
2111 
2112     __ enter(); // required for proper stackwalking of RuntimeStub frame
2113 
2114     checkcast_copy_entry  = __ pc();
2115     BLOCK_COMMENT("Entry:");
2116 
2117 #ifdef ASSERT
2118     // caller guarantees that the arrays really are different
2119     // otherwise, we would have to make conjoint checks
2120     { Label L;
2121       array_overlap_test(L, Address::times_8);
2122       __ stop("checkcast_copy within a single array");
2123       __ bind(L);
2124     }
2125 #endif //ASSERT
2126 
2127     // allocate spill slots for r13, r14
2128     enum {
2129       saved_r13_offset,
2130       saved_r14_offset,
2131       saved_rbp_offset,
2132       saved_rip_offset,
2133       saved_rarg0_offset
2134     };
2135     __ subq(rsp, saved_rbp_offset * wordSize);
2136     __ movq(Address(rsp, saved_r13_offset * wordSize), r13);
2137     __ movq(Address(rsp, saved_r14_offset * wordSize), r14);
2138     setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
2139                        // ckoff => rcx, ckval => r8
2140                        // r9 and r10 may be used to save non-volatile registers
2141 #ifdef _WIN64
2142     // last argument (#4) is on stack on Win64
2143     const int ckval_offset = saved_rarg0_offset + 4;
2144     __ movq(ckval, Address(rsp, ckval_offset * wordSize));
2145 #endif
2146 
2147     // check that int operands are properly extended to size_t
2148     assert_clean_int(length, rax);
2149     assert_clean_int(ckoff, rax);
2150 
2151 #ifdef ASSERT
2152     BLOCK_COMMENT("assert consistent ckoff/ckval");
2153     // The ckoff and ckval must be mutually consistent,
2154     // even though caller generates both.
2155     { Label L;
2156       int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
2157                         Klass::super_check_offset_offset_in_bytes());
2158       __ cmpl(ckoff, Address(ckval, sco_offset));
2159       __ jcc(Assembler::equal, L);
2160       __ stop("super_check_offset inconsistent");
2161       __ bind(L);
2162     }
2163 #endif //ASSERT
2164 
2165     // Loop-invariant addresses.  They are exclusive end pointers.
2166     Address end_from_addr(from, length, Address::times_8, 0);
2167     Address   end_to_addr(to,   length, Address::times_8, 0);
2168     // Loop-variant addresses.  They assume post-incremented count < 0.
2169     Address from_element_addr(end_from, count, Address::times_8, 0);
2170     Address   to_element_addr(end_to,   count, Address::times_8, 0);
2171     Address oop_klass_addr(rax_oop, oopDesc::klass_offset_in_bytes());
2172 
2173     gen_write_ref_array_pre_barrier(to, count);
2174 
2175     // Copy from low to high addresses, indexed from the end of each array.
2176     __ leaq(end_from, end_from_addr);
2177     __ leaq(end_to,   end_to_addr);
2178     __ movq(r14_length, length);        // save a copy of the length
2179     assert(length == count, "");        // else fix next line:
2180     __ negq(count);                     // negate and test the length
2181     __ jcc(Assembler::notZero, L_load_element);
2182 
2183     // Empty array:  Nothing to do.
2184     __ xorq(rax, rax);                  // return 0 on (trivial) success
2185     __ jmp(L_done);
2186 
2187     // ======== begin loop ========
2188     // (Loop is rotated; its entry is L_load_element.)
2189     // Loop control:
2190     //   for (count = -count; count != 0; count++)
2191     // Base pointers src, dst are biased by 8*(count-1), to point at the last element.
2192     __ align(16);
2193     
2194     __ BIND(L_store_element);
2195     __ movq(to_element_addr, rax_oop);  // store the oop
2196     __ incrementq(count);               // increment the count toward zero
2197     __ jcc(Assembler::zero, L_do_card_marks);
2198 
2199     // ======== loop entry is here ========
2200     __ BIND(L_load_element);
2201     __ movq(rax_oop, from_element_addr); // load the oop
2202     __ testq(rax_oop, rax_oop);
2203     __ jcc(Assembler::zero, L_store_element);
2204 
2205     __ movq(r11_klass, oop_klass_addr); // query the object klass
2206     generate_type_check(r11_klass, ckoff, ckval, L_store_element);
2207     // ======== end loop ========
2208 
2209     // It was a real error; we must depend on the caller to finish the job.
2210     // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
2211     // Emit GC store barriers for the oops we have copied (r14 + rdx),
2212     // and report their number to the caller.
2213     assert_different_registers(rax, r14_length, count, to, end_to, rcx);
2214     __ leaq(end_to, to_element_addr);
2215     gen_write_ref_array_post_barrier(to, end_to, rcx);
2216     __ movq(rax, r14_length);           // original oops
2217     __ addq(rax, count);                // K = (original - remaining) oops
2218     __ notq(rax);                       // report (-1^K) to caller
2219     __ jmp(L_done);
2220 
2221     // Come here on success only.
2222     __ BIND(L_do_card_marks);
2223     __ addq(end_to, -wordSize);         // make an inclusive end pointer
2224     gen_write_ref_array_post_barrier(to, end_to, rcx);
2225     __ xorq(rax, rax);                  // return 0 on success
2226 
2227     // Common exit point (success or failure).
2228     __ BIND(L_done);
2229     __ movq(r13, Address(rsp, saved_r13_offset * wordSize));
2230     __ movq(r14, Address(rsp, saved_r14_offset * wordSize));
2231     inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
2232     restore_arg_regs();
2233     __ leave(); // required for proper stackwalking of RuntimeStub frame
2234     __ ret(0);
2235 
2236     return start;
2237   }
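
  // A minimal sketch (illustrative only) of the checkcast stub's return-value
  // contract: 0 on full success, otherwise (-1 ^ K) where K is the number of
  // elements copied before the failing one.  NULLs are always storable; the
  // real per-element check is generate_type_check above.
  static intptr_t checkcast_copy_sketch(oop* from, oop* to, size_t count,
                                        bool (*element_ok)(oop)) {
    for (size_t k = 0; k < count; k++) {
      oop obj = from[k];
      if (obj != NULL && !element_ok(obj)) {
        return ~(intptr_t) k;                    // report (-1 ^ K) to caller
      }
      to[k] = obj;                               // cf. L_store_element
    }
    return 0;                                    // cf. L_do_card_marks path
  }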
2238 
2239   //
2240   //  Generate 'unsafe' array copy stub
2241   //  Though just as safe as the other stubs, it takes an unscaled
2242   //  size_t argument instead of an element count.
2243   //
2244   //  Input:
2245   //    c_rarg0   - source array address
2246   //    c_rarg1   - destination array address
2247   //    c_rarg2   - byte count, treated as ssize_t, can be zero
2248   //
2249   // Examines the alignment of the operands and dispatches
2250   // to a long, int, short, or byte copy loop.


2253 
2254     Label L_long_aligned, L_int_aligned, L_short_aligned;
2255 
2256     // Input registers (before setup_arg_regs)
2257     const Register from        = c_rarg0;  // source array address
2258     const Register to          = c_rarg1;  // destination array address
2259     const Register size        = c_rarg2;  // byte count (size_t)
2260 
2261     // Register used as a temp
2262     const Register bits        = rax;      // test copy of low bits
2263 
2264     __ align(CodeEntryAlignment);
2265     StubCodeMark mark(this, "StubRoutines", name);
2266     address start = __ pc();
2267 
2268     __ enter(); // required for proper stackwalking of RuntimeStub frame
2269 
2270     // bump this on entry, not on exit:
2271     inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);
2272 
2273     __ movq(bits, from);
2274     __ orq(bits, to);
2275     __ orq(bits, size);
2276 
2277     __ testb(bits, BytesPerLong-1);
2278     __ jccb(Assembler::zero, L_long_aligned);
2279 
2280     __ testb(bits, BytesPerInt-1);
2281     __ jccb(Assembler::zero, L_int_aligned);
2282 
2283     __ testb(bits, BytesPerShort-1);
2284     __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));
2285 
2286     __ BIND(L_short_aligned);
2287     __ shrq(size, LogBytesPerShort); // size => short_count
2288     __ jump(RuntimeAddress(short_copy_entry));
2289 
2290     __ BIND(L_int_aligned);
2291     __ shrq(size, LogBytesPerInt); // size => int_count
2292     __ jump(RuntimeAddress(int_copy_entry));
2293 
2294     __ BIND(L_long_aligned);
2295     __ shrq(size, LogBytesPerLong); // size => qword_count
2296     __ jump(RuntimeAddress(long_copy_entry));
2297 
2298     return start;
2299   }
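
  // A minimal sketch (illustrative only) of the dispatch above: OR the two
  // addresses and the byte count together, so a single low-bits test per size
  // class picks the widest element width that divides all three.
  static int unsafe_copy_log_elsize_sketch(intptr_t from, intptr_t to, size_t size) {
    intptr_t bits = from | to | (intptr_t) size; // cf. the movq/orq/orq above
    if ((bits & (BytesPerLong  - 1)) == 0)  return LogBytesPerLong;
    if ((bits & (BytesPerInt   - 1)) == 0)  return LogBytesPerInt;
    if ((bits & (BytesPerShort - 1)) == 0)  return LogBytesPerShort;
    return 0;                                    // byte copy
  }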
2300 
2301   // Perform range checks on the proposed arraycopy.
2302   // Kills temp, but nothing else.
2303   // Also cleans the sign bits of src_pos and dst_pos.
2304   void arraycopy_range_checks(Register src,     // source array oop (c_rarg0)
2305                               Register src_pos, // source position (c_rarg1)
2306                               Register dst,     // destination array oop (c_rarg2)
2307                               Register dst_pos, // destination position (c_rarg3)
2308                               Register length,
2309                               Register temp,
2310                               Label& L_failed) {
2311     BLOCK_COMMENT("arraycopy_range_checks:");
2312 
2313     //  if (src_pos + length > arrayOop(src)->length())  FAIL;
2314     __ movl(temp, length);
2315     __ addl(temp, src_pos);             // src_pos + length


2383     __ enter(); // required for proper stackwalking of RuntimeStub frame
2384 
2385     // bump this on entry, not on exit:
2386     inc_counter_np(SharedRuntime::_generic_array_copy_ctr);
2387 
2388     //-----------------------------------------------------------------------
2389     // Assembler stub will be used for this call to arraycopy 
2390     // if the following conditions are met:
2391     // 
2392     // (1) src and dst must not be null.
2393     // (2) src_pos must not be negative.
2394     // (3) dst_pos must not be negative.
2395     // (4) length  must not be negative.
2396     // (5) src klass and dst klass should be the same and not NULL.
2397     // (6) src and dst should be arrays.
2398     // (7) src_pos + length must not exceed length of src.
2399     // (8) dst_pos + length must not exceed length of dst.
2400     // 
2401 
2402     //  if (src == NULL) return -1;
2403     __ testq(src, src);         // src oop
2404     size_t j1off = __ offset();
2405     __ jccb(Assembler::zero, L_failed_0);
2406 
2407     //  if (src_pos < 0) return -1;
2408     __ testl(src_pos, src_pos); // src_pos (32-bits)
2409     __ jccb(Assembler::negative, L_failed_0);
2410 
2411     //  if (dst == NULL) return -1;
2412     __ testq(dst, dst);         // dst oop
2413     __ jccb(Assembler::zero, L_failed_0);
2414 
2415     //  if (dst_pos < 0) return -1;
2416     __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
2417     size_t j4off = __ offset();
2418     __ jccb(Assembler::negative, L_failed_0);
2419 
2420     // The first four tests are very dense code,
2421     // but not quite dense enough to put four
2422     // jumps in a 16-byte instruction fetch buffer.
2423     // That's good, because some branch predictors
2424     // do not like jumps so close together.
2425     // Make sure of this.
2426     guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");
2427 
2428     // registers used as temp
2429     const Register r11_length    = r11; // elements count to copy
2430     const Register r10_src_klass = r10; // array klass

2431 
2432     //  if (length < 0) return -1;
2433     __ movl(r11_length, C_RARG4);       // length (elements count, 32-bit value)
2434     __ testl(r11_length, r11_length);
2435     __ jccb(Assembler::negative, L_failed_0);
2436 
2437     Address src_klass_addr(src, oopDesc::klass_offset_in_bytes());
2438     Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes());
2439     __ movq(r10_src_klass, src_klass_addr);
2440 #ifdef ASSERT
2441     //  assert(src->klass() != NULL);
2442     BLOCK_COMMENT("assert klasses not null");
2443     { Label L1, L2;
2444       __ testq(r10_src_klass, r10_src_klass);
2445       __ jcc(Assembler::notZero, L2);   // it is broken if klass is NULL
2446       __ bind(L1);
2447       __ stop("broken null klass");
2448       __ bind(L2);
2449       __ cmpq(dst_klass_addr, 0);

2450       __ jcc(Assembler::equal, L1);     // this would be broken also
2451       BLOCK_COMMENT("assert done");
2452     }
2453 #endif
2454 
2455     // Load layout helper (32-bits)
2456     //
2457     //  |array_tag|     | header_size | element_type |     |log2_element_size|
2458     // 32        30    24            16              8     2                 0
2459     //
2460     //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2461     //
2462 
2463     int lh_offset = klassOopDesc::header_size() * HeapWordSize +
2464                     Klass::layout_helper_offset_in_bytes();
2465 
2466     const Register rax_lh = rax;  // layout helper
2467 
2468     __ movl(rax_lh, Address(r10_src_klass, lh_offset));
2469 
2470     // Handle objArrays completely differently...
2471     jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2472     __ cmpl(rax_lh, objArray_lh);
2473     __ jcc(Assembler::equal, L_objArray);
2474 
2475     //  if (src->klass() != dst->klass()) return -1;
2476     __ cmpq(r10_src_klass, dst_klass_addr);

2477     __ jcc(Assembler::notEqual, L_failed);
2478 
2479     //  if (!src->is_Array()) return -1;
2480     __ cmpl(rax_lh, Klass::_lh_neutral_value);
2481     __ jcc(Assembler::greaterEqual, L_failed);
2482 
2483     // At this point, it is known to be a typeArray (array_tag 0x3).
2484 #ifdef ASSERT
2485     { Label L;
2486       __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
2487       __ jcc(Assembler::greaterEqual, L);
2488       __ stop("must be a primitive array");
2489       __ bind(L);
2490     }
2491 #endif
2492 
2493     arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2494                            r10, L_failed);
2495 
2496     // typeArrayKlass
2497     //
2498     // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
2499     // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
2500     //
2501 
2502     const Register r10_offset = r10;    // array offset
2503     const Register rax_elsize = rax_lh; // element size
2504 
2505     __ movl(r10_offset, rax_lh);
2506     __ shrl(r10_offset, Klass::_lh_header_size_shift);
2507     __ andq(r10_offset, Klass::_lh_header_size_mask);   // array_offset
2508     __ addq(src, r10_offset);           // src array offset
2509     __ addq(dst, r10_offset);           // dst array offset
2510     BLOCK_COMMENT("choose copy loop based on element size");
2511     __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize
2512 
2513     // The next registers must be set before the jump to the corresponding stub.
2514     const Register from     = c_rarg0;  // source array address
2515     const Register to       = c_rarg1;  // destination array address
2516     const Register count    = c_rarg2;  // elements count
2517 
2518     // The 'from', 'to' and 'count' registers must be set in this order,
2519     // since they are the same registers as 'src', 'src_pos' and 'dst'.
2520 
2521   __ BIND(L_copy_bytes);
2522     __ cmpl(rax_elsize, 0);
2523     __ jccb(Assembler::notEqual, L_copy_shorts);
2524     __ leaq(from, Address(src, src_pos, Address::times_1, 0));// src_addr
2525     __ leaq(to,   Address(dst, dst_pos, Address::times_1, 0));// dst_addr
2526     __ movslq(count, r11_length); // length
2527     __ jump(RuntimeAddress(byte_copy_entry));
2528 
2529   __ BIND(L_copy_shorts);
2530     __ cmpl(rax_elsize, LogBytesPerShort);
2531     __ jccb(Assembler::notEqual, L_copy_ints);
2532     __ leaq(from, Address(src, src_pos, Address::times_2, 0));// src_addr
2533     __ leaq(to,   Address(dst, dst_pos, Address::times_2, 0));// dst_addr
2534     __ movslq(count, r11_length); // length
2535     __ jump(RuntimeAddress(short_copy_entry));
2536 
2537   __ BIND(L_copy_ints);
2538     __ cmpl(rax_elsize, LogBytesPerInt);
2539     __ jccb(Assembler::notEqual, L_copy_longs);
2540     __ leaq(from, Address(src, src_pos, Address::times_4, 0));// src_addr
2541     __ leaq(to,   Address(dst, dst_pos, Address::times_4, 0));// dst_addr
2542     __ movslq(count, r11_length); // length
2543     __ jump(RuntimeAddress(int_copy_entry));
2544 
2545   __ BIND(L_copy_longs);
2546 #ifdef ASSERT
2547     { Label L;
2548       __ cmpl(rax_elsize, LogBytesPerLong);
2549       __ jcc(Assembler::equal, L);
2550       __ stop("must be long copy, but elsize is wrong");
2551       __ bind(L);
2552     }
2553 #endif
2554     __ leaq(from, Address(src, src_pos, Address::times_8, 0));// src_addr
2555     __ leaq(to,   Address(dst, dst_pos, Address::times_8, 0));// dst_addr
2556     __ movslq(count, r11_length); // length
2557     __ jump(RuntimeAddress(long_copy_entry));
2558 
2559     // objArrayKlass
2560   __ BIND(L_objArray);
2561     // live at this point:  r10_src_klass, src[_pos], dst[_pos]
2562 
2563     Label L_plain_copy, L_checkcast_copy;
2564     //  test array classes for subtyping
2565     __ cmpq(r10_src_klass, dst_klass_addr); // usual case is exact equality

2566     __ jcc(Assembler::notEqual, L_checkcast_copy);
2567 
2568     // Identically typed arrays can be copied without element-wise checks.
2569     arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2570                            r10, L_failed);
2571 
2572     __ leaq(from, Address(src, src_pos, Address::times_8,
2573                  arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
2574     __ leaq(to,   Address(dst, dst_pos, Address::times_8,
2575                  arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
2576     __ movslq(count, r11_length); // length
2577   __ BIND(L_plain_copy);
2578     __ jump(RuntimeAddress(oop_copy_entry));
2579 
2580   __ BIND(L_checkcast_copy);
2581     // live at this point:  r10_src_klass, !r11_length
2582     {
2583       // assert(r11_length == C_RARG4); // will reload from here
2584       Register r11_dst_klass = r11;
2585       __ movq(r11_dst_klass, dst_klass_addr);
2586 
2587       // Before looking at dst.length, make sure dst is also an objArray.
2588       __ cmpl(Address(r11_dst_klass, lh_offset), objArray_lh);
2589       __ jcc(Assembler::notEqual, L_failed);
2590 
2591       // It is safe to examine both src.length and dst.length.
2592 #ifndef _WIN64
2593       arraycopy_range_checks(src, src_pos, dst, dst_pos, C_RARG4,
2594                              rax, L_failed);
2595 #else
2596       __ movl(r11_length, C_RARG4);     // reload
2597       arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2598                              rax, L_failed);
2599       __ movl(r11_dst_klass, dst_klass_addr); // reload
2600 #endif
2601 
2602       // Marshal the base address arguments now, freeing registers.
2603       __ leaq(from, Address(src, src_pos, Address::times_8,
2604                    arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
2605       __ leaq(to,   Address(dst, dst_pos, Address::times_8,
2606                    arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
2607       __ movl(count, C_RARG4);          // length (reloaded)
2608       Register sco_temp = c_rarg3;      // this register is free now
2609       assert_different_registers(from, to, count, sco_temp,
2610                                  r11_dst_klass, r10_src_klass);
2611       assert_clean_int(count, sco_temp);
2612 
2613       // Generate the type check.
2614       int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
2615                         Klass::super_check_offset_offset_in_bytes());
2616       __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
2617       assert_clean_int(sco_temp, rax);
2618       generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);
2619 
2620       // Fetch destination element klass from the objArrayKlass header.
2621       int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
2622                        objArrayKlass::element_klass_offset_in_bytes());
2623       __ movq(r11_dst_klass, Address(r11_dst_klass, ek_offset));
2624       __ movl(sco_temp,      Address(r11_dst_klass, sco_offset));
2625       assert_clean_int(sco_temp, rax);
2626 
2627       // the checkcast_copy loop needs two extra arguments:
2628       assert(c_rarg3 == sco_temp, "#3 already in place");
2629       __ movq(C_RARG4, r11_dst_klass);  // dst.klass.element_klass
2630       __ jump(RuntimeAddress(checkcast_copy_entry));
2631     }
2632 
2633   __ BIND(L_failed);
2634     __ xorq(rax, rax);
2635     __ notq(rax); // return -1
2636     __ leave();   // required for proper stackwalking of RuntimeStub frame
2637     __ ret(0);
2638 
2639     return start;
2640   }
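
  // A minimal sketch (illustrative only; ToyArrayOop is a stand-in for the
  // real array header) of the argument screening above, conditions (1)-(8):
  // any failure returns -1 and the caller falls back to a slower path.
  struct ToyArrayOop { void* klass; jint length; };
  static jint generic_copy_checks_sketch(ToyArrayOop* src, jint src_pos,
                                         ToyArrayOop* dst, jint dst_pos,
                                         jint length) {
    if (src == NULL || dst == NULL)                return -1;  // (1)
    if (src_pos < 0 || dst_pos < 0 || length < 0)  return -1;  // (2)-(4)
    if (src->klass == NULL || src->klass != dst->klass) return -1;  // (5)
    // (6), "src and dst should be arrays", is decided from the klass layout
    // helper (the shifts and masks above); omitted here.
    if ((julong) src_pos + length > (julong) src->length)  return -1;  // (7)
    if ((julong) dst_pos + length > (julong) dst->length)  return -1;  // (8)
    return 0;  // checks pass: pick a copy stub by element size and jump to it
  }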
2641 
2642 #undef length_arg
2643 
2644   void generate_arraycopy_stubs() {
2645     // Call the conjoint generation methods immediately after
2646     // the disjoint ones so that short branches from the former
2647     // to the latter can be generated.
2648     StubRoutines::_jbyte_disjoint_arraycopy  = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
2649     StubRoutines::_jbyte_arraycopy           = generate_conjoint_byte_copy(false, "jbyte_arraycopy");
2650 
2651     StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
2652     StubRoutines::_jshort_arraycopy          = generate_conjoint_short_copy(false, "jshort_arraycopy");
2653 
2654     StubRoutines::_jint_disjoint_arraycopy   = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy");
2655     StubRoutines::_jint_arraycopy            = generate_conjoint_int_copy(false, "jint_arraycopy");
2656 
2657     StubRoutines::_jlong_disjoint_arraycopy  = generate_disjoint_long_oop_copy(false, false, "jlong_disjoint_arraycopy");
2658     StubRoutines::_jlong_arraycopy           = generate_conjoint_long_oop_copy(false, false, "jlong_arraycopy");
2659 





2660     StubRoutines::_oop_disjoint_arraycopy    = generate_disjoint_long_oop_copy(false, true, "oop_disjoint_arraycopy");
2661     StubRoutines::_oop_arraycopy             = generate_conjoint_long_oop_copy(false, true, "oop_arraycopy");

2662 
2663     StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy");
2664     StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy("unsafe_arraycopy");
2665     StubRoutines::_generic_arraycopy   = generate_generic_copy("generic_arraycopy");
2666 
2667     // We don't generate specialized code for HeapWord-aligned source
2668     // arrays, so just use the code we've already generated
2669     StubRoutines::_arrayof_jbyte_disjoint_arraycopy  = StubRoutines::_jbyte_disjoint_arraycopy;
2670     StubRoutines::_arrayof_jbyte_arraycopy           = StubRoutines::_jbyte_arraycopy;
2671 
2672     StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy;
2673     StubRoutines::_arrayof_jshort_arraycopy          = StubRoutines::_jshort_arraycopy;
2674 
2675     StubRoutines::_arrayof_jint_disjoint_arraycopy   = StubRoutines::_jint_disjoint_arraycopy;
2676     StubRoutines::_arrayof_jint_arraycopy            = StubRoutines::_jint_arraycopy;
2677 
2678     StubRoutines::_arrayof_jlong_disjoint_arraycopy  = StubRoutines::_jlong_disjoint_arraycopy;
2679     StubRoutines::_arrayof_jlong_arraycopy           = StubRoutines::_jlong_arraycopy;
2680 
2681     StubRoutines::_arrayof_oop_disjoint_arraycopy    = StubRoutines::_oop_disjoint_arraycopy;


2712       rbp_off2,
2713       return_off,
2714       return_off2,
2715       framesize // inclusive of return address
2716     };
2717 
2718     int insts_size = 512;
2719     int locs_size  = 64;
2720 
2721     CodeBuffer code(name, insts_size, locs_size);
2722     OopMapSet* oop_maps  = new OopMapSet();
2723     MacroAssembler* masm = new MacroAssembler(&code);
2724 
2725     address start = __ pc();
2726 
2727     // This is an inlined and slightly modified version of call_VM
2728     // which has the ability to fetch the return PC out of
2729     // thread-local storage and also sets up last_Java_sp slightly
2730     // differently than the real call_VM
2731     if (restore_saved_exception_pc) {
2732       __ movq(rax,
2733               Address(r15_thread,
2734                       in_bytes(JavaThread::saved_exception_pc_offset())));
2735       __ pushq(rax);
2736     }
2737       
2738     __ enter(); // required for proper stackwalking of RuntimeStub frame
2739 
2740     assert(is_even(framesize/2), "sp not 16-byte aligned");
2741 
2742     // return address and rbp are already in place
2743     __ subq(rsp, (framesize-4) << LogBytesPerInt); // prolog
2744 
2745     int frame_complete = __ pc() - start;
2746 
2747     // Set up last_Java_sp and last_Java_fp
2748     __ set_last_Java_frame(rsp, rbp, NULL);
2749 
2750     // Call runtime
2751     __ movq(c_rarg0, r15_thread);
2752     BLOCK_COMMENT("call runtime_entry");
2753     __ call(RuntimeAddress(runtime_entry));
2754 
2755     // Generate oop map
2756     OopMap* map = new OopMap(framesize, 0);
2757 
2758     oop_maps->add_gc_map(__ pc() - start, map);
2759 
2760     __ reset_last_Java_frame(true, false);
2761 
2762     __ leave(); // required for proper stackwalking of RuntimeStub frame
2763 
2764     // check for pending exceptions
2765 #ifdef ASSERT
2766     Label L;
2767     __ cmpq(Address(r15_thread, Thread::pending_exception_offset()),
2768             (int) NULL);
2769     __ jcc(Assembler::notEqual, L);
2770     __ should_not_reach_here();
2771     __ bind(L);
2772 #endif // ASSERT
2773     __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2774 
2775 
2776     // codeBlob framesize is in words (not VMRegImpl::slot_size)
2777     RuntimeStub* stub =
2778       RuntimeStub::new_runtime_stub(name, 
2779                                     &code,
2780                                     frame_complete, 
2781                                     (framesize >> (LogBytesPerWord - LogBytesPerInt)),
2782                                     oop_maps, false);
2783     return stub->entry_point();
2784   }
2785 
2786   // Initialization
2787   void generate_initial() {
2788     // Generates all stubs and initializes the entry points
2789 
2790     // This platform-specific stub is needed by generate_call_stub()
2791     StubRoutines::amd64::_mxcsr_std        = generate_fp_mask("mxcsr_std",        0x0000000000001F80);
2792 
2793     // entry points that exist in all platforms.  Note: This is code
2794     // that could be shared among different platforms - however the
2795     // benefit seems to be smaller than the disadvantage of having a
2796     // much more complicated generator structure. See also comment in
2797     // stubRoutines.hpp.
2798 
2799     StubRoutines::_forward_exception_entry = generate_forward_exception();
2800 
2801     StubRoutines::_call_stub_entry = 
2802       generate_call_stub(StubRoutines::_call_stub_return_address);
2803 
2804     // is referenced by megamorphic call    
2805     StubRoutines::_catch_exception_entry = generate_catch_exception();    
2806 
2807     // atomic calls
2808     StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
2809     StubRoutines::_atomic_xchg_ptr_entry     = generate_atomic_xchg_ptr();
2810     StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
2811     StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
2812     StubRoutines::_atomic_add_entry          = generate_atomic_add();
2813     StubRoutines::_atomic_add_ptr_entry      = generate_atomic_add_ptr();
2814     StubRoutines::_fence_entry               = generate_orderaccess_fence();
2815 
2816     StubRoutines::_handler_for_unsafe_access_entry =
2817       generate_handler_for_unsafe_access();
2818 
2819     // platform dependent
2820     StubRoutines::amd64::_get_previous_fp_entry = generate_get_previous_fp();
2821 
2822     StubRoutines::amd64::_verify_mxcsr_entry    = generate_verify_mxcsr();
2823   }
2824 
2825   void generate_all() {
2826     // Generates all stubs and initializes the entry points
2827     
2828     // These entry points require SharedInfo::stack0 to be set up in
2829     // non-core builds and need to be relocatable, so they each
2830     // fabricate a RuntimeStub internally.
2831     StubRoutines::_throw_AbstractMethodError_entry =
2832       generate_throw_exception("AbstractMethodError throw_exception",
2833                                CAST_FROM_FN_PTR(address, 
2834                                                 SharedRuntime::
2835                                                 throw_AbstractMethodError),
2836                                false);
2837 
2838     StubRoutines::_throw_IncompatibleClassChangeError_entry =
2839       generate_throw_exception("IncompatibleClassChangeError throw_exception",
2840                                CAST_FROM_FN_PTR(address, 
2841                                                 SharedRuntime::
2842                                                 throw_IncompatibleClassChangeError),


2854                                CAST_FROM_FN_PTR(address, 
2855                                                 SharedRuntime::
2856                                                 throw_NullPointerException),
2857                                true);
2858 
2859     StubRoutines::_throw_NullPointerException_at_call_entry =
2860       generate_throw_exception("NullPointerException at call throw_exception",
2861                                CAST_FROM_FN_PTR(address,
2862                                                 SharedRuntime::
2863                                                 throw_NullPointerException_at_call),
2864                                false);
2865 
2866     StubRoutines::_throw_StackOverflowError_entry =
2867       generate_throw_exception("StackOverflowError throw_exception",
2868                                CAST_FROM_FN_PTR(address, 
2869                                                 SharedRuntime::
2870                                                 throw_StackOverflowError),
2871                                false);
2872 
2873     // entry points that are platform specific  
2874     StubRoutines::amd64::_f2i_fixup = generate_f2i_fixup();
2875     StubRoutines::amd64::_f2l_fixup = generate_f2l_fixup();
2876     StubRoutines::amd64::_d2i_fixup = generate_d2i_fixup();
2877     StubRoutines::amd64::_d2l_fixup = generate_d2l_fixup();
2878 
2879     StubRoutines::amd64::_float_sign_mask  = generate_fp_mask("float_sign_mask",  0x7FFFFFFF7FFFFFFF);
2880     StubRoutines::amd64::_float_sign_flip  = generate_fp_mask("float_sign_flip",  0x8000000080000000);
2881     StubRoutines::amd64::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
2882     StubRoutines::amd64::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);
2883 
2884     // support for verify_oop (must happen after universe_init)
2885     StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
2886 
2887     // arraycopy stubs used by compilers
2888     generate_arraycopy_stubs();
2889   }
2890 
2891  public:
2892   StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { 
2893     if (all) {
2894       generate_all();
2895     } else {
2896       generate_initial();
2897     }
2898   }
2899 }; // end class declaration
2900 
2901 address StubGenerator::disjoint_byte_copy_entry  = NULL;
2902 address StubGenerator::disjoint_short_copy_entry = NULL;



   1 /*
   2  * Copyright 2003-2008 Sun Microsystems, Inc.  All Rights Reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  20  * CA 95054 USA or visit www.sun.com if you need additional information or
  21  * have any questions.
  22  *
  23  */
  24 
  25 #include "incls/_precompiled.incl"
  26 #include "incls/_stubGenerator_x86_64.cpp.incl"
  27 
  28 // Declaration and definition of StubGenerator (no .hpp file).
  29 // For a more detailed description of the stub routine structure
  30 // see the comment in stubRoutines.hpp
  31 
  32 #define __ _masm->
  33 #define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
  34 #define a__ ((Assembler*)_masm)->
  35 
  36 #ifdef PRODUCT
  37 #define BLOCK_COMMENT(str) /* nothing */
  38 #else
  39 #define BLOCK_COMMENT(str) __ block_comment(str)
  40 #endif
  41 
  42 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
  43 const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions
  44 
  45 // Stub Code definitions
  46 
  47 static address handle_unsafe_access() {
  48   JavaThread* thread = JavaThread::current();
  49   address pc = thread->saved_exception_pc();
  50   // pc is the instruction which we must emulate
  51   // doing a no-op is fine:  return garbage from the load
  52   // therefore, compute npc
  53   address npc = Assembler::locate_next_instruction(pc);
  54 


 194 
 195     const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
 196     const Address result        (rbp, result_off         * wordSize);
 197     const Address result_type   (rbp, result_type_off    * wordSize);
 198     const Address method        (rbp, method_off         * wordSize);
 199     const Address entry_point   (rbp, entry_point_off    * wordSize);
 200     const Address parameters    (rbp, parameters_off     * wordSize);
 201     const Address parameter_size(rbp, parameter_size_off * wordSize);
 202 
 203     // same as in generate_catch_exception()!
 204     const Address thread        (rbp, thread_off         * wordSize);
 205 
 206     const Address r15_save(rbp, r15_off * wordSize);
 207     const Address r14_save(rbp, r14_off * wordSize);
 208     const Address r13_save(rbp, r13_off * wordSize);
 209     const Address r12_save(rbp, r12_off * wordSize);
 210     const Address rbx_save(rbp, rbx_off * wordSize);
 211 
 212     // stub code
 213     __ enter();
 214     __ subptr(rsp, -rsp_after_call_off * wordSize);
 215 
 216     // save register parameters
 217 #ifndef _WIN64
 218     __ movptr(parameters,   c_rarg5); // parameters
 219     __ movptr(entry_point,  c_rarg4); // entry_point
 220 #endif
 221 
 222     __ movptr(method,       c_rarg3); // method
 223     __ movl(result_type,  c_rarg2);   // result type
 224     __ movptr(result,       c_rarg1); // result
 225     __ movptr(call_wrapper, c_rarg0); // call wrapper
 226 
 227     // save regs belonging to calling function
 228     __ movptr(rbx_save, rbx);
 229     __ movptr(r12_save, r12);
 230     __ movptr(r13_save, r13);
 231     __ movptr(r14_save, r14);
 232     __ movptr(r15_save, r15);
 233 
 234 #ifdef _WIN64
 235     const Address rdi_save(rbp, rdi_off * wordSize);
 236     const Address rsi_save(rbp, rsi_off * wordSize);
 237 
 238     __ movptr(rsi_save, rsi);
 239     __ movptr(rdi_save, rdi);
 240 #else
 241     const Address mxcsr_save(rbp, mxcsr_off * wordSize);
 242     {
 243       Label skip_ldmx;
 244       __ stmxcsr(mxcsr_save);
 245       __ movl(rax, mxcsr_save);
 246       __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
 247       ExternalAddress mxcsr_std(StubRoutines::x86::mxcsr_std());
 248       __ cmp32(rax, mxcsr_std);
 249       __ jcc(Assembler::equal, skip_ldmx);
 250       __ ldmxcsr(mxcsr_std);
 251       __ bind(skip_ldmx);
 252     }
 253 #endif
 254 
 255     // Load up thread register
 256     __ movptr(r15_thread, thread);
 257     __ reinit_heapbase();
 258 
 259 #ifdef ASSERT
 260     // make sure we have no pending exceptions
 261     {
 262       Label L;
 263       __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
 264       __ jcc(Assembler::equal, L);
 265       __ stop("StubRoutines::call_stub: entered with pending exception");
 266       __ bind(L);
 267     }
 268 #endif
 269 
 270     // pass parameters if any
 271     BLOCK_COMMENT("pass parameters if any");
 272     Label parameters_done;
 273     __ movl(c_rarg3, parameter_size);
 274     __ testl(c_rarg3, c_rarg3);
 275     __ jcc(Assembler::zero, parameters_done);
 276 
 277     Label loop;
 278     __ movptr(c_rarg2, parameters);       // parameter pointer
 279     __ movl(c_rarg1, c_rarg3);            // parameter counter is in c_rarg1
 280     __ BIND(loop);
 281     if (TaggedStackInterpreter) {
 282       __ movl(rax, Address(c_rarg2, 0)); // get tag
 283       __ addptr(c_rarg2, wordSize);      // advance to next tag
 284       __ push(rax);                      // pass tag
 285     }
 286     __ movptr(rax, Address(c_rarg2, 0));// get parameter
 287     __ addptr(c_rarg2, wordSize);       // advance to next parameter
 288     __ decrementl(c_rarg1);             // decrement counter
 289     __ push(rax);                       // pass parameter
 290     __ jcc(Assembler::notZero, loop);
 291 
 292     // call Java function
 293     __ BIND(parameters_done);
 294     __ movptr(rbx, method);             // get methodOop
 295     __ movptr(c_rarg1, entry_point);    // get entry_point
 296     __ mov(r13, rsp);                   // set sender sp
 297     BLOCK_COMMENT("call Java function");
 298     __ call(c_rarg1);
 299 
 300     BLOCK_COMMENT("call_stub_return_address:");
 301     return_address = __ pc();
 302 
 303     // store result depending on type (everything that is not
 304     // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
 305     __ movptr(c_rarg0, result);
 306     Label is_long, is_float, is_double, exit;
 307     __ movl(c_rarg1, result_type);
 308     __ cmpl(c_rarg1, T_OBJECT);
 309     __ jcc(Assembler::equal, is_long);
 310     __ cmpl(c_rarg1, T_LONG);
 311     __ jcc(Assembler::equal, is_long);
 312     __ cmpl(c_rarg1, T_FLOAT);
 313     __ jcc(Assembler::equal, is_float);
 314     __ cmpl(c_rarg1, T_DOUBLE);
 315     __ jcc(Assembler::equal, is_double);
 316 
 317     // handle T_INT case
 318     __ movl(Address(c_rarg0, 0), rax);
 319 
 320     __ BIND(exit);
 321 
 322     // pop parameters
 323     __ lea(rsp, rsp_after_call);
 324 
 325 #ifdef ASSERT
 326     // verify that threads correspond
 327     {
 328       Label L, S;
 329       __ cmpptr(r15_thread, thread);
 330       __ jcc(Assembler::notEqual, S);
 331       __ get_thread(rbx);
 332       __ cmpptr(r15_thread, rbx);
 333       __ jcc(Assembler::equal, L);
 334       __ bind(S);
 335       __ jcc(Assembler::equal, L);
 336       __ stop("StubRoutines::call_stub: threads must correspond");
 337       __ bind(L);
 338     }
 339 #endif
 340 
 341     // restore regs belonging to calling function
 342     __ movptr(r15, r15_save);
 343     __ movptr(r14, r14_save);
 344     __ movptr(r13, r13_save);
 345     __ movptr(r12, r12_save);
 346     __ movptr(rbx, rbx_save);
 347 
 348 #ifdef _WIN64
 349     __ movptr(rdi, rdi_save);
 350     __ movptr(rsi, rsi_save);
 351 #else
 352     __ ldmxcsr(mxcsr_save);
 353 #endif
 354 
 355     // restore rsp
 356     __ addptr(rsp, -rsp_after_call_off * wordSize);
 357 
 358     // return
 359     __ pop(rbp);
 360     __ ret(0);
 361 
 362     // handle return types different from T_INT
 363     __ BIND(is_long);
 364     __ movq(Address(c_rarg0, 0), rax);
 365     __ jmp(exit);
 366 
 367     __ BIND(is_float);
 368     __ movflt(Address(c_rarg0, 0), xmm0);
 369     __ jmp(exit);
 370 
 371     __ BIND(is_double);
 372     __ movdbl(Address(c_rarg0, 0), xmm0);
 373     __ jmp(exit);
 374 
 375     return start;
 376   }
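
  // A minimal sketch (illustrative only) of the result-store dispatch above:
  // anything that is not T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is stored as a
  // 32-bit T_INT; floating values arrive in xmm0, the rest in rax.
  static void store_result_sketch(void* result, BasicType type,
                                  jlong rax_value, jdouble xmm0_value) {
    switch (type) {
    case T_OBJECT:
    case T_LONG:   *(jlong*)   result = rax_value;            break;
    case T_FLOAT:  *(jfloat*)  result = (jfloat) xmm0_value;  break;
    case T_DOUBLE: *(jdouble*) result = xmm0_value;           break;
    default:       *(jint*)    result = (jint) rax_value;     break; // T_INT
    }
  }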
 377 
 378   // Return point for a Java call if there's an exception thrown in
 379   // Java code.  The exception is caught and transformed into a


 382   //
 383   // Note: Usually the parameters are removed by the callee. In case
 384   // of an exception crossing an activation frame boundary, that is
 385   // not the case if the callee is compiled code => need to setup the
 386   // rsp.
 387   //
 388   // rax: exception oop
 389 
 390   address generate_catch_exception() {
 391     StubCodeMark mark(this, "StubRoutines", "catch_exception");
 392     address start = __ pc();
 393 
 394     // same as in generate_call_stub():
 395     const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
 396     const Address thread        (rbp, thread_off         * wordSize);
 397 
 398 #ifdef ASSERT
 399     // verify that threads correspond
 400     {
 401       Label L, S;
 402       __ cmpptr(r15_thread, thread);
 403       __ jcc(Assembler::notEqual, S);
 404       __ get_thread(rbx);
 405       __ cmpptr(r15_thread, rbx);
 406       __ jcc(Assembler::equal, L);
 407       __ bind(S);
 408       __ stop("StubRoutines::catch_exception: threads must correspond");
 409       __ bind(L);
 410     }
 411 #endif
 412 
 413     // set pending exception
 414     __ verify_oop(rax);
 415 
 416     __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
 417     __ lea(rscratch1, ExternalAddress((address)__FILE__));
 418     __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
 419     __ movl(Address(r15_thread, Thread::exception_line_offset()), (int)  __LINE__);
 420 
 421     // complete return to VM
 422     assert(StubRoutines::_call_stub_return_address != NULL,
 423            "_call_stub_return_address must have been generated before");
 424     __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));
 425 
 426     return start;
 427   }
 428 
 429   // Continuation point for runtime calls returning with a pending
 430   // exception.  The pending exception check happened in the runtime
 431   // or native call stub.  The pending exception in Thread is
 432   // converted into a Java-level exception.
 433   //
 434   // Contract with Java-level exception handlers:
 435   // rax: exception
 436   // rdx: throwing pc
 437   //
 438   // NOTE: At entry of this stub, exception-pc must be on stack !!
 439 
 440   address generate_forward_exception() {
 441     StubCodeMark mark(this, "StubRoutines", "forward exception");
 442     address start = __ pc();
 443 
 444     // Upon entry, the sp points to the return address returning into
 445     // Java (interpreted or compiled) code; i.e., the return address
 446     // becomes the throwing pc.
 447     //
 448     // Arguments pushed before the runtime call are still on the stack
 449     // but the exception handler will reset the stack pointer ->
 450     // ignore them.  A potential result in registers can be ignored as
 451     // well.
 452 
 453 #ifdef ASSERT
 454     // make sure this code is only executed if there is a pending exception
 455     {
 456       Label L;
 457       __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL);
 458       __ jcc(Assembler::notEqual, L);
 459       __ stop("StubRoutines::forward exception: no pending exception (1)");
 460       __ bind(L);
 461     }
 462 #endif
 463 
 464     // compute exception handler into rbx
 465     __ movptr(c_rarg0, Address(rsp, 0));
 466     BLOCK_COMMENT("call exception_handler_for_return_address");
 467     __ call_VM_leaf(CAST_FROM_FN_PTR(address,
 468                          SharedRuntime::exception_handler_for_return_address),
 469                     c_rarg0);
 470     __ mov(rbx, rax);
 471 
 472     // setup rax & rdx, remove return address & clear pending exception
 473     __ pop(rdx);
 474     __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
 475     __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int)NULL_WORD);
 476 
 477 #ifdef ASSERT
 478     // make sure exception is set
 479     {
 480       Label L;
 481       __ testptr(rax, rax);
 482       __ jcc(Assembler::notEqual, L);
 483       __ stop("StubRoutines::forward exception: no pending exception (2)");
 484       __ bind(L);
 485     }
 486 #endif
 487 
 488     // continue at exception handler (return address removed)
 489     // rax: exception
 490     // rbx: exception handler
 491     // rdx: throwing pc
 492     __ verify_oop(rax);
 493     __ jmp(rbx);
 494 
 495     return start;
 496   }
 497 
 498   // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
 499   //
 500   // Arguments :
 501   //    c_rarg0: exchange_value
 502   //    c_rarg1: dest
 503   //
 504   // Result:
 505   //    *dest <- exchange_value, return (orig *dest)
 506   address generate_atomic_xchg() {
 507     StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
 508     address start = __ pc();
 509 
 510     __ movl(rax, c_rarg0); // Copy to eax; we need a return value anyhow
 511     __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
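         // (xchg with a memory operand asserts LOCK semantics
         // implicitly on x86, hence no explicit lock prefix.)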
 512     __ ret(0);
 513 
 514     return start;
 515   }
 516 
 517   // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
 518   //
 519   // Arguments :
 520   //    c_rarg0: exchange_value
 521   //    c_rarg1: dest
 522   //
 523   // Result:
 524   //    *dest <- exchange_value, return (orig *dest)
 525   address generate_atomic_xchg_ptr() {
 526     StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
 527     address start = __ pc();
 528 
 529     __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
 530     __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
 531     __ ret(0);
 532 
 533     return start;
 534   }
 535 
 536   // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
 537   //                                         jint compare_value)
 538   //
 539   // Arguments :
 540   //    c_rarg0: exchange_value
 541   //    c_rarg1: dest
 542   //    c_rarg2: compare_value
 543   //
 544   // Result:
 545   //    if (compare_value == *dest) {
 546   //       *dest = exchange_value;
 547   //       return compare_value;
 548   //    } else {
 549   //       return *dest;
       //    }
 550   address generate_atomic_cmpxchg() {


 603     __ xaddl(Address(c_rarg1, 0), c_rarg0);
 604     __ addl(rax, c_rarg0);
 605     __ ret(0);
 606 
 607     return start;
 608   }
 609 
 610   // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
 611   //
 612   // Arguments :
 613   //    c_rarg0: add_value
 614   //    c_rarg1: dest
 615   //
 616   // Result:
 617   //    *dest += add_value
 618   //    return *dest;
 619   address generate_atomic_add_ptr() {
 620     StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
 621     address start = __ pc();
 622 
 623     __ movptr(rax, c_rarg0); // Copy to eax; we need a return value anyhow
 624     if (os::is_MP()) __ lock();
 625     __ xaddptr(Address(c_rarg1, 0), c_rarg0);
 626     __ addptr(rax, c_rarg0);
 627     __ ret(0);
 628 
 629     return start;
 630   }
 631 
 632   // Support for intptr_t OrderAccess::fence()
 633   //
 634   // Arguments :
 635   //
 636   // Result:
 637   address generate_orderaccess_fence() {
 638     StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
 639     address start = __ pc();
 640     __ mfence();
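         // (mfence is a full two-way barrier: every load and store
         // issued before it completes before any load or store issued
         // after it, which is what OrderAccess::fence() promises.)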
 641     __ ret(0);
 642 
 643     return start;
 644   }
 645 
 646   // Support for intptr_t get_previous_fp()
 647   //
 648   // This routine is used to find the previous frame pointer for the
 649   // caller (current_frame_guess). This is used as part of debugging,
 650   // when ps() is seemingly lost trying to find frames.
 651   // This code assumes that the caller (current_frame_guess) has a frame.
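       //
       // A sketch of the two dereferences performed, assuming standard
       // x86-64 frames where [rbp] holds the saved caller rbp:
       //   rax = *rbp;   // old_fp:   the caller's frame pointer
       //   rax = *rax;   // older_fp: the caller's caller's frame pointer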
 652   address generate_get_previous_fp() {
 653     StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
 654     const Address old_fp(rbp, 0);
 655     const Address older_fp(rax, 0);
 656     address start = __ pc();
 657 
 658     __ enter();
 659     __ movptr(rax, old_fp); // caller's fp
 660     __ movptr(rax, older_fp); // the frame for ps()
 661     __ pop(rbp);
 662     __ ret(0);
 663 
 664     return start;
 665   }
 666 
 667   //----------------------------------------------------------------------------------------------------
 668   // Support for void verify_mxcsr()
 669   //
 670   // This routine is used with -Xcheck:jni to verify that native
 671   // JNI code does not return to Java code without restoring the
 672   // MXCSR register to our expected state.
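       //
       // MXCSR_MASK (0xFFC0) clears bits 0-5, the sticky exception
       // status flags, so only the control and mask bits take part in
       // the comparison against the saved standard value.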
 673 
 674   address generate_verify_mxcsr() {
 675     StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
 676     address start = __ pc();
 677 
 678     const Address mxcsr_save(rsp, 0);
 679 
 680     if (CheckJNICalls) {
 681       Label ok_ret;
 682       __ push(rax);
 683       __ subptr(rsp, wordSize);      // allocate a temp location
 684       __ stmxcsr(mxcsr_save);
 685       __ movl(rax, mxcsr_save);
 686       __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
 687       __ cmpl(rax, *(int *)(StubRoutines::x86::mxcsr_std()));
 688       __ jcc(Assembler::equal, ok_ret);
 689 
 690       __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");
 691 
 692       __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
 693 
 694       __ bind(ok_ret);
 695       __ addptr(rsp, wordSize);
 696       __ pop(rax);
 697     }
 698 
 699     __ ret(0);
 700 
 701     return start;
 702   }
 703 
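       // cvttss2si returns the "integer indefinite" value 0x80000000 on
       // overflow or NaN; the fixup below rewrites that to Java
       // semantics.  For example (Java rules, as a sketch):
       //   (int) Float.NaN               == 0
       //   (int) Float.POSITIVE_INFINITY == Integer.MAX_VALUE (0x7fffffff)
       //   (int)(-Float.MAX_VALUE)       == Integer.MIN_VALUE (0x80000000)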
 704   address generate_f2i_fixup() {
 705     StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
 706     Address inout(rsp, 5 * wordSize); // return address + 4 saves
 707 
 708     address start = __ pc();
 709 
 710     Label L;
 711 
 712     __ push(rax);
 713     __ push(c_rarg3);
 714     __ push(c_rarg2);
 715     __ push(c_rarg1);
 716 
 717     __ movl(rax, 0x7f800000);
 718     __ xorl(c_rarg3, c_rarg3);
 719     __ movl(c_rarg2, inout);
 720     __ movl(c_rarg1, c_rarg2);
 721     __ andl(c_rarg1, 0x7fffffff);
 722     __ cmpl(rax, c_rarg1); // NaN? -> 0
 723     __ jcc(Assembler::negative, L);
 724     __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
 725     __ movl(c_rarg3, 0x80000000);
 726     __ movl(rax, 0x7fffffff);
 727     __ cmovl(Assembler::positive, c_rarg3, rax);
 728 
 729     __ bind(L);
 730     __ movptr(inout, c_rarg3);
 731 
 732     __ pop(c_rarg1);
 733     __ pop(c_rarg2);
 734     __ pop(c_rarg3);
 735     __ pop(rax);
 736 
 737     __ ret(0);
 738 
 739     return start;
 740   }
 741 
 742   address generate_f2l_fixup() {
 743     StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
 744     Address inout(rsp, 5 * wordSize); // return address + 4 saves
 745     address start = __ pc();
 746 
 747     Label L;
 748 
 749     __ push(rax);
 750     __ push(c_rarg3);
 751     __ push(c_rarg2);
 752     __ push(c_rarg1);
 753 
 754     __ movl(rax, 0x7f800000);
 755     __ xorl(c_rarg3, c_rarg3);
 756     __ movl(c_rarg2, inout);
 757     __ movl(c_rarg1, c_rarg2);
 758     __ andl(c_rarg1, 0x7fffffff);
 759     __ cmpl(rax, c_rarg1); // NaN? -> 0
 760     __ jcc(Assembler::negative, L);
 761     __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
 762     __ mov64(c_rarg3, 0x8000000000000000);
 763     __ mov64(rax, 0x7fffffffffffffff);
 764     __ cmov(Assembler::positive, c_rarg3, rax);
 765 
 766     __ bind(L);
 767     __ movptr(inout, c_rarg3);
 768 
 769     __ pop(c_rarg1);
 770     __ pop(c_rarg2);
 771     __ pop(c_rarg3);
 772     __ pop(rax);
 773 
 774     __ ret(0);
 775 
 776     return start;
 777   }
 778 
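       // For doubles the NaN test needs both 32-bit halves: the
       // neg/or/shr-31 sequence below folds "low word != 0" into a
       // single bit, which is OR-ed into the masked high word, so the
       // result compares above 0x7ff00000 only for NaNs (exponent all
       // ones and a non-zero mantissa).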
 779   address generate_d2i_fixup() {
 780     StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
 781     Address inout(rsp, 6 * wordSize); // return address + 5 saves
 782 
 783     address start = __ pc();
 784 
 785     Label L;
 786 
 787     __ push(rax);
 788     __ push(c_rarg3);
 789     __ push(c_rarg2);
 790     __ push(c_rarg1);
 791     __ push(c_rarg0);
 792 
 793     __ movl(rax, 0x7ff00000);
 794     __ movq(c_rarg2, inout);
 795     __ movl(c_rarg3, c_rarg2);
 796     __ mov(c_rarg1, c_rarg2);
 797     __ mov(c_rarg0, c_rarg2);
 798     __ negl(c_rarg3);
 799     __ shrptr(c_rarg1, 0x20);
 800     __ orl(c_rarg3, c_rarg2);
 801     __ andl(c_rarg1, 0x7fffffff);
 802     __ xorl(c_rarg2, c_rarg2);
 803     __ shrl(c_rarg3, 0x1f);
 804     __ orl(c_rarg1, c_rarg3);
 805     __ cmpl(rax, c_rarg1);
 806     __ jcc(Assembler::negative, L); // NaN -> 0
 807     __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
 808     __ movl(c_rarg2, 0x80000000);
 809     __ movl(rax, 0x7fffffff);
 810     __ cmov(Assembler::positive, c_rarg2, rax);
 811 
 812     __ bind(L);
 813     __ movptr(inout, c_rarg2);
 814 
 815     __ pop(c_rarg0);
 816     __ pop(c_rarg1);
 817     __ pop(c_rarg2);
 818     __ pop(c_rarg3);
 819     __ pop(rax);
 820 
 821     __ ret(0);
 822 
 823     return start;
 824   }
 825 
 826   address generate_d2l_fixup() {
 827     StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
 828     Address inout(rsp, 6 * wordSize); // return address + 5 saves
 829 
 830     address start = __ pc();
 831 
 832     Label L;
 833 
 834     __ push(rax);
 835     __ push(c_rarg3);
 836     __ push(c_rarg2);
 837     __ push(c_rarg1);
 838     __ push(c_rarg0);
 839 
 840     __ movl(rax, 0x7ff00000);
 841     __ movq(c_rarg2, inout);
 842     __ movl(c_rarg3, c_rarg2);
 843     __ mov(c_rarg1, c_rarg2);
 844     __ mov(c_rarg0, c_rarg2);
 845     __ negl(c_rarg3);
 846     __ shrptr(c_rarg1, 0x20);
 847     __ orl(c_rarg3, c_rarg2);
 848     __ andl(c_rarg1, 0x7fffffff);
 849     __ xorl(c_rarg2, c_rarg2);
 850     __ shrl(c_rarg3, 0x1f);
 851     __ orl(c_rarg1, c_rarg3);
 852     __ cmpl(rax, c_rarg1);
 853     __ jcc(Assembler::negative, L); // NaN -> 0
 854     __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong
 855     __ mov64(c_rarg2, 0x8000000000000000);
 856     __ mov64(rax, 0x7fffffffffffffff);
 857     __ cmovq(Assembler::positive, c_rarg2, rax);
 858 
 859     __ bind(L);
 860     __ movq(inout, c_rarg2);
 861 
 862     __ pop(c_rarg0);
 863     __ pop(c_rarg1);
 864     __ pop(c_rarg2);
 865     __ pop(c_rarg3);
 866     __ pop(rax);
 867 
 868     __ ret(0);
 869 
 870     return start;
 871   }
 872 
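       // Emit a 16-byte mask constant for use as a memory operand of SSE
       // logic instructions; e.g. a sketch of negating a float with a
       // sign-flip mask (0x8000000080000000 in each 64-bit half):
       //   __ xorps(xmm0, ExternalAddress(float_sign_flip));  // hypothetical label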
 873   address generate_fp_mask(const char *stub_name, int64_t mask) {
 874     StubCodeMark mark(this, "StubRoutines", stub_name);
 875 
 876     __ align(16);
 877     address start = __ pc();
 878 
 879     __ emit_data64(mask, relocInfo::none);
 880     __ emit_data64(mask, relocInfo::none);
 881 
 882     return start;
 883   }
 884 
 885   // The following routine generates a subroutine to throw an
 886   // asynchronous UnknownError when an unsafe access gets a fault that
 887   // could not be reasonably prevented by the programmer.  (Example:
 888   // SIGBUS/OBJERR.)
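       //
       // The trick used below: reserve a stack slot where a return
       // address would go, call the C++ handler (which computes the
       // address of the next instruction), store that address into the
       // reserved slot, and let 'ret' pop it, effectively jumping to
       // the next instruction.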
 889   address generate_handler_for_unsafe_access() {
 890     StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
 891     address start = __ pc();
 892 
 893     __ push(0);                       // hole for return address-to-be
 894     __ pusha();                       // push registers
 895     Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);
 896 
 897     __ subptr(rsp, frame::arg_reg_save_area_bytes);
 898     BLOCK_COMMENT("call handle_unsafe_access");
 899     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
 900     __ addptr(rsp, frame::arg_reg_save_area_bytes);
 901 
 902     __ movptr(next_pc, rax);          // stuff next address
 903     __ popa();
 904     __ ret(0);                        // jump to next address
 905 
 906     return start;
 907   }
 908 
 909   // Non-destructive plausibility checks for oops
 910   //
 911   // Arguments:
 912   //    all args on stack!
 913   //
 914   // Stack after saving c_rarg3:
 915   //    [tos + 0]: saved c_rarg3
 916   //    [tos + 1]: saved c_rarg2
 917   //    [tos + 2]: saved r12 (several TemplateTable methods use it)
 918   //    [tos + 3]: saved flags
 919   //    [tos + 4]: return address
 920   //  * [tos + 5]: error message (char*)
 921   //  * [tos + 6]: object to verify (oop)
 922   //  * [tos + 7]: saved rax - saved by caller and bashed
 923   //  * = popped on exit
 924   address generate_verify_oop() {
 925     StubCodeMark mark(this, "StubRoutines", "verify_oop");
 926     address start = __ pc();
 927 
 928     Label exit, error;
 929 
 930     __ pushf();
 931     __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));
 932 
 933     __ push(r12);
 934 
 935     // save c_rarg2 and c_rarg3
 936     __ push(c_rarg2);
 937     __ push(c_rarg3);
 938 
 939     enum {
 940            // After previous pushes.
 941            oop_to_verify = 6 * wordSize,
 942            saved_rax     = 7 * wordSize,
 943 
 944            // Before the call to MacroAssembler::debug(), see below.
 945            return_addr   = 16 * wordSize,
 946            error_msg     = 17 * wordSize
 947     };
 948 
 949     // get object
 950     __ movptr(rax, Address(rsp, oop_to_verify));
 951 
 952     // make sure object is 'reasonable'
 953     __ testptr(rax, rax);
 954     __ jcc(Assembler::zero, exit); // if obj is NULL it is OK
 955     // Check if the oop is in the right area of memory
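         // i.e. accept the oop only if (oop & verify_oop_mask()) ==
         // verify_oop_bits(): a cheap range/alignment filter, not an
         // exact heap-membership test.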
 956     __ movptr(c_rarg2, rax);
 957     __ movptr(c_rarg3, (int64_t) Universe::verify_oop_mask());
 958     __ andptr(c_rarg2, c_rarg3);
 959     __ movptr(c_rarg3, (int64_t) Universe::verify_oop_bits());
 960     __ cmpptr(c_rarg2, c_rarg3);
 961     __ jcc(Assembler::notZero, error);
 962 
 963     // set r12 to heapbase for load_klass()
 964     __ reinit_heapbase();
 965 
 966     // make sure klass is 'reasonable'
 967     __ load_klass(rax, rax);  // get klass
 968     __ testptr(rax, rax);
 969     __ jcc(Assembler::zero, error); // if klass is NULL it is broken
 970     // Check if the klass is in the right area of memory
 971     __ mov(c_rarg2, rax);
 972     __ movptr(c_rarg3, (int64_t) Universe::verify_klass_mask());
 973     __ andptr(c_rarg2, c_rarg3);
 974     __ movptr(c_rarg3, (int64_t) Universe::verify_klass_bits());
 975     __ cmpptr(c_rarg2, c_rarg3);
 976     __ jcc(Assembler::notZero, error);
 977 
 978     // make sure klass' klass is 'reasonable'
 979     __ load_klass(rax, rax);
 980     __ testptr(rax, rax);
 981     __ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken
 982     // Check if the klass' klass is in the right area of memory
 983     __ movptr(c_rarg3, (int64_t) Universe::verify_klass_mask());
 984     __ andptr(rax, c_rarg3);
 985     __ movptr(c_rarg3, (int64_t) Universe::verify_klass_bits());
 986     __ cmpptr(rax, c_rarg3);
 987     __ jcc(Assembler::notZero, error);
 988 
 989     // return if everything seems ok
 990     __ bind(exit);
 991     __ movptr(rax, Address(rsp, saved_rax));     // get saved rax back
 992     __ pop(c_rarg3);                             // restore c_rarg3
 993     __ pop(c_rarg2);                             // restore c_rarg2
 994     __ pop(r12);                                 // restore r12
 995     __ popf();                                   // restore flags
 996     __ ret(3 * wordSize);                        // pop caller saved stuff
 997 
 998     // handle errors
 999     __ bind(error);
1000     __ movptr(rax, Address(rsp, saved_rax));     // get saved rax back
1001     __ pop(c_rarg3);                             // get saved c_rarg3 back
1002     __ pop(c_rarg2);                             // get saved c_rarg2 back
1003     __ pop(r12);                                 // get saved r12 back
1004     __ popf();                                   // get saved flags off stack --
1005                                                  // will be ignored
1006 
1007     __ pusha();                                  // push registers
1008                                                  // (rip is already
1009                                                  // pushed)
1010     // debug(char* msg, int64_t pc, int64_t regs[])
1011     // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
1012     // pushed all the registers, so now the stack looks like:
1013     //     [tos +  0] 16 saved registers
1014     //     [tos + 16] return address
1015     //   * [tos + 17] error message (char*)
1016     //   * [tos + 18] object to verify (oop)
1017     //   * [tos + 19] saved rax - saved by caller and bashed
1018     //   * = popped on exit
1019 
1020     __ movptr(c_rarg0, Address(rsp, error_msg));    // pass address of error message
1021     __ movptr(c_rarg1, Address(rsp, return_addr));  // pass return address
1022     __ movq(c_rarg2, rsp);                          // pass address of regs on stack
1023     __ mov(r12, rsp);                               // remember rsp
1024     __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
1025     __ andptr(rsp, -16);                            // align stack as required by ABI
1026     BLOCK_COMMENT("call MacroAssembler::debug");
1027     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
1028     __ mov(rsp, r12);                               // restore rsp
1029     __ popa();                                      // pop registers (includes r12)
1030     __ ret(3 * wordSize);                           // pop caller saved stuff
1031 
1032     return start;
1033   }
1034 
1035   static address disjoint_byte_copy_entry;
1036   static address disjoint_short_copy_entry;
1037   static address disjoint_int_copy_entry;
1038   static address disjoint_long_copy_entry;
1039   static address disjoint_oop_copy_entry;
1040 
1041   static address byte_copy_entry;
1042   static address short_copy_entry;
1043   static address int_copy_entry;
1044   static address long_copy_entry;
1045   static address oop_copy_entry;
1046 
1047   static address checkcast_copy_entry;
1048 
1049   //
1050   // Verify that a register contains a clean 32-bit positive value
1051   // (high 32 bits are 0) so it can be used in 64-bit shifts.
1052   //
1053   //  Input:
1054   //    Rint  -  32-bit value
1055   //    Rtmp  -  scratch
1056   //
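       //  E.g. a clean 0x000000007fffffff passes, while a stale
       //  0xdeadbeef00000001 fails: movslq re-extends the low half and
       //  the 64-bit compare spots the difference.
       //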
1057   void assert_clean_int(Register Rint, Register Rtmp) {
1058 #ifdef ASSERT
1059     Label L;
1060     assert_different_registers(Rtmp, Rint);
1061     __ movslq(Rtmp, Rint);
1062     __ cmpq(Rtmp, Rint);
1063     __ jcc(Assembler::equal, L);
1064     __ stop("high 32-bits of int value are not 0");
1065     __ bind(L);
1066 #endif
1067   }
1068 
1069   //  Generate overlap test for array copy stubs
1070   //
1071   //  Input:
1072   //     c_rarg0 - from
1073   //     c_rarg1 - to
1074   //     c_rarg2 - element count
1075   //
1076   //  Output:
 1077   //     rax   - &from[element count]
1078   //
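       //  The arrays overlap in the sense that matters for a forward
       //  copy only when from < to < &from[element count]; i.e. the
       //  branches below implement (a sketch):
       //    if (to <= from || to >= from + count*elem_size)  goto no_overlap;
       //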
1079   void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
1080     assert(no_overlap_target != NULL, "must be generated");
1081     array_overlap_test(no_overlap_target, NULL, sf);
1082   }
1083   void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
1084     array_overlap_test(NULL, &L_no_overlap, sf);
1085   }
1086   void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
1087     const Register from     = c_rarg0;
1088     const Register to       = c_rarg1;
1089     const Register count    = c_rarg2;
1090     const Register end_from = rax;
1091 
1092     __ cmpptr(to, from);
1093     __ lea(end_from, Address(from, count, sf, 0));
1094     if (NOLp == NULL) {
1095       ExternalAddress no_overlap(no_overlap_target);
1096       __ jump_cc(Assembler::belowEqual, no_overlap);
1097       __ cmpptr(to, end_from);
1098       __ jump_cc(Assembler::aboveEqual, no_overlap);
1099     } else {
1100       __ jcc(Assembler::belowEqual, (*NOLp));
1101       __ cmpptr(to, end_from);
1102       __ jcc(Assembler::aboveEqual, (*NOLp));
1103     }
1104   }
1105 
1106   // Shuffle the first three arg regs on Windows into Linux/Solaris locations.
1107   //
1108   // Outputs:
1109   //    rdi <- rcx (c_rarg0)
1110   //    rsi <- rdx (c_rarg1)
1111   //    rdx <- r8  (c_rarg2)
1112   //    rcx <- r9  (c_rarg3)
1113   //
1114   // Registers r9 and r10 are used to save rdi and rsi on Windows, where the
1115   // latter two are non-volatile.  r9 and r10 should not be used by the caller.
1116   //
1117   void setup_arg_regs(int nargs = 3) {
1118     const Register saved_rdi = r9;
1119     const Register saved_rsi = r10;
1120     assert(nargs == 3 || nargs == 4, "else fix");
1121 #ifdef _WIN64
1122     assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
1123            "unexpected argument registers");
1124     if (nargs >= 4)
1125       __ mov(rax, r9);  // r9 is also saved_rdi
1126     __ movptr(saved_rdi, rdi);
1127     __ movptr(saved_rsi, rsi);
1128     __ mov(rdi, rcx); // c_rarg0
1129     __ mov(rsi, rdx); // c_rarg1
1130     __ mov(rdx, r8);  // c_rarg2
1131     if (nargs >= 4)
1132       __ mov(rcx, rax); // c_rarg3 (via rax)
1133 #else
1134     assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
1135            "unexpected argument registers");
1136 #endif
1137   }
1138 
1139   void restore_arg_regs() {
1140     const Register saved_rdi = r9;
1141     const Register saved_rsi = r10;
1142 #ifdef _WIN64
1143     __ movptr(rdi, saved_rdi);
1144     __ movptr(rsi, saved_rsi);
1145 #endif
1146   }
1147 
1148   // Generate code for an array write pre barrier
1149   //
1150   //     addr    -  starting address
1151   //     count    -  element count
1152   //
1153   //     Destroy no registers!
1154   //
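       //  The G1 case must marshal (addr, count) into (c_rarg0, c_rarg1).
       //  When the values arrive exactly swapped, a plain pair of moves
       //  would clobber one of them first, hence the xchgptr below.
       //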
1155   void  gen_write_ref_array_pre_barrier(Register addr, Register count) {



1156     BarrierSet* bs = Universe::heap()->barrier_set();
1157     switch (bs->kind()) {
1158       case BarrierSet::G1SATBCT:
1159       case BarrierSet::G1SATBCTLogging:
1160         {
1161           __ pusha();                      // push registers
1162           if (count == c_rarg0) {
1163             if (addr == c_rarg1) {
1164               // exactly backwards!!
1165               __ xchgptr(c_rarg1, c_rarg0);
1166             } else {
1167               __ movptr(c_rarg1, count);
1168               __ movptr(c_rarg0, addr);
1169             }
1170 
1171           } else {
1172             __ movptr(c_rarg0, addr);
1173             __ movptr(c_rarg1, count);
1174           }
1175           __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)));
1176           __ popa();
1177         }
1178         break;
1179       case BarrierSet::CardTableModRef:
1180       case BarrierSet::CardTableExtension:
1181       case BarrierSet::ModRef:
1182         break;
1183       default:
1184         ShouldNotReachHere();
1185 
1186     }

1187   }
1188 
1189   //
1190   // Generate code for an array write post barrier
1191   //
1192   //  Input:
1193   //     start    - register containing starting address of destination array
1194   //     end      - register containing ending address of destination array
1195   //     scratch  - scratch register
1196   //
1197   //  The input registers are overwritten.
1198   //  The ending address is inclusive.
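       //
       //  For card-table barriers, a sketch of the code generated below
       //  (each card covers 2^card_shift bytes of heap):
       //    for (card = start >> card_shift; card <= end >> card_shift; card++)
       //      byte_map_base[card] = 0;   // mark card dirty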
1199   void  gen_write_ref_array_post_barrier(Register start, Register end, Register scratch) {
1200     assert_different_registers(start, end, scratch);
1201     BarrierSet* bs = Universe::heap()->barrier_set();
1202     switch (bs->kind()) {

1203       case BarrierSet::G1SATBCT:
1204       case BarrierSet::G1SATBCTLogging:
1205 
1206         {
1207           __ pusha();                      // push registers (overkill)
1208           // must compute element count unless barrier set interface is changed (other platforms supply count)
1209           assert_different_registers(start, end, scratch);
1210           __ lea(scratch, Address(end, wordSize));
1211           __ subptr(scratch, start);
1212           __ shrptr(scratch, LogBytesPerWord);
1213           __ mov(c_rarg0, start);
1214           __ mov(c_rarg1, scratch);
1215           __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)));
1216           __ popa();
1217         }
1218         break;

1219       case BarrierSet::CardTableModRef:
1220       case BarrierSet::CardTableExtension:
1221         {
1222           CardTableModRefBS* ct = (CardTableModRefBS*)bs;
1223           assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
1224 
1225           Label L_loop;
1226 
1227           __ shrptr(start, CardTableModRefBS::card_shift);
1228           __ shrptr(end,   CardTableModRefBS::card_shift);
1229           __ subptr(end, start); // number of bytes to copy
1230 
1231           intptr_t disp = (intptr_t) ct->byte_map_base;
1232           if (__ is_simm32(disp)) {
1233             Address cardtable(noreg, noreg, Address::no_scale, disp);
1234             __ lea(scratch, cardtable);
1235           } else {
1236             ExternalAddress cardtable((address)disp);
1237             __ lea(scratch, cardtable);
1238           }
1239 
 1240           const Register count = end; // 'end' register contains the byte count now
1241           __ addptr(start, scratch);

1242         __ BIND(L_loop);
1243           __ movb(Address(start, count, Address::times_1), 0);
1244           __ decrement(count);
1245           __ jcc(Assembler::greaterEqual, L_loop);
1246         }
1247         break;
1248       default:
1249         ShouldNotReachHere();
1250 
1251     }
1252   }
1253 
1254 
1255   // Copy big chunks forward
1256   //
1257   // Inputs:
1258   //   end_from     - source array end address
1259   //   end_to       - destination array end address
1260   //   qword_count  - 64-bit element count, negative
1261   //   to           - scratch
1262   //   L_copy_32_bytes - entry label
1263   //   L_copy_8_bytes  - exit  label
1264   //
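       //  A sketch of the control flow: qword_count starts negative and
       //  counts up toward zero, with all addresses end-relative:
       //    while ((qword_count += 4) <= 0)  copy 32 bytes;
       //    qword_count -= 4;                // undo the last bump
       //    if (qword_count < 0)             copy trailing qwords singly;
       //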
1265   void copy_32_bytes_forward(Register end_from, Register end_to,
1266                              Register qword_count, Register to,
1267                              Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
1268     DEBUG_ONLY(__ stop("enter at entry label, not here"));
1269     Label L_loop;
1270     __ align(16);
1271   __ BIND(L_loop);
 1272     if (UseUnalignedLoadStores) {
1273       __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
1274       __ movdqu(Address(end_to, qword_count, Address::times_8, -24), xmm0);
1275       __ movdqu(xmm1, Address(end_from, qword_count, Address::times_8, - 8));
1276       __ movdqu(Address(end_to, qword_count, Address::times_8, - 8), xmm1);
1277 
1278     } else {
1279       __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
1280       __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
1281       __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
1282       __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
1283       __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
1284       __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
1285       __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
1286       __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);
1287     }
1288   __ BIND(L_copy_32_bytes);
1289     __ addptr(qword_count, 4);
1290     __ jcc(Assembler::lessEqual, L_loop);
1291     __ subptr(qword_count, 4);
1292     __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
1293   }
1294 
1295 
1296   // Copy big chunks backward
1297   //
1298   // Inputs:
1299   //   from         - source array address
1300   //   dest         - destination array address
1301   //   qword_count  - 64-bit element count
1302   //   to           - scratch
1303   //   L_copy_32_bytes - entry label
1304   //   L_copy_8_bytes  - exit  label
1305   //
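       //  Mirror image of the forward copy: qword_count counts down
       //  toward zero and addresses are base-relative (a sketch):
       //    while ((qword_count -= 4) >= 0)  copy 32 bytes;
       //    qword_count += 4;                // undo the last decrement
       //    if (qword_count > 0)             copy trailing qwords singly;
       //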
1306   void copy_32_bytes_backward(Register from, Register dest,
1307                               Register qword_count, Register to,
1308                               Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
1309     DEBUG_ONLY(__ stop("enter at entry label, not here"));
1310     Label L_loop;
1311     __ align(16);
1312   __ BIND(L_loop);
 1313     if (UseUnalignedLoadStores) {
1314       __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
1315       __ movdqu(Address(dest, qword_count, Address::times_8, 16), xmm0);
1316       __ movdqu(xmm1, Address(from, qword_count, Address::times_8,  0));
1317       __ movdqu(Address(dest, qword_count, Address::times_8,  0), xmm1);
1318 
1319     } else {
1320       __ movq(to, Address(from, qword_count, Address::times_8, 24));
1321       __ movq(Address(dest, qword_count, Address::times_8, 24), to);
1322       __ movq(to, Address(from, qword_count, Address::times_8, 16));
1323       __ movq(Address(dest, qword_count, Address::times_8, 16), to);
1324       __ movq(to, Address(from, qword_count, Address::times_8,  8));
1325       __ movq(Address(dest, qword_count, Address::times_8,  8), to);
1326       __ movq(to, Address(from, qword_count, Address::times_8,  0));
1327       __ movq(Address(dest, qword_count, Address::times_8,  0), to);
1328     }
1329   __ BIND(L_copy_32_bytes);
1330     __ subptr(qword_count, 4);
1331     __ jcc(Assembler::greaterEqual, L_loop);
1332     __ addptr(qword_count, 4);
1333     __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
1334   }
1335 
1336 
1337   // Arguments:
1338   //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1339   //             ignored
1340   //   name    - stub name string
1341   //
1342   // Inputs:
1343   //   c_rarg0   - source array address
1344   //   c_rarg1   - destination array address
1345   //   c_rarg2   - element count, treated as ssize_t, can be zero
1346   //
1347   // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
1348   // we let the hardware handle it.  The one to eight bytes within words,
1349   // dwords or qwords that span cache line boundaries will still be loaded
1350   // and stored atomically.
1351   //
1352   // Side Effects:


1364     const Register to          = rsi;  // destination array address
1365     const Register count       = rdx;  // elements count
1366     const Register byte_count  = rcx;
1367     const Register qword_count = count;
1368     const Register end_from    = from; // source array end address
1369     const Register end_to      = to;   // destination array end address
1370     // End pointers are inclusive, and if count is not zero they point
1371     // to the last unit copied:  end_to[0] := end_from[0]
1372 
1373     __ enter(); // required for proper stackwalking of RuntimeStub frame
1374     assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
1375 
1376     disjoint_byte_copy_entry = __ pc();
1377     BLOCK_COMMENT("Entry:");
1378     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1379 
1380     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1381                       // r9 and r10 may be used to save non-volatile registers
1382 
1383     // 'from', 'to' and 'count' are now valid
1384     __ movptr(byte_count, count);
1385     __ shrptr(count, 3); // count => qword_count
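         // E.g. count == 13 bytes gives qword_count == 1; the low bits
         // of byte_count (13 & 7 == 5) later select a trailing dword
         // and a trailing byte.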
1386 
1387     // Copy from low to high addresses.  Use 'to' as scratch.
1388     __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1389     __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
1390     __ negptr(qword_count); // make the count negative
1391     __ jmp(L_copy_32_bytes);
1392 
1393     // Copy trailing qwords
1394   __ BIND(L_copy_8_bytes);
1395     __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1396     __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1397     __ increment(qword_count);
1398     __ jcc(Assembler::notZero, L_copy_8_bytes);
1399 
1400     // Check for and copy trailing dword
1401   __ BIND(L_copy_4_bytes);
1402     __ testl(byte_count, 4);
1403     __ jccb(Assembler::zero, L_copy_2_bytes);
1404     __ movl(rax, Address(end_from, 8));
1405     __ movl(Address(end_to, 8), rax);
1406 
1407     __ addptr(end_from, 4);
1408     __ addptr(end_to, 4);
1409 
1410     // Check for and copy trailing word
1411   __ BIND(L_copy_2_bytes);
1412     __ testl(byte_count, 2);
1413     __ jccb(Assembler::zero, L_copy_byte);
1414     __ movw(rax, Address(end_from, 8));
1415     __ movw(Address(end_to, 8), rax);
1416 
1417     __ addptr(end_from, 2);
1418     __ addptr(end_to, 2);
1419 
1420     // Check for and copy trailing byte
1421   __ BIND(L_copy_byte);
1422     __ testl(byte_count, 1);
1423     __ jccb(Assembler::zero, L_exit);
1424     __ movb(rax, Address(end_from, 8));
1425     __ movb(Address(end_to, 8), rax);
1426 
1427   __ BIND(L_exit);
1428     inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
1429     restore_arg_regs();
1430     __ xorptr(rax, rax); // return 0
1431     __ leave(); // required for proper stackwalking of RuntimeStub frame
1432     __ ret(0);
1433 
 1434     // Copy in 32-byte chunks
1435     copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1436     __ jmp(L_copy_4_bytes);
1437 
1438     return start;
1439   }
1440 
1441   // Arguments:
1442   //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1443   //             ignored
1444   //   name    - stub name string
1445   //
1446   // Inputs:
1447   //   c_rarg0   - source array address
1448   //   c_rarg1   - destination array address
1449   //   c_rarg2   - element count, treated as ssize_t, can be zero
1450   //


1460 
1461     Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
1462     const Register from        = rdi;  // source array address
1463     const Register to          = rsi;  // destination array address
1464     const Register count       = rdx;  // elements count
1465     const Register byte_count  = rcx;
1466     const Register qword_count = count;
1467 
1468     __ enter(); // required for proper stackwalking of RuntimeStub frame
1469     assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
1470 
1471     byte_copy_entry = __ pc();
1472     BLOCK_COMMENT("Entry:");
1473     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1474 
1475     array_overlap_test(disjoint_byte_copy_entry, Address::times_1);
1476     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1477                       // r9 and r10 may be used to save non-volatile registers
1478 
1479     // 'from', 'to' and 'count' are now valid
1480     __ movptr(byte_count, count);
1481     __ shrptr(count, 3);   // count => qword_count
1482 
1483     // Copy from high to low addresses.
1484 
1485     // Check for and copy trailing byte
1486     __ testl(byte_count, 1);
1487     __ jcc(Assembler::zero, L_copy_2_bytes);
1488     __ movb(rax, Address(from, byte_count, Address::times_1, -1));
1489     __ movb(Address(to, byte_count, Address::times_1, -1), rax);
1490     __ decrement(byte_count); // Adjust for possible trailing word
1491 
1492     // Check for and copy trailing word
1493   __ BIND(L_copy_2_bytes);
1494     __ testl(byte_count, 2);
1495     __ jcc(Assembler::zero, L_copy_4_bytes);
1496     __ movw(rax, Address(from, byte_count, Address::times_1, -2));
1497     __ movw(Address(to, byte_count, Address::times_1, -2), rax);
1498 
1499     // Check for and copy trailing dword
1500   __ BIND(L_copy_4_bytes);
1501     __ testl(byte_count, 4);
1502     __ jcc(Assembler::zero, L_copy_32_bytes);
1503     __ movl(rax, Address(from, qword_count, Address::times_8));
1504     __ movl(Address(to, qword_count, Address::times_8), rax);
1505     __ jmp(L_copy_32_bytes);
1506 
1507     // Copy trailing qwords
1508   __ BIND(L_copy_8_bytes);
1509     __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1510     __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1511     __ decrement(qword_count);
1512     __ jcc(Assembler::notZero, L_copy_8_bytes);
1513 
1514     inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
1515     restore_arg_regs();
1516     __ xorptr(rax, rax); // return 0
1517     __ leave(); // required for proper stackwalking of RuntimeStub frame
1518     __ ret(0);
1519 
 1520     // Copy in 32-byte chunks
1521     copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1522 
1523     inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
1524     restore_arg_regs();
1525     __ xorptr(rax, rax); // return 0
1526     __ leave(); // required for proper stackwalking of RuntimeStub frame
1527     __ ret(0);
1528 
1529     return start;
1530   }
1531 
1532   // Arguments:
1533   //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1534   //             ignored
1535   //   name    - stub name string
1536   //
1537   // Inputs:
1538   //   c_rarg0   - source array address
1539   //   c_rarg1   - destination array address
1540   //   c_rarg2   - element count, treated as ssize_t, can be zero
1541   //
1542   // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
1543   // let the hardware handle it.  The two or four words within dwords
1544   // or qwords that span cache line boundaries will still be loaded
1545   // and stored atomically.


1558     const Register to          = rsi;  // destination array address
1559     const Register count       = rdx;  // elements count
1560     const Register word_count  = rcx;
1561     const Register qword_count = count;
1562     const Register end_from    = from; // source array end address
1563     const Register end_to      = to;   // destination array end address
1564     // End pointers are inclusive, and if count is not zero they point
1565     // to the last unit copied:  end_to[0] := end_from[0]
1566 
1567     __ enter(); // required for proper stackwalking of RuntimeStub frame
1568     assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
1569 
1570     disjoint_short_copy_entry = __ pc();
1571     BLOCK_COMMENT("Entry:");
1572     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1573 
1574     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1575                       // r9 and r10 may be used to save non-volatile registers
1576 
1577     // 'from', 'to' and 'count' are now valid
1578     __ movptr(word_count, count);
1579     __ shrptr(count, 2); // count => qword_count
1580 
1581     // Copy from low to high addresses.  Use 'to' as scratch.
1582     __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1583     __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
1584     __ negptr(qword_count);
1585     __ jmp(L_copy_32_bytes);
1586 
1587     // Copy trailing qwords
1588   __ BIND(L_copy_8_bytes);
1589     __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1590     __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1591     __ increment(qword_count);
1592     __ jcc(Assembler::notZero, L_copy_8_bytes);
1593 
1594     // Original 'dest' is trashed, so we can't use it as a
1595     // base register for a possible trailing word copy
1596 
1597     // Check for and copy trailing dword
1598   __ BIND(L_copy_4_bytes);
1599     __ testl(word_count, 2);
1600     __ jccb(Assembler::zero, L_copy_2_bytes);
1601     __ movl(rax, Address(end_from, 8));
1602     __ movl(Address(end_to, 8), rax);
1603 
1604     __ addptr(end_from, 4);
1605     __ addptr(end_to, 4);
1606 
1607     // Check for and copy trailing word
1608   __ BIND(L_copy_2_bytes);
1609     __ testl(word_count, 1);
1610     __ jccb(Assembler::zero, L_exit);
1611     __ movw(rax, Address(end_from, 8));
1612     __ movw(Address(end_to, 8), rax);
1613 
1614   __ BIND(L_exit);
1615     inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
1616     restore_arg_regs();
1617     __ xorptr(rax, rax); // return 0
1618     __ leave(); // required for proper stackwalking of RuntimeStub frame
1619     __ ret(0);
1620 
 1621     // Copy in 32-byte chunks
1622     copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1623     __ jmp(L_copy_4_bytes);
1624 
1625     return start;
1626   }
1627 
1628   // Arguments:
1629   //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1630   //             ignored
1631   //   name    - stub name string
1632   //
1633   // Inputs:
1634   //   c_rarg0   - source array address
1635   //   c_rarg1   - destination array address
1636   //   c_rarg2   - element count, treated as ssize_t, can be zero
1637   //


1647 
1648     Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes;
1649     const Register from        = rdi;  // source array address
1650     const Register to          = rsi;  // destination array address
1651     const Register count       = rdx;  // elements count
1652     const Register word_count  = rcx;
1653     const Register qword_count = count;
1654 
1655     __ enter(); // required for proper stackwalking of RuntimeStub frame
1656     assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
1657 
1658     short_copy_entry = __ pc();
1659     BLOCK_COMMENT("Entry:");
1660     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1661 
1662     array_overlap_test(disjoint_short_copy_entry, Address::times_2);
1663     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1664                       // r9 and r10 may be used to save non-volatile registers
1665 
1666     // 'from', 'to' and 'count' are now valid
1667     __ movptr(word_count, count);
1668     __ shrptr(count, 2); // count => qword_count
1669 
1670     // Copy from high to low addresses.  Use 'to' as scratch.
1671 
1672     // Check for and copy trailing word
1673     __ testl(word_count, 1);
1674     __ jccb(Assembler::zero, L_copy_4_bytes);
1675     __ movw(rax, Address(from, word_count, Address::times_2, -2));
1676     __ movw(Address(to, word_count, Address::times_2, -2), rax);
1677 
1678     // Check for and copy trailing dword
1679   __ BIND(L_copy_4_bytes);
1680     __ testl(word_count, 2);
1681     __ jcc(Assembler::zero, L_copy_32_bytes);
1682     __ movl(rax, Address(from, qword_count, Address::times_8));
1683     __ movl(Address(to, qword_count, Address::times_8), rax);
1684     __ jmp(L_copy_32_bytes);
1685 
1686     // Copy trailing qwords
1687   __ BIND(L_copy_8_bytes);
1688     __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1689     __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1690     __ decrement(qword_count);
1691     __ jcc(Assembler::notZero, L_copy_8_bytes);
1692 
1693     inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
1694     restore_arg_regs();
1695     __ xorptr(rax, rax); // return 0
1696     __ leave(); // required for proper stackwalking of RuntimeStub frame
1697     __ ret(0);
1698 
 1699     // Copy in 32-byte chunks
1700     copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1701 
1702     inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
1703     restore_arg_regs();
1704     __ xorptr(rax, rax); // return 0
1705     __ leave(); // required for proper stackwalking of RuntimeStub frame
1706     __ ret(0);
1707 
1708     return start;
1709   }
1710 
1711   // Arguments:
1712   //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1713   //             ignored
1714   //   is_oop  - true => oop array, so generate store check code
1715   //   name    - stub name string
1716   //
1717   // Inputs:
1718   //   c_rarg0   - source array address
1719   //   c_rarg1   - destination array address
1720   //   c_rarg2   - element count, treated as ssize_t, can be zero
1721   //
1722   // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
1723   // the hardware handle it.  The two dwords within qwords that span
 1724   // cache line boundaries will still be loaded and stored atomically.
1725   //
1726   // Side Effects:
1727   //   disjoint_int_copy_entry is set to the no-overlap entry point
1728   //   used by generate_conjoint_int_oop_copy().
1729   //
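       //  For oop arrays the copy is bracketed by the collector's write
       //  barriers, roughly (a sketch):
       //    gen_write_ref_array_pre_barrier(dest, count);      // before any store
       //    ... copy the elements ...
       //    gen_write_ref_array_post_barrier(to, end_to, tmp); // after the last store
       //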
1730   address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, const char *name) {
1731     __ align(CodeEntryAlignment);
1732     StubCodeMark mark(this, "StubRoutines", name);
1733     address start = __ pc();
1734 
1735     Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
1736     const Register from        = rdi;  // source array address
1737     const Register to          = rsi;  // destination array address
1738     const Register count       = rdx;  // elements count
1739     const Register dword_count = rcx;
1740     const Register qword_count = count;
1741     const Register end_from    = from; // source array end address
1742     const Register end_to      = to;   // destination array end address
1743     const Register saved_to    = r11;  // saved destination array address
1744     // End pointers are inclusive, and if count is not zero they point
1745     // to the last unit copied:  end_to[0] := end_from[0]
1746 
1747     __ enter(); // required for proper stackwalking of RuntimeStub frame
1748     assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
1749 
1750     (is_oop ? disjoint_oop_copy_entry : disjoint_int_copy_entry) = __ pc();
1751 
1752     if (is_oop) {
1753       // no registers are destroyed by this call
1754       gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
1755     }
1756 
1757     BLOCK_COMMENT("Entry:");
1758     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1759 
1760     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1761                       // r9 and r10 may be used to save non-volatile registers
1762 
1763     if (is_oop) {
1764       __ movq(saved_to, to);
1765     }
1766 
1767     // 'from', 'to' and 'count' are now valid
1768     __ movptr(dword_count, count);
1769     __ shrptr(count, 1); // count => qword_count
1770 
1771     // Copy from low to high addresses.  Use 'to' as scratch.
1772     __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1773     __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
1774     __ negptr(qword_count);
1775     __ jmp(L_copy_32_bytes);
1776 
1777     // Copy trailing qwords
1778   __ BIND(L_copy_8_bytes);
1779     __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1780     __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1781     __ increment(qword_count);
1782     __ jcc(Assembler::notZero, L_copy_8_bytes);
1783 
1784     // Check for and copy trailing dword
1785   __ BIND(L_copy_4_bytes);
1786     __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
1787     __ jccb(Assembler::zero, L_exit);
1788     __ movl(rax, Address(end_from, 8));
1789     __ movl(Address(end_to, 8), rax);
1790 
1791   __ BIND(L_exit);
1792     if (is_oop) {
1793       __ leaq(end_to, Address(saved_to, dword_count, Address::times_4, -4));
1794       gen_write_ref_array_post_barrier(saved_to, end_to, rax);
1795     }
1796     inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
1797     restore_arg_regs();
1798     __ xorptr(rax, rax); // return 0
1799     __ leave(); // required for proper stackwalking of RuntimeStub frame
1800     __ ret(0);
1801 
 1802     // Copy in 32-byte chunks
1803     copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1804     __ jmp(L_copy_4_bytes);
1805 
1806     return start;
1807   }
1808 
1809   // Arguments:
1810   //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1811   //             ignored
1812   //   is_oop  - true => oop array, so generate store check code
1813   //   name    - stub name string
1814   //
1815   // Inputs:
1816   //   c_rarg0   - source array address
1817   //   c_rarg1   - destination array address
1818   //   c_rarg2   - element count, treated as ssize_t, can be zero
1819   //
1820   // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
1821   // the hardware handle it.  The two dwords within qwords that span
 1822   // cache line boundaries will still be loaded and stored atomically.
1823   //
1824   address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, const char *name) {
1825     __ align(CodeEntryAlignment);
1826     StubCodeMark mark(this, "StubRoutines", name);
1827     address start = __ pc();
1828 
1829     Label L_copy_32_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit;
1830     const Register from        = rdi;  // source array address
1831     const Register to          = rsi;  // destination array address
1832     const Register count       = rdx;  // elements count
1833     const Register dword_count = rcx;
1834     const Register qword_count = count;
1835 
1836     __ enter(); // required for proper stackwalking of RuntimeStub frame
1837     assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
1838 
1839     if (is_oop) {
1840       // no registers are destroyed by this call
1841       gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
1842     }
1843 
1844     (is_oop ? oop_copy_entry : int_copy_entry) = __ pc();
1845     BLOCK_COMMENT("Entry:");
1846     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1847 
1848     array_overlap_test(is_oop ? disjoint_oop_copy_entry : disjoint_int_copy_entry,
1849                        Address::times_4);
1850     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1851                       // r9 and r10 may be used to save non-volatile registers
1852 
1853     assert_clean_int(count, rax); // Make sure 'count' is clean int.
1854     // 'from', 'to' and 'count' are now valid
1855     __ movptr(dword_count, count);
1856     __ shrptr(count, 1); // count => qword_count
1857 
1858     // Copy from high to low addresses.  Use 'to' as scratch.
1859 
1860     // Check for and copy trailing dword
1861     __ testl(dword_count, 1);
1862     __ jcc(Assembler::zero, L_copy_32_bytes);
1863     __ movl(rax, Address(from, dword_count, Address::times_4, -4));
1864     __ movl(Address(to, dword_count, Address::times_4, -4), rax);
1865     __ jmp(L_copy_32_bytes);
1866 
1867     // Copy trailing qwords
1868   __ BIND(L_copy_8_bytes);
1869     __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1870     __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1871     __ decrement(qword_count);
1872     __ jcc(Assembler::notZero, L_copy_8_bytes);
1873 
1874     inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
1875     if (is_oop) {
1876       __ jmp(L_exit);
1877     }
1878     restore_arg_regs();
1879     __ xorptr(rax, rax); // return 0
1880     __ leave(); // required for proper stackwalking of RuntimeStub frame
1881     __ ret(0);
1882 
 1883     // Copy in 32-byte chunks
1884     copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1885 
 1886     inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
 1887     __ bind(L_exit);
 1888     if (is_oop) {
 1889       Register end_to = rdx;
 1890       __ leaq(end_to, Address(to, dword_count, Address::times_4, -4));
 1891       gen_write_ref_array_post_barrier(to, end_to, rax);
 1892     }
1893     restore_arg_regs();
1894     __ xorptr(rax, rax); // return 0
1895     __ leave(); // required for proper stackwalking of RuntimeStub frame
1896     __ ret(0);
1897 
1898     return start;
1899   }
1900 
1901   // Arguments:
1902   //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
1903   //             ignored
1904   //   is_oop  - true => oop array, so generate store check code
1905   //   name    - stub name string
1906   //
1907   // Inputs:
1908   //   c_rarg0   - source array address
1909   //   c_rarg1   - destination array address
1910   //   c_rarg2   - element count, treated as ssize_t, can be zero
1911   //
 1912   // Side Effects:
1913   //   disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
1914   //   no-overlap entry point used by generate_conjoint_long_oop_copy().


1931     __ enter(); // required for proper stackwalking of RuntimeStub frame
1932     // Save no-overlap entry point for generate_conjoint_long_oop_copy()
1933     assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
1934 
1935     if (is_oop) {
1936       disjoint_oop_copy_entry  = __ pc();
1937       // no registers are destroyed by this call
1938       gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
1939     } else {
1940       disjoint_long_copy_entry = __ pc();
1941     }
1942     BLOCK_COMMENT("Entry:");
1943     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1944 
1945     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1946                       // r9 and r10 may be used to save non-volatile registers
1947 
1948     // 'from', 'to' and 'qword_count' are now valid
1949 
1950     // Copy from low to high addresses.  Use 'to' as scratch.
1951     __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
1952     __ lea(end_to,   Address(to,   qword_count, Address::times_8, -8));
1953     __ negptr(qword_count);
1954     __ jmp(L_copy_32_bytes);
1955 
1956     // Copy trailing qwords
1957   __ BIND(L_copy_8_bytes);
1958     __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1959     __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1960     __ increment(qword_count);
1961     __ jcc(Assembler::notZero, L_copy_8_bytes);
1962 
1963     if (is_oop) {
1964       __ jmp(L_exit);
1965     } else {
1966       inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
1967       restore_arg_regs();
1968       __ xorptr(rax, rax); // return 0
1969       __ leave(); // required for proper stackwalking of RuntimeStub frame
1970       __ ret(0);
1971     }
1972 
 1973     // Copy in 32-byte chunks
1974     copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1975 
1976     if (is_oop) {
1977     __ BIND(L_exit);
1978       gen_write_ref_array_post_barrier(saved_to, end_to, rax);
1979       inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
1980     } else {
1981       inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
1982     }
1983     restore_arg_regs();
1984     __ xorptr(rax, rax); // return 0
1985     __ leave(); // required for proper stackwalking of RuntimeStub frame
1986     __ ret(0);
1987 
1988     return start;
1989   }
1990 
1991   // Arguments:
1992   //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
1993   //             ignored
1994   //   is_oop  - true => oop array, so generate store check code
1995   //   name    - stub name string
1996   //
1997   // Inputs:
1998   //   c_rarg0   - source array address
1999   //   c_rarg1   - destination array address
2000   //   c_rarg2   - element count, treated as ssize_t, can be zero
2001   //
2002   address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, const char *name) {
2003     __ align(CodeEntryAlignment);
2004     StubCodeMark mark(this, "StubRoutines", name);
2005     address start = __ pc();
2006 
2007     Label L_copy_32_bytes, L_copy_8_bytes, L_exit;
2008     const Register from        = rdi;  // source array address
2009     const Register to          = rsi;  // destination array address
2010     const Register qword_count = rdx;  // elements count
2011     const Register saved_count = rcx;
2012 
2013     __ enter(); // required for proper stackwalking of RuntimeStub frame
2014     assert_clean_int(c_rarg2, rax);    // Make sure 'count' is clean int.
2015 
2016     address disjoint_copy_entry = NULL;
2017     if (is_oop) {
2018       assert(!UseCompressedOops, "shouldn't be called for compressed oops");
2019       disjoint_copy_entry = disjoint_oop_copy_entry;
2020       oop_copy_entry  = __ pc();
2022     } else {
2023       disjoint_copy_entry = disjoint_long_copy_entry;
2024       long_copy_entry = __ pc();
2026     }
2027     BLOCK_COMMENT("Entry:");
2028     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2029 
2030     array_overlap_test(disjoint_copy_entry, Address::times_8);
2031     setup_arg_regs(); // from => rdi, to => rsi, count => rdx
2032                       // r9 and r10 may be used to save non-volatile registers
2033 
2034     // 'from', 'to' and 'qword_count' are now valid
2035 
2036     if (is_oop) {
2037       // Save to and count for store barrier
2038       __ movptr(saved_count, qword_count);
2039       // No registers are destroyed by this call
2040       gen_write_ref_array_pre_barrier(to, saved_count);
2041     }
2042 
2043     __ jmp(L_copy_32_bytes);
2044 
2045     // Copy trailing qwords
2046   __ BIND(L_copy_8_bytes);
2047     __ movq(rax, Address(from, qword_count, Address::times_8, -8));
2048     __ movq(Address(to, qword_count, Address::times_8, -8), rax);
2049     __ decrement(qword_count);
2050     __ jcc(Assembler::notZero, L_copy_8_bytes);
2051 
2052     if (is_oop) {
2053       __ jmp(L_exit);
2054     } else {
2055       inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
2056       restore_arg_regs();
2057       __ xorptr(rax, rax); // return 0
2058       __ leave(); // required for proper stackwalking of RuntimeStub frame
2059       __ ret(0);
2060     }
2061 
2062     // Copy in 32-byte chunks
2063     copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
2064 
2065     if (is_oop) {
2066     __ BIND(L_exit);
2067       __ lea(rcx, Address(to, saved_count, Address::times_8, -8));
2068       gen_write_ref_array_post_barrier(to, rcx, rax);
2069       inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
2070     } else {
2071       inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
2072     }
2073     restore_arg_regs();
2074     __ xorptr(rax, rax); // return 0
2075     __ leave(); // required for proper stackwalking of RuntimeStub frame
2076     __ ret(0);
2077 
2078     return start;
2079   }
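
// Likewise a sketch of the conjoint variant: when the regions cannot overlap
// destructively, array_overlap_test dispatches to the disjoint (forward)
// copy; otherwise the stub copies high-to-low.  Barriers omitted; sketch only:
static void conjoint_qword_copy_sketch(const jlong* from, jlong* to,
                                       intptr_t qword_count) {
  if (to <= from || to >= from + qword_count) {
    // no destructive overlap: equivalent to taking the disjoint entry point
    for (intptr_t i = 0; i < qword_count; i++) to[i] = from[i];
  } else {
    // 'to' lands inside the source region: copy backward, as the loop above does
    for (intptr_t i = qword_count - 1; i >= 0; i--) to[i] = from[i];
  }
}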
2080 
2081 
2082   // Helper for generating a dynamic type check.
2083   // Smashes no registers.
2084   void generate_type_check(Register sub_klass,
2085                            Register super_check_offset,
2086                            Register super_klass,
2087                            Label& L_success) {
2088     assert_different_registers(sub_klass, super_check_offset, super_klass);
2089 
2090     BLOCK_COMMENT("type_check:");
2091 
2092     Label L_miss;
2093 
2094     // a couple of useful fields in sub_klass:
2095     int ss_offset = (klassOopDesc::header_size() * HeapWordSize +
2096                      Klass::secondary_supers_offset_in_bytes());
2097     int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
2098                      Klass::secondary_super_cache_offset_in_bytes());
2099     Address secondary_supers_addr(sub_klass, ss_offset);
2100     Address super_cache_addr(     sub_klass, sc_offset);
2101 
2102     // if the pointers are equal, we are done (e.g., String[] elements)
2103     __ cmpptr(super_klass, sub_klass);
2104     __ jcc(Assembler::equal, L_success);
2105 
2106     // check the supertype display:
2107     Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
2108     __ cmpptr(super_klass, super_check_addr); // test the super type
2109     __ jcc(Assembler::equal, L_success);
2110 
2111     // if it was a primary super, we can just fail immediately
2112     __ cmpl(super_check_offset, sc_offset);
2113     __ jcc(Assembler::notEqual, L_miss);
2114 
2115     // Now do a linear scan of the secondary super-klass chain.
2116     // The repne_scan instruction uses fixed registers, which we must spill.
2117     // (We need a couple more temps in any case.)
2118     // This code is rarely used, so simplicity is a virtue here.
2119     inc_counter_np(SharedRuntime::_partial_subtype_ctr);
2120     {
2121       __ push(rax);
2122       __ push(rcx);
2123       __ push(rdi);
2124       assert_different_registers(sub_klass, super_klass, rax, rcx, rdi);
2125 
2126       __ movptr(rdi, secondary_supers_addr);
2127       // Load the array length.
2128       __ movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
2129       // Skip to start of data.
2130       __ addptr(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
2131       // Scan rcx words at [rdi] for occurrence of rax
2132       // Set NZ/Z based on last compare
2133       __ movptr(rax, super_klass);
2134       if (UseCompressedOops) {
2135         // Compare against the compressed form; there is no need to uncompress
2136         // afterwards because the original rax is restored by the pop below.
2137         __ encode_heap_oop(rax);
2138         __ repne_scanl();
2139       } else {
2140         __ repne_scan();
2141       }
2142 
2143       // Unspill the temp. registers:
2144       __ pop(rdi);
2145       __ pop(rcx);
2146       __ pop(rax);
2147 
2148       __ jcc(Assembler::notEqual, L_miss);
2149     }
2150 
2151     // Success.  Cache the super we found and proceed in triumph.
2152     __ movptr(super_cache_addr, super_klass); // note: rax is dead
2153     __ jmp(L_success);
2154 
2155     // Fall through on failure!
2156     __ BIND(L_miss);
2157   }
2158 
2159   //
2160   //  Generate checkcasting array copy stub
2161   //
2162   //  Input:
2163   //    c_rarg0   - source array address
2164   //    c_rarg1   - destination array address
2165   //    c_rarg2   - element count, treated as ssize_t, can be zero
2166   //    c_rarg3   - size_t ckoff (super_check_offset)
2167   // not Win64
2168   //    c_rarg4   - oop ckval (super_klass)
2169   // Win64
2170   //    rsp+40    - oop ckval (super_klass)
2171   //
2172   //  Output:


2198     //---------------------------------------------------------------
2199     // Assembler stub will be used for this call to arraycopy
2200     // if the two arrays are subtypes of Object[] but the
2201     // destination array type is not equal to or a supertype
2202     // of the source type.  Each element must be separately
2203     // checked.
2204 
2205     __ align(CodeEntryAlignment);
2206     StubCodeMark mark(this, "StubRoutines", name);
2207     address start = __ pc();
2208 
2209     __ enter(); // required for proper stackwalking of RuntimeStub frame
2210 
2211     checkcast_copy_entry  = __ pc();
2212     BLOCK_COMMENT("Entry:");
2213 
2214 #ifdef ASSERT
2215     // caller guarantees that the arrays really are different
2216     // otherwise, we would have to make conjoint checks
2217     { Label L;
2218       array_overlap_test(L, TIMES_OOP);
2219       __ stop("checkcast_copy within a single array");
2220       __ bind(L);
2221     }
2222 #endif //ASSERT
2223 
2224     // allocate spill slots for r13, r14
2225     enum {
2226       saved_r13_offset,
2227       saved_r14_offset,
2228       saved_rbp_offset,
2229       saved_rip_offset,
2230       saved_rarg0_offset
2231     };
2232     __ subptr(rsp, saved_rbp_offset * wordSize);
2233     __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
2234     __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);
2235     setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
2236                        // ckoff => rcx, ckval => r8
2237                        // r9 and r10 may be used to save non-volatile registers
2238 #ifdef _WIN64
2239     // last argument (#4) is on stack on Win64
2240     const int ckval_offset = saved_rarg0_offset + 4;
2241     __ movptr(ckval, Address(rsp, ckval_offset * wordSize));
2242 #endif
2243 
2244     // check that int operands are properly extended to size_t
2245     assert_clean_int(length, rax);
2246     assert_clean_int(ckoff, rax);
2247 
2248 #ifdef ASSERT
2249     BLOCK_COMMENT("assert consistent ckoff/ckval");
2250     // The ckoff and ckval must be mutually consistent,
2251     // even though caller generates both.
2252     { Label L;
2253       int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
2254                         Klass::super_check_offset_offset_in_bytes());
2255       __ cmpl(ckoff, Address(ckval, sco_offset));
2256       __ jcc(Assembler::equal, L);
2257       __ stop("super_check_offset inconsistent");
2258       __ bind(L);
2259     }
2260 #endif //ASSERT
2261 
2262     // Loop-invariant addresses.  They are exclusive end pointers.
2263     Address end_from_addr(from, length, TIMES_OOP, 0);
2264     Address   end_to_addr(to,   length, TIMES_OOP, 0);
2265     // Loop-variant addresses.  They assume post-incremented count < 0.
2266     Address from_element_addr(end_from, count, TIMES_OOP, 0);
2267     Address   to_element_addr(end_to,   count, TIMES_OOP, 0);
2268 
2269     gen_write_ref_array_pre_barrier(to, count);
2270 
2271     // Copy from low to high addresses, indexed from the end of each array.
2272     __ lea(end_from, end_from_addr);
2273     __ lea(end_to,   end_to_addr);
2274     __ movptr(r14_length, length);        // save a copy of the length
2275     assert(length == count, "");          // else fix next line:
2276     __ negptr(count);                     // negate and test the length
2277     __ jcc(Assembler::notZero, L_load_element);
2278 
2279     // Empty array:  Nothing to do.
2280     __ xorptr(rax, rax);                  // return 0 on (trivial) success
2281     __ jmp(L_done);
2282 
2283     // ======== begin loop ========
2284     // (Loop is rotated; its entry is L_load_element.)
2285     // Loop control:
2286     //   for (count = -count; count != 0; count++)
2287     // Base pointers src, dst are biased by 8*(count-1), to the last element.
2288     __ align(16);
2289 
2290     __ BIND(L_store_element);
2291     __ store_heap_oop(to_element_addr, rax_oop);  // store the oop
2292     __ increment(count);               // increment the count toward zero
2293     __ jcc(Assembler::zero, L_do_card_marks);
2294 
2295     // ======== loop entry is here ========
2296     __ BIND(L_load_element);
2297     __ load_heap_oop(rax_oop, from_element_addr); // load the oop
2298     __ testptr(rax_oop, rax_oop);
2299     __ jcc(Assembler::zero, L_store_element);
2300 
2301     __ load_klass(r11_klass, rax_oop); // query the object klass
2302     generate_type_check(r11_klass, ckoff, ckval, L_store_element);
2303     // ======== end loop ========
2304 
2305     // It was a real error; we must depend on the caller to finish the job.
2306     // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
2307     // Emit GC store barriers for the oops we have copied (r14 + rdx),
2308     // and report their number to the caller.
2309     assert_different_registers(rax, r14_length, count, to, end_to, rcx);
2310     __ lea(end_to, to_element_addr);
2311     gen_write_ref_array_post_barrier(to, end_to, rscratch1);
2312     __ movptr(rax, r14_length);           // original oops
2313     __ addptr(rax, count);                // K = (original - remaining) oops
2314     __ notptr(rax);                       // report (-1^K) to caller
2315     __ jmp(L_done);
2316 
2317     // Come here on success only.
2318     __ BIND(L_do_card_marks);
2319     __ addptr(end_to, -wordSize);         // make an inclusive end pointer
2320     gen_write_ref_array_post_barrier(to, end_to, rscratch1);
2321     __ xorptr(rax, rax);                  // return 0 on success
2322 
2323     // Common exit point (success or failure).
2324     __ BIND(L_done);
2325     __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
2326     __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
2327     inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
2328     restore_arg_regs();
2329     __ leave(); // required for proper stackwalking of RuntimeStub frame
2330     __ ret(0);
2331 
2332     return start;
2333   }
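
// The loop's contract, restated as a sketch: copy until an element fails the
// type check, then report how far it got.  element_is_assignable() is a
// hypothetical stand-in for generate_type_check(); barriers omitted:
static intptr_t checkcast_copy_sketch(oop* from, oop* to, intptr_t length,
                                      bool (*element_is_assignable)(oop)) {
  for (intptr_t k = 0; k < length; k++) {
    oop o = from[k];
    if (o != NULL && !element_is_assignable(o)) {
      return ~k;        // -1 ^ K with K elements copied; caller finishes or throws
    }
    to[k] = o;          // NULL elements are always storable
  }
  return 0;             // zero on (possibly trivial) success
}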
2334 
2335   //
2336   //  Generate 'unsafe' array copy stub
2337   //  Though just as safe as the other stubs, it takes an unscaled
2338   //  size_t argument instead of an element count.
2339   //
2340   //  Input:
2341   //    c_rarg0   - source array address
2342   //    c_rarg1   - destination array address
2343   //    c_rarg2   - byte count, treated as ssize_t, can be zero
2344   //
2345   // Examines the alignment of the operands and dispatches
2346   // to a long, int, short, or byte copy loop.


2349 
2350     Label L_long_aligned, L_int_aligned, L_short_aligned;
2351 
2352     // Input registers (before setup_arg_regs)
2353     const Register from        = c_rarg0;  // source array address
2354     const Register to          = c_rarg1;  // destination array address
2355     const Register size        = c_rarg2;  // byte count (size_t)
2356 
2357     // Register used as a temp
2358     const Register bits        = rax;      // test copy of low bits
2359 
2360     __ align(CodeEntryAlignment);
2361     StubCodeMark mark(this, "StubRoutines", name);
2362     address start = __ pc();
2363 
2364     __ enter(); // required for proper stackwalking of RuntimeStub frame
2365 
2366     // bump this on entry, not on exit:
2367     inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);
2368 
2369     __ mov(bits, from);
2370     __ orptr(bits, to);
2371     __ orptr(bits, size);
2372 
2373     __ testb(bits, BytesPerLong-1);
2374     __ jccb(Assembler::zero, L_long_aligned);
2375 
2376     __ testb(bits, BytesPerInt-1);
2377     __ jccb(Assembler::zero, L_int_aligned);
2378 
2379     __ testb(bits, BytesPerShort-1);
2380     __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));
2381 
2382     __ BIND(L_short_aligned);
2383     __ shrptr(size, LogBytesPerShort); // size => short_count
2384     __ jump(RuntimeAddress(short_copy_entry));
2385 
2386     __ BIND(L_int_aligned);
2387     __ shrptr(size, LogBytesPerInt); // size => int_count
2388     __ jump(RuntimeAddress(int_copy_entry));
2389 
2390     __ BIND(L_long_aligned);
2391     __ shrptr(size, LogBytesPerLong); // size => qword_count
2392     __ jump(RuntimeAddress(long_copy_entry));
2393 
2394     return start;
2395   }
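
// Why the or's above work: a low bit is clear in (from | to | size) only if
// it is clear in all three operands, so one value answers all three
// alignment questions.  Sketch of the dispatch (illustrative only):
static int unsafe_copy_log2_element_size_sketch(uintptr_t from, uintptr_t to,
                                                size_t size) {
  uintptr_t bits = from | to | size;
  if ((bits & (BytesPerLong  - 1)) == 0) return LogBytesPerLong;   // qword copy
  if ((bits & (BytesPerInt   - 1)) == 0) return LogBytesPerInt;    // dword copy
  if ((bits & (BytesPerShort - 1)) == 0) return LogBytesPerShort;  // word copy
  return 0;                                                        // byte copy
}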
2396 
2397   // Perform range checks on the proposed arraycopy.
2398   // Kills temp, but nothing else.
2399   // Also, clean the sign bits of src_pos and dst_pos.
2400   void arraycopy_range_checks(Register src,     // source array oop (c_rarg0)
2401                               Register src_pos, // source position (c_rarg1)
2402                               Register dst,     // destination array oop (c_rarg2)
2403                               Register dst_pos, // destination position (c_rarg3)
2404                               Register length,
2405                               Register temp,
2406                               Label& L_failed) {
2407     BLOCK_COMMENT("arraycopy_range_checks:");
2408 
2409     //  if (src_pos + length > arrayOop(src)->length())  FAIL;
2410     __ movl(temp, length);
2411     __ addl(temp, src_pos);             // src_pos + length


2479     __ enter(); // required for proper stackwalking of RuntimeStub frame
2480 
2481     // bump this on entry, not on exit:
2482     inc_counter_np(SharedRuntime::_generic_array_copy_ctr);
2483 
2484     //-----------------------------------------------------------------------
2485     // Assembler stub will be used for this call to arraycopy
2486     // if the following conditions are met (see the C++ sketch after this list):
2487     //
2488     // (1) src and dst must not be null.
2489     // (2) src_pos must not be negative.
2490     // (3) dst_pos must not be negative.
2491     // (4) length  must not be negative.
2492     // (5) src klass and dst klass should be the same and not NULL.
2493     // (6) src and dst should be arrays.
2494     // (7) src_pos + length must not exceed length of src.
2495     // (8) dst_pos + length must not exceed length of dst.
2496     //
2497 
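// A C++ sketch of guards (1)-(8) above, for orientation only.  klass_of(),
// is_array() and array_length() are hypothetical helpers standing in for the
// load_klass / layout-helper / arrayOop probes emitted below:
extern void* klass_of(void* obj);      // hypothetical helper
extern bool  is_array(void* obj);      // hypothetical helper
extern int   array_length(void* obj);  // hypothetical helper

static bool generic_arraycopy_guards_ok(void* src, int src_pos,
                                        void* dst, int dst_pos, int length) {
  if (src == NULL || dst == NULL)                    return false;  // (1)
  if (src_pos < 0 || dst_pos < 0 || length < 0)      return false;  // (2)(3)(4)
  void* k = klass_of(src);
  if (k == NULL || k != klass_of(dst))               return false;  // (5)
  if (!is_array(src) || !is_array(dst))              return false;  // (6)
  if (src_pos + length > array_length(src))          return false;  // (7)
  if (dst_pos + length > array_length(dst))          return false;  // (8)
  return true;  // the stub proceeds; otherwise it returns -1 to the caller
}
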
2498     //  if (src == NULL) return -1;
2499     __ testptr(src, src);         // src oop
2500     size_t j1off = __ offset();
2501     __ jccb(Assembler::zero, L_failed_0);
2502 
2503     //  if (src_pos < 0) return -1;
2504     __ testl(src_pos, src_pos); // src_pos (32-bits)
2505     __ jccb(Assembler::negative, L_failed_0);
2506 
2507     //  if (dst == NULL) return -1;
2508     __ testptr(dst, dst);         // dst oop
2509     __ jccb(Assembler::zero, L_failed_0);
2510 
2511     //  if (dst_pos < 0) return -1;
2512     __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
2513     size_t j4off = __ offset();
2514     __ jccb(Assembler::negative, L_failed_0);
2515 
2516     // The first four tests are very dense code,
2517     // but not quite dense enough to put four
2518     // jumps in a 16-byte instruction fetch buffer.
2519     // That's good, because some branch predictors
2520     // do not like jumps so close together.
2521     // Make sure of this.
2522     guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");
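
// The xor-and-mask test above asserts the two jumps sit in different 16-byte
// fetch blocks: masking off the low four bits of an offset yields its block
// number, so the expression is nonzero iff the blocks differ.  Sketch
// (offset values purely illustrative):
static bool in_different_fetch_blocks_sketch(size_t off1, size_t off2) {
  // e.g. 0x12 ^ 0x23 = 0x31, and 0x31 & ~15 = 0x30 != 0 -> different blocks;
  //      0x12 ^ 0x1e = 0x0c, and 0x0c & ~15 = 0x00      -> same block.
  return ((off1 ^ off2) & ~(size_t)15) != 0;
}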
2523 
2524     // registers used as temp
2525     const Register r11_length    = r11; // elements count to copy
2526     const Register r10_src_klass = r10; // array klass
2527     const Register r9_dst_klass  = r9;  // dest array klass
2528 
2529     //  if (length < 0) return -1;
2530     __ movl(r11_length, C_RARG4);       // length (elements count, 32-bits value)
2531     __ testl(r11_length, r11_length);
2532     __ jccb(Assembler::negative, L_failed_0);
2533 
2534     __ load_klass(r10_src_klass, src);
2535 #ifdef ASSERT
2536     //  assert(src->klass() != NULL);
2537     BLOCK_COMMENT("assert klasses not null");
2538     { Label L1, L2;
2539       __ testptr(r10_src_klass, r10_src_klass);
2540       __ jcc(Assembler::notZero, L2);   // it is broken if klass is NULL
2541       __ bind(L1);
2542       __ stop("broken null klass");
2543       __ bind(L2);
2544       __ load_klass(r9_dst_klass, dst);
2545       __ cmpq(r9_dst_klass, 0);
2546       __ jcc(Assembler::equal, L1);     // this would be broken also
2547       BLOCK_COMMENT("assert done");
2548     }
2549 #endif
2550 
2551     // Load layout helper (32-bits)
2552     //
2553     //  |array_tag|     | header_size | element_type |     |log2_element_size|
2554     // 32        30    24            16              8     2                 0
2555     //
2556     //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2557     //
2558 
2559     int lh_offset = klassOopDesc::header_size() * HeapWordSize +
2560                     Klass::layout_helper_offset_in_bytes();
2561 
2562     const Register rax_lh = rax;  // layout helper
2563 
2564     __ movl(rax_lh, Address(r10_src_klass, lh_offset));
2565 
2566     // Handle objArrays completely differently...
2567     jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2568     __ cmpl(rax_lh, objArray_lh);
2569     __ jcc(Assembler::equal, L_objArray);
2570 
2571     //  if (src->klass() != dst->klass()) return -1;
2572     __ load_klass(r9_dst_klass, dst);
2573     __ cmpq(r10_src_klass, r9_dst_klass);
2574     __ jcc(Assembler::notEqual, L_failed);
2575 
2576     //  if (!src->is_Array()) return -1;
2577     __ cmpl(rax_lh, Klass::_lh_neutral_value);
2578     __ jcc(Assembler::greaterEqual, L_failed);
2579 
2580     // At this point, it is known to be a typeArray (array_tag 0x3).
2581 #ifdef ASSERT
2582     { Label L;
2583       __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
2584       __ jcc(Assembler::greaterEqual, L);
2585       __ stop("must be a primitive array");
2586       __ bind(L);
2587     }
2588 #endif
2589 
2590     arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2591                            r10, L_failed);
2592 
2593     // typeArrayKlass
2594     //
2595     // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
2596     // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
2597     //
2598 
2599     const Register r10_offset = r10;    // array offset
2600     const Register rax_elsize = rax_lh; // element size
2601 
2602     __ movl(r10_offset, rax_lh);
2603     __ shrl(r10_offset, Klass::_lh_header_size_shift);
2604     __ andptr(r10_offset, Klass::_lh_header_size_mask);   // array_offset
2605     __ addptr(src, r10_offset);           // src array offset
2606     __ addptr(dst, r10_offset);           // dst array offset
2607     BLOCK_COMMENT("choose copy loop based on element size");
2608     __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize
2609 
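// The decoding performed just above, restated using the same Klass constants
// (sketch only -- the stub keeps both values in r10/rax instead):
static void decode_array_layout_helper_sketch(jint lh,
                                              int& header_size_in_bytes,
                                              int& log2_element_size) {
  header_size_in_bytes = (lh >> Klass::_lh_header_size_shift)
                         & Klass::_lh_header_size_mask;
  log2_element_size    = lh & Klass::_lh_log2_element_size_mask;
  // src_addr = src + header_size_in_bytes + (src_pos << log2_element_size),
  // which is exactly what the lea instructions below compute.
}
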
2610     // next registers should be set before the jump to corresponding stub
2611     const Register from     = c_rarg0;  // source array address
2612     const Register to       = c_rarg1;  // destination array address
2613     const Register count    = c_rarg2;  // elements count
2614 
2615     // 'from', 'to' and 'count' must be set in exactly this order, since
2616     // they are the same registers as the still-live 'src', 'src_pos' and 'dst'.
2617 
2618   __ BIND(L_copy_bytes);
2619     __ cmpl(rax_elsize, 0);
2620     __ jccb(Assembler::notEqual, L_copy_shorts);
2621     __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr
2622     __ lea(to,   Address(dst, dst_pos, Address::times_1, 0));// dst_addr
2623     __ movl2ptr(count, r11_length); // length
2624     __ jump(RuntimeAddress(byte_copy_entry));
2625 
2626   __ BIND(L_copy_shorts);
2627     __ cmpl(rax_elsize, LogBytesPerShort);
2628     __ jccb(Assembler::notEqual, L_copy_ints);
2629     __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr
2630     __ lea(to,   Address(dst, dst_pos, Address::times_2, 0));// dst_addr
2631     __ movl2ptr(count, r11_length); // length
2632     __ jump(RuntimeAddress(short_copy_entry));
2633 
2634   __ BIND(L_copy_ints);
2635     __ cmpl(rax_elsize, LogBytesPerInt);
2636     __ jccb(Assembler::notEqual, L_copy_longs);
2637     __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr
2638     __ lea(to,   Address(dst, dst_pos, Address::times_4, 0));// dst_addr
2639     __ movl2ptr(count, r11_length); // length
2640     __ jump(RuntimeAddress(int_copy_entry));
2641 
2642   __ BIND(L_copy_longs);
2643 #ifdef ASSERT
2644     { Label L;
2645       __ cmpl(rax_elsize, LogBytesPerLong);
2646       __ jcc(Assembler::equal, L);
2647       __ stop("must be long copy, but elsize is wrong");
2648       __ bind(L);
2649     }
2650 #endif
2651     __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr
2652     __ lea(to,   Address(dst, dst_pos, Address::times_8, 0));// dst_addr
2653     __ movl2ptr(count, r11_length); // length
2654     __ jump(RuntimeAddress(long_copy_entry));
2655 
2656     // objArrayKlass
2657   __ BIND(L_objArray);
2658     // live at this point:  r10_src_klass, src[_pos], dst[_pos]
2659 
2660     Label L_plain_copy, L_checkcast_copy;
2661     //  test array classes for subtyping
2662     __ load_klass(r9_dst_klass, dst);
2663     __ cmpq(r10_src_klass, r9_dst_klass); // usual case is exact equality
2664     __ jcc(Assembler::notEqual, L_checkcast_copy);
2665 
2666     // Identically typed arrays can be copied without element-wise checks.
2667     arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2668                            r10, L_failed);
2669 
2670     __ lea(from, Address(src, src_pos, TIMES_OOP,
2671                  arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
2672     __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
2673                  arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
2674     __ movl2ptr(count, r11_length); // length
2675   __ BIND(L_plain_copy);
2676     __ jump(RuntimeAddress(oop_copy_entry));
2677 
2678   __ BIND(L_checkcast_copy);
2679     // live at this point:  r10_src_klass, !r11_length
2680     {
2681       // assert(r11_length == C_RARG4); // will reload from here
2682       Register r11_dst_klass = r11;
2683       __ load_klass(r11_dst_klass, dst);
2684 
2685       // Before looking at dst.length, make sure dst is also an objArray.
2686       __ cmpl(Address(r11_dst_klass, lh_offset), objArray_lh);
2687       __ jcc(Assembler::notEqual, L_failed);
2688 
2689       // It is safe to examine both src.length and dst.length.
2690 #ifndef _WIN64
2691       arraycopy_range_checks(src, src_pos, dst, dst_pos, C_RARG4,
2692                              rax, L_failed);
2693 #else
2694       __ movl(r11_length, C_RARG4);     // reload
2695       arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2696                              rax, L_failed);
2697       __ load_klass(r11_dst_klass, dst); // reload
2698 #endif
2699 
2700       // Marshal the base address arguments now, freeing registers.
2701       __ lea(from, Address(src, src_pos, TIMES_OOP,
2702                    arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
2703       __ lea(to,   Address(dst, dst_pos, TIMES_OOP,
2704                    arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
2705       __ movl(count, C_RARG4);          // length (reloaded)
2706       Register sco_temp = c_rarg3;      // this register is free now
2707       assert_different_registers(from, to, count, sco_temp,
2708                                  r11_dst_klass, r10_src_klass);
2709       assert_clean_int(count, sco_temp);
2710 
2711       // Generate the type check.
2712       int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
2713                         Klass::super_check_offset_offset_in_bytes());
2714       __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
2715       assert_clean_int(sco_temp, rax);
2716       generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);
2717 
2718       // Fetch destination element klass from the objArrayKlass header.
2719       int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
2720                        objArrayKlass::element_klass_offset_in_bytes());
2721       __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
2722       __ movl(sco_temp,      Address(r11_dst_klass, sco_offset));
2723       assert_clean_int(sco_temp, rax);
2724 
2725       // the checkcast_copy loop needs two extra arguments:
2726       assert(c_rarg3 == sco_temp, "#3 already in place");
2727       __ movptr(C_RARG4, r11_dst_klass);  // dst.klass.element_klass
2728       __ jump(RuntimeAddress(checkcast_copy_entry));
2729     }
2730 
2731   __ BIND(L_failed);
2732     __ xorptr(rax, rax);
2733     __ notptr(rax); // return -1
2734     __ leave();   // required for proper stackwalking of RuntimeStub frame
2735     __ ret(0);
2736 
2737     return start;
2738   }
2739 
2740 #undef length_arg
2741 
2742   void generate_arraycopy_stubs() {
2743     // Call the conjoint generation methods immediately after
2744     // the disjoint ones so that short branches from the former
2745     // to the latter can be generated.
2746     StubRoutines::_jbyte_disjoint_arraycopy  = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
2747     StubRoutines::_jbyte_arraycopy           = generate_conjoint_byte_copy(false, "jbyte_arraycopy");
2748 
2749     StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
2750     StubRoutines::_jshort_arraycopy          = generate_conjoint_short_copy(false, "jshort_arraycopy");
2751 
2752     StubRoutines::_jint_disjoint_arraycopy   = generate_disjoint_int_oop_copy(false, false, "jint_disjoint_arraycopy");
2753     StubRoutines::_jint_arraycopy            = generate_conjoint_int_oop_copy(false, false, "jint_arraycopy");
2754 
2755     StubRoutines::_jlong_disjoint_arraycopy  = generate_disjoint_long_oop_copy(false, false, "jlong_disjoint_arraycopy");
2756     StubRoutines::_jlong_arraycopy           = generate_conjoint_long_oop_copy(false, false, "jlong_arraycopy");
2757 
2758 
2759     if (UseCompressedOops) {
2760       StubRoutines::_oop_disjoint_arraycopy  = generate_disjoint_int_oop_copy(false, true, "oop_disjoint_arraycopy");
2761       StubRoutines::_oop_arraycopy           = generate_conjoint_int_oop_copy(false, true, "oop_arraycopy");
2762     } else {
2763       StubRoutines::_oop_disjoint_arraycopy  = generate_disjoint_long_oop_copy(false, true, "oop_disjoint_arraycopy");
2764       StubRoutines::_oop_arraycopy           = generate_conjoint_long_oop_copy(false, true, "oop_arraycopy");
2765     }
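
// With UseCompressedOops a heap reference is a 32-bit narrowOop, so the oop
// copiers can reuse the int-sized loops; uncompressed oops are 64 bits wide
// and use the long-sized loops.  In effect (sketch only):
static int heap_oop_size_sketch() {
  return UseCompressedOops ? (int) sizeof(narrowOop)   // 4 bytes
                           : (int) sizeof(oop);        // 8 bytes
}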
2766 
2767     StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy");
2768     StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy("unsafe_arraycopy");
2769     StubRoutines::_generic_arraycopy   = generate_generic_copy("generic_arraycopy");
2770 
2771     // We don't generate specialized code for HeapWord-aligned source
2772     // arrays, so just use the code we've already generated.
2773     StubRoutines::_arrayof_jbyte_disjoint_arraycopy  = StubRoutines::_jbyte_disjoint_arraycopy;
2774     StubRoutines::_arrayof_jbyte_arraycopy           = StubRoutines::_jbyte_arraycopy;
2775 
2776     StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy;
2777     StubRoutines::_arrayof_jshort_arraycopy          = StubRoutines::_jshort_arraycopy;
2778 
2779     StubRoutines::_arrayof_jint_disjoint_arraycopy   = StubRoutines::_jint_disjoint_arraycopy;
2780     StubRoutines::_arrayof_jint_arraycopy            = StubRoutines::_jint_arraycopy;
2781 
2782     StubRoutines::_arrayof_jlong_disjoint_arraycopy  = StubRoutines::_jlong_disjoint_arraycopy;
2783     StubRoutines::_arrayof_jlong_arraycopy           = StubRoutines::_jlong_arraycopy;
2784 
2785     StubRoutines::_arrayof_oop_disjoint_arraycopy    = StubRoutines::_oop_disjoint_arraycopy;


2816       rbp_off2,
2817       return_off,
2818       return_off2,
2819       framesize // inclusive of return address
2820     };
2821 
2822     int insts_size = 512;
2823     int locs_size  = 64;
2824 
2825     CodeBuffer code(name, insts_size, locs_size);
2826     OopMapSet* oop_maps  = new OopMapSet();
2827     MacroAssembler* masm = new MacroAssembler(&code);
2828 
2829     address start = __ pc();
2830 
2831     // This is an inlined and slightly modified version of call_VM.
2832     // It has the ability to fetch the return PC out of
2833     // thread-local storage, and it sets up last_Java_sp slightly
2834     // differently from the real call_VM.
2835     if (restore_saved_exception_pc) {
2836       __ movptr(rax,
2837                 Address(r15_thread,
2838                         in_bytes(JavaThread::saved_exception_pc_offset())));
2839       __ push(rax);
2840     }
2841 
2842     __ enter(); // required for proper stackwalking of RuntimeStub frame
2843 
2844     assert(is_even(framesize/2), "sp not 16-byte aligned");
2845 
2846     // return address and rbp are already in place
2847     __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog
2848 
2849     int frame_complete = __ pc() - start;
2850 
2851     // Set up last_Java_sp and last_Java_fp
2852     __ set_last_Java_frame(rsp, rbp, NULL);
2853 
2854     // Call runtime
2855     __ movptr(c_rarg0, r15_thread);
2856     BLOCK_COMMENT("call runtime_entry");
2857     __ call(RuntimeAddress(runtime_entry));
2858 
2859     // Generate oop map
2860     OopMap* map = new OopMap(framesize, 0);
2861 
2862     oop_maps->add_gc_map(__ pc() - start, map);
2863 
2864     __ reset_last_Java_frame(true, false);
2865 
2866     __ leave(); // required for proper stackwalking of RuntimeStub frame
2867 
2868     // check for pending exceptions
2869 #ifdef ASSERT
2870     Label L;
2871     __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()),
2872             (int32_t) NULL_WORD);
2873     __ jcc(Assembler::notEqual, L);
2874     __ should_not_reach_here();
2875     __ bind(L);
2876 #endif // ASSERT
2877     __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2878 
2879 
2880     // codeBlob framesize is in words (not VMRegImpl::slot_size)
2881     RuntimeStub* stub =
2882       RuntimeStub::new_runtime_stub(name,
2883                                     &code,
2884                                     frame_complete,
2885                                     (framesize >> (LogBytesPerWord - LogBytesPerInt)),
2886                                     oop_maps, false);
2887     return stub->entry_point();
2888   }
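
// Note on the frame arithmetic above: 'framesize' counts 32-bit VMReg slots,
// two per 64-bit word.  Hence the two conversions (sketch only):
static int prolog_adjustment_in_bytes_sketch(int framesize_in_slots) {
  // minus 4 slots: saved rbp and the return address are already in place
  return (framesize_in_slots - 4) << LogBytesPerInt;
}
static int framesize_in_words_sketch(int framesize_in_slots) {
  return framesize_in_slots >> (LogBytesPerWord - LogBytesPerInt);  // slots/2
}
// The is_even(framesize/2) assert keeps the frame a whole number of 16-byte
// units, preserving the ABI stack alignment across the runtime call.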
2889 
2890   // Initialization
2891   void generate_initial() {
2892     // Generates all stubs and initializes the entry points
2893 
2894     // This platform-specific stub is needed by generate_call_stub()
2895     StubRoutines::x86::_mxcsr_std        = generate_fp_mask("mxcsr_std",        0x0000000000001F80);
2896 
2897     // Entry points that exist on all platforms.  Note: this is code
2898     // that could be shared among different platforms - however the
2899     // benefit seems to be smaller than the disadvantage of having a
2900     // much more complicated generator structure. See also comment in
2901     // stubRoutines.hpp.
2902 
2903     StubRoutines::_forward_exception_entry = generate_forward_exception();
2904 
2905     StubRoutines::_call_stub_entry =
2906       generate_call_stub(StubRoutines::_call_stub_return_address);
2907 
2908     // is referenced by megamorphic call
2909     StubRoutines::_catch_exception_entry = generate_catch_exception();
2910 
2911     // atomic calls
2912     StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
2913     StubRoutines::_atomic_xchg_ptr_entry     = generate_atomic_xchg_ptr();
2914     StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
2915     StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
2916     StubRoutines::_atomic_add_entry          = generate_atomic_add();
2917     StubRoutines::_atomic_add_ptr_entry      = generate_atomic_add_ptr();
2918     StubRoutines::_fence_entry               = generate_orderaccess_fence();
2919 
2920     StubRoutines::_handler_for_unsafe_access_entry =
2921       generate_handler_for_unsafe_access();
2922 
2923     // platform dependent
2924     StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
2925 
2926     StubRoutines::x86::_verify_mxcsr_entry    = generate_verify_mxcsr();
2927   }
2928 
2929   void generate_all() {
2930     // Generates all stubs and initializes the entry points
2931 
2932     // These entry points require SharedInfo::stack0 to be set up in
2933     // non-core builds and need to be relocatable, so they each
2934     // fabricate a RuntimeStub internally.
2935     StubRoutines::_throw_AbstractMethodError_entry =
2936       generate_throw_exception("AbstractMethodError throw_exception",
2937                                CAST_FROM_FN_PTR(address,
2938                                                 SharedRuntime::
2939                                                 throw_AbstractMethodError),
2940                                false);
2941 
2942     StubRoutines::_throw_IncompatibleClassChangeError_entry =
2943       generate_throw_exception("IncompatibleClassChangeError throw_exception",
2944                                CAST_FROM_FN_PTR(address,
2945                                                 SharedRuntime::
2946                                                 throw_IncompatibleClassChangeError),


2958                                CAST_FROM_FN_PTR(address,
2959                                                 SharedRuntime::
2960                                                 throw_NullPointerException),
2961                                true);
2962 
2963     StubRoutines::_throw_NullPointerException_at_call_entry =
2964       generate_throw_exception("NullPointerException at call throw_exception",
2965                                CAST_FROM_FN_PTR(address,
2966                                                 SharedRuntime::
2967                                                 throw_NullPointerException_at_call),
2968                                false);
2969 
2970     StubRoutines::_throw_StackOverflowError_entry =
2971       generate_throw_exception("StackOverflowError throw_exception",
2972                                CAST_FROM_FN_PTR(address,
2973                                                 SharedRuntime::
2974                                                 throw_StackOverflowError),
2975                                false);
2976 
2977     // entry points that are platform specific
2978     StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
2979     StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
2980     StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
2981     StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();
2982 
2983     StubRoutines::x86::_float_sign_mask  = generate_fp_mask("float_sign_mask",  0x7FFFFFFF7FFFFFFF);
2984     StubRoutines::x86::_float_sign_flip  = generate_fp_mask("float_sign_flip",  0x8000000080000000);
2985     StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
2986     StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);
2987 
2988     // support for verify_oop (must happen after universe_init)
2989     StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
2990 
2991     // arraycopy stubs used by compilers
2992     generate_arraycopy_stubs();
2993   }
2994 
2995  public:
2996   StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
2997     if (all) {
2998       generate_all();
2999     } else {
3000       generate_initial();
3001     }
3002   }
3003 }; // end class declaration
3004 
3005 address StubGenerator::disjoint_byte_copy_entry  = NULL;
3006 address StubGenerator::disjoint_short_copy_entry = NULL;