1 /*
   2  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "incls/_precompiled.incl"
  26 #include "incls/_stubGenerator_sparc.cpp.incl"
  27 
  28 // Declaration and definition of StubGenerator (no .hpp file).
  29 // For a more detailed description of the stub routine structure
  30 // see the comment in stubRoutines.hpp.
  31 
  32 #define __ _masm->
  33 
  34 #ifdef PRODUCT
  35 #define BLOCK_COMMENT(str) /* nothing */
  36 #else
  37 #define BLOCK_COMMENT(str) __ block_comment(str)
  38 #endif
  39 
  40 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
  41 
  42 // Note:  The register L7 is used as L7_thread_cache, and may not be used
  43 //        any other way within this module.
  44 
  45 
  46 static const Register& Lstub_temp = L2;
  47 
  48 // -------------------------------------------------------------------------------------------------------------------------
  49 // Stub Code definitions
  50 
  51 static address handle_unsafe_access() {
  52   JavaThread* thread = JavaThread::current();
  53   address pc  = thread->saved_exception_pc();
  54   address npc = thread->saved_exception_npc();
  55   // pc is the instruction which we must emulate
  56   // doing a no-op is fine:  return garbage from the load
  57 
  58   // request an async exception
  59   thread->set_pending_unsafe_access_error();
  60 
  61   // return address of next instruction to execute
  62   return npc;
  63 }
  64 
  65 class StubGenerator: public StubCodeGenerator {
  66  private:
  67 
  68 #ifdef PRODUCT
  69 #define inc_counter_np(a,b,c) (0)
  70 #else
  71 #define inc_counter_np(counter, t1, t2) \
  72   BLOCK_COMMENT("inc_counter " #counter); \
  73   __ inc_counter(&counter, t1, t2);
  74 #endif
  75 
  76   //----------------------------------------------------------------------------------------------------
  77   // Call stubs are used to call Java from C
  78 
  79   address generate_call_stub(address& return_pc) {
  80     StubCodeMark mark(this, "StubRoutines", "call_stub");
  81     address start = __ pc();
  82 
  83     // Incoming arguments:
  84     //
  85     // o0         : call wrapper address
  86     // o1         : result (address)
  87     // o2         : result type
  88     // o3         : method
  89     // o4         : (interpreter) entry point
  90     // o5         : parameters (address)
  91     // [sp + 0x5c]: parameter size (in words)
  92     // [sp + 0x60]: thread
  93     //
  94     // +---------------+ <--- sp + 0
  95     // |               |
  96     // . reg save area .
  97     // |               |
  98     // +---------------+ <--- sp + 0x40
  99     // |               |
 100     // . extra 7 slots .
 101     // |               |
 102     // +---------------+ <--- sp + 0x5c
 103     // |  param. size  |
 104     // +---------------+ <--- sp + 0x60
 105     // |    thread     |
 106     // +---------------+
 107     // |               |
 108 
 109     // note: if the link argument position changes, adjust
 110     //       the code in frame::entry_frame_call_wrapper()
 111 
 112     const Argument link           = Argument(0, false); // used only for GC
 113     const Argument result         = Argument(1, false);
 114     const Argument result_type    = Argument(2, false);
 115     const Argument method         = Argument(3, false);
 116     const Argument entry_point    = Argument(4, false);
 117     const Argument parameters     = Argument(5, false);
 118     const Argument parameter_size = Argument(6, false);
 119     const Argument thread         = Argument(7, false);
 120 
 121     // setup thread register
 122     __ ld_ptr(thread.as_address(), G2_thread);
 123     __ reinit_heapbase();
 124 
 125 #ifdef ASSERT
 126     // make sure we have no pending exceptions
 127     { const Register t = G3_scratch;
 128       Label L;
 129       __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), t);
 130       __ br_null(t, false, Assembler::pt, L);
 131       __ delayed()->nop();
 132       __ stop("StubRoutines::call_stub: entered with pending exception");
 133       __ bind(L);
 134     }
 135 #endif
 136 
 137     // create activation frame & allocate space for parameters
 138     { const Register t = G3_scratch;
 139       __ ld_ptr(parameter_size.as_address(), t);                // get parameter size (in words)
 140       __ add(t, frame::memory_parameter_word_sp_offset, t);     // add space for save area (in words)
 141       __ round_to(t, WordsPerLong);                             // make sure it is multiple of 2 (in words)
 142       __ sll(t, Interpreter::logStackElementSize, t);           // compute number of bytes
 143       __ neg(t);                                                // negate so it can be used with save
 144       __ save(SP, t, SP);                                       // setup new frame
 145     }
 146 
 147     // +---------------+ <--- sp + 0
 148     // |               |
 149     // . reg save area .
 150     // |               |
 151     // +---------------+ <--- sp + 0x40
 152     // |               |
 153     // . extra 7 slots .
 154     // |               |
 155     // +---------------+ <--- sp + 0x5c
 156     // |  empty slot   |      (only if parameter size is even)
 157     // +---------------+
 158     // |               |
 159     // .  parameters   .
 160     // |               |
 161     // +---------------+ <--- fp + 0
 162     // |               |
 163     // . reg save area .
 164     // |               |
 165     // +---------------+ <--- fp + 0x40
 166     // |               |
 167     // . extra 7 slots .
 168     // |               |
 169     // +---------------+ <--- fp + 0x5c
 170     // |  param. size  |
 171     // +---------------+ <--- fp + 0x60
 172     // |    thread     |
 173     // +---------------+
 174     // |               |
 175 
 176     // pass parameters if any
 177     BLOCK_COMMENT("pass parameters if any");
 178     { const Register src = parameters.as_in().as_register();
 179       const Register dst = Lentry_args;
 180       const Register tmp = G3_scratch;
 181       const Register cnt = G4_scratch;
 182 
 183       // test if any parameters & setup of Lentry_args
 184       Label exit;
 185       __ ld_ptr(parameter_size.as_in().as_address(), cnt);      // parameter counter
 186       __ add( FP, STACK_BIAS, dst );
 187       __ tst(cnt);
 188       __ br(Assembler::zero, false, Assembler::pn, exit);
 189       __ delayed()->sub(dst, BytesPerWord, dst);                 // setup Lentry_args
 190 
 191       // copy parameters if any
 192       Label loop;
 193       __ BIND(loop);
 194       // Store parameter value
 195       __ ld_ptr(src, 0, tmp);
 196       __ add(src, BytesPerWord, src);
 197       __ st_ptr(tmp, dst, 0);
 198       __ deccc(cnt);
 199       __ br(Assembler::greater, false, Assembler::pt, loop);
 200       __ delayed()->sub(dst, Interpreter::stackElementSize, dst);
 201 
 202       // done
 203       __ BIND(exit);
 204     }
 205 
 206     // setup parameters, method & call Java function
 207 #ifdef ASSERT
 208     // layout_activation_impl checks it's notion of saved SP against
 209     // this register, so if this changes update it as well.
 210     const Register saved_SP = Lscratch;
 211     __ mov(SP, saved_SP);                               // keep track of SP before call
 212 #endif
 213 
 214     // setup parameters
 215     const Register t = G3_scratch;
 216     __ ld_ptr(parameter_size.as_in().as_address(), t); // get parameter size (in words)
 217     __ sll(t, Interpreter::logStackElementSize, t);    // compute number of bytes
 218     __ sub(FP, t, Gargs);                              // setup parameter pointer
 219 #ifdef _LP64
 220     __ add( Gargs, STACK_BIAS, Gargs );                // Account for LP64 stack bias
 221 #endif
 222     __ mov(SP, O5_savedSP);
 223 
 224 
 225     // do the call
 226     //
 227     // the following register must be setup:
 228     //
 229     // G2_thread
 230     // G5_method
 231     // Gargs
 232     BLOCK_COMMENT("call Java function");
 233     __ jmpl(entry_point.as_in().as_register(), G0, O7);
 234     __ delayed()->mov(method.as_in().as_register(), G5_method);   // setup method
 235 
 236     BLOCK_COMMENT("call_stub_return_address:");
 237     return_pc = __ pc();
 238 
 239     // The callee, if it wasn't interpreted, can return with SP changed so
 240     // we can no longer assert of change of SP.
 241 
 242     // store result depending on type
 243     // (everything that is not T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE
 244     //  is treated as T_INT)
 245     { const Register addr = result     .as_in().as_register();
 246       const Register type = result_type.as_in().as_register();
 247       Label is_long, is_float, is_double, is_object, exit;
 248       __            cmp(type, T_OBJECT);  __ br(Assembler::equal, false, Assembler::pn, is_object);
 249       __ delayed()->cmp(type, T_FLOAT);   __ br(Assembler::equal, false, Assembler::pn, is_float);
 250       __ delayed()->cmp(type, T_DOUBLE);  __ br(Assembler::equal, false, Assembler::pn, is_double);
 251       __ delayed()->cmp(type, T_LONG);    __ br(Assembler::equal, false, Assembler::pn, is_long);
 252       __ delayed()->nop();
 253 
 254       // store int result
 255       __ st(O0, addr, G0);
 256 
 257       __ BIND(exit);
 258       __ ret();
 259       __ delayed()->restore();
 260 
 261       __ BIND(is_object);
 262       __ ba(false, exit);
 263       __ delayed()->st_ptr(O0, addr, G0);
 264 
 265       __ BIND(is_float);
 266       __ ba(false, exit);
 267       __ delayed()->stf(FloatRegisterImpl::S, F0, addr, G0);
 268 
 269       __ BIND(is_double);
 270       __ ba(false, exit);
 271       __ delayed()->stf(FloatRegisterImpl::D, F0, addr, G0);
 272 
 273       __ BIND(is_long);
 274 #ifdef _LP64
 275       __ ba(false, exit);
 276       __ delayed()->st_long(O0, addr, G0);      // store entire long
 277 #else
 278 #if defined(COMPILER2)
 279   // All return values are where we want them, except for Longs.  C2 returns
 280   // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
 281   // Since the interpreter will return longs in G1 and O0/O1 in the 32bit
 282   // build we simply always use G1.
 283   // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
 284   // do this here. Unfortunately if we did a rethrow we'd see an machepilog node
 285   // first which would move g1 -> O0/O1 and destroy the exception we were throwing.
 286 
 287       __ ba(false, exit);
 288       __ delayed()->stx(G1, addr, G0);  // store entire long
 289 #else
 290       __ st(O1, addr, BytesPerInt);
 291       __ ba(false, exit);
 292       __ delayed()->st(O0, addr, G0);
 293 #endif /* COMPILER2 */
 294 #endif /* _LP64 */
 295      }
 296      return start;
 297   }
 298 
 299 
 300   //----------------------------------------------------------------------------------------------------
 301   // Return point for a Java call if there's an exception thrown in Java code.
 302   // The exception is caught and transformed into a pending exception stored in
 303   // JavaThread that can be tested from within the VM.
 304   //
 305   // Oexception: exception oop
 306 
 307   address generate_catch_exception() {
 308     StubCodeMark mark(this, "StubRoutines", "catch_exception");
 309 
 310     address start = __ pc();
 311     // verify that thread corresponds
 312     __ verify_thread();
 313 
 314     const Register& temp_reg = Gtemp;
 315     Address pending_exception_addr    (G2_thread, Thread::pending_exception_offset());
 316     Address exception_file_offset_addr(G2_thread, Thread::exception_file_offset   ());
 317     Address exception_line_offset_addr(G2_thread, Thread::exception_line_offset   ());
 318 
 319     // set pending exception
 320     __ verify_oop(Oexception);
 321     __ st_ptr(Oexception, pending_exception_addr);
 322     __ set((intptr_t)__FILE__, temp_reg);
 323     __ st_ptr(temp_reg, exception_file_offset_addr);
 324     __ set((intptr_t)__LINE__, temp_reg);
 325     __ st(temp_reg, exception_line_offset_addr);
 326 
 327     // complete return to VM
 328     assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");
 329 
 330     AddressLiteral stub_ret(StubRoutines::_call_stub_return_address);
 331     __ jump_to(stub_ret, temp_reg);
 332     __ delayed()->nop();
 333 
 334     return start;
 335   }
 336 
 337 
 338   //----------------------------------------------------------------------------------------------------
 339   // Continuation point for runtime calls returning with a pending exception
 340   // The pending exception check happened in the runtime or native call stub
 341   // The pending exception in Thread is converted into a Java-level exception
 342   //
 343   // Contract with Java-level exception handler: O0 = exception
 344   //                                             O1 = throwing pc
 345 
 346   address generate_forward_exception() {
 347     StubCodeMark mark(this, "StubRoutines", "forward_exception");
 348     address start = __ pc();
 349 
 350     // Upon entry, O7 has the return address returning into Java
 351     // (interpreted or compiled) code; i.e. the return address
 352     // becomes the throwing pc.
 353 
 354     const Register& handler_reg = Gtemp;
 355 
 356     Address exception_addr(G2_thread, Thread::pending_exception_offset());
 357 
 358 #ifdef ASSERT
 359     // make sure that this code is only executed if there is a pending exception
 360     { Label L;
 361       __ ld_ptr(exception_addr, Gtemp);
 362       __ br_notnull(Gtemp, false, Assembler::pt, L);
 363       __ delayed()->nop();
 364       __ stop("StubRoutines::forward exception: no pending exception (1)");
 365       __ bind(L);
 366     }
 367 #endif
 368 
 369     // compute exception handler into handler_reg
 370     __ get_thread();
 371     __ ld_ptr(exception_addr, Oexception);
 372     __ verify_oop(Oexception);
 373     __ save_frame(0);             // compensates for compiler weakness
 374     __ add(O7->after_save(), frame::pc_return_offset, Lscratch); // save the issuing PC
 375     BLOCK_COMMENT("call exception_handler_for_return_address");
 376     __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), G2_thread, Lscratch);
 377     __ mov(O0, handler_reg);
 378     __ restore();                 // compensates for compiler weakness
 379 
 380     __ ld_ptr(exception_addr, Oexception);
 381     __ add(O7, frame::pc_return_offset, Oissuing_pc); // save the issuing PC
 382 
 383 #ifdef ASSERT
 384     // make sure exception is set
 385     { Label L;
 386       __ br_notnull(Oexception, false, Assembler::pt, L);
 387       __ delayed()->nop();
 388       __ stop("StubRoutines::forward exception: no pending exception (2)");
 389       __ bind(L);
 390     }
 391 #endif
 392     // jump to exception handler
 393     __ jmp(handler_reg, 0);
 394     // clear pending exception
 395     __ delayed()->st_ptr(G0, exception_addr);
 396 
 397     return start;
 398   }
 399 
 400 
 401   //------------------------------------------------------------------------------------------------------------------------
 402   // Continuation point for throwing of implicit exceptions that are not handled in
 403   // the current activation. Fabricates an exception oop and initiates normal
 404   // exception dispatching in this frame. Only callee-saved registers are preserved
 405   // (through the normal register window / RegisterMap handling).
 406   // If the compiler needs all registers to be preserved between the fault
 407   // point and the exception handler then it must assume responsibility for that in
 408   // AbstractCompiler::continuation_for_implicit_null_exception or
 409   // continuation_for_implicit_division_by_zero_exception. All other implicit
 410   // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are
 411   // either at call sites or otherwise assume that stack unwinding will be initiated,
 412   // so caller saved registers were assumed volatile in the compiler.
 413 
 414   // Note that we generate only this stub into a RuntimeStub, because it needs to be
 415   // properly traversed and ignored during GC, so we change the meaning of the "__"
 416   // macro within this method.
 417 #undef __
 418 #define __ masm->
 419 
 420   address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc) {
 421 #ifdef ASSERT
 422     int insts_size = VerifyThread ? 1 * K : 600;
 423 #else
 424     int insts_size = VerifyThread ? 1 * K : 256;
 425 #endif /* ASSERT */
 426     int locs_size  = 32;
 427 
 428     CodeBuffer      code(name, insts_size, locs_size);
 429     MacroAssembler* masm = new MacroAssembler(&code);
 430 
 431     __ verify_thread();
 432 
 433     // This is an inlined and slightly modified version of call_VM
 434     // which has the ability to fetch the return PC out of thread-local storage
 435     __ assert_not_delayed();
 436 
 437     // Note that we always push a frame because on the SPARC
 438     // architecture, for all of our implicit exception kinds at call
 439     // sites, the implicit exception is taken before the callee frame
 440     // is pushed.
 441     __ save_frame(0);
 442 
 443     int frame_complete = __ offset();
 444 
 445     if (restore_saved_exception_pc) {
 446       __ ld_ptr(G2_thread, JavaThread::saved_exception_pc_offset(), I7);
 447       __ sub(I7, frame::pc_return_offset, I7);
 448     }
 449 
 450     // Note that we always have a runtime stub frame on the top of stack by this point
 451     Register last_java_sp = SP;
 452     // 64-bit last_java_sp is biased!
 453     __ set_last_Java_frame(last_java_sp, G0);
 454     if (VerifyThread)  __ mov(G2_thread, O0); // about to be smashed; pass early
 455     __ save_thread(noreg);
 456     // do the call
 457     BLOCK_COMMENT("call runtime_entry");
 458     __ call(runtime_entry, relocInfo::runtime_call_type);
 459     if (!VerifyThread)
 460       __ delayed()->mov(G2_thread, O0);  // pass thread as first argument
 461     else
 462       __ delayed()->nop();             // (thread already passed)
 463     __ restore_thread(noreg);
 464     __ reset_last_Java_frame();
 465 
 466     // check for pending exceptions. use Gtemp as scratch register.
 467 #ifdef ASSERT
 468     Label L;
 469 
 470     Address exception_addr(G2_thread, Thread::pending_exception_offset());
 471     Register scratch_reg = Gtemp;
 472     __ ld_ptr(exception_addr, scratch_reg);
 473     __ br_notnull(scratch_reg, false, Assembler::pt, L);
 474     __ delayed()->nop();
 475     __ should_not_reach_here();
 476     __ bind(L);
 477 #endif // ASSERT
 478     BLOCK_COMMENT("call forward_exception_entry");
 479     __ call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
 480     // we use O7 linkage so that forward_exception_entry has the issuing PC
 481     __ delayed()->restore();
 482 
 483     RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, masm->total_frame_size_in_bytes(0), NULL, false);
 484     return stub->entry_point();
 485   }
 486 
 487 #undef __
 488 #define __ _masm->
 489 
 490 
 491   // Generate a routine that sets all the registers so we
 492   // can tell if the stop routine prints them correctly.
 493   address generate_test_stop() {
 494     StubCodeMark mark(this, "StubRoutines", "test_stop");
 495     address start = __ pc();
 496 
 497     int i;
 498 
 499     __ save_frame(0);
 500 
 501     static jfloat zero = 0.0, one = 1.0;
 502 
 503     // put addr in L0, then load through L0 to F0
 504     __ set((intptr_t)&zero, L0);  __ ldf( FloatRegisterImpl::S, L0, 0, F0);
 505     __ set((intptr_t)&one,  L0);  __ ldf( FloatRegisterImpl::S, L0, 0, F1); // 1.0 to F1
 506 
 507     // use add to put 2..18 in F2..F18
 508     for ( i = 2;  i <= 18;  ++i ) {
 509       __ fadd( FloatRegisterImpl::S, F1, as_FloatRegister(i-1),  as_FloatRegister(i));
 510     }
 511 
 512     // Now put double 2 in F16, double 18 in F18
 513     __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F2, F16 );
 514     __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F18, F18 );
 515 
 516     // use add to put 20..32 in F20..F32
 517     for (i = 20; i < 32; i += 2) {
 518       __ fadd( FloatRegisterImpl::D, F16, as_FloatRegister(i-2),  as_FloatRegister(i));
 519     }
 520 
 521     // put 0..7 in i's, 8..15 in l's, 16..23 in o's, 24..31 in g's
 522     for ( i = 0; i < 8; ++i ) {
 523       if (i < 6) {
 524         __ set(     i, as_iRegister(i));
 525         __ set(16 + i, as_oRegister(i));
 526         __ set(24 + i, as_gRegister(i));
 527       }
 528       __ set( 8 + i, as_lRegister(i));
 529     }
 530 
 531     __ stop("testing stop");
 532 
 533 
 534     __ ret();
 535     __ delayed()->restore();
 536 
 537     return start;
 538   }
 539 
 540 
 541   address generate_stop_subroutine() {
 542     StubCodeMark mark(this, "StubRoutines", "stop_subroutine");
 543     address start = __ pc();
 544 
 545     __ stop_subroutine();
 546 
 547     return start;
 548   }
 549 
 550   address generate_flush_callers_register_windows() {
 551     StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows");
 552     address start = __ pc();
 553 
 554     __ flush_windows();
 555     __ retl(false);
 556     __ delayed()->add( FP, STACK_BIAS, O0 );
 557     // The returned value must be a stack pointer whose register save area
 558     // is flushed, and will stay flushed while the caller executes.
 559 
 560     return start;
 561   }
 562 
 563   // Helper functions for v8 atomic operations.
 564   //
 565   void get_v8_oop_lock_ptr(Register lock_ptr_reg, Register mark_oop_reg, Register scratch_reg) {
 566     if (mark_oop_reg == noreg) {
 567       address lock_ptr = (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr();
 568       __ set((intptr_t)lock_ptr, lock_ptr_reg);
 569     } else {
 570       assert(scratch_reg != noreg, "just checking");
 571       address lock_ptr = (address)StubRoutines::Sparc::_v8_oop_lock_cache;
 572       __ set((intptr_t)lock_ptr, lock_ptr_reg);
 573       __ and3(mark_oop_reg, StubRoutines::Sparc::v8_oop_lock_mask_in_place, scratch_reg);
 574       __ add(lock_ptr_reg, scratch_reg, lock_ptr_reg);
 575     }
 576   }
 577 
 578   void generate_v8_lock_prologue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
 579 
 580     get_v8_oop_lock_ptr(lock_ptr_reg, mark_oop_reg, scratch_reg);
 581     __ set(StubRoutines::Sparc::locked, lock_reg);
 582     // Initialize yield counter
 583     __ mov(G0,yield_reg);
 584 
 585     __ BIND(retry);
 586     __ cmp(yield_reg, V8AtomicOperationUnderLockSpinCount);
 587     __ br(Assembler::less, false, Assembler::pt, dontyield);
 588     __ delayed()->nop();
 589 
 590     // This code can only be called from inside the VM, this
 591     // stub is only invoked from Atomic::add().  We do not
 592     // want to use call_VM, because _last_java_sp and such
 593     // must already be set.
 594     //
 595     // Save the regs and make space for a C call
 596     __ save(SP, -96, SP);
 597     __ save_all_globals_into_locals();
 598     BLOCK_COMMENT("call os::naked_sleep");
 599     __ call(CAST_FROM_FN_PTR(address, os::naked_sleep));
 600     __ delayed()->nop();
 601     __ restore_globals_from_locals();
 602     __ restore();
 603     // reset the counter
 604     __ mov(G0,yield_reg);
 605 
 606     __ BIND(dontyield);
 607 
 608     // try to get lock
 609     __ swap(lock_ptr_reg, 0, lock_reg);
 610 
 611     // did we get the lock?
 612     __ cmp(lock_reg, StubRoutines::Sparc::unlocked);
 613     __ br(Assembler::notEqual, true, Assembler::pn, retry);
 614     __ delayed()->add(yield_reg,1,yield_reg);
 615 
 616     // yes, got lock. do the operation here.
 617   }
 618 
 619   void generate_v8_lock_epilogue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
 620     __ st(lock_reg, lock_ptr_reg, 0); // unlock
 621   }
 622 
 623   // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
 624   //
 625   // Arguments :
 626   //
 627   //      exchange_value: O0
 628   //      dest:           O1
 629   //
 630   // Results:
 631   //
 632   //     O0: the value previously stored in dest
 633   //
 634   address generate_atomic_xchg() {
 635     StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
 636     address start = __ pc();
 637 
 638     if (UseCASForSwap) {
 639       // Use CAS instead of swap, just in case the MP hardware
 640       // prefers to work with just one kind of synch. instruction.
 641       Label retry;
 642       __ BIND(retry);
 643       __ mov(O0, O3);       // scratch copy of exchange value
 644       __ ld(O1, 0, O2);     // observe the previous value
 645       // try to replace O2 with O3
 646       __ cas_under_lock(O1, O2, O3,
 647       (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
 648       __ cmp(O2, O3);
 649       __ br(Assembler::notEqual, false, Assembler::pn, retry);
 650       __ delayed()->nop();
 651 
 652       __ retl(false);
 653       __ delayed()->mov(O2, O0);  // report previous value to caller
 654 
 655     } else {
 656       if (VM_Version::v9_instructions_work()) {
 657         __ retl(false);
 658         __ delayed()->swap(O1, 0, O0);
 659       } else {
 660         const Register& lock_reg = O2;
 661         const Register& lock_ptr_reg = O3;
 662         const Register& yield_reg = O4;
 663 
 664         Label retry;
 665         Label dontyield;
 666 
 667         generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
 668         // got the lock, do the swap
 669         __ swap(O1, 0, O0);
 670 
 671         generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
 672         __ retl(false);
 673         __ delayed()->nop();
 674       }
 675     }
 676 
 677     return start;
 678   }
 679 
 680 
 681   // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
 682   //
 683   // Arguments :
 684   //
 685   //      exchange_value: O0
 686   //      dest:           O1
 687   //      compare_value:  O2
 688   //
 689   // Results:
 690   //
 691   //     O0: the value previously stored in dest
 692   //
 693   // Overwrites (v8): O3,O4,O5
 694   //
 695   address generate_atomic_cmpxchg() {
 696     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
 697     address start = __ pc();
 698 
 699     // cmpxchg(dest, compare_value, exchange_value)
 700     __ cas_under_lock(O1, O2, O0,
 701       (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
 702     __ retl(false);
 703     __ delayed()->nop();
 704 
 705     return start;
 706   }
 707 
 708   // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
 709   //
 710   // Arguments :
 711   //
 712   //      exchange_value: O1:O0
 713   //      dest:           O2
 714   //      compare_value:  O4:O3
 715   //
 716   // Results:
 717   //
 718   //     O1:O0: the value previously stored in dest
 719   //
 720   // This only works on V9, on V8 we don't generate any
 721   // code and just return NULL.
 722   //
 723   // Overwrites: G1,G2,G3
 724   //
 725   address generate_atomic_cmpxchg_long() {
 726     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
 727     address start = __ pc();
 728 
 729     if (!VM_Version::supports_cx8())
 730         return NULL;;
 731     __ sllx(O0, 32, O0);
 732     __ srl(O1, 0, O1);
 733     __ or3(O0,O1,O0);      // O0 holds 64-bit value from compare_value
 734     __ sllx(O3, 32, O3);
 735     __ srl(O4, 0, O4);
 736     __ or3(O3,O4,O3);     // O3 holds 64-bit value from exchange_value
 737     __ casx(O2, O3, O0);
 738     __ srl(O0, 0, O1);    // unpacked return value in O1:O0
 739     __ retl(false);
 740     __ delayed()->srlx(O0, 32, O0);
 741 
 742     return start;
 743   }
 744 
 745 
 746   // Support for jint Atomic::add(jint add_value, volatile jint* dest).
 747   //
 748   // Arguments :
 749   //
 750   //      add_value: O0   (e.g., +1 or -1)
 751   //      dest:      O1
 752   //
 753   // Results:
 754   //
 755   //     O0: the new value stored in dest
 756   //
 757   // Overwrites (v9): O3
 758   // Overwrites (v8): O3,O4,O5
 759   //
 760   address generate_atomic_add() {
 761     StubCodeMark mark(this, "StubRoutines", "atomic_add");
 762     address start = __ pc();
 763     __ BIND(_atomic_add_stub);
 764 
 765     if (VM_Version::v9_instructions_work()) {
 766       Label(retry);
 767       __ BIND(retry);
 768 
 769       __ lduw(O1, 0, O2);
 770       __ add(O0,   O2, O3);
 771       __ cas(O1,   O2, O3);
 772       __ cmp(      O2, O3);
 773       __ br(Assembler::notEqual, false, Assembler::pn, retry);
 774       __ delayed()->nop();
 775       __ retl(false);
 776       __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
 777     } else {
 778       const Register& lock_reg = O2;
 779       const Register& lock_ptr_reg = O3;
 780       const Register& value_reg = O4;
 781       const Register& yield_reg = O5;
 782 
 783       Label(retry);
 784       Label(dontyield);
 785 
 786       generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
 787       // got lock, do the increment
 788       __ ld(O1, 0, value_reg);
 789       __ add(O0, value_reg, value_reg);
 790       __ st(value_reg, O1, 0);
 791 
 792       // %%% only for RMO and PSO
 793       __ membar(Assembler::StoreStore);
 794 
 795       generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
 796 
 797       __ retl(false);
 798       __ delayed()->mov(value_reg, O0);
 799     }
 800 
 801     return start;
 802   }
 803   Label _atomic_add_stub;  // called from other stubs
 804 
 805 
 806   //------------------------------------------------------------------------------------------------------------------------
 807   // The following routine generates a subroutine to throw an asynchronous
 808   // UnknownError when an unsafe access gets a fault that could not be
 809   // reasonably prevented by the programmer.  (Example: SIGBUS/OBJERR.)
 810   //
 811   // Arguments :
 812   //
 813   //      trapping PC:    O7
 814   //
 815   // Results:
 816   //     posts an asynchronous exception, skips the trapping instruction
 817   //
 818 
 819   address generate_handler_for_unsafe_access() {
 820     StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
 821     address start = __ pc();
 822 
 823     const int preserve_register_words = (64 * 2);
 824     Address preserve_addr(FP, (-preserve_register_words * wordSize) + STACK_BIAS);
 825 
 826     Register Lthread = L7_thread_cache;
 827     int i;
 828 
 829     __ save_frame(0);
 830     __ mov(G1, L1);
 831     __ mov(G2, L2);
 832     __ mov(G3, L3);
 833     __ mov(G4, L4);
 834     __ mov(G5, L5);
 835     for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
 836       __ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize);
 837     }
 838 
 839     address entry_point = CAST_FROM_FN_PTR(address, handle_unsafe_access);
 840     BLOCK_COMMENT("call handle_unsafe_access");
 841     __ call(entry_point, relocInfo::runtime_call_type);
 842     __ delayed()->nop();
 843 
 844     __ mov(L1, G1);
 845     __ mov(L2, G2);
 846     __ mov(L3, G3);
 847     __ mov(L4, G4);
 848     __ mov(L5, G5);
 849     for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
 850       __ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize);
 851     }
 852 
 853     __ verify_thread();
 854 
 855     __ jmp(O0, 0);
 856     __ delayed()->restore();
 857 
 858     return start;
 859   }
 860 
 861 
 862   // Support for uint StubRoutine::Sparc::partial_subtype_check( Klass sub, Klass super );
 863   // Arguments :
 864   //
 865   //      ret  : O0, returned
 866   //      icc/xcc: set as O0 (depending on wordSize)
 867   //      sub  : O1, argument, not changed
 868   //      super: O2, argument, not changed
 869   //      raddr: O7, blown by call
 870   address generate_partial_subtype_check() {
 871     __ align(CodeEntryAlignment);
 872     StubCodeMark mark(this, "StubRoutines", "partial_subtype_check");
 873     address start = __ pc();
 874     Label miss;
 875 
 876 #if defined(COMPILER2) && !defined(_LP64)
 877     // Do not use a 'save' because it blows the 64-bit O registers.
 878     __ add(SP,-4*wordSize,SP);  // Make space for 4 temps (stack must be 2 words aligned)
 879     __ st_ptr(L0,SP,(frame::register_save_words+0)*wordSize);
 880     __ st_ptr(L1,SP,(frame::register_save_words+1)*wordSize);
 881     __ st_ptr(L2,SP,(frame::register_save_words+2)*wordSize);
 882     __ st_ptr(L3,SP,(frame::register_save_words+3)*wordSize);
 883     Register Rret   = O0;
 884     Register Rsub   = O1;
 885     Register Rsuper = O2;
 886 #else
 887     __ save_frame(0);
 888     Register Rret   = I0;
 889     Register Rsub   = I1;
 890     Register Rsuper = I2;
 891 #endif
 892 
 893     Register L0_ary_len = L0;
 894     Register L1_ary_ptr = L1;
 895     Register L2_super   = L2;
 896     Register L3_index   = L3;
 897 
 898     __ check_klass_subtype_slow_path(Rsub, Rsuper,
 899                                      L0, L1, L2, L3,
 900                                      NULL, &miss);
 901 
 902     // Match falls through here.
 903     __ addcc(G0,0,Rret);        // set Z flags, Z result
 904 
 905 #if defined(COMPILER2) && !defined(_LP64)
 906     __ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
 907     __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
 908     __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
 909     __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
 910     __ retl();                  // Result in Rret is zero; flags set to Z
 911     __ delayed()->add(SP,4*wordSize,SP);
 912 #else
 913     __ ret();                   // Result in Rret is zero; flags set to Z
 914     __ delayed()->restore();
 915 #endif
 916 
 917     __ BIND(miss);
 918     __ addcc(G0,1,Rret);        // set NZ flags, NZ result
 919 
 920 #if defined(COMPILER2) && !defined(_LP64)
 921     __ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
 922     __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
 923     __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
 924     __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
 925     __ retl();                  // Result in Rret is != 0; flags set to NZ
 926     __ delayed()->add(SP,4*wordSize,SP);
 927 #else
 928     __ ret();                   // Result in Rret is != 0; flags set to NZ
 929     __ delayed()->restore();
 930 #endif
 931 
 932     return start;
 933   }
 934 
 935 
 936   // Called from MacroAssembler::verify_oop
 937   //
 938   address generate_verify_oop_subroutine() {
 939     StubCodeMark mark(this, "StubRoutines", "verify_oop_stub");
 940 
 941     address start = __ pc();
 942 
 943     __ verify_oop_subroutine();
 944 
 945     return start;
 946   }
 947 
 948   static address disjoint_byte_copy_entry;
 949   static address disjoint_short_copy_entry;
 950   static address disjoint_int_copy_entry;
 951   static address disjoint_long_copy_entry;
 952   static address disjoint_oop_copy_entry;
 953 
 954   static address byte_copy_entry;
 955   static address short_copy_entry;
 956   static address int_copy_entry;
 957   static address long_copy_entry;
 958   static address oop_copy_entry;
 959 
 960   static address checkcast_copy_entry;
 961 
 962   //
 963   // Verify that a register contains clean 32-bits positive value
 964   // (high 32-bits are 0) so it could be used in 64-bits shifts (sllx, srax).
 965   //
 966   //  Input:
 967   //    Rint  -  32-bits value
 968   //    Rtmp  -  scratch
 969   //
 970   void assert_clean_int(Register Rint, Register Rtmp) {
 971 #if defined(ASSERT) && defined(_LP64)
 972     __ signx(Rint, Rtmp);
 973     __ cmp(Rint, Rtmp);
 974     __ breakpoint_trap(Assembler::notEqual, Assembler::xcc);
 975 #endif
 976   }
 977 
 978   //
 979   //  Generate overlap test for array copy stubs
 980   //
 981   //  Input:
 982   //    O0    -  array1
 983   //    O1    -  array2
 984   //    O2    -  element count
 985   //
 986   //  Kills temps:  O3, O4
 987   //
 988   void array_overlap_test(address no_overlap_target, int log2_elem_size) {
 989     assert(no_overlap_target != NULL, "must be generated");
 990     array_overlap_test(no_overlap_target, NULL, log2_elem_size);
 991   }
 992   void array_overlap_test(Label& L_no_overlap, int log2_elem_size) {
 993     array_overlap_test(NULL, &L_no_overlap, log2_elem_size);
 994   }
 995   void array_overlap_test(address no_overlap_target, Label* NOLp, int log2_elem_size) {
 996     const Register from       = O0;
 997     const Register to         = O1;
 998     const Register count      = O2;
 999     const Register to_from    = O3; // to - from
1000     const Register byte_count = O4; // count << log2_elem_size
1001 
1002       __ subcc(to, from, to_from);
1003       __ sll_ptr(count, log2_elem_size, byte_count);
1004       if (NOLp == NULL)
1005         __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, no_overlap_target);
1006       else
1007         __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, (*NOLp));
1008       __ delayed()->cmp(to_from, byte_count);
1009       if (NOLp == NULL)
1010         __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, no_overlap_target);
1011       else
1012         __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, (*NOLp));
1013       __ delayed()->nop();
1014   }
1015 
1016   //
1017   //  Generate pre-write barrier for array.
1018   //
1019   //  Input:
1020   //     addr     - register containing starting address
1021   //     count    - register containing element count
1022   //     tmp      - scratch register
1023   //
1024   //  The input registers are overwritten.
1025   //
1026   void gen_write_ref_array_pre_barrier(Register addr, Register count) {
1027     BarrierSet* bs = Universe::heap()->barrier_set();
1028     if (bs->has_write_ref_pre_barrier()) {
1029       assert(bs->has_write_ref_array_pre_opt(),
1030              "Else unsupported barrier set.");
1031 
1032       __ save_frame(0);
1033       // Save the necessary global regs... will be used after.
1034       if (addr->is_global()) {
1035         __ mov(addr, L0);
1036       }
1037       if (count->is_global()) {
1038         __ mov(count, L1);
1039       }
1040       __ mov(addr->after_save(), O0);
1041       // Get the count into O1
1042       __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre));
1043       __ delayed()->mov(count->after_save(), O1);
1044       if (addr->is_global()) {
1045         __ mov(L0, addr);
1046       }
1047       if (count->is_global()) {
1048         __ mov(L1, count);
1049       }
1050       __ restore();
1051     }
1052   }
1053   //
1054   //  Generate post-write barrier for array.
1055   //
1056   //  Input:
1057   //     addr     - register containing starting address
1058   //     count    - register containing element count
1059   //     tmp      - scratch register
1060   //
1061   //  The input registers are overwritten.
1062   //
1063   void gen_write_ref_array_post_barrier(Register addr, Register count,
1064                                    Register tmp) {
1065     BarrierSet* bs = Universe::heap()->barrier_set();
1066 
1067     switch (bs->kind()) {
1068       case BarrierSet::G1SATBCT:
1069       case BarrierSet::G1SATBCTLogging:
1070         {
1071           // Get some new fresh output registers.
1072           __ save_frame(0);
1073           __ mov(addr->after_save(), O0);
1074           __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post));
1075           __ delayed()->mov(count->after_save(), O1);
1076           __ restore();
1077         }
1078         break;
1079       case BarrierSet::CardTableModRef:
1080       case BarrierSet::CardTableExtension:
1081         {
1082           CardTableModRefBS* ct = (CardTableModRefBS*)bs;
1083           assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
1084           assert_different_registers(addr, count, tmp);
1085 
1086           Label L_loop;
1087 
1088           __ sll_ptr(count, LogBytesPerHeapOop, count);
1089           __ sub(count, BytesPerHeapOop, count);
1090           __ add(count, addr, count);
1091           // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
1092           __ srl_ptr(addr, CardTableModRefBS::card_shift, addr);
1093           __ srl_ptr(count, CardTableModRefBS::card_shift, count);
1094           __ sub(count, addr, count);
1095           AddressLiteral rs(ct->byte_map_base);
1096           __ set(rs, tmp);
1097         __ BIND(L_loop);
1098           __ stb(G0, tmp, addr);
1099           __ subcc(count, 1, count);
1100           __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
1101           __ delayed()->add(addr, 1, addr);
1102         }
1103         break;
1104       case BarrierSet::ModRef:
1105         break;
1106       default:
1107         ShouldNotReachHere();
1108     }
1109   }
1110 
1111 
1112   // Copy big chunks forward with shift
1113   //
1114   // Inputs:
1115   //   from      - source arrays
1116   //   to        - destination array aligned to 8-bytes
1117   //   count     - elements count to copy >= the count equivalent to 16 bytes
1118   //   count_dec - elements count's decrement equivalent to 16 bytes
1119   //   L_copy_bytes - copy exit label
1120   //
1121   void copy_16_bytes_forward_with_shift(Register from, Register to,
1122                      Register count, int count_dec, Label& L_copy_bytes) {
1123     Label L_loop, L_aligned_copy, L_copy_last_bytes;
1124 
1125     // if both arrays have the same alignment mod 8, do 8 bytes aligned copy
1126       __ andcc(from, 7, G1); // misaligned bytes
1127       __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
1128       __ delayed()->nop();
1129 
1130     const Register left_shift  = G1; // left  shift bit counter
1131     const Register right_shift = G5; // right shift bit counter
1132 
1133       __ sll(G1, LogBitsPerByte, left_shift);
1134       __ mov(64, right_shift);
1135       __ sub(right_shift, left_shift, right_shift);
1136 
1137     //
1138     // Load 2 aligned 8-bytes chunks and use one from previous iteration
1139     // to form 2 aligned 8-bytes chunks to store.
1140     //
1141       __ deccc(count, count_dec); // Pre-decrement 'count'
1142       __ andn(from, 7, from);     // Align address
1143       __ ldx(from, 0, O3);
1144       __ inc(from, 8);
1145       __ align(OptoLoopAlignment);
1146     __ BIND(L_loop);
1147       __ ldx(from, 0, O4);
1148       __ deccc(count, count_dec); // Can we do next iteration after this one?
1149       __ ldx(from, 8, G4);
1150       __ inc(to, 16);
1151       __ inc(from, 16);
1152       __ sllx(O3, left_shift,  O3);
1153       __ srlx(O4, right_shift, G3);
1154       __ bset(G3, O3);
1155       __ stx(O3, to, -16);
1156       __ sllx(O4, left_shift,  O4);
1157       __ srlx(G4, right_shift, G3);
1158       __ bset(G3, O4);
1159       __ stx(O4, to, -8);
1160       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
1161       __ delayed()->mov(G4, O3);
1162 
1163       __ inccc(count, count_dec>>1 ); // + 8 bytes
1164       __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
1165       __ delayed()->inc(count, count_dec>>1); // restore 'count'
1166 
1167       // copy 8 bytes, part of them already loaded in O3
1168       __ ldx(from, 0, O4);
1169       __ inc(to, 8);
1170       __ inc(from, 8);
1171       __ sllx(O3, left_shift,  O3);
1172       __ srlx(O4, right_shift, G3);
1173       __ bset(O3, G3);
1174       __ stx(G3, to, -8);
1175 
1176     __ BIND(L_copy_last_bytes);
1177       __ srl(right_shift, LogBitsPerByte, right_shift); // misaligned bytes
1178       __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
1179       __ delayed()->sub(from, right_shift, from);       // restore address
1180 
1181     __ BIND(L_aligned_copy);
1182   }
1183 
1184   // Copy big chunks backward with shift
1185   //
1186   // Inputs:
1187   //   end_from  - source arrays end address
1188   //   end_to    - destination array end address aligned to 8-bytes
1189   //   count     - elements count to copy >= the count equivalent to 16 bytes
1190   //   count_dec - elements count's decrement equivalent to 16 bytes
1191   //   L_aligned_copy - aligned copy exit label
1192   //   L_copy_bytes   - copy exit label
1193   //
1194   void copy_16_bytes_backward_with_shift(Register end_from, Register end_to,
1195                      Register count, int count_dec,
1196                      Label& L_aligned_copy, Label& L_copy_bytes) {
1197     Label L_loop, L_copy_last_bytes;
1198 
1199     // if both arrays have the same alignment mod 8, do 8 bytes aligned copy
1200       __ andcc(end_from, 7, G1); // misaligned bytes
1201       __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
1202       __ delayed()->deccc(count, count_dec); // Pre-decrement 'count'
1203 
1204     const Register left_shift  = G1; // left  shift bit counter
1205     const Register right_shift = G5; // right shift bit counter
1206 
1207       __ sll(G1, LogBitsPerByte, left_shift);
1208       __ mov(64, right_shift);
1209       __ sub(right_shift, left_shift, right_shift);
1210 
1211     //
1212     // Load 2 aligned 8-bytes chunks and use one from previous iteration
1213     // to form 2 aligned 8-bytes chunks to store.
1214     //
1215       __ andn(end_from, 7, end_from);     // Align address
1216       __ ldx(end_from, 0, O3);
1217       __ align(OptoLoopAlignment);
1218     __ BIND(L_loop);
1219       __ ldx(end_from, -8, O4);
1220       __ deccc(count, count_dec); // Can we do next iteration after this one?
1221       __ ldx(end_from, -16, G4);
1222       __ dec(end_to, 16);
1223       __ dec(end_from, 16);
1224       __ srlx(O3, right_shift, O3);
1225       __ sllx(O4, left_shift,  G3);
1226       __ bset(G3, O3);
1227       __ stx(O3, end_to, 8);
1228       __ srlx(O4, right_shift, O4);
1229       __ sllx(G4, left_shift,  G3);
1230       __ bset(G3, O4);
1231       __ stx(O4, end_to, 0);
1232       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
1233       __ delayed()->mov(G4, O3);
1234 
1235       __ inccc(count, count_dec>>1 ); // + 8 bytes
1236       __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
1237       __ delayed()->inc(count, count_dec>>1); // restore 'count'
1238 
1239       // copy 8 bytes, part of them already loaded in O3
1240       __ ldx(end_from, -8, O4);
1241       __ dec(end_to, 8);
1242       __ dec(end_from, 8);
1243       __ srlx(O3, right_shift, O3);
1244       __ sllx(O4, left_shift,  G3);
1245       __ bset(O3, G3);
1246       __ stx(G3, end_to, 0);
1247 
1248     __ BIND(L_copy_last_bytes);
1249       __ srl(left_shift, LogBitsPerByte, left_shift);    // misaligned bytes
1250       __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
1251       __ delayed()->add(end_from, left_shift, end_from); // restore address
1252   }
1253 
1254   //
1255   //  Generate stub for disjoint byte copy.  If "aligned" is true, the
1256   //  "from" and "to" addresses are assumed to be heapword aligned.
1257   //
1258   // Arguments for generated stub:
1259   //      from:  O0
1260   //      to:    O1
1261   //      count: O2 treated as signed
1262   //
1263   address generate_disjoint_byte_copy(bool aligned, const char * name) {
1264     __ align(CodeEntryAlignment);
1265     StubCodeMark mark(this, "StubRoutines", name);
1266     address start = __ pc();
1267 
1268     Label L_skip_alignment, L_align;
1269     Label L_copy_byte, L_copy_byte_loop, L_exit;
1270 
1271     const Register from      = O0;   // source array address
1272     const Register to        = O1;   // destination array address
1273     const Register count     = O2;   // elements count
1274     const Register offset    = O5;   // offset from start of arrays
1275     // O3, O4, G3, G4 are used as temp registers
1276 
1277     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1278 
1279     if (!aligned)  disjoint_byte_copy_entry = __ pc();
1280     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1281     if (!aligned)  BLOCK_COMMENT("Entry:");
1282 
1283     // for short arrays, just do single element copy
1284     __ cmp(count, 23); // 16 + 7
1285     __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
1286     __ delayed()->mov(G0, offset);
1287 
1288     if (aligned) {
1289       // 'aligned' == true when it is known statically during compilation
1290       // of this arraycopy call site that both 'from' and 'to' addresses
1291       // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
1292       //
1293       // Aligned arrays have 4 bytes alignment in 32-bits VM
1294       // and 8 bytes - in 64-bits VM. So we do it only for 32-bits VM
1295       //
1296 #ifndef _LP64
1297       // copy a 4-bytes word if necessary to align 'to' to 8 bytes
1298       __ andcc(to, 7, G0);
1299       __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment);
1300       __ delayed()->ld(from, 0, O3);
1301       __ inc(from, 4);
1302       __ inc(to, 4);
1303       __ dec(count, 4);
1304       __ st(O3, to, -4);
1305     __ BIND(L_skip_alignment);
1306 #endif
1307     } else {
1308       // copy bytes to align 'to' on 8 byte boundary
1309       __ andcc(to, 7, G1); // misaligned bytes
1310       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1311       __ delayed()->neg(G1);
1312       __ inc(G1, 8);       // bytes need to copy to next 8-bytes alignment
1313       __ sub(count, G1, count);
1314     __ BIND(L_align);
1315       __ ldub(from, 0, O3);
1316       __ deccc(G1);
1317       __ inc(from);
1318       __ stb(O3, to, 0);
1319       __ br(Assembler::notZero, false, Assembler::pt, L_align);
1320       __ delayed()->inc(to);
1321     __ BIND(L_skip_alignment);
1322     }
1323 #ifdef _LP64
1324     if (!aligned)
1325 #endif
1326     {
1327       // Copy with shift 16 bytes per iteration if arrays do not have
1328       // the same alignment mod 8, otherwise fall through to the next
1329       // code for aligned copy.
1330       // The compare above (count >= 23) guarantes 'count' >= 16 bytes.
1331       // Also jump over aligned copy after the copy with shift completed.
1332 
1333       copy_16_bytes_forward_with_shift(from, to, count, 16, L_copy_byte);
1334     }
1335 
1336     // Both array are 8 bytes aligned, copy 16 bytes at a time
1337       __ and3(count, 7, G4); // Save count
1338       __ srl(count, 3, count);
1339      generate_disjoint_long_copy_core(aligned);
1340       __ mov(G4, count);     // Restore count
1341 
1342     // copy tailing bytes
1343     __ BIND(L_copy_byte);
1344       __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
1345       __ delayed()->nop();
1346       __ align(OptoLoopAlignment);
1347     __ BIND(L_copy_byte_loop);
1348       __ ldub(from, offset, O3);
1349       __ deccc(count);
1350       __ stb(O3, to, offset);
1351       __ brx(Assembler::notZero, false, Assembler::pt, L_copy_byte_loop);
1352       __ delayed()->inc(offset);
1353 
1354     __ BIND(L_exit);
1355       // O3, O4 are used as temp registers
1356       inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
1357       __ retl();
1358       __ delayed()->mov(G0, O0); // return 0
1359     return start;
1360   }
1361 
1362   //
1363   //  Generate stub for conjoint byte copy.  If "aligned" is true, the
1364   //  "from" and "to" addresses are assumed to be heapword aligned.
1365   //
1366   // Arguments for generated stub:
1367   //      from:  O0
1368   //      to:    O1
1369   //      count: O2 treated as signed
1370   //
1371   address generate_conjoint_byte_copy(bool aligned, const char * name) {
1372     // Do reverse copy.
1373 
1374     __ align(CodeEntryAlignment);
1375     StubCodeMark mark(this, "StubRoutines", name);
1376     address start = __ pc();
1377     address nooverlap_target = aligned ?
1378         StubRoutines::arrayof_jbyte_disjoint_arraycopy() :
1379         disjoint_byte_copy_entry;
1380 
1381     Label L_skip_alignment, L_align, L_aligned_copy;
1382     Label L_copy_byte, L_copy_byte_loop, L_exit;
1383 
1384     const Register from      = O0;   // source array address
1385     const Register to        = O1;   // destination array address
1386     const Register count     = O2;   // elements count
1387     const Register end_from  = from; // source array end address
1388     const Register end_to    = to;   // destination array end address
1389 
1390     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1391 
1392     if (!aligned)  byte_copy_entry = __ pc();
1393     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1394     if (!aligned)  BLOCK_COMMENT("Entry:");
1395 
1396     array_overlap_test(nooverlap_target, 0);
1397 
1398     __ add(to, count, end_to);       // offset after last copied element
1399 
1400     // for short arrays, just do single element copy
1401     __ cmp(count, 23); // 16 + 7
1402     __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
1403     __ delayed()->add(from, count, end_from);
1404 
1405     {
1406       // Align end of arrays since they could be not aligned even
1407       // when arrays itself are aligned.
1408 
1409       // copy bytes to align 'end_to' on 8 byte boundary
1410       __ andcc(end_to, 7, G1); // misaligned bytes
1411       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1412       __ delayed()->nop();
1413       __ sub(count, G1, count);
1414     __ BIND(L_align);
1415       __ dec(end_from);
1416       __ dec(end_to);
1417       __ ldub(end_from, 0, O3);
1418       __ deccc(G1);
1419       __ brx(Assembler::notZero, false, Assembler::pt, L_align);
1420       __ delayed()->stb(O3, end_to, 0);
1421     __ BIND(L_skip_alignment);
1422     }
1423 #ifdef _LP64
1424     if (aligned) {
1425       // Both arrays are aligned to 8-bytes in 64-bits VM.
1426       // The 'count' is decremented in copy_16_bytes_backward_with_shift()
1427       // in unaligned case.
1428       __ dec(count, 16);
1429     } else
1430 #endif
1431     {
1432       // Copy with shift 16 bytes per iteration if arrays do not have
1433       // the same alignment mod 8, otherwise jump to the next
1434       // code for aligned copy (and substracting 16 from 'count' before jump).
1435       // The compare above (count >= 11) guarantes 'count' >= 16 bytes.
1436       // Also jump over aligned copy after the copy with shift completed.
1437 
1438       copy_16_bytes_backward_with_shift(end_from, end_to, count, 16,
1439                                         L_aligned_copy, L_copy_byte);
1440     }
1441     // copy 4 elements (16 bytes) at a time
1442       __ align(OptoLoopAlignment);
1443     __ BIND(L_aligned_copy);
1444       __ dec(end_from, 16);
1445       __ ldx(end_from, 8, O3);
1446       __ ldx(end_from, 0, O4);
1447       __ dec(end_to, 16);
1448       __ deccc(count, 16);
1449       __ stx(O3, end_to, 8);
1450       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
1451       __ delayed()->stx(O4, end_to, 0);
1452       __ inc(count, 16);
1453 
1454     // copy 1 element (2 bytes) at a time
1455     __ BIND(L_copy_byte);
1456       __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
1457       __ delayed()->nop();
1458       __ align(OptoLoopAlignment);
1459     __ BIND(L_copy_byte_loop);
1460       __ dec(end_from);
1461       __ dec(end_to);
1462       __ ldub(end_from, 0, O4);
1463       __ deccc(count);
1464       __ brx(Assembler::greater, false, Assembler::pt, L_copy_byte_loop);
1465       __ delayed()->stb(O4, end_to, 0);
1466 
1467     __ BIND(L_exit);
1468     // O3, O4 are used as temp registers
1469     inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
1470     __ retl();
1471     __ delayed()->mov(G0, O0); // return 0
1472     return start;
1473   }
1474 
1475   //
1476   //  Generate stub for disjoint short copy.  If "aligned" is true, the
1477   //  "from" and "to" addresses are assumed to be heapword aligned.
1478   //
1479   // Arguments for generated stub:
1480   //      from:  O0
1481   //      to:    O1
1482   //      count: O2 treated as signed
1483   //
1484   address generate_disjoint_short_copy(bool aligned, const char * name) {
1485     __ align(CodeEntryAlignment);
1486     StubCodeMark mark(this, "StubRoutines", name);
1487     address start = __ pc();
1488 
1489     Label L_skip_alignment, L_skip_alignment2;
1490     Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit;
1491 
1492     const Register from      = O0;   // source array address
1493     const Register to        = O1;   // destination array address
1494     const Register count     = O2;   // elements count
1495     const Register offset    = O5;   // offset from start of arrays
1496     // O3, O4, G3, G4 are used as temp registers
1497 
1498     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1499 
1500     if (!aligned)  disjoint_short_copy_entry = __ pc();
1501     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1502     if (!aligned)  BLOCK_COMMENT("Entry:");
1503 
1504     // for short arrays, just do single element copy
1505     __ cmp(count, 11); // 8 + 3  (22 bytes)
1506     __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
1507     __ delayed()->mov(G0, offset);
1508 
1509     if (aligned) {
1510       // 'aligned' == true when it is known statically during compilation
1511       // of this arraycopy call site that both 'from' and 'to' addresses
1512       // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
1513       //
1514       // Aligned arrays have 4 bytes alignment in 32-bits VM
1515       // and 8 bytes - in 64-bits VM.
1516       //
1517 #ifndef _LP64
1518       // copy a 2-elements word if necessary to align 'to' to 8 bytes
1519       __ andcc(to, 7, G0);
1520       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1521       __ delayed()->ld(from, 0, O3);
1522       __ inc(from, 4);
1523       __ inc(to, 4);
1524       __ dec(count, 2);
1525       __ st(O3, to, -4);
1526     __ BIND(L_skip_alignment);
1527 #endif
1528     } else {
1529       // copy 1 element if necessary to align 'to' on an 4 bytes
1530       __ andcc(to, 3, G0);
1531       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1532       __ delayed()->lduh(from, 0, O3);
1533       __ inc(from, 2);
1534       __ inc(to, 2);
1535       __ dec(count);
1536       __ sth(O3, to, -2);
1537     __ BIND(L_skip_alignment);
1538 
1539       // copy 2 elements to align 'to' on an 8 byte boundary
1540       __ andcc(to, 7, G0);
1541       __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2);
1542       __ delayed()->lduh(from, 0, O3);
1543       __ dec(count, 2);
1544       __ lduh(from, 2, O4);
1545       __ inc(from, 4);
1546       __ inc(to, 4);
1547       __ sth(O3, to, -4);
1548       __ sth(O4, to, -2);
1549     __ BIND(L_skip_alignment2);
1550     }
1551 #ifdef _LP64
1552     if (!aligned)
1553 #endif
1554     {
1555       // Copy with shift 16 bytes per iteration if arrays do not have
1556       // the same alignment mod 8, otherwise fall through to the next
1557       // code for aligned copy.
1558       // The compare above (count >= 11) guarantes 'count' >= 16 bytes.
1559       // Also jump over aligned copy after the copy with shift completed.
1560 
1561       copy_16_bytes_forward_with_shift(from, to, count, 8, L_copy_2_bytes);
1562     }
1563 
1564     // Both array are 8 bytes aligned, copy 16 bytes at a time
1565       __ and3(count, 3, G4); // Save
1566       __ srl(count, 2, count);
1567      generate_disjoint_long_copy_core(aligned);
1568       __ mov(G4, count); // restore
1569 
1570     // copy 1 element at a time
1571     __ BIND(L_copy_2_bytes);
1572       __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
1573       __ delayed()->nop();
1574       __ align(OptoLoopAlignment);
1575     __ BIND(L_copy_2_bytes_loop);
1576       __ lduh(from, offset, O3);
1577       __ deccc(count);
1578       __ sth(O3, to, offset);
1579       __ brx(Assembler::notZero, false, Assembler::pt, L_copy_2_bytes_loop);
1580       __ delayed()->inc(offset, 2);
1581 
1582     __ BIND(L_exit);
1583       // O3, O4 are used as temp registers
1584       inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
1585       __ retl();
1586       __ delayed()->mov(G0, O0); // return 0
1587     return start;
1588   }
1589 
1590   //
1591   //  Generate stub for disjoint short fill.  If "aligned" is true, the
1592   //  "to" address is assumed to be heapword aligned.
1593   //
1594   // Arguments for generated stub:
1595   //      to:    O0
1596   //      value: O1
1597   //      count: O2 treated as signed
1598   //
1599   address generate_fill(BasicType t, bool aligned, const char* name) {
1600     __ align(CodeEntryAlignment);
1601     StubCodeMark mark(this, "StubRoutines", name);
1602     address start = __ pc();
1603 
1604     const Register to        = O0;   // source array address
1605     const Register value     = O1;   // fill value
1606     const Register count     = O2;   // elements count
1607     // O3 is used as a temp register
1608 
1609     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1610 
1611     Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
1612     Label L_fill_2_bytes, L_fill_elements, L_fill_32_bytes;
1613 
1614     int shift = -1;
1615     switch (t) {
1616        case T_BYTE:
1617         shift = 2;
1618         break;
1619        case T_SHORT:
1620         shift = 1;
1621         break;
1622       case T_INT:
1623          shift = 0;
1624         break;
1625       default: ShouldNotReachHere();
1626     }
1627 
1628     BLOCK_COMMENT("Entry:");
1629 
1630     if (t == T_BYTE) {
1631       // Zero extend value
1632       __ and3(value, 0xff, value);
1633       __ sllx(value, 8, O3);
1634       __ or3(value, O3, value);
1635     }
1636     if (t == T_SHORT) {
1637       // Zero extend value
1638       __ sllx(value, 48, value);
1639       __ srlx(value, 48, value);
1640     }
1641     if (t == T_BYTE || t == T_SHORT) {
1642       __ sllx(value, 16, O3);
1643       __ or3(value, O3, value);
1644     }
1645 
1646     __ cmp(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
1647     __ brx(Assembler::lessUnsigned, false, Assembler::pn, L_fill_elements); // use unsigned cmp
1648     __ delayed()->andcc(count, 1, G0);
1649 
1650     if (!aligned && (t == T_BYTE || t == T_SHORT)) {
1651       // align source address at 4 bytes address boundary
1652       if (t == T_BYTE) {
1653         // One byte misalignment happens only for byte arrays
1654         __ andcc(to, 1, G0);
1655         __ br(Assembler::zero, false, Assembler::pt, L_skip_align1);
1656         __ delayed()->nop();
1657         __ stb(value, to, 0);
1658         __ inc(to, 1);
1659         __ dec(count, 1);
1660         __ BIND(L_skip_align1);
1661       }
1662       // Two bytes misalignment happens only for byte and short (char) arrays
1663       __ andcc(to, 2, G0);
1664       __ br(Assembler::zero, false, Assembler::pt, L_skip_align2);
1665       __ delayed()->nop();
1666       __ sth(value, to, 0);
1667       __ inc(to, 2);
1668       __ dec(count, 1 << (shift - 1));
1669       __ BIND(L_skip_align2);
1670     }
1671 #ifdef _LP64
1672     if (!aligned) {
1673 #endif
1674     // align to 8 bytes, we know we are 4 byte aligned to start
1675     __ andcc(to, 7, G0);
1676     __ br(Assembler::zero, false, Assembler::pt, L_fill_32_bytes);
1677     __ delayed()->nop();
1678     __ stw(value, to, 0);
1679     __ inc(to, 4);
1680     __ dec(count, 1 << shift);
1681     __ BIND(L_fill_32_bytes);
1682 #ifdef _LP64
1683     }
1684 #endif
1685 
1686     if (t == T_INT) {
1687       // Zero extend value
1688       __ srl(value, 0, value);
1689     }
1690     if (t == T_BYTE || t == T_SHORT || t == T_INT) {
1691       __ sllx(value, 32, O3);
1692       __ or3(value, O3, value);
1693     }
1694 
1695     Label L_check_fill_8_bytes;
1696     // Fill 32-byte chunks
1697     __ subcc(count, 8 << shift, count);
1698     __ brx(Assembler::less, false, Assembler::pt, L_check_fill_8_bytes);
1699     __ delayed()->nop();
1700 
1701     Label L_fill_32_bytes_loop, L_fill_4_bytes;
1702     __ align(16);
1703     __ BIND(L_fill_32_bytes_loop);
1704 
1705     __ stx(value, to, 0);
1706     __ stx(value, to, 8);
1707     __ stx(value, to, 16);
1708     __ stx(value, to, 24);
1709 
1710     __ subcc(count, 8 << shift, count);
1711     __ brx(Assembler::greaterEqual, false, Assembler::pt, L_fill_32_bytes_loop);
1712     __ delayed()->add(to, 32, to);
1713 
1714     __ BIND(L_check_fill_8_bytes);
1715     __ addcc(count, 8 << shift, count);
1716     __ brx(Assembler::zero, false, Assembler::pn, L_exit);
1717     __ delayed()->subcc(count, 1 << (shift + 1), count);
1718     __ brx(Assembler::less, false, Assembler::pn, L_fill_4_bytes);
1719     __ delayed()->andcc(count, 1<<shift, G0);
1720 
1721     //
1722     // length is too short, just fill 8 bytes at a time
1723     //
1724     Label L_fill_8_bytes_loop;
1725     __ BIND(L_fill_8_bytes_loop);
1726     __ stx(value, to, 0);
1727     __ subcc(count, 1 << (shift + 1), count);
1728     __ brx(Assembler::greaterEqual, false, Assembler::pn, L_fill_8_bytes_loop);
1729     __ delayed()->add(to, 8, to);
1730 
1731     // fill trailing 4 bytes
1732     __ andcc(count, 1<<shift, G0);  // in delay slot of branches
1733     if (t == T_INT) {
1734       __ BIND(L_fill_elements);
1735     }
1736     __ BIND(L_fill_4_bytes);
1737     __ brx(Assembler::zero, false, Assembler::pt, L_fill_2_bytes);
1738     if (t == T_BYTE || t == T_SHORT) {
1739       __ delayed()->andcc(count, 1<<(shift-1), G0);
1740     } else {
1741       __ delayed()->nop();
1742     }
1743     __ stw(value, to, 0);
1744     if (t == T_BYTE || t == T_SHORT) {
1745       __ inc(to, 4);
1746       // fill trailing 2 bytes
1747       __ andcc(count, 1<<(shift-1), G0); // in delay slot of branches
1748       __ BIND(L_fill_2_bytes);
1749       __ brx(Assembler::zero, false, Assembler::pt, L_fill_byte);
1750       __ delayed()->andcc(count, 1, count);
1751       __ sth(value, to, 0);
1752       if (t == T_BYTE) {
1753         __ inc(to, 2);
1754         // fill trailing byte
1755         __ andcc(count, 1, count);  // in delay slot of branches
1756         __ BIND(L_fill_byte);
1757         __ brx(Assembler::zero, false, Assembler::pt, L_exit);
1758         __ delayed()->nop();
1759         __ stb(value, to, 0);
1760       } else {
1761         __ BIND(L_fill_byte);
1762       }
1763     } else {
1764       __ BIND(L_fill_2_bytes);
1765     }
1766     __ BIND(L_exit);
1767     __ retl();
1768     __ delayed()->nop();
1769 
1770     // Handle copies less than 8 bytes.  Int is handled elsewhere.
1771     if (t == T_BYTE) {
1772       __ BIND(L_fill_elements);
1773       Label L_fill_2, L_fill_4;
1774       // in delay slot __ andcc(count, 1, G0);
1775       __ brx(Assembler::zero, false, Assembler::pt, L_fill_2);
1776       __ delayed()->andcc(count, 2, G0);
1777       __ stb(value, to, 0);
1778       __ inc(to, 1);
1779       __ BIND(L_fill_2);
1780       __ brx(Assembler::zero, false, Assembler::pt, L_fill_4);
1781       __ delayed()->andcc(count, 4, G0);
1782       __ stb(value, to, 0);
1783       __ stb(value, to, 1);
1784       __ inc(to, 2);
1785       __ BIND(L_fill_4);
1786       __ brx(Assembler::zero, false, Assembler::pt, L_exit);
1787       __ delayed()->nop();
1788       __ stb(value, to, 0);
1789       __ stb(value, to, 1);
1790       __ stb(value, to, 2);
1791       __ retl();
1792       __ delayed()->stb(value, to, 3);
1793     }
1794 
1795     if (t == T_SHORT) {
1796       Label L_fill_2;
1797       __ BIND(L_fill_elements);
1798       // in delay slot __ andcc(count, 1, G0);
1799       __ brx(Assembler::zero, false, Assembler::pt, L_fill_2);
1800       __ delayed()->andcc(count, 2, G0);
1801       __ sth(value, to, 0);
1802       __ inc(to, 2);
1803       __ BIND(L_fill_2);
1804       __ brx(Assembler::zero, false, Assembler::pt, L_exit);
1805       __ delayed()->nop();
1806       __ sth(value, to, 0);
1807       __ retl();
1808       __ delayed()->sth(value, to, 2);
1809     }
1810     return start;
1811   }
1812 
1813   //
1814   //  Generate stub for conjoint short copy.  If "aligned" is true, the
1815   //  "from" and "to" addresses are assumed to be heapword aligned.
1816   //
1817   // Arguments for generated stub:
1818   //      from:  O0
1819   //      to:    O1
1820   //      count: O2 treated as signed
1821   //
1822   address generate_conjoint_short_copy(bool aligned, const char * name) {
1823     // Do reverse copy.
1824 
1825     __ align(CodeEntryAlignment);
1826     StubCodeMark mark(this, "StubRoutines", name);
1827     address start = __ pc();
1828     address nooverlap_target = aligned ?
1829         StubRoutines::arrayof_jshort_disjoint_arraycopy() :
1830         disjoint_short_copy_entry;
1831 
1832     Label L_skip_alignment, L_skip_alignment2, L_aligned_copy;
1833     Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit;
1834 
1835     const Register from      = O0;   // source array address
1836     const Register to        = O1;   // destination array address
1837     const Register count     = O2;   // elements count
1838     const Register end_from  = from; // source array end address
1839     const Register end_to    = to;   // destination array end address
1840 
1841     const Register byte_count = O3;  // bytes count to copy
1842 
1843     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1844 
1845     if (!aligned)  short_copy_entry = __ pc();
1846     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1847     if (!aligned)  BLOCK_COMMENT("Entry:");
1848 
1849     array_overlap_test(nooverlap_target, 1);
1850 
1851     __ sllx(count, LogBytesPerShort, byte_count);
1852     __ add(to, byte_count, end_to);  // offset after last copied element
1853 
1854     // for short arrays, just do single element copy
1855     __ cmp(count, 11); // 8 + 3  (22 bytes)
1856     __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
1857     __ delayed()->add(from, byte_count, end_from);
1858 
1859     {
1860       // Align end of arrays since they could be not aligned even
1861       // when arrays itself are aligned.
1862 
1863       // copy 1 element if necessary to align 'end_to' on an 4 bytes
1864       __ andcc(end_to, 3, G0);
1865       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1866       __ delayed()->lduh(end_from, -2, O3);
1867       __ dec(end_from, 2);
1868       __ dec(end_to, 2);
1869       __ dec(count);
1870       __ sth(O3, end_to, 0);
1871     __ BIND(L_skip_alignment);
1872 
1873       // copy 2 elements to align 'end_to' on an 8 byte boundary
1874       __ andcc(end_to, 7, G0);
1875       __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2);
1876       __ delayed()->lduh(end_from, -2, O3);
1877       __ dec(count, 2);
1878       __ lduh(end_from, -4, O4);
1879       __ dec(end_from, 4);
1880       __ dec(end_to, 4);
1881       __ sth(O3, end_to, 2);
1882       __ sth(O4, end_to, 0);
1883     __ BIND(L_skip_alignment2);
1884     }
1885 #ifdef _LP64
1886     if (aligned) {
1887       // Both arrays are aligned to 8-bytes in 64-bits VM.
1888       // The 'count' is decremented in copy_16_bytes_backward_with_shift()
1889       // in unaligned case.
1890       __ dec(count, 8);
1891     } else
1892 #endif
1893     {
1894       // Copy with shift 16 bytes per iteration if arrays do not have
1895       // the same alignment mod 8, otherwise jump to the next
1896       // code for aligned copy (and substracting 8 from 'count' before jump).
1897       // The compare above (count >= 11) guarantes 'count' >= 16 bytes.
1898       // Also jump over aligned copy after the copy with shift completed.
1899 
1900       copy_16_bytes_backward_with_shift(end_from, end_to, count, 8,
1901                                         L_aligned_copy, L_copy_2_bytes);
1902     }
1903     // copy 4 elements (16 bytes) at a time
1904       __ align(OptoLoopAlignment);
1905     __ BIND(L_aligned_copy);
1906       __ dec(end_from, 16);
1907       __ ldx(end_from, 8, O3);
1908       __ ldx(end_from, 0, O4);
1909       __ dec(end_to, 16);
1910       __ deccc(count, 8);
1911       __ stx(O3, end_to, 8);
1912       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
1913       __ delayed()->stx(O4, end_to, 0);
1914       __ inc(count, 8);
1915 
1916     // copy 1 element (2 bytes) at a time
1917     __ BIND(L_copy_2_bytes);
1918       __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
1919       __ delayed()->nop();
1920     __ BIND(L_copy_2_bytes_loop);
1921       __ dec(end_from, 2);
1922       __ dec(end_to, 2);
1923       __ lduh(end_from, 0, O4);
1924       __ deccc(count);
1925       __ brx(Assembler::greater, false, Assembler::pt, L_copy_2_bytes_loop);
1926       __ delayed()->sth(O4, end_to, 0);
1927 
1928     __ BIND(L_exit);
1929     // O3, O4 are used as temp registers
1930     inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
1931     __ retl();
1932     __ delayed()->mov(G0, O0); // return 0
1933     return start;
1934   }
1935 
1936   //
1937   //  Generate core code for disjoint int copy (and oop copy on 32-bit).
1938   //  If "aligned" is true, the "from" and "to" addresses are assumed
1939   //  to be heapword aligned.
1940   //
1941   // Arguments:
1942   //      from:  O0
1943   //      to:    O1
1944   //      count: O2 treated as signed
1945   //
1946   void generate_disjoint_int_copy_core(bool aligned) {
1947 
1948     Label L_skip_alignment, L_aligned_copy;
1949     Label L_copy_16_bytes,  L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
1950 
1951     const Register from      = O0;   // source array address
1952     const Register to        = O1;   // destination array address
1953     const Register count     = O2;   // elements count
1954     const Register offset    = O5;   // offset from start of arrays
1955     // O3, O4, G3, G4 are used as temp registers
1956 
1957     // 'aligned' == true when it is known statically during compilation
1958     // of this arraycopy call site that both 'from' and 'to' addresses
1959     // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
1960     //
1961     // Aligned arrays have 4 bytes alignment in 32-bits VM
1962     // and 8 bytes - in 64-bits VM.
1963     //
1964 #ifdef _LP64
1965     if (!aligned)
1966 #endif
1967     {
1968       // The next check could be put under 'ifndef' since the code in
1969       // generate_disjoint_long_copy_core() has own checks and set 'offset'.
1970 
1971       // for short arrays, just do single element copy
1972       __ cmp(count, 5); // 4 + 1 (20 bytes)
1973       __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes);
1974       __ delayed()->mov(G0, offset);
1975 
1976       // copy 1 element to align 'to' on an 8 byte boundary
1977       __ andcc(to, 7, G0);
1978       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1979       __ delayed()->ld(from, 0, O3);
1980       __ inc(from, 4);
1981       __ inc(to, 4);
1982       __ dec(count);
1983       __ st(O3, to, -4);
1984     __ BIND(L_skip_alignment);
1985 
1986     // if arrays have same alignment mod 8, do 4 elements copy
1987       __ andcc(from, 7, G0);
1988       __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
1989       __ delayed()->ld(from, 0, O3);
1990 
1991     //
1992     // Load 2 aligned 8-bytes chunks and use one from previous iteration
1993     // to form 2 aligned 8-bytes chunks to store.
1994     //
1995     // copy_16_bytes_forward_with_shift() is not used here since this
1996     // code is more optimal.
1997 
1998     // copy with shift 4 elements (16 bytes) at a time
1999       __ dec(count, 4);   // The cmp at the beginning guaranty count >= 4
2000 
2001       __ align(OptoLoopAlignment);
2002     __ BIND(L_copy_16_bytes);
2003       __ ldx(from, 4, O4);
2004       __ deccc(count, 4); // Can we do next iteration after this one?
2005       __ ldx(from, 12, G4);
2006       __ inc(to, 16);
2007       __ inc(from, 16);
2008       __ sllx(O3, 32, O3);
2009       __ srlx(O4, 32, G3);
2010       __ bset(G3, O3);
2011       __ stx(O3, to, -16);
2012       __ sllx(O4, 32, O4);
2013       __ srlx(G4, 32, G3);
2014       __ bset(G3, O4);
2015       __ stx(O4, to, -8);
2016       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
2017       __ delayed()->mov(G4, O3);
2018 
2019       __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
2020       __ delayed()->inc(count, 4); // restore 'count'
2021 
2022     __ BIND(L_aligned_copy);
2023     }
2024     // copy 4 elements (16 bytes) at a time
2025       __ and3(count, 1, G4); // Save
2026       __ srl(count, 1, count);
2027      generate_disjoint_long_copy_core(aligned);
2028       __ mov(G4, count);     // Restore
2029 
2030     // copy 1 element at a time
2031     __ BIND(L_copy_4_bytes);
2032       __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
2033       __ delayed()->nop();
2034     __ BIND(L_copy_4_bytes_loop);
2035       __ ld(from, offset, O3);
2036       __ deccc(count);
2037       __ st(O3, to, offset);
2038       __ brx(Assembler::notZero, false, Assembler::pt, L_copy_4_bytes_loop);
2039       __ delayed()->inc(offset, 4);
2040     __ BIND(L_exit);
2041   }
2042 
2043   //
2044   //  Generate stub for disjoint int copy.  If "aligned" is true, the
2045   //  "from" and "to" addresses are assumed to be heapword aligned.
2046   //
2047   // Arguments for generated stub:
2048   //      from:  O0
2049   //      to:    O1
2050   //      count: O2 treated as signed
2051   //
2052   address generate_disjoint_int_copy(bool aligned, const char * name) {
2053     __ align(CodeEntryAlignment);
2054     StubCodeMark mark(this, "StubRoutines", name);
2055     address start = __ pc();
2056 
2057     const Register count = O2;
2058     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2059 
2060     if (!aligned)  disjoint_int_copy_entry = __ pc();
2061     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2062     if (!aligned)  BLOCK_COMMENT("Entry:");
2063 
2064     generate_disjoint_int_copy_core(aligned);
2065 
2066     // O3, O4 are used as temp registers
2067     inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4);
2068     __ retl();
2069     __ delayed()->mov(G0, O0); // return 0
2070     return start;
2071   }
2072 
2073   //
2074   //  Generate core code for conjoint int copy (and oop copy on 32-bit).
2075   //  If "aligned" is true, the "from" and "to" addresses are assumed
2076   //  to be heapword aligned.
2077   //
2078   // Arguments:
2079   //      from:  O0
2080   //      to:    O1
2081   //      count: O2 treated as signed
2082   //
2083   void generate_conjoint_int_copy_core(bool aligned) {
2084     // Do reverse copy.
2085 
2086     Label L_skip_alignment, L_aligned_copy;
2087     Label L_copy_16_bytes,  L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
2088 
2089     const Register from      = O0;   // source array address
2090     const Register to        = O1;   // destination array address
2091     const Register count     = O2;   // elements count
2092     const Register end_from  = from; // source array end address
2093     const Register end_to    = to;   // destination array end address
2094     // O3, O4, O5, G3 are used as temp registers
2095 
2096     const Register byte_count = O3;  // bytes count to copy
2097 
2098       __ sllx(count, LogBytesPerInt, byte_count);
2099       __ add(to, byte_count, end_to); // offset after last copied element
2100 
2101       __ cmp(count, 5); // for short arrays, just do single element copy
2102       __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes);
2103       __ delayed()->add(from, byte_count, end_from);
2104 
2105     // copy 1 element to align 'to' on an 8 byte boundary
2106       __ andcc(end_to, 7, G0);
2107       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
2108       __ delayed()->nop();
2109       __ dec(count);
2110       __ dec(end_from, 4);
2111       __ dec(end_to,   4);
2112       __ ld(end_from, 0, O4);
2113       __ st(O4, end_to, 0);
2114     __ BIND(L_skip_alignment);
2115 
2116     // Check if 'end_from' and 'end_to' has the same alignment.
2117       __ andcc(end_from, 7, G0);
2118       __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
2119       __ delayed()->dec(count, 4); // The cmp at the start guaranty cnt >= 4
2120 
2121     // copy with shift 4 elements (16 bytes) at a time
2122     //
2123     // Load 2 aligned 8-bytes chunks and use one from previous iteration
2124     // to form 2 aligned 8-bytes chunks to store.
2125     //
2126       __ ldx(end_from, -4, O3);
2127       __ align(OptoLoopAlignment);
2128     __ BIND(L_copy_16_bytes);
2129       __ ldx(end_from, -12, O4);
2130       __ deccc(count, 4);
2131       __ ldx(end_from, -20, O5);
2132       __ dec(end_to, 16);
2133       __ dec(end_from, 16);
2134       __ srlx(O3, 32, O3);
2135       __ sllx(O4, 32, G3);
2136       __ bset(G3, O3);
2137       __ stx(O3, end_to, 8);
2138       __ srlx(O4, 32, O4);
2139       __ sllx(O5, 32, G3);
2140       __ bset(O4, G3);
2141       __ stx(G3, end_to, 0);
2142       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
2143       __ delayed()->mov(O5, O3);
2144 
2145       __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
2146       __ delayed()->inc(count, 4);
2147 
2148     // copy 4 elements (16 bytes) at a time
2149       __ align(OptoLoopAlignment);
2150     __ BIND(L_aligned_copy);
2151       __ dec(end_from, 16);
2152       __ ldx(end_from, 8, O3);
2153       __ ldx(end_from, 0, O4);
2154       __ dec(end_to, 16);
2155       __ deccc(count, 4);
2156       __ stx(O3, end_to, 8);
2157       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
2158       __ delayed()->stx(O4, end_to, 0);
2159       __ inc(count, 4);
2160 
2161     // copy 1 element (4 bytes) at a time
2162     __ BIND(L_copy_4_bytes);
2163       __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
2164       __ delayed()->nop();
2165     __ BIND(L_copy_4_bytes_loop);
2166       __ dec(end_from, 4);
2167       __ dec(end_to, 4);
2168       __ ld(end_from, 0, O4);
2169       __ deccc(count);
2170       __ brx(Assembler::greater, false, Assembler::pt, L_copy_4_bytes_loop);
2171       __ delayed()->st(O4, end_to, 0);
2172     __ BIND(L_exit);
2173   }
2174 
2175   //
2176   //  Generate stub for conjoint int copy.  If "aligned" is true, the
2177   //  "from" and "to" addresses are assumed to be heapword aligned.
2178   //
2179   // Arguments for generated stub:
2180   //      from:  O0
2181   //      to:    O1
2182   //      count: O2 treated as signed
2183   //
2184   address generate_conjoint_int_copy(bool aligned, const char * name) {
2185     __ align(CodeEntryAlignment);
2186     StubCodeMark mark(this, "StubRoutines", name);
2187     address start = __ pc();
2188 
2189     address nooverlap_target = aligned ?
2190         StubRoutines::arrayof_jint_disjoint_arraycopy() :
2191         disjoint_int_copy_entry;
2192 
2193     assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
2194 
2195     if (!aligned)  int_copy_entry = __ pc();
2196     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2197     if (!aligned)  BLOCK_COMMENT("Entry:");
2198 
2199     array_overlap_test(nooverlap_target, 2);
2200 
2201     generate_conjoint_int_copy_core(aligned);
2202 
2203     // O3, O4 are used as temp registers
2204     inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4);
2205     __ retl();
2206     __ delayed()->mov(G0, O0); // return 0
2207     return start;
2208   }
2209 
2210   //
2211   //  Generate core code for disjoint long copy (and oop copy on 64-bit).
2212   //  "aligned" is ignored, because we must make the stronger
2213   //  assumption that both addresses are always 64-bit aligned.
2214   //
2215   // Arguments:
2216   //      from:  O0
2217   //      to:    O1
2218   //      count: O2 treated as signed
2219   //
2220   // count -= 2;
2221   // if ( count >= 0 ) { // >= 2 elements
2222   //   if ( count > 6) { // >= 8 elements
2223   //     count -= 6; // original count - 8
2224   //     do {
2225   //       copy_8_elements;
2226   //       count -= 8;
2227   //     } while ( count >= 0 );
2228   //     count += 6;
2229   //   }
2230   //   if ( count >= 0 ) { // >= 2 elements
2231   //     do {
2232   //       copy_2_elements;
2233   //     } while ( (count=count-2) >= 0 );
2234   //   }
2235   // }
2236   // count += 2;
2237   // if ( count != 0 ) { // 1 element left
2238   //   copy_1_element;
2239   // }
2240   //
2241   void generate_disjoint_long_copy_core(bool aligned) {
2242     Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
2243     const Register from    = O0;  // source array address
2244     const Register to      = O1;  // destination array address
2245     const Register count   = O2;  // elements count
2246     const Register offset0 = O4;  // element offset
2247     const Register offset8 = O5;  // next element offset
2248 
2249       __ deccc(count, 2);
2250       __ mov(G0, offset0);   // offset from start of arrays (0)
2251       __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
2252       __ delayed()->add(offset0, 8, offset8);
2253 
2254     // Copy by 64 bytes chunks
2255     Label L_copy_64_bytes;
2256     const Register from64 = O3;  // source address
2257     const Register to64   = G3;  // destination address
2258       __ subcc(count, 6, O3);
2259       __ brx(Assembler::negative, false, Assembler::pt, L_copy_16_bytes );
2260       __ delayed()->mov(to,   to64);
2261       // Now we can use O4(offset0), O5(offset8) as temps
2262       __ mov(O3, count);
2263       __ mov(from, from64);
2264 
2265       __ align(OptoLoopAlignment);
2266     __ BIND(L_copy_64_bytes);
2267       for( int off = 0; off < 64; off += 16 ) {
2268         __ ldx(from64,  off+0, O4);
2269         __ ldx(from64,  off+8, O5);
2270         __ stx(O4, to64,  off+0);
2271         __ stx(O5, to64,  off+8);
2272       }
2273       __ deccc(count, 8);
2274       __ inc(from64, 64);
2275       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_64_bytes);
2276       __ delayed()->inc(to64, 64);
2277 
2278       // Restore O4(offset0), O5(offset8)
2279       __ sub(from64, from, offset0);
2280       __ inccc(count, 6);
2281       __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
2282       __ delayed()->add(offset0, 8, offset8);
2283 
2284       // Copy by 16 bytes chunks
2285       __ align(OptoLoopAlignment);
2286     __ BIND(L_copy_16_bytes);
2287       __ ldx(from, offset0, O3);
2288       __ ldx(from, offset8, G3);
2289       __ deccc(count, 2);
2290       __ stx(O3, to, offset0);
2291       __ inc(offset0, 16);
2292       __ stx(G3, to, offset8);
2293       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
2294       __ delayed()->inc(offset8, 16);
2295 
2296       // Copy last 8 bytes
2297     __ BIND(L_copy_8_bytes);
2298       __ inccc(count, 2);
2299       __ brx(Assembler::zero, true, Assembler::pn, L_exit );
2300       __ delayed()->mov(offset0, offset8); // Set O5 used by other stubs
2301       __ ldx(from, offset0, O3);
2302       __ stx(O3, to, offset0);
2303     __ BIND(L_exit);
2304   }
2305 
2306   //
2307   //  Generate stub for disjoint long copy.
2308   //  "aligned" is ignored, because we must make the stronger
2309   //  assumption that both addresses are always 64-bit aligned.
2310   //
2311   // Arguments for generated stub:
2312   //      from:  O0
2313   //      to:    O1
2314   //      count: O2 treated as signed
2315   //
2316   address generate_disjoint_long_copy(bool aligned, const char * name) {
2317     __ align(CodeEntryAlignment);
2318     StubCodeMark mark(this, "StubRoutines", name);
2319     address start = __ pc();
2320 
2321     assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
2322 
2323     if (!aligned)  disjoint_long_copy_entry = __ pc();
2324     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2325     if (!aligned)  BLOCK_COMMENT("Entry:");
2326 
2327     generate_disjoint_long_copy_core(aligned);
2328 
2329     // O3, O4 are used as temp registers
2330     inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
2331     __ retl();
2332     __ delayed()->mov(G0, O0); // return 0
2333     return start;
2334   }
2335 
2336   //
2337   //  Generate core code for conjoint long copy (and oop copy on 64-bit).
2338   //  "aligned" is ignored, because we must make the stronger
2339   //  assumption that both addresses are always 64-bit aligned.
2340   //
2341   // Arguments:
2342   //      from:  O0
2343   //      to:    O1
2344   //      count: O2 treated as signed
2345   //
2346   void generate_conjoint_long_copy_core(bool aligned) {
2347     // Do reverse copy.
2348     Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
2349     const Register from    = O0;  // source array address
2350     const Register to      = O1;  // destination array address
2351     const Register count   = O2;  // elements count
2352     const Register offset8 = O4;  // element offset
2353     const Register offset0 = O5;  // previous element offset
2354 
2355       __ subcc(count, 1, count);
2356       __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_8_bytes );
2357       __ delayed()->sllx(count, LogBytesPerLong, offset8);
2358       __ sub(offset8, 8, offset0);
2359       __ align(OptoLoopAlignment);
2360     __ BIND(L_copy_16_bytes);
2361       __ ldx(from, offset8, O2);
2362       __ ldx(from, offset0, O3);
2363       __ stx(O2, to, offset8);
2364       __ deccc(offset8, 16);      // use offset8 as counter
2365       __ stx(O3, to, offset0);
2366       __ brx(Assembler::greater, false, Assembler::pt, L_copy_16_bytes);
2367       __ delayed()->dec(offset0, 16);
2368 
2369     __ BIND(L_copy_8_bytes);
2370       __ brx(Assembler::negative, false, Assembler::pn, L_exit );
2371       __ delayed()->nop();
2372       __ ldx(from, 0, O3);
2373       __ stx(O3, to, 0);
2374     __ BIND(L_exit);
2375   }
2376 
2377   //  Generate stub for conjoint long copy.
2378   //  "aligned" is ignored, because we must make the stronger
2379   //  assumption that both addresses are always 64-bit aligned.
2380   //
2381   // Arguments for generated stub:
2382   //      from:  O0
2383   //      to:    O1
2384   //      count: O2 treated as signed
2385   //
2386   address generate_conjoint_long_copy(bool aligned, const char * name) {
2387     __ align(CodeEntryAlignment);
2388     StubCodeMark mark(this, "StubRoutines", name);
2389     address start = __ pc();
2390 
2391     assert(!aligned, "usage");
2392     address nooverlap_target = disjoint_long_copy_entry;
2393 
2394     assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
2395 
2396     if (!aligned)  long_copy_entry = __ pc();
2397     // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2398     if (!aligned)  BLOCK_COMMENT("Entry:");
2399 
2400     array_overlap_test(nooverlap_target, 3);
2401 
2402     generate_conjoint_long_copy_core(aligned);
2403 
2404     // O3, O4 are used as temp registers
2405     inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
2406     __ retl();
2407     __ delayed()->mov(G0, O0); // return 0
2408     return start;
2409   }
2410 
2411   //  Generate stub for disjoint oop copy.  If "aligned" is true, the
2412   //  "from" and "to" addresses are assumed to be heapword aligned.
2413   //
2414   // Arguments for generated stub:
2415   //      from:  O0
2416   //      to:    O1
2417   //      count: O2 treated as signed
2418   //
2419   address generate_disjoint_oop_copy(bool aligned, const char * name) {
2420 
2421     const Register from  = O0;  // source array address
2422     const Register to    = O1;  // destination array address
2423     const Register count = O2;  // elements count
2424 
2425     __ align(CodeEntryAlignment);
2426     StubCodeMark mark(this, "StubRoutines", name);
2427     address start = __ pc();
2428 
2429     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2430 
2431     if (!aligned)  disjoint_oop_copy_entry = __ pc();
2432     // caller can pass a 64-bit byte count here
2433     if (!aligned)  BLOCK_COMMENT("Entry:");
2434 
2435     // save arguments for barrier generation
2436     __ mov(to, G1);
2437     __ mov(count, G5);
2438     gen_write_ref_array_pre_barrier(G1, G5);
2439   #ifdef _LP64
2440     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2441     if (UseCompressedOops) {
2442       generate_disjoint_int_copy_core(aligned);
2443     } else {
2444       generate_disjoint_long_copy_core(aligned);
2445     }
2446   #else
2447     generate_disjoint_int_copy_core(aligned);
2448   #endif
2449     // O0 is used as temp register
2450     gen_write_ref_array_post_barrier(G1, G5, O0);
2451 
2452     // O3, O4 are used as temp registers
2453     inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
2454     __ retl();
2455     __ delayed()->mov(G0, O0); // return 0
2456     return start;
2457   }
2458 
2459   //  Generate stub for conjoint oop copy.  If "aligned" is true, the
2460   //  "from" and "to" addresses are assumed to be heapword aligned.
2461   //
2462   // Arguments for generated stub:
2463   //      from:  O0
2464   //      to:    O1
2465   //      count: O2 treated as signed
2466   //
2467   address generate_conjoint_oop_copy(bool aligned, const char * name) {
2468 
2469     const Register from  = O0;  // source array address
2470     const Register to    = O1;  // destination array address
2471     const Register count = O2;  // elements count
2472 
2473     __ align(CodeEntryAlignment);
2474     StubCodeMark mark(this, "StubRoutines", name);
2475     address start = __ pc();
2476 
2477     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2478 
2479     if (!aligned)  oop_copy_entry = __ pc();
2480     // caller can pass a 64-bit byte count here
2481     if (!aligned)  BLOCK_COMMENT("Entry:");
2482 
2483     // save arguments for barrier generation
2484     __ mov(to, G1);
2485     __ mov(count, G5);
2486 
2487     gen_write_ref_array_pre_barrier(G1, G5);
2488 
2489     address nooverlap_target = aligned ?
2490         StubRoutines::arrayof_oop_disjoint_arraycopy() :
2491         disjoint_oop_copy_entry;
2492 
2493     array_overlap_test(nooverlap_target, LogBytesPerHeapOop);
2494 
2495   #ifdef _LP64
2496     if (UseCompressedOops) {
2497       generate_conjoint_int_copy_core(aligned);
2498     } else {
2499       generate_conjoint_long_copy_core(aligned);
2500     }
2501   #else
2502     generate_conjoint_int_copy_core(aligned);
2503   #endif
2504 
2505     // O0 is used as temp register
2506     gen_write_ref_array_post_barrier(G1, G5, O0);
2507 
2508     // O3, O4 are used as temp registers
2509     inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
2510     __ retl();
2511     __ delayed()->mov(G0, O0); // return 0
2512     return start;
2513   }
2514 
2515 
2516   // Helper for generating a dynamic type check.
2517   // Smashes only the given temp registers.
2518   void generate_type_check(Register sub_klass,
2519                            Register super_check_offset,
2520                            Register super_klass,
2521                            Register temp,
2522                            Label& L_success) {
2523     assert_different_registers(sub_klass, super_check_offset, super_klass, temp);
2524 
2525     BLOCK_COMMENT("type_check:");
2526 
2527     Label L_miss, L_pop_to_miss;
2528 
2529     assert_clean_int(super_check_offset, temp);
2530 
2531     __ check_klass_subtype_fast_path(sub_klass, super_klass, temp, noreg,
2532                                      &L_success, &L_miss, NULL,
2533                                      super_check_offset);
2534 
2535     BLOCK_COMMENT("type_check_slow_path:");
2536     __ save_frame(0);
2537     __ check_klass_subtype_slow_path(sub_klass->after_save(),
2538                                      super_klass->after_save(),
2539                                      L0, L1, L2, L4,
2540                                      NULL, &L_pop_to_miss);
2541     __ ba(false, L_success);
2542     __ delayed()->restore();
2543 
2544     __ bind(L_pop_to_miss);
2545     __ restore();
2546 
2547     // Fall through on failure!
2548     __ BIND(L_miss);
2549   }
2550 
2551 
2552   //  Generate stub for checked oop copy.
2553   //
2554   // Arguments for generated stub:
2555   //      from:  O0
2556   //      to:    O1
2557   //      count: O2 treated as signed
2558   //      ckoff: O3 (super_check_offset)
2559   //      ckval: O4 (super_klass)
2560   //      ret:   O0 zero for success; (-1^K) where K is partial transfer count
2561   //
2562   address generate_checkcast_copy(const char* name) {
2563 
2564     const Register O0_from   = O0;      // source array address
2565     const Register O1_to     = O1;      // destination array address
2566     const Register O2_count  = O2;      // elements count
2567     const Register O3_ckoff  = O3;      // super_check_offset
2568     const Register O4_ckval  = O4;      // super_klass
2569 
2570     const Register O5_offset = O5;      // loop var, with stride wordSize
2571     const Register G1_remain = G1;      // loop var, with stride -1
2572     const Register G3_oop    = G3;      // actual oop copied
2573     const Register G4_klass  = G4;      // oop._klass
2574     const Register G5_super  = G5;      // oop._klass._primary_supers[ckval]
2575 
2576     __ align(CodeEntryAlignment);
2577     StubCodeMark mark(this, "StubRoutines", name);
2578     address start = __ pc();
2579 
2580     gen_write_ref_array_pre_barrier(O1, O2);
2581 
2582 #ifdef ASSERT
2583     // We sometimes save a frame (see generate_type_check below).
2584     // If this will cause trouble, let's fail now instead of later.
2585     __ save_frame(0);
2586     __ restore();
2587 #endif
2588 
2589     assert_clean_int(O2_count, G1);     // Make sure 'count' is clean int.
2590 
2591 #ifdef ASSERT
2592     // caller guarantees that the arrays really are different
2593     // otherwise, we would have to make conjoint checks
2594     { Label L;
2595       __ mov(O3, G1);           // spill: overlap test smashes O3
2596       __ mov(O4, G4);           // spill: overlap test smashes O4
2597       array_overlap_test(L, LogBytesPerHeapOop);
2598       __ stop("checkcast_copy within a single array");
2599       __ bind(L);
2600       __ mov(G1, O3);
2601       __ mov(G4, O4);
2602     }
2603 #endif //ASSERT
2604 
2605     checkcast_copy_entry = __ pc();
2606     // caller can pass a 64-bit byte count here (from generic stub)
2607     BLOCK_COMMENT("Entry:");
2608 
2609     Label load_element, store_element, do_card_marks, fail, done;
2610     __ addcc(O2_count, 0, G1_remain);   // initialize loop index, and test it
2611     __ brx(Assembler::notZero, false, Assembler::pt, load_element);
2612     __ delayed()->mov(G0, O5_offset);   // offset from start of arrays
2613 
2614     // Empty array:  Nothing to do.
2615     inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
2616     __ retl();
2617     __ delayed()->set(0, O0);           // return 0 on (trivial) success
2618 
2619     // ======== begin loop ========
2620     // (Loop is rotated; its entry is load_element.)
2621     // Loop variables:
2622     //   (O5 = 0; ; O5 += wordSize) --- offset from src, dest arrays
2623     //   (O2 = len; O2 != 0; O2--) --- number of oops *remaining*
2624     //   G3, G4, G5 --- current oop, oop.klass, oop.klass.super
2625     __ align(OptoLoopAlignment);
2626 
2627     __ BIND(store_element);
2628     __ deccc(G1_remain);                // decrement the count
2629     __ store_heap_oop(G3_oop, O1_to, O5_offset); // store the oop
2630     __ inc(O5_offset, heapOopSize);     // step to next offset
2631     __ brx(Assembler::zero, true, Assembler::pt, do_card_marks);
2632     __ delayed()->set(0, O0);           // return -1 on success
2633 
2634     // ======== loop entry is here ========
2635     __ BIND(load_element);
2636     __ load_heap_oop(O0_from, O5_offset, G3_oop);  // load the oop
2637     __ br_null(G3_oop, true, Assembler::pt, store_element);
2638     __ delayed()->nop();
2639 
2640     __ load_klass(G3_oop, G4_klass); // query the object klass
2641 
2642     generate_type_check(G4_klass, O3_ckoff, O4_ckval, G5_super,
2643                         // branch to this on success:
2644                         store_element);
2645     // ======== end loop ========
2646 
2647     // It was a real error; we must depend on the caller to finish the job.
2648     // Register G1 has number of *remaining* oops, O2 number of *total* oops.
2649     // Emit GC store barriers for the oops we have copied (O2 minus G1),
2650     // and report their number to the caller.
2651     __ BIND(fail);
2652     __ subcc(O2_count, G1_remain, O2_count);
2653     __ brx(Assembler::zero, false, Assembler::pt, done);
2654     __ delayed()->not1(O2_count, O0);   // report (-1^K) to caller
2655 
2656     __ BIND(do_card_marks);
2657     gen_write_ref_array_post_barrier(O1_to, O2_count, O3);   // store check on O1[0..O2]
2658 
2659     __ BIND(done);
2660     inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
2661     __ retl();
2662     __ delayed()->nop();             // return value in 00
2663 
2664     return start;
2665   }
2666 
2667 
2668   //  Generate 'unsafe' array copy stub
2669   //  Though just as safe as the other stubs, it takes an unscaled
2670   //  size_t argument instead of an element count.
2671   //
2672   // Arguments for generated stub:
2673   //      from:  O0
2674   //      to:    O1
2675   //      count: O2 byte count, treated as ssize_t, can be zero
2676   //
2677   // Examines the alignment of the operands and dispatches
2678   // to a long, int, short, or byte copy loop.
2679   //
2680   address generate_unsafe_copy(const char* name) {
2681 
2682     const Register O0_from   = O0;      // source array address
2683     const Register O1_to     = O1;      // destination array address
2684     const Register O2_count  = O2;      // elements count
2685 
2686     const Register G1_bits   = G1;      // test copy of low bits
2687 
2688     __ align(CodeEntryAlignment);
2689     StubCodeMark mark(this, "StubRoutines", name);
2690     address start = __ pc();
2691 
2692     // bump this on entry, not on exit:
2693     inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, G1, G3);
2694 
2695     __ or3(O0_from, O1_to, G1_bits);
2696     __ or3(O2_count,       G1_bits, G1_bits);
2697 
2698     __ btst(BytesPerLong-1, G1_bits);
2699     __ br(Assembler::zero, true, Assembler::pt,
2700           long_copy_entry, relocInfo::runtime_call_type);
2701     // scale the count on the way out:
2702     __ delayed()->srax(O2_count, LogBytesPerLong, O2_count);
2703 
2704     __ btst(BytesPerInt-1, G1_bits);
2705     __ br(Assembler::zero, true, Assembler::pt,
2706           int_copy_entry, relocInfo::runtime_call_type);
2707     // scale the count on the way out:
2708     __ delayed()->srax(O2_count, LogBytesPerInt, O2_count);
2709 
2710     __ btst(BytesPerShort-1, G1_bits);
2711     __ br(Assembler::zero, true, Assembler::pt,
2712           short_copy_entry, relocInfo::runtime_call_type);
2713     // scale the count on the way out:
2714     __ delayed()->srax(O2_count, LogBytesPerShort, O2_count);
2715 
2716     __ br(Assembler::always, false, Assembler::pt,
2717           byte_copy_entry, relocInfo::runtime_call_type);
2718     __ delayed()->nop();
2719 
2720     return start;
2721   }
2722 
2723 
2724   // Perform range checks on the proposed arraycopy.
2725   // Kills the two temps, but nothing else.
2726   // Also, clean the sign bits of src_pos and dst_pos.
2727   void arraycopy_range_checks(Register src,     // source array oop (O0)
2728                               Register src_pos, // source position (O1)
2729                               Register dst,     // destination array oo (O2)
2730                               Register dst_pos, // destination position (O3)
2731                               Register length,  // length of copy (O4)
2732                               Register temp1, Register temp2,
2733                               Label& L_failed) {
2734     BLOCK_COMMENT("arraycopy_range_checks:");
2735 
2736     //  if (src_pos + length > arrayOop(src)->length() ) FAIL;
2737 
2738     const Register array_length = temp1;  // scratch
2739     const Register end_pos      = temp2;  // scratch
2740 
2741     // Note:  This next instruction may be in the delay slot of a branch:
2742     __ add(length, src_pos, end_pos);  // src_pos + length
2743     __ lduw(src, arrayOopDesc::length_offset_in_bytes(), array_length);
2744     __ cmp(end_pos, array_length);
2745     __ br(Assembler::greater, false, Assembler::pn, L_failed);
2746 
2747     //  if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
2748     __ delayed()->add(length, dst_pos, end_pos); // dst_pos + length
2749     __ lduw(dst, arrayOopDesc::length_offset_in_bytes(), array_length);
2750     __ cmp(end_pos, array_length);
2751     __ br(Assembler::greater, false, Assembler::pn, L_failed);
2752 
2753     // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
2754     // Move with sign extension can be used since they are positive.
2755     __ delayed()->signx(src_pos, src_pos);
2756     __ signx(dst_pos, dst_pos);
2757 
2758     BLOCK_COMMENT("arraycopy_range_checks done");
2759   }
2760 
2761 
2762   //
2763   //  Generate generic array copy stubs
2764   //
2765   //  Input:
2766   //    O0    -  src oop
2767   //    O1    -  src_pos
2768   //    O2    -  dst oop
2769   //    O3    -  dst_pos
2770   //    O4    -  element count
2771   //
2772   //  Output:
2773   //    O0 ==  0  -  success
2774   //    O0 == -1  -  need to call System.arraycopy
2775   //
2776   address generate_generic_copy(const char *name) {
2777 
2778     Label L_failed, L_objArray;
2779 
2780     // Input registers
2781     const Register src      = O0;  // source array oop
2782     const Register src_pos  = O1;  // source position
2783     const Register dst      = O2;  // destination array oop
2784     const Register dst_pos  = O3;  // destination position
2785     const Register length   = O4;  // elements count
2786 
2787     // registers used as temp
2788     const Register G3_src_klass = G3; // source array klass
2789     const Register G4_dst_klass = G4; // destination array klass
2790     const Register G5_lh        = G5; // layout handler
2791     const Register O5_temp      = O5;
2792 
2793     __ align(CodeEntryAlignment);
2794     StubCodeMark mark(this, "StubRoutines", name);
2795     address start = __ pc();
2796 
2797     // bump this on entry, not on exit:
2798     inc_counter_np(SharedRuntime::_generic_array_copy_ctr, G1, G3);
2799 
2800     // In principle, the int arguments could be dirty.
2801     //assert_clean_int(src_pos, G1);
2802     //assert_clean_int(dst_pos, G1);
2803     //assert_clean_int(length, G1);
2804 
2805     //-----------------------------------------------------------------------
2806     // Assembler stubs will be used for this call to arraycopy
2807     // if the following conditions are met:
2808     //
2809     // (1) src and dst must not be null.
2810     // (2) src_pos must not be negative.
2811     // (3) dst_pos must not be negative.
2812     // (4) length  must not be negative.
2813     // (5) src klass and dst klass should be the same and not NULL.
2814     // (6) src and dst should be arrays.
2815     // (7) src_pos + length must not exceed length of src.
2816     // (8) dst_pos + length must not exceed length of dst.
2817     BLOCK_COMMENT("arraycopy initial argument checks");
2818 
2819     //  if (src == NULL) return -1;
2820     __ br_null(src, false, Assembler::pn, L_failed);
2821 
2822     //  if (src_pos < 0) return -1;
2823     __ delayed()->tst(src_pos);
2824     __ br(Assembler::negative, false, Assembler::pn, L_failed);
2825     __ delayed()->nop();
2826 
2827     //  if (dst == NULL) return -1;
2828     __ br_null(dst, false, Assembler::pn, L_failed);
2829 
2830     //  if (dst_pos < 0) return -1;
2831     __ delayed()->tst(dst_pos);
2832     __ br(Assembler::negative, false, Assembler::pn, L_failed);
2833 
2834     //  if (length < 0) return -1;
2835     __ delayed()->tst(length);
2836     __ br(Assembler::negative, false, Assembler::pn, L_failed);
2837 
2838     BLOCK_COMMENT("arraycopy argument klass checks");
2839     //  get src->klass()
2840     if (UseCompressedOops) {
2841       __ delayed()->nop(); // ??? not good
2842       __ load_klass(src, G3_src_klass);
2843     } else {
2844       __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), G3_src_klass);
2845     }
2846 
2847 #ifdef ASSERT
2848     //  assert(src->klass() != NULL);
2849     BLOCK_COMMENT("assert klasses not null");
2850     { Label L_a, L_b;
2851       __ br_notnull(G3_src_klass, false, Assembler::pt, L_b); // it is broken if klass is NULL
2852       __ delayed()->nop();
2853       __ bind(L_a);
2854       __ stop("broken null klass");
2855       __ bind(L_b);
2856       __ load_klass(dst, G4_dst_klass);
2857       __ br_null(G4_dst_klass, false, Assembler::pn, L_a); // this would be broken also
2858       __ delayed()->mov(G0, G4_dst_klass);      // scribble the temp
2859       BLOCK_COMMENT("assert done");
2860     }
2861 #endif
2862 
2863     // Load layout helper
2864     //
2865     //  |array_tag|     | header_size | element_type |     |log2_element_size|
2866     // 32        30    24            16              8     2                 0
2867     //
2868     //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2869     //
2870 
2871     int lh_offset = klassOopDesc::header_size() * HeapWordSize +
2872                     Klass::layout_helper_offset_in_bytes();
2873 
2874     // Load 32-bits signed value. Use br() instruction with it to check icc.
2875     __ lduw(G3_src_klass, lh_offset, G5_lh);
2876 
2877     if (UseCompressedOops) {
2878       __ load_klass(dst, G4_dst_klass);
2879     }
2880     // Handle objArrays completely differently...
2881     juint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2882     __ set(objArray_lh, O5_temp);
2883     __ cmp(G5_lh,       O5_temp);
2884     __ br(Assembler::equal, false, Assembler::pt, L_objArray);
2885     if (UseCompressedOops) {
2886       __ delayed()->nop();
2887     } else {
2888       __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
2889     }
2890 
2891     //  if (src->klass() != dst->klass()) return -1;
2892     __ cmp(G3_src_klass, G4_dst_klass);
2893     __ brx(Assembler::notEqual, false, Assembler::pn, L_failed);
2894     __ delayed()->nop();
2895 
2896     //  if (!src->is_Array()) return -1;
2897     __ cmp(G5_lh, Klass::_lh_neutral_value); // < 0
2898     __ br(Assembler::greaterEqual, false, Assembler::pn, L_failed);
2899 
2900     // At this point, it is known to be a typeArray (array_tag 0x3).
2901 #ifdef ASSERT
2902     __ delayed()->nop();
2903     { Label L;
2904       jint lh_prim_tag_in_place = (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2905       __ set(lh_prim_tag_in_place, O5_temp);
2906       __ cmp(G5_lh,                O5_temp);
2907       __ br(Assembler::greaterEqual, false, Assembler::pt, L);
2908       __ delayed()->nop();
2909       __ stop("must be a primitive array");
2910       __ bind(L);
2911     }
2912 #else
2913     __ delayed();                               // match next insn to prev branch
2914 #endif
2915 
2916     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2917                            O5_temp, G4_dst_klass, L_failed);
2918 
2919     // typeArrayKlass
2920     //
2921     // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
2922     // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
2923     //
2924 
2925     const Register G4_offset = G4_dst_klass;    // array offset
2926     const Register G3_elsize = G3_src_klass;    // log2 element size
2927 
2928     __ srl(G5_lh, Klass::_lh_header_size_shift, G4_offset);
2929     __ and3(G4_offset, Klass::_lh_header_size_mask, G4_offset); // array_offset
2930     __ add(src, G4_offset, src);       // src array offset
2931     __ add(dst, G4_offset, dst);       // dst array offset
2932     __ and3(G5_lh, Klass::_lh_log2_element_size_mask, G3_elsize); // log2 element size
2933 
2934     // next registers should be set before the jump to corresponding stub
2935     const Register from     = O0;  // source array address
2936     const Register to       = O1;  // destination array address
2937     const Register count    = O2;  // elements count
2938 
2939     // 'from', 'to', 'count' registers should be set in this order
2940     // since they are the same as 'src', 'src_pos', 'dst'.
2941 
2942     BLOCK_COMMENT("scale indexes to element size");
2943     __ sll_ptr(src_pos, G3_elsize, src_pos);
2944     __ sll_ptr(dst_pos, G3_elsize, dst_pos);
2945     __ add(src, src_pos, from);       // src_addr
2946     __ add(dst, dst_pos, to);         // dst_addr
2947 
2948     BLOCK_COMMENT("choose copy loop based on element size");
2949     __ cmp(G3_elsize, 0);
2950     __ br(Assembler::equal,true,Assembler::pt,StubRoutines::_jbyte_arraycopy);
2951     __ delayed()->signx(length, count); // length
2952 
2953     __ cmp(G3_elsize, LogBytesPerShort);
2954     __ br(Assembler::equal,true,Assembler::pt,StubRoutines::_jshort_arraycopy);
2955     __ delayed()->signx(length, count); // length
2956 
2957     __ cmp(G3_elsize, LogBytesPerInt);
2958     __ br(Assembler::equal,true,Assembler::pt,StubRoutines::_jint_arraycopy);
2959     __ delayed()->signx(length, count); // length
2960 #ifdef ASSERT
2961     { Label L;
2962       __ cmp(G3_elsize, LogBytesPerLong);
2963       __ br(Assembler::equal, false, Assembler::pt, L);
2964       __ delayed()->nop();
2965       __ stop("must be long copy, but elsize is wrong");
2966       __ bind(L);
2967     }
2968 #endif
2969     __ br(Assembler::always,false,Assembler::pt,StubRoutines::_jlong_arraycopy);
2970     __ delayed()->signx(length, count); // length
2971 
2972     // objArrayKlass
2973   __ BIND(L_objArray);
2974     // live at this point:  G3_src_klass, G4_dst_klass, src[_pos], dst[_pos], length
2975 
2976     Label L_plain_copy, L_checkcast_copy;
2977     //  test array classes for subtyping
2978     __ cmp(G3_src_klass, G4_dst_klass);         // usual case is exact equality
2979     __ brx(Assembler::notEqual, true, Assembler::pn, L_checkcast_copy);
2980     __ delayed()->lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted from below
2981 
2982     // Identically typed arrays can be copied without element-wise checks.
2983     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2984                            O5_temp, G5_lh, L_failed);
2985 
2986     __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
2987     __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
2988     __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
2989     __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
2990     __ add(src, src_pos, from);       // src_addr
2991     __ add(dst, dst_pos, to);         // dst_addr
2992   __ BIND(L_plain_copy);
2993     __ br(Assembler::always, false, Assembler::pt,StubRoutines::_oop_arraycopy);
2994     __ delayed()->signx(length, count); // length
2995 
2996   __ BIND(L_checkcast_copy);
2997     // live at this point:  G3_src_klass, G4_dst_klass
2998     {
2999       // Before looking at dst.length, make sure dst is also an objArray.
3000       // lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted to delay slot
3001       __ cmp(G5_lh,                    O5_temp);
3002       __ br(Assembler::notEqual, false, Assembler::pn, L_failed);
3003 
3004       // It is safe to examine both src.length and dst.length.
3005       __ delayed();                             // match next insn to prev branch
3006       arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
3007                              O5_temp, G5_lh, L_failed);
3008 
3009       // Marshal the base address arguments now, freeing registers.
3010       __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
3011       __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
3012       __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
3013       __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
3014       __ add(src, src_pos, from);               // src_addr
3015       __ add(dst, dst_pos, to);                 // dst_addr
3016       __ signx(length, count);                  // length (reloaded)
3017 
3018       Register sco_temp = O3;                   // this register is free now
3019       assert_different_registers(from, to, count, sco_temp,
3020                                  G4_dst_klass, G3_src_klass);
3021 
3022       // Generate the type check.
3023       int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
3024                         Klass::super_check_offset_offset_in_bytes());
3025       __ lduw(G4_dst_klass, sco_offset, sco_temp);
3026       generate_type_check(G3_src_klass, sco_temp, G4_dst_klass,
3027                           O5_temp, L_plain_copy);
3028 
3029       // Fetch destination element klass from the objArrayKlass header.
3030       int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
3031                        objArrayKlass::element_klass_offset_in_bytes());
3032 
3033       // the checkcast_copy loop needs two extra arguments:
3034       __ ld_ptr(G4_dst_klass, ek_offset, O4);   // dest elem klass
3035       // lduw(O4, sco_offset, O3);              // sco of elem klass
3036 
3037       __ br(Assembler::always, false, Assembler::pt, checkcast_copy_entry);
3038       __ delayed()->lduw(O4, sco_offset, O3);
3039     }
3040 
3041   __ BIND(L_failed);
3042     __ retl();
3043     __ delayed()->sub(G0, 1, O0); // return -1
3044     return start;
3045   }
3046 
3047   void generate_arraycopy_stubs() {
3048 
3049     // Note:  the disjoint stubs must be generated first, some of
3050     //        the conjoint stubs use them.
3051     StubRoutines::_jbyte_disjoint_arraycopy  = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
3052     StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
3053     StubRoutines::_jint_disjoint_arraycopy   = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy");
3054     StubRoutines::_jlong_disjoint_arraycopy  = generate_disjoint_long_copy(false, "jlong_disjoint_arraycopy");
3055     StubRoutines::_oop_disjoint_arraycopy    = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy");
3056     StubRoutines::_arrayof_jbyte_disjoint_arraycopy  = generate_disjoint_byte_copy(true, "arrayof_jbyte_disjoint_arraycopy");
3057     StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy");
3058     StubRoutines::_arrayof_jint_disjoint_arraycopy   = generate_disjoint_int_copy(true, "arrayof_jint_disjoint_arraycopy");
3059     StubRoutines::_arrayof_jlong_disjoint_arraycopy  = generate_disjoint_long_copy(true, "arrayof_jlong_disjoint_arraycopy");
3060     StubRoutines::_arrayof_oop_disjoint_arraycopy    =  generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy");
3061 
3062     StubRoutines::_jbyte_arraycopy  = generate_conjoint_byte_copy(false, "jbyte_arraycopy");
3063     StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, "jshort_arraycopy");
3064     StubRoutines::_jint_arraycopy   = generate_conjoint_int_copy(false, "jint_arraycopy");
3065     StubRoutines::_jlong_arraycopy  = generate_conjoint_long_copy(false, "jlong_arraycopy");
3066     StubRoutines::_oop_arraycopy    = generate_conjoint_oop_copy(false, "oop_arraycopy");
3067     StubRoutines::_arrayof_jbyte_arraycopy    = generate_conjoint_byte_copy(true, "arrayof_jbyte_arraycopy");
3068     StubRoutines::_arrayof_jshort_arraycopy   = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy");
3069 #ifdef _LP64
3070     // since sizeof(jint) < sizeof(HeapWord), there's a different flavor:
3071     StubRoutines::_arrayof_jint_arraycopy     = generate_conjoint_int_copy(true, "arrayof_jint_arraycopy");
3072   #else
3073     StubRoutines::_arrayof_jint_arraycopy     = StubRoutines::_jint_arraycopy;
3074 #endif
3075     StubRoutines::_arrayof_jlong_arraycopy    = StubRoutines::_jlong_arraycopy;
3076     StubRoutines::_arrayof_oop_arraycopy      = StubRoutines::_oop_arraycopy;
3077 
3078     StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy");
3079     StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy("unsafe_arraycopy");
3080     StubRoutines::_generic_arraycopy   = generate_generic_copy("generic_arraycopy");
3081 
3082     StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
3083     StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
3084     StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
3085     StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
3086     StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
3087     StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
3088   }
3089 
3090   void generate_initial() {
3091     // Generates all stubs and initializes the entry points
3092 
3093     //------------------------------------------------------------------------------------------------------------------------
3094     // entry points that exist in all platforms
3095     // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than
3096     //       the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp.
3097     StubRoutines::_forward_exception_entry                 = generate_forward_exception();
3098 
3099     StubRoutines::_call_stub_entry                         = generate_call_stub(StubRoutines::_call_stub_return_address);
3100     StubRoutines::_catch_exception_entry                   = generate_catch_exception();
3101 
3102     //------------------------------------------------------------------------------------------------------------------------
3103     // entry points that are platform specific
3104     StubRoutines::Sparc::_test_stop_entry                  = generate_test_stop();
3105 
3106     StubRoutines::Sparc::_stop_subroutine_entry            = generate_stop_subroutine();
3107     StubRoutines::Sparc::_flush_callers_register_windows_entry = generate_flush_callers_register_windows();
3108 
3109 #if !defined(COMPILER2) && !defined(_LP64)
3110     StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
3111     StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
3112     StubRoutines::_atomic_add_entry          = generate_atomic_add();
3113     StubRoutines::_atomic_xchg_ptr_entry     = StubRoutines::_atomic_xchg_entry;
3114     StubRoutines::_atomic_cmpxchg_ptr_entry  = StubRoutines::_atomic_cmpxchg_entry;
3115     StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
3116     StubRoutines::_atomic_add_ptr_entry      = StubRoutines::_atomic_add_entry;
3117 #endif  // COMPILER2 !=> _LP64
3118   }
3119 
3120 
3121   void generate_all() {
3122     // Generates all stubs and initializes the entry points
3123 
3124     // Generate partial_subtype_check first here since its code depends on
3125     // UseZeroBaseCompressedOops which is defined after heap initialization.
3126     StubRoutines::Sparc::_partial_subtype_check                = generate_partial_subtype_check();
3127     // These entry points require SharedInfo::stack0 to be set up in non-core builds
3128     StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError),  false);
3129     StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError),  false);
3130     StubRoutines::_throw_ArithmeticException_entry         = generate_throw_exception("ArithmeticException throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_ArithmeticException),  true);
3131     StubRoutines::_throw_NullPointerException_entry        = generate_throw_exception("NullPointerException throw_exception",         CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException), true);
3132     StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
3133     StubRoutines::_throw_StackOverflowError_entry          = generate_throw_exception("StackOverflowError throw_exception",           CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError),   false);
3134 
3135     StubRoutines::_handler_for_unsafe_access_entry =
3136       generate_handler_for_unsafe_access();
3137 
3138     // support for verify_oop (must happen after universe_init)
3139     StubRoutines::_verify_oop_subroutine_entry     = generate_verify_oop_subroutine();
3140 
3141     // arraycopy stubs used by compilers
3142     generate_arraycopy_stubs();
3143 
3144     // Don't initialize the platform math functions since sparc
3145     // doesn't have intrinsics for these operations.
3146   }
3147 
3148 
3149  public:
3150   StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
3151     // replace the standard masm with a special one:
3152     _masm = new MacroAssembler(code);
3153 
3154     _stub_count = !all ? 0x100 : 0x200;
3155     if (all) {
3156       generate_all();
3157     } else {
3158       generate_initial();
3159     }
3160 
3161     // make sure this stub is available for all local calls
3162     if (_atomic_add_stub.is_unbound()) {
3163       // generate a second time, if necessary
3164       (void) generate_atomic_add();
3165     }
3166   }
3167 
3168 
3169  private:
3170   int _stub_count;
3171   void stub_prolog(StubCodeDesc* cdesc) {
3172     # ifdef ASSERT
3173       // put extra information in the stub code, to make it more readable
3174 #ifdef _LP64
3175 // Write the high part of the address
3176 // [RGV] Check if there is a dependency on the size of this prolog
3177       __ emit_data((intptr_t)cdesc >> 32,    relocInfo::none);
3178 #endif
3179       __ emit_data((intptr_t)cdesc,    relocInfo::none);
3180       __ emit_data(++_stub_count, relocInfo::none);
3181     # endif
3182     align(true);
3183   }
3184 
3185   void align(bool at_header = false) {
3186     // %%%%% move this constant somewhere else
3187     // UltraSPARC cache line size is 8 instructions:
3188     const unsigned int icache_line_size = 32;
3189     const unsigned int icache_half_line_size = 16;
3190 
3191     if (at_header) {
3192       while ((intptr_t)(__ pc()) % icache_line_size != 0) {
3193         __ emit_data(0, relocInfo::none);
3194       }
3195     } else {
3196       while ((intptr_t)(__ pc()) % icache_half_line_size != 0) {
3197         __ nop();
3198       }
3199     }
3200   }
3201 
3202 }; // end class declaration
3203 
3204 
3205 address StubGenerator::disjoint_byte_copy_entry  = NULL;
3206 address StubGenerator::disjoint_short_copy_entry = NULL;
3207 address StubGenerator::disjoint_int_copy_entry   = NULL;
3208 address StubGenerator::disjoint_long_copy_entry  = NULL;
3209 address StubGenerator::disjoint_oop_copy_entry   = NULL;
3210 
3211 address StubGenerator::byte_copy_entry  = NULL;
3212 address StubGenerator::short_copy_entry = NULL;
3213 address StubGenerator::int_copy_entry   = NULL;
3214 address StubGenerator::long_copy_entry  = NULL;
3215 address StubGenerator::oop_copy_entry   = NULL;
3216 
3217 address StubGenerator::checkcast_copy_entry = NULL;
3218 
3219 void StubGenerator_generate(CodeBuffer* code, bool all) {
3220   StubGenerator g(code, all);
3221 }