1 /*
   2  * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/assembler.hpp"
  27 #include "assembler_sparc.inline.hpp"
  28 #include "interpreter/interpreter.hpp"
  29 #include "nativeInst_sparc.hpp"
  30 #include "oops/instanceOop.hpp"
  31 #include "oops/methodOop.hpp"
  32 #include "oops/objArrayKlass.hpp"
  33 #include "oops/oop.inline.hpp"
  34 #include "prims/methodHandles.hpp"
  35 #include "runtime/frame.inline.hpp"
  36 #include "runtime/handles.inline.hpp"
  37 #include "runtime/sharedRuntime.hpp"
  38 #include "runtime/stubCodeGenerator.hpp"
  39 #include "runtime/stubRoutines.hpp"
  40 #include "utilities/top.hpp"
  41 #ifdef TARGET_OS_FAMILY_linux
  42 # include "thread_linux.inline.hpp"
  43 #endif
  44 #ifdef TARGET_OS_FAMILY_solaris
  45 # include "thread_solaris.inline.hpp"
  46 #endif
  47 #ifdef COMPILER2
  48 #include "opto/runtime.hpp"
  49 #endif
  50 
  51 // Declaration and definition of StubGenerator (no .hpp file).
  52 // For a more detailed description of the stub routine structure
  53 // see the comment in stubRoutines.hpp.
  54 
  55 #define __ _masm->
  56 
  57 #ifdef PRODUCT
  58 #define BLOCK_COMMENT(str) /* nothing */
  59 #else
  60 #define BLOCK_COMMENT(str) __ block_comment(str)
  61 #endif
  62 
  63 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
  64 
  65 // Note:  The register L7 is used as L7_thread_cache, and may not be used
  66 //        any other way within this module.
  67 
  68 
  69 static const Register& Lstub_temp = L2;
  70 
  71 // -------------------------------------------------------------------------------------------------------------------------
  72 // Stub Code definitions
  73 
  74 static address handle_unsafe_access() {
  75   JavaThread* thread = JavaThread::current();
  76   address pc  = thread->saved_exception_pc();
  77   address npc = thread->saved_exception_npc();
  78   // pc is the instruction which we must emulate
  79   // doing a no-op is fine:  return garbage from the load
  80 
  81   // request an async exception
  82   thread->set_pending_unsafe_access_error();
  83 
  84   // return address of next instruction to execute
  85   return npc;
  86 }
  87 
  88 class StubGenerator: public StubCodeGenerator {
  89  private:
  90 
  91 #ifdef PRODUCT
  92 #define inc_counter_np(a,b,c) (0)
  93 #else
  94 #define inc_counter_np(counter, t1, t2) \
  95   BLOCK_COMMENT("inc_counter " #counter); \
  96   __ inc_counter(&counter, t1, t2);
  97 #endif
  98 
  99   //----------------------------------------------------------------------------------------------------
 100   // Call stubs are used to call Java from C
 101 
 102   address generate_call_stub(address& return_pc) {
 103     StubCodeMark mark(this, "StubRoutines", "call_stub");
 104     address start = __ pc();
 105 
 106     // Incoming arguments:
 107     //
 108     // o0         : call wrapper address
 109     // o1         : result (address)
 110     // o2         : result type
 111     // o3         : method
 112     // o4         : (interpreter) entry point
 113     // o5         : parameters (address)
 114     // [sp + 0x5c]: parameter size (in words)
 115     // [sp + 0x60]: thread
 116     //
 117     // +---------------+ <--- sp + 0
 118     // |               |
 119     // . reg save area .
 120     // |               |
 121     // +---------------+ <--- sp + 0x40
 122     // |               |
 123     // . extra 7 slots .
 124     // |               |
 125     // +---------------+ <--- sp + 0x5c
 126     // |  param. size  |
 127     // +---------------+ <--- sp + 0x60
 128     // |    thread     |
 129     // +---------------+
 130     // |               |
 131 
 132     // note: if the link argument position changes, adjust
 133     //       the code in frame::entry_frame_call_wrapper()
 134 
 135     const Argument link           = Argument(0, false); // used only for GC
 136     const Argument result         = Argument(1, false);
 137     const Argument result_type    = Argument(2, false);
 138     const Argument method         = Argument(3, false);
 139     const Argument entry_point    = Argument(4, false);
 140     const Argument parameters     = Argument(5, false);
 141     const Argument parameter_size = Argument(6, false);
 142     const Argument thread         = Argument(7, false);
 143 
 144     // setup thread register
 145     __ ld_ptr(thread.as_address(), G2_thread);
 146     __ reinit_heapbase();
 147 
 148 #ifdef ASSERT
 149     // make sure we have no pending exceptions
 150     { const Register t = G3_scratch;
 151       Label L;
 152       __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), t);
 153       __ br_null_short(t, Assembler::pt, L);
 154       __ stop("StubRoutines::call_stub: entered with pending exception");
 155       __ bind(L);
 156     }
 157 #endif
 158 
 159     // create activation frame & allocate space for parameters
 160     { const Register t = G3_scratch;
 161       __ ld_ptr(parameter_size.as_address(), t);                // get parameter size (in words)
 162       __ add(t, frame::memory_parameter_word_sp_offset, t);     // add space for save area (in words)
 163       __ round_to(t, WordsPerLong);                             // make sure it is multiple of 2 (in words)
 164       __ sll(t, Interpreter::logStackElementSize, t);           // compute number of bytes
 165       __ neg(t);                                                // negate so it can be used with save
 166       __ save(SP, t, SP);                                       // setup new frame
 167     }
 168 
 169     // +---------------+ <--- sp + 0
 170     // |               |
 171     // . reg save area .
 172     // |               |
 173     // +---------------+ <--- sp + 0x40
 174     // |               |
 175     // . extra 7 slots .
 176     // |               |
 177     // +---------------+ <--- sp + 0x5c
 178     // |  empty slot   |      (only if parameter size is even)
 179     // +---------------+
 180     // |               |
 181     // .  parameters   .
 182     // |               |
 183     // +---------------+ <--- fp + 0
 184     // |               |
 185     // . reg save area .
 186     // |               |
 187     // +---------------+ <--- fp + 0x40
 188     // |               |
 189     // . extra 7 slots .
 190     // |               |
 191     // +---------------+ <--- fp + 0x5c
 192     // |  param. size  |
 193     // +---------------+ <--- fp + 0x60
 194     // |    thread     |
 195     // +---------------+
 196     // |               |
 197 
 198     // pass parameters if any
 199     BLOCK_COMMENT("pass parameters if any");
 200     { const Register src = parameters.as_in().as_register();
 201       const Register dst = Lentry_args;
 202       const Register tmp = G3_scratch;
 203       const Register cnt = G4_scratch;
 204 
 205       // test if any parameters & setup of Lentry_args
 206       Label exit;
 207       __ ld_ptr(parameter_size.as_in().as_address(), cnt);      // parameter counter
 208       __ add( FP, STACK_BIAS, dst );
 209       __ cmp_zero_and_br(Assembler::zero, cnt, exit);
 210       __ delayed()->sub(dst, BytesPerWord, dst);                 // setup Lentry_args
 211 
 212       // copy parameters if any
 213       Label loop;
 214       __ BIND(loop);
 215       // Store parameter value
 216       __ ld_ptr(src, 0, tmp);
 217       __ add(src, BytesPerWord, src);
 218       __ st_ptr(tmp, dst, 0);
 219       __ deccc(cnt);
 220       __ br(Assembler::greater, false, Assembler::pt, loop);
 221       __ delayed()->sub(dst, Interpreter::stackElementSize, dst);
 222 
 223       // done
 224       __ BIND(exit);
 225     }
 226 
 227     // setup parameters, method & call Java function
 228 #ifdef ASSERT
 229     // layout_activation_impl checks it's notion of saved SP against
 230     // this register, so if this changes update it as well.
 231     const Register saved_SP = Lscratch;
 232     __ mov(SP, saved_SP);                               // keep track of SP before call
 233 #endif
 234 
 235     // setup parameters
 236     const Register t = G3_scratch;
 237     __ ld_ptr(parameter_size.as_in().as_address(), t); // get parameter size (in words)
 238     __ sll(t, Interpreter::logStackElementSize, t);    // compute number of bytes
 239     __ sub(FP, t, Gargs);                              // setup parameter pointer
 240 #ifdef _LP64
 241     __ add( Gargs, STACK_BIAS, Gargs );                // Account for LP64 stack bias
 242 #endif
 243     __ mov(SP, O5_savedSP);
 244 
 245 
 246     // do the call
 247     //
 248     // the following register must be setup:
 249     //
 250     // G2_thread
 251     // G5_method
 252     // Gargs
 253     BLOCK_COMMENT("call Java function");
 254     __ jmpl(entry_point.as_in().as_register(), G0, O7);
 255     __ delayed()->mov(method.as_in().as_register(), G5_method);   // setup method
 256 
 257     BLOCK_COMMENT("call_stub_return_address:");
 258     return_pc = __ pc();
 259 
 260     // The callee, if it wasn't interpreted, can return with SP changed so
 261     // we can no longer assert of change of SP.
 262 
 263     // store result depending on type
 264     // (everything that is not T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE
 265     //  is treated as T_INT)
 266     { const Register addr = result     .as_in().as_register();
 267       const Register type = result_type.as_in().as_register();
 268       Label is_long, is_float, is_double, is_object, exit;
 269       __            cmp(type, T_OBJECT);  __ br(Assembler::equal, false, Assembler::pn, is_object);
 270       __ delayed()->cmp(type, T_FLOAT);   __ br(Assembler::equal, false, Assembler::pn, is_float);
 271       __ delayed()->cmp(type, T_DOUBLE);  __ br(Assembler::equal, false, Assembler::pn, is_double);
 272       __ delayed()->cmp(type, T_LONG);    __ br(Assembler::equal, false, Assembler::pn, is_long);
 273       __ delayed()->nop();
 274 
 275       // store int result
 276       __ st(O0, addr, G0);
 277 
 278       __ BIND(exit);
 279       __ ret();
 280       __ delayed()->restore();
 281 
 282       __ BIND(is_object);
 283       __ ba(exit);
 284       __ delayed()->st_ptr(O0, addr, G0);
 285 
 286       __ BIND(is_float);
 287       __ ba(exit);
 288       __ delayed()->stf(FloatRegisterImpl::S, F0, addr, G0);
 289 
 290       __ BIND(is_double);
 291       __ ba(exit);
 292       __ delayed()->stf(FloatRegisterImpl::D, F0, addr, G0);
 293 
 294       __ BIND(is_long);
 295 #ifdef _LP64
 296       __ ba(exit);
 297       __ delayed()->st_long(O0, addr, G0);      // store entire long
 298 #else
 299 #if defined(COMPILER2)
 300   // All return values are where we want them, except for Longs.  C2 returns
 301   // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
 302   // Since the interpreter will return longs in G1 and O0/O1 in the 32bit
 303   // build we simply always use G1.
 304   // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
 305   // do this here. Unfortunately if we did a rethrow we'd see an machepilog node
 306   // first which would move g1 -> O0/O1 and destroy the exception we were throwing.
 307 
 308       __ ba(exit);
 309       __ delayed()->stx(G1, addr, G0);  // store entire long
 310 #else
 311       __ st(O1, addr, BytesPerInt);
 312       __ ba(exit);
 313       __ delayed()->st(O0, addr, G0);
 314 #endif /* COMPILER2 */
 315 #endif /* _LP64 */
 316      }
 317      return start;
 318   }
 319 
 320 
 321   //----------------------------------------------------------------------------------------------------
 322   // Return point for a Java call if there's an exception thrown in Java code.
 323   // The exception is caught and transformed into a pending exception stored in
 324   // JavaThread that can be tested from within the VM.
 325   //
 326   // Oexception: exception oop
 327 
 328   address generate_catch_exception() {
 329     StubCodeMark mark(this, "StubRoutines", "catch_exception");
 330 
 331     address start = __ pc();
 332     // verify that thread corresponds
 333     __ verify_thread();
 334 
 335     const Register& temp_reg = Gtemp;
 336     Address pending_exception_addr    (G2_thread, Thread::pending_exception_offset());
 337     Address exception_file_offset_addr(G2_thread, Thread::exception_file_offset   ());
 338     Address exception_line_offset_addr(G2_thread, Thread::exception_line_offset   ());
 339 
 340     // set pending exception
 341     __ verify_oop(Oexception);
 342     __ st_ptr(Oexception, pending_exception_addr);
 343     __ set((intptr_t)__FILE__, temp_reg);
 344     __ st_ptr(temp_reg, exception_file_offset_addr);
 345     __ set((intptr_t)__LINE__, temp_reg);
 346     __ st(temp_reg, exception_line_offset_addr);
 347 
 348     // complete return to VM
 349     assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");
 350 
 351     AddressLiteral stub_ret(StubRoutines::_call_stub_return_address);
 352     __ jump_to(stub_ret, temp_reg);
 353     __ delayed()->nop();
 354 
 355     return start;
 356   }
 357 
 358 
 359   //----------------------------------------------------------------------------------------------------
 360   // Continuation point for runtime calls returning with a pending exception
 361   // The pending exception check happened in the runtime or native call stub
 362   // The pending exception in Thread is converted into a Java-level exception
 363   //
 364   // Contract with Java-level exception handler: O0 = exception
 365   //                                             O1 = throwing pc
 366 
 367   address generate_forward_exception() {
 368     StubCodeMark mark(this, "StubRoutines", "forward_exception");
 369     address start = __ pc();
 370 
 371     // Upon entry, O7 has the return address returning into Java
 372     // (interpreted or compiled) code; i.e. the return address
 373     // becomes the throwing pc.
 374 
 375     const Register& handler_reg = Gtemp;
 376 
 377     Address exception_addr(G2_thread, Thread::pending_exception_offset());
 378 
 379 #ifdef ASSERT
 380     // make sure that this code is only executed if there is a pending exception
 381     { Label L;
 382       __ ld_ptr(exception_addr, Gtemp);
 383       __ br_notnull_short(Gtemp, Assembler::pt, L);
 384       __ stop("StubRoutines::forward exception: no pending exception (1)");
 385       __ bind(L);
 386     }
 387 #endif
 388 
 389     // compute exception handler into handler_reg
 390     __ get_thread();
 391     __ ld_ptr(exception_addr, Oexception);
 392     __ verify_oop(Oexception);
 393     __ save_frame(0);             // compensates for compiler weakness
 394     __ add(O7->after_save(), frame::pc_return_offset, Lscratch); // save the issuing PC
 395     BLOCK_COMMENT("call exception_handler_for_return_address");
 396     __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), G2_thread, Lscratch);
 397     __ mov(O0, handler_reg);
 398     __ restore();                 // compensates for compiler weakness
 399 
 400     __ ld_ptr(exception_addr, Oexception);
 401     __ add(O7, frame::pc_return_offset, Oissuing_pc); // save the issuing PC
 402 
 403 #ifdef ASSERT
 404     // make sure exception is set
 405     { Label L;
 406       __ br_notnull_short(Oexception, Assembler::pt, L);
 407       __ stop("StubRoutines::forward exception: no pending exception (2)");
 408       __ bind(L);
 409     }
 410 #endif
 411     // jump to exception handler
 412     __ jmp(handler_reg, 0);
 413     // clear pending exception
 414     __ delayed()->st_ptr(G0, exception_addr);
 415 
 416     return start;
 417   }
 418 
 419 
 420   //------------------------------------------------------------------------------------------------------------------------
 421   // Continuation point for throwing of implicit exceptions that are not handled in
 422   // the current activation. Fabricates an exception oop and initiates normal
 423   // exception dispatching in this frame. Only callee-saved registers are preserved
 424   // (through the normal register window / RegisterMap handling).
 425   // If the compiler needs all registers to be preserved between the fault
 426   // point and the exception handler then it must assume responsibility for that in
 427   // AbstractCompiler::continuation_for_implicit_null_exception or
 428   // continuation_for_implicit_division_by_zero_exception. All other implicit
 429   // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are
 430   // either at call sites or otherwise assume that stack unwinding will be initiated,
 431   // so caller saved registers were assumed volatile in the compiler.
 432 
 433   // Note that we generate only this stub into a RuntimeStub, because it needs to be
 434   // properly traversed and ignored during GC, so we change the meaning of the "__"
 435   // macro within this method.
 436 #undef __
 437 #define __ masm->
 438 
 439   address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc,
 440                                    Register arg1 = noreg, Register arg2 = noreg) {
 441 #ifdef ASSERT
 442     int insts_size = VerifyThread ? 1 * K : 600;
 443 #else
 444     int insts_size = VerifyThread ? 1 * K : 256;
 445 #endif /* ASSERT */
 446     int locs_size  = 32;
 447 
 448     CodeBuffer      code(name, insts_size, locs_size);
 449     MacroAssembler* masm = new MacroAssembler(&code);
 450 
 451     __ verify_thread();
 452 
 453     // This is an inlined and slightly modified version of call_VM
 454     // which has the ability to fetch the return PC out of thread-local storage
 455     __ assert_not_delayed();
 456 
 457     // Note that we always push a frame because on the SPARC
 458     // architecture, for all of our implicit exception kinds at call
 459     // sites, the implicit exception is taken before the callee frame
 460     // is pushed.
 461     __ save_frame(0);
 462 
 463     int frame_complete = __ offset();
 464 
 465     if (restore_saved_exception_pc) {
 466       __ ld_ptr(G2_thread, JavaThread::saved_exception_pc_offset(), I7);
 467       __ sub(I7, frame::pc_return_offset, I7);
 468     }
 469 
 470     // Note that we always have a runtime stub frame on the top of stack by this point
 471     Register last_java_sp = SP;
 472     // 64-bit last_java_sp is biased!
 473     __ set_last_Java_frame(last_java_sp, G0);
 474     if (VerifyThread)  __ mov(G2_thread, O0); // about to be smashed; pass early
 475     __ save_thread(noreg);
 476     if (arg1 != noreg) {
 477       assert(arg2 != O1, "clobbered");
 478       __ mov(arg1, O1);
 479     }
 480     if (arg2 != noreg) {
 481       __ mov(arg2, O2);
 482     }
 483     // do the call
 484     BLOCK_COMMENT("call runtime_entry");
 485     __ call(runtime_entry, relocInfo::runtime_call_type);
 486     if (!VerifyThread)
 487       __ delayed()->mov(G2_thread, O0);  // pass thread as first argument
 488     else
 489       __ delayed()->nop();             // (thread already passed)
 490     __ restore_thread(noreg);
 491     __ reset_last_Java_frame();
 492 
 493     // check for pending exceptions. use Gtemp as scratch register.
 494 #ifdef ASSERT
 495     Label L;
 496 
 497     Address exception_addr(G2_thread, Thread::pending_exception_offset());
 498     Register scratch_reg = Gtemp;
 499     __ ld_ptr(exception_addr, scratch_reg);
 500     __ br_notnull_short(scratch_reg, Assembler::pt, L);
 501     __ should_not_reach_here();
 502     __ bind(L);
 503 #endif // ASSERT
 504     BLOCK_COMMENT("call forward_exception_entry");
 505     __ call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
 506     // we use O7 linkage so that forward_exception_entry has the issuing PC
 507     __ delayed()->restore();
 508 
 509     RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, masm->total_frame_size_in_bytes(0), NULL, false);
 510     return stub->entry_point();
 511   }
 512 
 513 #undef __
 514 #define __ _masm->
 515 
 516 
 517   // Generate a routine that sets all the registers so we
 518   // can tell if the stop routine prints them correctly.
 519   address generate_test_stop() {
 520     StubCodeMark mark(this, "StubRoutines", "test_stop");
 521     address start = __ pc();
 522 
 523     int i;
 524 
 525     __ save_frame(0);
 526 
 527     static jfloat zero = 0.0, one = 1.0;
 528 
 529     // put addr in L0, then load through L0 to F0
 530     __ set((intptr_t)&zero, L0);  __ ldf( FloatRegisterImpl::S, L0, 0, F0);
 531     __ set((intptr_t)&one,  L0);  __ ldf( FloatRegisterImpl::S, L0, 0, F1); // 1.0 to F1
 532 
 533     // use add to put 2..18 in F2..F18
 534     for ( i = 2;  i <= 18;  ++i ) {
 535       __ fadd( FloatRegisterImpl::S, F1, as_FloatRegister(i-1),  as_FloatRegister(i));
 536     }
 537 
 538     // Now put double 2 in F16, double 18 in F18
 539     __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F2, F16 );
 540     __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F18, F18 );
 541 
 542     // use add to put 20..32 in F20..F32
 543     for (i = 20; i < 32; i += 2) {
 544       __ fadd( FloatRegisterImpl::D, F16, as_FloatRegister(i-2),  as_FloatRegister(i));
 545     }
 546 
 547     // put 0..7 in i's, 8..15 in l's, 16..23 in o's, 24..31 in g's
 548     for ( i = 0; i < 8; ++i ) {
 549       if (i < 6) {
 550         __ set(     i, as_iRegister(i));
 551         __ set(16 + i, as_oRegister(i));
 552         __ set(24 + i, as_gRegister(i));
 553       }
 554       __ set( 8 + i, as_lRegister(i));
 555     }
 556 
 557     __ stop("testing stop");
 558 
 559 
 560     __ ret();
 561     __ delayed()->restore();
 562 
 563     return start;
 564   }
 565 
 566 
 567   address generate_stop_subroutine() {
 568     StubCodeMark mark(this, "StubRoutines", "stop_subroutine");
 569     address start = __ pc();
 570 
 571     __ stop_subroutine();
 572 
 573     return start;
 574   }
 575 
 576   address generate_flush_callers_register_windows() {
 577     StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows");
 578     address start = __ pc();
 579 
 580     __ flush_windows();
 581     __ retl(false);
 582     __ delayed()->add( FP, STACK_BIAS, O0 );
 583     // The returned value must be a stack pointer whose register save area
 584     // is flushed, and will stay flushed while the caller executes.
 585 
 586     return start;
 587   }
 588 
 589   // Helper functions for v8 atomic operations.
 590   //
 591   void get_v8_oop_lock_ptr(Register lock_ptr_reg, Register mark_oop_reg, Register scratch_reg) {
 592     if (mark_oop_reg == noreg) {
 593       address lock_ptr = (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr();
 594       __ set((intptr_t)lock_ptr, lock_ptr_reg);
 595     } else {
 596       assert(scratch_reg != noreg, "just checking");
 597       address lock_ptr = (address)StubRoutines::Sparc::_v8_oop_lock_cache;
 598       __ set((intptr_t)lock_ptr, lock_ptr_reg);
 599       __ and3(mark_oop_reg, StubRoutines::Sparc::v8_oop_lock_mask_in_place, scratch_reg);
 600       __ add(lock_ptr_reg, scratch_reg, lock_ptr_reg);
 601     }
 602   }
 603 
 604   void generate_v8_lock_prologue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
 605 
 606     get_v8_oop_lock_ptr(lock_ptr_reg, mark_oop_reg, scratch_reg);
 607     __ set(StubRoutines::Sparc::locked, lock_reg);
 608     // Initialize yield counter
 609     __ mov(G0,yield_reg);
 610 
 611     __ BIND(retry);
 612     __ cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dontyield);
 613 
 614     // This code can only be called from inside the VM, this
 615     // stub is only invoked from Atomic::add().  We do not
 616     // want to use call_VM, because _last_java_sp and such
 617     // must already be set.
 618     //
 619     // Save the regs and make space for a C call
 620     __ save(SP, -96, SP);
 621     __ save_all_globals_into_locals();
 622     BLOCK_COMMENT("call os::naked_sleep");
 623     __ call(CAST_FROM_FN_PTR(address, os::naked_sleep));
 624     __ delayed()->nop();
 625     __ restore_globals_from_locals();
 626     __ restore();
 627     // reset the counter
 628     __ mov(G0,yield_reg);
 629 
 630     __ BIND(dontyield);
 631 
 632     // try to get lock
 633     __ swap(lock_ptr_reg, 0, lock_reg);
 634 
 635     // did we get the lock?
 636     __ cmp(lock_reg, StubRoutines::Sparc::unlocked);
 637     __ br(Assembler::notEqual, true, Assembler::pn, retry);
 638     __ delayed()->add(yield_reg,1,yield_reg);
 639 
 640     // yes, got lock. do the operation here.
 641   }
 642 
 643   void generate_v8_lock_epilogue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
 644     __ st(lock_reg, lock_ptr_reg, 0); // unlock
 645   }
 646 
 647   // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
 648   //
 649   // Arguments :
 650   //
 651   //      exchange_value: O0
 652   //      dest:           O1
 653   //
 654   // Results:
 655   //
 656   //     O0: the value previously stored in dest
 657   //
 658   address generate_atomic_xchg() {
 659     StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
 660     address start = __ pc();
 661 
 662     if (UseCASForSwap) {
 663       // Use CAS instead of swap, just in case the MP hardware
 664       // prefers to work with just one kind of synch. instruction.
 665       Label retry;
 666       __ BIND(retry);
 667       __ mov(O0, O3);       // scratch copy of exchange value
 668       __ ld(O1, 0, O2);     // observe the previous value
 669       // try to replace O2 with O3
 670       __ cas_under_lock(O1, O2, O3,
 671       (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
 672       __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
 673 
 674       __ retl(false);
 675       __ delayed()->mov(O2, O0);  // report previous value to caller
 676 
 677     } else {
 678       if (VM_Version::v9_instructions_work()) {
 679         __ retl(false);
 680         __ delayed()->swap(O1, 0, O0);
 681       } else {
 682         const Register& lock_reg = O2;
 683         const Register& lock_ptr_reg = O3;
 684         const Register& yield_reg = O4;
 685 
 686         Label retry;
 687         Label dontyield;
 688 
 689         generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
 690         // got the lock, do the swap
 691         __ swap(O1, 0, O0);
 692 
 693         generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
 694         __ retl(false);
 695         __ delayed()->nop();
 696       }
 697     }
 698 
 699     return start;
 700   }
 701 
 702 
 703   // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
 704   //
 705   // Arguments :
 706   //
 707   //      exchange_value: O0
 708   //      dest:           O1
 709   //      compare_value:  O2
 710   //
 711   // Results:
 712   //
 713   //     O0: the value previously stored in dest
 714   //
 715   // Overwrites (v8): O3,O4,O5
 716   //
 717   address generate_atomic_cmpxchg() {
 718     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
 719     address start = __ pc();
 720 
 721     // cmpxchg(dest, compare_value, exchange_value)
 722     __ cas_under_lock(O1, O2, O0,
 723       (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
 724     __ retl(false);
 725     __ delayed()->nop();
 726 
 727     return start;
 728   }
 729 
 730   // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
 731   //
 732   // Arguments :
 733   //
 734   //      exchange_value: O1:O0
 735   //      dest:           O2
 736   //      compare_value:  O4:O3
 737   //
 738   // Results:
 739   //
 740   //     O1:O0: the value previously stored in dest
 741   //
 742   // This only works on V9, on V8 we don't generate any
 743   // code and just return NULL.
 744   //
 745   // Overwrites: G1,G2,G3
 746   //
 747   address generate_atomic_cmpxchg_long() {
 748     StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
 749     address start = __ pc();
 750 
 751     if (!VM_Version::supports_cx8())
 752         return NULL;;
 753     __ sllx(O0, 32, O0);
 754     __ srl(O1, 0, O1);
 755     __ or3(O0,O1,O0);      // O0 holds 64-bit value from compare_value
 756     __ sllx(O3, 32, O3);
 757     __ srl(O4, 0, O4);
 758     __ or3(O3,O4,O3);     // O3 holds 64-bit value from exchange_value
 759     __ casx(O2, O3, O0);
 760     __ srl(O0, 0, O1);    // unpacked return value in O1:O0
 761     __ retl(false);
 762     __ delayed()->srlx(O0, 32, O0);
 763 
 764     return start;
 765   }
 766 
 767 
 768   // Support for jint Atomic::add(jint add_value, volatile jint* dest).
 769   //
 770   // Arguments :
 771   //
 772   //      add_value: O0   (e.g., +1 or -1)
 773   //      dest:      O1
 774   //
 775   // Results:
 776   //
 777   //     O0: the new value stored in dest
 778   //
 779   // Overwrites (v9): O3
 780   // Overwrites (v8): O3,O4,O5
 781   //
 782   address generate_atomic_add() {
 783     StubCodeMark mark(this, "StubRoutines", "atomic_add");
 784     address start = __ pc();
 785     __ BIND(_atomic_add_stub);
 786 
 787     if (VM_Version::v9_instructions_work()) {
 788       Label(retry);
 789       __ BIND(retry);
 790 
 791       __ lduw(O1, 0, O2);
 792       __ add(O0, O2, O3);
 793       __ cas(O1, O2, O3);
 794       __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
 795       __ retl(false);
 796       __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
 797     } else {
 798       const Register& lock_reg = O2;
 799       const Register& lock_ptr_reg = O3;
 800       const Register& value_reg = O4;
 801       const Register& yield_reg = O5;
 802 
 803       Label(retry);
 804       Label(dontyield);
 805 
 806       generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
 807       // got lock, do the increment
 808       __ ld(O1, 0, value_reg);
 809       __ add(O0, value_reg, value_reg);
 810       __ st(value_reg, O1, 0);
 811 
 812       // %%% only for RMO and PSO
 813       __ membar(Assembler::StoreStore);
 814 
 815       generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
 816 
 817       __ retl(false);
 818       __ delayed()->mov(value_reg, O0);
 819     }
 820 
 821     return start;
 822   }
 823   Label _atomic_add_stub;  // called from other stubs
 824 
 825 
 826   //------------------------------------------------------------------------------------------------------------------------
 827   // The following routine generates a subroutine to throw an asynchronous
 828   // UnknownError when an unsafe access gets a fault that could not be
 829   // reasonably prevented by the programmer.  (Example: SIGBUS/OBJERR.)
 830   //
 831   // Arguments :
 832   //
 833   //      trapping PC:    O7
 834   //
 835   // Results:
 836   //     posts an asynchronous exception, skips the trapping instruction
 837   //
 838 
 839   address generate_handler_for_unsafe_access() {
 840     StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
 841     address start = __ pc();
 842 
 843     const int preserve_register_words = (64 * 2);
 844     Address preserve_addr(FP, (-preserve_register_words * wordSize) + STACK_BIAS);
 845 
 846     Register Lthread = L7_thread_cache;
 847     int i;
 848 
 849     __ save_frame(0);
 850     __ mov(G1, L1);
 851     __ mov(G2, L2);
 852     __ mov(G3, L3);
 853     __ mov(G4, L4);
 854     __ mov(G5, L5);
 855     for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
 856       __ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize);
 857     }
 858 
 859     address entry_point = CAST_FROM_FN_PTR(address, handle_unsafe_access);
 860     BLOCK_COMMENT("call handle_unsafe_access");
 861     __ call(entry_point, relocInfo::runtime_call_type);
 862     __ delayed()->nop();
 863 
 864     __ mov(L1, G1);
 865     __ mov(L2, G2);
 866     __ mov(L3, G3);
 867     __ mov(L4, G4);
 868     __ mov(L5, G5);
 869     for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
 870       __ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize);
 871     }
 872 
 873     __ verify_thread();
 874 
 875     __ jmp(O0, 0);
 876     __ delayed()->restore();
 877 
 878     return start;
 879   }
 880 
 881 
 882   // Support for uint StubRoutine::Sparc::partial_subtype_check( Klass sub, Klass super );
 883   // Arguments :
 884   //
 885   //      ret  : O0, returned
 886   //      icc/xcc: set as O0 (depending on wordSize)
 887   //      sub  : O1, argument, not changed
 888   //      super: O2, argument, not changed
 889   //      raddr: O7, blown by call
 890   address generate_partial_subtype_check() {
 891     __ align(CodeEntryAlignment);
 892     StubCodeMark mark(this, "StubRoutines", "partial_subtype_check");
 893     address start = __ pc();
 894     Label miss;
 895 
 896 #if defined(COMPILER2) && !defined(_LP64)
 897     // Do not use a 'save' because it blows the 64-bit O registers.
 898     __ add(SP,-4*wordSize,SP);  // Make space for 4 temps (stack must be 2 words aligned)
 899     __ st_ptr(L0,SP,(frame::register_save_words+0)*wordSize);
 900     __ st_ptr(L1,SP,(frame::register_save_words+1)*wordSize);
 901     __ st_ptr(L2,SP,(frame::register_save_words+2)*wordSize);
 902     __ st_ptr(L3,SP,(frame::register_save_words+3)*wordSize);
 903     Register Rret   = O0;
 904     Register Rsub   = O1;
 905     Register Rsuper = O2;
 906 #else
 907     __ save_frame(0);
 908     Register Rret   = I0;
 909     Register Rsub   = I1;
 910     Register Rsuper = I2;
 911 #endif
 912 
 913     Register L0_ary_len = L0;
 914     Register L1_ary_ptr = L1;
 915     Register L2_super   = L2;
 916     Register L3_index   = L3;
 917 
 918     __ check_klass_subtype_slow_path(Rsub, Rsuper,
 919                                      L0, L1, L2, L3,
 920                                      NULL, &miss);
 921 
 922     // Match falls through here.
 923     __ addcc(G0,0,Rret);        // set Z flags, Z result
 924 
 925 #if defined(COMPILER2) && !defined(_LP64)
 926     __ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
 927     __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
 928     __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
 929     __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
 930     __ retl();                  // Result in Rret is zero; flags set to Z
 931     __ delayed()->add(SP,4*wordSize,SP);
 932 #else
 933     __ ret();                   // Result in Rret is zero; flags set to Z
 934     __ delayed()->restore();
 935 #endif
 936 
 937     __ BIND(miss);
 938     __ addcc(G0,1,Rret);        // set NZ flags, NZ result
 939 
 940 #if defined(COMPILER2) && !defined(_LP64)
 941     __ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
 942     __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
 943     __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
 944     __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
 945     __ retl();                  // Result in Rret is != 0; flags set to NZ
 946     __ delayed()->add(SP,4*wordSize,SP);
 947 #else
 948     __ ret();                   // Result in Rret is != 0; flags set to NZ
 949     __ delayed()->restore();
 950 #endif
 951 
 952     return start;
 953   }
 954 
 955 
 956   // Called from MacroAssembler::verify_oop
 957   //
 958   address generate_verify_oop_subroutine() {
 959     StubCodeMark mark(this, "StubRoutines", "verify_oop_stub");
 960 
 961     address start = __ pc();
 962 
 963     __ verify_oop_subroutine();
 964 
 965     return start;
 966   }
 967 
 968 
 969   //
 970   // Verify that a register contains clean 32-bits positive value
 971   // (high 32-bits are 0) so it could be used in 64-bits shifts (sllx, srax).
 972   //
 973   //  Input:
 974   //    Rint  -  32-bits value
 975   //    Rtmp  -  scratch
 976   //
 977   void assert_clean_int(Register Rint, Register Rtmp) {
 978 #if defined(ASSERT) && defined(_LP64)
 979     __ signx(Rint, Rtmp);
 980     __ cmp(Rint, Rtmp);
 981     __ breakpoint_trap(Assembler::notEqual, Assembler::xcc);
 982 #endif
 983   }
 984 
 985   //
 986   //  Generate overlap test for array copy stubs
 987   //
 988   //  Input:
 989   //    O0    -  array1
 990   //    O1    -  array2
 991   //    O2    -  element count
 992   //
 993   //  Kills temps:  O3, O4
 994   //
 995   void array_overlap_test(address no_overlap_target, int log2_elem_size) {
 996     assert(no_overlap_target != NULL, "must be generated");
 997     array_overlap_test(no_overlap_target, NULL, log2_elem_size);
 998   }
 999   void array_overlap_test(Label& L_no_overlap, int log2_elem_size) {
1000     array_overlap_test(NULL, &L_no_overlap, log2_elem_size);
1001   }
1002   void array_overlap_test(address no_overlap_target, Label* NOLp, int log2_elem_size) {
1003     const Register from       = O0;
1004     const Register to         = O1;
1005     const Register count      = O2;
1006     const Register to_from    = O3; // to - from
1007     const Register byte_count = O4; // count << log2_elem_size
1008 
1009       __ subcc(to, from, to_from);
1010       __ sll_ptr(count, log2_elem_size, byte_count);
1011       if (NOLp == NULL)
1012         __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, no_overlap_target);
1013       else
1014         __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, (*NOLp));
1015       __ delayed()->cmp(to_from, byte_count);
1016       if (NOLp == NULL)
1017         __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, no_overlap_target);
1018       else
1019         __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, (*NOLp));
1020       __ delayed()->nop();
1021   }
1022 
1023   //
1024   //  Generate pre-write barrier for array.
1025   //
1026   //  Input:
1027   //     addr     - register containing starting address
1028   //     count    - register containing element count
1029   //     tmp      - scratch register
1030   //
1031   //  The input registers are overwritten.
1032   //
1033   void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
1034     BarrierSet* bs = Universe::heap()->barrier_set();
1035     switch (bs->kind()) {
1036       case BarrierSet::G1SATBCT:
1037       case BarrierSet::G1SATBCTLogging:
1038         // With G1, don't generate the call if we statically know that the target in uninitialized
1039         if (!dest_uninitialized) {
1040           __ save_frame(0);
1041           // Save the necessary global regs... will be used after.
1042           if (addr->is_global()) {
1043             __ mov(addr, L0);
1044           }
1045           if (count->is_global()) {
1046             __ mov(count, L1);
1047           }
1048           __ mov(addr->after_save(), O0);
1049           // Get the count into O1
1050           __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre));
1051           __ delayed()->mov(count->after_save(), O1);
1052           if (addr->is_global()) {
1053             __ mov(L0, addr);
1054           }
1055           if (count->is_global()) {
1056             __ mov(L1, count);
1057           }
1058           __ restore();
1059         }
1060         break;
1061       case BarrierSet::CardTableModRef:
1062       case BarrierSet::CardTableExtension:
1063       case BarrierSet::ModRef:
1064         break;
1065       default:
1066         ShouldNotReachHere();
1067     }
1068   }
1069   //
1070   //  Generate post-write barrier for array.
1071   //
1072   //  Input:
1073   //     addr     - register containing starting address
1074   //     count    - register containing element count
1075   //     tmp      - scratch register
1076   //
1077   //  The input registers are overwritten.
1078   //
1079   void gen_write_ref_array_post_barrier(Register addr, Register count,
1080                                         Register tmp) {
1081     BarrierSet* bs = Universe::heap()->barrier_set();
1082 
1083     switch (bs->kind()) {
1084       case BarrierSet::G1SATBCT:
1085       case BarrierSet::G1SATBCTLogging:
1086         {
1087           // Get some new fresh output registers.
1088           __ save_frame(0);
1089           __ mov(addr->after_save(), O0);
1090           __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post));
1091           __ delayed()->mov(count->after_save(), O1);
1092           __ restore();
1093         }
1094         break;
1095       case BarrierSet::CardTableModRef:
1096       case BarrierSet::CardTableExtension:
1097         {
1098           CardTableModRefBS* ct = (CardTableModRefBS*)bs;
1099           assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
1100           assert_different_registers(addr, count, tmp);
1101 
1102           Label L_loop;
1103 
1104           __ sll_ptr(count, LogBytesPerHeapOop, count);
1105           __ sub(count, BytesPerHeapOop, count);
1106           __ add(count, addr, count);
1107           // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
1108           __ srl_ptr(addr, CardTableModRefBS::card_shift, addr);
1109           __ srl_ptr(count, CardTableModRefBS::card_shift, count);
1110           __ sub(count, addr, count);
1111           AddressLiteral rs(ct->byte_map_base);
1112           __ set(rs, tmp);
1113         __ BIND(L_loop);
1114           __ stb(G0, tmp, addr);
1115           __ subcc(count, 1, count);
1116           __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
1117           __ delayed()->add(addr, 1, addr);
1118         }
1119         break;
1120       case BarrierSet::ModRef:
1121         break;
1122       default:
1123         ShouldNotReachHere();
1124     }
1125   }
1126 
1127 
1128   // Copy big chunks forward with shift
1129   //
1130   // Inputs:
1131   //   from      - source arrays
1132   //   to        - destination array aligned to 8-bytes
1133   //   count     - elements count to copy >= the count equivalent to 16 bytes
1134   //   count_dec - elements count's decrement equivalent to 16 bytes
1135   //   L_copy_bytes - copy exit label
1136   //
1137   void copy_16_bytes_forward_with_shift(Register from, Register to,
1138                      Register count, int count_dec, Label& L_copy_bytes) {
1139     Label L_loop, L_aligned_copy, L_copy_last_bytes;
1140 
1141     // if both arrays have the same alignment mod 8, do 8 bytes aligned copy
1142       __ andcc(from, 7, G1); // misaligned bytes
1143       __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
1144       __ delayed()->nop();
1145 
1146     const Register left_shift  = G1; // left  shift bit counter
1147     const Register right_shift = G5; // right shift bit counter
1148 
1149       __ sll(G1, LogBitsPerByte, left_shift);
1150       __ mov(64, right_shift);
1151       __ sub(right_shift, left_shift, right_shift);
1152 
1153     //
1154     // Load 2 aligned 8-bytes chunks and use one from previous iteration
1155     // to form 2 aligned 8-bytes chunks to store.
1156     //
1157       __ deccc(count, count_dec); // Pre-decrement 'count'
1158       __ andn(from, 7, from);     // Align address
1159       __ ldx(from, 0, O3);
1160       __ inc(from, 8);
1161       __ align(OptoLoopAlignment);
1162     __ BIND(L_loop);
1163       __ ldx(from, 0, O4);
1164       __ deccc(count, count_dec); // Can we do next iteration after this one?
1165       __ ldx(from, 8, G4);
1166       __ inc(to, 16);
1167       __ inc(from, 16);
1168       __ sllx(O3, left_shift,  O3);
1169       __ srlx(O4, right_shift, G3);
1170       __ bset(G3, O3);
1171       __ stx(O3, to, -16);
1172       __ sllx(O4, left_shift,  O4);
1173       __ srlx(G4, right_shift, G3);
1174       __ bset(G3, O4);
1175       __ stx(O4, to, -8);
1176       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
1177       __ delayed()->mov(G4, O3);
1178 
1179       __ inccc(count, count_dec>>1 ); // + 8 bytes
1180       __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
1181       __ delayed()->inc(count, count_dec>>1); // restore 'count'
1182 
1183       // copy 8 bytes, part of them already loaded in O3
1184       __ ldx(from, 0, O4);
1185       __ inc(to, 8);
1186       __ inc(from, 8);
1187       __ sllx(O3, left_shift,  O3);
1188       __ srlx(O4, right_shift, G3);
1189       __ bset(O3, G3);
1190       __ stx(G3, to, -8);
1191 
1192     __ BIND(L_copy_last_bytes);
1193       __ srl(right_shift, LogBitsPerByte, right_shift); // misaligned bytes
1194       __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
1195       __ delayed()->sub(from, right_shift, from);       // restore address
1196 
1197     __ BIND(L_aligned_copy);
1198   }
1199 
1200   // Copy big chunks backward with shift
1201   //
1202   // Inputs:
1203   //   end_from  - source arrays end address
1204   //   end_to    - destination array end address aligned to 8-bytes
1205   //   count     - elements count to copy >= the count equivalent to 16 bytes
1206   //   count_dec - elements count's decrement equivalent to 16 bytes
1207   //   L_aligned_copy - aligned copy exit label
1208   //   L_copy_bytes   - copy exit label
1209   //
1210   void copy_16_bytes_backward_with_shift(Register end_from, Register end_to,
1211                      Register count, int count_dec,
1212                      Label& L_aligned_copy, Label& L_copy_bytes) {
1213     Label L_loop, L_copy_last_bytes;
1214 
1215     // if both arrays have the same alignment mod 8, do 8 bytes aligned copy
1216       __ andcc(end_from, 7, G1); // misaligned bytes
1217       __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
1218       __ delayed()->deccc(count, count_dec); // Pre-decrement 'count'
1219 
1220     const Register left_shift  = G1; // left  shift bit counter
1221     const Register right_shift = G5; // right shift bit counter
1222 
1223       __ sll(G1, LogBitsPerByte, left_shift);
1224       __ mov(64, right_shift);
1225       __ sub(right_shift, left_shift, right_shift);
1226 
1227     //
1228     // Load 2 aligned 8-bytes chunks and use one from previous iteration
1229     // to form 2 aligned 8-bytes chunks to store.
1230     //
1231       __ andn(end_from, 7, end_from);     // Align address
1232       __ ldx(end_from, 0, O3);
1233       __ align(OptoLoopAlignment);
1234     __ BIND(L_loop);
1235       __ ldx(end_from, -8, O4);
1236       __ deccc(count, count_dec); // Can we do next iteration after this one?
1237       __ ldx(end_from, -16, G4);
1238       __ dec(end_to, 16);
1239       __ dec(end_from, 16);
1240       __ srlx(O3, right_shift, O3);
1241       __ sllx(O4, left_shift,  G3);
1242       __ bset(G3, O3);
1243       __ stx(O3, end_to, 8);
1244       __ srlx(O4, right_shift, O4);
1245       __ sllx(G4, left_shift,  G3);
1246       __ bset(G3, O4);
1247       __ stx(O4, end_to, 0);
1248       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
1249       __ delayed()->mov(G4, O3);
1250 
1251       __ inccc(count, count_dec>>1 ); // + 8 bytes
1252       __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
1253       __ delayed()->inc(count, count_dec>>1); // restore 'count'
1254 
1255       // copy 8 bytes, part of them already loaded in O3
1256       __ ldx(end_from, -8, O4);
1257       __ dec(end_to, 8);
1258       __ dec(end_from, 8);
1259       __ srlx(O3, right_shift, O3);
1260       __ sllx(O4, left_shift,  G3);
1261       __ bset(O3, G3);
1262       __ stx(G3, end_to, 0);
1263 
1264     __ BIND(L_copy_last_bytes);
1265       __ srl(left_shift, LogBitsPerByte, left_shift);    // misaligned bytes
1266       __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
1267       __ delayed()->add(end_from, left_shift, end_from); // restore address
1268   }
1269 
1270   //
1271   //  Generate stub for disjoint byte copy.  If "aligned" is true, the
1272   //  "from" and "to" addresses are assumed to be heapword aligned.
1273   //
1274   // Arguments for generated stub:
1275   //      from:  O0
1276   //      to:    O1
1277   //      count: O2 treated as signed
1278   //
1279   address generate_disjoint_byte_copy(bool aligned, address *entry, const char *name) {
1280     __ align(CodeEntryAlignment);
1281     StubCodeMark mark(this, "StubRoutines", name);
1282     address start = __ pc();
1283 
1284     Label L_skip_alignment, L_align;
1285     Label L_copy_byte, L_copy_byte_loop, L_exit;
1286 
1287     const Register from      = O0;   // source array address
1288     const Register to        = O1;   // destination array address
1289     const Register count     = O2;   // elements count
1290     const Register offset    = O5;   // offset from start of arrays
1291     // O3, O4, G3, G4 are used as temp registers
1292 
1293     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1294 
1295     if (entry != NULL) {
1296       *entry = __ pc();
1297       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1298       BLOCK_COMMENT("Entry:");
1299     }
1300 
1301     // for short arrays, just do single element copy
1302     __ cmp(count, 23); // 16 + 7
1303     __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
1304     __ delayed()->mov(G0, offset);
1305 
1306     if (aligned) {
1307       // 'aligned' == true when it is known statically during compilation
1308       // of this arraycopy call site that both 'from' and 'to' addresses
1309       // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
1310       //
1311       // Aligned arrays have 4 bytes alignment in 32-bits VM
1312       // and 8 bytes - in 64-bits VM. So we do it only for 32-bits VM
1313       //
1314 #ifndef _LP64
1315       // copy a 4-bytes word if necessary to align 'to' to 8 bytes
1316       __ andcc(to, 7, G0);
1317       __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment);
1318       __ delayed()->ld(from, 0, O3);
1319       __ inc(from, 4);
1320       __ inc(to, 4);
1321       __ dec(count, 4);
1322       __ st(O3, to, -4);
1323     __ BIND(L_skip_alignment);
1324 #endif
1325     } else {
1326       // copy bytes to align 'to' on 8 byte boundary
1327       __ andcc(to, 7, G1); // misaligned bytes
1328       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1329       __ delayed()->neg(G1);
1330       __ inc(G1, 8);       // bytes need to copy to next 8-bytes alignment
1331       __ sub(count, G1, count);
1332     __ BIND(L_align);
1333       __ ldub(from, 0, O3);
1334       __ deccc(G1);
1335       __ inc(from);
1336       __ stb(O3, to, 0);
1337       __ br(Assembler::notZero, false, Assembler::pt, L_align);
1338       __ delayed()->inc(to);
1339     __ BIND(L_skip_alignment);
1340     }
1341 #ifdef _LP64
1342     if (!aligned)
1343 #endif
1344     {
1345       // Copy with shift 16 bytes per iteration if arrays do not have
1346       // the same alignment mod 8, otherwise fall through to the next
1347       // code for aligned copy.
1348       // The compare above (count >= 23) guarantes 'count' >= 16 bytes.
1349       // Also jump over aligned copy after the copy with shift completed.
1350 
1351       copy_16_bytes_forward_with_shift(from, to, count, 16, L_copy_byte);
1352     }
1353 
1354     // Both array are 8 bytes aligned, copy 16 bytes at a time
1355       __ and3(count, 7, G4); // Save count
1356       __ srl(count, 3, count);
1357      generate_disjoint_long_copy_core(aligned);
1358       __ mov(G4, count);     // Restore count
1359 
1360     // copy tailing bytes
1361     __ BIND(L_copy_byte);
1362       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
1363       __ align(OptoLoopAlignment);
1364     __ BIND(L_copy_byte_loop);
1365       __ ldub(from, offset, O3);
1366       __ deccc(count);
1367       __ stb(O3, to, offset);
1368       __ brx(Assembler::notZero, false, Assembler::pt, L_copy_byte_loop);
1369       __ delayed()->inc(offset);
1370 
1371     __ BIND(L_exit);
1372       // O3, O4 are used as temp registers
1373       inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
1374       __ retl();
1375       __ delayed()->mov(G0, O0); // return 0
1376     return start;
1377   }
1378 
1379   //
1380   //  Generate stub for conjoint byte copy.  If "aligned" is true, the
1381   //  "from" and "to" addresses are assumed to be heapword aligned.
1382   //
1383   // Arguments for generated stub:
1384   //      from:  O0
1385   //      to:    O1
1386   //      count: O2 treated as signed
1387   //
1388   address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
1389                                       address *entry, const char *name) {
1390     // Do reverse copy.
1391 
1392     __ align(CodeEntryAlignment);
1393     StubCodeMark mark(this, "StubRoutines", name);
1394     address start = __ pc();
1395 
1396     Label L_skip_alignment, L_align, L_aligned_copy;
1397     Label L_copy_byte, L_copy_byte_loop, L_exit;
1398 
1399     const Register from      = O0;   // source array address
1400     const Register to        = O1;   // destination array address
1401     const Register count     = O2;   // elements count
1402     const Register end_from  = from; // source array end address
1403     const Register end_to    = to;   // destination array end address
1404 
1405     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1406 
1407     if (entry != NULL) {
1408       *entry = __ pc();
1409       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1410       BLOCK_COMMENT("Entry:");
1411     }
1412 
1413     array_overlap_test(nooverlap_target, 0);
1414 
1415     __ add(to, count, end_to);       // offset after last copied element
1416 
1417     // for short arrays, just do single element copy
1418     __ cmp(count, 23); // 16 + 7
1419     __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
1420     __ delayed()->add(from, count, end_from);
1421 
1422     {
1423       // Align end of arrays since they could be not aligned even
1424       // when arrays itself are aligned.
1425 
1426       // copy bytes to align 'end_to' on 8 byte boundary
1427       __ andcc(end_to, 7, G1); // misaligned bytes
1428       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1429       __ delayed()->nop();
1430       __ sub(count, G1, count);
1431     __ BIND(L_align);
1432       __ dec(end_from);
1433       __ dec(end_to);
1434       __ ldub(end_from, 0, O3);
1435       __ deccc(G1);
1436       __ brx(Assembler::notZero, false, Assembler::pt, L_align);
1437       __ delayed()->stb(O3, end_to, 0);
1438     __ BIND(L_skip_alignment);
1439     }
1440 #ifdef _LP64
1441     if (aligned) {
1442       // Both arrays are aligned to 8-bytes in 64-bits VM.
1443       // The 'count' is decremented in copy_16_bytes_backward_with_shift()
1444       // in unaligned case.
1445       __ dec(count, 16);
1446     } else
1447 #endif
1448     {
1449       // Copy with shift 16 bytes per iteration if arrays do not have
1450       // the same alignment mod 8, otherwise jump to the next
1451       // code for aligned copy (and substracting 16 from 'count' before jump).
1452       // The compare above (count >= 11) guarantes 'count' >= 16 bytes.
1453       // Also jump over aligned copy after the copy with shift completed.
1454 
1455       copy_16_bytes_backward_with_shift(end_from, end_to, count, 16,
1456                                         L_aligned_copy, L_copy_byte);
1457     }
1458     // copy 4 elements (16 bytes) at a time
1459       __ align(OptoLoopAlignment);
1460     __ BIND(L_aligned_copy);
1461       __ dec(end_from, 16);
1462       __ ldx(end_from, 8, O3);
1463       __ ldx(end_from, 0, O4);
1464       __ dec(end_to, 16);
1465       __ deccc(count, 16);
1466       __ stx(O3, end_to, 8);
1467       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
1468       __ delayed()->stx(O4, end_to, 0);
1469       __ inc(count, 16);
1470 
1471     // copy 1 element (2 bytes) at a time
1472     __ BIND(L_copy_byte);
1473       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
1474       __ align(OptoLoopAlignment);
1475     __ BIND(L_copy_byte_loop);
1476       __ dec(end_from);
1477       __ dec(end_to);
1478       __ ldub(end_from, 0, O4);
1479       __ deccc(count);
1480       __ brx(Assembler::greater, false, Assembler::pt, L_copy_byte_loop);
1481       __ delayed()->stb(O4, end_to, 0);
1482 
1483     __ BIND(L_exit);
1484     // O3, O4 are used as temp registers
1485     inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
1486     __ retl();
1487     __ delayed()->mov(G0, O0); // return 0
1488     return start;
1489   }
1490 
1491   //
1492   //  Generate stub for disjoint short copy.  If "aligned" is true, the
1493   //  "from" and "to" addresses are assumed to be heapword aligned.
1494   //
1495   // Arguments for generated stub:
1496   //      from:  O0
1497   //      to:    O1
1498   //      count: O2 treated as signed
1499   //
1500   address generate_disjoint_short_copy(bool aligned, address *entry, const char * name) {
1501     __ align(CodeEntryAlignment);
1502     StubCodeMark mark(this, "StubRoutines", name);
1503     address start = __ pc();
1504 
1505     Label L_skip_alignment, L_skip_alignment2;
1506     Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit;
1507 
1508     const Register from      = O0;   // source array address
1509     const Register to        = O1;   // destination array address
1510     const Register count     = O2;   // elements count
1511     const Register offset    = O5;   // offset from start of arrays
1512     // O3, O4, G3, G4 are used as temp registers
1513 
1514     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1515 
1516     if (entry != NULL) {
1517       *entry = __ pc();
1518       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1519       BLOCK_COMMENT("Entry:");
1520     }
1521 
1522     // for short arrays, just do single element copy
1523     __ cmp(count, 11); // 8 + 3  (22 bytes)
1524     __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
1525     __ delayed()->mov(G0, offset);
1526 
1527     if (aligned) {
1528       // 'aligned' == true when it is known statically during compilation
1529       // of this arraycopy call site that both 'from' and 'to' addresses
1530       // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
1531       //
1532       // Aligned arrays have 4 bytes alignment in 32-bits VM
1533       // and 8 bytes - in 64-bits VM.
1534       //
1535 #ifndef _LP64
1536       // copy a 2-elements word if necessary to align 'to' to 8 bytes
1537       __ andcc(to, 7, G0);
1538       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1539       __ delayed()->ld(from, 0, O3);
1540       __ inc(from, 4);
1541       __ inc(to, 4);
1542       __ dec(count, 2);
1543       __ st(O3, to, -4);
1544     __ BIND(L_skip_alignment);
1545 #endif
1546     } else {
1547       // copy 1 element if necessary to align 'to' on an 4 bytes
1548       __ andcc(to, 3, G0);
1549       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1550       __ delayed()->lduh(from, 0, O3);
1551       __ inc(from, 2);
1552       __ inc(to, 2);
1553       __ dec(count);
1554       __ sth(O3, to, -2);
1555     __ BIND(L_skip_alignment);
1556 
1557       // copy 2 elements to align 'to' on an 8 byte boundary
1558       __ andcc(to, 7, G0);
1559       __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2);
1560       __ delayed()->lduh(from, 0, O3);
1561       __ dec(count, 2);
1562       __ lduh(from, 2, O4);
1563       __ inc(from, 4);
1564       __ inc(to, 4);
1565       __ sth(O3, to, -4);
1566       __ sth(O4, to, -2);
1567     __ BIND(L_skip_alignment2);
1568     }
1569 #ifdef _LP64
1570     if (!aligned)
1571 #endif
1572     {
1573       // Copy with shift 16 bytes per iteration if arrays do not have
1574       // the same alignment mod 8, otherwise fall through to the next
1575       // code for aligned copy.
1576       // The compare above (count >= 11) guarantes 'count' >= 16 bytes.
1577       // Also jump over aligned copy after the copy with shift completed.
1578 
1579       copy_16_bytes_forward_with_shift(from, to, count, 8, L_copy_2_bytes);
1580     }
1581 
1582     // Both array are 8 bytes aligned, copy 16 bytes at a time
1583       __ and3(count, 3, G4); // Save
1584       __ srl(count, 2, count);
1585      generate_disjoint_long_copy_core(aligned);
1586       __ mov(G4, count); // restore
1587 
1588     // copy 1 element at a time
1589     __ BIND(L_copy_2_bytes);
1590       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
1591       __ align(OptoLoopAlignment);
1592     __ BIND(L_copy_2_bytes_loop);
1593       __ lduh(from, offset, O3);
1594       __ deccc(count);
1595       __ sth(O3, to, offset);
1596       __ brx(Assembler::notZero, false, Assembler::pt, L_copy_2_bytes_loop);
1597       __ delayed()->inc(offset, 2);
1598 
1599     __ BIND(L_exit);
1600       // O3, O4 are used as temp registers
1601       inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
1602       __ retl();
1603       __ delayed()->mov(G0, O0); // return 0
1604     return start;
1605   }
1606 
1607   //
1608   //  Generate stub for disjoint short fill.  If "aligned" is true, the
1609   //  "to" address is assumed to be heapword aligned.
1610   //
1611   // Arguments for generated stub:
1612   //      to:    O0
1613   //      value: O1
1614   //      count: O2 treated as signed
1615   //
1616   address generate_fill(BasicType t, bool aligned, const char* name) {
1617     __ align(CodeEntryAlignment);
1618     StubCodeMark mark(this, "StubRoutines", name);
1619     address start = __ pc();
1620 
1621     const Register to        = O0;   // source array address
1622     const Register value     = O1;   // fill value
1623     const Register count     = O2;   // elements count
1624     // O3 is used as a temp register
1625 
1626     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1627 
1628     Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
1629     Label L_fill_2_bytes, L_fill_elements, L_fill_32_bytes;
1630 
1631     int shift = -1;
1632     switch (t) {
1633        case T_BYTE:
1634         shift = 2;
1635         break;
1636        case T_SHORT:
1637         shift = 1;
1638         break;
1639       case T_INT:
1640          shift = 0;
1641         break;
1642       default: ShouldNotReachHere();
1643     }
1644 
1645     BLOCK_COMMENT("Entry:");
1646 
1647     if (t == T_BYTE) {
1648       // Zero extend value
1649       __ and3(value, 0xff, value);
1650       __ sllx(value, 8, O3);
1651       __ or3(value, O3, value);
1652     }
1653     if (t == T_SHORT) {
1654       // Zero extend value
1655       __ sllx(value, 48, value);
1656       __ srlx(value, 48, value);
1657     }
1658     if (t == T_BYTE || t == T_SHORT) {
1659       __ sllx(value, 16, O3);
1660       __ or3(value, O3, value);
1661     }
1662 
1663     __ cmp(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
1664     __ brx(Assembler::lessUnsigned, false, Assembler::pn, L_fill_elements); // use unsigned cmp
1665     __ delayed()->andcc(count, 1, G0);
1666 
1667     if (!aligned && (t == T_BYTE || t == T_SHORT)) {
1668       // align source address at 4 bytes address boundary
1669       if (t == T_BYTE) {
1670         // One byte misalignment happens only for byte arrays
1671         __ andcc(to, 1, G0);
1672         __ br(Assembler::zero, false, Assembler::pt, L_skip_align1);
1673         __ delayed()->nop();
1674         __ stb(value, to, 0);
1675         __ inc(to, 1);
1676         __ dec(count, 1);
1677         __ BIND(L_skip_align1);
1678       }
1679       // Two bytes misalignment happens only for byte and short (char) arrays
1680       __ andcc(to, 2, G0);
1681       __ br(Assembler::zero, false, Assembler::pt, L_skip_align2);
1682       __ delayed()->nop();
1683       __ sth(value, to, 0);
1684       __ inc(to, 2);
1685       __ dec(count, 1 << (shift - 1));
1686       __ BIND(L_skip_align2);
1687     }
1688 #ifdef _LP64
1689     if (!aligned) {
1690 #endif
1691     // align to 8 bytes, we know we are 4 byte aligned to start
1692     __ andcc(to, 7, G0);
1693     __ br(Assembler::zero, false, Assembler::pt, L_fill_32_bytes);
1694     __ delayed()->nop();
1695     __ stw(value, to, 0);
1696     __ inc(to, 4);
1697     __ dec(count, 1 << shift);
1698     __ BIND(L_fill_32_bytes);
1699 #ifdef _LP64
1700     }
1701 #endif
1702 
1703     if (t == T_INT) {
1704       // Zero extend value
1705       __ srl(value, 0, value);
1706     }
1707     if (t == T_BYTE || t == T_SHORT || t == T_INT) {
1708       __ sllx(value, 32, O3);
1709       __ or3(value, O3, value);
1710     }
1711 
1712     Label L_check_fill_8_bytes;
1713     // Fill 32-byte chunks
1714     __ subcc(count, 8 << shift, count);
1715     __ brx(Assembler::less, false, Assembler::pt, L_check_fill_8_bytes);
1716     __ delayed()->nop();
1717 
1718     Label L_fill_32_bytes_loop, L_fill_4_bytes;
1719     __ align(16);
1720     __ BIND(L_fill_32_bytes_loop);
1721 
1722     __ stx(value, to, 0);
1723     __ stx(value, to, 8);
1724     __ stx(value, to, 16);
1725     __ stx(value, to, 24);
1726 
1727     __ subcc(count, 8 << shift, count);
1728     __ brx(Assembler::greaterEqual, false, Assembler::pt, L_fill_32_bytes_loop);
1729     __ delayed()->add(to, 32, to);
1730 
1731     __ BIND(L_check_fill_8_bytes);
1732     __ addcc(count, 8 << shift, count);
1733     __ brx(Assembler::zero, false, Assembler::pn, L_exit);
1734     __ delayed()->subcc(count, 1 << (shift + 1), count);
1735     __ brx(Assembler::less, false, Assembler::pn, L_fill_4_bytes);
1736     __ delayed()->andcc(count, 1<<shift, G0);
1737 
1738     //
1739     // length is too short, just fill 8 bytes at a time
1740     //
1741     Label L_fill_8_bytes_loop;
1742     __ BIND(L_fill_8_bytes_loop);
1743     __ stx(value, to, 0);
1744     __ subcc(count, 1 << (shift + 1), count);
1745     __ brx(Assembler::greaterEqual, false, Assembler::pn, L_fill_8_bytes_loop);
1746     __ delayed()->add(to, 8, to);
1747 
1748     // fill trailing 4 bytes
1749     __ andcc(count, 1<<shift, G0);  // in delay slot of branches
1750     if (t == T_INT) {
1751       __ BIND(L_fill_elements);
1752     }
1753     __ BIND(L_fill_4_bytes);
1754     __ brx(Assembler::zero, false, Assembler::pt, L_fill_2_bytes);
1755     if (t == T_BYTE || t == T_SHORT) {
1756       __ delayed()->andcc(count, 1<<(shift-1), G0);
1757     } else {
1758       __ delayed()->nop();
1759     }
1760     __ stw(value, to, 0);
1761     if (t == T_BYTE || t == T_SHORT) {
1762       __ inc(to, 4);
1763       // fill trailing 2 bytes
1764       __ andcc(count, 1<<(shift-1), G0); // in delay slot of branches
1765       __ BIND(L_fill_2_bytes);
1766       __ brx(Assembler::zero, false, Assembler::pt, L_fill_byte);
1767       __ delayed()->andcc(count, 1, count);
1768       __ sth(value, to, 0);
1769       if (t == T_BYTE) {
1770         __ inc(to, 2);
1771         // fill trailing byte
1772         __ andcc(count, 1, count);  // in delay slot of branches
1773         __ BIND(L_fill_byte);
1774         __ brx(Assembler::zero, false, Assembler::pt, L_exit);
1775         __ delayed()->nop();
1776         __ stb(value, to, 0);
1777       } else {
1778         __ BIND(L_fill_byte);
1779       }
1780     } else {
1781       __ BIND(L_fill_2_bytes);
1782     }
1783     __ BIND(L_exit);
1784     __ retl();
1785     __ delayed()->nop();
1786 
1787     // Handle copies less than 8 bytes.  Int is handled elsewhere.
1788     if (t == T_BYTE) {
1789       __ BIND(L_fill_elements);
1790       Label L_fill_2, L_fill_4;
1791       // in delay slot __ andcc(count, 1, G0);
1792       __ brx(Assembler::zero, false, Assembler::pt, L_fill_2);
1793       __ delayed()->andcc(count, 2, G0);
1794       __ stb(value, to, 0);
1795       __ inc(to, 1);
1796       __ BIND(L_fill_2);
1797       __ brx(Assembler::zero, false, Assembler::pt, L_fill_4);
1798       __ delayed()->andcc(count, 4, G0);
1799       __ stb(value, to, 0);
1800       __ stb(value, to, 1);
1801       __ inc(to, 2);
1802       __ BIND(L_fill_4);
1803       __ brx(Assembler::zero, false, Assembler::pt, L_exit);
1804       __ delayed()->nop();
1805       __ stb(value, to, 0);
1806       __ stb(value, to, 1);
1807       __ stb(value, to, 2);
1808       __ retl();
1809       __ delayed()->stb(value, to, 3);
1810     }
1811 
1812     if (t == T_SHORT) {
1813       Label L_fill_2;
1814       __ BIND(L_fill_elements);
1815       // in delay slot __ andcc(count, 1, G0);
1816       __ brx(Assembler::zero, false, Assembler::pt, L_fill_2);
1817       __ delayed()->andcc(count, 2, G0);
1818       __ sth(value, to, 0);
1819       __ inc(to, 2);
1820       __ BIND(L_fill_2);
1821       __ brx(Assembler::zero, false, Assembler::pt, L_exit);
1822       __ delayed()->nop();
1823       __ sth(value, to, 0);
1824       __ retl();
1825       __ delayed()->sth(value, to, 2);
1826     }
1827     return start;
1828   }
1829 
1830   //
1831   //  Generate stub for conjoint short copy.  If "aligned" is true, the
1832   //  "from" and "to" addresses are assumed to be heapword aligned.
1833   //
1834   // Arguments for generated stub:
1835   //      from:  O0
1836   //      to:    O1
1837   //      count: O2 treated as signed
1838   //
1839   address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
1840                                        address *entry, const char *name) {
1841     // Do reverse copy.
1842 
1843     __ align(CodeEntryAlignment);
1844     StubCodeMark mark(this, "StubRoutines", name);
1845     address start = __ pc();
1846 
1847     Label L_skip_alignment, L_skip_alignment2, L_aligned_copy;
1848     Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit;
1849 
1850     const Register from      = O0;   // source array address
1851     const Register to        = O1;   // destination array address
1852     const Register count     = O2;   // elements count
1853     const Register end_from  = from; // source array end address
1854     const Register end_to    = to;   // destination array end address
1855 
1856     const Register byte_count = O3;  // bytes count to copy
1857 
1858     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1859 
1860     if (entry != NULL) {
1861       *entry = __ pc();
1862       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1863       BLOCK_COMMENT("Entry:");
1864     }
1865 
1866     array_overlap_test(nooverlap_target, 1);
1867 
1868     __ sllx(count, LogBytesPerShort, byte_count);
1869     __ add(to, byte_count, end_to);  // offset after last copied element
1870 
1871     // for short arrays, just do single element copy
1872     __ cmp(count, 11); // 8 + 3  (22 bytes)
1873     __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
1874     __ delayed()->add(from, byte_count, end_from);
1875 
1876     {
1877       // Align end of arrays since they could be not aligned even
1878       // when arrays itself are aligned.
1879 
1880       // copy 1 element if necessary to align 'end_to' on an 4 bytes
1881       __ andcc(end_to, 3, G0);
1882       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1883       __ delayed()->lduh(end_from, -2, O3);
1884       __ dec(end_from, 2);
1885       __ dec(end_to, 2);
1886       __ dec(count);
1887       __ sth(O3, end_to, 0);
1888     __ BIND(L_skip_alignment);
1889 
1890       // copy 2 elements to align 'end_to' on an 8 byte boundary
1891       __ andcc(end_to, 7, G0);
1892       __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2);
1893       __ delayed()->lduh(end_from, -2, O3);
1894       __ dec(count, 2);
1895       __ lduh(end_from, -4, O4);
1896       __ dec(end_from, 4);
1897       __ dec(end_to, 4);
1898       __ sth(O3, end_to, 2);
1899       __ sth(O4, end_to, 0);
1900     __ BIND(L_skip_alignment2);
1901     }
1902 #ifdef _LP64
1903     if (aligned) {
1904       // Both arrays are aligned to 8-bytes in 64-bits VM.
1905       // The 'count' is decremented in copy_16_bytes_backward_with_shift()
1906       // in unaligned case.
1907       __ dec(count, 8);
1908     } else
1909 #endif
1910     {
1911       // Copy with shift 16 bytes per iteration if arrays do not have
1912       // the same alignment mod 8, otherwise jump to the next
1913       // code for aligned copy (and substracting 8 from 'count' before jump).
1914       // The compare above (count >= 11) guarantes 'count' >= 16 bytes.
1915       // Also jump over aligned copy after the copy with shift completed.
1916 
1917       copy_16_bytes_backward_with_shift(end_from, end_to, count, 8,
1918                                         L_aligned_copy, L_copy_2_bytes);
1919     }
1920     // copy 4 elements (16 bytes) at a time
1921       __ align(OptoLoopAlignment);
1922     __ BIND(L_aligned_copy);
1923       __ dec(end_from, 16);
1924       __ ldx(end_from, 8, O3);
1925       __ ldx(end_from, 0, O4);
1926       __ dec(end_to, 16);
1927       __ deccc(count, 8);
1928       __ stx(O3, end_to, 8);
1929       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
1930       __ delayed()->stx(O4, end_to, 0);
1931       __ inc(count, 8);
1932 
1933     // copy 1 element (2 bytes) at a time
1934     __ BIND(L_copy_2_bytes);
1935       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
1936     __ BIND(L_copy_2_bytes_loop);
1937       __ dec(end_from, 2);
1938       __ dec(end_to, 2);
1939       __ lduh(end_from, 0, O4);
1940       __ deccc(count);
1941       __ brx(Assembler::greater, false, Assembler::pt, L_copy_2_bytes_loop);
1942       __ delayed()->sth(O4, end_to, 0);
1943 
1944     __ BIND(L_exit);
1945     // O3, O4 are used as temp registers
1946     inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
1947     __ retl();
1948     __ delayed()->mov(G0, O0); // return 0
1949     return start;
1950   }
1951 
1952   //
1953   //  Generate core code for disjoint int copy (and oop copy on 32-bit).
1954   //  If "aligned" is true, the "from" and "to" addresses are assumed
1955   //  to be heapword aligned.
1956   //
1957   // Arguments:
1958   //      from:  O0
1959   //      to:    O1
1960   //      count: O2 treated as signed
1961   //
1962   void generate_disjoint_int_copy_core(bool aligned) {
1963 
1964     Label L_skip_alignment, L_aligned_copy;
1965     Label L_copy_16_bytes,  L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
1966 
1967     const Register from      = O0;   // source array address
1968     const Register to        = O1;   // destination array address
1969     const Register count     = O2;   // elements count
1970     const Register offset    = O5;   // offset from start of arrays
1971     // O3, O4, G3, G4 are used as temp registers
1972 
1973     // 'aligned' == true when it is known statically during compilation
1974     // of this arraycopy call site that both 'from' and 'to' addresses
1975     // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
1976     //
1977     // Aligned arrays have 4 bytes alignment in 32-bits VM
1978     // and 8 bytes - in 64-bits VM.
1979     //
1980 #ifdef _LP64
1981     if (!aligned)
1982 #endif
1983     {
1984       // The next check could be put under 'ifndef' since the code in
1985       // generate_disjoint_long_copy_core() has own checks and set 'offset'.
1986 
1987       // for short arrays, just do single element copy
1988       __ cmp(count, 5); // 4 + 1 (20 bytes)
1989       __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes);
1990       __ delayed()->mov(G0, offset);
1991 
1992       // copy 1 element to align 'to' on an 8 byte boundary
1993       __ andcc(to, 7, G0);
1994       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1995       __ delayed()->ld(from, 0, O3);
1996       __ inc(from, 4);
1997       __ inc(to, 4);
1998       __ dec(count);
1999       __ st(O3, to, -4);
2000     __ BIND(L_skip_alignment);
2001 
2002     // if arrays have same alignment mod 8, do 4 elements copy
2003       __ andcc(from, 7, G0);
2004       __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
2005       __ delayed()->ld(from, 0, O3);
2006 
2007     //
2008     // Load 2 aligned 8-bytes chunks and use one from previous iteration
2009     // to form 2 aligned 8-bytes chunks to store.
2010     //
2011     // copy_16_bytes_forward_with_shift() is not used here since this
2012     // code is more optimal.
2013 
2014     // copy with shift 4 elements (16 bytes) at a time
2015       __ dec(count, 4);   // The cmp at the beginning guaranty count >= 4
2016 
2017       __ align(OptoLoopAlignment);
2018     __ BIND(L_copy_16_bytes);
2019       __ ldx(from, 4, O4);
2020       __ deccc(count, 4); // Can we do next iteration after this one?
2021       __ ldx(from, 12, G4);
2022       __ inc(to, 16);
2023       __ inc(from, 16);
2024       __ sllx(O3, 32, O3);
2025       __ srlx(O4, 32, G3);
2026       __ bset(G3, O3);
2027       __ stx(O3, to, -16);
2028       __ sllx(O4, 32, O4);
2029       __ srlx(G4, 32, G3);
2030       __ bset(G3, O4);
2031       __ stx(O4, to, -8);
2032       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
2033       __ delayed()->mov(G4, O3);
2034 
2035       __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
2036       __ delayed()->inc(count, 4); // restore 'count'
2037 
2038     __ BIND(L_aligned_copy);
2039     }
2040     // copy 4 elements (16 bytes) at a time
2041       __ and3(count, 1, G4); // Save
2042       __ srl(count, 1, count);
2043      generate_disjoint_long_copy_core(aligned);
2044       __ mov(G4, count);     // Restore
2045 
2046     // copy 1 element at a time
2047     __ BIND(L_copy_4_bytes);
2048       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
2049     __ BIND(L_copy_4_bytes_loop);
2050       __ ld(from, offset, O3);
2051       __ deccc(count);
2052       __ st(O3, to, offset);
2053       __ brx(Assembler::notZero, false, Assembler::pt, L_copy_4_bytes_loop);
2054       __ delayed()->inc(offset, 4);
2055     __ BIND(L_exit);
2056   }
2057 
2058   //
2059   //  Generate stub for disjoint int copy.  If "aligned" is true, the
2060   //  "from" and "to" addresses are assumed to be heapword aligned.
2061   //
2062   // Arguments for generated stub:
2063   //      from:  O0
2064   //      to:    O1
2065   //      count: O2 treated as signed
2066   //
2067   address generate_disjoint_int_copy(bool aligned, address *entry, const char *name) {
2068     __ align(CodeEntryAlignment);
2069     StubCodeMark mark(this, "StubRoutines", name);
2070     address start = __ pc();
2071 
2072     const Register count = O2;
2073     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2074 
2075     if (entry != NULL) {
2076       *entry = __ pc();
2077       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2078       BLOCK_COMMENT("Entry:");
2079     }
2080 
2081     generate_disjoint_int_copy_core(aligned);
2082 
2083     // O3, O4 are used as temp registers
2084     inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4);
2085     __ retl();
2086     __ delayed()->mov(G0, O0); // return 0
2087     return start;
2088   }
2089 
2090   //
2091   //  Generate core code for conjoint int copy (and oop copy on 32-bit).
2092   //  If "aligned" is true, the "from" and "to" addresses are assumed
2093   //  to be heapword aligned.
2094   //
2095   // Arguments:
2096   //      from:  O0
2097   //      to:    O1
2098   //      count: O2 treated as signed
2099   //
2100   void generate_conjoint_int_copy_core(bool aligned) {
2101     // Do reverse copy.
2102 
2103     Label L_skip_alignment, L_aligned_copy;
2104     Label L_copy_16_bytes,  L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
2105 
2106     const Register from      = O0;   // source array address
2107     const Register to        = O1;   // destination array address
2108     const Register count     = O2;   // elements count
2109     const Register end_from  = from; // source array end address
2110     const Register end_to    = to;   // destination array end address
2111     // O3, O4, O5, G3 are used as temp registers
2112 
2113     const Register byte_count = O3;  // bytes count to copy
2114 
2115       __ sllx(count, LogBytesPerInt, byte_count);
2116       __ add(to, byte_count, end_to); // offset after last copied element
2117 
2118       __ cmp(count, 5); // for short arrays, just do single element copy
2119       __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes);
2120       __ delayed()->add(from, byte_count, end_from);
2121 
2122     // copy 1 element to align 'to' on an 8 byte boundary
2123       __ andcc(end_to, 7, G0);
2124       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
2125       __ delayed()->nop();
2126       __ dec(count);
2127       __ dec(end_from, 4);
2128       __ dec(end_to,   4);
2129       __ ld(end_from, 0, O4);
2130       __ st(O4, end_to, 0);
2131     __ BIND(L_skip_alignment);
2132 
2133     // Check if 'end_from' and 'end_to' has the same alignment.
2134       __ andcc(end_from, 7, G0);
2135       __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
2136       __ delayed()->dec(count, 4); // The cmp at the start guaranty cnt >= 4
2137 
2138     // copy with shift 4 elements (16 bytes) at a time
2139     //
2140     // Load 2 aligned 8-bytes chunks and use one from previous iteration
2141     // to form 2 aligned 8-bytes chunks to store.
2142     //
2143       __ ldx(end_from, -4, O3);
2144       __ align(OptoLoopAlignment);
2145     __ BIND(L_copy_16_bytes);
2146       __ ldx(end_from, -12, O4);
2147       __ deccc(count, 4);
2148       __ ldx(end_from, -20, O5);
2149       __ dec(end_to, 16);
2150       __ dec(end_from, 16);
2151       __ srlx(O3, 32, O3);
2152       __ sllx(O4, 32, G3);
2153       __ bset(G3, O3);
2154       __ stx(O3, end_to, 8);
2155       __ srlx(O4, 32, O4);
2156       __ sllx(O5, 32, G3);
2157       __ bset(O4, G3);
2158       __ stx(G3, end_to, 0);
2159       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
2160       __ delayed()->mov(O5, O3);
2161 
2162       __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
2163       __ delayed()->inc(count, 4);
2164 
2165     // copy 4 elements (16 bytes) at a time
2166       __ align(OptoLoopAlignment);
2167     __ BIND(L_aligned_copy);
2168       __ dec(end_from, 16);
2169       __ ldx(end_from, 8, O3);
2170       __ ldx(end_from, 0, O4);
2171       __ dec(end_to, 16);
2172       __ deccc(count, 4);
2173       __ stx(O3, end_to, 8);
2174       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
2175       __ delayed()->stx(O4, end_to, 0);
2176       __ inc(count, 4);
2177 
2178     // copy 1 element (4 bytes) at a time
2179     __ BIND(L_copy_4_bytes);
2180       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
2181     __ BIND(L_copy_4_bytes_loop);
2182       __ dec(end_from, 4);
2183       __ dec(end_to, 4);
2184       __ ld(end_from, 0, O4);
2185       __ deccc(count);
2186       __ brx(Assembler::greater, false, Assembler::pt, L_copy_4_bytes_loop);
2187       __ delayed()->st(O4, end_to, 0);
2188     __ BIND(L_exit);
2189   }
2190 
2191   //
2192   //  Generate stub for conjoint int copy.  If "aligned" is true, the
2193   //  "from" and "to" addresses are assumed to be heapword aligned.
2194   //
2195   // Arguments for generated stub:
2196   //      from:  O0
2197   //      to:    O1
2198   //      count: O2 treated as signed
2199   //
2200   address generate_conjoint_int_copy(bool aligned, address nooverlap_target,
2201                                      address *entry, const char *name) {
2202     __ align(CodeEntryAlignment);
2203     StubCodeMark mark(this, "StubRoutines", name);
2204     address start = __ pc();
2205 
2206     assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
2207 
2208     if (entry != NULL) {
2209       *entry = __ pc();
2210       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2211       BLOCK_COMMENT("Entry:");
2212     }
2213 
2214     array_overlap_test(nooverlap_target, 2);
2215 
2216     generate_conjoint_int_copy_core(aligned);
2217 
2218     // O3, O4 are used as temp registers
2219     inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4);
2220     __ retl();
2221     __ delayed()->mov(G0, O0); // return 0
2222     return start;
2223   }
2224 
2225   //
2226   //  Generate core code for disjoint long copy (and oop copy on 64-bit).
2227   //  "aligned" is ignored, because we must make the stronger
2228   //  assumption that both addresses are always 64-bit aligned.
2229   //
2230   // Arguments:
2231   //      from:  O0
2232   //      to:    O1
2233   //      count: O2 treated as signed
2234   //
2235   // count -= 2;
2236   // if ( count >= 0 ) { // >= 2 elements
2237   //   if ( count > 6) { // >= 8 elements
2238   //     count -= 6; // original count - 8
2239   //     do {
2240   //       copy_8_elements;
2241   //       count -= 8;
2242   //     } while ( count >= 0 );
2243   //     count += 6;
2244   //   }
2245   //   if ( count >= 0 ) { // >= 2 elements
2246   //     do {
2247   //       copy_2_elements;
2248   //     } while ( (count=count-2) >= 0 );
2249   //   }
2250   // }
2251   // count += 2;
2252   // if ( count != 0 ) { // 1 element left
2253   //   copy_1_element;
2254   // }
2255   //
2256   void generate_disjoint_long_copy_core(bool aligned) {
2257     Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
2258     const Register from    = O0;  // source array address
2259     const Register to      = O1;  // destination array address
2260     const Register count   = O2;  // elements count
2261     const Register offset0 = O4;  // element offset
2262     const Register offset8 = O5;  // next element offset
2263 
2264       __ deccc(count, 2);
2265       __ mov(G0, offset0);   // offset from start of arrays (0)
2266       __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
2267       __ delayed()->add(offset0, 8, offset8);
2268 
2269     // Copy by 64 bytes chunks
2270     Label L_copy_64_bytes;
2271     const Register from64 = O3;  // source address
2272     const Register to64   = G3;  // destination address
2273       __ subcc(count, 6, O3);
2274       __ brx(Assembler::negative, false, Assembler::pt, L_copy_16_bytes );
2275       __ delayed()->mov(to,   to64);
2276       // Now we can use O4(offset0), O5(offset8) as temps
2277       __ mov(O3, count);
2278       __ mov(from, from64);
2279 
2280       __ align(OptoLoopAlignment);
2281     __ BIND(L_copy_64_bytes);
2282       for( int off = 0; off < 64; off += 16 ) {
2283         __ ldx(from64,  off+0, O4);
2284         __ ldx(from64,  off+8, O5);
2285         __ stx(O4, to64,  off+0);
2286         __ stx(O5, to64,  off+8);
2287       }
2288       __ deccc(count, 8);
2289       __ inc(from64, 64);
2290       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_64_bytes);
2291       __ delayed()->inc(to64, 64);
2292 
2293       // Restore O4(offset0), O5(offset8)
2294       __ sub(from64, from, offset0);
2295       __ inccc(count, 6);
2296       __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
2297       __ delayed()->add(offset0, 8, offset8);
2298 
2299       // Copy by 16 bytes chunks
2300       __ align(OptoLoopAlignment);
2301     __ BIND(L_copy_16_bytes);
2302       __ ldx(from, offset0, O3);
2303       __ ldx(from, offset8, G3);
2304       __ deccc(count, 2);
2305       __ stx(O3, to, offset0);
2306       __ inc(offset0, 16);
2307       __ stx(G3, to, offset8);
2308       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
2309       __ delayed()->inc(offset8, 16);
2310 
2311       // Copy last 8 bytes
2312     __ BIND(L_copy_8_bytes);
2313       __ inccc(count, 2);
2314       __ brx(Assembler::zero, true, Assembler::pn, L_exit );
2315       __ delayed()->mov(offset0, offset8); // Set O5 used by other stubs
2316       __ ldx(from, offset0, O3);
2317       __ stx(O3, to, offset0);
2318     __ BIND(L_exit);
2319   }
2320 
2321   //
2322   //  Generate stub for disjoint long copy.
2323   //  "aligned" is ignored, because we must make the stronger
2324   //  assumption that both addresses are always 64-bit aligned.
2325   //
2326   // Arguments for generated stub:
2327   //      from:  O0
2328   //      to:    O1
2329   //      count: O2 treated as signed
2330   //
2331   address generate_disjoint_long_copy(bool aligned, address *entry, const char *name) {
2332     __ align(CodeEntryAlignment);
2333     StubCodeMark mark(this, "StubRoutines", name);
2334     address start = __ pc();
2335 
2336     assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
2337 
2338     if (entry != NULL) {
2339       *entry = __ pc();
2340       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2341       BLOCK_COMMENT("Entry:");
2342     }
2343 
2344     generate_disjoint_long_copy_core(aligned);
2345 
2346     // O3, O4 are used as temp registers
2347     inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
2348     __ retl();
2349     __ delayed()->mov(G0, O0); // return 0
2350     return start;
2351   }
2352 
2353   //
2354   //  Generate core code for conjoint long copy (and oop copy on 64-bit).
2355   //  "aligned" is ignored, because we must make the stronger
2356   //  assumption that both addresses are always 64-bit aligned.
2357   //
2358   // Arguments:
2359   //      from:  O0
2360   //      to:    O1
2361   //      count: O2 treated as signed
2362   //
2363   void generate_conjoint_long_copy_core(bool aligned) {
2364     // Do reverse copy.
2365     Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
2366     const Register from    = O0;  // source array address
2367     const Register to      = O1;  // destination array address
2368     const Register count   = O2;  // elements count
2369     const Register offset8 = O4;  // element offset
2370     const Register offset0 = O5;  // previous element offset
2371 
2372       __ subcc(count, 1, count);
2373       __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_8_bytes );
2374       __ delayed()->sllx(count, LogBytesPerLong, offset8);
2375       __ sub(offset8, 8, offset0);
2376       __ align(OptoLoopAlignment);
2377     __ BIND(L_copy_16_bytes);
2378       __ ldx(from, offset8, O2);
2379       __ ldx(from, offset0, O3);
2380       __ stx(O2, to, offset8);
2381       __ deccc(offset8, 16);      // use offset8 as counter
2382       __ stx(O3, to, offset0);
2383       __ brx(Assembler::greater, false, Assembler::pt, L_copy_16_bytes);
2384       __ delayed()->dec(offset0, 16);
2385 
2386     __ BIND(L_copy_8_bytes);
2387       __ brx(Assembler::negative, false, Assembler::pn, L_exit );
2388       __ delayed()->nop();
2389       __ ldx(from, 0, O3);
2390       __ stx(O3, to, 0);
2391     __ BIND(L_exit);
2392   }
2393 
2394   //  Generate stub for conjoint long copy.
2395   //  "aligned" is ignored, because we must make the stronger
2396   //  assumption that both addresses are always 64-bit aligned.
2397   //
2398   // Arguments for generated stub:
2399   //      from:  O0
2400   //      to:    O1
2401   //      count: O2 treated as signed
2402   //
2403   address generate_conjoint_long_copy(bool aligned, address nooverlap_target,
2404                                       address *entry, const char *name) {
2405     __ align(CodeEntryAlignment);
2406     StubCodeMark mark(this, "StubRoutines", name);
2407     address start = __ pc();
2408 
2409     assert(aligned, "Should always be aligned");
2410 
2411     assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
2412 
2413     if (entry != NULL) {
2414       *entry = __ pc();
2415       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2416       BLOCK_COMMENT("Entry:");
2417     }
2418 
2419     array_overlap_test(nooverlap_target, 3);
2420 
2421     generate_conjoint_long_copy_core(aligned);
2422 
2423     // O3, O4 are used as temp registers
2424     inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
2425     __ retl();
2426     __ delayed()->mov(G0, O0); // return 0
2427     return start;
2428   }
2429 
2430   //  Generate stub for disjoint oop copy.  If "aligned" is true, the
2431   //  "from" and "to" addresses are assumed to be heapword aligned.
2432   //
2433   // Arguments for generated stub:
2434   //      from:  O0
2435   //      to:    O1
2436   //      count: O2 treated as signed
2437   //
2438   address generate_disjoint_oop_copy(bool aligned, address *entry, const char *name,
2439                                      bool dest_uninitialized = false) {
2440 
2441     const Register from  = O0;  // source array address
2442     const Register to    = O1;  // destination array address
2443     const Register count = O2;  // elements count
2444 
2445     __ align(CodeEntryAlignment);
2446     StubCodeMark mark(this, "StubRoutines", name);
2447     address start = __ pc();
2448 
2449     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2450 
2451     if (entry != NULL) {
2452       *entry = __ pc();
2453       // caller can pass a 64-bit byte count here
2454       BLOCK_COMMENT("Entry:");
2455     }
2456 
2457     // save arguments for barrier generation
2458     __ mov(to, G1);
2459     __ mov(count, G5);
2460     gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized);
2461   #ifdef _LP64
2462     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2463     if (UseCompressedOops) {
2464       generate_disjoint_int_copy_core(aligned);
2465     } else {
2466       generate_disjoint_long_copy_core(aligned);
2467     }
2468   #else
2469     generate_disjoint_int_copy_core(aligned);
2470   #endif
2471     // O0 is used as temp register
2472     gen_write_ref_array_post_barrier(G1, G5, O0);
2473 
2474     // O3, O4 are used as temp registers
2475     inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
2476     __ retl();
2477     __ delayed()->mov(G0, O0); // return 0
2478     return start;
2479   }
2480 
2481   //  Generate stub for conjoint oop copy.  If "aligned" is true, the
2482   //  "from" and "to" addresses are assumed to be heapword aligned.
2483   //
2484   // Arguments for generated stub:
2485   //      from:  O0
2486   //      to:    O1
2487   //      count: O2 treated as signed
2488   //
2489   address generate_conjoint_oop_copy(bool aligned, address nooverlap_target,
2490                                      address *entry, const char *name,
2491                                      bool dest_uninitialized = false) {
2492 
2493     const Register from  = O0;  // source array address
2494     const Register to    = O1;  // destination array address
2495     const Register count = O2;  // elements count
2496 
2497     __ align(CodeEntryAlignment);
2498     StubCodeMark mark(this, "StubRoutines", name);
2499     address start = __ pc();
2500 
2501     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2502 
2503     if (entry != NULL) {
2504       *entry = __ pc();
2505       // caller can pass a 64-bit byte count here
2506       BLOCK_COMMENT("Entry:");
2507     }
2508 
2509     array_overlap_test(nooverlap_target, LogBytesPerHeapOop);
2510 
2511     // save arguments for barrier generation
2512     __ mov(to, G1);
2513     __ mov(count, G5);
2514     gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized);
2515 
2516   #ifdef _LP64
2517     if (UseCompressedOops) {
2518       generate_conjoint_int_copy_core(aligned);
2519     } else {
2520       generate_conjoint_long_copy_core(aligned);
2521     }
2522   #else
2523     generate_conjoint_int_copy_core(aligned);
2524   #endif
2525 
2526     // O0 is used as temp register
2527     gen_write_ref_array_post_barrier(G1, G5, O0);
2528 
2529     // O3, O4 are used as temp registers
2530     inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
2531     __ retl();
2532     __ delayed()->mov(G0, O0); // return 0
2533     return start;
2534   }
2535 
2536 
2537   // Helper for generating a dynamic type check.
2538   // Smashes only the given temp registers.
2539   void generate_type_check(Register sub_klass,
2540                            Register super_check_offset,
2541                            Register super_klass,
2542                            Register temp,
2543                            Label& L_success) {
2544     assert_different_registers(sub_klass, super_check_offset, super_klass, temp);
2545 
2546     BLOCK_COMMENT("type_check:");
2547 
2548     Label L_miss, L_pop_to_miss;
2549 
2550     assert_clean_int(super_check_offset, temp);
2551 
2552     __ check_klass_subtype_fast_path(sub_klass, super_klass, temp, noreg,
2553                                      &L_success, &L_miss, NULL,
2554                                      super_check_offset);
2555 
2556     BLOCK_COMMENT("type_check_slow_path:");
2557     __ save_frame(0);
2558     __ check_klass_subtype_slow_path(sub_klass->after_save(),
2559                                      super_klass->after_save(),
2560                                      L0, L1, L2, L4,
2561                                      NULL, &L_pop_to_miss);
2562     __ ba(L_success);
2563     __ delayed()->restore();
2564 
2565     __ bind(L_pop_to_miss);
2566     __ restore();
2567 
2568     // Fall through on failure!
2569     __ BIND(L_miss);
2570   }
2571 
2572 
2573   //  Generate stub for checked oop copy.
2574   //
2575   // Arguments for generated stub:
2576   //      from:  O0
2577   //      to:    O1
2578   //      count: O2 treated as signed
2579   //      ckoff: O3 (super_check_offset)
2580   //      ckval: O4 (super_klass)
2581   //      ret:   O0 zero for success; (-1^K) where K is partial transfer count
2582   //
2583   address generate_checkcast_copy(const char *name, address *entry, bool dest_uninitialized = false) {
2584 
2585     const Register O0_from   = O0;      // source array address
2586     const Register O1_to     = O1;      // destination array address
2587     const Register O2_count  = O2;      // elements count
2588     const Register O3_ckoff  = O3;      // super_check_offset
2589     const Register O4_ckval  = O4;      // super_klass
2590 
2591     const Register O5_offset = O5;      // loop var, with stride wordSize
2592     const Register G1_remain = G1;      // loop var, with stride -1
2593     const Register G3_oop    = G3;      // actual oop copied
2594     const Register G4_klass  = G4;      // oop._klass
2595     const Register G5_super  = G5;      // oop._klass._primary_supers[ckval]
2596 
2597     __ align(CodeEntryAlignment);
2598     StubCodeMark mark(this, "StubRoutines", name);
2599     address start = __ pc();
2600 
2601 #ifdef ASSERT
2602     // We sometimes save a frame (see generate_type_check below).
2603     // If this will cause trouble, let's fail now instead of later.
2604     __ save_frame(0);
2605     __ restore();
2606 #endif
2607 
2608     assert_clean_int(O2_count, G1);     // Make sure 'count' is clean int.
2609 
2610 #ifdef ASSERT
2611     // caller guarantees that the arrays really are different
2612     // otherwise, we would have to make conjoint checks
2613     { Label L;
2614       __ mov(O3, G1);           // spill: overlap test smashes O3
2615       __ mov(O4, G4);           // spill: overlap test smashes O4
2616       array_overlap_test(L, LogBytesPerHeapOop);
2617       __ stop("checkcast_copy within a single array");
2618       __ bind(L);
2619       __ mov(G1, O3);
2620       __ mov(G4, O4);
2621     }
2622 #endif //ASSERT
2623 
2624     if (entry != NULL) {
2625       *entry = __ pc();
2626       // caller can pass a 64-bit byte count here (from generic stub)
2627       BLOCK_COMMENT("Entry:");
2628     }
2629     gen_write_ref_array_pre_barrier(O1_to, O2_count, dest_uninitialized);
2630 
2631     Label load_element, store_element, do_card_marks, fail, done;
2632     __ addcc(O2_count, 0, G1_remain);   // initialize loop index, and test it
2633     __ brx(Assembler::notZero, false, Assembler::pt, load_element);
2634     __ delayed()->mov(G0, O5_offset);   // offset from start of arrays
2635 
2636     // Empty array:  Nothing to do.
2637     inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
2638     __ retl();
2639     __ delayed()->set(0, O0);           // return 0 on (trivial) success
2640 
2641     // ======== begin loop ========
2642     // (Loop is rotated; its entry is load_element.)
2643     // Loop variables:
2644     //   (O5 = 0; ; O5 += wordSize) --- offset from src, dest arrays
2645     //   (O2 = len; O2 != 0; O2--) --- number of oops *remaining*
2646     //   G3, G4, G5 --- current oop, oop.klass, oop.klass.super
2647     __ align(OptoLoopAlignment);
2648 
2649     __ BIND(store_element);
2650     __ deccc(G1_remain);                // decrement the count
2651     __ store_heap_oop(G3_oop, O1_to, O5_offset); // store the oop
2652     __ inc(O5_offset, heapOopSize);     // step to next offset
2653     __ brx(Assembler::zero, true, Assembler::pt, do_card_marks);
2654     __ delayed()->set(0, O0);           // return -1 on success
2655 
2656     // ======== loop entry is here ========
2657     __ BIND(load_element);
2658     __ load_heap_oop(O0_from, O5_offset, G3_oop);  // load the oop
2659     __ br_null_short(G3_oop, Assembler::pt, store_element);
2660 
2661     __ load_klass(G3_oop, G4_klass); // query the object klass
2662 
2663     generate_type_check(G4_klass, O3_ckoff, O4_ckval, G5_super,
2664                         // branch to this on success:
2665                         store_element);
2666     // ======== end loop ========
2667 
2668     // It was a real error; we must depend on the caller to finish the job.
2669     // Register G1 has number of *remaining* oops, O2 number of *total* oops.
2670     // Emit GC store barriers for the oops we have copied (O2 minus G1),
2671     // and report their number to the caller.
2672     __ BIND(fail);
2673     __ subcc(O2_count, G1_remain, O2_count);
2674     __ brx(Assembler::zero, false, Assembler::pt, done);
2675     __ delayed()->not1(O2_count, O0);   // report (-1^K) to caller
2676 
2677     __ BIND(do_card_marks);
2678     gen_write_ref_array_post_barrier(O1_to, O2_count, O3);   // store check on O1[0..O2]
2679 
2680     __ BIND(done);
2681     inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
2682     __ retl();
2683     __ delayed()->nop();             // return value in 00
2684 
2685     return start;
2686   }
2687 
2688 
2689   //  Generate 'unsafe' array copy stub
2690   //  Though just as safe as the other stubs, it takes an unscaled
2691   //  size_t argument instead of an element count.
2692   //
2693   // Arguments for generated stub:
2694   //      from:  O0
2695   //      to:    O1
2696   //      count: O2 byte count, treated as ssize_t, can be zero
2697   //
2698   // Examines the alignment of the operands and dispatches
2699   // to a long, int, short, or byte copy loop.
2700   //
2701   address generate_unsafe_copy(const char* name,
2702                                address byte_copy_entry,
2703                                address short_copy_entry,
2704                                address int_copy_entry,
2705                                address long_copy_entry) {
2706 
2707     const Register O0_from   = O0;      // source array address
2708     const Register O1_to     = O1;      // destination array address
2709     const Register O2_count  = O2;      // elements count
2710 
2711     const Register G1_bits   = G1;      // test copy of low bits
2712 
2713     __ align(CodeEntryAlignment);
2714     StubCodeMark mark(this, "StubRoutines", name);
2715     address start = __ pc();
2716 
2717     // bump this on entry, not on exit:
2718     inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, G1, G3);
2719 
2720     __ or3(O0_from, O1_to, G1_bits);
2721     __ or3(O2_count,       G1_bits, G1_bits);
2722 
2723     __ btst(BytesPerLong-1, G1_bits);
2724     __ br(Assembler::zero, true, Assembler::pt,
2725           long_copy_entry, relocInfo::runtime_call_type);
2726     // scale the count on the way out:
2727     __ delayed()->srax(O2_count, LogBytesPerLong, O2_count);
2728 
2729     __ btst(BytesPerInt-1, G1_bits);
2730     __ br(Assembler::zero, true, Assembler::pt,
2731           int_copy_entry, relocInfo::runtime_call_type);
2732     // scale the count on the way out:
2733     __ delayed()->srax(O2_count, LogBytesPerInt, O2_count);
2734 
2735     __ btst(BytesPerShort-1, G1_bits);
2736     __ br(Assembler::zero, true, Assembler::pt,
2737           short_copy_entry, relocInfo::runtime_call_type);
2738     // scale the count on the way out:
2739     __ delayed()->srax(O2_count, LogBytesPerShort, O2_count);
2740 
2741     __ br(Assembler::always, false, Assembler::pt,
2742           byte_copy_entry, relocInfo::runtime_call_type);
2743     __ delayed()->nop();
2744 
2745     return start;
2746   }
2747 
2748 
2749   // Perform range checks on the proposed arraycopy.
2750   // Kills the two temps, but nothing else.
2751   // Also, clean the sign bits of src_pos and dst_pos.
2752   void arraycopy_range_checks(Register src,     // source array oop (O0)
2753                               Register src_pos, // source position (O1)
2754                               Register dst,     // destination array oo (O2)
2755                               Register dst_pos, // destination position (O3)
2756                               Register length,  // length of copy (O4)
2757                               Register temp1, Register temp2,
2758                               Label& L_failed) {
2759     BLOCK_COMMENT("arraycopy_range_checks:");
2760 
2761     //  if (src_pos + length > arrayOop(src)->length() ) FAIL;
2762 
2763     const Register array_length = temp1;  // scratch
2764     const Register end_pos      = temp2;  // scratch
2765 
2766     // Note:  This next instruction may be in the delay slot of a branch:
2767     __ add(length, src_pos, end_pos);  // src_pos + length
2768     __ lduw(src, arrayOopDesc::length_offset_in_bytes(), array_length);
2769     __ cmp(end_pos, array_length);
2770     __ br(Assembler::greater, false, Assembler::pn, L_failed);
2771 
2772     //  if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
2773     __ delayed()->add(length, dst_pos, end_pos); // dst_pos + length
2774     __ lduw(dst, arrayOopDesc::length_offset_in_bytes(), array_length);
2775     __ cmp(end_pos, array_length);
2776     __ br(Assembler::greater, false, Assembler::pn, L_failed);
2777 
2778     // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
2779     // Move with sign extension can be used since they are positive.
2780     __ delayed()->signx(src_pos, src_pos);
2781     __ signx(dst_pos, dst_pos);
2782 
2783     BLOCK_COMMENT("arraycopy_range_checks done");
2784   }
2785 
2786 
2787   //
2788   //  Generate generic array copy stubs
2789   //
2790   //  Input:
2791   //    O0    -  src oop
2792   //    O1    -  src_pos
2793   //    O2    -  dst oop
2794   //    O3    -  dst_pos
2795   //    O4    -  element count
2796   //
2797   //  Output:
2798   //    O0 ==  0  -  success
2799   //    O0 == -1  -  need to call System.arraycopy
2800   //
2801   address generate_generic_copy(const char *name,
2802                                 address entry_jbyte_arraycopy,
2803                                 address entry_jshort_arraycopy,
2804                                 address entry_jint_arraycopy,
2805                                 address entry_oop_arraycopy,
2806                                 address entry_jlong_arraycopy,
2807                                 address entry_checkcast_arraycopy) {
2808     Label L_failed, L_objArray;
2809 
2810     // Input registers
2811     const Register src      = O0;  // source array oop
2812     const Register src_pos  = O1;  // source position
2813     const Register dst      = O2;  // destination array oop
2814     const Register dst_pos  = O3;  // destination position
2815     const Register length   = O4;  // elements count
2816 
2817     // registers used as temp
2818     const Register G3_src_klass = G3; // source array klass
2819     const Register G4_dst_klass = G4; // destination array klass
2820     const Register G5_lh        = G5; // layout handler
2821     const Register O5_temp      = O5;
2822 
2823     __ align(CodeEntryAlignment);
2824     StubCodeMark mark(this, "StubRoutines", name);
2825     address start = __ pc();
2826 
2827     // bump this on entry, not on exit:
2828     inc_counter_np(SharedRuntime::_generic_array_copy_ctr, G1, G3);
2829 
2830     // In principle, the int arguments could be dirty.
2831     //assert_clean_int(src_pos, G1);
2832     //assert_clean_int(dst_pos, G1);
2833     //assert_clean_int(length, G1);
2834 
2835     //-----------------------------------------------------------------------
2836     // Assembler stubs will be used for this call to arraycopy
2837     // if the following conditions are met:
2838     //
2839     // (1) src and dst must not be null.
2840     // (2) src_pos must not be negative.
2841     // (3) dst_pos must not be negative.
2842     // (4) length  must not be negative.
2843     // (5) src klass and dst klass should be the same and not NULL.
2844     // (6) src and dst should be arrays.
2845     // (7) src_pos + length must not exceed length of src.
2846     // (8) dst_pos + length must not exceed length of dst.
2847     BLOCK_COMMENT("arraycopy initial argument checks");
2848 
2849     //  if (src == NULL) return -1;
2850     __ br_null(src, false, Assembler::pn, L_failed);
2851 
2852     //  if (src_pos < 0) return -1;
2853     __ delayed()->tst(src_pos);
2854     __ br(Assembler::negative, false, Assembler::pn, L_failed);
2855     __ delayed()->nop();
2856 
2857     //  if (dst == NULL) return -1;
2858     __ br_null(dst, false, Assembler::pn, L_failed);
2859 
2860     //  if (dst_pos < 0) return -1;
2861     __ delayed()->tst(dst_pos);
2862     __ br(Assembler::negative, false, Assembler::pn, L_failed);
2863 
2864     //  if (length < 0) return -1;
2865     __ delayed()->tst(length);
2866     __ br(Assembler::negative, false, Assembler::pn, L_failed);
2867 
2868     BLOCK_COMMENT("arraycopy argument klass checks");
2869     //  get src->klass()
2870     if (UseCompressedOops) {
2871       __ delayed()->nop(); // ??? not good
2872       __ load_klass(src, G3_src_klass);
2873     } else {
2874       __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), G3_src_klass);
2875     }
2876 
2877 #ifdef ASSERT
2878     //  assert(src->klass() != NULL);
2879     BLOCK_COMMENT("assert klasses not null");
2880     { Label L_a, L_b;
2881       __ br_notnull_short(G3_src_klass, Assembler::pt, L_b); // it is broken if klass is NULL
2882       __ bind(L_a);
2883       __ stop("broken null klass");
2884       __ bind(L_b);
2885       __ load_klass(dst, G4_dst_klass);
2886       __ br_null(G4_dst_klass, false, Assembler::pn, L_a); // this would be broken also
2887       __ delayed()->mov(G0, G4_dst_klass);      // scribble the temp
2888       BLOCK_COMMENT("assert done");
2889     }
2890 #endif
2891 
2892     // Load layout helper
2893     //
2894     //  |array_tag|     | header_size | element_type |     |log2_element_size|
2895     // 32        30    24            16              8     2                 0
2896     //
2897     //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2898     //
2899 
2900     int lh_offset = klassOopDesc::header_size() * HeapWordSize +
2901                     Klass::layout_helper_offset_in_bytes();
2902 
2903     // Load 32-bits signed value. Use br() instruction with it to check icc.
2904     __ lduw(G3_src_klass, lh_offset, G5_lh);
2905 
2906     if (UseCompressedOops) {
2907       __ load_klass(dst, G4_dst_klass);
2908     }
2909     // Handle objArrays completely differently...
2910     juint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2911     __ set(objArray_lh, O5_temp);
2912     __ cmp(G5_lh,       O5_temp);
2913     __ br(Assembler::equal, false, Assembler::pt, L_objArray);
2914     if (UseCompressedOops) {
2915       __ delayed()->nop();
2916     } else {
2917       __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
2918     }
2919 
2920     //  if (src->klass() != dst->klass()) return -1;
2921     __ cmp_and_brx_short(G3_src_klass, G4_dst_klass, Assembler::notEqual, Assembler::pn, L_failed);
2922 
2923     //  if (!src->is_Array()) return -1;
2924     __ cmp(G5_lh, Klass::_lh_neutral_value); // < 0
2925     __ br(Assembler::greaterEqual, false, Assembler::pn, L_failed);
2926 
2927     // At this point, it is known to be a typeArray (array_tag 0x3).
2928 #ifdef ASSERT
2929     __ delayed()->nop();
2930     { Label L;
2931       jint lh_prim_tag_in_place = (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2932       __ set(lh_prim_tag_in_place, O5_temp);
2933       __ cmp(G5_lh,                O5_temp);
2934       __ br(Assembler::greaterEqual, false, Assembler::pt, L);
2935       __ delayed()->nop();
2936       __ stop("must be a primitive array");
2937       __ bind(L);
2938     }
2939 #else
2940     __ delayed();                               // match next insn to prev branch
2941 #endif
2942 
2943     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2944                            O5_temp, G4_dst_klass, L_failed);
2945 
2946     // typeArrayKlass
2947     //
2948     // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
2949     // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
2950     //
2951 
2952     const Register G4_offset = G4_dst_klass;    // array offset
2953     const Register G3_elsize = G3_src_klass;    // log2 element size
2954 
2955     __ srl(G5_lh, Klass::_lh_header_size_shift, G4_offset);
2956     __ and3(G4_offset, Klass::_lh_header_size_mask, G4_offset); // array_offset
2957     __ add(src, G4_offset, src);       // src array offset
2958     __ add(dst, G4_offset, dst);       // dst array offset
2959     __ and3(G5_lh, Klass::_lh_log2_element_size_mask, G3_elsize); // log2 element size
2960 
2961     // next registers should be set before the jump to corresponding stub
2962     const Register from     = O0;  // source array address
2963     const Register to       = O1;  // destination array address
2964     const Register count    = O2;  // elements count
2965 
2966     // 'from', 'to', 'count' registers should be set in this order
2967     // since they are the same as 'src', 'src_pos', 'dst'.
2968 
2969     BLOCK_COMMENT("scale indexes to element size");
2970     __ sll_ptr(src_pos, G3_elsize, src_pos);
2971     __ sll_ptr(dst_pos, G3_elsize, dst_pos);
2972     __ add(src, src_pos, from);       // src_addr
2973     __ add(dst, dst_pos, to);         // dst_addr
2974 
2975     BLOCK_COMMENT("choose copy loop based on element size");
2976     __ cmp(G3_elsize, 0);
2977     __ br(Assembler::equal, true, Assembler::pt, entry_jbyte_arraycopy);
2978     __ delayed()->signx(length, count); // length
2979 
2980     __ cmp(G3_elsize, LogBytesPerShort);
2981     __ br(Assembler::equal, true, Assembler::pt, entry_jshort_arraycopy);
2982     __ delayed()->signx(length, count); // length
2983 
2984     __ cmp(G3_elsize, LogBytesPerInt);
2985     __ br(Assembler::equal, true, Assembler::pt, entry_jint_arraycopy);
2986     __ delayed()->signx(length, count); // length
2987 #ifdef ASSERT
2988     { Label L;
2989       __ cmp_and_br_short(G3_elsize, LogBytesPerLong, Assembler::equal, Assembler::pt, L);
2990       __ stop("must be long copy, but elsize is wrong");
2991       __ bind(L);
2992     }
2993 #endif
2994     __ br(Assembler::always, false, Assembler::pt, entry_jlong_arraycopy);
2995     __ delayed()->signx(length, count); // length
2996 
2997     // objArrayKlass
2998   __ BIND(L_objArray);
2999     // live at this point:  G3_src_klass, G4_dst_klass, src[_pos], dst[_pos], length
3000 
3001     Label L_plain_copy, L_checkcast_copy;
3002     //  test array classes for subtyping
3003     __ cmp(G3_src_klass, G4_dst_klass);         // usual case is exact equality
3004     __ brx(Assembler::notEqual, true, Assembler::pn, L_checkcast_copy);
3005     __ delayed()->lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted from below
3006 
3007     // Identically typed arrays can be copied without element-wise checks.
3008     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
3009                            O5_temp, G5_lh, L_failed);
3010 
3011     __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
3012     __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
3013     __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
3014     __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
3015     __ add(src, src_pos, from);       // src_addr
3016     __ add(dst, dst_pos, to);         // dst_addr
3017   __ BIND(L_plain_copy);
3018     __ br(Assembler::always, false, Assembler::pt, entry_oop_arraycopy);
3019     __ delayed()->signx(length, count); // length
3020 
3021   __ BIND(L_checkcast_copy);
3022     // live at this point:  G3_src_klass, G4_dst_klass
3023     {
3024       // Before looking at dst.length, make sure dst is also an objArray.
3025       // lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted to delay slot
3026       __ cmp(G5_lh,                    O5_temp);
3027       __ br(Assembler::notEqual, false, Assembler::pn, L_failed);
3028 
3029       // It is safe to examine both src.length and dst.length.
3030       __ delayed();                             // match next insn to prev branch
3031       arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
3032                              O5_temp, G5_lh, L_failed);
3033 
3034       // Marshal the base address arguments now, freeing registers.
3035       __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
3036       __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
3037       __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
3038       __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
3039       __ add(src, src_pos, from);               // src_addr
3040       __ add(dst, dst_pos, to);                 // dst_addr
3041       __ signx(length, count);                  // length (reloaded)
3042 
3043       Register sco_temp = O3;                   // this register is free now
3044       assert_different_registers(from, to, count, sco_temp,
3045                                  G4_dst_klass, G3_src_klass);
3046 
3047       // Generate the type check.
3048       int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
3049                         Klass::super_check_offset_offset_in_bytes());
3050       __ lduw(G4_dst_klass, sco_offset, sco_temp);
3051       generate_type_check(G3_src_klass, sco_temp, G4_dst_klass,
3052                           O5_temp, L_plain_copy);
3053 
3054       // Fetch destination element klass from the objArrayKlass header.
3055       int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
3056                        objArrayKlass::element_klass_offset_in_bytes());
3057 
3058       // the checkcast_copy loop needs two extra arguments:
3059       __ ld_ptr(G4_dst_klass, ek_offset, O4);   // dest elem klass
3060       // lduw(O4, sco_offset, O3);              // sco of elem klass
3061 
3062       __ br(Assembler::always, false, Assembler::pt, entry_checkcast_arraycopy);
3063       __ delayed()->lduw(O4, sco_offset, O3);
3064     }
3065 
3066   __ BIND(L_failed);
3067     __ retl();
3068     __ delayed()->sub(G0, 1, O0); // return -1
3069     return start;
3070   }
3071 
3072   void generate_arraycopy_stubs() {
3073     address entry;
3074     address entry_jbyte_arraycopy;
3075     address entry_jshort_arraycopy;
3076     address entry_jint_arraycopy;
3077     address entry_oop_arraycopy;
3078     address entry_jlong_arraycopy;
3079     address entry_checkcast_arraycopy;
3080 
3081     //*** jbyte
3082     // Always need aligned and unaligned versions
3083     StubRoutines::_jbyte_disjoint_arraycopy         = generate_disjoint_byte_copy(false, &entry,
3084                                                                                   "jbyte_disjoint_arraycopy");
3085     StubRoutines::_jbyte_arraycopy                  = generate_conjoint_byte_copy(false, entry,
3086                                                                                   &entry_jbyte_arraycopy,
3087                                                                                   "jbyte_arraycopy");
3088     StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry,
3089                                                                                   "arrayof_jbyte_disjoint_arraycopy");
3090     StubRoutines::_arrayof_jbyte_arraycopy          = generate_conjoint_byte_copy(true, entry, NULL,
3091                                                                                   "arrayof_jbyte_arraycopy");
3092 
3093     //*** jshort
3094     // Always need aligned and unaligned versions
3095     StubRoutines::_jshort_disjoint_arraycopy         = generate_disjoint_short_copy(false, &entry,
3096                                                                                     "jshort_disjoint_arraycopy");
3097     StubRoutines::_jshort_arraycopy                  = generate_conjoint_short_copy(false, entry,
3098                                                                                     &entry_jshort_arraycopy,
3099                                                                                     "jshort_arraycopy");
3100     StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry,
3101                                                                                     "arrayof_jshort_disjoint_arraycopy");
3102     StubRoutines::_arrayof_jshort_arraycopy          = generate_conjoint_short_copy(true, entry, NULL,
3103                                                                                     "arrayof_jshort_arraycopy");
3104 
3105     //*** jint
3106     // Aligned versions
3107     StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, &entry,
3108                                                                                 "arrayof_jint_disjoint_arraycopy");
3109     StubRoutines::_arrayof_jint_arraycopy          = generate_conjoint_int_copy(true, entry, &entry_jint_arraycopy,
3110                                                                                 "arrayof_jint_arraycopy");
3111 #ifdef _LP64
3112     // In 64 bit we need both aligned and unaligned versions of jint arraycopy.
3113     // entry_jint_arraycopy always points to the unaligned version (notice that we overwrite it).
3114     StubRoutines::_jint_disjoint_arraycopy         = generate_disjoint_int_copy(false, &entry,
3115                                                                                 "jint_disjoint_arraycopy");
3116     StubRoutines::_jint_arraycopy                  = generate_conjoint_int_copy(false, entry,
3117                                                                                 &entry_jint_arraycopy,
3118                                                                                 "jint_arraycopy");
3119 #else
3120     // In 32 bit jints are always HeapWordSize aligned, so always use the aligned version
3121     // (in fact in 32bit we always have a pre-loop part even in the aligned version,
3122     //  because it uses 64-bit loads/stores, so the aligned flag is actually ignored).
3123     StubRoutines::_jint_disjoint_arraycopy = StubRoutines::_arrayof_jint_disjoint_arraycopy;
3124     StubRoutines::_jint_arraycopy          = StubRoutines::_arrayof_jint_arraycopy;
3125 #endif
3126 
3127 
3128     //*** jlong
3129     // It is always aligned
3130     StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, &entry,
3131                                                                                   "arrayof_jlong_disjoint_arraycopy");
3132     StubRoutines::_arrayof_jlong_arraycopy          = generate_conjoint_long_copy(true, entry, &entry_jlong_arraycopy,
3133                                                                                   "arrayof_jlong_arraycopy");
3134     StubRoutines::_jlong_disjoint_arraycopy         = StubRoutines::_arrayof_jlong_disjoint_arraycopy;
3135     StubRoutines::_jlong_arraycopy                  = StubRoutines::_arrayof_jlong_arraycopy;
3136 
3137 
3138     //*** oops
3139     // Aligned versions
3140     StubRoutines::_arrayof_oop_disjoint_arraycopy        = generate_disjoint_oop_copy(true, &entry,
3141                                                                                       "arrayof_oop_disjoint_arraycopy");
3142     StubRoutines::_arrayof_oop_arraycopy                 = generate_conjoint_oop_copy(true, entry, &entry_oop_arraycopy,
3143                                                                                       "arrayof_oop_arraycopy");
3144     // Aligned versions without pre-barriers
3145     StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, &entry,
3146                                                                                       "arrayof_oop_disjoint_arraycopy_uninit",
3147                                                                                       /*dest_uninitialized*/true);
3148     StubRoutines::_arrayof_oop_arraycopy_uninit          = generate_conjoint_oop_copy(true, entry, NULL,
3149                                                                                       "arrayof_oop_arraycopy_uninit",
3150                                                                                       /*dest_uninitialized*/true);
3151 #ifdef _LP64
3152     if (UseCompressedOops) {
3153       // With compressed oops we need unaligned versions, notice that we overwrite entry_oop_arraycopy.
3154       StubRoutines::_oop_disjoint_arraycopy            = generate_disjoint_oop_copy(false, &entry,
3155                                                                                     "oop_disjoint_arraycopy");
3156       StubRoutines::_oop_arraycopy                     = generate_conjoint_oop_copy(false, entry, &entry_oop_arraycopy,
3157                                                                                     "oop_arraycopy");
3158       // Unaligned versions without pre-barriers
3159       StubRoutines::_oop_disjoint_arraycopy_uninit     = generate_disjoint_oop_copy(false, &entry,
3160                                                                                     "oop_disjoint_arraycopy_uninit",
3161                                                                                     /*dest_uninitialized*/true);
3162       StubRoutines::_oop_arraycopy_uninit              = generate_conjoint_oop_copy(false, entry, NULL,
3163                                                                                     "oop_arraycopy_uninit",
3164                                                                                     /*dest_uninitialized*/true);
3165     } else
3166 #endif
3167     {
3168       // oop arraycopy is always aligned on 32bit and 64bit without compressed oops
3169       StubRoutines::_oop_disjoint_arraycopy            = StubRoutines::_arrayof_oop_disjoint_arraycopy;
3170       StubRoutines::_oop_arraycopy                     = StubRoutines::_arrayof_oop_arraycopy;
3171       StubRoutines::_oop_disjoint_arraycopy_uninit     = StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit;
3172       StubRoutines::_oop_arraycopy_uninit              = StubRoutines::_arrayof_oop_arraycopy_uninit;
3173     }
3174 
3175     StubRoutines::_checkcast_arraycopy        = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
3176     StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
3177                                                                         /*dest_uninitialized*/true);
3178 
3179     StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy("unsafe_arraycopy",
3180                                                               entry_jbyte_arraycopy,
3181                                                               entry_jshort_arraycopy,
3182                                                               entry_jint_arraycopy,
3183                                                               entry_jlong_arraycopy);
3184     StubRoutines::_generic_arraycopy   = generate_generic_copy("generic_arraycopy",
3185                                                                entry_jbyte_arraycopy,
3186                                                                entry_jshort_arraycopy,
3187                                                                entry_jint_arraycopy,
3188                                                                entry_oop_arraycopy,
3189                                                                entry_jlong_arraycopy,
3190                                                                entry_checkcast_arraycopy);
3191 
3192     StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
3193     StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
3194     StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
3195     StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
3196     StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
3197     StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
3198   }
3199 
3200   void generate_initial() {
3201     // Generates all stubs and initializes the entry points
3202 
3203     //------------------------------------------------------------------------------------------------------------------------
3204     // entry points that exist in all platforms
3205     // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than
3206     //       the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp.
3207     StubRoutines::_forward_exception_entry                 = generate_forward_exception();
3208 
3209     StubRoutines::_call_stub_entry                         = generate_call_stub(StubRoutines::_call_stub_return_address);
3210     StubRoutines::_catch_exception_entry                   = generate_catch_exception();
3211 
3212     //------------------------------------------------------------------------------------------------------------------------
3213     // entry points that are platform specific
3214     StubRoutines::Sparc::_test_stop_entry                  = generate_test_stop();
3215 
3216     StubRoutines::Sparc::_stop_subroutine_entry            = generate_stop_subroutine();
3217     StubRoutines::Sparc::_flush_callers_register_windows_entry = generate_flush_callers_register_windows();
3218 
3219 #if !defined(COMPILER2) && !defined(_LP64)
3220     StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
3221     StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
3222     StubRoutines::_atomic_add_entry          = generate_atomic_add();
3223     StubRoutines::_atomic_xchg_ptr_entry     = StubRoutines::_atomic_xchg_entry;
3224     StubRoutines::_atomic_cmpxchg_ptr_entry  = StubRoutines::_atomic_cmpxchg_entry;
3225     StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
3226     StubRoutines::_atomic_add_ptr_entry      = StubRoutines::_atomic_add_entry;
3227 #endif  // COMPILER2 !=> _LP64
3228 
3229     // Build this early so it's available for the interpreter.  The
3230     // stub expects the required and actual type to already be in O1
3231     // and O2 respectively.
3232     StubRoutines::_throw_WrongMethodTypeException_entry =
3233       generate_throw_exception("WrongMethodTypeException throw_exception",
3234                                CAST_FROM_FN_PTR(address, SharedRuntime::throw_WrongMethodTypeException),
3235                                false, G5_method_type, G3_method_handle);
3236   }
3237 
3238 
3239   void generate_all() {
3240     // Generates all stubs and initializes the entry points
3241 
3242     // Generate partial_subtype_check first here since its code depends on
3243     // UseZeroBaseCompressedOops which is defined after heap initialization.
3244     StubRoutines::Sparc::_partial_subtype_check                = generate_partial_subtype_check();
3245     // These entry points require SharedInfo::stack0 to be set up in non-core builds
3246     StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError),  false);
3247     StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError),  false);
3248     StubRoutines::_throw_ArithmeticException_entry         = generate_throw_exception("ArithmeticException throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_ArithmeticException),  true);
3249     StubRoutines::_throw_NullPointerException_entry        = generate_throw_exception("NullPointerException throw_exception",         CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException), true);
3250     StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
3251     StubRoutines::_throw_StackOverflowError_entry          = generate_throw_exception("StackOverflowError throw_exception",           CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError),   false);
3252 
3253     StubRoutines::_handler_for_unsafe_access_entry =
3254       generate_handler_for_unsafe_access();
3255 
3256     // support for verify_oop (must happen after universe_init)
3257     StubRoutines::_verify_oop_subroutine_entry     = generate_verify_oop_subroutine();
3258 
3259     // arraycopy stubs used by compilers
3260     generate_arraycopy_stubs();
3261 
3262     // Don't initialize the platform math functions since sparc
3263     // doesn't have intrinsics for these operations.
3264   }
3265 
3266 
3267  public:
3268   StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
3269     // replace the standard masm with a special one:
3270     _masm = new MacroAssembler(code);
3271 
3272     _stub_count = !all ? 0x100 : 0x200;
3273     if (all) {
3274       generate_all();
3275     } else {
3276       generate_initial();
3277     }
3278 
3279     // make sure this stub is available for all local calls
3280     if (_atomic_add_stub.is_unbound()) {
3281       // generate a second time, if necessary
3282       (void) generate_atomic_add();
3283     }
3284   }
3285 
3286 
3287  private:
3288   int _stub_count;
3289   void stub_prolog(StubCodeDesc* cdesc) {
3290     # ifdef ASSERT
3291       // put extra information in the stub code, to make it more readable
3292 #ifdef _LP64
3293 // Write the high part of the address
3294 // [RGV] Check if there is a dependency on the size of this prolog
3295       __ emit_data((intptr_t)cdesc >> 32,    relocInfo::none);
3296 #endif
3297       __ emit_data((intptr_t)cdesc,    relocInfo::none);
3298       __ emit_data(++_stub_count, relocInfo::none);
3299     # endif
3300     align(true);
3301   }
3302 
3303   void align(bool at_header = false) {
3304     // %%%%% move this constant somewhere else
3305     // UltraSPARC cache line size is 8 instructions:
3306     const unsigned int icache_line_size = 32;
3307     const unsigned int icache_half_line_size = 16;
3308 
3309     if (at_header) {
3310       while ((intptr_t)(__ pc()) % icache_line_size != 0) {
3311         __ emit_data(0, relocInfo::none);
3312       }
3313     } else {
3314       while ((intptr_t)(__ pc()) % icache_half_line_size != 0) {
3315         __ nop();
3316       }
3317     }
3318   }
3319 
3320 }; // end class declaration
3321 
3322 void StubGenerator_generate(CodeBuffer* code, bool all) {
3323   StubGenerator g(code, all);
3324 }