/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/top.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp.

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Note:  The register L7 is used as L7_thread_cache, and may not be used
//        any other way within this module.


static const Register& Lstub_temp = L2;

// -------------------------------------------------------------------------------------------------------------------------
// Stub Code definitions

static address handle_unsafe_access() {
  JavaThread* thread = JavaThread::current();
  address pc  = thread->saved_exception_pc();
  address npc = thread->saved_exception_npc();
  // pc is the instruction which we must emulate
  // doing a no-op is fine:  return garbage from the load

  // request an async exception
  thread->set_pending_unsafe_access_error();

  // return address of next instruction to execute
  return npc;
}
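
// The stub generated by generate_handler_for_unsafe_access() below transfers
// control here.  In rough pseudo-code, the platform signal handler cooperates
// with it as follows (a sketch only; the real handler lives in the os_cpu
// code, e.g. os_solaris_sparc.cpp):
//
//   // on a fault inside an unsafe access:
//   thread->set_saved_exception_pc(pc);    // the faulting instruction
//   thread->set_saved_exception_npc(npc);  // the instruction after it
//   // ... then redirect control to the handler stub, which calls
//   // handle_unsafe_access() and resumes execution at the returned npc.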

class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(a,b,c) (0)
#else
#define inc_counter_np(counter, t1, t2) \
  BLOCK_COMMENT("inc_counter " #counter); \
  __ inc_counter(&counter, t1, t2);
#endif

  //----------------------------------------------------------------------------------------------------
  // Call stubs are used to call Java from C

  address generate_call_stub(address& return_pc) {
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // Incoming arguments:
    //
    // o0         : call wrapper address
    // o1         : result (address)
    // o2         : result type
    // o3         : method
    // o4         : (interpreter) entry point
    // o5         : parameters (address)
    // [sp + 0x5c]: parameter size (in words)
    // [sp + 0x60]: thread
    //
    // +---------------+ <--- sp + 0
    // |               |
    // . reg save area .
    // |               |
    // +---------------+ <--- sp + 0x40
    // |               |
    // . extra 7 slots .
    // |               |
    // +---------------+ <--- sp + 0x5c
    // |  param. size  |
    // +---------------+ <--- sp + 0x60
    // |    thread     |
    // +---------------+
    // |               |

    // note: if the link argument position changes, adjust
    //       the code in frame::entry_frame_call_wrapper()

    const Argument link           = Argument(0, false); // used only for GC
    const Argument result         = Argument(1, false);
    const Argument result_type    = Argument(2, false);
    const Argument method         = Argument(3, false);
    const Argument entry_point    = Argument(4, false);
    const Argument parameters     = Argument(5, false);
    const Argument parameter_size = Argument(6, false);
    const Argument thread         = Argument(7, false);

    // setup thread register
    __ ld_ptr(thread.as_address(), G2_thread);
    __ reinit_heapbase();

#ifdef ASSERT
    // make sure we have no pending exceptions
    { const Register t = G3_scratch;
      Label L;
      __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), t);
      __ br_null_short(t, Assembler::pt, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // create activation frame & allocate space for parameters
    { const Register t = G3_scratch;
      __ ld_ptr(parameter_size.as_address(), t);                // get parameter size (in words)
      __ add(t, frame::memory_parameter_word_sp_offset, t);     // add space for save area (in words)
      __ round_to(t, WordsPerLong);                             // make sure it is multiple of 2 (in words)
      __ sll(t, Interpreter::logStackElementSize, t);           // compute number of bytes
      __ neg(t);                                                // negate so it can be used with save
      __ save(SP, t, SP);                                       // setup new frame
    }

    // +---------------+ <--- sp + 0
    // |               |
    // . reg save area .
    // |               |
    // +---------------+ <--- sp + 0x40
    // |               |
    // . extra 7 slots .
    // |               |
    // +---------------+ <--- sp + 0x5c
    // |  empty slot   |      (only if parameter size is even)
    // +---------------+
    // |               |
    // .  parameters   .
    // |               |
    // +---------------+ <--- fp + 0
    // |               |
    // . reg save area .
    // |               |
    // +---------------+ <--- fp + 0x40
    // |               |
    // . extra 7 slots .
    // |               |
    // +---------------+ <--- fp + 0x5c
    // |  param. size  |
    // +---------------+ <--- fp + 0x60
    // |    thread     |
    // +---------------+
    // |               |

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    { const Register src = parameters.as_in().as_register();
      const Register dst = Lentry_args;
      const Register tmp = G3_scratch;
      const Register cnt = G4_scratch;

      // test if any parameters & setup of Lentry_args
      Label exit;
      __ ld_ptr(parameter_size.as_in().as_address(), cnt);      // parameter counter
      __ add( FP, STACK_BIAS, dst );
      __ cmp_zero_and_br(Assembler::zero, cnt, exit);
      __ delayed()->sub(dst, BytesPerWord, dst);                 // setup Lentry_args

      // copy parameters if any
      Label loop;
      __ BIND(loop);
      // Store parameter value
      __ ld_ptr(src, 0, tmp);
      __ add(src, BytesPerWord, src);
      __ st_ptr(tmp, dst, 0);
      __ deccc(cnt);
      __ br(Assembler::greater, false, Assembler::pt, loop);
      __ delayed()->sub(dst, Interpreter::stackElementSize, dst);

      // done
      __ BIND(exit);
    }

    // setup parameters, method & call Java function
#ifdef ASSERT
    // layout_activation_impl checks its notion of saved SP against
    // this register, so if this changes update it as well.
    const Register saved_SP = Lscratch;
    __ mov(SP, saved_SP);                               // keep track of SP before call
#endif

    // setup parameters
    const Register t = G3_scratch;
    __ ld_ptr(parameter_size.as_in().as_address(), t); // get parameter size (in words)
    __ sll(t, Interpreter::logStackElementSize, t);    // compute number of bytes
    __ sub(FP, t, Gargs);                              // setup parameter pointer
#ifdef _LP64
    __ add( Gargs, STACK_BIAS, Gargs );                // Account for LP64 stack bias
#endif
    __ mov(SP, O5_savedSP);


    // do the call
    //
    // the following registers must be set up:
    //
    // G2_thread
    // G5_method
    // Gargs
    BLOCK_COMMENT("call Java function");
    __ jmpl(entry_point.as_in().as_register(), G0, O7);
    __ delayed()->mov(method.as_in().as_register(), G5_method);   // setup method

    BLOCK_COMMENT("call_stub_return_address:");
    return_pc = __ pc();

    // The callee, if it wasn't interpreted, can return with SP changed so
    // we can no longer assert that SP is unchanged across the call.

    // store result depending on type
    // (everything that is not T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE
    //  is treated as T_INT)
    { const Register addr = result     .as_in().as_register();
      const Register type = result_type.as_in().as_register();
      Label is_long, is_float, is_double, is_object, exit;
      __            cmp(type, T_OBJECT);  __ br(Assembler::equal, false, Assembler::pn, is_object);
      __ delayed()->cmp(type, T_FLOAT);   __ br(Assembler::equal, false, Assembler::pn, is_float);
      __ delayed()->cmp(type, T_DOUBLE);  __ br(Assembler::equal, false, Assembler::pn, is_double);
      __ delayed()->cmp(type, T_LONG);    __ br(Assembler::equal, false, Assembler::pn, is_long);
      __ delayed()->nop();

      // store int result
      __ st(O0, addr, G0);

      __ BIND(exit);
      __ ret();
      __ delayed()->restore();

      __ BIND(is_object);
      __ ba(exit);
      __ delayed()->st_ptr(O0, addr, G0);

      __ BIND(is_float);
      __ ba(exit);
      __ delayed()->stf(FloatRegisterImpl::S, F0, addr, G0);

      __ BIND(is_double);
      __ ba(exit);
      __ delayed()->stf(FloatRegisterImpl::D, F0, addr, G0);

      __ BIND(is_long);
#ifdef _LP64
      __ ba(exit);
      __ delayed()->st_long(O0, addr, G0);      // store entire long
#else
#if defined(COMPILER2)
  // All return values are where we want them, except for longs.  C2 returns
  // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
  // Since the interpreter returns longs in both G1 and O0/O1 in the 32-bit
  // build, we simply always use G1.
  // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
  // do this here. Unfortunately if we did a rethrow we'd see a MachEpilog node
  // first which would move G1 -> O0/O1 and destroy the exception we were throwing.

      __ ba(exit);
      __ delayed()->stx(G1, addr, G0);  // store entire long
#else
      __ st(O1, addr, BytesPerInt);
      __ ba(exit);
      __ delayed()->st(O0, addr, G0);
#endif /* COMPILER2 */
#endif /* _LP64 */
    }
    return start;
  }
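
  // From C++ the generated stub is reached through a function pointer whose
  // shape is approximately the following (a sketch of the CallStub typedef
  // kept in stubRoutines.hpp; see there for the authoritative declaration):
  //
  //   typedef void (*CallStub)(address   link,
  //                            intptr_t* result,
  //                            BasicType result_type,
  //                            Method*   method,
  //                            address   entry_point,
  //                            intptr_t* parameters,
  //                            int       size_of_parameters,
  //                            TRAPS);
  //
  // which corresponds to the incoming-argument list (o0..o5 plus the two
  // stack-passed words) documented at the top of generate_call_stub().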


  //----------------------------------------------------------------------------------------------------
  // Return point for a Java call if there's an exception thrown in Java code.
  // The exception is caught and transformed into a pending exception stored in
  // JavaThread that can be tested from within the VM.
  //
  // Oexception: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");

    address start = __ pc();
    // verify that thread corresponds
    __ verify_thread();

    const Register& temp_reg = Gtemp;
    Address pending_exception_addr    (G2_thread, Thread::pending_exception_offset());
    Address exception_file_offset_addr(G2_thread, Thread::exception_file_offset   ());
    Address exception_line_offset_addr(G2_thread, Thread::exception_line_offset   ());

    // set pending exception
    __ verify_oop(Oexception);
    __ st_ptr(Oexception, pending_exception_addr);
    __ set((intptr_t)__FILE__, temp_reg);
    __ st_ptr(temp_reg, exception_file_offset_addr);
    __ set((intptr_t)__LINE__, temp_reg);
    __ st(temp_reg, exception_line_offset_addr);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");

    AddressLiteral stub_ret(StubRoutines::_call_stub_return_address);
    __ jump_to(stub_ret, temp_reg);
    __ delayed()->nop();

    return start;
  }


  //----------------------------------------------------------------------------------------------------
  // Continuation point for runtime calls returning with a pending exception
  // The pending exception check happened in the runtime or native call stub
  // The pending exception in Thread is converted into a Java-level exception
  //
  // Contract with Java-level exception handler: O0 = exception
  //                                             O1 = throwing pc

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward_exception");
    address start = __ pc();

    // Upon entry, O7 has the return address returning into Java
    // (interpreted or compiled) code; i.e. the return address
    // becomes the throwing pc.

    const Register& handler_reg = Gtemp;

    Address exception_addr(G2_thread, Thread::pending_exception_offset());

#ifdef ASSERT
    // make sure that this code is only executed if there is a pending exception
    { Label L;
      __ ld_ptr(exception_addr, Gtemp);
      __ br_notnull_short(Gtemp, Assembler::pt, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into handler_reg
    __ get_thread();
    __ ld_ptr(exception_addr, Oexception);
    __ verify_oop(Oexception);
    __ save_frame(0);             // compensates for compiler weakness
    __ add(O7->after_save(), frame::pc_return_offset, Lscratch); // save the issuing PC
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), G2_thread, Lscratch);
    __ mov(O0, handler_reg);
    __ restore();                 // compensates for compiler weakness

    __ ld_ptr(exception_addr, Oexception);
    __ add(O7, frame::pc_return_offset, Oissuing_pc); // save the issuing PC

#ifdef ASSERT
    // make sure exception is set
    { Label L;
      __ br_notnull_short(Oexception, Assembler::pt, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif
    // jump to exception handler
    __ jmp(handler_reg, 0);
    // clear pending exception
    __ delayed()->st_ptr(G0, exception_addr);

    return start;
  }


  //------------------------------------------------------------------------------------------------------------------------
  // Continuation point for throwing of implicit exceptions that are not handled in
  // the current activation. Fabricates an exception oop and initiates normal
  // exception dispatching in this frame. Only callee-saved registers are preserved
  // (through the normal register window / RegisterMap handling).
  // If the compiler needs all registers to be preserved between the fault
  // point and the exception handler then it must assume responsibility for that in
  // AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other implicit
  // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are
  // either at call sites or otherwise assume that stack unwinding will be initiated,
  // so caller saved registers were assumed volatile in the compiler.

  // Note that we generate only this stub into a RuntimeStub, because it needs to be
  // properly traversed and ignored during GC, so we change the meaning of the "__"
  // macro within this method.
#undef __
#define __ masm->

  address generate_throw_exception(const char* name, address runtime_entry,
                                   Register arg1 = noreg, Register arg2 = noreg) {
#ifdef ASSERT
    int insts_size = VerifyThread ? 1 * K : 600;
#else
    int insts_size = VerifyThread ? 1 * K : 256;
#endif /* ASSERT */
    int locs_size  = 32;

    CodeBuffer      code(name, insts_size, locs_size);
    MacroAssembler* masm = new MacroAssembler(&code);

    __ verify_thread();

    // This is an inlined and slightly modified version of call_VM
    // which has the ability to fetch the return PC out of thread-local storage
    __ assert_not_delayed();

    // Note that we always push a frame because on the SPARC
    // architecture, for all of our implicit exception kinds at call
    // sites, the implicit exception is taken before the callee frame
    // is pushed.
    __ save_frame(0);

    int frame_complete = __ offset();

    // Note that we always have a runtime stub frame on the top of stack by this point
    Register last_java_sp = SP;
    // 64-bit last_java_sp is biased!
    __ set_last_Java_frame(last_java_sp, G0);
    if (VerifyThread)  __ mov(G2_thread, O0); // about to be smashed; pass early
    __ save_thread(noreg);
    if (arg1 != noreg) {
      assert(arg2 != O1, "clobbered");
      __ mov(arg1, O1);
    }
    if (arg2 != noreg) {
      __ mov(arg2, O2);
    }
    // do the call
    BLOCK_COMMENT("call runtime_entry");
    __ call(runtime_entry, relocInfo::runtime_call_type);
    if (!VerifyThread)
      __ delayed()->mov(G2_thread, O0);  // pass thread as first argument
    else
      __ delayed()->nop();             // (thread already passed)
    __ restore_thread(noreg);
    __ reset_last_Java_frame();

    // check for pending exceptions. use Gtemp as scratch register.
#ifdef ASSERT
    Label L;

    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    Register scratch_reg = Gtemp;
    __ ld_ptr(exception_addr, scratch_reg);
    __ br_notnull_short(scratch_reg, Assembler::pt, L);
    __ should_not_reach_here();
    __ bind(L);
#endif // ASSERT
    BLOCK_COMMENT("call forward_exception_entry");
    __ call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
    // we use O7 linkage so that forward_exception_entry has the issuing PC
    __ delayed()->restore();

    RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, frame_complete, masm->total_frame_size_in_bytes(0), NULL, false);
    return stub->entry_point();
  }
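
  // A typical use of this generator, sketching the pattern only (the
  // authoritative list of throw stubs is built in generate_all()):
  //
  //   StubRoutines::_throw_AbstractMethodError_entry =
  //       generate_throw_exception("AbstractMethodError throw_exception",
  //           CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));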

#undef __
#define __ _masm->


  // Generate a routine that sets all the registers so we
  // can tell if the stop routine prints them correctly.
  address generate_test_stop() {
    StubCodeMark mark(this, "StubRoutines", "test_stop");
    address start = __ pc();

    int i;

    __ save_frame(0);

    static jfloat zero = 0.0, one = 1.0;

    // put addr in L0, then load through L0 to F0
    __ set((intptr_t)&zero, L0);  __ ldf( FloatRegisterImpl::S, L0, 0, F0);
    __ set((intptr_t)&one,  L0);  __ ldf( FloatRegisterImpl::S, L0, 0, F1); // 1.0 to F1

    // use add to put 2..18 in F2..F18
    for ( i = 2;  i <= 18;  ++i ) {
      __ fadd( FloatRegisterImpl::S, F1, as_FloatRegister(i-1),  as_FloatRegister(i));
    }

    // Now put double 2 in F16, double 18 in F18
    __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F2, F16 );
    __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, F18, F18 );

    // use add to put 20..32 in F20..F32
    for (i = 20; i < 32; i += 2) {
      __ fadd( FloatRegisterImpl::D, F16, as_FloatRegister(i-2),  as_FloatRegister(i));
    }

    // put 0..7 in i's, 8..15 in l's, 16..23 in o's, 24..31 in g's
    for ( i = 0; i < 8; ++i ) {
      if (i < 6) {
        __ set(     i, as_iRegister(i));
        __ set(16 + i, as_oRegister(i));
        __ set(24 + i, as_gRegister(i));
      }
      __ set( 8 + i, as_lRegister(i));
    }

    __ stop("testing stop");


    __ ret();
    __ delayed()->restore();

    return start;
  }


  address generate_stop_subroutine() {
    StubCodeMark mark(this, "StubRoutines", "stop_subroutine");
    address start = __ pc();

    __ stop_subroutine();

    return start;
  }

  address generate_flush_callers_register_windows() {
    StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows");
    address start = __ pc();

    __ flush_windows();
    __ retl(false);
    __ delayed()->add( FP, STACK_BIAS, O0 );
    // The returned value must be a stack pointer whose register save area
    // is flushed, and will stay flushed while the caller executes.

    return start;
  }

  // Helper functions for v8 atomic operations.
  //
  void get_v8_oop_lock_ptr(Register lock_ptr_reg, Register mark_oop_reg, Register scratch_reg) {
    if (mark_oop_reg == noreg) {
      address lock_ptr = (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr();
      __ set((intptr_t)lock_ptr, lock_ptr_reg);
    } else {
      assert(scratch_reg != noreg, "just checking");
      address lock_ptr = (address)StubRoutines::Sparc::_v8_oop_lock_cache;
      __ set((intptr_t)lock_ptr, lock_ptr_reg);
      __ and3(mark_oop_reg, StubRoutines::Sparc::v8_oop_lock_mask_in_place, scratch_reg);
      __ add(lock_ptr_reg, scratch_reg, lock_ptr_reg);
    }
  }

  void generate_v8_lock_prologue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {

    get_v8_oop_lock_ptr(lock_ptr_reg, mark_oop_reg, scratch_reg);
    __ set(StubRoutines::Sparc::locked, lock_reg);
    // Initialize yield counter
    __ mov(G0,yield_reg);

    __ BIND(retry);
    __ cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dontyield);

    // This code can only be called from inside the VM, this
    // stub is only invoked from Atomic::add().  We do not
    // want to use call_VM, because _last_java_sp and such
    // must already be set.
    //
    // Save the regs and make space for a C call
    __ save(SP, -96, SP);
    __ save_all_globals_into_locals();
    BLOCK_COMMENT("call os::naked_sleep");
    __ call(CAST_FROM_FN_PTR(address, os::naked_sleep));
    __ delayed()->nop();
    __ restore_globals_from_locals();
    __ restore();
    // reset the counter
    __ mov(G0,yield_reg);

    __ BIND(dontyield);

    // try to get lock
    __ swap(lock_ptr_reg, 0, lock_reg);

    // did we get the lock?
    __ cmp(lock_reg, StubRoutines::Sparc::unlocked);
    __ br(Assembler::notEqual, true, Assembler::pn, retry);
    __ delayed()->add(yield_reg,1,yield_reg);

    // yes, got lock. do the operation here.
  }

  void generate_v8_lock_epilogue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
    __ st(lock_reg, lock_ptr_reg, 0); // unlock
  }
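
  // Taken together, the prologue/epilogue pair implements a bounded-spin
  // test-and-set lock.  In C-like pseudo-code (a sketch, not the emitted code):
  //
  //   int yields = 0;
  //   for (;;) {
  //     if (yields >= V8AtomicOperationUnderLockSpinCount) {
  //       os::naked_sleep();                            // back off briefly
  //       yields = 0;
  //     }
  //     if (swap(lock_ptr, locked) == unlocked) break;  // lock acquired
  //     yields++;
  //   }
  //   ... the caller's atomic operation runs here ...
  //   *lock_ptr = unlocked;  // epilogue: release by storing the swapped-out value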

  // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
  //
  // Arguments :
  //
  //      exchange_value: O0
  //      dest:           O1
  //
  // Results:
  //
  //     O0: the value previously stored in dest
  //
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    if (UseCASForSwap) {
      // Use CAS instead of swap, just in case the MP hardware
      // prefers to work with just one kind of synch. instruction.
      Label retry;
      __ BIND(retry);
      __ mov(O0, O3);       // scratch copy of exchange value
      __ ld(O1, 0, O2);     // observe the previous value
      // try to replace O2 with O3
      __ cas_under_lock(O1, O2, O3,
      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
      __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);

      __ retl(false);
      __ delayed()->mov(O2, O0);  // report previous value to caller

    } else {
      if (VM_Version::v9_instructions_work()) {
        __ retl(false);
        __ delayed()->swap(O1, 0, O0);
      } else {
        const Register& lock_reg = O2;
        const Register& lock_ptr_reg = O3;
        const Register& yield_reg = O4;

        Label retry;
        Label dontyield;

        generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
        // got the lock, do the swap
        __ swap(O1, 0, O0);

        generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
        __ retl(false);
        __ delayed()->nop();
      }
    }

    return start;
  }
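
  // The CAS-based path above amounts to the usual exchange loop (sketch):
  //
  //   jint old;
  //   do {
  //     old = *dest;                              // observe current value
  //   } while (cas(dest, old, exchange_value) != old);
  //   return old;                                 // previous value, per contract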


  // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
  //
  // Arguments :
  //
  //      exchange_value: O0
  //      dest:           O1
  //      compare_value:  O2
  //
  // Results:
  //
  //     O0: the value previously stored in dest
  //
  // Overwrites (v8): O3,O4,O5
  //
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    // cmpxchg(dest, compare_value, exchange_value)
    __ cas_under_lock(O1, O2, O0,
      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
    __ retl(false);
    __ delayed()->nop();

    return start;
  }

  // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
  //
  // Arguments :
  //
  //      exchange_value: O1:O0
  //      dest:           O2
  //      compare_value:  O4:O3
  //
  // Results:
  //
  //     O1:O0: the value previously stored in dest
  //
  // This only works on V9, on V8 we don't generate any
  // code and just return NULL.
  //
  // Overwrites: G1,G2,G3
  //
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    if (!VM_Version::supports_cx8())
        return NULL;
    __ sllx(O0, 32, O0);
    __ srl(O1, 0, O1);
    __ or3(O0,O1,O0);     // O0 holds 64-bit value from exchange_value
    __ sllx(O3, 32, O3);
    __ srl(O4, 0, O4);
    __ or3(O3,O4,O3);     // O3 holds 64-bit value from compare_value
    __ casx(O2, O3, O0);
    __ srl(O0, 0, O1);    // unpacked return value in O1:O0
    __ retl(false);
    __ delayed()->srlx(O0, 32, O0);

    return start;
  }
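
  // The shift/or sequences above simply reassemble each jlong from its two
  // 32-bit halves before the casx, i.e. (sketch):
  //
  //   jlong exchange_value = ((jlong)O0 << 32) | (juint)O1;
  //   jlong compare_value  = ((jlong)O3 << 32) | (juint)O4;
  //   jlong prev = casx(dest /*O2*/, compare_value, exchange_value);
  //   // prev is then split back into O0 (high word) and O1 (low word)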


  // Support for jint Atomic::add(jint add_value, volatile jint* dest).
  //
  // Arguments :
  //
  //      add_value: O0   (e.g., +1 or -1)
  //      dest:      O1
  //
  // Results:
  //
  //     O0: the new value stored in dest
  //
  // Overwrites (v9): O3
  // Overwrites (v8): O3,O4,O5
  //
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();
    __ BIND(_atomic_add_stub);

    if (VM_Version::v9_instructions_work()) {
      Label retry;
      __ BIND(retry);

      __ lduw(O1, 0, O2);
      __ add(O0, O2, O3);
      __ cas(O1, O2, O3);
      __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
      __ retl(false);
      __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
    } else {
      const Register& lock_reg = O2;
      const Register& lock_ptr_reg = O3;
      const Register& value_reg = O4;
      const Register& yield_reg = O5;

      Label retry;
      Label dontyield;

      generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
      // got lock, do the increment
      __ ld(O1, 0, value_reg);
      __ add(O0, value_reg, value_reg);
      __ st(value_reg, O1, 0);

      // %%% only for RMO and PSO
      __ membar(Assembler::StoreStore);

      generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);

      __ retl(false);
      __ delayed()->mov(value_reg, O0);
    }

    return start;
  }
  Label _atomic_add_stub;  // called from other stubs

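  // On v9 the stub above is a standard fetch-and-add CAS loop; in C terms
  // (a sketch, not the emitted code):
  //
  //   jint old, nv;
  //   do {
  //     old = *dest;
  //     nv  = old + add_value;
  //   } while (cas(dest, old, nv) != old);
  //   return nv;               // the new value, per the contract above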

  //------------------------------------------------------------------------------------------------------------------------
  // The following routine generates a subroutine to throw an asynchronous
  // UnknownError when an unsafe access gets a fault that could not be
  // reasonably prevented by the programmer.  (Example: SIGBUS/OBJERR.)
  //
  // Arguments :
  //
  //      trapping PC:    O7
  //
  // Results:
  //     posts an asynchronous exception, skips the trapping instruction
  //

  address generate_handler_for_unsafe_access() {
    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
    address start = __ pc();

    const int preserve_register_words = (64 * 2);
    Address preserve_addr(FP, (-preserve_register_words * wordSize) + STACK_BIAS);

    Register Lthread = L7_thread_cache;
    int i;

    __ save_frame(0);
    __ mov(G1, L1);
    __ mov(G2, L2);
    __ mov(G3, L3);
    __ mov(G4, L4);
    __ mov(G5, L5);
    for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
      __ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize);
    }

    address entry_point = CAST_FROM_FN_PTR(address, handle_unsafe_access);
    BLOCK_COMMENT("call handle_unsafe_access");
    __ call(entry_point, relocInfo::runtime_call_type);
    __ delayed()->nop();

    __ mov(L1, G1);
    __ mov(L2, G2);
    __ mov(L3, G3);
    __ mov(L4, G4);
    __ mov(L5, G5);
    for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
      __ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize);
    }

    __ verify_thread();

    __ jmp(O0, 0);
    __ delayed()->restore();

    return start;
  }


  // Support for uint StubRoutines::Sparc::partial_subtype_check( Klass sub, Klass super );
  // Arguments :
  //
  //      ret  : O0, returned
  //      icc/xcc: set as O0 (depending on wordSize)
  //      sub  : O1, argument, not changed
  //      super: O2, argument, not changed
  //      raddr: O7, blown by call
  address generate_partial_subtype_check() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "partial_subtype_check");
    address start = __ pc();
    Label miss;

#if defined(COMPILER2) && !defined(_LP64)
    // Do not use a 'save' because it blows the 64-bit O registers.
    __ add(SP,-4*wordSize,SP);  // Make space for 4 temps (stack must be 2 words aligned)
    __ st_ptr(L0,SP,(frame::register_save_words+0)*wordSize);
    __ st_ptr(L1,SP,(frame::register_save_words+1)*wordSize);
    __ st_ptr(L2,SP,(frame::register_save_words+2)*wordSize);
    __ st_ptr(L3,SP,(frame::register_save_words+3)*wordSize);
    Register Rret   = O0;
    Register Rsub   = O1;
    Register Rsuper = O2;
#else
    __ save_frame(0);
    Register Rret   = I0;
    Register Rsub   = I1;
    Register Rsuper = I2;
#endif

    Register L0_ary_len = L0;
    Register L1_ary_ptr = L1;
    Register L2_super   = L2;
    Register L3_index   = L3;

    __ check_klass_subtype_slow_path(Rsub, Rsuper,
                                     L0, L1, L2, L3,
                                     NULL, &miss);

    // Match falls through here.
    __ addcc(G0,0,Rret);        // set Z flags, Z result

#if defined(COMPILER2) && !defined(_LP64)
    __ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
    __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
    __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
    __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
    __ retl();                  // Result in Rret is zero; flags set to Z
    __ delayed()->add(SP,4*wordSize,SP);
#else
    __ ret();                   // Result in Rret is zero; flags set to Z
    __ delayed()->restore();
#endif

    __ BIND(miss);
    __ addcc(G0,1,Rret);        // set NZ flags, NZ result

#if defined(COMPILER2) && !defined(_LP64)
    __ ld_ptr(SP,(frame::register_save_words+0)*wordSize,L0);
    __ ld_ptr(SP,(frame::register_save_words+1)*wordSize,L1);
    __ ld_ptr(SP,(frame::register_save_words+2)*wordSize,L2);
    __ ld_ptr(SP,(frame::register_save_words+3)*wordSize,L3);
    __ retl();                  // Result in Rret is != 0; flags set to NZ
    __ delayed()->add(SP,4*wordSize,SP);
#else
    __ ret();                   // Result in Rret is != 0; flags set to NZ
    __ delayed()->restore();
#endif

    return start;
  }


  // Called from MacroAssembler::verify_oop
  //
  address generate_verify_oop_subroutine() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop_stub");

    address start = __ pc();

    __ verify_oop_subroutine();

    return start;
  }


  //
  // Verify that a register contains a clean 32-bit positive value
  // (high 32 bits are 0) so it can be used in 64-bit shifts (sllx, srax).
  //
  //  Input:
  //    Rint  -  32-bit value
  //    Rtmp  -  scratch
  //
  void assert_clean_int(Register Rint, Register Rtmp) {
#if defined(ASSERT) && defined(_LP64)
    __ signx(Rint, Rtmp);
    __ cmp(Rint, Rtmp);
    __ breakpoint_trap(Assembler::notEqual, Assembler::xcc);
#endif
  }
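
  // Example: Rint == 0x0000000012345678 passes, since sign-extending the low
  // word reproduces the value; a register with a dirty high word, e.g.
  // 0xdeadbeef12345678, differs after signx and triggers the breakpoint trap.
  // This is a debug-only check in LP64 builds and a no-op otherwise.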

  //
  //  Generate overlap test for array copy stubs
  //
  //  Input:
  //    O0    -  array1
  //    O1    -  array2
  //    O2    -  element count
  //
  //  Kills temps:  O3, O4
  //
  void array_overlap_test(address no_overlap_target, int log2_elem_size) {
    assert(no_overlap_target != NULL, "must be generated");
    array_overlap_test(no_overlap_target, NULL, log2_elem_size);
  }
  void array_overlap_test(Label& L_no_overlap, int log2_elem_size) {
    array_overlap_test(NULL, &L_no_overlap, log2_elem_size);
  }
  void array_overlap_test(address no_overlap_target, Label* NOLp, int log2_elem_size) {
    const Register from       = O0;
    const Register to         = O1;
    const Register count      = O2;
    const Register to_from    = O3; // to - from
    const Register byte_count = O4; // count << log2_elem_size

      __ subcc(to, from, to_from);
      __ sll_ptr(count, log2_elem_size, byte_count);
      if (NOLp == NULL)
        __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, no_overlap_target);
      else
        __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, (*NOLp));
      __ delayed()->cmp(to_from, byte_count);
      if (NOLp == NULL)
        __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, no_overlap_target);
      else
        __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, (*NOLp));
      __ delayed()->nop();
  }
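
  // The branch pair encodes the standard disjointness test; in C (sketch):
  //
  //   if (to <= from || (uintptr_t)(to - from) >= byte_count) {
  //     goto no_overlap;   // a forward (disjoint) copy is safe
  //   }
  //   // otherwise fall through to the backward (conjoint) copy code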

  //
  //  Generate pre-write barrier for array.
  //
  //  Input:
  //     addr     - register containing starting address
  //     count    - register containing element count
  //     tmp      - scratch register
  //
  //  The input registers are overwritten.
  //
  void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
    BarrierSet* bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCT:
      case BarrierSet::G1SATBCTLogging:
        // With G1, don't generate the call if we statically know that the target is uninitialized
        if (!dest_uninitialized) {
          __ save_frame(0);
          // Save the necessary global regs... will be used after.
          if (addr->is_global()) {
            __ mov(addr, L0);
          }
          if (count->is_global()) {
            __ mov(count, L1);
          }
          __ mov(addr->after_save(), O0);
          // Get the count into O1
          __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre));
          __ delayed()->mov(count->after_save(), O1);
          if (addr->is_global()) {
            __ mov(L0, addr);
          }
          if (count->is_global()) {
            __ mov(L1, count);
          }
          __ restore();
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
      case BarrierSet::ModRef:
        break;
      default:
        ShouldNotReachHere();
    }
  }
  //
  //  Generate post-write barrier for array.
  //
  //  Input:
  //     addr     - register containing starting address
  //     count    - register containing element count
  //     tmp      - scratch register
  //
  //  The input registers are overwritten.
  //
  void gen_write_ref_array_post_barrier(Register addr, Register count,
                                        Register tmp) {
    BarrierSet* bs = Universe::heap()->barrier_set();

    switch (bs->kind()) {
      case BarrierSet::G1SATBCT:
      case BarrierSet::G1SATBCTLogging:
        {
          // Get some new fresh output registers.
          __ save_frame(0);
          __ mov(addr->after_save(), O0);
          __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post));
          __ delayed()->mov(count->after_save(), O1);
          __ restore();
        }
        break;
      case BarrierSet::CardTableModRef:
      case BarrierSet::CardTableExtension:
        {
          CardTableModRefBS* ct = (CardTableModRefBS*)bs;
          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
          assert_different_registers(addr, count, tmp);

          Label L_loop;

          __ sll_ptr(count, LogBytesPerHeapOop, count);
          __ sub(count, BytesPerHeapOop, count);
          __ add(count, addr, count);
          // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
          __ srl_ptr(addr, CardTableModRefBS::card_shift, addr);
          __ srl_ptr(count, CardTableModRefBS::card_shift, count);
          __ sub(count, addr, count);
          AddressLiteral rs(ct->byte_map_base);
          __ set(rs, tmp);
        __ BIND(L_loop);
          __ stb(G0, tmp, addr);
          __ subcc(count, 1, count);
          __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
          __ delayed()->add(addr, 1, addr);
        }
        break;
      case BarrierSet::ModRef:
        break;
      default:
        ShouldNotReachHere();
    }
  }
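
  // For the card-table cases the loop dirties every card spanned by the
  // written region; in C terms (a sketch, with last = addr + count*oopSize - 1):
  //
  //   jbyte* first_card = byte_map_base + ((uintptr_t)addr >> card_shift);
  //   jbyte* last_card  = byte_map_base + ((uintptr_t)last >> card_shift);
  //   for (jbyte* p = first_card; p <= last_card; p++)
  //     *p = 0;   // 0 is the dirty value stored by the stb(G0, ...) above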

  //
  // Generate main code for disjoint arraycopy
  //
  typedef void (StubGenerator::*CopyLoopFunc)(Register from, Register to, Register count, int count_dec,
                                              Label& L_loop, bool use_prefetch, bool use_bis);

  void disjoint_copy_core(Register from, Register to, Register count, int log2_elem_size,
                          int iter_size, CopyLoopFunc copy_loop_func) {
    Label L_copy;

    assert(log2_elem_size <= 3, "the following code should be changed");
    int count_dec = 16>>log2_elem_size;

    int prefetch_dist = MAX2(ArraycopySrcPrefetchDistance, ArraycopyDstPrefetchDistance);
    assert(prefetch_dist < 4096, "invalid value");
    prefetch_dist = (prefetch_dist + (iter_size-1)) & (-iter_size); // round up to one iteration copy size
    int prefetch_count = (prefetch_dist >> log2_elem_size); // elements count

    if (UseBlockCopy) {
      Label L_block_copy, L_block_copy_prefetch, L_skip_block_copy;

      // 64 bytes tail + bytes copied in one loop iteration
      int tail_size = 64 + iter_size;
      int block_copy_count = (MAX2(tail_size, (int)BlockCopyLowLimit)) >> log2_elem_size;
      // Use BIS copy only for big arrays since it requires membar.
      __ set(block_copy_count, O4);
      __ cmp_and_br_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_skip_block_copy);
      // This code is for disjoint source and destination:
      //   to <= from || to >= from+count
      // but BIS will stomp over 'from' if (to > from-tail_size && to <= from)
      __ sub(from, to, O4);
      __ srax(O4, 4, O4); // divide by 16 since the following short branch has only 5 bits for imm.
      __ cmp_and_br_short(O4, (tail_size>>4), Assembler::lessEqualUnsigned, Assembler::pn, L_skip_block_copy);

      __ wrasi(G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
      // BIS should not be used to copy tail (64 bytes+iter_size)
      // to avoid zeroing of following values.
      __ sub(count, (tail_size>>log2_elem_size), count); // count is still positive >= 0

      if (prefetch_count > 0) { // rounded up to one iteration count
        // Do prefetching only if copy size is bigger
        // than prefetch distance.
        __ set(prefetch_count, O4);
        __ cmp_and_brx_short(count, O4, Assembler::less, Assembler::pt, L_block_copy);
        __ sub(count, prefetch_count, count);

        (this->*copy_loop_func)(from, to, count, count_dec, L_block_copy_prefetch, true, true);
        __ add(count, prefetch_count, count); // restore count

      } // prefetch_count > 0

      (this->*copy_loop_func)(from, to, count, count_dec, L_block_copy, false, true);
      __ add(count, (tail_size>>log2_elem_size), count); // restore count

      __ wrasi(G0, Assembler::ASI_PRIMARY_NOFAULT);
      // BIS needs membar.
      __ membar(Assembler::StoreLoad);
      // Copy tail
      __ ba_short(L_copy);

      __ BIND(L_skip_block_copy);
    } // UseBlockCopy

    if (prefetch_count > 0) { // rounded up to one iteration count
      // Do prefetching only if copy size is bigger
      // than prefetch distance.
      __ set(prefetch_count, O4);
      __ cmp_and_brx_short(count, O4, Assembler::lessUnsigned, Assembler::pt, L_copy);
      __ sub(count, prefetch_count, count);

      Label L_copy_prefetch;
      (this->*copy_loop_func)(from, to, count, count_dec, L_copy_prefetch, true, false);
      __ add(count, prefetch_count, count); // restore count

    } // prefetch_count > 0

    (this->*copy_loop_func)(from, to, count, count_dec, L_copy, false, false);
  }
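
  // The overall dispatch structure of disjoint_copy_core, in pseudo-code
  // (a sketch of the control flow above):
  //
  //   if (UseBlockCopy && count >= block_copy_count
  //       && to is not within tail_size bytes below from) {
  //     switch to ASI_ST_BLKINIT_PRIMARY;
  //     copy everything but the 64-byte-plus-one-iteration tail,
  //         using the prefetching loop variant when count is large enough;
  //     switch back to ASI_PRIMARY_NOFAULT and membar(StoreLoad);
  //   }
  //   copy the remaining elements (again preferring the prefetching variant);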


  //
  // Helper methods for copy_16_bytes_forward_with_shift()
  //
  void copy_16_bytes_shift_loop(Register from, Register to, Register count, int count_dec,
                                Label& L_loop, bool use_prefetch, bool use_bis) {

    const Register left_shift  = G1; // left  shift bit counter
    const Register right_shift = G5; // right shift bit counter

    __ align(OptoLoopAlignment);
    __ BIND(L_loop);
    if (use_prefetch) {
      if (ArraycopySrcPrefetchDistance > 0) {
        __ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads);
      }
      if (ArraycopyDstPrefetchDistance > 0) {
        __ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads);
      }
    }
    __ ldx(from, 0, O4);
    __ ldx(from, 8, G4);
    __ inc(to, 16);
    __ inc(from, 16);
    __ deccc(count, count_dec); // Can we do next iteration after this one?
    __ srlx(O4, right_shift, G3);
    __ bset(G3, O3);
    __ sllx(O4, left_shift,  O4);
    __ srlx(G4, right_shift, G3);
    __ bset(G3, O4);
    if (use_bis) {
      __ stxa(O3, to, -16);
      __ stxa(O4, to, -8);
    } else {
      __ stx(O3, to, -16);
      __ stx(O4, to, -8);
    }
    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
    __ delayed()->sllx(G4, left_shift,  O3);
  }

  // Copy big chunks forward with shift
  //
  // Inputs:
  //   from      - source array address
  //   to        - destination array aligned to 8-bytes
  //   count     - elements count to copy >= the count equivalent to 16 bytes
  //   count_dec - elements count's decrement equivalent to 16 bytes
  //   L_copy_bytes - copy exit label
  //
  void copy_16_bytes_forward_with_shift(Register from, Register to,
                     Register count, int log2_elem_size, Label& L_copy_bytes) {
    Label L_aligned_copy, L_copy_last_bytes;
    assert(log2_elem_size <= 3, "the following code should be changed");
    int count_dec = 16>>log2_elem_size;

    // if both arrays have the same alignment mod 8, do 8 bytes aligned copy
    __ andcc(from, 7, G1); // misaligned bytes
    __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
    __ delayed()->nop();

    const Register left_shift  = G1; // left  shift bit counter
    const Register right_shift = G5; // right shift bit counter

    __ sll(G1, LogBitsPerByte, left_shift);
    __ mov(64, right_shift);
    __ sub(right_shift, left_shift, right_shift);

    //
    // Load 2 aligned 8-bytes chunks and use one from previous iteration
    // to form 2 aligned 8-bytes chunks to store.
    //
    __ dec(count, count_dec);   // Pre-decrement 'count'
    __ andn(from, 7, from);     // Align address
    __ ldx(from, 0, O3);
    __ inc(from, 8);
    __ sllx(O3, left_shift,  O3);
    disjoint_copy_core(from, to, count, log2_elem_size, 16, &StubGenerator::copy_16_bytes_shift_loop);

    __ inccc(count, count_dec>>1 ); // + 8 bytes
    __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
    __ delayed()->inc(count, count_dec>>1); // restore 'count'

    // copy 8 bytes, part of them already loaded in O3
    __ ldx(from, 0, O4);
    __ inc(to, 8);
    __ inc(from, 8);
    __ srlx(O4, right_shift, G3);
    __ bset(O3, G3);
    __ stx(G3, to, -8);

    __ BIND(L_copy_last_bytes);
    __ srl(right_shift, LogBitsPerByte, right_shift); // misaligned bytes
    __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
    __ delayed()->sub(from, right_shift, from);       // restore address

    __ BIND(L_aligned_copy);
  }
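
  // Worked example: with the source misaligned by 3 bytes, left_shift == 24
  // and right_shift == 40, so each aligned 8-byte store is assembled as
  //
  //   dst_word = (prev_word << 24) | (next_word >> 40);
  //
  // gluing the trailing 5 bytes of one aligned load to the leading 3 bytes
  // of the next (big-endian order), re-centering the unaligned source
  // stream onto aligned stores.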

  // Copy big chunks backward with shift
  //
  // Inputs:
  //   end_from  - source array end address
  //   end_to    - destination array end address aligned to 8-bytes
  //   count     - elements count to copy >= the count equivalent to 16 bytes
  //   count_dec - elements count's decrement equivalent to 16 bytes
  //   L_aligned_copy - aligned copy exit label
  //   L_copy_bytes   - copy exit label
  //
  void copy_16_bytes_backward_with_shift(Register end_from, Register end_to,
                     Register count, int count_dec,
                     Label& L_aligned_copy, Label& L_copy_bytes) {
    Label L_loop, L_copy_last_bytes;

    // if both arrays have the same alignment mod 8, do 8 bytes aligned copy
      __ andcc(end_from, 7, G1); // misaligned bytes
      __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
      __ delayed()->deccc(count, count_dec); // Pre-decrement 'count'

    const Register left_shift  = G1; // left  shift bit counter
    const Register right_shift = G5; // right shift bit counter

      __ sll(G1, LogBitsPerByte, left_shift);
      __ mov(64, right_shift);
      __ sub(right_shift, left_shift, right_shift);

    //
    // Load 2 aligned 8-bytes chunks and use one from previous iteration
    // to form 2 aligned 8-bytes chunks to store.
    //
      __ andn(end_from, 7, end_from);     // Align address
      __ ldx(end_from, 0, O3);
      __ align(OptoLoopAlignment);
    __ BIND(L_loop);
      __ ldx(end_from, -8, O4);
      __ deccc(count, count_dec); // Can we do next iteration after this one?
      __ ldx(end_from, -16, G4);
      __ dec(end_to, 16);
      __ dec(end_from, 16);
      __ srlx(O3, right_shift, O3);
      __ sllx(O4, left_shift,  G3);
      __ bset(G3, O3);
      __ stx(O3, end_to, 8);
      __ srlx(O4, right_shift, O4);
      __ sllx(G4, left_shift,  G3);
      __ bset(G3, O4);
      __ stx(O4, end_to, 0);
      __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
      __ delayed()->mov(G4, O3);

      __ inccc(count, count_dec>>1 ); // + 8 bytes
      __ brx(Assembler::negative, true, Assembler::pn, L_copy_last_bytes);
      __ delayed()->inc(count, count_dec>>1); // restore 'count'

      // copy 8 bytes, part of them already loaded in O3
      __ ldx(end_from, -8, O4);
      __ dec(end_to, 8);
      __ dec(end_from, 8);
      __ srlx(O3, right_shift, O3);
      __ sllx(O4, left_shift,  G3);
      __ bset(O3, G3);
      __ stx(G3, end_to, 0);

    __ BIND(L_copy_last_bytes);
      __ srl(left_shift, LogBitsPerByte, left_shift);    // misaligned bytes
      __ br(Assembler::always, false, Assembler::pt, L_copy_bytes);
      __ delayed()->add(end_from, left_shift, end_from); // restore address
  }
1365 
1366   //
1367   //  Generate stub for disjoint byte copy.  If "aligned" is true, the
1368   //  "from" and "to" addresses are assumed to be heapword aligned.
1369   //
1370   // Arguments for generated stub:
1371   //      from:  O0
1372   //      to:    O1
1373   //      count: O2 treated as signed
1374   //
1375   address generate_disjoint_byte_copy(bool aligned, address *entry, const char *name) {
1376     __ align(CodeEntryAlignment);
1377     StubCodeMark mark(this, "StubRoutines", name);
1378     address start = __ pc();
1379 
1380     Label L_skip_alignment, L_align;
1381     Label L_copy_byte, L_copy_byte_loop, L_exit;
1382 
1383     const Register from      = O0;   // source array address
1384     const Register to        = O1;   // destination array address
1385     const Register count     = O2;   // elements count
1386     const Register offset    = O5;   // offset from start of arrays
1387     // O3, O4, G3, G4 are used as temp registers
1388 
1389     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1390 
1391     if (entry != NULL) {
1392       *entry = __ pc();
1393       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1394       BLOCK_COMMENT("Entry:");
1395     }
1396 
1397     // for short arrays, just do single element copy
1398     __ cmp(count, 23); // 16 + 7
1399     __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
1400     __ delayed()->mov(G0, offset);
1401 
1402     if (aligned) {
1403       // 'aligned' == true when it is known statically during compilation
1404       // of this arraycopy call site that both 'from' and 'to' addresses
1405       // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
1406       //
1407       // Aligned arrays have 4 bytes alignment in 32-bits VM
1408       // and 8 bytes - in 64-bits VM. So we do it only for 32-bits VM
1409       //
1410 #ifndef _LP64
1411       // copy a 4-bytes word if necessary to align 'to' to 8 bytes
1412       __ andcc(to, 7, G0);
1413       __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment);
1414       __ delayed()->ld(from, 0, O3);
1415       __ inc(from, 4);
1416       __ inc(to, 4);
1417       __ dec(count, 4);
1418       __ st(O3, to, -4);
1419     __ BIND(L_skip_alignment);
1420 #endif
1421     } else {
1422       // copy bytes to align 'to' on 8 byte boundary
1423       __ andcc(to, 7, G1); // misaligned bytes
1424       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1425       __ delayed()->neg(G1);
1426       __ inc(G1, 8);       // bytes need to copy to next 8-bytes alignment
1427       __ sub(count, G1, count);
1428     __ BIND(L_align);
1429       __ ldub(from, 0, O3);
1430       __ deccc(G1);
1431       __ inc(from);
1432       __ stb(O3, to, 0);
1433       __ br(Assembler::notZero, false, Assembler::pt, L_align);
1434       __ delayed()->inc(to);
1435     __ BIND(L_skip_alignment);
1436     }
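
         // The loop above copies G1 = 8 - (to & 7) bytes: 'neg' in the delay
         // slot makes G1 = -(to & 7) and 'inc(G1, 8)' adds 8.  For example,
         // to == 0x1003 gives (to & 7) == 3, so G1 == 5 bytes are copied
         // singly before 'to' reaches the next 8-byte boundary.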
1437 #ifdef _LP64
1438     if (!aligned)
1439 #endif
1440     {
1441       // Copy with shift 16 bytes per iteration if arrays do not have
1442       // the same alignment mod 8, otherwise fall through to the next
1443       // code for aligned copy.
1444       // The compare above (count >= 23) guarantees 'count' >= 16 bytes.
1445       // Also jump over aligned copy after the copy with shift completed.
1446 
1447       copy_16_bytes_forward_with_shift(from, to, count, 0, L_copy_byte);
1448     }
1449 
1450     // Both arrays are 8-byte aligned; copy 16 bytes at a time
1451       __ and3(count, 7, G4); // save tail count (count % 8)
1452       __ srl(count, 3, count);
1453      generate_disjoint_long_copy_core(aligned);
1454       __ mov(G4, count);     // Restore count
1455 
1456     // copy trailing bytes
1457     __ BIND(L_copy_byte);
1458       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
1459       __ align(OptoLoopAlignment);
1460     __ BIND(L_copy_byte_loop);
1461       __ ldub(from, offset, O3);
1462       __ deccc(count);
1463       __ stb(O3, to, offset);
1464       __ brx(Assembler::notZero, false, Assembler::pt, L_copy_byte_loop);
1465       __ delayed()->inc(offset);
1466 
1467     __ BIND(L_exit);
1468       // O3, O4 are used as temp registers
1469       inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
1470       __ retl();
1471       __ delayed()->mov(G0, O0); // return 0
1472     return start;
1473   }
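
       // Illustration (C-like sketch, not compiled) of the strategy above,
       // where 'copy8' stands for the 8-byte bulk copy emitted by
       // generate_disjoint_long_copy_core():
       //
       //   if (count < 23) goto tail;            // too short for setup costs
       //   copy single bytes until 'to' is 8-byte aligned;
       //   if ('from' and 'to' differ in alignment mod 8)
       //     copy 16 bytes per iteration using shifts;
       //   else
       //     copy8(from, to, count / 8);
       //  tail:
       //   copy the remaining bytes one at a time;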
1474 
1475   //
1476   //  Generate stub for conjoint byte copy.  If "aligned" is true, the
1477   //  "from" and "to" addresses are assumed to be heapword aligned.
1478   //
1479   // Arguments for generated stub:
1480   //      from:  O0
1481   //      to:    O1
1482   //      count: O2 treated as signed
1483   //
1484   address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
1485                                       address *entry, const char *name) {
1486     // Do reverse copy.
1487 
1488     __ align(CodeEntryAlignment);
1489     StubCodeMark mark(this, "StubRoutines", name);
1490     address start = __ pc();
1491 
1492     Label L_skip_alignment, L_align, L_aligned_copy;
1493     Label L_copy_byte, L_copy_byte_loop, L_exit;
1494 
1495     const Register from      = O0;   // source array address
1496     const Register to        = O1;   // destination array address
1497     const Register count     = O2;   // elements count
1498     const Register end_from  = from; // source array end address
1499     const Register end_to    = to;   // destination array end address
1500 
1501     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1502 
1503     if (entry != NULL) {
1504       *entry = __ pc();
1505       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1506       BLOCK_COMMENT("Entry:");
1507     }
1508 
1509     array_overlap_test(nooverlap_target, 0);
1510 
1511     __ add(to, count, end_to);       // offset after last copied element
1512 
1513     // for short arrays, just do single element copy
1514     __ cmp(count, 23); // 16 + 7
1515     __ brx(Assembler::less, false, Assembler::pn, L_copy_byte);
1516     __ delayed()->add(from, count, end_from);
1517 
1518     {
1519       // Align the ends of the arrays since they could be unaligned even
1520       // when the arrays themselves are aligned.
1521 
1522       // copy bytes to align 'end_to' on 8 byte boundary
1523       __ andcc(end_to, 7, G1); // misaligned bytes
1524       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1525       __ delayed()->nop();
1526       __ sub(count, G1, count);
1527     __ BIND(L_align);
1528       __ dec(end_from);
1529       __ dec(end_to);
1530       __ ldub(end_from, 0, O3);
1531       __ deccc(G1);
1532       __ brx(Assembler::notZero, false, Assembler::pt, L_align);
1533       __ delayed()->stb(O3, end_to, 0);
1534     __ BIND(L_skip_alignment);
1535     }
1536 #ifdef _LP64
1537     if (aligned) {
1538       // Both arrays are aligned to 8-bytes in 64-bits VM.
1539       // The 'count' is decremented in copy_16_bytes_backward_with_shift()
1540       // in unaligned case.
1541       __ dec(count, 16);
1542     } else
1543 #endif
1544     {
1545       // Copy with shift 16 bytes per iteration if arrays do not have
1546       // the same alignment mod 8, otherwise jump to the next
1547       // code for aligned copy (and subtracting 16 from 'count' before the jump).
1548       // The compare above (count >= 23) guarantees 'count' >= 16 bytes.
1549       // Also jump over aligned copy after the copy with shift completed.
1550 
1551       copy_16_bytes_backward_with_shift(end_from, end_to, count, 16,
1552                                         L_aligned_copy, L_copy_byte);
1553     }
1554     // copy 16 elements (16 bytes) at a time
1555       __ align(OptoLoopAlignment);
1556     __ BIND(L_aligned_copy);
1557       __ dec(end_from, 16);
1558       __ ldx(end_from, 8, O3);
1559       __ ldx(end_from, 0, O4);
1560       __ dec(end_to, 16);
1561       __ deccc(count, 16);
1562       __ stx(O3, end_to, 8);
1563       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
1564       __ delayed()->stx(O4, end_to, 0);
1565       __ inc(count, 16);
1566 
1567     // copy 1 element (1 byte) at a time
1568     __ BIND(L_copy_byte);
1569       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
1570       __ align(OptoLoopAlignment);
1571     __ BIND(L_copy_byte_loop);
1572       __ dec(end_from);
1573       __ dec(end_to);
1574       __ ldub(end_from, 0, O4);
1575       __ deccc(count);
1576       __ brx(Assembler::greater, false, Assembler::pt, L_copy_byte_loop);
1577       __ delayed()->stb(O4, end_to, 0);
1578 
1579     __ BIND(L_exit);
1580     // O3, O4 are used as temp registers
1581     inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr, O3, O4);
1582     __ retl();
1583     __ delayed()->mov(G0, O0); // return 0
1584     return start;
1585   }
1586 
1587   //
1588   //  Generate stub for disjoint short copy.  If "aligned" is true, the
1589   //  "from" and "to" addresses are assumed to be heapword aligned.
1590   //
1591   // Arguments for generated stub:
1592   //      from:  O0
1593   //      to:    O1
1594   //      count: O2 treated as signed
1595   //
1596   address generate_disjoint_short_copy(bool aligned, address *entry, const char * name) {
1597     __ align(CodeEntryAlignment);
1598     StubCodeMark mark(this, "StubRoutines", name);
1599     address start = __ pc();
1600 
1601     Label L_skip_alignment, L_skip_alignment2;
1602     Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit;
1603 
1604     const Register from      = O0;   // source array address
1605     const Register to        = O1;   // destination array address
1606     const Register count     = O2;   // elements count
1607     const Register offset    = O5;   // offset from start of arrays
1608     // O3, O4, G3, G4 are used as temp registers
1609 
1610     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1611 
1612     if (entry != NULL) {
1613       *entry = __ pc();
1614       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1615       BLOCK_COMMENT("Entry:");
1616     }
1617 
1618     // for short arrays, just do single element copy
1619     __ cmp(count, 11); // 8 + 3  (22 bytes)
1620     __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
1621     __ delayed()->mov(G0, offset);
1622 
1623     if (aligned) {
1624       // 'aligned' == true when it is known statically during compilation
1625       // of this arraycopy call site that both 'from' and 'to' addresses
1626       // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
1627       //
1628       // Aligned arrays have 4-byte alignment in the 32-bit VM
1629       // and 8-byte alignment in the 64-bit VM.
1630       //
1631 #ifndef _LP64
1632       // copy one 4-byte word (2 elements) if necessary to align 'to' to 8 bytes
1633       __ andcc(to, 7, G0);
1634       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1635       __ delayed()->ld(from, 0, O3);
1636       __ inc(from, 4);
1637       __ inc(to, 4);
1638       __ dec(count, 2);
1639       __ st(O3, to, -4);
1640     __ BIND(L_skip_alignment);
1641 #endif
1642     } else {
1643       // copy 1 element if necessary to align 'to' on a 4-byte boundary
1644       __ andcc(to, 3, G0);
1645       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1646       __ delayed()->lduh(from, 0, O3);
1647       __ inc(from, 2);
1648       __ inc(to, 2);
1649       __ dec(count);
1650       __ sth(O3, to, -2);
1651     __ BIND(L_skip_alignment);
1652 
1653       // copy 2 elements to align 'to' on an 8 byte boundary
1654       __ andcc(to, 7, G0);
1655       __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2);
1656       __ delayed()->lduh(from, 0, O3);
1657       __ dec(count, 2);
1658       __ lduh(from, 2, O4);
1659       __ inc(from, 4);
1660       __ inc(to, 4);
1661       __ sth(O3, to, -4);
1662       __ sth(O4, to, -2);
1663     __ BIND(L_skip_alignment2);
1664     }
1665 #ifdef _LP64
1666     if (!aligned)
1667 #endif
1668     {
1669       // Copy with shift 16 bytes per iteration if arrays do not have
1670       // the same alignment mod 8, otherwise fall through to the next
1671       // code for aligned copy.
1672       // The compare above (count >= 11) guarantees 'count' >= 16 bytes.
1673       // Also jump over aligned copy after the copy with shift completed.
1674 
1675       copy_16_bytes_forward_with_shift(from, to, count, 1, L_copy_2_bytes);
1676     }
1677 
1678     // Both arrays are 8-byte aligned; copy 16 bytes at a time
1679       __ and3(count, 3, G4); // save tail count (count % 4)
1680       __ srl(count, 2, count);
1681      generate_disjoint_long_copy_core(aligned);
1682       __ mov(G4, count); // restore
1683 
1684     // copy 1 element at a time
1685     __ BIND(L_copy_2_bytes);
1686       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
1687       __ align(OptoLoopAlignment);
1688     __ BIND(L_copy_2_bytes_loop);
1689       __ lduh(from, offset, O3);
1690       __ deccc(count);
1691       __ sth(O3, to, offset);
1692       __ brx(Assembler::notZero, false, Assembler::pt, L_copy_2_bytes_loop);
1693       __ delayed()->inc(offset, 2);
1694 
1695     __ BIND(L_exit);
1696       // O3, O4 are used as temp registers
1697       inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
1698       __ retl();
1699       __ delayed()->mov(G0, O0); // return 0
1700     return start;
1701   }
1702 
1703   //
1704   //  Generate stub for array fill of bytes, shorts, or ints.  If "aligned" is true, the
1705   //  "to" address is assumed to be heapword aligned.
1706   //
1707   // Arguments for generated stub:
1708   //      to:    O0
1709   //      value: O1
1710   //      count: O2 treated as signed
1711   //
1712   address generate_fill(BasicType t, bool aligned, const char* name) {
1713     __ align(CodeEntryAlignment);
1714     StubCodeMark mark(this, "StubRoutines", name);
1715     address start = __ pc();
1716 
1717     const Register to        = O0;   // destination array address
1718     const Register value     = O1;   // fill value
1719     const Register count     = O2;   // elements count
1720     // O3 is used as a temp register
1721 
1722     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1723 
1724     Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
1725     Label L_fill_2_bytes, L_fill_elements, L_fill_32_bytes;
1726 
1727     int shift = -1;
1728     switch (t) {
1729       case T_BYTE:
1730         shift = 2;
1731         break;
1732       case T_SHORT:
1733         shift = 1;
1734         break;
1735       case T_INT:
1736         shift = 0;
1737         break;
1738       default: ShouldNotReachHere();
1739     }
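
         // With this encoding, (1 << shift) is the number of elements per
         // 4-byte word and (8 << shift) is the number of elements per 32-byte
         // chunk; e.g. for T_SHORT, shift == 1: 2 elements per word, 16 per
         // chunk.  The '2 << shift' compare below tests for fewer than 8 bytes.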
1740 
1741     BLOCK_COMMENT("Entry:");
1742 
1743     if (t == T_BYTE) {
1744       // Zero extend value and replicate the byte into the low 16 bits
1745       __ and3(value, 0xff, value);
1746       __ sllx(value, 8, O3);
1747       __ or3(value, O3, value);
1748     }
1749     if (t == T_SHORT) {
1750       // Zero extend value
1751       __ sllx(value, 48, value);
1752       __ srlx(value, 48, value);
1753     }
1754     if (t == T_BYTE || t == T_SHORT) {
1755       __ sllx(value, 16, O3);
1756       __ or3(value, O3, value);
1757     }
1758 
1759     __ cmp(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
1760     __ brx(Assembler::lessUnsigned, false, Assembler::pn, L_fill_elements); // use unsigned cmp
1761     __ delayed()->andcc(count, 1, G0);
1762 
1763     if (!aligned && (t == T_BYTE || t == T_SHORT)) {
1764       // align source address at 4 bytes address boundary
1765       if (t == T_BYTE) {
1766         // One byte misalignment happens only for byte arrays
1767         __ andcc(to, 1, G0);
1768         __ br(Assembler::zero, false, Assembler::pt, L_skip_align1);
1769         __ delayed()->nop();
1770         __ stb(value, to, 0);
1771         __ inc(to, 1);
1772         __ dec(count, 1);
1773         __ BIND(L_skip_align1);
1774       }
1775       // Two bytes misalignment happens only for byte and short (char) arrays
1776       __ andcc(to, 2, G0);
1777       __ br(Assembler::zero, false, Assembler::pt, L_skip_align2);
1778       __ delayed()->nop();
1779       __ sth(value, to, 0);
1780       __ inc(to, 2);
1781       __ dec(count, 1 << (shift - 1));
1782       __ BIND(L_skip_align2);
1783     }
1784 #ifdef _LP64
1785     if (!aligned) {
1786 #endif
1787     // align to 8 bytes; we know we are 4-byte aligned to start
1788     __ andcc(to, 7, G0);
1789     __ br(Assembler::zero, false, Assembler::pt, L_fill_32_bytes);
1790     __ delayed()->nop();
1791     __ stw(value, to, 0);
1792     __ inc(to, 4);
1793     __ dec(count, 1 << shift);
1794     __ BIND(L_fill_32_bytes);
1795 #ifdef _LP64
1796     }
1797 #endif
1798 
1799     if (t == T_INT) {
1800       // Zero extend value
1801       __ srl(value, 0, value);
1802     }
1803     if (t == T_BYTE || t == T_SHORT || t == T_INT) {
1804       __ sllx(value, 32, O3);
1805       __ or3(value, O3, value);
1806     }
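
         // Worked example for T_BYTE with value == 0xAB: the masking and
         // shifts above replicate the byte stepwise,
         //   0xAB -> 0xABAB -> 0xABABABAB -> 0xABABABABABABABAB,
         // so each 8-byte store below fills eight elements at once.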
1807 
1808     Label L_check_fill_8_bytes;
1809     // Fill 32-byte chunks
1810     __ subcc(count, 8 << shift, count);
1811     __ brx(Assembler::less, false, Assembler::pt, L_check_fill_8_bytes);
1812     __ delayed()->nop();
1813 
1814     Label L_fill_32_bytes_loop, L_fill_4_bytes;
1815     __ align(16);
1816     __ BIND(L_fill_32_bytes_loop);
1817 
1818     __ stx(value, to, 0);
1819     __ stx(value, to, 8);
1820     __ stx(value, to, 16);
1821     __ stx(value, to, 24);
1822 
1823     __ subcc(count, 8 << shift, count);
1824     __ brx(Assembler::greaterEqual, false, Assembler::pt, L_fill_32_bytes_loop);
1825     __ delayed()->add(to, 32, to);
1826 
1827     __ BIND(L_check_fill_8_bytes);
1828     __ addcc(count, 8 << shift, count);
1829     __ brx(Assembler::zero, false, Assembler::pn, L_exit);
1830     __ delayed()->subcc(count, 1 << (shift + 1), count);
1831     __ brx(Assembler::less, false, Assembler::pn, L_fill_4_bytes);
1832     __ delayed()->andcc(count, 1<<shift, G0);
1833 
1834     //
1835     // length is too short, just fill 8 bytes at a time
1836     //
1837     Label L_fill_8_bytes_loop;
1838     __ BIND(L_fill_8_bytes_loop);
1839     __ stx(value, to, 0);
1840     __ subcc(count, 1 << (shift + 1), count);
1841     __ brx(Assembler::greaterEqual, false, Assembler::pn, L_fill_8_bytes_loop);
1842     __ delayed()->add(to, 8, to);
1843 
1844     // fill trailing 4 bytes
1845     __ andcc(count, 1<<shift, G0);  // in delay slot of branches
1846     if (t == T_INT) {
1847       __ BIND(L_fill_elements);
1848     }
1849     __ BIND(L_fill_4_bytes);
1850     __ brx(Assembler::zero, false, Assembler::pt, L_fill_2_bytes);
1851     if (t == T_BYTE || t == T_SHORT) {
1852       __ delayed()->andcc(count, 1<<(shift-1), G0);
1853     } else {
1854       __ delayed()->nop();
1855     }
1856     __ stw(value, to, 0);
1857     if (t == T_BYTE || t == T_SHORT) {
1858       __ inc(to, 4);
1859       // fill trailing 2 bytes
1860       __ andcc(count, 1<<(shift-1), G0); // in delay slot of branches
1861       __ BIND(L_fill_2_bytes);
1862       __ brx(Assembler::zero, false, Assembler::pt, L_fill_byte);
1863       __ delayed()->andcc(count, 1, count);
1864       __ sth(value, to, 0);
1865       if (t == T_BYTE) {
1866         __ inc(to, 2);
1867         // fill trailing byte
1868         __ andcc(count, 1, count);  // in delay slot of branches
1869         __ BIND(L_fill_byte);
1870         __ brx(Assembler::zero, false, Assembler::pt, L_exit);
1871         __ delayed()->nop();
1872         __ stb(value, to, 0);
1873       } else {
1874         __ BIND(L_fill_byte);
1875       }
1876     } else {
1877       __ BIND(L_fill_2_bytes);
1878     }
1879     __ BIND(L_exit);
1880     __ retl();
1881     __ delayed()->nop();
1882 
1883     // Handle fills of less than 8 bytes.  Int is handled elsewhere.
1884     if (t == T_BYTE) {
1885       __ BIND(L_fill_elements);
1886       Label L_fill_2, L_fill_4;
1887       // in delay slot __ andcc(count, 1, G0);
1888       __ brx(Assembler::zero, false, Assembler::pt, L_fill_2);
1889       __ delayed()->andcc(count, 2, G0);
1890       __ stb(value, to, 0);
1891       __ inc(to, 1);
1892       __ BIND(L_fill_2);
1893       __ brx(Assembler::zero, false, Assembler::pt, L_fill_4);
1894       __ delayed()->andcc(count, 4, G0);
1895       __ stb(value, to, 0);
1896       __ stb(value, to, 1);
1897       __ inc(to, 2);
1898       __ BIND(L_fill_4);
1899       __ brx(Assembler::zero, false, Assembler::pt, L_exit);
1900       __ delayed()->nop();
1901       __ stb(value, to, 0);
1902       __ stb(value, to, 1);
1903       __ stb(value, to, 2);
1904       __ retl();
1905       __ delayed()->stb(value, to, 3);
1906     }
1907 
1908     if (t == T_SHORT) {
1909       Label L_fill_2;
1910       __ BIND(L_fill_elements);
1911       // in delay slot __ andcc(count, 1, G0);
1912       __ brx(Assembler::zero, false, Assembler::pt, L_fill_2);
1913       __ delayed()->andcc(count, 2, G0);
1914       __ sth(value, to, 0);
1915       __ inc(to, 2);
1916       __ BIND(L_fill_2);
1917       __ brx(Assembler::zero, false, Assembler::pt, L_exit);
1918       __ delayed()->nop();
1919       __ sth(value, to, 0);
1920       __ retl();
1921       __ delayed()->sth(value, to, 2);
1922     }
1923     return start;
1924   }
1925 
1926   //
1927   //  Generate stub for conjoint short copy.  If "aligned" is true, the
1928   //  "from" and "to" addresses are assumed to be heapword aligned.
1929   //
1930   // Arguments for generated stub:
1931   //      from:  O0
1932   //      to:    O1
1933   //      count: O2 treated as signed
1934   //
1935   address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
1936                                        address *entry, const char *name) {
1937     // Do reverse copy.
1938 
1939     __ align(CodeEntryAlignment);
1940     StubCodeMark mark(this, "StubRoutines", name);
1941     address start = __ pc();
1942 
1943     Label L_skip_alignment, L_skip_alignment2, L_aligned_copy;
1944     Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit;
1945 
1946     const Register from      = O0;   // source array address
1947     const Register to        = O1;   // destination array address
1948     const Register count     = O2;   // elements count
1949     const Register end_from  = from; // source array end address
1950     const Register end_to    = to;   // destination array end address
1951 
1952     const Register byte_count = O3;  // bytes count to copy
1953 
1954     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
1955 
1956     if (entry != NULL) {
1957       *entry = __ pc();
1958       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1959       BLOCK_COMMENT("Entry:");
1960     }
1961 
1962     array_overlap_test(nooverlap_target, 1);
1963 
1964     __ sllx(count, LogBytesPerShort, byte_count);
1965     __ add(to, byte_count, end_to);  // offset after last copied element
1966 
1967     // for short arrays, just do single element copy
1968     __ cmp(count, 11); // 8 + 3  (22 bytes)
1969     __ brx(Assembler::less, false, Assembler::pn, L_copy_2_bytes);
1970     __ delayed()->add(from, byte_count, end_from);
1971 
1972     {
1973       // Align the ends of the arrays since they could be unaligned even
1974       // when the arrays themselves are aligned.
1975 
1976       // copy 1 element if necessary to align 'end_to' on a 4-byte boundary
1977       __ andcc(end_to, 3, G0);
1978       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
1979       __ delayed()->lduh(end_from, -2, O3);
1980       __ dec(end_from, 2);
1981       __ dec(end_to, 2);
1982       __ dec(count);
1983       __ sth(O3, end_to, 0);
1984     __ BIND(L_skip_alignment);
1985 
1986       // copy 2 elements to align 'end_to' on an 8 byte boundary
1987       __ andcc(end_to, 7, G0);
1988       __ br(Assembler::zero, false, Assembler::pn, L_skip_alignment2);
1989       __ delayed()->lduh(end_from, -2, O3);
1990       __ dec(count, 2);
1991       __ lduh(end_from, -4, O4);
1992       __ dec(end_from, 4);
1993       __ dec(end_to, 4);
1994       __ sth(O3, end_to, 2);
1995       __ sth(O4, end_to, 0);
1996     __ BIND(L_skip_alignment2);
1997     }
1998 #ifdef _LP64
1999     if (aligned) {
2000       // Both arrays are aligned to 8-bytes in 64-bits VM.
2001       // The 'count' is decremented in copy_16_bytes_backward_with_shift()
2002       // in unaligned case.
2003       __ dec(count, 8);
2004     } else
2005 #endif
2006     {
2007       // Copy with shift 16 bytes per iteration if arrays do not have
2008       // the same alignment mod 8, otherwise jump to the next
2009       // code for aligned copy (and subtracting 8 from 'count' before the jump).
2010       // The compare above (count >= 11) guarantees 'count' >= 16 bytes.
2011       // Also jump over aligned copy after the copy with shift completed.
2012 
2013       copy_16_bytes_backward_with_shift(end_from, end_to, count, 8,
2014                                         L_aligned_copy, L_copy_2_bytes);
2015     }
2016     // copy 8 elements (16 bytes) at a time
2017       __ align(OptoLoopAlignment);
2018     __ BIND(L_aligned_copy);
2019       __ dec(end_from, 16);
2020       __ ldx(end_from, 8, O3);
2021       __ ldx(end_from, 0, O4);
2022       __ dec(end_to, 16);
2023       __ deccc(count, 8);
2024       __ stx(O3, end_to, 8);
2025       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
2026       __ delayed()->stx(O4, end_to, 0);
2027       __ inc(count, 8);
2028 
2029     // copy 1 element (2 bytes) at a time
2030     __ BIND(L_copy_2_bytes);
2031       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
2032     __ BIND(L_copy_2_bytes_loop);
2033       __ dec(end_from, 2);
2034       __ dec(end_to, 2);
2035       __ lduh(end_from, 0, O4);
2036       __ deccc(count);
2037       __ brx(Assembler::greater, false, Assembler::pt, L_copy_2_bytes_loop);
2038       __ delayed()->sth(O4, end_to, 0);
2039 
2040     __ BIND(L_exit);
2041     // O3, O4 are used as temp registers
2042     inc_counter_np(SharedRuntime::_jshort_array_copy_ctr, O3, O4);
2043     __ retl();
2044     __ delayed()->mov(G0, O0); // return 0
2045     return start;
2046   }
2047 
2048   //
2049   // Helper methods for generate_disjoint_int_copy_core()
2050   //
2051   void copy_16_bytes_loop(Register from, Register to, Register count, int count_dec,
2052                           Label& L_loop, bool use_prefetch, bool use_bis) {
2053 
2054     __ align(OptoLoopAlignment);
2055     __ BIND(L_loop);
2056     if (use_prefetch) {
2057       if (ArraycopySrcPrefetchDistance > 0) {
2058         __ prefetch(from, ArraycopySrcPrefetchDistance, Assembler::severalReads);
2059       }
2060       if (ArraycopyDstPrefetchDistance > 0) {
2061         __ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads);
2062       }
2063     }
2064     __ ldx(from, 4, O4);
2065     __ ldx(from, 12, G4);
2066     __ inc(to, 16);
2067     __ inc(from, 16);
2068     __ deccc(count, 4); // Can we do next iteration after this one?
2069 
2070     __ srlx(O4, 32, G3);
2071     __ bset(G3, O3);
2072     __ sllx(O4, 32, O4);
2073     __ srlx(G4, 32, G3);
2074     __ bset(G3, O4);
2075     if (use_bis) {
2076       __ stxa(O3, to, -16);
2077       __ stxa(O4, to, -8);
2078     } else {
2079       __ stx(O3, to, -16);
2080       __ stx(O4, to, -8);
2081     }
2082     __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
2083     __ delayed()->sllx(G4, 32,  O3);
2084 
2085   }
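
       // Illustration (C-like sketch, not compiled) of one iteration above,
       // with 'from' and 'to' taken at iteration entry.  'from' is 4 mod 8
       // and 'to' is 8-byte aligned, so every ldx/stx is aligned; 'carry'
       // (O3) holds the 4 source bytes not yet stored, already shifted into
       // the upper half:
       //
       //   a = *(uint64_t*) (from + 4);
       //   b = *(uint64_t*) (from + 12);
       //   *(uint64_t*) (to + 0) = carry     | (a >> 32);
       //   *(uint64_t*) (to + 8) = (a << 32) | (b >> 32);
       //   carry = b << 32;                  // set in the branch delay slot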
2086 
2087   //
2088   //  Generate core code for disjoint int copy (and oop copy on 32-bit).
2089   //  If "aligned" is true, the "from" and "to" addresses are assumed
2090   //  to be heapword aligned.
2091   //
2092   // Arguments:
2093   //      from:  O0
2094   //      to:    O1
2095   //      count: O2 treated as signed
2096   //
2097   void generate_disjoint_int_copy_core(bool aligned) {
2098 
2099     Label L_skip_alignment, L_aligned_copy;
2100     Label L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
2101 
2102     const Register from      = O0;   // source array address
2103     const Register to        = O1;   // destination array address
2104     const Register count     = O2;   // elements count
2105     const Register offset    = O5;   // offset from start of arrays
2106     // O3, O4, G3, G4 are used as temp registers
2107 
2108     // 'aligned' == true when it is known statically during compilation
2109     // of this arraycopy call site that both 'from' and 'to' addresses
2110     // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
2111     //
2112     // Aligned arrays have 4-byte alignment in the 32-bit VM
2113     // and 8-byte alignment in the 64-bit VM.
2114     //
2115 #ifdef _LP64
2116     if (!aligned)
2117 #endif
2118     {
2119       // The next check could be put under 'ifndef' since the code in
2120       // generate_disjoint_long_copy_core() has its own checks and sets 'offset'.
2121 
2122       // for short arrays, just do single element copy
2123       __ cmp(count, 5); // 4 + 1 (20 bytes)
2124       __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes);
2125       __ delayed()->mov(G0, offset);
2126 
2127       // copy 1 element to align 'to' on an 8 byte boundary
2128       __ andcc(to, 7, G0);
2129       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
2130       __ delayed()->ld(from, 0, O3);
2131       __ inc(from, 4);
2132       __ inc(to, 4);
2133       __ dec(count);
2134       __ st(O3, to, -4);
2135     __ BIND(L_skip_alignment);
2136 
2137     // if arrays have the same alignment mod 8, copy 4 elements at a time
2138       __ andcc(from, 7, G0);
2139       __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
2140       __ delayed()->ld(from, 0, O3);
2141 
2142     //
2143     // Load 2 aligned 8-byte chunks and use one from the previous iteration
2144     // to form 2 aligned 8-byte chunks to store.
2145     //
2146     // copy_16_bytes_forward_with_shift() is not used here since this
2147     // code is more efficient.
2148 
2149     // copy with shift 4 elements (16 bytes) at a time
2150       __ dec(count, 4);   // The cmp at the beginning guarantees count >= 4
2151       __ sllx(O3, 32,  O3);
2152 
2153       disjoint_copy_core(from, to, count, 2, 16, copy_16_bytes_loop);
2154 
2155       __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
2156       __ delayed()->inc(count, 4); // restore 'count'
2157 
2158     __ BIND(L_aligned_copy);
2159     } // !aligned
2160 
2161     // copy 4 elements (16 bytes) at a time
2162       __ and3(count, 1, G4); // save tail count (count % 2)
2163       __ srl(count, 1, count);
2164      generate_disjoint_long_copy_core(aligned);
2165       __ mov(G4, count);     // Restore
2166 
2167     // copy 1 element at a time
2168     __ BIND(L_copy_4_bytes);
2169       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
2170     __ BIND(L_copy_4_bytes_loop);
2171       __ ld(from, offset, O3);
2172       __ deccc(count);
2173       __ st(O3, to, offset);
2174       __ brx(Assembler::notZero, false, Assembler::pt, L_copy_4_bytes_loop);
2175       __ delayed()->inc(offset, 4);
2176     __ BIND(L_exit);
2177   }
2178 
2179   //
2180   //  Generate stub for disjoint int copy.  If "aligned" is true, the
2181   //  "from" and "to" addresses are assumed to be heapword aligned.
2182   //
2183   // Arguments for generated stub:
2184   //      from:  O0
2185   //      to:    O1
2186   //      count: O2 treated as signed
2187   //
2188   address generate_disjoint_int_copy(bool aligned, address *entry, const char *name) {
2189     __ align(CodeEntryAlignment);
2190     StubCodeMark mark(this, "StubRoutines", name);
2191     address start = __ pc();
2192 
2193     const Register count = O2;
2194     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2195 
2196     if (entry != NULL) {
2197       *entry = __ pc();
2198       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2199       BLOCK_COMMENT("Entry:");
2200     }
2201 
2202     generate_disjoint_int_copy_core(aligned);
2203 
2204     // O3, O4 are used as temp registers
2205     inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4);
2206     __ retl();
2207     __ delayed()->mov(G0, O0); // return 0
2208     return start;
2209   }
2210 
2211   //
2212   //  Generate core code for conjoint int copy (and oop copy on 32-bit).
2213   //  If "aligned" is true, the "from" and "to" addresses are assumed
2214   //  to be heapword aligned.
2215   //
2216   // Arguments:
2217   //      from:  O0
2218   //      to:    O1
2219   //      count: O2 treated as signed
2220   //
2221   void generate_conjoint_int_copy_core(bool aligned) {
2222     // Do reverse copy.
2223 
2224     Label L_skip_alignment, L_aligned_copy;
2225     Label L_copy_16_bytes,  L_copy_4_bytes, L_copy_4_bytes_loop, L_exit;
2226 
2227     const Register from      = O0;   // source array address
2228     const Register to        = O1;   // destination array address
2229     const Register count     = O2;   // elements count
2230     const Register end_from  = from; // source array end address
2231     const Register end_to    = to;   // destination array end address
2232     // O3, O4, O5, G3 are used as temp registers
2233 
2234     const Register byte_count = O3;  // bytes count to copy
2235 
2236       __ sllx(count, LogBytesPerInt, byte_count);
2237       __ add(to, byte_count, end_to); // offset after last copied element
2238 
2239       __ cmp(count, 5); // for short arrays, just do single element copy
2240       __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_4_bytes);
2241       __ delayed()->add(from, byte_count, end_from);
2242 
2243     // copy 1 element to align 'to' on an 8 byte boundary
2244       __ andcc(end_to, 7, G0);
2245       __ br(Assembler::zero, false, Assembler::pt, L_skip_alignment);
2246       __ delayed()->nop();
2247       __ dec(count);
2248       __ dec(end_from, 4);
2249       __ dec(end_to,   4);
2250       __ ld(end_from, 0, O4);
2251       __ st(O4, end_to, 0);
2252     __ BIND(L_skip_alignment);
2253 
2254     // Check if 'end_from' and 'end_to' have the same alignment.
2255       __ andcc(end_from, 7, G0);
2256       __ br(Assembler::zero, false, Assembler::pt, L_aligned_copy);
2257       __ delayed()->dec(count, 4); // The cmp at the start guarantees count >= 4
2258 
2259     // copy with shift 4 elements (16 bytes) at a time
2260     //
2261     // Load 2 aligned 8-byte chunks and use one from the previous iteration
2262     // to form 2 aligned 8-byte chunks to store.
2263     //
2264       __ ldx(end_from, -4, O3);
2265       __ align(OptoLoopAlignment);
2266     __ BIND(L_copy_16_bytes);
2267       __ ldx(end_from, -12, O4);
2268       __ deccc(count, 4);
2269       __ ldx(end_from, -20, O5);
2270       __ dec(end_to, 16);
2271       __ dec(end_from, 16);
2272       __ srlx(O3, 32, O3);
2273       __ sllx(O4, 32, G3);
2274       __ bset(G3, O3);
2275       __ stx(O3, end_to, 8);
2276       __ srlx(O4, 32, O4);
2277       __ sllx(O5, 32, G3);
2278       __ bset(O4, G3);
2279       __ stx(G3, end_to, 0);
2280       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
2281       __ delayed()->mov(O5, O3);
2282 
2283       __ br(Assembler::always, false, Assembler::pt, L_copy_4_bytes);
2284       __ delayed()->inc(count, 4);
2285 
2286     // copy 4 elements (16 bytes) at a time
2287       __ align(OptoLoopAlignment);
2288     __ BIND(L_aligned_copy);
2289       __ dec(end_from, 16);
2290       __ ldx(end_from, 8, O3);
2291       __ ldx(end_from, 0, O4);
2292       __ dec(end_to, 16);
2293       __ deccc(count, 4);
2294       __ stx(O3, end_to, 8);
2295       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_aligned_copy);
2296       __ delayed()->stx(O4, end_to, 0);
2297       __ inc(count, 4);
2298 
2299     // copy 1 element (4 bytes) at a time
2300     __ BIND(L_copy_4_bytes);
2301       __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_exit);
2302     __ BIND(L_copy_4_bytes_loop);
2303       __ dec(end_from, 4);
2304       __ dec(end_to, 4);
2305       __ ld(end_from, 0, O4);
2306       __ deccc(count);
2307       __ brx(Assembler::greater, false, Assembler::pt, L_copy_4_bytes_loop);
2308       __ delayed()->st(O4, end_to, 0);
2309     __ BIND(L_exit);
2310   }
2311 
2312   //
2313   //  Generate stub for conjoint int copy.  If "aligned" is true, the
2314   //  "from" and "to" addresses are assumed to be heapword aligned.
2315   //
2316   // Arguments for generated stub:
2317   //      from:  O0
2318   //      to:    O1
2319   //      count: O2 treated as signed
2320   //
2321   address generate_conjoint_int_copy(bool aligned, address nooverlap_target,
2322                                      address *entry, const char *name) {
2323     __ align(CodeEntryAlignment);
2324     StubCodeMark mark(this, "StubRoutines", name);
2325     address start = __ pc();
2326 
2327     assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
2328 
2329     if (entry != NULL) {
2330       *entry = __ pc();
2331       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2332       BLOCK_COMMENT("Entry:");
2333     }
2334 
2335     array_overlap_test(nooverlap_target, 2);
2336 
2337     generate_conjoint_int_copy_core(aligned);
2338 
2339     // O3, O4 are used as temp registers
2340     inc_counter_np(SharedRuntime::_jint_array_copy_ctr, O3, O4);
2341     __ retl();
2342     __ delayed()->mov(G0, O0); // return 0
2343     return start;
2344   }
2345 
2346   //
2347   // Helper methods for generate_disjoint_long_copy_core()
2348   //
2349   void copy_64_bytes_loop(Register from, Register to, Register count, int count_dec,
2350                           Label& L_loop, bool use_prefetch, bool use_bis) {
2351     __ align(OptoLoopAlignment);
2352     __ BIND(L_loop);
2353     for (int off = 0; off < 64; off += 16) {
2354       if (use_prefetch && (off & 31) == 0) {
2355         if (ArraycopySrcPrefetchDistance > 0) {
2356           __ prefetch(from, ArraycopySrcPrefetchDistance+off, Assembler::severalReads);
2357         }
2358         if (ArraycopyDstPrefetchDistance > 0) {
2359           __ prefetch(to, ArraycopyDstPrefetchDistance+off, Assembler::severalWritesAndPossiblyReads);
2360         }
2361       }
2362       __ ldx(from,  off+0, O4);
2363       __ ldx(from,  off+8, O5);
2364       if (use_bis) {
2365         __ stxa(O4, to,  off+0);
2366         __ stxa(O5, to,  off+8);
2367       } else {
2368         __ stx(O4, to,  off+0);
2369         __ stx(O5, to,  off+8);
2370       }
2371     }
2372     __ deccc(count, 8);
2373     __ inc(from, 64);
2374     __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
2375     __ delayed()->inc(to, 64);
2376   }
2377 
2378   //
2379   //  Generate core code for disjoint long copy (and oop copy on 64-bit).
2380   //  "aligned" is ignored, because we must make the stronger
2381   //  assumption that both addresses are always 64-bit aligned.
2382   //
2383   // Arguments:
2384   //      from:  O0
2385   //      to:    O1
2386   //      count: O2 treated as signed
2387   //
2388   // count -= 2;
2389   // if ( count >= 0 ) { // >= 2 elements
2390   //   if ( count >= 6 ) { // >= 8 elements
2391   //     count -= 6; // original count - 8
2392   //     do {
2393   //       copy_8_elements;
2394   //       count -= 8;
2395   //     } while ( count >= 0 );
2396   //     count += 6;
2397   //   }
2398   //   if ( count >= 0 ) { // >= 2 elements
2399   //     do {
2400   //       copy_2_elements;
2401   //     } while ( (count=count-2) >= 0 );
2402   //   }
2403   // }
2404   // count += 2;
2405   // if ( count != 0 ) { // 1 element left
2406   //   copy_1_element;
2407   // }
2408   //
2409   void generate_disjoint_long_copy_core(bool aligned) {
2410     Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
2411     const Register from    = O0;  // source array address
2412     const Register to      = O1;  // destination array address
2413     const Register count   = O2;  // elements count
2414     const Register offset0 = O4;  // element offset
2415     const Register offset8 = O5;  // next element offset
2416 
2417     __ deccc(count, 2);
2418     __ mov(G0, offset0);   // offset from start of arrays (0)
2419     __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
2420     __ delayed()->add(offset0, 8, offset8);
2421 
2422     // Copy in 64-byte chunks
2423 
2424     const Register from64 = O3;  // source address
2425     const Register to64   = G3;  // destination address
2426     __ subcc(count, 6, O3);
2427     __ brx(Assembler::negative, false, Assembler::pt, L_copy_16_bytes );
2428     __ delayed()->mov(to,   to64);
2429     // Now we can use O4(offset0), O5(offset8) as temps
2430     __ mov(O3, count);
2431     // count >= 0 (original count - 8)
2432     __ mov(from, from64);
2433 
2434     disjoint_copy_core(from64, to64, count, 3, 64, copy_64_bytes_loop);
2435 
2436       // Restore O4(offset0), O5(offset8)
2437       __ sub(from64, from, offset0);
2438       __ inccc(count, 6); // restore count
2439       __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
2440       __ delayed()->add(offset0, 8, offset8);
2441 
2442       // Copy in 16-byte chunks
2443       __ align(OptoLoopAlignment);
2444     __ BIND(L_copy_16_bytes);
2445       __ ldx(from, offset0, O3);
2446       __ ldx(from, offset8, G3);
2447       __ deccc(count, 2);
2448       __ stx(O3, to, offset0);
2449       __ inc(offset0, 16);
2450       __ stx(G3, to, offset8);
2451       __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
2452       __ delayed()->inc(offset8, 16);
2453 
2454       // Copy last 8 bytes
2455     __ BIND(L_copy_8_bytes);
2456       __ inccc(count, 2);
2457       __ brx(Assembler::zero, true, Assembler::pn, L_exit );
2458       __ delayed()->mov(offset0, offset8); // Set O5 used by other stubs
2459       __ ldx(from, offset0, O3);
2460       __ stx(O3, to, offset0);
2461     __ BIND(L_exit);
2462   }
2463 
2464   //
2465   //  Generate stub for disjoint long copy.
2466   //  "aligned" is ignored, because we must make the stronger
2467   //  assumption that both addresses are always 64-bit aligned.
2468   //
2469   // Arguments for generated stub:
2470   //      from:  O0
2471   //      to:    O1
2472   //      count: O2 treated as signed
2473   //
2474   address generate_disjoint_long_copy(bool aligned, address *entry, const char *name) {
2475     __ align(CodeEntryAlignment);
2476     StubCodeMark mark(this, "StubRoutines", name);
2477     address start = __ pc();
2478 
2479     assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
2480 
2481     if (entry != NULL) {
2482       *entry = __ pc();
2483       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2484       BLOCK_COMMENT("Entry:");
2485     }
2486 
2487     generate_disjoint_long_copy_core(aligned);
2488 
2489     // O3, O4 are used as temp registers
2490     inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
2491     __ retl();
2492     __ delayed()->mov(G0, O0); // return 0
2493     return start;
2494   }
2495 
2496   //
2497   //  Generate core code for conjoint long copy (and oop copy on 64-bit).
2498   //  "aligned" is ignored, because we must make the stronger
2499   //  assumption that both addresses are always 64-bit aligned.
2500   //
2501   // Arguments:
2502   //      from:  O0
2503   //      to:    O1
2504   //      count: O2 treated as signed
2505   //
2506   void generate_conjoint_long_copy_core(bool aligned) {
2507     // Do reverse copy.
2508     Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
2509     const Register from    = O0;  // source array address
2510     const Register to      = O1;  // destination array address
2511     const Register count   = O2;  // elements count
2512     const Register offset8 = O4;  // element offset
2513     const Register offset0 = O5;  // previous element offset
2514 
2515       __ subcc(count, 1, count);
2516       __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_8_bytes );
2517       __ delayed()->sllx(count, LogBytesPerLong, offset8);
2518       __ sub(offset8, 8, offset0);
2519       __ align(OptoLoopAlignment);
2520     __ BIND(L_copy_16_bytes);
2521       __ ldx(from, offset8, O2);
2522       __ ldx(from, offset0, O3);
2523       __ stx(O2, to, offset8);
2524       __ deccc(offset8, 16);      // use offset8 as counter
2525       __ stx(O3, to, offset0);
2526       __ brx(Assembler::greater, false, Assembler::pt, L_copy_16_bytes);
2527       __ delayed()->dec(offset0, 16);
2528 
2529     __ BIND(L_copy_8_bytes);
2530       __ brx(Assembler::negative, false, Assembler::pn, L_exit );
2531       __ delayed()->nop();
2532       __ ldx(from, 0, O3);
2533       __ stx(O3, to, 0);
2534     __ BIND(L_exit);
2535   }
2536 
2537   //  Generate stub for conjoint long copy.
2538   //  "aligned" is ignored, because we must make the stronger
2539   //  assumption that both addresses are always 64-bit aligned.
2540   //
2541   // Arguments for generated stub:
2542   //      from:  O0
2543   //      to:    O1
2544   //      count: O2 treated as signed
2545   //
2546   address generate_conjoint_long_copy(bool aligned, address nooverlap_target,
2547                                       address *entry, const char *name) {
2548     __ align(CodeEntryAlignment);
2549     StubCodeMark mark(this, "StubRoutines", name);
2550     address start = __ pc();
2551 
2552     assert(aligned, "Should always be aligned");
2553 
2554     assert_clean_int(O2, O3);     // Make sure 'count' is clean int.
2555 
2556     if (entry != NULL) {
2557       *entry = __ pc();
2558       // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
2559       BLOCK_COMMENT("Entry:");
2560     }
2561 
2562     array_overlap_test(nooverlap_target, 3);
2563 
2564     generate_conjoint_long_copy_core(aligned);
2565 
2566     // O3, O4 are used as temp registers
2567     inc_counter_np(SharedRuntime::_jlong_array_copy_ctr, O3, O4);
2568     __ retl();
2569     __ delayed()->mov(G0, O0); // return 0
2570     return start;
2571   }
2572 
2573   //  Generate stub for disjoint oop copy.  If "aligned" is true, the
2574   //  "from" and "to" addresses are assumed to be heapword aligned.
2575   //
2576   // Arguments for generated stub:
2577   //      from:  O0
2578   //      to:    O1
2579   //      count: O2 treated as signed
2580   //
2581   address generate_disjoint_oop_copy(bool aligned, address *entry, const char *name,
2582                                      bool dest_uninitialized = false) {
2583 
2584     const Register from  = O0;  // source array address
2585     const Register to    = O1;  // destination array address
2586     const Register count = O2;  // elements count
2587 
2588     __ align(CodeEntryAlignment);
2589     StubCodeMark mark(this, "StubRoutines", name);
2590     address start = __ pc();
2591 
2592     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2593 
2594     if (entry != NULL) {
2595       *entry = __ pc();
2596       // caller can pass a 64-bit byte count here
2597       BLOCK_COMMENT("Entry:");
2598     }
2599 
2600     // save arguments for barrier generation
2601     __ mov(to, G1);
2602     __ mov(count, G5);
2603     gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized);
2604   #ifdef _LP64
2605     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2606     if (UseCompressedOops) {
2607       generate_disjoint_int_copy_core(aligned);
2608     } else {
2609       generate_disjoint_long_copy_core(aligned);
2610     }
2611   #else
2612     generate_disjoint_int_copy_core(aligned);
2613   #endif
2614     // O0 is used as temp register
2615     gen_write_ref_array_post_barrier(G1, G5, O0);
2616 
2617     // O3, O4 are used as temp registers
2618     inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
2619     __ retl();
2620     __ delayed()->mov(G0, O0); // return 0
2621     return start;
2622   }
2623 
2624   //  Generate stub for conjoint oop copy.  If "aligned" is true, the
2625   //  "from" and "to" addresses are assumed to be heapword aligned.
2626   //
2627   // Arguments for generated stub:
2628   //      from:  O0
2629   //      to:    O1
2630   //      count: O2 treated as signed
2631   //
2632   address generate_conjoint_oop_copy(bool aligned, address nooverlap_target,
2633                                      address *entry, const char *name,
2634                                      bool dest_uninitialized = false) {
2635 
2636     const Register from  = O0;  // source array address
2637     const Register to    = O1;  // destination array address
2638     const Register count = O2;  // elements count
2639 
2640     __ align(CodeEntryAlignment);
2641     StubCodeMark mark(this, "StubRoutines", name);
2642     address start = __ pc();
2643 
2644     assert_clean_int(count, O3);     // Make sure 'count' is clean int.
2645 
2646     if (entry != NULL) {
2647       *entry = __ pc();
2648       // caller can pass a 64-bit byte count here
2649       BLOCK_COMMENT("Entry:");
2650     }
2651 
2652     array_overlap_test(nooverlap_target, LogBytesPerHeapOop);
2653 
2654     // save arguments for barrier generation
2655     __ mov(to, G1);
2656     __ mov(count, G5);
2657     gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized);
2658 
2659   #ifdef _LP64
2660     if (UseCompressedOops) {
2661       generate_conjoint_int_copy_core(aligned);
2662     } else {
2663       generate_conjoint_long_copy_core(aligned);
2664     }
2665   #else
2666     generate_conjoint_int_copy_core(aligned);
2667   #endif
2668 
2669     // O0 is used as temp register
2670     gen_write_ref_array_post_barrier(G1, G5, O0);
2671 
2672     // O3, O4 are used as temp registers
2673     inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
2674     __ retl();
2675     __ delayed()->mov(G0, O0); // return 0
2676     return start;
2677   }
2678 
2679 
2680   // Helper for generating a dynamic type check.
2681   // Smashes only the given temp registers.
2682   void generate_type_check(Register sub_klass,
2683                            Register super_check_offset,
2684                            Register super_klass,
2685                            Register temp,
2686                            Label& L_success) {
2687     assert_different_registers(sub_klass, super_check_offset, super_klass, temp);
2688 
2689     BLOCK_COMMENT("type_check:");
2690 
2691     Label L_miss, L_pop_to_miss;
2692 
2693     assert_clean_int(super_check_offset, temp);
2694 
2695     __ check_klass_subtype_fast_path(sub_klass, super_klass, temp, noreg,
2696                                      &L_success, &L_miss, NULL,
2697                                      super_check_offset);
2698 
2699     BLOCK_COMMENT("type_check_slow_path:");
2700     __ save_frame(0);
2701     __ check_klass_subtype_slow_path(sub_klass->after_save(),
2702                                      super_klass->after_save(),
2703                                      L0, L1, L2, L4,
2704                                      NULL, &L_pop_to_miss);
2705     __ ba(L_success);
2706     __ delayed()->restore();
2707 
2708     __ bind(L_pop_to_miss);
2709     __ restore();
2710 
2711     // Fall through on failure!
2712     __ BIND(L_miss);
2713   }
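
       // Roughly (C-like sketch, not compiled), the check emitted above is:
       //
       //   if (sub_klass == super_klass)                            goto L_success;
       //   if (*(Klass**) ((address) sub_klass + super_check_offset)
       //       == super_klass)                                      goto L_success;
       //   // slow path: scan sub_klass's secondary supers in a saved frame,
       //   // falling through to L_miss on failure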
2714 
2715 
2716   //  Generate stub for checked oop copy.
2717   //
2718   // Arguments for generated stub:
2719   //      from:  O0
2720   //      to:    O1
2721   //      count: O2 treated as signed
2722   //      ckoff: O3 (super_check_offset)
2723   //      ckval: O4 (super_klass)
2724   //      ret:   O0 zero for success; (-1^K) where K is partial transfer count
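       //             (e.g. K == 3 elements copied before a failure gives O0 == -1^3 == -4)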
2725   //
2726   address generate_checkcast_copy(const char *name, address *entry, bool dest_uninitialized = false) {
2727 
2728     const Register O0_from   = O0;      // source array address
2729     const Register O1_to     = O1;      // destination array address
2730     const Register O2_count  = O2;      // elements count
2731     const Register O3_ckoff  = O3;      // super_check_offset
2732     const Register O4_ckval  = O4;      // super_klass
2733 
2734     const Register O5_offset = O5;      // loop var, with stride wordSize
2735     const Register G1_remain = G1;      // loop var, with stride -1
2736     const Register G3_oop    = G3;      // actual oop copied
2737     const Register G4_klass  = G4;      // oop._klass
2738     const Register G5_super  = G5;      // oop._klass._primary_supers[ckval]
2739 
2740     __ align(CodeEntryAlignment);
2741     StubCodeMark mark(this, "StubRoutines", name);
2742     address start = __ pc();
2743 
2744 #ifdef ASSERT
2745     // We sometimes save a frame (see generate_type_check below).
2746     // If this will cause trouble, let's fail now instead of later.
2747     __ save_frame(0);
2748     __ restore();
2749 #endif
2750 
2751     assert_clean_int(O2_count, G1);     // Make sure 'count' is clean int.
2752 
2753 #ifdef ASSERT
2754     // caller guarantees that the arrays really are different;
2755     // otherwise, we would have to make conjoint checks
2756     { Label L;
2757       __ mov(O3, G1);           // spill: overlap test smashes O3
2758       __ mov(O4, G4);           // spill: overlap test smashes O4
2759       array_overlap_test(L, LogBytesPerHeapOop);
2760       __ stop("checkcast_copy within a single array");
2761       __ bind(L);
2762       __ mov(G1, O3);
2763       __ mov(G4, O4);
2764     }
2765 #endif //ASSERT
2766 
2767     if (entry != NULL) {
2768       *entry = __ pc();
2769       // caller can pass a 64-bit byte count here (from generic stub)
2770       BLOCK_COMMENT("Entry:");
2771     }
2772     gen_write_ref_array_pre_barrier(O1_to, O2_count, dest_uninitialized);
2773 
2774     Label load_element, store_element, do_card_marks, fail, done;
2775     __ addcc(O2_count, 0, G1_remain);   // initialize loop index, and test it
2776     __ brx(Assembler::notZero, false, Assembler::pt, load_element);
2777     __ delayed()->mov(G0, O5_offset);   // offset from start of arrays
2778 
2779     // Empty array:  Nothing to do.
2780     inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
2781     __ retl();
2782     __ delayed()->set(0, O0);           // return 0 on (trivial) success
2783 
2784     // ======== begin loop ========
2785     // (Loop is rotated; its entry is load_element.)
2786     // Loop variables:
2787     //   (O5 = 0; ; O5 += wordSize) --- offset from src, dest arrays
2788     //   (O2 = len; O2 != 0; O2--) --- number of oops *remaining*
2789     //   G3, G4, G5 --- current oop, oop.klass, oop.klass.super
2790     __ align(OptoLoopAlignment);
2791 
2792     __ BIND(store_element);
2793     __ deccc(G1_remain);                // decrement the count
2794     __ store_heap_oop(G3_oop, O1_to, O5_offset); // store the oop
2795     __ inc(O5_offset, heapOopSize);     // step to next offset
2796     __ brx(Assembler::zero, true, Assembler::pt, do_card_marks);
2797     __ delayed()->set(0, O0);           // return 0 on success
2798 
2799     // ======== loop entry is here ========
2800     __ BIND(load_element);
2801     __ load_heap_oop(O0_from, O5_offset, G3_oop);  // load the oop
2802     __ br_null_short(G3_oop, Assembler::pt, store_element);
2803 
2804     __ load_klass(G3_oop, G4_klass); // query the object klass
2805 
2806     generate_type_check(G4_klass, O3_ckoff, O4_ckval, G5_super,
2807                         // branch to this on success:
2808                         store_element);
2809     // ======== end loop ========
2810 
2811     // It was a real error; we must depend on the caller to finish the job.
2812     // Register G1 has number of *remaining* oops, O2 number of *total* oops.
2813     // Emit GC store barriers for the oops we have copied (O2 minus G1),
2814     // and report their number to the caller.
2815     __ BIND(fail);
2816     __ subcc(O2_count, G1_remain, O2_count);
2817     __ brx(Assembler::zero, false, Assembler::pt, done);
2818     __ delayed()->not1(O2_count, O0);   // report (-1^K) to caller
2819 
2820     __ BIND(do_card_marks);
2821     gen_write_ref_array_post_barrier(O1_to, O2_count, O3);   // store check on O1[0..O2]
2822 
2823     __ BIND(done);
2824     inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
2825     __ retl();
2826     __ delayed()->nop();             // return value in O0
2827 
2828     return start;
2829   }
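
       // Illustration (C-like sketch, not compiled) of the checked copy above,
       // where 'element_type_check' stands for the subtype check emitted by
       // generate_type_check():
       //
       //   for (K = 0; K < count; K++) {
       //     oop o = from[K];
       //     if (o != NULL && !element_type_check(o->klass(), ckoff, ckval))
       //       return ~K;             // K elements were copied before the failure
       //     to[K] = o;               // card marks cover the copied range
       //   }
       //   return 0;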
2830 
2831 
2832   //  Generate 'unsafe' array copy stub
2833   //  Though just as safe as the other stubs, it takes an unscaled
2834   //  size_t argument instead of an element count.
2835   //
2836   // Arguments for generated stub:
2837   //      from:  O0
2838   //      to:    O1
2839   //      count: O2 byte count, treated as ssize_t, can be zero
2840   //
2841   // Examines the alignment of the operands and dispatches
2842   // to a long, int, short, or byte copy loop.
2843   //
2844   address generate_unsafe_copy(const char* name,
2845                                address byte_copy_entry,
2846                                address short_copy_entry,
2847                                address int_copy_entry,
2848                                address long_copy_entry) {
2849 
2850     const Register O0_from   = O0;      // source array address
2851     const Register O1_to     = O1;      // destination array address
2852     const Register O2_count  = O2;      // elements count
2853 
2854     const Register G1_bits   = G1;      // test copy of low bits
2855 
2856     __ align(CodeEntryAlignment);
2857     StubCodeMark mark(this, "StubRoutines", name);
2858     address start = __ pc();
2859 
2860     // bump this on entry, not on exit:
2861     inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, G1, G3);
2862 
2863     __ or3(O0_from, O1_to, G1_bits);
2864     __ or3(O2_count,       G1_bits, G1_bits);
2865 
2866     __ btst(BytesPerLong-1, G1_bits);
2867     __ br(Assembler::zero, true, Assembler::pt,
2868           long_copy_entry, relocInfo::runtime_call_type);
2869     // scale the count on the way out:
2870     __ delayed()->srax(O2_count, LogBytesPerLong, O2_count);
2871 
2872     __ btst(BytesPerInt-1, G1_bits);
2873     __ br(Assembler::zero, true, Assembler::pt,
2874           int_copy_entry, relocInfo::runtime_call_type);
2875     // scale the count on the way out:
2876     __ delayed()->srax(O2_count, LogBytesPerInt, O2_count);
2877 
2878     __ btst(BytesPerShort-1, G1_bits);
2879     __ br(Assembler::zero, true, Assembler::pt,
2880           short_copy_entry, relocInfo::runtime_call_type);
2881     // scale the count on the way out:
2882     __ delayed()->srax(O2_count, LogBytesPerShort, O2_count);
2883 
2884     __ br(Assembler::always, false, Assembler::pt,
2885           byte_copy_entry, relocInfo::runtime_call_type);
2886     __ delayed()->nop();
2887 
2888     return start;
2889   }
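
       // Illustration (C-like sketch, not compiled) of the dispatch above,
       // where jlong_copy etc. stand for the entry points passed in:
       //
       //   bits = (uintptr_t) from | (uintptr_t) to | (uintptr_t) count;
       //   if ((bits & (BytesPerLong  - 1)) == 0) return jlong_copy (from, to, count >> 3);
       //   if ((bits & (BytesPerInt   - 1)) == 0) return jint_copy  (from, to, count >> 2);
       //   if ((bits & (BytesPerShort - 1)) == 0) return jshort_copy(from, to, count >> 1);
       //   return jbyte_copy(from, to, count);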
2890 
2891 
2892   // Perform range checks on the proposed arraycopy.
2893   // Kills the two temps, but nothing else.
2894   // Also, clean the sign bits of src_pos and dst_pos.
2895   void arraycopy_range_checks(Register src,     // source array oop (O0)
2896                               Register src_pos, // source position (O1)
2897                               Register dst,     // destination array oop (O2)
2898                               Register dst_pos, // destination position (O3)
2899                               Register length,  // length of copy (O4)
2900                               Register temp1, Register temp2,
2901                               Label& L_failed) {
2902     BLOCK_COMMENT("arraycopy_range_checks:");
2903 
2904     //  if (src_pos + length > arrayOop(src)->length() ) FAIL;
2905 
2906     const Register array_length = temp1;  // scratch
2907     const Register end_pos      = temp2;  // scratch
2908 
2909     // Note:  This next instruction may be in the delay slot of a branch:
2910     __ add(length, src_pos, end_pos);  // src_pos + length
2911     __ lduw(src, arrayOopDesc::length_offset_in_bytes(), array_length);
2912     __ cmp(end_pos, array_length);
2913     __ br(Assembler::greater, false, Assembler::pn, L_failed);
2914 
2915     //  if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
2916     __ delayed()->add(length, dst_pos, end_pos); // dst_pos + length
2917     __ lduw(dst, arrayOopDesc::length_offset_in_bytes(), array_length);
2918     __ cmp(end_pos, array_length);
2919     __ br(Assembler::greater, false, Assembler::pn, L_failed);
2920 
2921     // Have to clean up the high 32 bits of 'src_pos' and 'dst_pos'.
2922     // A sign-extending move suffices, since both values are known to be non-negative here.
2923     __ delayed()->signx(src_pos, src_pos);
2924     __ signx(dst_pos, dst_pos);
2925 
2926     BLOCK_COMMENT("arraycopy_range_checks done");
2927   }
2928 
2929 
2930   //
2931   //  Generate generic array copy stubs
2932   //
2933   //  Input:
2934   //    O0    -  src oop
2935   //    O1    -  src_pos
2936   //    O2    -  dst oop
2937   //    O3    -  dst_pos
2938   //    O4    -  element count
2939   //
2940   //  Output:
2941   //    O0 ==  0  -  success
2942   //    O0 == -1  -  need to call System.arraycopy
2943   //
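       // Caller-side view, as a hedged C sketch (the stub is really entered
       // by register convention, not by a C call):
       //   // returns 0 on success, -1 to request a System.arraycopy fallback
       //   int generic_arraycopy(oop src, int src_pos, oop dst, int dst_pos, int length);
       //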
2944   address generate_generic_copy(const char *name,
2945                                 address entry_jbyte_arraycopy,
2946                                 address entry_jshort_arraycopy,
2947                                 address entry_jint_arraycopy,
2948                                 address entry_oop_arraycopy,
2949                                 address entry_jlong_arraycopy,
2950                                 address entry_checkcast_arraycopy) {
2951     Label L_failed, L_objArray;
2952 
2953     // Input registers
2954     const Register src      = O0;  // source array oop
2955     const Register src_pos  = O1;  // source position
2956     const Register dst      = O2;  // destination array oop
2957     const Register dst_pos  = O3;  // destination position
2958     const Register length   = O4;  // elements count
2959 
2960     // registers used as temp
2961     const Register G3_src_klass = G3; // source array klass
2962     const Register G4_dst_klass = G4; // destination array klass
2963     const Register G5_lh        = G5; // layout helper
2964     const Register O5_temp      = O5;
2965 
2966     __ align(CodeEntryAlignment);
2967     StubCodeMark mark(this, "StubRoutines", name);
2968     address start = __ pc();
2969 
2970     // bump this on entry, not on exit:
2971     inc_counter_np(SharedRuntime::_generic_array_copy_ctr, G1, G3);
2972 
2973     // In principle, the int arguments could be dirty.
2974     //assert_clean_int(src_pos, G1);
2975     //assert_clean_int(dst_pos, G1);
2976     //assert_clean_int(length, G1);
2977 
2978     //-----------------------------------------------------------------------
2979     // Assembler stubs will be used for this call to arraycopy
2980     // if the following conditions are met:
2981     //
2982     // (1) src and dst must not be null.
2983     // (2) src_pos must not be negative.
2984     // (3) dst_pos must not be negative.
2985     // (4) length  must not be negative.
2986     // (5) src klass and dst klass should be the same and not NULL.
2987     // (6) src and dst should be arrays.
2988     // (7) src_pos + length must not exceed length of src.
2989     // (8) dst_pos + length must not exceed length of dst.
2990     BLOCK_COMMENT("arraycopy initial argument checks");
2991 
2992     //  if (src == NULL) return -1;
2993     __ br_null(src, false, Assembler::pn, L_failed);
2994 
2995     //  if (src_pos < 0) return -1;
2996     __ delayed()->tst(src_pos);
2997     __ br(Assembler::negative, false, Assembler::pn, L_failed);
2998     __ delayed()->nop();
2999 
3000     //  if (dst == NULL) return -1;
3001     __ br_null(dst, false, Assembler::pn, L_failed);
3002 
3003     //  if (dst_pos < 0) return -1;
3004     __ delayed()->tst(dst_pos);
3005     __ br(Assembler::negative, false, Assembler::pn, L_failed);
3006 
3007     //  if (length < 0) return -1;
3008     __ delayed()->tst(length);
3009     __ br(Assembler::negative, false, Assembler::pn, L_failed);
3010 
3011     BLOCK_COMMENT("arraycopy argument klass checks");
3012     //  get src->klass()
3013     if (UseCompressedKlassPointers) {
3014       __ delayed()->nop(); // wasted delay slot: load_klass() is a multi-instruction sequence and cannot go there
3015       __ load_klass(src, G3_src_klass);
3016     } else {
3017       __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), G3_src_klass);
3018     }
3019 
3020 #ifdef ASSERT
3021     //  assert(src->klass() != NULL);
3022     BLOCK_COMMENT("assert klasses not null");
3023     { Label L_a, L_b;
3024       __ br_notnull_short(G3_src_klass, Assembler::pt, L_b); // it is broken if klass is NULL
3025       __ bind(L_a);
3026       __ stop("broken null klass");
3027       __ bind(L_b);
3028       __ load_klass(dst, G4_dst_klass);
3029       __ br_null(G4_dst_klass, false, Assembler::pn, L_a); // this would be broken also
3030       __ delayed()->mov(G0, G4_dst_klass);      // scribble the temp
3031       BLOCK_COMMENT("assert done");
3032     }
3033 #endif
3034 
3035     // Load layout helper
3036     //
3037     //  |array_tag|     | header_size | element_type |     |log2_element_size|
3038     // 32        30    24            16              8     2                 0
3039     //
3040     //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
3041     //
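         // Field extraction, as a C sketch (using the masks/shifts from
         // klass.hpp that the code below relies on):
         //   int tag         = lh >> Klass::_lh_array_tag_shift;
         //   int header_size = (lh >> Klass::_lh_header_size_shift) & Klass::_lh_header_size_mask;
         //   int log2_esize  = lh & Klass::_lh_log2_element_size_mask;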
3042 
3043     int lh_offset = in_bytes(Klass::layout_helper_offset());
3044 
3045     // Load the 32-bit signed value. Use the br() instruction with it (not brx()) so the icc condition codes are checked.
3046     __ lduw(G3_src_klass, lh_offset, G5_lh);
3047 
3048     if (UseCompressedKlassPointers) {
3049       __ load_klass(dst, G4_dst_klass);
3050     }
3051     // Handle objArrays completely differently...
3052     juint objArray_lh = Klass::array_layout_helper(T_OBJECT);
3053     __ set(objArray_lh, O5_temp);
3054     __ cmp(G5_lh,       O5_temp);
3055     __ br(Assembler::equal, false, Assembler::pt, L_objArray);
3056     if (UseCompressedKlassPointers) {
3057       __ delayed()->nop();
3058     } else {
3059       __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
3060     }
3061 
3062     //  if (src->klass() != dst->klass()) return -1;
3063     __ cmp_and_brx_short(G3_src_klass, G4_dst_klass, Assembler::notEqual, Assembler::pn, L_failed);
3064 
3065     //  if (!src->is_Array()) return -1;
3066     __ cmp(G5_lh, Klass::_lh_neutral_value); // < 0
3067     __ br(Assembler::greaterEqual, false, Assembler::pn, L_failed);
3068 
3069     // At this point, it is known to be a typeArray (array_tag 0x3).
3070 #ifdef ASSERT
3071     __ delayed()->nop();
3072     { Label L;
3073       jint lh_prim_tag_in_place = (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
3074       __ set(lh_prim_tag_in_place, O5_temp);
3075       __ cmp(G5_lh,                O5_temp);
3076       __ br(Assembler::greaterEqual, false, Assembler::pt, L);
3077       __ delayed()->nop();
3078       __ stop("must be a primitive array");
3079       __ bind(L);
3080     }
3081 #else
3082     __ delayed();                               // match next insn to prev branch
3083 #endif
3084 
3085     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
3086                            O5_temp, G4_dst_klass, L_failed);
3087 
3088     // TypeArrayKlass
3089     //
3090     // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
3091     // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
3092     //
3093 
3094     const Register G4_offset = G4_dst_klass;    // array offset
3095     const Register G3_elsize = G3_src_klass;    // log2 element size
3096 
3097     __ srl(G5_lh, Klass::_lh_header_size_shift, G4_offset);
3098     __ and3(G4_offset, Klass::_lh_header_size_mask, G4_offset); // array_offset
3099     __ add(src, G4_offset, src);       // src array offset
3100     __ add(dst, G4_offset, dst);       // dst array offset
3101     __ and3(G5_lh, Klass::_lh_log2_element_size_mask, G3_elsize); // log2 element size
3102 
3103     // The following registers must be set before the jump to the corresponding stub:
3104     const Register from     = O0;  // source array address
3105     const Register to       = O1;  // destination array address
3106     const Register count    = O2;  // elements count
3107 
3108     // 'from', 'to', 'count' must be set in this order,
3109     // since they alias 'src', 'src_pos', 'dst' respectively.
3110 
3111     BLOCK_COMMENT("scale indexes to element size");
3112     __ sll_ptr(src_pos, G3_elsize, src_pos);
3113     __ sll_ptr(dst_pos, G3_elsize, dst_pos);
3114     __ add(src, src_pos, from);       // src_addr
3115     __ add(dst, dst_pos, to);         // dst_addr
3116 
3117     BLOCK_COMMENT("choose copy loop based on element size");
3118     __ cmp(G3_elsize, 0);
3119     __ br(Assembler::equal, true, Assembler::pt, entry_jbyte_arraycopy);
3120     __ delayed()->signx(length, count); // length
3121 
3122     __ cmp(G3_elsize, LogBytesPerShort);
3123     __ br(Assembler::equal, true, Assembler::pt, entry_jshort_arraycopy);
3124     __ delayed()->signx(length, count); // length
3125 
3126     __ cmp(G3_elsize, LogBytesPerInt);
3127     __ br(Assembler::equal, true, Assembler::pt, entry_jint_arraycopy);
3128     __ delayed()->signx(length, count); // length
3129 #ifdef ASSERT
3130     { Label L;
3131       __ cmp_and_br_short(G3_elsize, LogBytesPerLong, Assembler::equal, Assembler::pt, L);
3132       __ stop("must be long copy, but elsize is wrong");
3133       __ bind(L);
3134     }
3135 #endif
3136     __ br(Assembler::always, false, Assembler::pt, entry_jlong_arraycopy);
3137     __ delayed()->signx(length, count); // length
3138 
3139     // ObjArrayKlass
3140   __ BIND(L_objArray);
3141     // live at this point:  G3_src_klass, G4_dst_klass, src[_pos], dst[_pos], length
3142 
3143     Label L_plain_copy, L_checkcast_copy;
3144     //  test array classes for subtyping
3145     __ cmp(G3_src_klass, G4_dst_klass);         // usual case is exact equality
3146     __ brx(Assembler::notEqual, true, Assembler::pn, L_checkcast_copy);
3147     __ delayed()->lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted from below
3148 
3149     // Identically typed arrays can be copied without element-wise checks.
3150     arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
3151                            O5_temp, G5_lh, L_failed);
3152 
3153     __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
3154     __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
3155     __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
3156     __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
3157     __ add(src, src_pos, from);       // src_addr
3158     __ add(dst, dst_pos, to);         // dst_addr
3159   __ BIND(L_plain_copy);
3160     __ br(Assembler::always, false, Assembler::pt, entry_oop_arraycopy);
3161     __ delayed()->signx(length, count); // length
3162 
3163   __ BIND(L_checkcast_copy);
3164     // live at this point:  G3_src_klass, G4_dst_klass
3165     {
3166       // Before looking at dst.length, make sure dst is also an objArray.
3167       // lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted to delay slot
3168       __ cmp(G5_lh,                    O5_temp);
3169       __ br(Assembler::notEqual, false, Assembler::pn, L_failed);
3170 
3171       // It is safe to examine both src.length and dst.length.
3172       __ delayed();                             // match next insn to prev branch
3173       arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
3174                              O5_temp, G5_lh, L_failed);
3175 
3176       // Marshal the base address arguments now, freeing registers.
3177       __ add(src, arrayOopDesc::base_offset_in_bytes(T_OBJECT), src); //src offset
3178       __ add(dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT), dst); //dst offset
3179       __ sll_ptr(src_pos, LogBytesPerHeapOop, src_pos);
3180       __ sll_ptr(dst_pos, LogBytesPerHeapOop, dst_pos);
3181       __ add(src, src_pos, from);               // src_addr
3182       __ add(dst, dst_pos, to);                 // dst_addr
3183       __ signx(length, count);                  // length (reloaded)
3184 
3185       Register sco_temp = O3;                   // this register is free now
3186       assert_different_registers(from, to, count, sco_temp,
3187                                  G4_dst_klass, G3_src_klass);
3188 
3189       // Generate the type check.
3190       int sco_offset = in_bytes(Klass::super_check_offset_offset());
3191       __ lduw(G4_dst_klass, sco_offset, sco_temp);
3192       generate_type_check(G3_src_klass, sco_temp, G4_dst_klass,
3193                           O5_temp, L_plain_copy);
3194 
3195       // Fetch destination element klass from the ObjArrayKlass header.
3196       int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
3197 
3198       // the checkcast_copy loop needs two extra arguments:
3199       __ ld_ptr(G4_dst_klass, ek_offset, O4);   // dest elem klass
3200       // lduw(O4, sco_offset, O3);              // sco of elem klass
3201 
3202       __ br(Assembler::always, false, Assembler::pt, entry_checkcast_arraycopy);
3203       __ delayed()->lduw(O4, sco_offset, O3);
3204     }
3205 
3206   __ BIND(L_failed);
3207     __ retl();
3208     __ delayed()->sub(G0, 1, O0); // return -1
3209     return start;
3210   }
3211 
3212   //
3213   //  Generate stub for heap zeroing.
3214   //  "to" address is aligned to jlong (8 bytes).
3215   //
3216   // Arguments for generated stub:
3217   //      to:    O0
3218   //      count: O1 treated as signed (count of HeapWords)
3219   //             count could be 0
3220   //
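       // Caller-side view, as a hedged C sketch (the function-pointer type
       // here is an assumption for illustration):
       //   typedef void (*zero_words_fn)(HeapWord* to, intptr_t count);
       //   ((zero_words_fn)StubRoutines::zero_aligned_words())(to, count);
       //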
3221   address generate_zero_aligned_words(const char* name) {
3222     __ align(CodeEntryAlignment);
3223     StubCodeMark mark(this, "StubRoutines", name);
3224     address start = __ pc();
3225 
3226     const Register to    = O0;   // destination array address
3227     const Register count = O1;   // HeapWords count
3228     const Register temp  = O2;   // scratch
3229 
3230     Label Ldone;
3231     __ sllx(count, LogHeapWordSize, count); // to bytes count
3232     // Use BIS (Block Initializing Store) for zeroing
3233     __ bis_zeroing(to, count, temp, Ldone);
3234     __ bind(Ldone);
3235     __ retl();
3236     __ delayed()->nop();
3237     return start;
3238   }
3239 
3240   void generate_arraycopy_stubs() {
3241     address entry;
3242     address entry_jbyte_arraycopy;
3243     address entry_jshort_arraycopy;
3244     address entry_jint_arraycopy;
3245     address entry_oop_arraycopy;
3246     address entry_jlong_arraycopy;
3247     address entry_checkcast_arraycopy;
3248 
3249     //*** jbyte
3250     // Always need aligned and unaligned versions
3251     StubRoutines::_jbyte_disjoint_arraycopy         = generate_disjoint_byte_copy(false, &entry,
3252                                                                                   "jbyte_disjoint_arraycopy");
3253     StubRoutines::_jbyte_arraycopy                  = generate_conjoint_byte_copy(false, entry,
3254                                                                                   &entry_jbyte_arraycopy,
3255                                                                                   "jbyte_arraycopy");
3256     StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry,
3257                                                                                   "arrayof_jbyte_disjoint_arraycopy");
3258     StubRoutines::_arrayof_jbyte_arraycopy          = generate_conjoint_byte_copy(true, entry, NULL,
3259                                                                                   "arrayof_jbyte_arraycopy");
3260 
3261     //*** jshort
3262     // Always need aligned and unaligned versions
3263     StubRoutines::_jshort_disjoint_arraycopy         = generate_disjoint_short_copy(false, &entry,
3264                                                                                     "jshort_disjoint_arraycopy");
3265     StubRoutines::_jshort_arraycopy                  = generate_conjoint_short_copy(false, entry,
3266                                                                                     &entry_jshort_arraycopy,
3267                                                                                     "jshort_arraycopy");
3268     StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry,
3269                                                                                     "arrayof_jshort_disjoint_arraycopy");
3270     StubRoutines::_arrayof_jshort_arraycopy          = generate_conjoint_short_copy(true, entry, NULL,
3271                                                                                     "arrayof_jshort_arraycopy");
3272 
3273     //*** jint
3274     // Aligned versions
3275     StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, &entry,
3276                                                                                 "arrayof_jint_disjoint_arraycopy");
3277     StubRoutines::_arrayof_jint_arraycopy          = generate_conjoint_int_copy(true, entry, &entry_jint_arraycopy,
3278                                                                                 "arrayof_jint_arraycopy");
3279 #ifdef _LP64
3280     // In 64-bit we need both aligned and unaligned versions of jint arraycopy.
3281     // entry_jint_arraycopy always points to the unaligned version (notice that we overwrite it).
3282     StubRoutines::_jint_disjoint_arraycopy         = generate_disjoint_int_copy(false, &entry,
3283                                                                                 "jint_disjoint_arraycopy");
3284     StubRoutines::_jint_arraycopy                  = generate_conjoint_int_copy(false, entry,
3285                                                                                 &entry_jint_arraycopy,
3286                                                                                 "jint_arraycopy");
3287 #else
3288     // In 32-bit, jints are always HeapWordSize aligned, so always use the aligned version
3289     // (in fact, in 32-bit we always have a pre-loop part even in the aligned version,
3290     //  because it uses 64-bit loads/stores, so the aligned flag is actually ignored).
3291     StubRoutines::_jint_disjoint_arraycopy = StubRoutines::_arrayof_jint_disjoint_arraycopy;
3292     StubRoutines::_jint_arraycopy          = StubRoutines::_arrayof_jint_arraycopy;
3293 #endif
3294 
3295 
3296     //*** jlong
3297     // It is always aligned
3298     StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, &entry,
3299                                                                                   "arrayof_jlong_disjoint_arraycopy");
3300     StubRoutines::_arrayof_jlong_arraycopy          = generate_conjoint_long_copy(true, entry, &entry_jlong_arraycopy,
3301                                                                                   "arrayof_jlong_arraycopy");
3302     StubRoutines::_jlong_disjoint_arraycopy         = StubRoutines::_arrayof_jlong_disjoint_arraycopy;
3303     StubRoutines::_jlong_arraycopy                  = StubRoutines::_arrayof_jlong_arraycopy;
3304 
3305 
3306     //*** oops
3307     // Aligned versions
3308     StubRoutines::_arrayof_oop_disjoint_arraycopy        = generate_disjoint_oop_copy(true, &entry,
3309                                                                                       "arrayof_oop_disjoint_arraycopy");
3310     StubRoutines::_arrayof_oop_arraycopy                 = generate_conjoint_oop_copy(true, entry, &entry_oop_arraycopy,
3311                                                                                       "arrayof_oop_arraycopy");
3312     // Aligned versions without pre-barriers
3313     StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, &entry,
3314                                                                                       "arrayof_oop_disjoint_arraycopy_uninit",
3315                                                                                       /*dest_uninitialized*/true);
3316     StubRoutines::_arrayof_oop_arraycopy_uninit          = generate_conjoint_oop_copy(true, entry, NULL,
3317                                                                                       "arrayof_oop_arraycopy_uninit",
3318                                                                                       /*dest_uninitialized*/true);
3319 #ifdef _LP64
3320     if (UseCompressedOops) {
3321       // With compressed oops we need unaligned versions, notice that we overwrite entry_oop_arraycopy.
3322       StubRoutines::_oop_disjoint_arraycopy            = generate_disjoint_oop_copy(false, &entry,
3323                                                                                     "oop_disjoint_arraycopy");
3324       StubRoutines::_oop_arraycopy                     = generate_conjoint_oop_copy(false, entry, &entry_oop_arraycopy,
3325                                                                                     "oop_arraycopy");
3326       // Unaligned versions without pre-barriers
3327       StubRoutines::_oop_disjoint_arraycopy_uninit     = generate_disjoint_oop_copy(false, &entry,
3328                                                                                     "oop_disjoint_arraycopy_uninit",
3329                                                                                     /*dest_uninitialized*/true);
3330       StubRoutines::_oop_arraycopy_uninit              = generate_conjoint_oop_copy(false, entry, NULL,
3331                                                                                     "oop_arraycopy_uninit",
3332                                                                                     /*dest_uninitialized*/true);
3333     } else
3334 #endif
3335     {
3336       // oop arraycopy is always aligned on 32-bit, and on 64-bit without compressed oops
3337       StubRoutines::_oop_disjoint_arraycopy            = StubRoutines::_arrayof_oop_disjoint_arraycopy;
3338       StubRoutines::_oop_arraycopy                     = StubRoutines::_arrayof_oop_arraycopy;
3339       StubRoutines::_oop_disjoint_arraycopy_uninit     = StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit;
3340       StubRoutines::_oop_arraycopy_uninit              = StubRoutines::_arrayof_oop_arraycopy_uninit;
3341     }
3342 
3343     StubRoutines::_checkcast_arraycopy        = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
3344     StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL,
3345                                                                         /*dest_uninitialized*/true);
3346 
3347     StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy("unsafe_arraycopy",
3348                                                               entry_jbyte_arraycopy,
3349                                                               entry_jshort_arraycopy,
3350                                                               entry_jint_arraycopy,
3351                                                               entry_jlong_arraycopy);
3352     StubRoutines::_generic_arraycopy   = generate_generic_copy("generic_arraycopy",
3353                                                                entry_jbyte_arraycopy,
3354                                                                entry_jshort_arraycopy,
3355                                                                entry_jint_arraycopy,
3356                                                                entry_oop_arraycopy,
3357                                                                entry_jlong_arraycopy,
3358                                                                entry_checkcast_arraycopy);
3359 
3360     StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
3361     StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
3362     StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
3363     StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
3364     StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
3365     StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
3366 
3367     if (UseBlockZeroing) {
3368       StubRoutines::_zero_aligned_words = generate_zero_aligned_words("zero_aligned_words");
3369     }
3370   }
3371 
3372   void generate_initial() {
3373     // Generates all stubs and initializes the entry points
3374 
3375     //------------------------------------------------------------------------------------------------------------------------
3376     // entry points that exist in all platforms
3377     // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than
3378     //       the disadvantage of having a much more complicated generator structure. See also comment in stubRoutines.hpp.
3379     StubRoutines::_forward_exception_entry                 = generate_forward_exception();
3380 
3381     StubRoutines::_call_stub_entry                         = generate_call_stub(StubRoutines::_call_stub_return_address);
3382     StubRoutines::_catch_exception_entry                   = generate_catch_exception();
3383 
3384     //------------------------------------------------------------------------------------------------------------------------
3385     // entry points that are platform specific
3386     StubRoutines::Sparc::_test_stop_entry                  = generate_test_stop();
3387 
3388     StubRoutines::Sparc::_stop_subroutine_entry            = generate_stop_subroutine();
3389     StubRoutines::Sparc::_flush_callers_register_windows_entry = generate_flush_callers_register_windows();
3390 
3391 #if !defined(COMPILER2) && !defined(_LP64)
3392     StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
3393     StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
3394     StubRoutines::_atomic_add_entry          = generate_atomic_add();
3395     StubRoutines::_atomic_xchg_ptr_entry     = StubRoutines::_atomic_xchg_entry;
3396     StubRoutines::_atomic_cmpxchg_ptr_entry  = StubRoutines::_atomic_cmpxchg_entry;
3397     StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
3398     StubRoutines::_atomic_add_ptr_entry      = StubRoutines::_atomic_add_entry;
3399 #endif  // !COMPILER2 && !_LP64
3400 
3401     // Build this early so it's available for the interpreter.
3402     StubRoutines::_throw_StackOverflowError_entry          = generate_throw_exception("StackOverflowError throw_exception",           CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
3403   }
3404 
3405 
3406   void generate_all() {
3407     // Generates all stubs and initializes the entry points
3408 
3409     // Generate partial_subtype_check first here since its code depends on
3410     // UseZeroBaseCompressedOops which is defined after heap initialization.
3411     StubRoutines::Sparc::_partial_subtype_check                = generate_partial_subtype_check();
3412     // These entry points require SharedInfo::stack0 to be set up in non-core builds
3413     StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
3414     StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
3415     StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
3416 
3417     StubRoutines::_handler_for_unsafe_access_entry =
3418       generate_handler_for_unsafe_access();
3419 
3420     // support for verify_oop (must happen after universe_init)
3421     StubRoutines::_verify_oop_subroutine_entry     = generate_verify_oop_subroutine();
3422 
3423     // arraycopy stubs used by compilers
3424     generate_arraycopy_stubs();
3425 
3426     // Don't initialize the platform math functions since sparc
3427     // doesn't have intrinsics for these operations.
3428   }
3429 
3430 
3431  public:
3432   StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
3433     // replace the standard masm with a special one:
3434     _masm = new MacroAssembler(code);
3435 
3436     _stub_count = !all ? 0x100 : 0x200;
3437     if (all) {
3438       generate_all();
3439     } else {
3440       generate_initial();
3441     }
3442 
3443     // make sure this stub is available for all local calls
3444     if (_atomic_add_stub.is_unbound()) {
3445       // generate a second time, if necessary
3446       (void) generate_atomic_add();
3447     }
3448   }
3449 
3450 
3451  private:
3452   int _stub_count;
3453   void stub_prolog(StubCodeDesc* cdesc) {
3454 #ifdef ASSERT
3455     // put extra information in the stub code, to make it more readable
3456 #ifdef _LP64
3457     // Write the high part of the address
3458     // [RGV] Check if there is a dependency on the size of this prolog
3459     __ emit_data((intptr_t)cdesc >> 32, relocInfo::none);
3460 #endif
3461     __ emit_data((intptr_t)cdesc, relocInfo::none);
3462     __ emit_data(++_stub_count, relocInfo::none);
3463 #endif
3464     align(true);
3465   }
3466 
3467   void align(bool at_header = false) {
3468     // %%%%% move this constant somewhere else
3469     // UltraSPARC cache line size is 8 instructions:
3470     const unsigned int icache_line_size = 32;
3471     const unsigned int icache_half_line_size = 16;
3472 
3473     if (at_header) {
3474       while ((intptr_t)(__ pc()) % icache_line_size != 0) {
3475         __ emit_data(0, relocInfo::none);
3476       }
3477     } else {
3478       while ((intptr_t)(__ pc()) % icache_half_line_size != 0) {
3479         __ nop();
3480       }
3481     }
3482   }
3483 
3484 }; // end class declaration
3485 
3486 void StubGenerator_generate(CodeBuffer* code, bool all) {
3487   StubGenerator g(code, all);
3488 }